Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

No conflicts.

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit 272ac32f56
Jakub Kicinski <kuba@kernel.org>, 2022-07-28 18:21:16 -07:00
152 changed files with 1603 additions and 896 deletions

@@ -60,6 +60,10 @@ Arnd Bergmann <arnd@arndb.de>
 Atish Patra <atishp@atishpatra.org> <atish.patra@wdc.com>
 Axel Dyks <xl@xlsigned.net>
 Axel Lin <axel.lin@gmail.com>
+Baolin Wang <baolin.wang@linux.alibaba.com> <baolin.wang@linaro.org>
+Baolin Wang <baolin.wang@linux.alibaba.com> <baolin.wang@spreadtrum.com>
+Baolin Wang <baolin.wang@linux.alibaba.com> <baolin.wang@unisoc.com>
+Baolin Wang <baolin.wang@linux.alibaba.com> <baolin.wang7@gmail.com>
 Bart Van Assche <bvanassche@acm.org> <bart.vanassche@sandisk.com>
 Bart Van Assche <bvanassche@acm.org> <bart.vanassche@wdc.com>
 Ben Gardner <bgardner@wabtec.com>
@@ -135,6 +139,8 @@ Frank Rowand <frowand.list@gmail.com> <frowand@mvista.com>
 Frank Zago <fzago@systemfabricworks.com>
 Gao Xiang <xiang@kernel.org> <gaoxiang25@huawei.com>
 Gao Xiang <xiang@kernel.org> <hsiangkao@aol.com>
+Gao Xiang <xiang@kernel.org> <hsiangkao@linux.alibaba.com>
+Gao Xiang <xiang@kernel.org> <hsiangkao@redhat.com>
 Gerald Schaefer <gerald.schaefer@linux.ibm.com> <geraldsc@de.ibm.com>
 Gerald Schaefer <gerald.schaefer@linux.ibm.com> <gerald.schaefer@de.ibm.com>
 Gerald Schaefer <gerald.schaefer@linux.ibm.com> <geraldsc@linux.vnet.ibm.com>
@@ -371,6 +377,7 @@ Sean Nyekjaer <sean@geanix.com> <sean.nyekjaer@prevas.dk>
 Sebastian Reichel <sre@kernel.org> <sebastian.reichel@collabora.co.uk>
 Sebastian Reichel <sre@kernel.org> <sre@debian.org>
 Sedat Dilek <sedat.dilek@gmail.com> <sedat.dilek@credativ.de>
+Seth Forshee <sforshee@kernel.org> <seth.forshee@canonical.com>
 Shiraz Hashim <shiraz.linux.kernel@gmail.com> <shiraz.hashim@st.com>
 Shuah Khan <shuah@kernel.org> <shuahkhan@gmail.com>
 Shuah Khan <shuah@kernel.org> <shuah.khan@hp.com>

@@ -5796,6 +5796,24 @@
 			expediting. Set to zero to disable automatic
 			expediting.
 
+	srcutree.srcu_max_nodelay [KNL]
+			Specifies the number of no-delay instances
+			per jiffy for which the SRCU grace period
+			worker thread will be rescheduled with zero
+			delay. Beyond this limit, worker thread will
+			be rescheduled with a sleep delay of one jiffy.
+
+	srcutree.srcu_max_nodelay_phase [KNL]
+			Specifies the per-grace-period phase, number of
+			non-sleeping polls of readers. Beyond this limit,
+			grace period worker thread will be rescheduled
+			with a sleep delay of one jiffy, between each
+			rescan of the readers, for a grace period phase.
+
+	srcutree.srcu_retry_check_delay [KNL]
+			Specifies number of microseconds of non-sleeping
+			delay between each non-sleeping poll of readers.
+
 	srcutree.small_contention_lim [KNL]
 			Specifies the number of update-side contention
 			events per jiffy will be tolerated before

@@ -155,70 +155,65 @@ properties:
         - in-band-status
 
   fixed-link:
-    allOf:
-      - if:
-          type: array
-        then:
-          deprecated: true
-          items:
-            - minimum: 0
-              maximum: 31
-              description:
-                Emulated PHY ID, choose any but unique to the all
-                specified fixed-links
-
-            - enum: [0, 1]
-              description:
-                Duplex configuration. 0 for half duplex or 1 for
-                full duplex
-
-            - enum: [10, 100, 1000, 2500, 10000]
-              description:
-                Link speed in Mbits/sec.
-
-            - enum: [0, 1]
-              description:
-                Pause configuration. 0 for no pause, 1 for pause
-
-            - enum: [0, 1]
-              description:
-                Asymmetric pause configuration. 0 for no asymmetric
-                pause, 1 for asymmetric pause
-
-      - if:
-          type: object
-        then:
-          properties:
-            speed:
-              description:
-                Link speed.
-              $ref: /schemas/types.yaml#/definitions/uint32
-              enum: [10, 100, 1000, 2500, 10000]
-
-            full-duplex:
-              $ref: /schemas/types.yaml#/definitions/flag
-              description:
-                Indicates that full-duplex is used. When absent, half
-                duplex is assumed.
-
-            pause:
-              $ref: /schemas/types.yaml#definitions/flag
-              description:
-                Indicates that pause should be enabled.
-
-            asym-pause:
-              $ref: /schemas/types.yaml#/definitions/flag
-              description:
-                Indicates that asym_pause should be enabled.
-
-            link-gpios:
-              maxItems: 1
-              description:
-                GPIO to determine if the link is up
-
-          required:
-            - speed
+    oneOf:
+      - $ref: /schemas/types.yaml#/definitions/uint32-array
+        deprecated: true
+        items:
+          - minimum: 0
+            maximum: 31
+            description:
+              Emulated PHY ID, choose any but unique to the all
+              specified fixed-links
+
+          - enum: [0, 1]
+            description:
+              Duplex configuration. 0 for half duplex or 1 for
+              full duplex
+
+          - enum: [10, 100, 1000, 2500, 10000]
+            description:
+              Link speed in Mbits/sec.
+
+          - enum: [0, 1]
+            description:
+              Pause configuration. 0 for no pause, 1 for pause
+
+          - enum: [0, 1]
+            description:
+              Asymmetric pause configuration. 0 for no asymmetric
+              pause, 1 for asymmetric pause
+
+      - type: object
+        additionalProperties: false
+        properties:
+          speed:
+            description:
+              Link speed.
+            $ref: /schemas/types.yaml#/definitions/uint32
+            enum: [10, 100, 1000, 2500, 10000]
+
+          full-duplex:
+            $ref: /schemas/types.yaml#/definitions/flag
+            description:
+              Indicates that full-duplex is used. When absent, half
+              duplex is assumed.
+
+          pause:
+            $ref: /schemas/types.yaml#definitions/flag
+            description:
+              Indicates that pause should be enabled.
+
+          asym-pause:
+            $ref: /schemas/types.yaml#/definitions/flag
+            description:
+              Indicates that asym_pause should be enabled.
+
+          link-gpios:
+            maxItems: 1
+            description:
+              GPIO to determine if the link is up
+
+        required:
+          - speed
 
 allOf:
   - if:

@@ -187,6 +187,7 @@ properties:
       Should specify the gpio for phy reset.
 
   phy-reset-duration:
+    $ref: /schemas/types.yaml#/definitions/uint32
     deprecated: true
     description:
       Reset duration in milliseconds. Should present only if property
@@ -195,12 +196,14 @@ properties:
       and 1 millisecond will be used instead.
 
   phy-reset-active-high:
+    type: boolean
     deprecated: true
     description:
       If present then the reset sequence using the GPIO specified in the
       "phy-reset-gpios" property is reversed (H=reset state, L=operation state).
 
   phy-reset-post-delay:
+    $ref: /schemas/types.yaml#/definitions/uint32
     deprecated: true
     description:
       Post reset delay in milliseconds. If present then a delay of phy-reset-post-delay

@@ -2884,7 +2884,14 @@ sctp_rmem - vector of 3 INTEGERs: min, default, max
 	Default: 4K
 
 sctp_wmem - vector of 3 INTEGERs: min, default, max
-	Currently this tunable has no effect.
+	Only the first value ("min") is used, "default" and "max" are
+	ignored.
+
+	min: Minimum size of send buffer that can be used by SCTP sockets.
+	It is guaranteed to each SCTP socket (but not association) even
+	under moderate memory pressure.
+
+	Default: 4K
 
 addr_scope_policy - INTEGER
 	Control IPv4 address scoping - draft-stewart-tsvwg-sctp-ipv4-00

@@ -5658,7 +5658,7 @@ by a string of size ``name_size``.
   #define KVM_STATS_UNIT_SECONDS	(0x2 << KVM_STATS_UNIT_SHIFT)
   #define KVM_STATS_UNIT_CYCLES		(0x3 << KVM_STATS_UNIT_SHIFT)
   #define KVM_STATS_UNIT_BOOLEAN	(0x4 << KVM_STATS_UNIT_SHIFT)
-  #define KVM_STATS_UNIT_MAX		KVM_STATS_UNIT_CYCLES
+  #define KVM_STATS_UNIT_MAX		KVM_STATS_UNIT_BOOLEAN
 
   #define KVM_STATS_BASE_SHIFT 8
   #define KVM_STATS_BASE_MASK (0xF << KVM_STATS_BASE_SHIFT)

@@ -15862,7 +15862,7 @@ PIN CONTROLLER - FREESCALE
 M:	Dong Aisheng <aisheng.dong@nxp.com>
 M:	Fabio Estevam <festevam@gmail.com>
 M:	Shawn Guo <shawnguo@kernel.org>
-M:	Stefan Agner <stefan@agner.ch>
+M:	Jacky Bai <ping.bai@nxp.com>
 R:	Pengutronix Kernel Team <kernel@pengutronix.de>
 L:	linux-gpio@vger.kernel.org
 S:	Maintained

@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 19
 SUBLEVEL = 0
-EXTRAVERSION = -rc7
+EXTRAVERSION = -rc8
 NAME = Superb Owl
 
 # *DOCUMENTATION*

@@ -38,7 +38,7 @@
 	sys_clk: sys_clk {
 		compatible = "fixed-clock";
 		#clock-cells = <0>;
-		clock-frequency = <162500000>;
+		clock-frequency = <165625000>;
 	};
 
 	cpu_clk: cpu_clk {

@@ -549,7 +549,7 @@ static struct pxa2xx_spi_controller corgi_spi_info = {
 };
 
 static struct gpiod_lookup_table corgi_spi_gpio_table = {
-	.dev_id = "pxa2xx-spi.1",
+	.dev_id = "spi1",
 	.table = {
 		GPIO_LOOKUP_IDX("gpio-pxa", CORGI_GPIO_ADS7846_CS, "cs", 0, GPIO_ACTIVE_LOW),
 		GPIO_LOOKUP_IDX("gpio-pxa", CORGI_GPIO_LCDCON_CS, "cs", 1, GPIO_ACTIVE_LOW),

@@ -635,7 +635,7 @@ static struct pxa2xx_spi_controller pxa_ssp2_master_info = {
 };
 
 static struct gpiod_lookup_table pxa_ssp2_gpio_table = {
-	.dev_id = "pxa2xx-spi.2",
+	.dev_id = "spi2",
 	.table = {
 		GPIO_LOOKUP_IDX("gpio-pxa", GPIO88_HX4700_TSC2046_CS, "cs", 0, GPIO_ACTIVE_LOW),
 		{ },

@@ -140,7 +140,7 @@ struct platform_device pxa_spi_ssp4 = {
 };
 
 static struct gpiod_lookup_table pxa_ssp3_gpio_table = {
-	.dev_id = "pxa2xx-spi.3",
+	.dev_id = "spi3",
 	.table = {
 		GPIO_LOOKUP_IDX("gpio-pxa", ICONTROL_MCP251x_nCS1, "cs", 0, GPIO_ACTIVE_LOW),
 		GPIO_LOOKUP_IDX("gpio-pxa", ICONTROL_MCP251x_nCS2, "cs", 1, GPIO_ACTIVE_LOW),
@@ -149,7 +149,7 @@ static struct gpiod_lookup_table pxa_ssp3_gpio_table = {
 };
 
 static struct gpiod_lookup_table pxa_ssp4_gpio_table = {
-	.dev_id = "pxa2xx-spi.4",
+	.dev_id = "spi4",
 	.table = {
 		GPIO_LOOKUP_IDX("gpio-pxa", ICONTROL_MCP251x_nCS3, "cs", 0, GPIO_ACTIVE_LOW),
 		GPIO_LOOKUP_IDX("gpio-pxa", ICONTROL_MCP251x_nCS4, "cs", 1, GPIO_ACTIVE_LOW),

@@ -207,7 +207,7 @@ static struct spi_board_info littleton_spi_devices[] __initdata = {
 };
 
 static struct gpiod_lookup_table littleton_spi_gpio_table = {
-	.dev_id = "pxa2xx-spi.2",
+	.dev_id = "spi2",
 	.table = {
 		GPIO_LOOKUP_IDX("gpio-pxa", LITTLETON_GPIO_LCD_CS, "cs", 0, GPIO_ACTIVE_LOW),
 		{ },

@@ -994,7 +994,7 @@ static struct pxa2xx_spi_controller magician_spi_info = {
 };
 
 static struct gpiod_lookup_table magician_spi_gpio_table = {
-	.dev_id = "pxa2xx-spi.2",
+	.dev_id = "spi2",
 	.table = {
 		/* NOTICE must be GPIO, incompatibility with hw PXA SPI framing */
 		GPIO_LOOKUP_IDX("gpio-pxa", GPIO14_MAGICIAN_TSC2046_CS, "cs", 0, GPIO_ACTIVE_LOW),

@@ -578,7 +578,7 @@ static struct pxa2xx_spi_controller spitz_spi_info = {
 };
 
 static struct gpiod_lookup_table spitz_spi_gpio_table = {
-	.dev_id = "pxa2xx-spi.2",
+	.dev_id = "spi2",
 	.table = {
 		GPIO_LOOKUP_IDX("gpio-pxa", SPITZ_GPIO_ADS7846_CS, "cs", 0, GPIO_ACTIVE_LOW),
 		GPIO_LOOKUP_IDX("gpio-pxa", SPITZ_GPIO_LCDCON_CS, "cs", 1, GPIO_ACTIVE_LOW),

@@ -623,7 +623,7 @@ static struct pxa2xx_spi_controller pxa_ssp2_master_info = {
 };
 
 static struct gpiod_lookup_table pxa_ssp1_gpio_table = {
-	.dev_id = "pxa2xx-spi.1",
+	.dev_id = "spi1",
 	.table = {
 		GPIO_LOOKUP_IDX("gpio-pxa", GPIO24_ZIPITZ2_WIFI_CS, "cs", 0, GPIO_ACTIVE_LOW),
 		{ },
@@ -631,7 +631,7 @@ static struct gpiod_lookup_table pxa_ssp1_gpio_table = {
 };
 
 static struct gpiod_lookup_table pxa_ssp2_gpio_table = {
-	.dev_id = "pxa2xx-spi.2",
+	.dev_id = "spi2",
 	.table = {
 		GPIO_LOOKUP_IDX("gpio-pxa", GPIO88_ZIPITZ2_LCD_CS, "cs", 0, GPIO_ACTIVE_LOW),
 		{ },

@@ -73,6 +73,7 @@ ifeq ($(CONFIG_PERF_EVENTS),y)
 endif
 
 KBUILD_CFLAGS_MODULE += $(call cc-option,-mno-relax)
+KBUILD_AFLAGS_MODULE += $(call as-option,-Wa$(comma)-mno-relax)
 
 # GCC versions that support the "-mstrict-align" option default to allowing
 # unaligned accesses. While unaligned accesses are explicitly allowed in the

@@ -35,7 +35,7 @@
 	gpio-keys {
 		compatible = "gpio-keys";
 
-		key0 {
+		key {
 			label = "KEY0";
 			linux,code = <BTN_0>;
 			gpios = <&gpio0 10 GPIO_ACTIVE_LOW>;

@@ -47,7 +47,7 @@
 	gpio-keys {
 		compatible = "gpio-keys";
 
-		boot {
+		key-boot {
 			label = "BOOT";
 			linux,code = <BTN_0>;
 			gpios = <&gpio0 0 GPIO_ACTIVE_LOW>;

@@ -52,7 +52,7 @@
 	gpio-keys {
 		compatible = "gpio-keys";
 
-		boot {
+		key-boot {
 			label = "BOOT";
 			linux,code = <BTN_0>;
 			gpios = <&gpio0 0 GPIO_ACTIVE_LOW>;

@@ -46,19 +46,19 @@
 	gpio-keys {
 		compatible = "gpio-keys";
 
-		up {
+		key-up {
 			label = "UP";
 			linux,code = <BTN_1>;
 			gpios = <&gpio1_0 7 GPIO_ACTIVE_LOW>;
 		};
 
-		press {
+		key-press {
 			label = "PRESS";
 			linux,code = <BTN_0>;
 			gpios = <&gpio0 0 GPIO_ACTIVE_LOW>;
 		};
 
-		down {
+		key-down {
 			label = "DOWN";
 			linux,code = <BTN_2>;
 			gpios = <&gpio0 1 GPIO_ACTIVE_LOW>;

@@ -23,7 +23,7 @@
 	gpio-keys {
 		compatible = "gpio-keys";
 
-		boot {
+		key-boot {
 			label = "BOOT";
 			linux,code = <BTN_0>;
 			gpios = <&gpio0 0 GPIO_ACTIVE_LOW>;

@@ -78,7 +78,7 @@ obj-$(CONFIG_SMP) += cpu_ops_sbi.o
 endif
 obj-$(CONFIG_HOTPLUG_CPU)	+= cpu-hotplug.o
 obj-$(CONFIG_KGDB)		+= kgdb.o
-obj-$(CONFIG_KEXEC)		+= kexec_relocate.o crash_save_regs.o machine_kexec.o
+obj-$(CONFIG_KEXEC_CORE)	+= kexec_relocate.o crash_save_regs.o machine_kexec.o
 obj-$(CONFIG_KEXEC_FILE)	+= elf_kexec.o machine_kexec_file.o
 obj-$(CONFIG_CRASH_DUMP)	+= crash_dump.o

@@ -349,7 +349,7 @@ int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
 {
 	const char *strtab, *name, *shstrtab;
 	const Elf_Shdr *sechdrs;
-	Elf_Rela *relas;
+	Elf64_Rela *relas;
 	int i, r_type;
 
 	/* String & section header string table */

@@ -2,7 +2,7 @@
 /*
  * Kernel interface for the s390 arch_random_* functions
  *
- * Copyright IBM Corp. 2017, 2020
+ * Copyright IBM Corp. 2017, 2022
  *
  * Author: Harald Freudenberger <freude@de.ibm.com>
  *
@@ -14,6 +14,7 @@
 #ifdef CONFIG_ARCH_RANDOM
 
 #include <linux/static_key.h>
+#include <linux/preempt.h>
 #include <linux/atomic.h>
 #include <asm/cpacf.h>
 
@@ -32,7 +33,8 @@ static inline bool __must_check arch_get_random_int(unsigned int *v)
 
 static inline bool __must_check arch_get_random_seed_long(unsigned long *v)
 {
-	if (static_branch_likely(&s390_arch_random_available)) {
+	if (static_branch_likely(&s390_arch_random_available) &&
+	    in_task()) {
 		cpacf_trng(NULL, 0, (u8 *)v, sizeof(*v));
 		atomic64_add(sizeof(*v), &s390_arch_random_counter);
 		return true;
@@ -42,7 +44,8 @@ static inline bool __must_check arch_get_random_seed_long(unsigned long *v)
 
 static inline bool __must_check arch_get_random_seed_int(unsigned int *v)
 {
-	if (static_branch_likely(&s390_arch_random_available)) {
+	if (static_branch_likely(&s390_arch_random_available) &&
+	    in_task()) {
 		cpacf_trng(NULL, 0, (u8 *)v, sizeof(*v));
 		atomic64_add(sizeof(*v), &s390_arch_random_counter);
 		return true;

@@ -2474,7 +2474,7 @@ config RETHUNK
 	bool "Enable return-thunks"
 	depends on RETPOLINE && CC_HAS_RETURN_THUNK
 	select OBJTOOL if HAVE_OBJTOOL
-	default y
+	default y if X86_64
 	help
 	  Compile the kernel with the return-thunks compiler option to guard
 	  against kernel-to-user data leaks by avoiding return speculation.
@@ -2483,21 +2483,21 @@ config RETHUNK
 
 config CPU_UNRET_ENTRY
 	bool "Enable UNRET on kernel entry"
-	depends on CPU_SUP_AMD && RETHUNK
+	depends on CPU_SUP_AMD && RETHUNK && X86_64
 	default y
 	help
 	  Compile the kernel with support for the retbleed=unret mitigation.
 
 config CPU_IBPB_ENTRY
 	bool "Enable IBPB on kernel entry"
-	depends on CPU_SUP_AMD
+	depends on CPU_SUP_AMD && X86_64
 	default y
 	help
 	  Compile the kernel with support for the retbleed=ibpb mitigation.
 
 config CPU_IBRS_ENTRY
 	bool "Enable IBRS on kernel entry"
-	depends on CPU_SUP_INTEL
+	depends on CPU_SUP_INTEL && X86_64
 	default y
 	help
 	  Compile the kernel with support for the spectre_v2=ibrs mitigation.

@@ -27,6 +27,7 @@ RETHUNK_CFLAGS := -mfunction-return=thunk-extern
 RETPOLINE_CFLAGS += $(RETHUNK_CFLAGS)
 endif
 
+export RETHUNK_CFLAGS
 export RETPOLINE_CFLAGS
 export RETPOLINE_VDSO_CFLAGS

@@ -278,9 +278,9 @@ enum {
 };
 
 /*
- * For formats with LBR_TSX flags (e.g. LBR_FORMAT_EIP_FLAGS2), bits 61:62 in
- * MSR_LAST_BRANCH_FROM_x are the TSX flags when TSX is supported, but when
- * TSX is not supported they have no consistent behavior:
+ * For format LBR_FORMAT_EIP_FLAGS2, bits 61:62 in MSR_LAST_BRANCH_FROM_x
+ * are the TSX flags when TSX is supported, but when TSX is not supported
+ * they have no consistent behavior:
  *
  * - For wrmsr(), bits 61:62 are considered part of the sign extension.
  * - For HW updates (branch captures) bits 61:62 are always OFF and are not
@@ -288,7 +288,7 @@
  *
  * Therefore, if:
  *
- *   1) LBR has TSX format
+ *   1) LBR format LBR_FORMAT_EIP_FLAGS2
  *   2) CPU has no TSX support enabled
  *
  * ... then any value passed to wrmsr() must be sign extended to 63 bits and any
@@ -300,7 +300,7 @@ static inline bool lbr_from_signext_quirk_needed(void)
 	bool tsx_support = boot_cpu_has(X86_FEATURE_HLE) ||
			   boot_cpu_has(X86_FEATURE_RTM);
 
-	return !tsx_support && x86_pmu.lbr_has_tsx;
+	return !tsx_support;
 }
 
 static DEFINE_STATIC_KEY_FALSE(lbr_from_quirk_key);
@@ -1609,9 +1609,6 @@ void intel_pmu_lbr_init_hsw(void)
 	x86_pmu.lbr_sel_map  = hsw_lbr_sel_map;
 
 	x86_get_pmu(smp_processor_id())->task_ctx_cache = create_lbr_kmem_cache(size, 0);
-
-	if (lbr_from_signext_quirk_needed())
-		static_branch_enable(&lbr_from_quirk_key);
 }
 
 /* skylake */
@@ -1702,7 +1699,11 @@ void intel_pmu_lbr_init(void)
 	switch (x86_pmu.intel_cap.lbr_format) {
 	case LBR_FORMAT_EIP_FLAGS2:
 		x86_pmu.lbr_has_tsx = 1;
-		fallthrough;
+		x86_pmu.lbr_from_flags = 1;
+		if (lbr_from_signext_quirk_needed())
+			static_branch_enable(&lbr_from_quirk_key);
+		break;
+
 	case LBR_FORMAT_EIP_FLAGS:
 		x86_pmu.lbr_from_flags = 1;
 		break;

@@ -302,6 +302,7 @@
 #define X86_FEATURE_RETPOLINE_LFENCE	(11*32+13) /* "" Use LFENCE for Spectre variant 2 */
 #define X86_FEATURE_RETHUNK		(11*32+14) /* "" Use REturn THUNK */
 #define X86_FEATURE_UNRET		(11*32+15) /* "" AMD BTB untrain return */
+#define X86_FEATURE_USE_IBPB_FW		(11*32+16) /* "" Use IBPB during runtime firmware calls */
 
 /* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */
 #define X86_FEATURE_AVX_VNNI		(12*32+ 4) /* AVX VNNI instructions */

@@ -297,6 +297,8 @@ do {									\
 	alternative_msr_write(MSR_IA32_SPEC_CTRL,			\
 			      spec_ctrl_current() | SPEC_CTRL_IBRS,	\
 			      X86_FEATURE_USE_IBRS_FW);			\
+	alternative_msr_write(MSR_IA32_PRED_CMD, PRED_CMD_IBPB,		\
+			      X86_FEATURE_USE_IBPB_FW);			\
 } while (0)
 
 #define firmware_restrict_branch_speculation_end()			\

@@ -555,7 +555,9 @@ void __init_or_module noinline apply_returns(s32 *start, s32 *end)
 			dest = addr + insn.length + insn.immediate.value;
 
 		if (__static_call_fixup(addr, op, dest) ||
-		    WARN_ON_ONCE(dest != &__x86_return_thunk))
+		    WARN_ONCE(dest != &__x86_return_thunk,
+			      "missing return thunk: %pS-%pS: %*ph",
+			      addr, dest, 5, addr))
 			continue;
 
 		DPRINTK("return thunk at: %pS (%px) len: %d to: %pS",

@@ -975,6 +975,7 @@ static inline const char *spectre_v2_module_string(void) { return ""; }
 #define SPECTRE_V2_LFENCE_MSG "WARNING: LFENCE mitigation is not recommended for this CPU, data leaks possible!\n"
 #define SPECTRE_V2_EIBRS_EBPF_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS on, data leaks possible via Spectre v2 BHB attacks!\n"
 #define SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS+LFENCE mitigation and SMT, data leaks possible via Spectre v2 BHB attacks!\n"
+#define SPECTRE_V2_IBRS_PERF_MSG "WARNING: IBRS mitigation selected on Enhanced IBRS CPU, this may cause unnecessary performance loss\n"
 
 #ifdef CONFIG_BPF_SYSCALL
 void unpriv_ebpf_notify(int new_state)
@@ -1415,6 +1416,8 @@ static void __init spectre_v2_select_mitigation(void)
 
 	case SPECTRE_V2_IBRS:
 		setup_force_cpu_cap(X86_FEATURE_KERNEL_IBRS);
+		if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED))
+			pr_warn(SPECTRE_V2_IBRS_PERF_MSG);
 		break;
 
 	case SPECTRE_V2_LFENCE:
@@ -1516,7 +1519,16 @@ static void __init spectre_v2_select_mitigation(void)
 	 * the CPU supports Enhanced IBRS, kernel might un-intentionally not
	 * enable IBRS around firmware calls.
 	 */
-	if (boot_cpu_has(X86_FEATURE_IBRS) && !spectre_v2_in_ibrs_mode(mode)) {
+	if (boot_cpu_has_bug(X86_BUG_RETBLEED) &&
+	    (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
+	     boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)) {
+
+		if (retbleed_cmd != RETBLEED_CMD_IBPB) {
+			setup_force_cpu_cap(X86_FEATURE_USE_IBPB_FW);
+			pr_info("Enabling Speculation Barrier for firmware calls\n");
+		}
+
+	} else if (boot_cpu_has(X86_FEATURE_IBRS) && !spectre_v2_in_ibrs_mode(mode)) {
 		setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
 		pr_info("Enabling Restricted Speculation for firmware calls\n");
 	}

@@ -6029,6 +6029,11 @@ split_irqchip_unlock:
 		r = 0;
 		break;
 	case KVM_CAP_X86_USER_SPACE_MSR:
+		r = -EINVAL;
+		if (cap->args[0] & ~(KVM_MSR_EXIT_REASON_INVAL |
+				     KVM_MSR_EXIT_REASON_UNKNOWN |
+				     KVM_MSR_EXIT_REASON_FILTER))
+			break;
 		kvm->arch.user_space_msr_mask = cap->args[0];
 		r = 0;
 		break;
@@ -6183,6 +6188,9 @@ static int kvm_vm_ioctl_set_msr_filter(struct kvm *kvm, void __user *argp)
 	if (copy_from_user(&filter, user_msr_filter, sizeof(filter)))
 		return -EFAULT;
 
+	if (filter.flags & ~KVM_MSR_FILTER_DEFAULT_DENY)
+		return -EINVAL;
+
 	for (i = 0; i < ARRAY_SIZE(filter.ranges); i++)
 		empty &= !filter.ranges[i].nmsrs;

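As an illustration of the validation added in the hunk above (this example is not part of the merge): a minimal userspace sketch of enabling KVM_CAP_X86_USER_SPACE_MSR on a VM file descriptor. The helper name and the already-opened vm_fd are assumptions; with this check in place, only the three KVM_MSR_EXIT_REASON_* bits are accepted in args[0] and anything else makes the ioctl fail with -EINVAL instead of being silently stored.

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Hypothetical helper: vm_fd is an already-created KVM VM fd. */
static int enable_user_space_msr_exits(int vm_fd)
{
	struct kvm_enable_cap cap = {
		.cap = KVM_CAP_X86_USER_SPACE_MSR,
		/* only these three reason bits pass the new validation */
		.args[0] = KVM_MSR_EXIT_REASON_INVAL |
			   KVM_MSR_EXIT_REASON_UNKNOWN |
			   KVM_MSR_EXIT_REASON_FILTER,
	};

	return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);	/* 0 on success */
}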
@@ -43,6 +43,7 @@ config SYSTEM_TRUSTED_KEYRING
 	bool "Provide system-wide ring of trusted keys"
 	depends on KEYS
 	depends on ASYMMETRIC_KEY_TYPE
+	depends on X509_CERTIFICATE_PARSER
 	help
 	  Provide a system keyring to which trusted keys can be added.  Keys in
 	  the keyring are considered to be trusted.  Keys may be added at will

@@ -782,7 +782,8 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
 
 			if (!osc_cpc_flexible_adr_space_confirmed) {
 				pr_debug("Flexible address space capability not supported\n");
-				goto out_free;
+				if (!cpc_supported_by_cpu())
+					goto out_free;
 			}
 
 			addr = ioremap(gas_t->address, gas_t->bit_width/8);
@@ -809,7 +810,8 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
 			}
 			if (!osc_cpc_flexible_adr_space_confirmed) {
 				pr_debug("Flexible address space capability not supported\n");
-				goto out_free;
+				if (!cpc_supported_by_cpu())
+					goto out_free;
 			}
 		} else {
 			if (gas_t->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE || !cpc_ffh_supported()) {

@@ -213,7 +213,7 @@ static int lan966x_gate_clk_register(struct device *dev,
 
 		hw_data->hws[i] =
 			devm_clk_hw_register_gate(dev, clk_gate_desc[idx].name,
-						  "lan966x", 0, base,
+						  "lan966x", 0, gate_base,
 						  clk_gate_desc[idx].bit_idx,
 						  0, &clk_gate_lock);

@@ -351,6 +351,9 @@ static const struct regmap_config pca953x_i2c_regmap = {
 	.reg_bits = 8,
 	.val_bits = 8,
 
+	.use_single_read = true,
+	.use_single_write = true,
+
 	.readable_reg = pca953x_readable_register,
 	.writeable_reg = pca953x_writeable_register,
 	.volatile_reg = pca953x_volatile_register,
@@ -906,15 +909,18 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
 static int device_pca95xx_init(struct pca953x_chip *chip, u32 invert)
 {
 	DECLARE_BITMAP(val, MAX_LINE);
+	u8 regaddr;
 	int ret;
 
-	ret = regcache_sync_region(chip->regmap, chip->regs->output,
-				   chip->regs->output + NBANK(chip));
+	regaddr = pca953x_recalc_addr(chip, chip->regs->output, 0);
+	ret = regcache_sync_region(chip->regmap, regaddr,
+				   regaddr + NBANK(chip) - 1);
 	if (ret)
 		goto out;
 
-	ret = regcache_sync_region(chip->regmap, chip->regs->direction,
-				   chip->regs->direction + NBANK(chip));
+	regaddr = pca953x_recalc_addr(chip, chip->regs->direction, 0);
+	ret = regcache_sync_region(chip->regmap, regaddr,
+				   regaddr + NBANK(chip) - 1);
 	if (ret)
 		goto out;
 
@@ -1127,14 +1133,14 @@ static int pca953x_regcache_sync(struct device *dev)
 	 * sync these registers first and only then sync the rest.
 	 */
 	regaddr = pca953x_recalc_addr(chip, chip->regs->direction, 0);
-	ret = regcache_sync_region(chip->regmap, regaddr, regaddr + NBANK(chip));
+	ret = regcache_sync_region(chip->regmap, regaddr, regaddr + NBANK(chip) - 1);
 	if (ret) {
 		dev_err(dev, "Failed to sync GPIO dir registers: %d\n", ret);
 		return ret;
 	}
 
 	regaddr = pca953x_recalc_addr(chip, chip->regs->output, 0);
-	ret = regcache_sync_region(chip->regmap, regaddr, regaddr + NBANK(chip));
+	ret = regcache_sync_region(chip->regmap, regaddr, regaddr + NBANK(chip) - 1);
 	if (ret) {
 		dev_err(dev, "Failed to sync GPIO out registers: %d\n", ret);
 		return ret;
@@ -1144,7 +1150,7 @@ static int pca953x_regcache_sync(struct device *dev)
 	if (chip->driver_data & PCA_PCAL) {
 		regaddr = pca953x_recalc_addr(chip, PCAL953X_IN_LATCH, 0);
 		ret = regcache_sync_region(chip->regmap, regaddr,
-					   regaddr + NBANK(chip));
+					   regaddr + NBANK(chip) - 1);
 		if (ret) {
 			dev_err(dev, "Failed to sync INT latch registers: %d\n",
 				ret);
@@ -1153,7 +1159,7 @@ static int pca953x_regcache_sync(struct device *dev)
 
 		regaddr = pca953x_recalc_addr(chip, PCAL953X_INT_MASK, 0);
 		ret = regcache_sync_region(chip->regmap, regaddr,
-					   regaddr + NBANK(chip));
+					   regaddr + NBANK(chip) - 1);
 		if (ret) {
 			dev_err(dev, "Failed to sync INT mask registers: %d\n",
 				ret);

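The recurring "- 1" in the pca953x hunks above is the whole fix: regcache_sync_region() takes an inclusive last-register address, so a window of NBANK(chip) registers that starts at regaddr ends at regaddr + NBANK(chip) - 1. A standalone sketch of that arithmetic (illustrative names only, not driver code):

#include <stdio.h>

/* An inclusive window of 'nregs' registers starting at 'base' ends at
 * base + nregs - 1; without the "- 1" one extra register is touched.
 */
static void show_window(unsigned int base, unsigned int nregs)
{
	printf("sync %#x..%#x (%u regs)\n", base, base + nregs - 1, nregs);
}

int main(void)
{
	show_window(0x04, 2);	/* e.g. two banks at 0x04 and 0x05 */
	return 0;
}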
@@ -99,7 +99,7 @@ static inline void xgpio_set_value32(unsigned long *map, int bit, u32 v)
 	const unsigned long offset = (bit % BITS_PER_LONG) & BIT(5);
 
 	map[index] &= ~(0xFFFFFFFFul << offset);
-	map[index] |= v << offset;
+	map[index] |= (unsigned long)v << offset;
 }
 
 static inline int xgpio_regoffset(struct xgpio_instance *chip, int ch)

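The gpio-xilinx change above widens v before the shift. A minimal standalone sketch of why (assuming an LP64 target where unsigned long is 64 bits; this is illustration, not the driver):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t v = 0xdeadbeef;
	unsigned long offset = 32;	/* the driver computes 0 or 32 from the bit index */
	unsigned long word = 0;

	/* Shifting the 32-bit v by 32 directly is undefined behaviour in C and
	 * cannot reach the upper half of 'word'; widening first is well defined.
	 */
	word |= (unsigned long)v << offset;
	printf("word = %#lx\n", word);	/* 0xdeadbeef00000000 */
	return 0;
}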
@@ -1364,16 +1364,10 @@ void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
 					struct amdgpu_vm *vm)
 {
 	struct amdkfd_process_info *process_info = vm->process_info;
-	struct amdgpu_bo *pd = vm->root.bo;
 
 	if (!process_info)
 		return;
 
-	/* Release eviction fence from PD */
-	amdgpu_bo_reserve(pd, false);
-	amdgpu_bo_fence(pd, NULL, false);
-	amdgpu_bo_unreserve(pd);
-
 	/* Update process info */
 	mutex_lock(&process_info->lock);
 	process_info->n_vms--;

@@ -40,7 +40,7 @@ static void amdgpu_bo_list_free_rcu(struct rcu_head *rcu)
 {
 	struct amdgpu_bo_list *list = container_of(rcu, struct amdgpu_bo_list,
 						   rhead);
-
+	mutex_destroy(&list->bo_list_mutex);
 	kvfree(list);
 }
 
@@ -136,6 +136,7 @@ int amdgpu_bo_list_create(struct amdgpu_device *adev, struct drm_file *filp,
 
 	trace_amdgpu_cs_bo_status(list->num_entries, total_size);
 
+	mutex_init(&list->bo_list_mutex);
 	*result = list;
 	return 0;

@@ -47,6 +47,10 @@ struct amdgpu_bo_list {
 	struct amdgpu_bo *oa_obj;
 	unsigned first_userptr;
 	unsigned num_entries;
+
+	/* Protect access during command submission.
+	 */
+	struct mutex bo_list_mutex;
 };
 
 int amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id,

@@ -519,6 +519,8 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 			return r;
 	}
 
+	mutex_lock(&p->bo_list->bo_list_mutex);
+
 	/* One for TTM and one for the CS job */
 	amdgpu_bo_list_for_each_entry(e, p->bo_list)
 		e->tv.num_shared = 2;
@@ -651,6 +653,7 @@ out_free_user_pages:
 			kvfree(e->user_pages);
 			e->user_pages = NULL;
 		}
+		mutex_unlock(&p->bo_list->bo_list_mutex);
 	}
 	return r;
 }
@@ -690,9 +693,11 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
 {
 	unsigned i;
 
-	if (error && backoff)
+	if (error && backoff) {
 		ttm_eu_backoff_reservation(&parser->ticket,
 					   &parser->validated);
+		mutex_unlock(&parser->bo_list->bo_list_mutex);
+	}
 
 	for (i = 0; i < parser->num_post_deps; i++) {
 		drm_syncobj_put(parser->post_deps[i].syncobj);
@@ -832,12 +837,16 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
 			continue;
 
 		r = amdgpu_vm_bo_update(adev, bo_va, false);
-		if (r)
+		if (r) {
+			mutex_unlock(&p->bo_list->bo_list_mutex);
 			return r;
+		}
 
 		r = amdgpu_sync_fence(&p->job->sync, bo_va->last_pt_update);
-		if (r)
+		if (r) {
+			mutex_unlock(&p->bo_list->bo_list_mutex);
 			return r;
+		}
 	}
 
 	r = amdgpu_vm_handle_moved(adev, vm);
@@ -1278,6 +1287,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 
 	ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
 	mutex_unlock(&p->adev->notifier_lock);
+	mutex_unlock(&p->bo_list->bo_list_mutex);
 
 	return 0;

@@ -1653,7 +1653,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
 	adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
 #endif
-	if (dc_enable_dmub_notifications(adev->dm.dc)) {
+	if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
 		init_completion(&adev->dm.dmub_aux_transfer_done);
 		adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
 		if (!adev->dm.dmub_notify) {
@@ -1689,6 +1689,13 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 		goto error;
 	}
 
+	/* Enable outbox notification only after IRQ handlers are registered and DMUB is alive.
+	 * It is expected that DMUB will resend any pending notifications at this point, for
+	 * example HPD from DPIA.
+	 */
+	if (dc_is_dmub_outbox_supported(adev->dm.dc))
+		dc_enable_dmub_outbox(adev->dm.dc);
+
 	/* create fake encoders for MST */
 	dm_dp_create_fake_mst_encoders(adev);
 
@@ -2678,9 +2685,6 @@ static int dm_resume(void *handle)
 		 */
 		link_enc_cfg_copy(adev->dm.dc->current_state, dc_state);
 
-		if (dc_enable_dmub_notifications(adev->dm.dc))
-			amdgpu_dm_outbox_init(adev);
-
 		r = dm_dmub_hw_init(adev);
 		if (r)
 			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
@@ -2698,6 +2702,11 @@ static int dm_resume(void *handle)
 			}
 		}
 
+		if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
+			amdgpu_dm_outbox_init(adev);
+			dc_enable_dmub_outbox(adev->dm.dc);
+		}
+
 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
 
 		dm_gpureset_commit_state(dm->cached_dc_state, dm);
@@ -2719,13 +2728,15 @@ static int dm_resume(void *handle)
 	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
 	dc_resource_state_construct(dm->dc, dm_state->context);
 
-	/* Re-enable outbox interrupts for DPIA. */
-	if (dc_enable_dmub_notifications(adev->dm.dc))
-		amdgpu_dm_outbox_init(adev);
-
 	/* Before powering on DC we need to re-initialize DMUB. */
 	dm_dmub_hw_resume(adev);
 
+	/* Re-enable outbox interrupts for DPIA. */
+	if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
+		amdgpu_dm_outbox_init(adev);
+		dc_enable_dmub_outbox(adev->dm.dc);
+	}
+
 	/* power on hardware */
 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);

@@ -64,8 +64,13 @@ int drm_gem_ttm_vmap(struct drm_gem_object *gem,
 		     struct iosys_map *map)
 {
 	struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gem);
+	int ret;
 
-	return ttm_bo_vmap(bo, map);
+	dma_resv_lock(gem->resv, NULL);
+	ret = ttm_bo_vmap(bo, map);
+	dma_resv_unlock(gem->resv);
+
+	return ret;
 }
 EXPORT_SYMBOL(drm_gem_ttm_vmap);
 
@@ -82,7 +87,9 @@ void drm_gem_ttm_vunmap(struct drm_gem_object *gem,
 {
 	struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gem);
 
+	dma_resv_lock(gem->resv, NULL);
 	ttm_bo_vunmap(bo, map);
+	dma_resv_unlock(gem->resv);
 }
 EXPORT_SYMBOL(drm_gem_ttm_vunmap);

@@ -273,10 +273,17 @@ struct intel_context {
 		u8 child_index;
 		/** @guc: GuC specific members for parallel submission */
 		struct {
-			/** @wqi_head: head pointer in work queue */
+			/** @wqi_head: cached head pointer in work queue */
 			u16 wqi_head;
-			/** @wqi_tail: tail pointer in work queue */
+			/** @wqi_tail: cached tail pointer in work queue */
 			u16 wqi_tail;
+			/** @wq_head: pointer to the actual head in work queue */
+			u32 *wq_head;
+			/** @wq_tail: pointer to the actual head in work queue */
+			u32 *wq_tail;
+			/** @wq_status: pointer to the status in work queue */
+			u32 *wq_status;
+
 			/**
 			 * @parent_page: page in context state (ce->state) used
 			 * by parent for work queue, process descriptor

@@ -661,6 +661,16 @@ static inline void execlists_schedule_out(struct i915_request *rq)
 	i915_request_put(rq);
 }
 
+static u32 map_i915_prio_to_lrc_desc_prio(int prio)
+{
+	if (prio > I915_PRIORITY_NORMAL)
+		return GEN12_CTX_PRIORITY_HIGH;
+	else if (prio < I915_PRIORITY_NORMAL)
+		return GEN12_CTX_PRIORITY_LOW;
+	else
+		return GEN12_CTX_PRIORITY_NORMAL;
+}
+
 static u64 execlists_update_context(struct i915_request *rq)
 {
 	struct intel_context *ce = rq->context;
@@ -669,7 +679,7 @@ static u64 execlists_update_context(struct i915_request *rq)
 	desc = ce->lrc.desc;
 
 	if (rq->engine->flags & I915_ENGINE_HAS_EU_PRIORITY)
-		desc |= lrc_desc_priority(rq_prio(rq));
+		desc |= map_i915_prio_to_lrc_desc_prio(rq_prio(rq));
 
 	/*
 	 * WaIdleLiteRestore:bdw,skl

@@ -111,16 +111,6 @@ enum {
 #define XEHP_SW_COUNTER_SHIFT			58
 #define XEHP_SW_COUNTER_WIDTH			6
 
-static inline u32 lrc_desc_priority(int prio)
-{
-	if (prio > I915_PRIORITY_NORMAL)
-		return GEN12_CTX_PRIORITY_HIGH;
-	else if (prio < I915_PRIORITY_NORMAL)
-		return GEN12_CTX_PRIORITY_LOW;
-	else
-		return GEN12_CTX_PRIORITY_NORMAL;
-}
-
 static inline void lrc_runtime_start(struct intel_context *ce)
 {
 	struct intel_context_stats *stats = &ce->stats;

@@ -122,6 +122,9 @@ enum intel_guc_action {
 	INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_DONE = 0x1002,
 	INTEL_GUC_ACTION_SCHED_ENGINE_MODE_SET = 0x1003,
 	INTEL_GUC_ACTION_SCHED_ENGINE_MODE_DONE = 0x1004,
+	INTEL_GUC_ACTION_V69_SET_CONTEXT_PRIORITY = 0x1005,
+	INTEL_GUC_ACTION_V69_SET_CONTEXT_EXECUTION_QUANTUM = 0x1006,
+	INTEL_GUC_ACTION_V69_SET_CONTEXT_PREEMPTION_TIMEOUT = 0x1007,
 	INTEL_GUC_ACTION_CONTEXT_RESET_NOTIFICATION = 0x1008,
 	INTEL_GUC_ACTION_ENGINE_FAILURE_NOTIFICATION = 0x1009,
 	INTEL_GUC_ACTION_HOST2GUC_UPDATE_CONTEXT_POLICIES = 0x100B,

@@ -170,6 +170,11 @@ struct intel_guc {
 	/** @ads_engine_usage_size: size of engine usage in the ADS */
 	u32 ads_engine_usage_size;
 
+	/** @lrc_desc_pool_v69: object allocated to hold the GuC LRC descriptor pool */
+	struct i915_vma *lrc_desc_pool_v69;
+	/** @lrc_desc_pool_vaddr_v69: contents of the GuC LRC descriptor pool */
+	void *lrc_desc_pool_vaddr_v69;
+
 	/**
 	 * @context_lookup: used to resolve intel_context from guc_id, if a
	 * context is present in this structure it is registered with the GuC

@@ -203,6 +203,20 @@ struct guc_wq_item {
 	u32 fence_id;
 } __packed;
 
+struct guc_process_desc_v69 {
+	u32 stage_id;
+	u64 db_base_addr;
+	u32 head;
+	u32 tail;
+	u32 error_offset;
+	u64 wq_base_addr;
+	u32 wq_size_bytes;
+	u32 wq_status;
+	u32 engine_presence;
+	u32 priority;
+	u32 reserved[36];
+} __packed;
+
 struct guc_sched_wq_desc {
 	u32 head;
 	u32 tail;
@@ -227,6 +241,37 @@ struct guc_ctxt_registration_info {
 };
 #define CONTEXT_REGISTRATION_FLAG_KMD	BIT(0)
 
+/* Preempt to idle on quantum expiry */
+#define CONTEXT_POLICY_FLAG_PREEMPT_TO_IDLE_V69	BIT(0)
+
+/*
+ * GuC Context registration descriptor.
+ * FIXME: This is only required to exist during context registration.
+ * The current 1:1 between guc_lrc_desc and LRCs for the lifetime of the LRC
+ * is not required.
+ */
+struct guc_lrc_desc_v69 {
+	u32 hw_context_desc;
+	u32 slpm_perf_mode_hint;	/* SPLC v1 only */
+	u32 slpm_freq_hint;
+	u32 engine_submit_mask;		/* In logical space */
+	u8 engine_class;
+	u8 reserved0[3];
+	u32 priority;
+	u32 process_desc;
+	u32 wq_addr;
+	u32 wq_size;
+	u32 context_flags;		/* CONTEXT_REGISTRATION_* */
+	/* Time for one workload to execute. (in micro seconds) */
+	u32 execution_quantum;
+	/* Time to wait for a preemption request to complete before issuing a
+	 * reset. (in micro seconds).
+	 */
+	u32 preemption_timeout;
+	u32 policy_flags;		/* CONTEXT_POLICY_* */
+	u32 reserved1[19];
+} __packed;
+
 /* 32-bit KLV structure as used by policy updates and others */
 struct guc_klv_generic_dw_t {
 	u32 kl;

@@ -414,12 +414,15 @@ struct sync_semaphore {
 };
 
 struct parent_scratch {
-	struct guc_sched_wq_desc wq_desc;
+	union guc_descs {
+		struct guc_sched_wq_desc wq_desc;
+		struct guc_process_desc_v69 pdesc;
+	} descs;
 
 	struct sync_semaphore go;
 	struct sync_semaphore join[MAX_ENGINE_INSTANCE + 1];
 
-	u8 unused[WQ_OFFSET - sizeof(struct guc_sched_wq_desc) -
+	u8 unused[WQ_OFFSET - sizeof(union guc_descs) -
		  sizeof(struct sync_semaphore) * (MAX_ENGINE_INSTANCE + 2)];
 
 	u32 wq[WQ_SIZE / sizeof(u32)];
@@ -456,17 +459,23 @@ __get_parent_scratch(struct intel_context *ce)
		   LRC_STATE_OFFSET) / sizeof(u32)));
 }
 
-static struct guc_sched_wq_desc *
-__get_wq_desc(struct intel_context *ce)
+static struct guc_process_desc_v69 *
+__get_process_desc_v69(struct intel_context *ce)
 {
	struct parent_scratch *ps = __get_parent_scratch(ce);
 
-	return &ps->wq_desc;
+	return &ps->descs.pdesc;
 }
 
-static u32 *get_wq_pointer(struct guc_sched_wq_desc *wq_desc,
-			   struct intel_context *ce,
-			   u32 wqi_size)
+static struct guc_sched_wq_desc *
+__get_wq_desc_v70(struct intel_context *ce)
+{
+	struct parent_scratch *ps = __get_parent_scratch(ce);
+
+	return &ps->descs.wq_desc;
+}
+
+static u32 *get_wq_pointer(struct intel_context *ce, u32 wqi_size)
 {
	/*
	 * Check for space in work queue. Caching a value of head pointer in
@@ -476,7 +485,7 @@ static u32 *get_wq_pointer(struct guc_sched_wq_desc *wq_desc,
 #define AVAILABLE_SPACE	\
	CIRC_SPACE(ce->parallel.guc.wqi_tail, ce->parallel.guc.wqi_head, WQ_SIZE)
	if (wqi_size > AVAILABLE_SPACE) {
-		ce->parallel.guc.wqi_head = READ_ONCE(wq_desc->head);
+		ce->parallel.guc.wqi_head = READ_ONCE(*ce->parallel.guc.wq_head);
 
		if (wqi_size > AVAILABLE_SPACE)
			return NULL;
@@ -495,11 +504,55 @@ static inline struct intel_context *__get_context(struct intel_guc *guc, u32 id)
	return ce;
 }
 
+static struct guc_lrc_desc_v69 *__get_lrc_desc_v69(struct intel_guc *guc, u32 index)
+{
+	struct guc_lrc_desc_v69 *base = guc->lrc_desc_pool_vaddr_v69;
+
+	if (!base)
+		return NULL;
+
+	GEM_BUG_ON(index >= GUC_MAX_CONTEXT_ID);
+
+	return &base[index];
+}
+
+static int guc_lrc_desc_pool_create_v69(struct intel_guc *guc)
+{
+	u32 size;
+	int ret;
+
+	size = PAGE_ALIGN(sizeof(struct guc_lrc_desc_v69) *
+			  GUC_MAX_CONTEXT_ID);
+	ret = intel_guc_allocate_and_map_vma(guc, size, &guc->lrc_desc_pool_v69,
+					     (void **)&guc->lrc_desc_pool_vaddr_v69);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static void guc_lrc_desc_pool_destroy_v69(struct intel_guc *guc)
+{
+	if (!guc->lrc_desc_pool_vaddr_v69)
+		return;
+
+	guc->lrc_desc_pool_vaddr_v69 = NULL;
+	i915_vma_unpin_and_release(&guc->lrc_desc_pool_v69, I915_VMA_RELEASE_MAP);
+}
+
 static inline bool guc_submission_initialized(struct intel_guc *guc)
 {
	return guc->submission_initialized;
 }
 
+static inline void _reset_lrc_desc_v69(struct intel_guc *guc, u32 id)
+{
+	struct guc_lrc_desc_v69 *desc = __get_lrc_desc_v69(guc, id);
+
+	if (desc)
+		memset(desc, 0, sizeof(*desc));
+}
+
 static inline bool ctx_id_mapped(struct intel_guc *guc, u32 id)
 {
	return __get_context(guc, id);
@@ -526,6 +579,8 @@ static inline void clr_ctx_id_mapping(struct intel_guc *guc, u32 id)
	if (unlikely(!guc_submission_initialized(guc)))
		return;
 
+	_reset_lrc_desc_v69(guc, id);
+
	/*
	 * xarray API doesn't have xa_erase_irqsave wrapper, so calling
	 * the lower level functions directly.
@@ -611,7 +666,7 @@ int intel_guc_wait_for_idle(struct intel_guc *guc, long timeout)
					true, timeout);
 }
 
-static int guc_context_policy_init(struct intel_context *ce, bool loop);
+static int guc_context_policy_init_v70(struct intel_context *ce, bool loop);
 static int try_context_registration(struct intel_context *ce, bool loop);
 
 static int __guc_add_request(struct intel_guc *guc, struct i915_request *rq)
@@ -639,7 +694,7 @@ static int __guc_add_request(struct intel_guc *guc, struct i915_request *rq)
	GEM_BUG_ON(context_guc_id_invalid(ce));
 
	if (context_policy_required(ce)) {
-		err = guc_context_policy_init(ce, false);
+		err = guc_context_policy_init_v70(ce, false);
		if (err)
			return err;
	}
@@ -737,9 +792,7 @@ static u32 wq_space_until_wrap(struct intel_context *ce)
	return (WQ_SIZE - ce->parallel.guc.wqi_tail);
 }
 
-static void write_wqi(struct guc_sched_wq_desc *wq_desc,
-		      struct intel_context *ce,
-		      u32 wqi_size)
+static void write_wqi(struct intel_context *ce, u32 wqi_size)
 {
	BUILD_BUG_ON(!is_power_of_2(WQ_SIZE));
 
@@ -750,13 +803,12 @@ static void write_wqi(struct guc_sched_wq_desc *wq_desc,
	ce->parallel.guc.wqi_tail = (ce->parallel.guc.wqi_tail + wqi_size) &
		(WQ_SIZE - 1);
-	WRITE_ONCE(wq_desc->tail, ce->parallel.guc.wqi_tail);
+	WRITE_ONCE(*ce->parallel.guc.wq_tail, ce->parallel.guc.wqi_tail);
 }
 
 static int guc_wq_noop_append(struct intel_context *ce)
 {
-	struct guc_sched_wq_desc *wq_desc = __get_wq_desc(ce);
-	u32 *wqi = get_wq_pointer(wq_desc, ce, wq_space_until_wrap(ce));
+	u32 *wqi = get_wq_pointer(ce, wq_space_until_wrap(ce));
	u32 len_dw = wq_space_until_wrap(ce) / sizeof(u32) - 1;
 
	if (!wqi)
@@ -775,7 +827,6 @@ static int __guc_wq_item_append(struct i915_request *rq)
 {
	struct intel_context *ce = request_to_scheduling_context(rq);
	struct intel_context *child;
-	struct guc_sched_wq_desc *wq_desc = __get_wq_desc(ce);
	unsigned int wqi_size = (ce->parallel.number_children + 4) *
		sizeof(u32);
	u32 *wqi;
@@ -795,7 +846,7 @@ static int __guc_wq_item_append(struct i915_request *rq)
			return ret;
	}
 
-	wqi = get_wq_pointer(wq_desc, ce, wqi_size);
+	wqi = get_wq_pointer(ce, wqi_size);
	if (!wqi)
		return -EBUSY;
 
@@ -810,7 +861,7 @@ static int __guc_wq_item_append(struct i915_request *rq)
	for_each_child(ce, child)
		*wqi++ = child->ring->tail / sizeof(u64);
 
-	write_wqi(wq_desc, ce, wqi_size);
+	write_wqi(ce, wqi_size);
 
	return 0;
 }
@@ -1868,20 +1919,34 @@ static void reset_fail_worker_func(struct work_struct *w);
 int intel_guc_submission_init(struct intel_guc *guc)
 {
	struct intel_gt *gt = guc_to_gt(guc);
+	int ret;
 
	if (guc->submission_initialized)
		return 0;
 
+	if (guc->fw.major_ver_found < 70) {
+		ret = guc_lrc_desc_pool_create_v69(guc);
+		if (ret)
+			return ret;
+	}
+
	guc->submission_state.guc_ids_bitmap =
		bitmap_zalloc(NUMBER_MULTI_LRC_GUC_ID(guc), GFP_KERNEL);
-	if (!guc->submission_state.guc_ids_bitmap)
-		return -ENOMEM;
+	if (!guc->submission_state.guc_ids_bitmap) {
+		ret = -ENOMEM;
+		goto destroy_pool;
+	}
 
	guc->timestamp.ping_delay = (POLL_TIME_CLKS / gt->clock_frequency + 1) * HZ;
	guc->timestamp.shift = gpm_timestamp_shift(gt);
	guc->submission_initialized = true;
 
	return 0;
+
+destroy_pool:
+	guc_lrc_desc_pool_destroy_v69(guc);
+
+	return ret;
 }
 
 void intel_guc_submission_fini(struct intel_guc *guc)
@@ -1890,6 +1955,7 @@ void intel_guc_submission_fini(struct intel_guc *guc)
		return;
 
	guc_flush_destroyed_contexts(guc);
+	guc_lrc_desc_pool_destroy_v69(guc);
	i915_sched_engine_put(guc->sched_engine);
	bitmap_free(guc->submission_state.guc_ids_bitmap);
	guc->submission_initialized = false;
@@ -2147,10 +2213,34 @@ static void unpin_guc_id(struct intel_guc *guc, struct intel_context *ce)
	spin_unlock_irqrestore(&guc->submission_state.lock, flags);
 }
 
-static int __guc_action_register_multi_lrc(struct intel_guc *guc,
-					   struct intel_context *ce,
-					   struct guc_ctxt_registration_info *info,
-					   bool loop)
+static int __guc_action_register_multi_lrc_v69(struct intel_guc *guc,
+					       struct intel_context *ce,
+					       u32 guc_id,
+					       u32 offset,
+					       bool loop)
+{
+	struct intel_context *child;
+	u32 action[4 + MAX_ENGINE_INSTANCE];
+	int len = 0;
+
+	GEM_BUG_ON(ce->parallel.number_children > MAX_ENGINE_INSTANCE);
+
+	action[len++] = INTEL_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC;
+	action[len++] = guc_id;
+	action[len++] = ce->parallel.number_children + 1;
+	action[len++] = offset;
+	for_each_child(ce, child) {
+		offset += sizeof(struct guc_lrc_desc_v69);
+		action[len++] = offset;
+	}
+
+	return guc_submission_send_busy_loop(guc, action, len, 0, loop);
+}
+
+static int __guc_action_register_multi_lrc_v70(struct intel_guc *guc,
+					       struct intel_context *ce,
+					       struct guc_ctxt_registration_info *info,
+					       bool loop)
 {
	struct intel_context *child;
	u32 action[13 + (MAX_ENGINE_INSTANCE * 2)];
@@ -2190,9 +2280,24 @@ static int __guc_action_register_multi_lrc(struct intel_guc *guc,
	return guc_submission_send_busy_loop(guc, action, len, 0, loop);
 }
 
-static int __guc_action_register_context(struct intel_guc *guc,
-					 struct guc_ctxt_registration_info *info,
-					 bool loop)
+static int __guc_action_register_context_v69(struct intel_guc *guc,
+					     u32 guc_id,
+					     u32 offset,
+					     bool loop)
+{
+	u32 action[] = {
+		INTEL_GUC_ACTION_REGISTER_CONTEXT,
+		guc_id,
+		offset,
+	};
+
+	return guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action),
+					     0, loop);
+}
+
+static int __guc_action_register_context_v70(struct intel_guc *guc,
+					     struct guc_ctxt_registration_info *info,
+					     bool loop)
 {
	u32 action[] = {
		INTEL_GUC_ACTION_REGISTER_CONTEXT,
@@ -2213,24 +2318,52 @@ static int __guc_action_register_context(struct intel_guc *guc,
					     0, loop);
 }
 
-static void prepare_context_registration_info(struct intel_context *ce,
-					      struct guc_ctxt_registration_info *info);
+static void prepare_context_registration_info_v69(struct intel_context *ce);
+static void prepare_context_registration_info_v70(struct intel_context *ce,
+						   struct guc_ctxt_registration_info *info);
+
+static int
+register_context_v69(struct intel_guc *guc, struct intel_context *ce, bool loop)
+{
+	u32 offset = intel_guc_ggtt_offset(guc, guc->lrc_desc_pool_v69) +
		ce->guc_id.id * sizeof(struct guc_lrc_desc_v69);
+
+	prepare_context_registration_info_v69(ce);
+
+	if (intel_context_is_parent(ce))
+		return __guc_action_register_multi_lrc_v69(guc, ce, ce->guc_id.id,
+							   offset, loop);
+	else
+		return __guc_action_register_context_v69(guc, ce->guc_id.id,
offset, loop);
}
static int
register_context_v70(struct intel_guc *guc, struct intel_context *ce, bool loop)
{
struct guc_ctxt_registration_info info;
prepare_context_registration_info_v70(ce, &info);
if (intel_context_is_parent(ce))
return __guc_action_register_multi_lrc_v70(guc, ce, &info, loop);
else
return __guc_action_register_context_v70(guc, &info, loop);
}
static int register_context(struct intel_context *ce, bool loop) static int register_context(struct intel_context *ce, bool loop)
{ {
struct guc_ctxt_registration_info info;
struct intel_guc *guc = ce_to_guc(ce); struct intel_guc *guc = ce_to_guc(ce);
int ret; int ret;
GEM_BUG_ON(intel_context_is_child(ce)); GEM_BUG_ON(intel_context_is_child(ce));
trace_intel_context_register(ce); trace_intel_context_register(ce);
prepare_context_registration_info(ce, &info); if (guc->fw.major_ver_found >= 70)
ret = register_context_v70(guc, ce, loop);
if (intel_context_is_parent(ce))
ret = __guc_action_register_multi_lrc(guc, ce, &info, loop);
else else
ret = __guc_action_register_context(guc, &info, loop); ret = register_context_v69(guc, ce, loop);
if (likely(!ret)) { if (likely(!ret)) {
unsigned long flags; unsigned long flags;
@ -2238,7 +2371,8 @@ static int register_context(struct intel_context *ce, bool loop)
set_context_registered(ce); set_context_registered(ce);
spin_unlock_irqrestore(&ce->guc_state.lock, flags); spin_unlock_irqrestore(&ce->guc_state.lock, flags);
guc_context_policy_init(ce, loop); if (guc->fw.major_ver_found >= 70)
guc_context_policy_init_v70(ce, loop);
} }
return ret; return ret;
@ -2335,7 +2469,7 @@ static int __guc_context_set_context_policies(struct intel_guc *guc,
0, loop); 0, loop);
} }
static int guc_context_policy_init(struct intel_context *ce, bool loop) static int guc_context_policy_init_v70(struct intel_context *ce, bool loop)
{ {
struct intel_engine_cs *engine = ce->engine; struct intel_engine_cs *engine = ce->engine;
struct intel_guc *guc = &engine->gt->uc.guc; struct intel_guc *guc = &engine->gt->uc.guc;
@ -2394,8 +2528,108 @@ static int guc_context_policy_init(struct intel_context *ce, bool loop)
return ret; return ret;
} }
static void prepare_context_registration_info(struct intel_context *ce, static void guc_context_policy_init_v69(struct intel_engine_cs *engine,
struct guc_ctxt_registration_info *info) struct guc_lrc_desc_v69 *desc)
{
desc->policy_flags = 0;
if (engine->flags & I915_ENGINE_WANT_FORCED_PREEMPTION)
desc->policy_flags |= CONTEXT_POLICY_FLAG_PREEMPT_TO_IDLE_V69;
/* NB: For both of these, zero means disabled. */
desc->execution_quantum = engine->props.timeslice_duration_ms * 1000;
desc->preemption_timeout = engine->props.preempt_timeout_ms * 1000;
}
static u32 map_guc_prio_to_lrc_desc_prio(u8 prio)
{
/*
* this matches the mapping we do in map_i915_prio_to_guc_prio()
* (e.g. prio < I915_PRIORITY_NORMAL maps to GUC_CLIENT_PRIORITY_NORMAL)
*/
switch (prio) {
default:
MISSING_CASE(prio);
fallthrough;
case GUC_CLIENT_PRIORITY_KMD_NORMAL:
return GEN12_CTX_PRIORITY_NORMAL;
case GUC_CLIENT_PRIORITY_NORMAL:
return GEN12_CTX_PRIORITY_LOW;
case GUC_CLIENT_PRIORITY_HIGH:
case GUC_CLIENT_PRIORITY_KMD_HIGH:
return GEN12_CTX_PRIORITY_HIGH;
}
}
static void prepare_context_registration_info_v69(struct intel_context *ce)
{
struct intel_engine_cs *engine = ce->engine;
struct intel_guc *guc = &engine->gt->uc.guc;
u32 ctx_id = ce->guc_id.id;
struct guc_lrc_desc_v69 *desc;
struct intel_context *child;
GEM_BUG_ON(!engine->mask);
/*
	 * Ensure LRC + CT vmas are in the same region as write barrier is done
* based on CT vma region.
*/
GEM_BUG_ON(i915_gem_object_is_lmem(guc->ct.vma->obj) !=
i915_gem_object_is_lmem(ce->ring->vma->obj));
desc = __get_lrc_desc_v69(guc, ctx_id);
desc->engine_class = engine_class_to_guc_class(engine->class);
desc->engine_submit_mask = engine->logical_mask;
desc->hw_context_desc = ce->lrc.lrca;
desc->priority = ce->guc_state.prio;
desc->context_flags = CONTEXT_REGISTRATION_FLAG_KMD;
guc_context_policy_init_v69(engine, desc);
/*
* If context is a parent, we need to register a process descriptor
* describing a work queue and register all child contexts.
*/
if (intel_context_is_parent(ce)) {
struct guc_process_desc_v69 *pdesc;
ce->parallel.guc.wqi_tail = 0;
ce->parallel.guc.wqi_head = 0;
desc->process_desc = i915_ggtt_offset(ce->state) +
__get_parent_scratch_offset(ce);
desc->wq_addr = i915_ggtt_offset(ce->state) +
__get_wq_offset(ce);
desc->wq_size = WQ_SIZE;
pdesc = __get_process_desc_v69(ce);
memset(pdesc, 0, sizeof(*(pdesc)));
pdesc->stage_id = ce->guc_id.id;
pdesc->wq_base_addr = desc->wq_addr;
pdesc->wq_size_bytes = desc->wq_size;
pdesc->wq_status = WQ_STATUS_ACTIVE;
ce->parallel.guc.wq_head = &pdesc->head;
ce->parallel.guc.wq_tail = &pdesc->tail;
ce->parallel.guc.wq_status = &pdesc->wq_status;
for_each_child(ce, child) {
desc = __get_lrc_desc_v69(guc, child->guc_id.id);
desc->engine_class =
engine_class_to_guc_class(engine->class);
desc->hw_context_desc = child->lrc.lrca;
desc->priority = ce->guc_state.prio;
desc->context_flags = CONTEXT_REGISTRATION_FLAG_KMD;
guc_context_policy_init_v69(engine, desc);
}
clear_children_join_go_memory(ce);
}
}
static void prepare_context_registration_info_v70(struct intel_context *ce,
struct guc_ctxt_registration_info *info)
{ {
struct intel_engine_cs *engine = ce->engine; struct intel_engine_cs *engine = ce->engine;
struct intel_guc *guc = &engine->gt->uc.guc; struct intel_guc *guc = &engine->gt->uc.guc;
@ -2420,6 +2654,8 @@ static void prepare_context_registration_info(struct intel_context *ce,
*/ */
info->hwlrca_lo = lower_32_bits(ce->lrc.lrca); info->hwlrca_lo = lower_32_bits(ce->lrc.lrca);
info->hwlrca_hi = upper_32_bits(ce->lrc.lrca); info->hwlrca_hi = upper_32_bits(ce->lrc.lrca);
if (engine->flags & I915_ENGINE_HAS_EU_PRIORITY)
info->hwlrca_lo |= map_guc_prio_to_lrc_desc_prio(ce->guc_state.prio);
info->flags = CONTEXT_REGISTRATION_FLAG_KMD; info->flags = CONTEXT_REGISTRATION_FLAG_KMD;
/* /*
@ -2443,10 +2679,14 @@ static void prepare_context_registration_info(struct intel_context *ce,
info->wq_base_hi = upper_32_bits(wq_base_offset); info->wq_base_hi = upper_32_bits(wq_base_offset);
info->wq_size = WQ_SIZE; info->wq_size = WQ_SIZE;
wq_desc = __get_wq_desc(ce); wq_desc = __get_wq_desc_v70(ce);
memset(wq_desc, 0, sizeof(*wq_desc)); memset(wq_desc, 0, sizeof(*wq_desc));
wq_desc->wq_status = WQ_STATUS_ACTIVE; wq_desc->wq_status = WQ_STATUS_ACTIVE;
ce->parallel.guc.wq_head = &wq_desc->head;
ce->parallel.guc.wq_tail = &wq_desc->tail;
ce->parallel.guc.wq_status = &wq_desc->wq_status;
clear_children_join_go_memory(ce); clear_children_join_go_memory(ce);
} }
} }
@ -2761,11 +3001,21 @@ static void __guc_context_set_preemption_timeout(struct intel_guc *guc,
u16 guc_id, u16 guc_id,
u32 preemption_timeout) u32 preemption_timeout)
{ {
struct context_policy policy; if (guc->fw.major_ver_found >= 70) {
struct context_policy policy;
__guc_context_policy_start_klv(&policy, guc_id); __guc_context_policy_start_klv(&policy, guc_id);
__guc_context_policy_add_preemption_timeout(&policy, preemption_timeout); __guc_context_policy_add_preemption_timeout(&policy, preemption_timeout);
__guc_context_set_context_policies(guc, &policy, true); __guc_context_set_context_policies(guc, &policy, true);
} else {
u32 action[] = {
INTEL_GUC_ACTION_V69_SET_CONTEXT_PREEMPTION_TIMEOUT,
guc_id,
preemption_timeout
};
intel_guc_send_busy_loop(guc, action, ARRAY_SIZE(action), 0, true);
}
} }
static void guc_context_ban(struct intel_context *ce, struct i915_request *rq) static void guc_context_ban(struct intel_context *ce, struct i915_request *rq)
@ -3013,11 +3263,21 @@ static int guc_context_alloc(struct intel_context *ce)
static void __guc_context_set_prio(struct intel_guc *guc, static void __guc_context_set_prio(struct intel_guc *guc,
struct intel_context *ce) struct intel_context *ce)
{ {
struct context_policy policy; if (guc->fw.major_ver_found >= 70) {
struct context_policy policy;
__guc_context_policy_start_klv(&policy, ce->guc_id.id); __guc_context_policy_start_klv(&policy, ce->guc_id.id);
__guc_context_policy_add_priority(&policy, ce->guc_state.prio); __guc_context_policy_add_priority(&policy, ce->guc_state.prio);
__guc_context_set_context_policies(guc, &policy, true); __guc_context_set_context_policies(guc, &policy, true);
} else {
u32 action[] = {
INTEL_GUC_ACTION_V69_SET_CONTEXT_PRIORITY,
ce->guc_id.id,
ce->guc_state.prio,
};
guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action), 0, true);
}
} }
static void guc_context_set_prio(struct intel_guc *guc, static void guc_context_set_prio(struct intel_guc *guc,
@ -4527,17 +4787,19 @@ void intel_guc_submission_print_context_info(struct intel_guc *guc,
guc_log_context_priority(p, ce); guc_log_context_priority(p, ce);
if (intel_context_is_parent(ce)) { if (intel_context_is_parent(ce)) {
struct guc_sched_wq_desc *wq_desc = __get_wq_desc(ce);
struct intel_context *child; struct intel_context *child;
drm_printf(p, "\t\tNumber children: %u\n", drm_printf(p, "\t\tNumber children: %u\n",
ce->parallel.number_children); ce->parallel.number_children);
drm_printf(p, "\t\tWQI Head: %u\n",
READ_ONCE(wq_desc->head)); if (ce->parallel.guc.wq_status) {
drm_printf(p, "\t\tWQI Tail: %u\n", drm_printf(p, "\t\tWQI Head: %u\n",
READ_ONCE(wq_desc->tail)); READ_ONCE(*ce->parallel.guc.wq_head));
drm_printf(p, "\t\tWQI Status: %u\n\n", drm_printf(p, "\t\tWQI Tail: %u\n",
READ_ONCE(wq_desc->wq_status)); READ_ONCE(*ce->parallel.guc.wq_tail));
drm_printf(p, "\t\tWQI Status: %u\n\n",
READ_ONCE(*ce->parallel.guc.wq_status));
}
if (ce->engine->emit_bb_start == if (ce->engine->emit_bb_start ==
emit_bb_start_parent_no_preempt_mid_batch) { emit_bb_start_parent_no_preempt_mid_batch) {


@ -70,6 +70,10 @@ void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
fw_def(BROXTON, 0, guc_def(bxt, 70, 1, 1)) \ fw_def(BROXTON, 0, guc_def(bxt, 70, 1, 1)) \
fw_def(SKYLAKE, 0, guc_def(skl, 70, 1, 1)) fw_def(SKYLAKE, 0, guc_def(skl, 70, 1, 1))
#define INTEL_GUC_FIRMWARE_DEFS_FALLBACK(fw_def, guc_def) \
fw_def(ALDERLAKE_P, 0, guc_def(adlp, 69, 0, 3)) \
fw_def(ALDERLAKE_S, 0, guc_def(tgl, 69, 0, 3))
#define INTEL_HUC_FIRMWARE_DEFS(fw_def, huc_def) \ #define INTEL_HUC_FIRMWARE_DEFS(fw_def, huc_def) \
fw_def(ALDERLAKE_P, 0, huc_def(tgl, 7, 9, 3)) \ fw_def(ALDERLAKE_P, 0, huc_def(tgl, 7, 9, 3)) \
fw_def(ALDERLAKE_S, 0, huc_def(tgl, 7, 9, 3)) \ fw_def(ALDERLAKE_S, 0, huc_def(tgl, 7, 9, 3)) \
@ -105,6 +109,7 @@ void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
MODULE_FIRMWARE(uc_); MODULE_FIRMWARE(uc_);
INTEL_GUC_FIRMWARE_DEFS(INTEL_UC_MODULE_FW, MAKE_GUC_FW_PATH) INTEL_GUC_FIRMWARE_DEFS(INTEL_UC_MODULE_FW, MAKE_GUC_FW_PATH)
INTEL_GUC_FIRMWARE_DEFS_FALLBACK(INTEL_UC_MODULE_FW, MAKE_GUC_FW_PATH)
INTEL_HUC_FIRMWARE_DEFS(INTEL_UC_MODULE_FW, MAKE_HUC_FW_PATH) INTEL_HUC_FIRMWARE_DEFS(INTEL_UC_MODULE_FW, MAKE_HUC_FW_PATH)
/* The below structs and macros are used to iterate across the list of blobs */ /* The below structs and macros are used to iterate across the list of blobs */
@ -149,6 +154,9 @@ __uc_fw_auto_select(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw)
static const struct uc_fw_platform_requirement blobs_guc[] = { static const struct uc_fw_platform_requirement blobs_guc[] = {
INTEL_GUC_FIRMWARE_DEFS(MAKE_FW_LIST, GUC_FW_BLOB) INTEL_GUC_FIRMWARE_DEFS(MAKE_FW_LIST, GUC_FW_BLOB)
}; };
static const struct uc_fw_platform_requirement blobs_guc_fallback[] = {
INTEL_GUC_FIRMWARE_DEFS_FALLBACK(MAKE_FW_LIST, GUC_FW_BLOB)
};
static const struct uc_fw_platform_requirement blobs_huc[] = { static const struct uc_fw_platform_requirement blobs_huc[] = {
INTEL_HUC_FIRMWARE_DEFS(MAKE_FW_LIST, HUC_FW_BLOB) INTEL_HUC_FIRMWARE_DEFS(MAKE_FW_LIST, HUC_FW_BLOB)
}; };
@ -179,12 +187,29 @@ __uc_fw_auto_select(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw)
if (p == fw_blobs[i].p && rev >= fw_blobs[i].rev) { if (p == fw_blobs[i].p && rev >= fw_blobs[i].rev) {
const struct uc_fw_blob *blob = &fw_blobs[i].blob; const struct uc_fw_blob *blob = &fw_blobs[i].blob;
uc_fw->path = blob->path; uc_fw->path = blob->path;
uc_fw->wanted_path = blob->path;
uc_fw->major_ver_wanted = blob->major; uc_fw->major_ver_wanted = blob->major;
uc_fw->minor_ver_wanted = blob->minor; uc_fw->minor_ver_wanted = blob->minor;
break; break;
} }
} }
if (uc_fw->type == INTEL_UC_FW_TYPE_GUC) {
const struct uc_fw_platform_requirement *blobs = blobs_guc_fallback;
u32 count = ARRAY_SIZE(blobs_guc_fallback);
for (i = 0; i < count && p <= blobs[i].p; i++) {
if (p == blobs[i].p && rev >= blobs[i].rev) {
const struct uc_fw_blob *blob = &blobs[i].blob;
uc_fw->fallback.path = blob->path;
uc_fw->fallback.major_ver = blob->major;
uc_fw->fallback.minor_ver = blob->minor;
break;
}
}
}
/* make sure the list is ordered as expected */ /* make sure the list is ordered as expected */
if (IS_ENABLED(CONFIG_DRM_I915_SELFTEST)) { if (IS_ENABLED(CONFIG_DRM_I915_SELFTEST)) {
for (i = 1; i < fw_count; i++) { for (i = 1; i < fw_count; i++) {
@ -338,7 +363,24 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
__force_fw_fetch_failures(uc_fw, -EINVAL); __force_fw_fetch_failures(uc_fw, -EINVAL);
__force_fw_fetch_failures(uc_fw, -ESTALE); __force_fw_fetch_failures(uc_fw, -ESTALE);
err = request_firmware(&fw, uc_fw->path, dev); err = firmware_request_nowarn(&fw, uc_fw->path, dev);
if (err && !intel_uc_fw_is_overridden(uc_fw) && uc_fw->fallback.path) {
err = firmware_request_nowarn(&fw, uc_fw->fallback.path, dev);
if (!err) {
drm_notice(&i915->drm,
"%s firmware %s is recommended, but only %s was found\n",
intel_uc_fw_type_repr(uc_fw->type),
uc_fw->wanted_path,
uc_fw->fallback.path);
drm_info(&i915->drm,
"Consider updating your linux-firmware pkg or downloading from %s\n",
INTEL_UC_FIRMWARE_URL);
uc_fw->path = uc_fw->fallback.path;
uc_fw->major_ver_wanted = uc_fw->fallback.major_ver;
uc_fw->minor_ver_wanted = uc_fw->fallback.minor_ver;
}
}
if (err) if (err)
goto fail; goto fail;
@ -437,8 +479,8 @@ fail:
INTEL_UC_FIRMWARE_MISSING : INTEL_UC_FIRMWARE_MISSING :
INTEL_UC_FIRMWARE_ERROR); INTEL_UC_FIRMWARE_ERROR);
drm_notice(&i915->drm, "%s firmware %s: fetch failed with error %d\n", i915_probe_error(i915, "%s firmware %s: fetch failed with error %d\n",
intel_uc_fw_type_repr(uc_fw->type), uc_fw->path, err); intel_uc_fw_type_repr(uc_fw->type), uc_fw->path, err);
drm_info(&i915->drm, "%s firmware(s) can be downloaded from %s\n", drm_info(&i915->drm, "%s firmware(s) can be downloaded from %s\n",
intel_uc_fw_type_repr(uc_fw->type), INTEL_UC_FIRMWARE_URL); intel_uc_fw_type_repr(uc_fw->type), INTEL_UC_FIRMWARE_URL);
@ -796,7 +838,13 @@ size_t intel_uc_fw_copy_rsa(struct intel_uc_fw *uc_fw, void *dst, u32 max_len)
void intel_uc_fw_dump(const struct intel_uc_fw *uc_fw, struct drm_printer *p) void intel_uc_fw_dump(const struct intel_uc_fw *uc_fw, struct drm_printer *p)
{ {
drm_printf(p, "%s firmware: %s\n", drm_printf(p, "%s firmware: %s\n",
intel_uc_fw_type_repr(uc_fw->type), uc_fw->path); intel_uc_fw_type_repr(uc_fw->type), uc_fw->wanted_path);
if (uc_fw->fallback.path) {
drm_printf(p, "%s firmware fallback: %s\n",
intel_uc_fw_type_repr(uc_fw->type), uc_fw->fallback.path);
drm_printf(p, "fallback selected: %s\n",
str_yes_no(uc_fw->path == uc_fw->fallback.path));
}
drm_printf(p, "\tstatus: %s\n", drm_printf(p, "\tstatus: %s\n",
intel_uc_fw_status_repr(uc_fw->status)); intel_uc_fw_status_repr(uc_fw->status));
drm_printf(p, "\tversion: wanted %u.%u, found %u.%u\n", drm_printf(p, "\tversion: wanted %u.%u, found %u.%u\n",


@ -74,6 +74,7 @@ struct intel_uc_fw {
const enum intel_uc_fw_status status; const enum intel_uc_fw_status status;
enum intel_uc_fw_status __status; /* no accidental overwrites */ enum intel_uc_fw_status __status; /* no accidental overwrites */
}; };
const char *wanted_path;
const char *path; const char *path;
bool user_overridden; bool user_overridden;
size_t size; size_t size;
@ -98,6 +99,12 @@ struct intel_uc_fw {
u16 major_ver_found; u16 major_ver_found;
u16 minor_ver_found; u16 minor_ver_found;
struct {
const char *path;
u16 major_ver;
u16 minor_ver;
} fallback;
u32 rsa_size; u32 rsa_size;
u32 ucode_size; u32 ucode_size;


@ -207,6 +207,7 @@ struct dcss_dev *dcss_dev_create(struct device *dev, bool hdmi_output)
ret = dcss_submodules_init(dcss); ret = dcss_submodules_init(dcss);
if (ret) { if (ret) {
of_node_put(dcss->of_port);
dev_err(dev, "submodules initialization failed\n"); dev_err(dev, "submodules initialization failed\n");
goto clks_err; goto clks_err;
} }
@ -237,6 +238,8 @@ void dcss_dev_destroy(struct dcss_dev *dcss)
dcss_clocks_disable(dcss); dcss_clocks_disable(dcss);
} }
of_node_put(dcss->of_port);
pm_runtime_disable(dcss->dev); pm_runtime_disable(dcss->dev);
dcss_submodules_stop(dcss); dcss_submodules_stop(dcss);


@ -713,7 +713,7 @@ static int generic_edp_panel_probe(struct device *dev, struct panel_edp *panel)
of_property_read_u32(dev->of_node, "hpd-reliable-delay-ms", &reliable_ms); of_property_read_u32(dev->of_node, "hpd-reliable-delay-ms", &reliable_ms);
desc->delay.hpd_reliable = reliable_ms; desc->delay.hpd_reliable = reliable_ms;
of_property_read_u32(dev->of_node, "hpd-absent-delay-ms", &absent_ms); of_property_read_u32(dev->of_node, "hpd-absent-delay-ms", &absent_ms);
desc->delay.hpd_reliable = absent_ms; desc->delay.hpd_absent = absent_ms;
/* Power the panel on so we can read the EDID */ /* Power the panel on so we can read the EDID */
ret = pm_runtime_get_sync(dev); ret = pm_runtime_get_sync(dev);


@ -190,7 +190,7 @@ long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
} }
EXPORT_SYMBOL(drm_sched_entity_flush); EXPORT_SYMBOL(drm_sched_entity_flush);
static void drm_sched_entity_kill_jobs_irq_work(struct irq_work *wrk) static void drm_sched_entity_kill_jobs_work(struct work_struct *wrk)
{ {
struct drm_sched_job *job = container_of(wrk, typeof(*job), work); struct drm_sched_job *job = container_of(wrk, typeof(*job), work);
@ -207,8 +207,8 @@ static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
struct drm_sched_job *job = container_of(cb, struct drm_sched_job, struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
finish_cb); finish_cb);
init_irq_work(&job->work, drm_sched_entity_kill_jobs_irq_work); INIT_WORK(&job->work, drm_sched_entity_kill_jobs_work);
irq_work_queue(&job->work); schedule_work(&job->work);
} }
static struct dma_fence * static struct dma_fence *


@ -388,9 +388,9 @@ static irqreturn_t cdns_i2c_slave_isr(void *ptr)
*/ */
static irqreturn_t cdns_i2c_master_isr(void *ptr) static irqreturn_t cdns_i2c_master_isr(void *ptr)
{ {
unsigned int isr_status, avail_bytes, updatetx; unsigned int isr_status, avail_bytes;
unsigned int bytes_to_send; unsigned int bytes_to_send;
bool hold_quirk; bool updatetx;
struct cdns_i2c *id = ptr; struct cdns_i2c *id = ptr;
/* Signal completion only after everything is updated */ /* Signal completion only after everything is updated */
int done_flag = 0; int done_flag = 0;
@ -410,11 +410,7 @@ static irqreturn_t cdns_i2c_master_isr(void *ptr)
* Check if transfer size register needs to be updated again for a * Check if transfer size register needs to be updated again for a
* large data receive operation. * large data receive operation.
*/ */
updatetx = 0; updatetx = id->recv_count > id->curr_recv_count;
if (id->recv_count > id->curr_recv_count)
updatetx = 1;
hold_quirk = (id->quirks & CDNS_I2C_BROKEN_HOLD_BIT) && updatetx;
/* When receiving, handle data interrupt and completion interrupt */ /* When receiving, handle data interrupt and completion interrupt */
if (id->p_recv_buf && if (id->p_recv_buf &&
@ -445,7 +441,7 @@ static irqreturn_t cdns_i2c_master_isr(void *ptr)
break; break;
} }
if (cdns_is_holdquirk(id, hold_quirk)) if (cdns_is_holdquirk(id, updatetx))
break; break;
} }
@ -456,7 +452,7 @@ static irqreturn_t cdns_i2c_master_isr(void *ptr)
* maintain transfer size non-zero while performing a large * maintain transfer size non-zero while performing a large
* receive operation. * receive operation.
*/ */
if (cdns_is_holdquirk(id, hold_quirk)) { if (cdns_is_holdquirk(id, updatetx)) {
/* wait while fifo is full */ /* wait while fifo is full */
while (cdns_i2c_readreg(CDNS_I2C_XFER_SIZE_OFFSET) != while (cdns_i2c_readreg(CDNS_I2C_XFER_SIZE_OFFSET) !=
(id->curr_recv_count - CDNS_I2C_FIFO_DEPTH)) (id->curr_recv_count - CDNS_I2C_FIFO_DEPTH))
@ -478,22 +474,6 @@ static irqreturn_t cdns_i2c_master_isr(void *ptr)
CDNS_I2C_XFER_SIZE_OFFSET); CDNS_I2C_XFER_SIZE_OFFSET);
id->curr_recv_count = id->recv_count; id->curr_recv_count = id->recv_count;
} }
} else if (id->recv_count && !hold_quirk &&
!id->curr_recv_count) {
/* Set the slave address in address register*/
cdns_i2c_writereg(id->p_msg->addr & CDNS_I2C_ADDR_MASK,
CDNS_I2C_ADDR_OFFSET);
if (id->recv_count > CDNS_I2C_TRANSFER_SIZE) {
cdns_i2c_writereg(CDNS_I2C_TRANSFER_SIZE,
CDNS_I2C_XFER_SIZE_OFFSET);
id->curr_recv_count = CDNS_I2C_TRANSFER_SIZE;
} else {
cdns_i2c_writereg(id->recv_count,
CDNS_I2C_XFER_SIZE_OFFSET);
id->curr_recv_count = id->recv_count;
}
} }
/* Clear hold (if not repeated start) and signal completion */ /* Clear hold (if not repeated start) and signal completion */


@ -66,7 +66,7 @@
/* IMX I2C registers: /* IMX I2C registers:
* the I2C register offset is different between SoCs, * the I2C register offset is different between SoCs,
* to provid support for all these chips, split the * to provide support for all these chips, split the
* register offset into a fixed base address and a * register offset into a fixed base address and a
* variable shift value, then the full register offset * variable shift value, then the full register offset
* will be calculated by * will be calculated by


@ -49,7 +49,7 @@
#define MLXCPLD_LPCI2C_NACK_IND 2 #define MLXCPLD_LPCI2C_NACK_IND 2
#define MLXCPLD_I2C_FREQ_1000KHZ_SET 0x04 #define MLXCPLD_I2C_FREQ_1000KHZ_SET 0x04
#define MLXCPLD_I2C_FREQ_400KHZ_SET 0x0c #define MLXCPLD_I2C_FREQ_400KHZ_SET 0x0e
#define MLXCPLD_I2C_FREQ_100KHZ_SET 0x42 #define MLXCPLD_I2C_FREQ_100KHZ_SET 0x42
enum mlxcpld_i2c_frequency { enum mlxcpld_i2c_frequency {


@ -7304,7 +7304,9 @@ static struct r5conf *setup_conf(struct mddev *mddev)
goto abort; goto abort;
conf->mddev = mddev; conf->mddev = mddev;
if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL) ret = -ENOMEM;
conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL);
if (!conf->stripe_hashtbl)
goto abort; goto abort;
/* We init hash_locks[0] separately to that it can be used /* We init hash_locks[0] separately to that it can be used


@ -13,10 +13,13 @@ lkdtm-$(CONFIG_LKDTM) += cfi.o
lkdtm-$(CONFIG_LKDTM) += fortify.o lkdtm-$(CONFIG_LKDTM) += fortify.o
lkdtm-$(CONFIG_PPC_64S_HASH_MMU) += powerpc.o lkdtm-$(CONFIG_PPC_64S_HASH_MMU) += powerpc.o
KASAN_SANITIZE_rodata.o := n
KASAN_SANITIZE_stackleak.o := n KASAN_SANITIZE_stackleak.o := n
KCOV_INSTRUMENT_rodata.o := n
CFLAGS_REMOVE_rodata.o += $(CC_FLAGS_LTO) KASAN_SANITIZE_rodata.o := n
KCSAN_SANITIZE_rodata.o := n
KCOV_INSTRUMENT_rodata.o := n
OBJECT_FILES_NON_STANDARD_rodata.o := y
CFLAGS_REMOVE_rodata.o += $(CC_FLAGS_LTO) $(RETHUNK_CFLAGS)
OBJCOPYFLAGS := OBJCOPYFLAGS :=
OBJCOPYFLAGS_rodata_objcopy.o := \ OBJCOPYFLAGS_rodata_objcopy.o := \


@ -1298,8 +1298,9 @@ static int sdhci_omap_probe(struct platform_device *pdev)
/* /*
* omap_device_pm_domain has callbacks to enable the main * omap_device_pm_domain has callbacks to enable the main
* functional clock, interface clock and also configure the * functional clock, interface clock and also configure the
* SYSCONFIG register of omap devices. The callback will be invoked * SYSCONFIG register to clear any boot loader set voltage
* as part of pm_runtime_get_sync. * capabilities before calling sdhci_setup_host(). The
* callback will be invoked as part of pm_runtime_get_sync.
*/ */
pm_runtime_use_autosuspend(dev); pm_runtime_use_autosuspend(dev);
pm_runtime_set_autosuspend_delay(dev, 50); pm_runtime_set_autosuspend_delay(dev, 50);
@ -1441,7 +1442,8 @@ static int __maybe_unused sdhci_omap_runtime_suspend(struct device *dev)
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host); struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host);
sdhci_runtime_suspend_host(host); if (omap_host->con != -EINVAL)
sdhci_runtime_suspend_host(host);
sdhci_omap_context_save(omap_host); sdhci_omap_context_save(omap_host);
@ -1458,10 +1460,10 @@ static int __maybe_unused sdhci_omap_runtime_resume(struct device *dev)
pinctrl_pm_select_default_state(dev); pinctrl_pm_select_default_state(dev);
if (omap_host->con != -EINVAL) if (omap_host->con != -EINVAL) {
sdhci_omap_context_restore(omap_host); sdhci_omap_context_restore(omap_host);
sdhci_runtime_resume_host(host, 0);
sdhci_runtime_resume_host(host, 0); }
return 0; return 0;
} }


@ -850,9 +850,10 @@ static int gpmi_nfc_compute_timings(struct gpmi_nand_data *this,
unsigned int tRP_ps; unsigned int tRP_ps;
bool use_half_period; bool use_half_period;
int sample_delay_ps, sample_delay_factor; int sample_delay_ps, sample_delay_factor;
u16 busy_timeout_cycles; unsigned int busy_timeout_cycles;
u8 wrn_dly_sel; u8 wrn_dly_sel;
unsigned long clk_rate, min_rate; unsigned long clk_rate, min_rate;
u64 busy_timeout_ps;
if (sdr->tRC_min >= 30000) { if (sdr->tRC_min >= 30000) {
/* ONFI non-EDO modes [0-3] */ /* ONFI non-EDO modes [0-3] */
@ -885,7 +886,8 @@ static int gpmi_nfc_compute_timings(struct gpmi_nand_data *this,
addr_setup_cycles = TO_CYCLES(sdr->tALS_min, period_ps); addr_setup_cycles = TO_CYCLES(sdr->tALS_min, period_ps);
data_setup_cycles = TO_CYCLES(sdr->tDS_min, period_ps); data_setup_cycles = TO_CYCLES(sdr->tDS_min, period_ps);
data_hold_cycles = TO_CYCLES(sdr->tDH_min, period_ps); data_hold_cycles = TO_CYCLES(sdr->tDH_min, period_ps);
busy_timeout_cycles = TO_CYCLES(sdr->tWB_max + sdr->tR_max, period_ps); busy_timeout_ps = max(sdr->tBERS_max, sdr->tPROG_max);
busy_timeout_cycles = TO_CYCLES(busy_timeout_ps, period_ps);
hw->timing0 = BF_GPMI_TIMING0_ADDRESS_SETUP(addr_setup_cycles) | hw->timing0 = BF_GPMI_TIMING0_ADDRESS_SETUP(addr_setup_cycles) |
BF_GPMI_TIMING0_DATA_HOLD(data_hold_cycles) | BF_GPMI_TIMING0_DATA_HOLD(data_hold_cycles) |


@ -142,6 +142,7 @@ static void *fun_run_xdp(struct funeth_rxq *q, skb_frag_t *frags, void *buf_va,
int ref_ok, struct funeth_txq *xdp_q) int ref_ok, struct funeth_txq *xdp_q)
{ {
struct bpf_prog *xdp_prog; struct bpf_prog *xdp_prog;
struct xdp_frame *xdpf;
struct xdp_buff xdp; struct xdp_buff xdp;
u32 act; u32 act;
@ -163,7 +164,9 @@ static void *fun_run_xdp(struct funeth_rxq *q, skb_frag_t *frags, void *buf_va,
case XDP_TX: case XDP_TX:
if (unlikely(!ref_ok)) if (unlikely(!ref_ok))
goto pass; goto pass;
if (!fun_xdp_tx(xdp_q, xdp.data, xdp.data_end - xdp.data))
xdpf = xdp_convert_buff_to_frame(&xdp);
if (!xdpf || !fun_xdp_tx(xdp_q, xdpf))
goto xdp_error; goto xdp_error;
FUN_QSTAT_INC(q, xdp_tx); FUN_QSTAT_INC(q, xdp_tx);
q->xdp_flush |= FUN_XDP_FLUSH_TX; q->xdp_flush |= FUN_XDP_FLUSH_TX;


@ -487,7 +487,7 @@ static unsigned int fun_xdpq_clean(struct funeth_txq *q, unsigned int budget)
do { do {
fun_xdp_unmap(q, reclaim_idx); fun_xdp_unmap(q, reclaim_idx);
page_frag_free(q->info[reclaim_idx].vaddr); xdp_return_frame(q->info[reclaim_idx].xdpf);
trace_funeth_tx_free(q, reclaim_idx, 1, head); trace_funeth_tx_free(q, reclaim_idx, 1, head);
@ -500,11 +500,11 @@ static unsigned int fun_xdpq_clean(struct funeth_txq *q, unsigned int budget)
return npkts; return npkts;
} }
bool fun_xdp_tx(struct funeth_txq *q, void *data, unsigned int len) bool fun_xdp_tx(struct funeth_txq *q, struct xdp_frame *xdpf)
{ {
struct fun_eth_tx_req *req; struct fun_eth_tx_req *req;
struct fun_dataop_gl *gle; struct fun_dataop_gl *gle;
unsigned int idx; unsigned int idx, len;
dma_addr_t dma; dma_addr_t dma;
if (fun_txq_avail(q) < FUN_XDP_CLEAN_THRES) if (fun_txq_avail(q) < FUN_XDP_CLEAN_THRES)
@ -515,7 +515,8 @@ bool fun_xdp_tx(struct funeth_txq *q, void *data, unsigned int len)
return false; return false;
} }
dma = dma_map_single(q->dma_dev, data, len, DMA_TO_DEVICE); len = xdpf->len;
dma = dma_map_single(q->dma_dev, xdpf->data, len, DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(q->dma_dev, dma))) { if (unlikely(dma_mapping_error(q->dma_dev, dma))) {
FUN_QSTAT_INC(q, tx_map_err); FUN_QSTAT_INC(q, tx_map_err);
return false; return false;
@ -535,7 +536,7 @@ bool fun_xdp_tx(struct funeth_txq *q, void *data, unsigned int len)
gle = (struct fun_dataop_gl *)req->dataop.imm; gle = (struct fun_dataop_gl *)req->dataop.imm;
fun_dataop_gl_init(gle, 0, 0, len, dma); fun_dataop_gl_init(gle, 0, 0, len, dma);
q->info[idx].vaddr = data; q->info[idx].xdpf = xdpf;
u64_stats_update_begin(&q->syncp); u64_stats_update_begin(&q->syncp);
q->stats.tx_bytes += len; q->stats.tx_bytes += len;
@ -566,12 +567,9 @@ int fun_xdp_xmit_frames(struct net_device *dev, int n,
if (unlikely(q_idx >= fp->num_xdpqs)) if (unlikely(q_idx >= fp->num_xdpqs))
return -ENXIO; return -ENXIO;
for (q = xdpqs[q_idx], i = 0; i < n; i++) { for (q = xdpqs[q_idx], i = 0; i < n; i++)
const struct xdp_frame *xdpf = frames[i]; if (!fun_xdp_tx(q, frames[i]))
if (!fun_xdp_tx(q, xdpf->data, xdpf->len))
break; break;
}
if (unlikely(flags & XDP_XMIT_FLUSH)) if (unlikely(flags & XDP_XMIT_FLUSH))
fun_txq_wr_db(q); fun_txq_wr_db(q);
@ -598,7 +596,7 @@ static void fun_xdpq_purge(struct funeth_txq *q)
unsigned int idx = q->cons_cnt & q->mask; unsigned int idx = q->cons_cnt & q->mask;
fun_xdp_unmap(q, idx); fun_xdp_unmap(q, idx);
page_frag_free(q->info[idx].vaddr); xdp_return_frame(q->info[idx].xdpf);
q->cons_cnt++; q->cons_cnt++;
} }
} }


@ -96,8 +96,8 @@ struct funeth_txq_stats { /* per Tx queue SW counters */
struct funeth_tx_info { /* per Tx descriptor state */ struct funeth_tx_info { /* per Tx descriptor state */
union { union {
struct sk_buff *skb; /* associated packet */ struct sk_buff *skb; /* associated packet (sk_buff path) */
void *vaddr; /* start address for XDP */ struct xdp_frame *xdpf; /* associated XDP frame (XDP path) */
}; };
}; };
@ -246,7 +246,7 @@ static inline int fun_irq_node(const struct fun_irq *p)
int fun_rxq_napi_poll(struct napi_struct *napi, int budget); int fun_rxq_napi_poll(struct napi_struct *napi, int budget);
int fun_txq_napi_poll(struct napi_struct *napi, int budget); int fun_txq_napi_poll(struct napi_struct *napi, int budget);
netdev_tx_t fun_start_xmit(struct sk_buff *skb, struct net_device *netdev); netdev_tx_t fun_start_xmit(struct sk_buff *skb, struct net_device *netdev);
bool fun_xdp_tx(struct funeth_txq *q, void *data, unsigned int len); bool fun_xdp_tx(struct funeth_txq *q, struct xdp_frame *xdpf);
int fun_xdp_xmit_frames(struct net_device *dev, int n, int fun_xdp_xmit_frames(struct net_device *dev, int n,
struct xdp_frame **frames, u32 flags); struct xdp_frame **frames, u32 flags);


@ -2033,11 +2033,15 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
* non-zero req_queue_pairs says that user requested a new * non-zero req_queue_pairs says that user requested a new
* queue count via ethtool's set_channels, so use this * queue count via ethtool's set_channels, so use this
* value for queues distribution across traffic classes * value for queues distribution across traffic classes
* We need at least one queue pair for the interface
* to be usable as we see in else statement.
*/ */
if (vsi->req_queue_pairs > 0) if (vsi->req_queue_pairs > 0)
vsi->num_queue_pairs = vsi->req_queue_pairs; vsi->num_queue_pairs = vsi->req_queue_pairs;
else if (pf->flags & I40E_FLAG_MSIX_ENABLED) else if (pf->flags & I40E_FLAG_MSIX_ENABLED)
vsi->num_queue_pairs = pf->num_lan_msix; vsi->num_queue_pairs = pf->num_lan_msix;
else
vsi->num_queue_pairs = 1;
} }
/* Number of queues per enabled TC */ /* Number of queues per enabled TC */


@ -658,7 +658,8 @@ static int ice_lbtest_receive_frames(struct ice_rx_ring *rx_ring)
rx_desc = ICE_RX_DESC(rx_ring, i); rx_desc = ICE_RX_DESC(rx_ring, i);
if (!(rx_desc->wb.status_error0 & if (!(rx_desc->wb.status_error0 &
cpu_to_le16(ICE_TX_DESC_CMD_EOP | ICE_TX_DESC_CMD_RS))) (cpu_to_le16(BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S)) |
cpu_to_le16(BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S)))))
continue; continue;
rx_buf = &rx_ring->rx_buf[i]; rx_buf = &rx_ring->rx_buf[i];


@ -4656,6 +4656,8 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
ice_set_safe_mode_caps(hw); ice_set_safe_mode_caps(hw);
} }
hw->ucast_shared = true;
err = ice_init_pf(pf); err = ice_init_pf(pf);
if (err) { if (err) {
dev_err(dev, "ice_init_pf failed: %d\n", err); dev_err(dev, "ice_init_pf failed: %d\n", err);
@ -6005,10 +6007,12 @@ int ice_vsi_cfg(struct ice_vsi *vsi)
if (vsi->netdev) { if (vsi->netdev) {
ice_set_rx_mode(vsi->netdev); ice_set_rx_mode(vsi->netdev);
err = ice_vsi_vlan_setup(vsi); if (vsi->type != ICE_VSI_LB) {
err = ice_vsi_vlan_setup(vsi);
if (err) if (err)
return err; return err;
}
} }
ice_vsi_cfg_dcb_rings(vsi); ice_vsi_cfg_dcb_rings(vsi);


@ -1309,39 +1309,6 @@ out_put_vf:
return ret; return ret;
} }
/**
* ice_unicast_mac_exists - check if the unicast MAC exists on the PF's switch
* @pf: PF used to reference the switch's rules
* @umac: unicast MAC to compare against existing switch rules
*
* Return true on the first/any match, else return false
*/
static bool ice_unicast_mac_exists(struct ice_pf *pf, u8 *umac)
{
struct ice_sw_recipe *mac_recipe_list =
&pf->hw.switch_info->recp_list[ICE_SW_LKUP_MAC];
struct ice_fltr_mgmt_list_entry *list_itr;
struct list_head *rule_head;
struct mutex *rule_lock; /* protect MAC filter list access */
rule_head = &mac_recipe_list->filt_rules;
rule_lock = &mac_recipe_list->filt_rule_lock;
mutex_lock(rule_lock);
list_for_each_entry(list_itr, rule_head, list_entry) {
u8 *existing_mac = &list_itr->fltr_info.l_data.mac.mac_addr[0];
if (ether_addr_equal(existing_mac, umac)) {
mutex_unlock(rule_lock);
return true;
}
}
mutex_unlock(rule_lock);
return false;
}
/** /**
* ice_set_vf_mac * ice_set_vf_mac
* @netdev: network interface device structure * @netdev: network interface device structure
@ -1376,13 +1343,6 @@ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
if (ret) if (ret)
goto out_put_vf; goto out_put_vf;
if (ice_unicast_mac_exists(pf, mac)) {
netdev_err(netdev, "Unicast MAC %pM already exists on this PF. Preventing setting VF %u unicast MAC address to %pM\n",
mac, vf_id, mac);
ret = -EINVAL;
goto out_put_vf;
}
mutex_lock(&vf->cfg_lock); mutex_lock(&vf->cfg_lock);
/* VF is notified of its new MAC via the PF's response to the /* VF is notified of its new MAC via the PF's response to the


@ -1751,11 +1751,13 @@ int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
protocol = vlan_get_protocol(skb); protocol = vlan_get_protocol(skb);
if (eth_p_mpls(protocol)) if (eth_p_mpls(protocol)) {
ip.hdr = skb_inner_network_header(skb); ip.hdr = skb_inner_network_header(skb);
else l4.hdr = skb_checksum_start(skb);
} else {
ip.hdr = skb_network_header(skb); ip.hdr = skb_network_header(skb);
l4.hdr = skb_checksum_start(skb); l4.hdr = skb_transport_header(skb);
}
/* compute outer L2 header size */ /* compute outer L2 header size */
l2_len = ip.hdr - skb->data; l2_len = ip.hdr - skb->data;


@ -2971,7 +2971,8 @@ ice_vc_validate_add_vlan_filter_list(struct ice_vsi *vsi,
struct virtchnl_vlan_filtering_caps *vfc, struct virtchnl_vlan_filtering_caps *vfc,
struct virtchnl_vlan_filter_list_v2 *vfl) struct virtchnl_vlan_filter_list_v2 *vfl)
{ {
u16 num_requested_filters = vsi->num_vlan + vfl->num_elements; u16 num_requested_filters = ice_vsi_num_non_zero_vlans(vsi) +
vfl->num_elements;
if (num_requested_filters > vfc->max_filters) if (num_requested_filters > vfc->max_filters)
return false; return false;


@ -28,6 +28,9 @@
#define MAX_RATE_EXPONENT 0x0FULL #define MAX_RATE_EXPONENT 0x0FULL
#define MAX_RATE_MANTISSA 0xFFULL #define MAX_RATE_MANTISSA 0xFFULL
#define CN10K_MAX_BURST_MANTISSA 0x7FFFULL
#define CN10K_MAX_BURST_SIZE 8453888ULL
/* Bitfields in NIX_TLX_PIR register */ /* Bitfields in NIX_TLX_PIR register */
#define TLX_RATE_MANTISSA GENMASK_ULL(8, 1) #define TLX_RATE_MANTISSA GENMASK_ULL(8, 1)
#define TLX_RATE_EXPONENT GENMASK_ULL(12, 9) #define TLX_RATE_EXPONENT GENMASK_ULL(12, 9)
@ -35,6 +38,9 @@
#define TLX_BURST_MANTISSA GENMASK_ULL(36, 29) #define TLX_BURST_MANTISSA GENMASK_ULL(36, 29)
#define TLX_BURST_EXPONENT GENMASK_ULL(40, 37) #define TLX_BURST_EXPONENT GENMASK_ULL(40, 37)
#define CN10K_TLX_BURST_MANTISSA GENMASK_ULL(43, 29)
#define CN10K_TLX_BURST_EXPONENT GENMASK_ULL(47, 44)
struct otx2_tc_flow_stats { struct otx2_tc_flow_stats {
u64 bytes; u64 bytes;
u64 pkts; u64 pkts;
@ -77,33 +83,42 @@ int otx2_tc_alloc_ent_bitmap(struct otx2_nic *nic)
} }
EXPORT_SYMBOL(otx2_tc_alloc_ent_bitmap); EXPORT_SYMBOL(otx2_tc_alloc_ent_bitmap);
static void otx2_get_egress_burst_cfg(u32 burst, u32 *burst_exp, static void otx2_get_egress_burst_cfg(struct otx2_nic *nic, u32 burst,
u32 *burst_mantissa) u32 *burst_exp, u32 *burst_mantissa)
{ {
int max_burst, max_mantissa;
unsigned int tmp; unsigned int tmp;
if (is_dev_otx2(nic->pdev)) {
max_burst = MAX_BURST_SIZE;
max_mantissa = MAX_BURST_MANTISSA;
} else {
max_burst = CN10K_MAX_BURST_SIZE;
max_mantissa = CN10K_MAX_BURST_MANTISSA;
}
/* Burst is calculated as /* Burst is calculated as
* ((256 + BURST_MANTISSA) << (1 + BURST_EXPONENT)) / 256 * ((256 + BURST_MANTISSA) << (1 + BURST_EXPONENT)) / 256
* Max supported burst size is 130,816 bytes. * Max supported burst size is 130,816 bytes.
*/ */
burst = min_t(u32, burst, MAX_BURST_SIZE); burst = min_t(u32, burst, max_burst);
if (burst) { if (burst) {
*burst_exp = ilog2(burst) ? ilog2(burst) - 1 : 0; *burst_exp = ilog2(burst) ? ilog2(burst) - 1 : 0;
tmp = burst - rounddown_pow_of_two(burst); tmp = burst - rounddown_pow_of_two(burst);
if (burst < MAX_BURST_MANTISSA) if (burst < max_mantissa)
*burst_mantissa = tmp * 2; *burst_mantissa = tmp * 2;
else else
*burst_mantissa = tmp / (1ULL << (*burst_exp - 7)); *burst_mantissa = tmp / (1ULL << (*burst_exp - 7));
} else { } else {
*burst_exp = MAX_BURST_EXPONENT; *burst_exp = MAX_BURST_EXPONENT;
*burst_mantissa = MAX_BURST_MANTISSA; *burst_mantissa = max_mantissa;
} }
} }
static void otx2_get_egress_rate_cfg(u32 maxrate, u32 *exp, static void otx2_get_egress_rate_cfg(u64 maxrate, u32 *exp,
u32 *mantissa, u32 *div_exp) u32 *mantissa, u32 *div_exp)
{ {
unsigned int tmp; u64 tmp;
/* Rate calculation by hardware /* Rate calculation by hardware
* *
@ -132,21 +147,44 @@ static void otx2_get_egress_rate_cfg(u32 maxrate, u32 *exp,
} }
} }
static int otx2_set_matchall_egress_rate(struct otx2_nic *nic, u32 burst, u32 maxrate) static u64 otx2_get_txschq_rate_regval(struct otx2_nic *nic,
u64 maxrate, u32 burst)
{
u32 burst_exp, burst_mantissa;
u32 exp, mantissa, div_exp;
u64 regval = 0;
/* Get exponent and mantissa values from the desired rate */
otx2_get_egress_burst_cfg(nic, burst, &burst_exp, &burst_mantissa);
otx2_get_egress_rate_cfg(maxrate, &exp, &mantissa, &div_exp);
if (is_dev_otx2(nic->pdev)) {
regval = FIELD_PREP(TLX_BURST_EXPONENT, (u64)burst_exp) |
FIELD_PREP(TLX_BURST_MANTISSA, (u64)burst_mantissa) |
FIELD_PREP(TLX_RATE_DIVIDER_EXPONENT, div_exp) |
FIELD_PREP(TLX_RATE_EXPONENT, exp) |
FIELD_PREP(TLX_RATE_MANTISSA, mantissa) | BIT_ULL(0);
} else {
regval = FIELD_PREP(CN10K_TLX_BURST_EXPONENT, (u64)burst_exp) |
FIELD_PREP(CN10K_TLX_BURST_MANTISSA, (u64)burst_mantissa) |
FIELD_PREP(TLX_RATE_DIVIDER_EXPONENT, div_exp) |
FIELD_PREP(TLX_RATE_EXPONENT, exp) |
FIELD_PREP(TLX_RATE_MANTISSA, mantissa) | BIT_ULL(0);
}
return regval;
}
static int otx2_set_matchall_egress_rate(struct otx2_nic *nic,
u32 burst, u64 maxrate)
{ {
struct otx2_hw *hw = &nic->hw; struct otx2_hw *hw = &nic->hw;
struct nix_txschq_config *req; struct nix_txschq_config *req;
u32 burst_exp, burst_mantissa;
u32 exp, mantissa, div_exp;
int txschq, err; int txschq, err;
/* All SQs share the same TL4, so pick the first scheduler */ /* All SQs share the same TL4, so pick the first scheduler */
txschq = hw->txschq_list[NIX_TXSCH_LVL_TL4][0]; txschq = hw->txschq_list[NIX_TXSCH_LVL_TL4][0];
/* Get exponent and mantissa values from the desired rate */
otx2_get_egress_burst_cfg(burst, &burst_exp, &burst_mantissa);
otx2_get_egress_rate_cfg(maxrate, &exp, &mantissa, &div_exp);
mutex_lock(&nic->mbox.lock); mutex_lock(&nic->mbox.lock);
req = otx2_mbox_alloc_msg_nix_txschq_cfg(&nic->mbox); req = otx2_mbox_alloc_msg_nix_txschq_cfg(&nic->mbox);
if (!req) { if (!req) {
@ -157,11 +195,7 @@ static int otx2_set_matchall_egress_rate(struct otx2_nic *nic, u32 burst, u32 ma
req->lvl = NIX_TXSCH_LVL_TL4; req->lvl = NIX_TXSCH_LVL_TL4;
req->num_regs = 1; req->num_regs = 1;
req->reg[0] = NIX_AF_TL4X_PIR(txschq); req->reg[0] = NIX_AF_TL4X_PIR(txschq);
req->regval[0] = FIELD_PREP(TLX_BURST_EXPONENT, burst_exp) | req->regval[0] = otx2_get_txschq_rate_regval(nic, maxrate, burst);
FIELD_PREP(TLX_BURST_MANTISSA, burst_mantissa) |
FIELD_PREP(TLX_RATE_DIVIDER_EXPONENT, div_exp) |
FIELD_PREP(TLX_RATE_EXPONENT, exp) |
FIELD_PREP(TLX_RATE_MANTISSA, mantissa) | BIT_ULL(0);
err = otx2_sync_mbox_msg(&nic->mbox); err = otx2_sync_mbox_msg(&nic->mbox);
mutex_unlock(&nic->mbox.lock); mutex_unlock(&nic->mbox.lock);
@ -230,7 +264,7 @@ static int otx2_tc_egress_matchall_install(struct otx2_nic *nic,
struct netlink_ext_ack *extack = cls->common.extack; struct netlink_ext_ack *extack = cls->common.extack;
struct flow_action *actions = &cls->rule->action; struct flow_action *actions = &cls->rule->action;
struct flow_action_entry *entry; struct flow_action_entry *entry;
u32 rate; u64 rate;
int err; int err;
err = otx2_tc_validate_flow(nic, actions, extack); err = otx2_tc_validate_flow(nic, actions, extack);
@ -256,7 +290,7 @@ static int otx2_tc_egress_matchall_install(struct otx2_nic *nic,
} }
/* Convert bytes per second to Mbps */ /* Convert bytes per second to Mbps */
rate = entry->police.rate_bytes_ps * 8; rate = entry->police.rate_bytes_ps * 8;
rate = max_t(u32, rate / 1000000, 1); rate = max_t(u64, rate / 1000000, 1);
err = otx2_set_matchall_egress_rate(nic, entry->police.burst, rate); err = otx2_set_matchall_egress_rate(nic, entry->police.burst, rate);
if (err) if (err)
return err; return err;
@ -614,21 +648,27 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
flow_spec->dport = match.key->dst; flow_spec->dport = match.key->dst;
flow_mask->dport = match.mask->dst; flow_mask->dport = match.mask->dst;
if (ip_proto == IPPROTO_UDP)
req->features |= BIT_ULL(NPC_DPORT_UDP); if (flow_mask->dport) {
else if (ip_proto == IPPROTO_TCP) if (ip_proto == IPPROTO_UDP)
req->features |= BIT_ULL(NPC_DPORT_TCP); req->features |= BIT_ULL(NPC_DPORT_UDP);
else if (ip_proto == IPPROTO_SCTP) else if (ip_proto == IPPROTO_TCP)
req->features |= BIT_ULL(NPC_DPORT_SCTP); req->features |= BIT_ULL(NPC_DPORT_TCP);
else if (ip_proto == IPPROTO_SCTP)
req->features |= BIT_ULL(NPC_DPORT_SCTP);
}
flow_spec->sport = match.key->src; flow_spec->sport = match.key->src;
flow_mask->sport = match.mask->src; flow_mask->sport = match.mask->src;
if (ip_proto == IPPROTO_UDP)
req->features |= BIT_ULL(NPC_SPORT_UDP); if (flow_mask->sport) {
else if (ip_proto == IPPROTO_TCP) if (ip_proto == IPPROTO_UDP)
req->features |= BIT_ULL(NPC_SPORT_TCP); req->features |= BIT_ULL(NPC_SPORT_UDP);
else if (ip_proto == IPPROTO_SCTP) else if (ip_proto == IPPROTO_TCP)
req->features |= BIT_ULL(NPC_SPORT_SCTP); req->features |= BIT_ULL(NPC_SPORT_TCP);
else if (ip_proto == IPPROTO_SCTP)
req->features |= BIT_ULL(NPC_SPORT_SCTP);
}
} }
return otx2_tc_parse_actions(nic, &rule->action, req, f, node); return otx2_tc_parse_actions(nic, &rule->action, req, f, node);


@ -4233,7 +4233,7 @@ static void nfp_bpf_opt_ldst_gather(struct nfp_prog *nfp_prog)
} }
/* If the chain is ended by an load/store pair then this /* If the chain is ended by an load/store pair then this
* could serve as the new head of the the next chain. * could serve as the new head of the next chain.
*/ */
if (curr_pair_is_memcpy(meta1, meta2)) { if (curr_pair_is_memcpy(meta1, meta2)) {
head_ld_meta = meta1; head_ld_meta = meta1;


@ -1100,7 +1100,29 @@ static void efx_ptp_xmit_skb_queue(struct efx_nic *efx, struct sk_buff *skb)
tx_queue = efx_channel_get_tx_queue(ptp_data->channel, type); tx_queue = efx_channel_get_tx_queue(ptp_data->channel, type);
if (tx_queue && tx_queue->timestamping) { if (tx_queue && tx_queue->timestamping) {
/* This code invokes normal driver TX code which is always
* protected from softirqs when called from generic TX code,
* which in turn disables preemption. Look at __dev_queue_xmit
* which uses rcu_read_lock_bh disabling preemption for RCU
* plus disabling softirqs. We do not need RCU reader
* protection here.
*
* Although it is theoretically safe for current PTP TX/RX code
* running without disabling softirqs, there are three good
	 * reasons for doing so:
*
* 1) The code invoked is mainly implemented for non-PTP
* packets and it is always executed with softirqs
* disabled.
* 2) This being a single PTP packet, better to not
* interrupt its processing by softirqs which can lead
* to high latencies.
* 3) netdev_xmit_more checks preemption is disabled and
* triggers a BUG_ON if not.
*/
local_bh_disable();
efx_enqueue_skb(tx_queue, skb); efx_enqueue_skb(tx_queue, skb);
local_bh_enable();
} else { } else {
WARN_ONCE(1, "PTP channel has no timestamped tx queue\n"); WARN_ONCE(1, "PTP channel has no timestamped tx queue\n");
dev_kfree_skb_any(skb); dev_kfree_skb_any(skb);


@ -688,18 +688,19 @@ static int mediatek_dwmac_probe(struct platform_device *pdev)
ret = mediatek_dwmac_clks_config(priv_plat, true); ret = mediatek_dwmac_clks_config(priv_plat, true);
if (ret) if (ret)
return ret; goto err_remove_config_dt;
ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
if (ret) { if (ret)
stmmac_remove_config_dt(pdev, plat_dat);
goto err_drv_probe; goto err_drv_probe;
}
return 0; return 0;
err_drv_probe: err_drv_probe:
mediatek_dwmac_clks_config(priv_plat, false); mediatek_dwmac_clks_config(priv_plat, false);
err_remove_config_dt:
stmmac_remove_config_dt(pdev, plat_dat);
return ret; return ret;
} }


@ -214,7 +214,7 @@ struct ipa_init_modem_driver_req {
/* The response to a IPA_QMI_INIT_DRIVER request begins with a standard /* The response to a IPA_QMI_INIT_DRIVER request begins with a standard
* QMI response, but contains other information as well. Currently we * QMI response, but contains other information as well. Currently we
* simply wait for the the INIT_DRIVER transaction to complete and * simply wait for the INIT_DRIVER transaction to complete and
* ignore any other data that might be returned. * ignore any other data that might be returned.
*/ */
struct ipa_init_modem_driver_rsp { struct ipa_init_modem_driver_rsp {


@ -243,6 +243,7 @@ static struct macsec_cb *macsec_skb_cb(struct sk_buff *skb)
#define DEFAULT_SEND_SCI true #define DEFAULT_SEND_SCI true
#define DEFAULT_ENCRYPT false #define DEFAULT_ENCRYPT false
#define DEFAULT_ENCODING_SA 0 #define DEFAULT_ENCODING_SA 0
#define MACSEC_XPN_MAX_REPLAY_WINDOW (((1 << 30) - 1))
static bool send_sci(const struct macsec_secy *secy) static bool send_sci(const struct macsec_secy *secy)
{ {
@ -1697,7 +1698,7 @@ static bool validate_add_rxsa(struct nlattr **attrs)
return false; return false;
if (attrs[MACSEC_SA_ATTR_PN] && if (attrs[MACSEC_SA_ATTR_PN] &&
*(u64 *)nla_data(attrs[MACSEC_SA_ATTR_PN]) == 0) nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0)
return false; return false;
if (attrs[MACSEC_SA_ATTR_ACTIVE]) { if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
@ -1753,7 +1754,8 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
} }
pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN; pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) { if (tb_sa[MACSEC_SA_ATTR_PN] &&
nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
pr_notice("macsec: nl: add_rxsa: bad pn length: %d != %d\n", pr_notice("macsec: nl: add_rxsa: bad pn length: %d != %d\n",
nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len); nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
rtnl_unlock(); rtnl_unlock();
@ -1769,7 +1771,7 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
if (nla_len(tb_sa[MACSEC_SA_ATTR_SALT]) != MACSEC_SALT_LEN) { if (nla_len(tb_sa[MACSEC_SA_ATTR_SALT]) != MACSEC_SALT_LEN) {
pr_notice("macsec: nl: add_rxsa: bad salt length: %d != %d\n", pr_notice("macsec: nl: add_rxsa: bad salt length: %d != %d\n",
nla_len(tb_sa[MACSEC_SA_ATTR_SALT]), nla_len(tb_sa[MACSEC_SA_ATTR_SALT]),
MACSEC_SA_ATTR_SALT); MACSEC_SALT_LEN);
rtnl_unlock(); rtnl_unlock();
return -EINVAL; return -EINVAL;
} }
@ -1842,7 +1844,7 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
return 0; return 0;
cleanup: cleanup:
kfree(rx_sa); macsec_rxsa_put(rx_sa);
rtnl_unlock(); rtnl_unlock();
return err; return err;
} }
@ -1939,7 +1941,7 @@ static bool validate_add_txsa(struct nlattr **attrs)
if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN) if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
return false; return false;
if (nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0) if (nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0)
return false; return false;
if (attrs[MACSEC_SA_ATTR_ACTIVE]) { if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
@ -2011,7 +2013,7 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
if (nla_len(tb_sa[MACSEC_SA_ATTR_SALT]) != MACSEC_SALT_LEN) { if (nla_len(tb_sa[MACSEC_SA_ATTR_SALT]) != MACSEC_SALT_LEN) {
pr_notice("macsec: nl: add_txsa: bad salt length: %d != %d\n", pr_notice("macsec: nl: add_txsa: bad salt length: %d != %d\n",
nla_len(tb_sa[MACSEC_SA_ATTR_SALT]), nla_len(tb_sa[MACSEC_SA_ATTR_SALT]),
MACSEC_SA_ATTR_SALT); MACSEC_SALT_LEN);
rtnl_unlock(); rtnl_unlock();
return -EINVAL; return -EINVAL;
} }
@ -2085,7 +2087,7 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
cleanup: cleanup:
secy->operational = was_operational; secy->operational = was_operational;
kfree(tx_sa); macsec_txsa_put(tx_sa);
rtnl_unlock(); rtnl_unlock();
return err; return err;
} }
@ -2293,7 +2295,7 @@ static bool validate_upd_sa(struct nlattr **attrs)
if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN) if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
return false; return false;
if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0) if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0)
return false; return false;
if (attrs[MACSEC_SA_ATTR_ACTIVE]) { if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
@ -3745,9 +3747,6 @@ static int macsec_changelink_common(struct net_device *dev,
secy->operational = tx_sa && tx_sa->active; secy->operational = tx_sa && tx_sa->active;
} }
if (data[IFLA_MACSEC_WINDOW])
secy->replay_window = nla_get_u32(data[IFLA_MACSEC_WINDOW]);
if (data[IFLA_MACSEC_ENCRYPT]) if (data[IFLA_MACSEC_ENCRYPT])
tx_sc->encrypt = !!nla_get_u8(data[IFLA_MACSEC_ENCRYPT]); tx_sc->encrypt = !!nla_get_u8(data[IFLA_MACSEC_ENCRYPT]);
@ -3793,6 +3792,16 @@ static int macsec_changelink_common(struct net_device *dev,
} }
} }
if (data[IFLA_MACSEC_WINDOW]) {
secy->replay_window = nla_get_u32(data[IFLA_MACSEC_WINDOW]);
/* IEEE 802.1AEbw-2013 10.7.8 - maximum replay window
* for XPN cipher suites */
if (secy->xpn &&
secy->replay_window > MACSEC_XPN_MAX_REPLAY_WINDOW)
return -EINVAL;
}
return 0; return 0;
} }
@ -3822,7 +3831,7 @@ static int macsec_changelink(struct net_device *dev, struct nlattr *tb[],
ret = macsec_changelink_common(dev, data); ret = macsec_changelink_common(dev, data);
if (ret) if (ret)
return ret; goto cleanup;
/* If h/w offloading is available, propagate to the device */ /* If h/w offloading is available, propagate to the device */
if (macsec_is_offloaded(macsec)) { if (macsec_is_offloaded(macsec)) {


@ -986,7 +986,7 @@ static int xpcs_get_state_c37_sgmii(struct dw_xpcs *xpcs,
*/ */
ret = xpcs_read(xpcs, MDIO_MMD_VEND2, DW_VR_MII_AN_INTR_STS); ret = xpcs_read(xpcs, MDIO_MMD_VEND2, DW_VR_MII_AN_INTR_STS);
if (ret < 0) if (ret < 0)
return false; return ret;
if (ret & DW_VR_MII_C37_ANSGM_SP_LNKSTS) { if (ret & DW_VR_MII_C37_ANSGM_SP_LNKSTS) {
int speed_value; int speed_value;


@ -450,6 +450,7 @@ static int bcm5421_init(struct mii_phy* phy)
int can_low_power = 1; int can_low_power = 1;
if (np == NULL || of_get_property(np, "no-autolowpower", NULL)) if (np == NULL || of_get_property(np, "no-autolowpower", NULL))
can_low_power = 0; can_low_power = 0;
of_node_put(np);
if (can_low_power) { if (can_low_power) {
/* Enable automatic low-power */ /* Enable automatic low-power */
sungem_phy_write(phy, 0x1c, 0x9002); sungem_phy_write(phy, 0x1c, 0x9002);


@ -242,9 +242,15 @@ struct virtnet_info {
/* Packet virtio header size */ /* Packet virtio header size */
u8 hdr_len; u8 hdr_len;
/* Work struct for refilling if we run low on memory. */ /* Work struct for delayed refilling if we run low on memory. */
struct delayed_work refill; struct delayed_work refill;
/* Is delayed refill enabled? */
bool refill_enabled;
/* The lock to synchronize the access to refill_enabled */
spinlock_t refill_lock;
/* Work struct for config space updates */ /* Work struct for config space updates */
struct work_struct config_work; struct work_struct config_work;
@ -348,6 +354,20 @@ static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
return p; return p;
} }
static void enable_delayed_refill(struct virtnet_info *vi)
{
spin_lock_bh(&vi->refill_lock);
vi->refill_enabled = true;
spin_unlock_bh(&vi->refill_lock);
}
static void disable_delayed_refill(struct virtnet_info *vi)
{
spin_lock_bh(&vi->refill_lock);
vi->refill_enabled = false;
spin_unlock_bh(&vi->refill_lock);
}
static void virtqueue_napi_schedule(struct napi_struct *napi, static void virtqueue_napi_schedule(struct napi_struct *napi,
struct virtqueue *vq) struct virtqueue *vq)
{ {
@ -1527,8 +1547,12 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
} }
if (rq->vq->num_free > min((unsigned int)budget, virtqueue_get_vring_size(rq->vq)) / 2) { if (rq->vq->num_free > min((unsigned int)budget, virtqueue_get_vring_size(rq->vq)) / 2) {
if (!try_fill_recv(vi, rq, GFP_ATOMIC)) if (!try_fill_recv(vi, rq, GFP_ATOMIC)) {
schedule_delayed_work(&vi->refill, 0); spin_lock(&vi->refill_lock);
if (vi->refill_enabled)
schedule_delayed_work(&vi->refill, 0);
spin_unlock(&vi->refill_lock);
}
} }
u64_stats_update_begin(&rq->stats.syncp); u64_stats_update_begin(&rq->stats.syncp);
@ -1651,6 +1675,8 @@ static int virtnet_open(struct net_device *dev)
struct virtnet_info *vi = netdev_priv(dev); struct virtnet_info *vi = netdev_priv(dev);
int i, err; int i, err;
enable_delayed_refill(vi);
for (i = 0; i < vi->max_queue_pairs; i++) { for (i = 0; i < vi->max_queue_pairs; i++) {
if (i < vi->curr_queue_pairs) if (i < vi->curr_queue_pairs)
/* Make sure we have some buffers: if oom use wq. */ /* Make sure we have some buffers: if oom use wq. */
@ -2033,6 +2059,8 @@ static int virtnet_close(struct net_device *dev)
struct virtnet_info *vi = netdev_priv(dev); struct virtnet_info *vi = netdev_priv(dev);
int i; int i;
/* Make sure NAPI doesn't schedule refill work */
disable_delayed_refill(vi);
/* Make sure refill_work doesn't re-enable napi! */ /* Make sure refill_work doesn't re-enable napi! */
cancel_delayed_work_sync(&vi->refill); cancel_delayed_work_sync(&vi->refill);
@ -2792,6 +2820,8 @@ static int virtnet_restore_up(struct virtio_device *vdev)
virtio_device_ready(vdev); virtio_device_ready(vdev);
enable_delayed_refill(vi);
if (netif_running(vi->dev)) { if (netif_running(vi->dev)) {
err = virtnet_open(vi->dev); err = virtnet_open(vi->dev);
if (err) if (err)
@ -3535,6 +3565,7 @@ static int virtnet_probe(struct virtio_device *vdev)
vdev->priv = vi; vdev->priv = vi;
INIT_WORK(&vi->config_work, virtnet_config_changed_work); INIT_WORK(&vi->config_work, virtnet_config_changed_work);
spin_lock_init(&vi->refill_lock);
/* If we can receive ANY GSO packets, we must allocate large ones. */ /* If we can receive ANY GSO packets, we must allocate large ones. */
if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) || if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
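
The virtio-net hunks above gate schedule_delayed_work() behind a refill_enabled flag flipped under refill_lock, so virtnet_close() and the restore path can guarantee no new refill work is queued before cancelling it. A small userspace analogue of that pattern, assuming pthreads and invented names:

/* Userspace analogue of the refill_enabled pattern (invented names): a
 * producer only queues deferred work while a flag is set, and the flag is
 * flipped under the same lock, so after disable_refill() returns no new
 * work can be queued and pending work can be drained safely. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t refill_lock = PTHREAD_MUTEX_INITIALIZER;
static bool refill_enabled;
static int queued;

static void enable_refill(void)
{
        pthread_mutex_lock(&refill_lock);
        refill_enabled = true;
        pthread_mutex_unlock(&refill_lock);
}

static void disable_refill(void)
{
        pthread_mutex_lock(&refill_lock);
        refill_enabled = false;
        pthread_mutex_unlock(&refill_lock);
}

/* Called from the "datapath": queue work only if still enabled. */
static void maybe_schedule_refill(void)
{
        pthread_mutex_lock(&refill_lock);
        if (refill_enabled)
                queued++;               /* stand-in for schedule_delayed_work() */
        pthread_mutex_unlock(&refill_lock);
}

int main(void)
{
        enable_refill();
        maybe_schedule_refill();        /* queued */
        disable_refill();               /* from now on nothing new is queued */
        maybe_schedule_refill();        /* ignored */
        printf("queued=%d\n", queued);  /* prints 1 */
        return 0;
}

Taking the lock in the scheduling path as well is what makes disable_refill() a barrier: it cannot return while a concurrent caller sits between the flag check and the enqueue.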

View File

@ -32,7 +32,7 @@ config DEBUG_PINCTRL
Say Y here to add some extra checks and diagnostics to PINCTRL calls. Say Y here to add some extra checks and diagnostics to PINCTRL calls.
config PINCTRL_AMD config PINCTRL_AMD
tristate "AMD GPIO pin control" bool "AMD GPIO pin control"
depends on HAS_IOMEM depends on HAS_IOMEM
depends on ACPI || COMPILE_TEST depends on ACPI || COMPILE_TEST
select GPIOLIB select GPIOLIB

View File

@ -102,7 +102,7 @@ struct armada_37xx_pinctrl {
struct device *dev; struct device *dev;
struct gpio_chip gpio_chip; struct gpio_chip gpio_chip;
struct irq_chip irq_chip; struct irq_chip irq_chip;
spinlock_t irq_lock; raw_spinlock_t irq_lock;
struct pinctrl_desc pctl; struct pinctrl_desc pctl;
struct pinctrl_dev *pctl_dev; struct pinctrl_dev *pctl_dev;
struct armada_37xx_pin_group *groups; struct armada_37xx_pin_group *groups;
@ -523,9 +523,9 @@ static void armada_37xx_irq_ack(struct irq_data *d)
unsigned long flags; unsigned long flags;
armada_37xx_irq_update_reg(&reg, d); armada_37xx_irq_update_reg(&reg, d);
spin_lock_irqsave(&info->irq_lock, flags); raw_spin_lock_irqsave(&info->irq_lock, flags);
writel(d->mask, info->base + reg); writel(d->mask, info->base + reg);
spin_unlock_irqrestore(&info->irq_lock, flags); raw_spin_unlock_irqrestore(&info->irq_lock, flags);
} }
static void armada_37xx_irq_mask(struct irq_data *d) static void armada_37xx_irq_mask(struct irq_data *d)
@ -536,10 +536,10 @@ static void armada_37xx_irq_mask(struct irq_data *d)
unsigned long flags; unsigned long flags;
armada_37xx_irq_update_reg(&reg, d); armada_37xx_irq_update_reg(&reg, d);
spin_lock_irqsave(&info->irq_lock, flags); raw_spin_lock_irqsave(&info->irq_lock, flags);
val = readl(info->base + reg); val = readl(info->base + reg);
writel(val & ~d->mask, info->base + reg); writel(val & ~d->mask, info->base + reg);
spin_unlock_irqrestore(&info->irq_lock, flags); raw_spin_unlock_irqrestore(&info->irq_lock, flags);
} }
static void armada_37xx_irq_unmask(struct irq_data *d) static void armada_37xx_irq_unmask(struct irq_data *d)
@ -550,10 +550,10 @@ static void armada_37xx_irq_unmask(struct irq_data *d)
unsigned long flags; unsigned long flags;
armada_37xx_irq_update_reg(&reg, d); armada_37xx_irq_update_reg(&reg, d);
spin_lock_irqsave(&info->irq_lock, flags); raw_spin_lock_irqsave(&info->irq_lock, flags);
val = readl(info->base + reg); val = readl(info->base + reg);
writel(val | d->mask, info->base + reg); writel(val | d->mask, info->base + reg);
spin_unlock_irqrestore(&info->irq_lock, flags); raw_spin_unlock_irqrestore(&info->irq_lock, flags);
} }
static int armada_37xx_irq_set_wake(struct irq_data *d, unsigned int on) static int armada_37xx_irq_set_wake(struct irq_data *d, unsigned int on)
@ -564,14 +564,14 @@ static int armada_37xx_irq_set_wake(struct irq_data *d, unsigned int on)
unsigned long flags; unsigned long flags;
armada_37xx_irq_update_reg(&reg, d); armada_37xx_irq_update_reg(&reg, d);
spin_lock_irqsave(&info->irq_lock, flags); raw_spin_lock_irqsave(&info->irq_lock, flags);
val = readl(info->base + reg); val = readl(info->base + reg);
if (on) if (on)
val |= (BIT(d->hwirq % GPIO_PER_REG)); val |= (BIT(d->hwirq % GPIO_PER_REG));
else else
val &= ~(BIT(d->hwirq % GPIO_PER_REG)); val &= ~(BIT(d->hwirq % GPIO_PER_REG));
writel(val, info->base + reg); writel(val, info->base + reg);
spin_unlock_irqrestore(&info->irq_lock, flags); raw_spin_unlock_irqrestore(&info->irq_lock, flags);
return 0; return 0;
} }
@ -583,7 +583,7 @@ static int armada_37xx_irq_set_type(struct irq_data *d, unsigned int type)
u32 val, reg = IRQ_POL; u32 val, reg = IRQ_POL;
unsigned long flags; unsigned long flags;
spin_lock_irqsave(&info->irq_lock, flags); raw_spin_lock_irqsave(&info->irq_lock, flags);
armada_37xx_irq_update_reg(&reg, d); armada_37xx_irq_update_reg(&reg, d);
val = readl(info->base + reg); val = readl(info->base + reg);
switch (type) { switch (type) {
@ -607,11 +607,11 @@ static int armada_37xx_irq_set_type(struct irq_data *d, unsigned int type)
break; break;
} }
default: default:
spin_unlock_irqrestore(&info->irq_lock, flags); raw_spin_unlock_irqrestore(&info->irq_lock, flags);
return -EINVAL; return -EINVAL;
} }
writel(val, info->base + reg); writel(val, info->base + reg);
spin_unlock_irqrestore(&info->irq_lock, flags); raw_spin_unlock_irqrestore(&info->irq_lock, flags);
return 0; return 0;
} }
@ -626,7 +626,7 @@ static int armada_37xx_edge_both_irq_swap_pol(struct armada_37xx_pinctrl *info,
regmap_read(info->regmap, INPUT_VAL + 4*reg_idx, &l); regmap_read(info->regmap, INPUT_VAL + 4*reg_idx, &l);
spin_lock_irqsave(&info->irq_lock, flags); raw_spin_lock_irqsave(&info->irq_lock, flags);
p = readl(info->base + IRQ_POL + 4 * reg_idx); p = readl(info->base + IRQ_POL + 4 * reg_idx);
if ((p ^ l) & (1 << bit_num)) { if ((p ^ l) & (1 << bit_num)) {
/* /*
@ -647,7 +647,7 @@ static int armada_37xx_edge_both_irq_swap_pol(struct armada_37xx_pinctrl *info,
ret = -1; ret = -1;
} }
spin_unlock_irqrestore(&info->irq_lock, flags); raw_spin_unlock_irqrestore(&info->irq_lock, flags);
return ret; return ret;
} }
@ -664,11 +664,11 @@ static void armada_37xx_irq_handler(struct irq_desc *desc)
u32 status; u32 status;
unsigned long flags; unsigned long flags;
spin_lock_irqsave(&info->irq_lock, flags); raw_spin_lock_irqsave(&info->irq_lock, flags);
status = readl_relaxed(info->base + IRQ_STATUS + 4 * i); status = readl_relaxed(info->base + IRQ_STATUS + 4 * i);
/* Manage only the interrupt that was enabled */ /* Manage only the interrupt that was enabled */
status &= readl_relaxed(info->base + IRQ_EN + 4 * i); status &= readl_relaxed(info->base + IRQ_EN + 4 * i);
spin_unlock_irqrestore(&info->irq_lock, flags); raw_spin_unlock_irqrestore(&info->irq_lock, flags);
while (status) { while (status) {
u32 hwirq = ffs(status) - 1; u32 hwirq = ffs(status) - 1;
u32 virq = irq_find_mapping(d, hwirq + u32 virq = irq_find_mapping(d, hwirq +
@ -695,12 +695,12 @@ static void armada_37xx_irq_handler(struct irq_desc *desc)
update_status: update_status:
/* Update status in case a new IRQ appears */ /* Update status in case a new IRQ appears */
spin_lock_irqsave(&info->irq_lock, flags); raw_spin_lock_irqsave(&info->irq_lock, flags);
status = readl_relaxed(info->base + status = readl_relaxed(info->base +
IRQ_STATUS + 4 * i); IRQ_STATUS + 4 * i);
/* Manage only the interrupt that was enabled */ /* Manage only the interrupt that was enabled */
status &= readl_relaxed(info->base + IRQ_EN + 4 * i); status &= readl_relaxed(info->base + IRQ_EN + 4 * i);
spin_unlock_irqrestore(&info->irq_lock, flags); raw_spin_unlock_irqrestore(&info->irq_lock, flags);
} }
} }
chained_irq_exit(chip, desc); chained_irq_exit(chip, desc);
@ -731,7 +731,7 @@ static int armada_37xx_irqchip_register(struct platform_device *pdev,
struct device *dev = &pdev->dev; struct device *dev = &pdev->dev;
unsigned int i, nr_irq_parent; unsigned int i, nr_irq_parent;
spin_lock_init(&info->irq_lock); raw_spin_lock_init(&info->irq_lock);
nr_irq_parent = of_irq_count(np); nr_irq_parent = of_irq_count(np);
if (!nr_irq_parent) { if (!nr_irq_parent) {
@ -1107,25 +1107,40 @@ static const struct of_device_id armada_37xx_pinctrl_of_match[] = {
{ }, { },
}; };
static const struct regmap_config armada_37xx_pinctrl_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
.use_raw_spinlock = true,
};
static int __init armada_37xx_pinctrl_probe(struct platform_device *pdev) static int __init armada_37xx_pinctrl_probe(struct platform_device *pdev)
{ {
struct armada_37xx_pinctrl *info; struct armada_37xx_pinctrl *info;
struct device *dev = &pdev->dev; struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct regmap *regmap; struct regmap *regmap;
void __iomem *base;
int ret; int ret;
base = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
if (IS_ERR(base)) {
dev_err(dev, "failed to ioremap base address: %pe\n", base);
return PTR_ERR(base);
}
regmap = devm_regmap_init_mmio(dev, base,
&armada_37xx_pinctrl_regmap_config);
if (IS_ERR(regmap)) {
dev_err(dev, "failed to create regmap: %pe\n", regmap);
return PTR_ERR(regmap);
}
info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL); info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
if (!info) if (!info)
return -ENOMEM; return -ENOMEM;
info->dev = dev; info->dev = dev;
regmap = syscon_node_to_regmap(np);
if (IS_ERR(regmap))
return dev_err_probe(dev, PTR_ERR(regmap), "cannot get regmap\n");
info->regmap = regmap; info->regmap = regmap;
info->data = of_device_get_match_data(dev); info->data = of_device_get_match_data(dev);
ret = armada_37xx_pinctrl_register(pdev, info); ret = armada_37xx_pinctrl_register(pdev, info);

View File

@ -29,19 +29,12 @@
#define ocelot_clrsetbits(addr, clear, set) \ #define ocelot_clrsetbits(addr, clear, set) \
writel((readl(addr) & ~(clear)) | (set), (addr)) writel((readl(addr) & ~(clear)) | (set), (addr))
/* PINCONFIG bits (sparx5 only) */
enum { enum {
PINCONF_BIAS, PINCONF_BIAS,
PINCONF_SCHMITT, PINCONF_SCHMITT,
PINCONF_DRIVE_STRENGTH, PINCONF_DRIVE_STRENGTH,
}; };
#define BIAS_PD_BIT BIT(4)
#define BIAS_PU_BIT BIT(3)
#define BIAS_BITS (BIAS_PD_BIT|BIAS_PU_BIT)
#define SCHMITT_BIT BIT(2)
#define DRIVE_BITS GENMASK(1, 0)
/* GPIO standard registers */ /* GPIO standard registers */
#define OCELOT_GPIO_OUT_SET 0x0 #define OCELOT_GPIO_OUT_SET 0x0
#define OCELOT_GPIO_OUT_CLR 0x4 #define OCELOT_GPIO_OUT_CLR 0x4
@ -321,6 +314,13 @@ struct ocelot_pin_caps {
unsigned char a_functions[OCELOT_FUNC_PER_PIN]; /* Additional functions */ unsigned char a_functions[OCELOT_FUNC_PER_PIN]; /* Additional functions */
}; };
struct ocelot_pincfg_data {
u8 pd_bit;
u8 pu_bit;
u8 drive_bits;
u8 schmitt_bit;
};
struct ocelot_pinctrl { struct ocelot_pinctrl {
struct device *dev; struct device *dev;
struct pinctrl_dev *pctl; struct pinctrl_dev *pctl;
@ -328,10 +328,16 @@ struct ocelot_pinctrl {
struct regmap *map; struct regmap *map;
struct regmap *pincfg; struct regmap *pincfg;
struct pinctrl_desc *desc; struct pinctrl_desc *desc;
const struct ocelot_pincfg_data *pincfg_data;
struct ocelot_pmx_func func[FUNC_MAX]; struct ocelot_pmx_func func[FUNC_MAX];
u8 stride; u8 stride;
}; };
struct ocelot_match_data {
struct pinctrl_desc desc;
struct ocelot_pincfg_data pincfg_data;
};
#define LUTON_P(p, f0, f1) \ #define LUTON_P(p, f0, f1) \
static struct ocelot_pin_caps luton_pin_##p = { \ static struct ocelot_pin_caps luton_pin_##p = { \
.pin = p, \ .pin = p, \
@ -1325,24 +1331,27 @@ static int ocelot_hw_get_value(struct ocelot_pinctrl *info,
int ret = -EOPNOTSUPP; int ret = -EOPNOTSUPP;
if (info->pincfg) { if (info->pincfg) {
const struct ocelot_pincfg_data *opd = info->pincfg_data;
u32 regcfg; u32 regcfg;
ret = regmap_read(info->pincfg, pin, &regcfg); ret = regmap_read(info->pincfg,
pin * regmap_get_reg_stride(info->pincfg),
&regcfg);
if (ret) if (ret)
return ret; return ret;
ret = 0; ret = 0;
switch (reg) { switch (reg) {
case PINCONF_BIAS: case PINCONF_BIAS:
*val = regcfg & BIAS_BITS; *val = regcfg & (opd->pd_bit | opd->pu_bit);
break; break;
case PINCONF_SCHMITT: case PINCONF_SCHMITT:
*val = regcfg & SCHMITT_BIT; *val = regcfg & opd->schmitt_bit;
break; break;
case PINCONF_DRIVE_STRENGTH: case PINCONF_DRIVE_STRENGTH:
*val = regcfg & DRIVE_BITS; *val = regcfg & opd->drive_bits;
break; break;
default: default:
@ -1359,14 +1368,18 @@ static int ocelot_pincfg_clrsetbits(struct ocelot_pinctrl *info, u32 regaddr,
u32 val; u32 val;
int ret; int ret;
ret = regmap_read(info->pincfg, regaddr, &val); ret = regmap_read(info->pincfg,
regaddr * regmap_get_reg_stride(info->pincfg),
&val);
if (ret) if (ret)
return ret; return ret;
val &= ~clrbits; val &= ~clrbits;
val |= setbits; val |= setbits;
ret = regmap_write(info->pincfg, regaddr, val); ret = regmap_write(info->pincfg,
regaddr * regmap_get_reg_stride(info->pincfg),
val);
return ret; return ret;
} }
@ -1379,23 +1392,27 @@ static int ocelot_hw_set_value(struct ocelot_pinctrl *info,
int ret = -EOPNOTSUPP; int ret = -EOPNOTSUPP;
if (info->pincfg) { if (info->pincfg) {
const struct ocelot_pincfg_data *opd = info->pincfg_data;
ret = 0; ret = 0;
switch (reg) { switch (reg) {
case PINCONF_BIAS: case PINCONF_BIAS:
ret = ocelot_pincfg_clrsetbits(info, pin, BIAS_BITS, ret = ocelot_pincfg_clrsetbits(info, pin,
opd->pd_bit | opd->pu_bit,
val); val);
break; break;
case PINCONF_SCHMITT: case PINCONF_SCHMITT:
ret = ocelot_pincfg_clrsetbits(info, pin, SCHMITT_BIT, ret = ocelot_pincfg_clrsetbits(info, pin,
opd->schmitt_bit,
val); val);
break; break;
case PINCONF_DRIVE_STRENGTH: case PINCONF_DRIVE_STRENGTH:
if (val <= 3) if (val <= 3)
ret = ocelot_pincfg_clrsetbits(info, pin, ret = ocelot_pincfg_clrsetbits(info, pin,
DRIVE_BITS, val); opd->drive_bits,
val);
else else
ret = -EINVAL; ret = -EINVAL;
break; break;
@ -1425,17 +1442,20 @@ static int ocelot_pinconf_get(struct pinctrl_dev *pctldev,
if (param == PIN_CONFIG_BIAS_DISABLE) if (param == PIN_CONFIG_BIAS_DISABLE)
val = (val == 0); val = (val == 0);
else if (param == PIN_CONFIG_BIAS_PULL_DOWN) else if (param == PIN_CONFIG_BIAS_PULL_DOWN)
val = (val & BIAS_PD_BIT ? true : false); val = !!(val & info->pincfg_data->pd_bit);
else /* PIN_CONFIG_BIAS_PULL_UP */ else /* PIN_CONFIG_BIAS_PULL_UP */
val = (val & BIAS_PU_BIT ? true : false); val = !!(val & info->pincfg_data->pu_bit);
break; break;
case PIN_CONFIG_INPUT_SCHMITT_ENABLE: case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
if (!info->pincfg_data->schmitt_bit)
return -EOPNOTSUPP;
err = ocelot_hw_get_value(info, pin, PINCONF_SCHMITT, &val); err = ocelot_hw_get_value(info, pin, PINCONF_SCHMITT, &val);
if (err) if (err)
return err; return err;
val = (val & SCHMITT_BIT ? true : false); val = !!(val & info->pincfg_data->schmitt_bit);
break; break;
case PIN_CONFIG_DRIVE_STRENGTH: case PIN_CONFIG_DRIVE_STRENGTH:
@ -1479,6 +1499,7 @@ static int ocelot_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
unsigned long *configs, unsigned int num_configs) unsigned long *configs, unsigned int num_configs)
{ {
struct ocelot_pinctrl *info = pinctrl_dev_get_drvdata(pctldev); struct ocelot_pinctrl *info = pinctrl_dev_get_drvdata(pctldev);
const struct ocelot_pincfg_data *opd = info->pincfg_data;
u32 param, arg, p; u32 param, arg, p;
int cfg, err = 0; int cfg, err = 0;
@ -1491,8 +1512,8 @@ static int ocelot_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
case PIN_CONFIG_BIAS_PULL_UP: case PIN_CONFIG_BIAS_PULL_UP:
case PIN_CONFIG_BIAS_PULL_DOWN: case PIN_CONFIG_BIAS_PULL_DOWN:
arg = (param == PIN_CONFIG_BIAS_DISABLE) ? 0 : arg = (param == PIN_CONFIG_BIAS_DISABLE) ? 0 :
(param == PIN_CONFIG_BIAS_PULL_UP) ? BIAS_PU_BIT : (param == PIN_CONFIG_BIAS_PULL_UP) ?
BIAS_PD_BIT; opd->pu_bit : opd->pd_bit;
err = ocelot_hw_set_value(info, pin, PINCONF_BIAS, arg); err = ocelot_hw_set_value(info, pin, PINCONF_BIAS, arg);
if (err) if (err)
@ -1501,7 +1522,10 @@ static int ocelot_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
break; break;
case PIN_CONFIG_INPUT_SCHMITT_ENABLE: case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
arg = arg ? SCHMITT_BIT : 0; if (!opd->schmitt_bit)
return -EOPNOTSUPP;
arg = arg ? opd->schmitt_bit : 0;
err = ocelot_hw_set_value(info, pin, PINCONF_SCHMITT, err = ocelot_hw_set_value(info, pin, PINCONF_SCHMITT,
arg); arg);
if (err) if (err)
@ -1562,69 +1586,94 @@ static const struct pinctrl_ops ocelot_pctl_ops = {
.dt_free_map = pinconf_generic_dt_free_map, .dt_free_map = pinconf_generic_dt_free_map,
}; };
static struct pinctrl_desc luton_desc = { static struct ocelot_match_data luton_desc = {
.name = "luton-pinctrl", .desc = {
.pins = luton_pins, .name = "luton-pinctrl",
.npins = ARRAY_SIZE(luton_pins), .pins = luton_pins,
.pctlops = &ocelot_pctl_ops, .npins = ARRAY_SIZE(luton_pins),
.pmxops = &ocelot_pmx_ops, .pctlops = &ocelot_pctl_ops,
.owner = THIS_MODULE, .pmxops = &ocelot_pmx_ops,
.owner = THIS_MODULE,
},
}; };
static struct pinctrl_desc serval_desc = { static struct ocelot_match_data serval_desc = {
.name = "serval-pinctrl", .desc = {
.pins = serval_pins, .name = "serval-pinctrl",
.npins = ARRAY_SIZE(serval_pins), .pins = serval_pins,
.pctlops = &ocelot_pctl_ops, .npins = ARRAY_SIZE(serval_pins),
.pmxops = &ocelot_pmx_ops, .pctlops = &ocelot_pctl_ops,
.owner = THIS_MODULE, .pmxops = &ocelot_pmx_ops,
.owner = THIS_MODULE,
},
}; };
static struct pinctrl_desc ocelot_desc = { static struct ocelot_match_data ocelot_desc = {
.name = "ocelot-pinctrl", .desc = {
.pins = ocelot_pins, .name = "ocelot-pinctrl",
.npins = ARRAY_SIZE(ocelot_pins), .pins = ocelot_pins,
.pctlops = &ocelot_pctl_ops, .npins = ARRAY_SIZE(ocelot_pins),
.pmxops = &ocelot_pmx_ops, .pctlops = &ocelot_pctl_ops,
.owner = THIS_MODULE, .pmxops = &ocelot_pmx_ops,
.owner = THIS_MODULE,
},
}; };
static struct pinctrl_desc jaguar2_desc = { static struct ocelot_match_data jaguar2_desc = {
.name = "jaguar2-pinctrl", .desc = {
.pins = jaguar2_pins, .name = "jaguar2-pinctrl",
.npins = ARRAY_SIZE(jaguar2_pins), .pins = jaguar2_pins,
.pctlops = &ocelot_pctl_ops, .npins = ARRAY_SIZE(jaguar2_pins),
.pmxops = &ocelot_pmx_ops, .pctlops = &ocelot_pctl_ops,
.owner = THIS_MODULE, .pmxops = &ocelot_pmx_ops,
.owner = THIS_MODULE,
},
}; };
static struct pinctrl_desc servalt_desc = { static struct ocelot_match_data servalt_desc = {
.name = "servalt-pinctrl", .desc = {
.pins = servalt_pins, .name = "servalt-pinctrl",
.npins = ARRAY_SIZE(servalt_pins), .pins = servalt_pins,
.pctlops = &ocelot_pctl_ops, .npins = ARRAY_SIZE(servalt_pins),
.pmxops = &ocelot_pmx_ops, .pctlops = &ocelot_pctl_ops,
.owner = THIS_MODULE, .pmxops = &ocelot_pmx_ops,
.owner = THIS_MODULE,
},
}; };
static struct pinctrl_desc sparx5_desc = { static struct ocelot_match_data sparx5_desc = {
.name = "sparx5-pinctrl", .desc = {
.pins = sparx5_pins, .name = "sparx5-pinctrl",
.npins = ARRAY_SIZE(sparx5_pins), .pins = sparx5_pins,
.pctlops = &ocelot_pctl_ops, .npins = ARRAY_SIZE(sparx5_pins),
.pmxops = &ocelot_pmx_ops, .pctlops = &ocelot_pctl_ops,
.confops = &ocelot_confops, .pmxops = &ocelot_pmx_ops,
.owner = THIS_MODULE, .confops = &ocelot_confops,
.owner = THIS_MODULE,
},
.pincfg_data = {
.pd_bit = BIT(4),
.pu_bit = BIT(3),
.drive_bits = GENMASK(1, 0),
.schmitt_bit = BIT(2),
},
}; };
static struct pinctrl_desc lan966x_desc = { static struct ocelot_match_data lan966x_desc = {
.name = "lan966x-pinctrl", .desc = {
.pins = lan966x_pins, .name = "lan966x-pinctrl",
.npins = ARRAY_SIZE(lan966x_pins), .pins = lan966x_pins,
.pctlops = &ocelot_pctl_ops, .npins = ARRAY_SIZE(lan966x_pins),
.pmxops = &lan966x_pmx_ops, .pctlops = &ocelot_pctl_ops,
.confops = &ocelot_confops, .pmxops = &lan966x_pmx_ops,
.owner = THIS_MODULE, .confops = &ocelot_confops,
.owner = THIS_MODULE,
},
.pincfg_data = {
.pd_bit = BIT(3),
.pu_bit = BIT(2),
.drive_bits = GENMASK(1, 0),
},
}; };
static int ocelot_create_group_func_map(struct device *dev, static int ocelot_create_group_func_map(struct device *dev,
@ -1890,7 +1939,8 @@ static const struct of_device_id ocelot_pinctrl_of_match[] = {
{}, {},
}; };
static struct regmap *ocelot_pinctrl_create_pincfg(struct platform_device *pdev) static struct regmap *ocelot_pinctrl_create_pincfg(struct platform_device *pdev,
const struct ocelot_pinctrl *info)
{ {
void __iomem *base; void __iomem *base;
@ -1898,7 +1948,7 @@ static struct regmap *ocelot_pinctrl_create_pincfg(struct platform_device *pdev)
.reg_bits = 32, .reg_bits = 32,
.val_bits = 32, .val_bits = 32,
.reg_stride = 4, .reg_stride = 4,
.max_register = 32, .max_register = info->desc->npins * 4,
.name = "pincfg", .name = "pincfg",
}; };
@ -1913,6 +1963,7 @@ static struct regmap *ocelot_pinctrl_create_pincfg(struct platform_device *pdev)
static int ocelot_pinctrl_probe(struct platform_device *pdev) static int ocelot_pinctrl_probe(struct platform_device *pdev)
{ {
const struct ocelot_match_data *data;
struct device *dev = &pdev->dev; struct device *dev = &pdev->dev;
struct ocelot_pinctrl *info; struct ocelot_pinctrl *info;
struct reset_control *reset; struct reset_control *reset;
@ -1929,7 +1980,16 @@ static int ocelot_pinctrl_probe(struct platform_device *pdev)
if (!info) if (!info)
return -ENOMEM; return -ENOMEM;
info->desc = (struct pinctrl_desc *)device_get_match_data(dev); data = device_get_match_data(dev);
if (!data)
return -EINVAL;
info->desc = devm_kmemdup(dev, &data->desc, sizeof(*info->desc),
GFP_KERNEL);
if (!info->desc)
return -ENOMEM;
info->pincfg_data = &data->pincfg_data;
reset = devm_reset_control_get_optional_shared(dev, "switch"); reset = devm_reset_control_get_optional_shared(dev, "switch");
if (IS_ERR(reset)) if (IS_ERR(reset))
@ -1956,7 +2016,7 @@ static int ocelot_pinctrl_probe(struct platform_device *pdev)
/* Pinconf registers */ /* Pinconf registers */
if (info->desc->confops) { if (info->desc->confops) {
pincfg = ocelot_pinctrl_create_pincfg(pdev); pincfg = ocelot_pinctrl_create_pincfg(pdev, info);
if (IS_ERR(pincfg)) if (IS_ERR(pincfg))
dev_dbg(dev, "Failed to create pincfg regmap\n"); dev_dbg(dev, "Failed to create pincfg regmap\n");
else else
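
The ocelot rework above moves the SoC-specific pull-up/pull-down, drive-strength and Schmitt bit positions out of global #defines and into match data selected at probe time. An illustrative, standalone C sketch of that per-variant table pattern (the bit values mirror the sparx5 and lan966x entries in the diff; the lookup helper and main() are invented):

/* Per-variant match-data sketch: bit positions that differ between SoCs
 * live in a small config struct selected once, instead of global #defines. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct pincfg_data {
        uint8_t pd_bit;
        uint8_t pu_bit;
        uint8_t drive_bits;
        uint8_t schmitt_bit;    /* 0 means "not supported on this SoC" */
};

struct match_data {
        const char *compatible;
        struct pincfg_data pincfg;
};

static const struct match_data variants[] = {
        { "sparx5",  { .pd_bit = 1u << 4, .pu_bit = 1u << 3,
                       .drive_bits = 0x3, .schmitt_bit = 1u << 2 } },
        { "lan966x", { .pd_bit = 1u << 3, .pu_bit = 1u << 2,
                       .drive_bits = 0x3 } },
};

static const struct pincfg_data *lookup(const char *compat)
{
        for (size_t i = 0; i < sizeof(variants) / sizeof(variants[0]); i++)
                if (!strcmp(variants[i].compatible, compat))
                        return &variants[i].pincfg;
        return NULL;
}

int main(void)
{
        const struct pincfg_data *opd = lookup("lan966x");
        uint8_t regcfg = 0x08;  /* example register value */

        if (opd)
                printf("pull-down=%d schmitt-supported=%d\n",
                       !!(regcfg & opd->pd_bit), opd->schmitt_bit != 0);
        return 0;
}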

View File

@ -266,6 +266,8 @@ static int ralink_pinctrl_pins(struct ralink_priv *p)
p->func[i]->pin_count, p->func[i]->pin_count,
sizeof(int), sizeof(int),
GFP_KERNEL); GFP_KERNEL);
if (!p->func[i]->pins)
return -ENOMEM;
for (j = 0; j < p->func[i]->pin_count; j++) for (j = 0; j < p->func[i]->pin_count; j++)
p->func[i]->pins[j] = p->func[i]->pin_first + j; p->func[i]->pins[j] = p->func[i]->pin_first + j;

View File

@ -871,6 +871,9 @@ static int sppctl_dt_node_to_map(struct pinctrl_dev *pctldev, struct device_node
} }
*map = kcalloc(*num_maps + nmG, sizeof(**map), GFP_KERNEL); *map = kcalloc(*num_maps + nmG, sizeof(**map), GFP_KERNEL);
if (*map == NULL)
return -ENOMEM;
for (i = 0; i < (*num_maps); i++) { for (i = 0; i < (*num_maps); i++) {
dt_pin = be32_to_cpu(list[i]); dt_pin = be32_to_cpu(list[i]);
pin_num = FIELD_GET(GENMASK(31, 24), dt_pin); pin_num = FIELD_GET(GENMASK(31, 24), dt_pin);

View File

@ -176,6 +176,7 @@ config PTP_1588_CLOCK_OCP
depends on !S390 depends on !S390
depends on COMMON_CLK depends on COMMON_CLK
select NET_DEVLINK select NET_DEVLINK
select CRC16
help help
This driver adds support for an OpenCompute time card. This driver adds support for an OpenCompute time card.

View File

@ -3565,7 +3565,7 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
if (!atomic_read(&queue->set_pci_flags_count)) { if (!atomic_read(&queue->set_pci_flags_count)) {
/* /*
* there's no outstanding PCI any more, so we * there's no outstanding PCI any more, so we
* have to request a PCI to be sure the the PCI * have to request a PCI to be sure the PCI
* will wake at some time in the future then we * will wake at some time in the future then we
* can flush packed buffers that might still be * can flush packed buffers that might still be
* hanging around, which can happen if no * hanging around, which can happen if no

View File

@ -1138,10 +1138,14 @@ static void bcm2835_spi_handle_err(struct spi_controller *ctlr,
struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr); struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
/* if an error occurred and we have an active dma, then terminate */ /* if an error occurred and we have an active dma, then terminate */
dmaengine_terminate_sync(ctlr->dma_tx); if (ctlr->dma_tx) {
bs->tx_dma_active = false; dmaengine_terminate_sync(ctlr->dma_tx);
dmaengine_terminate_sync(ctlr->dma_rx); bs->tx_dma_active = false;
bs->rx_dma_active = false; }
if (ctlr->dma_rx) {
dmaengine_terminate_sync(ctlr->dma_rx);
bs->rx_dma_active = false;
}
bcm2835_spi_undo_prologue(bs); bcm2835_spi_undo_prologue(bs);
/* and reset */ /* and reset */

View File

@ -69,7 +69,7 @@
#define CDNS_SPI_BAUD_DIV_SHIFT 3 /* Baud rate divisor shift in CR */ #define CDNS_SPI_BAUD_DIV_SHIFT 3 /* Baud rate divisor shift in CR */
#define CDNS_SPI_SS_SHIFT 10 /* Slave Select field shift in CR */ #define CDNS_SPI_SS_SHIFT 10 /* Slave Select field shift in CR */
#define CDNS_SPI_SS0 0x1 /* Slave Select zero */ #define CDNS_SPI_SS0 0x1 /* Slave Select zero */
#define CDNS_SPI_NOSS 0x3C /* No Slave select */ #define CDNS_SPI_NOSS 0xF /* No Slave select */
/* /*
* SPI Interrupt Registers bit Masks * SPI Interrupt Registers bit Masks

View File

@ -613,6 +613,10 @@ static int rspi_dma_transfer(struct rspi_data *rspi, struct sg_table *tx,
rspi->dma_callbacked, HZ); rspi->dma_callbacked, HZ);
if (ret > 0 && rspi->dma_callbacked) { if (ret > 0 && rspi->dma_callbacked) {
ret = 0; ret = 0;
if (tx)
dmaengine_synchronize(rspi->ctlr->dma_tx);
if (rx)
dmaengine_synchronize(rspi->ctlr->dma_rx);
} else { } else {
if (!ret) { if (!ret) {
dev_err(&rspi->ctlr->dev, "DMA timeout\n"); dev_err(&rspi->ctlr->dev, "DMA timeout\n");

View File

@ -632,16 +632,19 @@ static int __init sev_guest_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev; struct device *dev = &pdev->dev;
struct snp_guest_dev *snp_dev; struct snp_guest_dev *snp_dev;
struct miscdevice *misc; struct miscdevice *misc;
void __iomem *mapping;
int ret; int ret;
if (!dev->platform_data) if (!dev->platform_data)
return -ENODEV; return -ENODEV;
data = (struct sev_guest_platform_data *)dev->platform_data; data = (struct sev_guest_platform_data *)dev->platform_data;
layout = (__force void *)ioremap_encrypted(data->secrets_gpa, PAGE_SIZE); mapping = ioremap_encrypted(data->secrets_gpa, PAGE_SIZE);
if (!layout) if (!mapping)
return -ENODEV; return -ENODEV;
layout = (__force void *)mapping;
ret = -ENOMEM; ret = -ENOMEM;
snp_dev = devm_kzalloc(&pdev->dev, sizeof(struct snp_guest_dev), GFP_KERNEL); snp_dev = devm_kzalloc(&pdev->dev, sizeof(struct snp_guest_dev), GFP_KERNEL);
if (!snp_dev) if (!snp_dev)
@ -706,7 +709,7 @@ e_free_response:
e_free_request: e_free_request:
free_shared_pages(snp_dev->request, sizeof(struct snp_guest_msg)); free_shared_pages(snp_dev->request, sizeof(struct snp_guest_msg));
e_unmap: e_unmap:
iounmap(layout); iounmap(mapping);
return ret; return ret;
} }
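
The sev-guest fix above keeps the __iomem cookie returned by ioremap_encrypted() and passes exactly that value to iounmap(), instead of the force-cast layout pointer. A loose userspace analogue, using mmap()/munmap() only to illustrate keeping the original mapping handle for cleanup:

/* Illustrative only: keep the handle returned by the mapping call and use
 * exactly that handle for cleanup, rather than a pointer that has been
 * cast or adjusted for other uses. */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
        void *mapping = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (mapping == MAP_FAILED)
                return 1;

        char *layout = (char *)mapping;   /* typed view used elsewhere */
        memcpy(layout, "secrets", 8);

        /* cleanup uses the original handle, not the derived pointer */
        munmap(mapping, 4096);
        printf("mapped and released\n");
        return 0;
}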

View File

@ -1737,6 +1737,14 @@ static void io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
(req->flags & REQ_F_PARTIAL_IO)) (req->flags & REQ_F_PARTIAL_IO))
return; return;
/*
* READV uses fields in `struct io_rw` (len/addr) to stash the selected
* buffer data. However if that buffer is recycled the original request
* data stored in addr is lost. Therefore forbid recycling for now.
*/
if (req->opcode == IORING_OP_READV)
return;
/* /*
* We don't need to recycle for REQ_F_BUFFER_RING, we can just clear * We don't need to recycle for REQ_F_BUFFER_RING, we can just clear
* the flag and hence ensure that bl->head doesn't get incremented. * the flag and hence ensure that bl->head doesn't get incremented.
@ -12931,7 +12939,7 @@ static int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
{ {
struct io_uring_buf_ring *br; struct io_uring_buf_ring *br;
struct io_uring_buf_reg reg; struct io_uring_buf_reg reg;
struct io_buffer_list *bl; struct io_buffer_list *bl, *free_bl = NULL;
struct page **pages; struct page **pages;
int nr_pages; int nr_pages;
@ -12963,7 +12971,7 @@ static int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
if (bl->buf_nr_pages || !list_empty(&bl->buf_list)) if (bl->buf_nr_pages || !list_empty(&bl->buf_list))
return -EEXIST; return -EEXIST;
} else { } else {
bl = kzalloc(sizeof(*bl), GFP_KERNEL); free_bl = bl = kzalloc(sizeof(*bl), GFP_KERNEL);
if (!bl) if (!bl)
return -ENOMEM; return -ENOMEM;
} }
@ -12972,7 +12980,7 @@ static int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
struct_size(br, bufs, reg.ring_entries), struct_size(br, bufs, reg.ring_entries),
&nr_pages); &nr_pages);
if (IS_ERR(pages)) { if (IS_ERR(pages)) {
kfree(bl); kfree(free_bl);
return PTR_ERR(pages); return PTR_ERR(pages);
} }
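
In io_register_pbuf_ring() above, free_bl stays NULL when an existing buffer list is reused, so the error path frees only memory this call allocated. A self-contained sketch of that ownership-tracking pattern, with invented names:

/* Ownership-tracking sketch: when a function may either reuse an existing
 * object or allocate a new one, track the newly allocated pointer
 * separately so the error path frees only what this call created. */
#include <stdio.h>
#include <stdlib.h>

struct buf_list { int id; };

static struct buf_list *existing;   /* may or may not be present */

static int register_ring(int want_id)
{
        struct buf_list *bl, *free_bl = NULL;

        if (existing) {
                bl = existing;                      /* reuse, not owned here */
        } else {
                free_bl = bl = calloc(1, sizeof(*bl));
                if (!bl)
                        return -1;
        }

        if (want_id < 0) {                          /* simulated later failure */
                free(free_bl);                      /* no-op when bl was reused */
                return -1;
        }

        bl->id = want_id;
        existing = bl;
        return 0;
}

int main(void)
{
        printf("%d\n", register_ring(-1));   /* fails; nothing leaked or double-freed */
        printf("%d\n", register_ring(7));    /* succeeds and publishes the list */
        return 0;
}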

View File

@ -592,8 +592,12 @@ static int ntfs_attr_find(const ATTR_TYPE type, const ntfschar *name,
a = (ATTR_RECORD*)((u8*)ctx->attr + a = (ATTR_RECORD*)((u8*)ctx->attr +
le32_to_cpu(ctx->attr->length)); le32_to_cpu(ctx->attr->length));
for (;; a = (ATTR_RECORD*)((u8*)a + le32_to_cpu(a->length))) { for (;; a = (ATTR_RECORD*)((u8*)a + le32_to_cpu(a->length))) {
if ((u8*)a < (u8*)ctx->mrec || (u8*)a > (u8*)ctx->mrec + u8 *mrec_end = (u8 *)ctx->mrec +
le32_to_cpu(ctx->mrec->bytes_allocated)) le32_to_cpu(ctx->mrec->bytes_allocated);
u8 *name_end = (u8 *)a + le16_to_cpu(a->name_offset) +
a->name_length * sizeof(ntfschar);
if ((u8*)a < (u8*)ctx->mrec || (u8*)a > mrec_end ||
name_end > mrec_end)
break; break;
ctx->attr = a; ctx->attr = a;
if (unlikely(le32_to_cpu(a->type) > le32_to_cpu(type) || if (unlikely(le32_to_cpu(a->type) > le32_to_cpu(type) ||
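
The ntfs_attr_find() change above additionally checks that the attribute name (name_offset plus name_length) still lies inside the MFT record before the record is trusted. A generic, standalone sketch of that kind of bounds check on untrusted offset/length fields (not the NTFS structures):

/* Bounds-check sketch: before using an offset/length pair read from
 * untrusted data, verify that the addressed span lies entirely inside the
 * containing buffer, rejecting overflow as well as overruns. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static int name_in_bounds(const uint8_t *rec, size_t rec_len,
                          size_t name_off, size_t name_len)
{
        (void)rec;
        if (name_off > rec_len || name_len > rec_len - name_off)
                return 0;
        return 1;
}

int main(void)
{
        uint8_t record[64];
        memset(record, 0, sizeof(record));

        printf("%d\n", name_in_bounds(record, sizeof(record), 16, 8));   /* 1: ok */
        printf("%d\n", name_in_bounds(record, sizeof(record), 60, 16));  /* 0: past end */
        return 0;
}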

View File

@ -277,7 +277,6 @@ enum ocfs2_mount_options
OCFS2_MOUNT_JOURNAL_ASYNC_COMMIT = 1 << 15, /* Journal Async Commit */ OCFS2_MOUNT_JOURNAL_ASYNC_COMMIT = 1 << 15, /* Journal Async Commit */
OCFS2_MOUNT_ERRORS_CONT = 1 << 16, /* Return EIO to the calling process on error */ OCFS2_MOUNT_ERRORS_CONT = 1 << 16, /* Return EIO to the calling process on error */
OCFS2_MOUNT_ERRORS_ROFS = 1 << 17, /* Change filesystem to read-only on error */ OCFS2_MOUNT_ERRORS_ROFS = 1 << 17, /* Change filesystem to read-only on error */
OCFS2_MOUNT_NOCLUSTER = 1 << 18, /* No cluster aware filesystem mount */
}; };
#define OCFS2_OSB_SOFT_RO 0x0001 #define OCFS2_OSB_SOFT_RO 0x0001
@ -673,8 +672,7 @@ static inline int ocfs2_cluster_o2cb_global_heartbeat(struct ocfs2_super *osb)
static inline int ocfs2_mount_local(struct ocfs2_super *osb) static inline int ocfs2_mount_local(struct ocfs2_super *osb)
{ {
return ((osb->s_feature_incompat & OCFS2_FEATURE_INCOMPAT_LOCAL_MOUNT) return (osb->s_feature_incompat & OCFS2_FEATURE_INCOMPAT_LOCAL_MOUNT);
|| (osb->s_mount_opt & OCFS2_MOUNT_NOCLUSTER));
} }
static inline int ocfs2_uses_extended_slot_map(struct ocfs2_super *osb) static inline int ocfs2_uses_extended_slot_map(struct ocfs2_super *osb)

View File

@ -252,16 +252,14 @@ static int __ocfs2_find_empty_slot(struct ocfs2_slot_info *si,
int i, ret = -ENOSPC; int i, ret = -ENOSPC;
if ((preferred >= 0) && (preferred < si->si_num_slots)) { if ((preferred >= 0) && (preferred < si->si_num_slots)) {
if (!si->si_slots[preferred].sl_valid || if (!si->si_slots[preferred].sl_valid) {
!si->si_slots[preferred].sl_node_num) {
ret = preferred; ret = preferred;
goto out; goto out;
} }
} }
for(i = 0; i < si->si_num_slots; i++) { for(i = 0; i < si->si_num_slots; i++) {
if (!si->si_slots[i].sl_valid || if (!si->si_slots[i].sl_valid) {
!si->si_slots[i].sl_node_num) {
ret = i; ret = i;
break; break;
} }
@ -456,30 +454,24 @@ int ocfs2_find_slot(struct ocfs2_super *osb)
spin_lock(&osb->osb_lock); spin_lock(&osb->osb_lock);
ocfs2_update_slot_info(si); ocfs2_update_slot_info(si);
if (ocfs2_mount_local(osb)) /* search for ourselves first and take the slot if it already
/* use slot 0 directly in local mode */ * exists. Perhaps we need to mark this in a variable for our
slot = 0; * own journal recovery? Possibly not, though we certainly
else { * need to warn to the user */
/* search for ourselves first and take the slot if it already slot = __ocfs2_node_num_to_slot(si, osb->node_num);
* exists. Perhaps we need to mark this in a variable for our if (slot < 0) {
* own journal recovery? Possibly not, though we certainly /* if no slot yet, then just take 1st available
* need to warn to the user */ * one. */
slot = __ocfs2_node_num_to_slot(si, osb->node_num); slot = __ocfs2_find_empty_slot(si, osb->preferred_slot);
if (slot < 0) { if (slot < 0) {
/* if no slot yet, then just take 1st available spin_unlock(&osb->osb_lock);
* one. */ mlog(ML_ERROR, "no free slots available!\n");
slot = __ocfs2_find_empty_slot(si, osb->preferred_slot); status = -EINVAL;
if (slot < 0) { goto bail;
spin_unlock(&osb->osb_lock); }
mlog(ML_ERROR, "no free slots available!\n"); } else
status = -EINVAL; printk(KERN_INFO "ocfs2: Slot %d on device (%s) was already "
goto bail; "allocated to this node!\n", slot, osb->dev_str);
}
} else
printk(KERN_INFO "ocfs2: Slot %d on device (%s) was "
"already allocated to this node!\n",
slot, osb->dev_str);
}
ocfs2_set_slot(si, slot, osb->node_num); ocfs2_set_slot(si, slot, osb->node_num);
osb->slot_num = slot; osb->slot_num = slot;

View File

@ -172,7 +172,6 @@ enum {
Opt_dir_resv_level, Opt_dir_resv_level,
Opt_journal_async_commit, Opt_journal_async_commit,
Opt_err_cont, Opt_err_cont,
Opt_nocluster,
Opt_err, Opt_err,
}; };
@ -206,7 +205,6 @@ static const match_table_t tokens = {
{Opt_dir_resv_level, "dir_resv_level=%u"}, {Opt_dir_resv_level, "dir_resv_level=%u"},
{Opt_journal_async_commit, "journal_async_commit"}, {Opt_journal_async_commit, "journal_async_commit"},
{Opt_err_cont, "errors=continue"}, {Opt_err_cont, "errors=continue"},
{Opt_nocluster, "nocluster"},
{Opt_err, NULL} {Opt_err, NULL}
}; };
@ -618,13 +616,6 @@ static int ocfs2_remount(struct super_block *sb, int *flags, char *data)
goto out; goto out;
} }
tmp = OCFS2_MOUNT_NOCLUSTER;
if ((osb->s_mount_opt & tmp) != (parsed_options.mount_opt & tmp)) {
ret = -EINVAL;
mlog(ML_ERROR, "Cannot change nocluster option on remount\n");
goto out;
}
tmp = OCFS2_MOUNT_HB_LOCAL | OCFS2_MOUNT_HB_GLOBAL | tmp = OCFS2_MOUNT_HB_LOCAL | OCFS2_MOUNT_HB_GLOBAL |
OCFS2_MOUNT_HB_NONE; OCFS2_MOUNT_HB_NONE;
if ((osb->s_mount_opt & tmp) != (parsed_options.mount_opt & tmp)) { if ((osb->s_mount_opt & tmp) != (parsed_options.mount_opt & tmp)) {
@ -865,7 +856,6 @@ static int ocfs2_verify_userspace_stack(struct ocfs2_super *osb,
} }
if (ocfs2_userspace_stack(osb) && if (ocfs2_userspace_stack(osb) &&
!(osb->s_mount_opt & OCFS2_MOUNT_NOCLUSTER) &&
strncmp(osb->osb_cluster_stack, mopt->cluster_stack, strncmp(osb->osb_cluster_stack, mopt->cluster_stack,
OCFS2_STACK_LABEL_LEN)) { OCFS2_STACK_LABEL_LEN)) {
mlog(ML_ERROR, mlog(ML_ERROR,
@ -1137,11 +1127,6 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
osb->s_mount_opt & OCFS2_MOUNT_DATA_WRITEBACK ? "writeback" : osb->s_mount_opt & OCFS2_MOUNT_DATA_WRITEBACK ? "writeback" :
"ordered"); "ordered");
if ((osb->s_mount_opt & OCFS2_MOUNT_NOCLUSTER) &&
!(osb->s_feature_incompat & OCFS2_FEATURE_INCOMPAT_LOCAL_MOUNT))
printk(KERN_NOTICE "ocfs2: The shared device (%s) is mounted "
"without cluster aware mode.\n", osb->dev_str);
atomic_set(&osb->vol_state, VOLUME_MOUNTED); atomic_set(&osb->vol_state, VOLUME_MOUNTED);
wake_up(&osb->osb_mount_event); wake_up(&osb->osb_mount_event);
@ -1452,9 +1437,6 @@ static int ocfs2_parse_options(struct super_block *sb,
case Opt_journal_async_commit: case Opt_journal_async_commit:
mopt->mount_opt |= OCFS2_MOUNT_JOURNAL_ASYNC_COMMIT; mopt->mount_opt |= OCFS2_MOUNT_JOURNAL_ASYNC_COMMIT;
break; break;
case Opt_nocluster:
mopt->mount_opt |= OCFS2_MOUNT_NOCLUSTER;
break;
default: default:
mlog(ML_ERROR, mlog(ML_ERROR,
"Unrecognized mount option \"%s\" " "Unrecognized mount option \"%s\" "
@ -1566,9 +1548,6 @@ static int ocfs2_show_options(struct seq_file *s, struct dentry *root)
if (opts & OCFS2_MOUNT_JOURNAL_ASYNC_COMMIT) if (opts & OCFS2_MOUNT_JOURNAL_ASYNC_COMMIT)
seq_printf(s, ",journal_async_commit"); seq_printf(s, ",journal_async_commit");
if (opts & OCFS2_MOUNT_NOCLUSTER)
seq_printf(s, ",nocluster");
return 0; return 0;
} }

View File

@ -1263,6 +1263,9 @@ static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos,
count, fl); count, fl);
file_end_write(out.file); file_end_write(out.file);
} else { } else {
if (out.file->f_flags & O_NONBLOCK)
fl |= SPLICE_F_NONBLOCK;
retval = splice_file_to_pipe(in.file, opipe, &pos, count, fl); retval = splice_file_to_pipe(in.file, opipe, &pos, count, fl);
} }
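
The do_sendfile() hunk above propagates O_NONBLOCK on the output file into SPLICE_F_NONBLOCK for the pipe path. A trivial userspace sketch of deriving per-operation flags from a file's status flags (MY_OP_NONBLOCK is a made-up stand-in):

/* Map a file's O_NONBLOCK status flag onto a hypothetical per-operation flag. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

#define MY_OP_NONBLOCK 0x1   /* hypothetical per-operation flag */

int main(void)
{
        int flags = fcntl(STDOUT_FILENO, F_GETFL);
        unsigned int op_flags = 0;

        if (flags >= 0 && (flags & O_NONBLOCK))
                op_flags |= MY_OP_NONBLOCK;

        printf("op_flags=%#x\n", op_flags);
        return 0;
}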

View File

@ -192,17 +192,19 @@ static inline void msg_init(struct uffd_msg *msg)
} }
static inline struct uffd_msg userfault_msg(unsigned long address, static inline struct uffd_msg userfault_msg(unsigned long address,
unsigned long real_address,
unsigned int flags, unsigned int flags,
unsigned long reason, unsigned long reason,
unsigned int features) unsigned int features)
{ {
struct uffd_msg msg; struct uffd_msg msg;
msg_init(&msg); msg_init(&msg);
msg.event = UFFD_EVENT_PAGEFAULT; msg.event = UFFD_EVENT_PAGEFAULT;
if (!(features & UFFD_FEATURE_EXACT_ADDRESS)) msg.arg.pagefault.address = (features & UFFD_FEATURE_EXACT_ADDRESS) ?
address &= PAGE_MASK; real_address : address;
msg.arg.pagefault.address = address;
/* /*
* These flags indicate why the userfault occurred: * These flags indicate why the userfault occurred:
* - UFFD_PAGEFAULT_FLAG_WP indicates a write protect fault. * - UFFD_PAGEFAULT_FLAG_WP indicates a write protect fault.
@ -488,8 +490,8 @@ vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason)
init_waitqueue_func_entry(&uwq.wq, userfaultfd_wake_function); init_waitqueue_func_entry(&uwq.wq, userfaultfd_wake_function);
uwq.wq.private = current; uwq.wq.private = current;
uwq.msg = userfault_msg(vmf->real_address, vmf->flags, reason, uwq.msg = userfault_msg(vmf->address, vmf->real_address, vmf->flags,
ctx->features); reason, ctx->features);
uwq.ctx = ctx; uwq.ctx = ctx;
uwq.waken = false; uwq.waken = false;
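
The userfaultfd change above passes both the page-aligned and the exact fault address so UFFD_FEATURE_EXACT_ADDRESS can select between them when building the message. A minimal userspace sketch of opting in to that feature, assuming kernel headers new enough to define it; error handling is reduced to the bare minimum:

/* Sketch: negotiate UFFD_FEATURE_EXACT_ADDRESS so page-fault messages carry
 * the exact faulting address instead of the page-aligned one. Registering
 * memory and reading events are omitted. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/userfaultfd.h>

int main(void)
{
        int uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
        if (uffd < 0) {
                perror("userfaultfd");
                return 1;
        }

        struct uffdio_api api = {
                .api = UFFD_API,
                .features = UFFD_FEATURE_EXACT_ADDRESS,
        };
        if (ioctl(uffd, UFFDIO_API, &api) < 0) {
                perror("UFFDIO_API");
                return 1;
        }

        printf("exact address reporting %s\n",
               (api.features & UFFD_FEATURE_EXACT_ADDRESS) ? "available" : "unavailable");
        close(uffd);
        return 0;
}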

View File

@ -1125,9 +1125,7 @@ static inline void memcpy_toio(volatile void __iomem *addr, const void *buffer,
} }
#endif #endif
#ifndef CONFIG_GENERIC_DEVMEM_IS_ALLOWED
extern int devmem_is_allowed(unsigned long pfn); extern int devmem_is_allowed(unsigned long pfn);
#endif
#endif /* __KERNEL__ */ #endif /* __KERNEL__ */

Some files were not shown because too many files have changed in this diff.