Merge 5.10-rc7 into char-misc-next
We want the fixes in here, and this resolves a merge issue with
drivers/misc/habanalabs/common/memory.c.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
commit a3ab07c642
.mailmap | 2

@@ -322,6 +322,8 @@ TripleX Chung <xxx.phy@gmail.com> <zhongyu@18mail.cn>
Tsuneo Yoshioka <Tsuneo.Yoshioka@f-secure.com>
Tycho Andersen <tycho@tycho.pizza> <tycho@tycho.ws>
Uwe Kleine-König <ukleinek@informatik.uni-freiburg.de>
Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
Uwe Kleine-König <ukleinek@strlen.de>
Uwe Kleine-König <ukl@pengutronix.de>
Uwe Kleine-König <Uwe.Kleine-Koenig@digi.com>
Valdis Kletnieks <Valdis.Kletnieks@vt.edu>

CREDITS | 5

@@ -740,6 +740,11 @@ S: (ask for current address)
S: Portland, Oregon
S: USA

N: Jason Cooper
D: ARM/Marvell SOC co-maintainer
D: irqchip co-maintainer
D: MVEBU PCI DRIVER co-maintainer

N: Robin Cornelius
E: robincornelius@users.sourceforge.net
D: Ralink rt2x00 WLAN driver

@@ -137,15 +137,24 @@ Boot Kernel With a Boot Config
==============================

Since the boot configuration file is loaded with initrd, it will be added
to the end of the initrd (initramfs) image file with size, checksum and
12-byte magic word as below.
to the end of the initrd (initramfs) image file with padding, size,
checksum and 12-byte magic word as below.

[initrd][bootconfig][size(u32)][checksum(u32)][#BOOTCONFIG\n]
[initrd][bootconfig][padding][size(le32)][checksum(le32)][#BOOTCONFIG\n]

The size and checksum fields are unsigned 32bit little endian values.

When the boot configuration is added to the initrd image, the total
file size is aligned to 4 bytes. To fill the gap, null characters
(``\0``) will be added. Thus the ``size`` is the length of the bootconfig
file + padding bytes.

The Linux kernel decodes the last part of the initrd image in memory to
get the boot configuration data.
Because of this "piggyback" method, there is no need to change or
update the boot loader and the kernel image itself.
update the boot loader and the kernel image itself as long as the boot
loader passes the correct initrd file size. If by any chance, the boot
loader passes a longer size, the kernel fails to find the bootconfig data.

To do this operation, Linux kernel provides "bootconfig" command under
tools/bootconfig, which allows admin to apply or delete the config file

@@ -176,7 +185,8 @@ up to 512 key-value pairs. If keys contains 3 words in average, it can
contain 256 key-value pairs. In most cases, the number of config items
will be under 100 entries and smaller than 8KB, so it would be enough.
If the node number exceeds 1024, parser returns an error even if the file
size is smaller than 32KB.
size is smaller than 32KB. (Note that this maximum size is not including
the padding null characters.)
Anyway, since bootconfig command verifies it when appending a boot config
to initrd image, user can notice it before boot.
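
For illustration, a minimal host-side sketch of the appended layout described
above (this is not the tools/bootconfig implementation; the checksum is
assumed to be a plain 32-bit byte sum, a little-endian host is assumed, and
error handling is omitted):

/* Sketch: append a bootconfig blob to an initrd following the documented
 * [initrd][bootconfig][padding][size(le32)][checksum(le32)][#BOOTCONFIG\n]
 * layout; "size" covers the data plus the alignment padding. */
#include <stdint.h>
#include <stdio.h>

#define BOOTCONFIG_MAGIC	"#BOOTCONFIG\n"	/* 12-byte magic word */

static uint32_t byte_sum(const uint8_t *p, size_t len)
{
	uint32_t sum = 0;

	while (len--)
		sum += *p++;
	return sum;
}

static void append_bootconfig(FILE *initrd, const uint8_t *data, size_t len)
{
	static const uint8_t pad[4];
	long end;
	uint32_t size, csum;

	fseek(initrd, 0, SEEK_END);
	end = ftell(initrd);
	/* Null-pad so the total file size stays 4-byte aligned
	 * (the 20-byte footer is itself a multiple of 4). */
	size = len + ((4 - ((end + len) % 4)) % 4);
	csum = byte_sum(data, len);

	fwrite(data, 1, len, initrd);
	fwrite(pad, 1, size - len, initrd);
	fwrite(&size, 4, 1, initrd);		/* size(le32), LE host assumed */
	fwrite(&csum, 4, 1, initrd);		/* checksum(le32) */
	fwrite(BOOTCONFIG_MAGIC, 1, 12, initrd);
}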

@@ -33,7 +33,7 @@ tcan4x5x: tcan4x5x@0 {
		spi-max-frequency = <10000000>;
		bosch,mram-cfg = <0x0 0 0 32 0 0 1 1>;
		interrupt-parent = <&gpio1>;
		interrupts = <14 GPIO_ACTIVE_LOW>;
		interrupts = <14 IRQ_TYPE_LEVEL_LOW>;
		device-state-gpios = <&gpio3 21 GPIO_ACTIVE_HIGH>;
		device-wake-gpios = <&gpio1 15 GPIO_ACTIVE_HIGH>;
		reset-gpios = <&gpio1 27 GPIO_ACTIVE_HIGH>;

@@ -25,7 +25,7 @@ Example (for ARM-based BeagleBone with NPC100 NFC controller on I2C2):
		clock-frequency = <100000>;

		interrupt-parent = <&gpio1>;
		interrupts = <29 GPIO_ACTIVE_HIGH>;
		interrupts = <29 IRQ_TYPE_LEVEL_HIGH>;

		enable-gpios = <&gpio0 30 GPIO_ACTIVE_HIGH>;
		firmware-gpios = <&gpio0 31 GPIO_ACTIVE_HIGH>;

@@ -25,7 +25,7 @@ Example (for ARM-based BeagleBone with PN544 on I2C2):
		clock-frequency = <400000>;

		interrupt-parent = <&gpio1>;
		interrupts = <17 GPIO_ACTIVE_HIGH>;
		interrupts = <17 IRQ_TYPE_LEVEL_HIGH>;

		enable-gpios = <&gpio3 21 GPIO_ACTIVE_HIGH>;
		firmware-gpios = <&gpio3 19 GPIO_ACTIVE_HIGH>;

MAINTAINERS | 31

@@ -2014,7 +2014,6 @@ M: Philipp Zabel <philipp.zabel@gmail.com>
S:	Maintained

ARM/Marvell Dove/MV78xx0/Orion SOC support
M:	Jason Cooper <jason@lakedaemon.net>
M:	Andrew Lunn <andrew@lunn.ch>
M:	Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
M:	Gregory Clement <gregory.clement@bootlin.com>

@@ -2031,7 +2030,6 @@ F: arch/arm/plat-orion/
F:	drivers/soc/dove/

ARM/Marvell Kirkwood and Armada 370, 375, 38x, 39x, XP, 3700, 7K/8K, CN9130 SOC support
M:	Jason Cooper <jason@lakedaemon.net>
M:	Andrew Lunn <andrew@lunn.ch>
M:	Gregory Clement <gregory.clement@bootlin.com>
M:	Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>

@@ -3357,6 +3355,17 @@ S: Supported
F:	arch/x86/net/
X:	arch/x86/net/bpf_jit_comp32.c

BPF LSM (Security Audit and Enforcement using BPF)
M:	KP Singh <kpsingh@chromium.org>
R:	Florent Revest <revest@chromium.org>
R:	Brendan Jackman <jackmanb@chromium.org>
L:	bpf@vger.kernel.org
S:	Maintained
F:	Documentation/bpf/bpf_lsm.rst
F:	include/linux/bpf_lsm.h
F:	kernel/bpf/bpf_lsm.c
F:	security/bpf/

BROADCOM B44 10/100 ETHERNET DRIVER
M:	Michael Chan <michael.chan@broadcom.com>
L:	netdev@vger.kernel.org

@@ -4276,6 +4285,7 @@ B: https://github.com/ClangBuiltLinux/linux/issues
C:	irc://chat.freenode.net/clangbuiltlinux
F:	Documentation/kbuild/llvm.rst
F:	scripts/clang-tools/
F:	scripts/lld-version.sh
K:	\b(?i:clang|llvm)\b

CLEANCACHE API

@@ -9069,10 +9079,7 @@ S: Supported
F:	drivers/net/wireless/intel/iwlegacy/

INTEL WIRELESS WIFI LINK (iwlwifi)
M:	Johannes Berg <johannes.berg@intel.com>
M:	Emmanuel Grumbach <emmanuel.grumbach@intel.com>
M:	Luca Coelho <luciano.coelho@intel.com>
M:	Intel Linux Wireless <linuxwifi@intel.com>
L:	linux-wireless@vger.kernel.org
S:	Supported
W:	https://wireless.wiki.kernel.org/en/users/drivers/iwlwifi

@@ -9248,7 +9255,6 @@ F: kernel/irq/

IRQCHIP DRIVERS
M:	Thomas Gleixner <tglx@linutronix.de>
M:	Jason Cooper <jason@lakedaemon.net>
M:	Marc Zyngier <maz@kernel.org>
L:	linux-kernel@vger.kernel.org
S:	Maintained

@@ -13394,7 +13400,6 @@ F: drivers/pci/controller/mobiveil/pcie-mobiveil*

PCI DRIVER FOR MVEBU (Marvell Armada 370 and Armada XP SOC support)
M:	Thomas Petazzoni <thomas.petazzoni@bootlin.com>
M:	Jason Cooper <jason@lakedaemon.net>
L:	linux-pci@vger.kernel.org
L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S:	Maintained

@@ -19122,12 +19127,17 @@ L: netdev@vger.kernel.org
L:	bpf@vger.kernel.org
S:	Supported
F:	include/net/xdp.h
F:	include/net/xdp_priv.h
F:	include/trace/events/xdp.h
F:	kernel/bpf/cpumap.c
F:	kernel/bpf/devmap.c
F:	net/core/xdp.c
N:	xdp
K:	xdp
F:	samples/bpf/xdp*
F:	tools/testing/selftests/bpf/*xdp*
F:	tools/testing/selftests/bpf/*/*xdp*
F:	drivers/net/ethernet/*/*/*/*/*xdp*
F:	drivers/net/ethernet/*/*/*xdp*
K:	(?:\b|_)xdp(?:\b|_)

XDP SOCKETS (AF_XDP)
M:	Björn Töpel <bjorn.topel@intel.com>

@@ -19136,9 +19146,12 @@ R: Jonathan Lemon <jonathan.lemon@gmail.com>
L:	netdev@vger.kernel.org
L:	bpf@vger.kernel.org
S:	Maintained
F:	Documentation/networking/af_xdp.rst
F:	include/net/xdp_sock*
F:	include/net/xsk_buff_pool.h
F:	include/uapi/linux/if_xdp.h
F:	include/uapi/linux/xdp_diag.h
F:	include/net/netns/xdp.h
F:	net/xdp/
F:	samples/bpf/xdpsock*
F:	tools/lib/bpf/xsk*

Makefile | 12

@@ -2,7 +2,7 @@
VERSION = 5
PATCHLEVEL = 10
SUBLEVEL = 0
EXTRAVERSION = -rc6
EXTRAVERSION = -rc7
NAME = Kleptomaniac Octopus

# *DOCUMENTATION*

@@ -826,7 +826,9 @@ else
DEBUG_CFLAGS	+= -g
endif

ifneq ($(LLVM_IAS),1)
KBUILD_AFLAGS	+= -Wa,-gdwarf-2
endif

ifdef CONFIG_DEBUG_INFO_DWARF4
DEBUG_CFLAGS	+= -gdwarf-4

@@ -944,7 +946,7 @@ KBUILD_CFLAGS += $(call cc-option,-Werror=incompatible-pointer-types)
KBUILD_CFLAGS += $(call cc-option,-Werror=designated-init)

# change __FILE__ to the relative path from the srctree
KBUILD_CFLAGS	+= $(call cc-option,-fmacro-prefix-map=$(srctree)/=)
KBUILD_CPPFLAGS += $(call cc-option,-fmacro-prefix-map=$(srctree)/=)

# ensure -fcf-protection is disabled when using retpoline as it is
# incompatible with -mindirect-branch=thunk-extern

@@ -982,6 +984,12 @@ ifeq ($(CONFIG_RELR),y)
LDFLAGS_vmlinux	+= --pack-dyn-relocs=relr
endif

# We never want expected sections to be placed heuristically by the
# linker. All sections should be explicitly named in the linker script.
ifdef CONFIG_LD_ORPHAN_WARN
LDFLAGS_vmlinux += --orphan-handling=warn
endif

# Align the bit size of userspace programs with the kernel
KBUILD_USERCFLAGS  += $(filter -m32 -m64 --target=%, $(KBUILD_CFLAGS))
KBUILD_USERLDFLAGS += $(filter -m32 -m64 --target=%, $(KBUILD_CFLAGS))

@@ -1028,6 +1028,15 @@ config HAVE_STATIC_CALL_INLINE
	bool
	depends on HAVE_STATIC_CALL

config ARCH_WANT_LD_ORPHAN_WARN
	bool
	help
	  An arch should select this symbol once all linker sections are explicitly
	  included, size-asserted, or discarded in the linker scripts. This is
	  important because we never want expected sections to be placed heuristically
	  by the linker, since the locations of such sections can change between linker
	  versions.

source "kernel/gcov/Kconfig"

source "scripts/gcc-plugins/Kconfig"

@@ -35,6 +35,7 @@ config ARM
	select ARCH_USE_CMPXCHG_LOCKREF
	select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU
	select ARCH_WANT_IPC_PARSE_VERSION
	select ARCH_WANT_LD_ORPHAN_WARN
	select BINFMT_FLAT_ARGVP_ENVP_ON_STACK
	select BUILDTIME_TABLE_SORT if MMU
	select CLONE_BACKWARDS

@@ -16,10 +16,6 @@ LDFLAGS_vmlinux += --be8
KBUILD_LDFLAGS_MODULE	+= --be8
endif

# We never want expected sections to be placed heuristically by the
# linker. All sections should be explicitly named in the linker script.
LDFLAGS_vmlinux += $(call ld-option, --orphan-handling=warn)

GZFLAGS		:=-9
#KBUILD_CFLAGS +=-pipe

@@ -129,7 +129,9 @@ LDFLAGS_vmlinux += --no-undefined
# Delete all temporary local symbols
LDFLAGS_vmlinux += -X
# Report orphan sections
LDFLAGS_vmlinux += $(call ld-option, --orphan-handling=warn)
ifdef CONFIG_LD_ORPHAN_WARN
LDFLAGS_vmlinux += --orphan-handling=warn
endif
# Next argument is a linker script
LDFLAGS_vmlinux += -T

@@ -81,7 +81,6 @@ CONFIG_PARTITION_ADVANCED=y
CONFIG_BINFMT_MISC=y
CONFIG_CMA=y
CONFIG_ZSMALLOC=m
CONFIG_ZSMALLOC_PGTABLE_MAPPING=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y

@@ -288,7 +288,7 @@ static struct gpiod_lookup_table osk_usb_gpio_table = {
	.dev_id = "ohci",
	.table = {
		/* Power GPIO on the I2C-attached TPS65010 */
		GPIO_LOOKUP("i2c-tps65010", 1, "power", GPIO_ACTIVE_HIGH),
		GPIO_LOOKUP("tps65010", 0, "power", GPIO_ACTIVE_HIGH),
		GPIO_LOOKUP(OMAP_GPIO_LABEL, 9, "overcurrent",
			    GPIO_ACTIVE_HIGH),
	},

@@ -81,6 +81,7 @@ config ARM64
	select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
	select ARCH_WANT_FRAME_POINTERS
	select ARCH_WANT_HUGE_PMD_SHARE if ARM64_4K_PAGES || (ARM64_16K_PAGES && !ARM64_VA_BITS_36)
	select ARCH_WANT_LD_ORPHAN_WARN
	select ARCH_HAS_UBSAN_SANITIZE_ALL
	select ARM_AMBA
	select ARM_ARCH_TIMER

@@ -28,10 +28,6 @@ LDFLAGS_vmlinux += --fix-cortex-a53-843419
endif
endif

# We never want expected sections to be placed heuristically by the
# linker. All sections should be explicitly named in the linker script.
LDFLAGS_vmlinux += $(call ld-option, --orphan-handling=warn)

ifeq ($(CONFIG_ARM64_USE_LSE_ATOMICS), y)
ifneq ($(CONFIG_ARM64_LSE_ATOMICS), y)
$(warning LSE atomics not supported by binutils)

@@ -128,6 +128,9 @@ static inline void local_daif_inherit(struct pt_regs *regs)
{
	unsigned long flags = regs->pstate & DAIF_MASK;

	if (interrupts_enabled(regs))
		trace_hardirqs_on();

	/*
	 * We can't use local_daif_restore(regs->pstate) here as
	 * system_has_prio_mask_debugging() won't restore the I bit if it can

@@ -31,7 +31,12 @@ static inline u32 disr_to_esr(u64 disr)
	return esr;
}

asmlinkage void noinstr enter_el1_irq_or_nmi(struct pt_regs *regs);
asmlinkage void noinstr exit_el1_irq_or_nmi(struct pt_regs *regs);
asmlinkage void enter_from_user_mode(void);
asmlinkage void exit_to_user_mode(void);
void arm64_enter_nmi(struct pt_regs *regs);
void arm64_exit_nmi(struct pt_regs *regs);
void do_mem_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs);
void do_undefinstr(struct pt_regs *regs);
void do_bti(struct pt_regs *regs);

@@ -193,6 +193,10 @@ struct pt_regs {
	/* Only valid when ARM64_HAS_IRQ_PRIO_MASKING is enabled. */
	u64 pmr_save;
	u64 stackframe[2];

	/* Only valid for some EL1 exceptions. */
	u64 lockdep_hardirqs;
	u64 exit_rcu;
};

static inline bool in_syscall(struct pt_regs const *regs)

@@ -987,7 +987,7 @@
#define SYS_TFSR_EL1_TF0_SHIFT	0
#define SYS_TFSR_EL1_TF1_SHIFT	1
#define SYS_TFSR_EL1_TF0	(UL(1) << SYS_TFSR_EL1_TF0_SHIFT)
#define SYS_TFSR_EL1_TF1	(UK(2) << SYS_TFSR_EL1_TF1_SHIFT)
#define SYS_TFSR_EL1_TF1	(UL(1) << SYS_TFSR_EL1_TF1_SHIFT)

/* Safe value for MPIDR_EL1: Bit31:RES1, Bit30:U:0, Bit24:MT:0 */
#define SYS_MPIDR_SAFE_VAL	(BIT(31))

@@ -17,40 +17,164 @@
#include <asm/mmu.h>
#include <asm/sysreg.h>

static void notrace el1_abort(struct pt_regs *regs, unsigned long esr)
/*
 * This is intended to match the logic in irqentry_enter(), handling the kernel
 * mode transitions only.
 */
static void noinstr enter_from_kernel_mode(struct pt_regs *regs)
{
	regs->exit_rcu = false;

	if (!IS_ENABLED(CONFIG_TINY_RCU) && is_idle_task(current)) {
		lockdep_hardirqs_off(CALLER_ADDR0);
		rcu_irq_enter();
		trace_hardirqs_off_finish();

		regs->exit_rcu = true;
		return;
	}

	lockdep_hardirqs_off(CALLER_ADDR0);
	rcu_irq_enter_check_tick();
	trace_hardirqs_off_finish();
}

/*
 * This is intended to match the logic in irqentry_exit(), handling the kernel
 * mode transitions only, and with preemption handled elsewhere.
 */
static void noinstr exit_to_kernel_mode(struct pt_regs *regs)
{
	lockdep_assert_irqs_disabled();

	if (interrupts_enabled(regs)) {
		if (regs->exit_rcu) {
			trace_hardirqs_on_prepare();
			lockdep_hardirqs_on_prepare(CALLER_ADDR0);
			rcu_irq_exit();
			lockdep_hardirqs_on(CALLER_ADDR0);
			return;
		}

		trace_hardirqs_on();
	} else {
		if (regs->exit_rcu)
			rcu_irq_exit();
	}
}

void noinstr arm64_enter_nmi(struct pt_regs *regs)
{
	regs->lockdep_hardirqs = lockdep_hardirqs_enabled();

	__nmi_enter();
	lockdep_hardirqs_off(CALLER_ADDR0);
	lockdep_hardirq_enter();
	rcu_nmi_enter();

	trace_hardirqs_off_finish();
	ftrace_nmi_enter();
}

void noinstr arm64_exit_nmi(struct pt_regs *regs)
{
	bool restore = regs->lockdep_hardirqs;

	ftrace_nmi_exit();
	if (restore) {
		trace_hardirqs_on_prepare();
		lockdep_hardirqs_on_prepare(CALLER_ADDR0);
	}

	rcu_nmi_exit();
	lockdep_hardirq_exit();
	if (restore)
		lockdep_hardirqs_on(CALLER_ADDR0);
	__nmi_exit();
}

asmlinkage void noinstr enter_el1_irq_or_nmi(struct pt_regs *regs)
{
	if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
		arm64_enter_nmi(regs);
	else
		enter_from_kernel_mode(regs);
}

asmlinkage void noinstr exit_el1_irq_or_nmi(struct pt_regs *regs)
{
	if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
		arm64_exit_nmi(regs);
	else
		exit_to_kernel_mode(regs);
}

static void noinstr el1_abort(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	far = untagged_addr(far);
	do_mem_abort(far, esr, regs);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}
NOKPROBE_SYMBOL(el1_abort);

static void notrace el1_pc(struct pt_regs *regs, unsigned long esr)
static void noinstr el1_pc(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_sp_pc_abort(far, esr, regs);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}
NOKPROBE_SYMBOL(el1_pc);

static void notrace el1_undef(struct pt_regs *regs)
static void noinstr el1_undef(struct pt_regs *regs)
{
	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_undefinstr(regs);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}
NOKPROBE_SYMBOL(el1_undef);

static void notrace el1_inv(struct pt_regs *regs, unsigned long esr)
static void noinstr el1_inv(struct pt_regs *regs, unsigned long esr)
{
	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	bad_mode(regs, 0, esr);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}
NOKPROBE_SYMBOL(el1_inv);

static void notrace el1_dbg(struct pt_regs *regs, unsigned long esr)
static void noinstr arm64_enter_el1_dbg(struct pt_regs *regs)
{
	regs->lockdep_hardirqs = lockdep_hardirqs_enabled();

	lockdep_hardirqs_off(CALLER_ADDR0);
	rcu_nmi_enter();

	trace_hardirqs_off_finish();
}

static void noinstr arm64_exit_el1_dbg(struct pt_regs *regs)
{
	bool restore = regs->lockdep_hardirqs;

	if (restore) {
		trace_hardirqs_on_prepare();
		lockdep_hardirqs_on_prepare(CALLER_ADDR0);
	}

	rcu_nmi_exit();
	if (restore)
		lockdep_hardirqs_on(CALLER_ADDR0);
}

static void noinstr el1_dbg(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

@@ -62,18 +186,21 @@ static void notrace el1_dbg(struct pt_regs *regs, unsigned long esr)
	if (system_uses_irq_prio_masking())
		gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);

	arm64_enter_el1_dbg(regs);
	do_debug_exception(far, esr, regs);
	arm64_exit_el1_dbg(regs);
}
NOKPROBE_SYMBOL(el1_dbg);

static void notrace el1_fpac(struct pt_regs *regs, unsigned long esr)
static void noinstr el1_fpac(struct pt_regs *regs, unsigned long esr)
{
	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_ptrauth_fault(regs, esr);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}
NOKPROBE_SYMBOL(el1_fpac);

asmlinkage void notrace el1_sync_handler(struct pt_regs *regs)
asmlinkage void noinstr el1_sync_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

@@ -106,20 +233,34 @@ asmlinkage void notrace el1_sync_handler(struct pt_regs *regs)
		el1_inv(regs, esr);
	}
}
NOKPROBE_SYMBOL(el1_sync_handler);

static void notrace el0_da(struct pt_regs *regs, unsigned long esr)
asmlinkage void noinstr enter_from_user_mode(void)
{
	lockdep_hardirqs_off(CALLER_ADDR0);
	CT_WARN_ON(ct_state() != CONTEXT_USER);
	user_exit_irqoff();
	trace_hardirqs_off_finish();
}

asmlinkage void noinstr exit_to_user_mode(void)
{
	trace_hardirqs_on_prepare();
	lockdep_hardirqs_on_prepare(CALLER_ADDR0);
	user_enter_irqoff();
	lockdep_hardirqs_on(CALLER_ADDR0);
}

static void noinstr el0_da(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	user_exit_irqoff();
	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	far = untagged_addr(far);
	do_mem_abort(far, esr, regs);
}
NOKPROBE_SYMBOL(el0_da);

static void notrace el0_ia(struct pt_regs *regs, unsigned long esr)
static void noinstr el0_ia(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

@@ -131,90 +272,80 @@ static void notrace el0_ia(struct pt_regs *regs, unsigned long esr)
	if (!is_ttbr0_addr(far))
		arm64_apply_bp_hardening();

	user_exit_irqoff();
	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_mem_abort(far, esr, regs);
}
NOKPROBE_SYMBOL(el0_ia);

static void notrace el0_fpsimd_acc(struct pt_regs *regs, unsigned long esr)
static void noinstr el0_fpsimd_acc(struct pt_regs *regs, unsigned long esr)
{
	user_exit_irqoff();
	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_fpsimd_acc(esr, regs);
}
NOKPROBE_SYMBOL(el0_fpsimd_acc);

static void notrace el0_sve_acc(struct pt_regs *regs, unsigned long esr)
static void noinstr el0_sve_acc(struct pt_regs *regs, unsigned long esr)
{
	user_exit_irqoff();
	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_sve_acc(esr, regs);
}
NOKPROBE_SYMBOL(el0_sve_acc);

static void notrace el0_fpsimd_exc(struct pt_regs *regs, unsigned long esr)
static void noinstr el0_fpsimd_exc(struct pt_regs *regs, unsigned long esr)
{
	user_exit_irqoff();
	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_fpsimd_exc(esr, regs);
}
NOKPROBE_SYMBOL(el0_fpsimd_exc);

static void notrace el0_sys(struct pt_regs *regs, unsigned long esr)
static void noinstr el0_sys(struct pt_regs *regs, unsigned long esr)
{
	user_exit_irqoff();
	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_sysinstr(esr, regs);
}
NOKPROBE_SYMBOL(el0_sys);

static void notrace el0_pc(struct pt_regs *regs, unsigned long esr)
static void noinstr el0_pc(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	if (!is_ttbr0_addr(instruction_pointer(regs)))
		arm64_apply_bp_hardening();

	user_exit_irqoff();
	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_sp_pc_abort(far, esr, regs);
}
NOKPROBE_SYMBOL(el0_pc);

static void notrace el0_sp(struct pt_regs *regs, unsigned long esr)
static void noinstr el0_sp(struct pt_regs *regs, unsigned long esr)
{
	user_exit_irqoff();
	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_sp_pc_abort(regs->sp, esr, regs);
}
NOKPROBE_SYMBOL(el0_sp);

static void notrace el0_undef(struct pt_regs *regs)
static void noinstr el0_undef(struct pt_regs *regs)
{
	user_exit_irqoff();
	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_undefinstr(regs);
}
NOKPROBE_SYMBOL(el0_undef);

static void notrace el0_bti(struct pt_regs *regs)
static void noinstr el0_bti(struct pt_regs *regs)
{
	user_exit_irqoff();
	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_bti(regs);
}
NOKPROBE_SYMBOL(el0_bti);

static void notrace el0_inv(struct pt_regs *regs, unsigned long esr)
static void noinstr el0_inv(struct pt_regs *regs, unsigned long esr)
{
	user_exit_irqoff();
	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	bad_el0_sync(regs, 0, esr);
}
NOKPROBE_SYMBOL(el0_inv);

static void notrace el0_dbg(struct pt_regs *regs, unsigned long esr)
static void noinstr el0_dbg(struct pt_regs *regs, unsigned long esr)
{
	/* Only watchpoints write FAR_EL1, otherwise its UNKNOWN */
	unsigned long far = read_sysreg(far_el1);

@@ -222,30 +353,28 @@ static void notrace el0_dbg(struct pt_regs *regs, unsigned long esr)
	if (system_uses_irq_prio_masking())
		gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);

	user_exit_irqoff();
	enter_from_user_mode();
	do_debug_exception(far, esr, regs);
	local_daif_restore(DAIF_PROCCTX_NOIRQ);
}
NOKPROBE_SYMBOL(el0_dbg);

static void notrace el0_svc(struct pt_regs *regs)
static void noinstr el0_svc(struct pt_regs *regs)
{
	if (system_uses_irq_prio_masking())
		gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);

	enter_from_user_mode();
	do_el0_svc(regs);
}
NOKPROBE_SYMBOL(el0_svc);

static void notrace el0_fpac(struct pt_regs *regs, unsigned long esr)
static void noinstr el0_fpac(struct pt_regs *regs, unsigned long esr)
{
	user_exit_irqoff();
	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_ptrauth_fault(regs, esr);
}
NOKPROBE_SYMBOL(el0_fpac);

asmlinkage void notrace el0_sync_handler(struct pt_regs *regs)
asmlinkage void noinstr el0_sync_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

@@ -297,27 +426,25 @@ asmlinkage void notrace el0_sync_handler(struct pt_regs *regs)
		el0_inv(regs, esr);
	}
}
NOKPROBE_SYMBOL(el0_sync_handler);

#ifdef CONFIG_COMPAT
static void notrace el0_cp15(struct pt_regs *regs, unsigned long esr)
static void noinstr el0_cp15(struct pt_regs *regs, unsigned long esr)
{
	user_exit_irqoff();
	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_cp15instr(esr, regs);
}
NOKPROBE_SYMBOL(el0_cp15);

static void notrace el0_svc_compat(struct pt_regs *regs)
static void noinstr el0_svc_compat(struct pt_regs *regs)
{
	if (system_uses_irq_prio_masking())
		gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);

	enter_from_user_mode();
	do_el0_svc_compat(regs);
}
NOKPROBE_SYMBOL(el0_svc_compat);

asmlinkage void notrace el0_sync_compat_handler(struct pt_regs *regs)
asmlinkage void noinstr el0_sync_compat_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

@@ -360,5 +487,4 @@ asmlinkage void notrace el0_sync_compat_handler(struct pt_regs *regs)
		el0_inv(regs, esr);
	}
}
NOKPROBE_SYMBOL(el0_sync_compat_handler);
#endif /* CONFIG_COMPAT */

@@ -30,18 +30,18 @@
#include <asm/unistd.h>

/*
 * Context tracking subsystem. Used to instrument transitions
 * between user and kernel mode.
 * Context tracking and irqflag tracing need to instrument transitions between
 * user and kernel mode.
 */
	.macro ct_user_exit_irqoff
#ifdef CONFIG_CONTEXT_TRACKING
	.macro user_exit_irqoff
#if defined(CONFIG_CONTEXT_TRACKING) || defined(CONFIG_TRACE_IRQFLAGS)
	bl	enter_from_user_mode
#endif
	.endm

	.macro ct_user_enter
#ifdef CONFIG_CONTEXT_TRACKING
	bl	context_tracking_user_enter
	.macro user_enter_irqoff
#if defined(CONFIG_CONTEXT_TRACKING) || defined(CONFIG_TRACE_IRQFLAGS)
	bl	exit_to_user_mode
#endif
	.endm

@@ -298,9 +298,6 @@ alternative_if ARM64_HAS_IRQ_PRIO_MASKING
alternative_else_nop_endif

	ldp	x21, x22, [sp, #S_PC]		// load ELR, SPSR
	.if	\el == 0
	ct_user_enter
	.endif

#ifdef CONFIG_ARM64_SW_TTBR0_PAN
alternative_if_not ARM64_HAS_PAN

@@ -637,16 +634,8 @@ SYM_CODE_START_LOCAL_NOALIGN(el1_irq)
	gic_prio_irq_setup pmr=x20, tmp=x1
	enable_da_f

#ifdef CONFIG_ARM64_PSEUDO_NMI
	test_irqs_unmasked	res=x0, pmr=x20
	cbz	x0, 1f
	bl	asm_nmi_enter
1:
#endif

#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif
	mov	x0, sp
	bl	enter_el1_irq_or_nmi

	irq_handler

@@ -665,26 +654,8 @@ alternative_else_nop_endif
1:
#endif

#ifdef CONFIG_ARM64_PSEUDO_NMI
	/*
	 * When using IRQ priority masking, we can get spurious interrupts while
	 * PMR is set to GIC_PRIO_IRQOFF. An NMI might also have occurred in a
	 * section with interrupts disabled. Skip tracing in those cases.
	 */
	test_irqs_unmasked	res=x0, pmr=x20
	cbz	x0, 1f
	bl	asm_nmi_exit
1:
#endif

#ifdef CONFIG_TRACE_IRQFLAGS
#ifdef CONFIG_ARM64_PSEUDO_NMI
	test_irqs_unmasked	res=x0, pmr=x20
	cbnz	x0, 1f
#endif
	bl	trace_hardirqs_on
1:
#endif
	mov	x0, sp
	bl	exit_el1_irq_or_nmi

	kernel_exit 1
SYM_CODE_END(el1_irq)

@@ -726,21 +697,14 @@ SYM_CODE_START_LOCAL_NOALIGN(el0_irq)
	kernel_entry 0
el0_irq_naked:
	gic_prio_irq_setup pmr=x20, tmp=x0
	ct_user_exit_irqoff
	user_exit_irqoff
	enable_da_f

#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif

	tbz	x22, #55, 1f
	bl	do_el0_irq_bp_hardening
1:
	irq_handler

#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on
#endif
	b	ret_to_user
SYM_CODE_END(el0_irq)

@@ -759,7 +723,7 @@ SYM_CODE_START_LOCAL(el0_error)
el0_error_naked:
	mrs	x25, esr_el1
	gic_prio_kentry_setup tmp=x2
	ct_user_exit_irqoff
	user_exit_irqoff
	enable_dbg
	mov	x0, sp
	mov	x1, x25

@@ -774,13 +738,17 @@ SYM_CODE_END(el0_error)
SYM_CODE_START_LOCAL(ret_to_user)
	disable_daif
	gic_prio_kentry_setup tmp=x3
	ldr	x1, [tsk, #TSK_TI_FLAGS]
	and	x2, x1, #_TIF_WORK_MASK
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif
	ldr	x19, [tsk, #TSK_TI_FLAGS]
	and	x2, x19, #_TIF_WORK_MASK
	cbnz	x2, work_pending
finish_ret_to_user:
	user_enter_irqoff
	/* Ignore asynchronous tag check faults in the uaccess routines */
	clear_mte_async_tcf
	enable_step_tsk x1, x2
	enable_step_tsk x19, x2
#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
	bl	stackleak_erase
#endif

@@ -791,11 +759,9 @@ finish_ret_to_user:
 */
work_pending:
	mov	x0, sp				// 'regs'
	mov	x1, x19
	bl	do_notify_resume
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on		// enabled while in userspace
#endif
	ldr	x1, [tsk, #TSK_TI_FLAGS]	// re-check for single-step
	ldr	x19, [tsk, #TSK_TI_FLAGS]	// re-check for single-step
	b	finish_ret_to_user
SYM_CODE_END(ret_to_user)

@@ -67,18 +67,3 @@ void __init init_IRQ(void)
		local_daif_restore(DAIF_PROCCTX_NOIRQ);
	}
}

/*
 * Stubs to make nmi_enter/exit() code callable from ASM
 */
asmlinkage void notrace asm_nmi_enter(void)
{
	nmi_enter();
}
NOKPROBE_SYMBOL(asm_nmi_enter);

asmlinkage void notrace asm_nmi_exit(void)
{
	nmi_exit();
}
NOKPROBE_SYMBOL(asm_nmi_exit);

@@ -72,13 +72,13 @@ EXPORT_SYMBOL_GPL(pm_power_off);

void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);

static void __cpu_do_idle(void)
static void noinstr __cpu_do_idle(void)
{
	dsb(sy);
	wfi();
}

static void __cpu_do_idle_irqprio(void)
static void noinstr __cpu_do_idle_irqprio(void)
{
	unsigned long pmr;
	unsigned long daif_bits;

@@ -108,7 +108,7 @@ static void __cpu_do_idle_irqprio(void)
 * ensure that interrupts are not masked at the PMR (because the core will
 * not wake up if we block the wake up signal in the interrupt controller).
 */
void cpu_do_idle(void)
void noinstr cpu_do_idle(void)
{
	if (system_uses_irq_prio_masking())
		__cpu_do_idle_irqprio();

@@ -119,7 +119,7 @@ void cpu_do_idle(void)
/*
 * This is our default idle handler.
 */
void arch_cpu_idle(void)
void noinstr arch_cpu_idle(void)
{
	/*
	 * This should do all the clock switching and wait for interrupt

@@ -10,6 +10,7 @@
#include <linux/uaccess.h>

#include <asm/alternative.h>
#include <asm/exception.h>
#include <asm/kprobes.h>
#include <asm/mmu.h>
#include <asm/ptrace.h>

@@ -223,16 +224,16 @@ static __kprobes unsigned long _sdei_handler(struct pt_regs *regs,
}


asmlinkage __kprobes notrace unsigned long
asmlinkage noinstr unsigned long
__sdei_handler(struct pt_regs *regs, struct sdei_registered_event *arg)
{
	unsigned long ret;

	nmi_enter();
	arm64_enter_nmi(regs);

	ret = _sdei_handler(regs, arg);

	nmi_exit();
	arm64_exit_nmi(regs);

	return ret;
}

@@ -121,7 +121,6 @@ static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,

	cortex_a76_erratum_1463225_svc_handler();
	local_daif_restore(DAIF_PROCCTX);
	user_exit();

	if (system_supports_mte() && (flags & _TIF_MTE_ASYNC_FAULT)) {
		/*

@@ -34,6 +34,7 @@
#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/exception.h>
#include <asm/extable.h>
#include <asm/insn.h>
#include <asm/kprobes.h>

@@ -753,8 +754,10 @@ const char *esr_get_class_string(u32 esr)
 * bad_mode handles the impossible case in the exception vector. This is always
 * fatal.
 */
asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
asmlinkage void notrace bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
{
	arm64_enter_nmi(regs);

	console_verbose();

	pr_crit("Bad mode in %s handler detected on CPU%d, code 0x%08x -- %s\n",

@@ -786,7 +789,7 @@ void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr)
DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack)
	__aligned(16);

asmlinkage void handle_bad_stack(struct pt_regs *regs)
asmlinkage void noinstr handle_bad_stack(struct pt_regs *regs)
{
	unsigned long tsk_stk = (unsigned long)current->stack;
	unsigned long irq_stk = (unsigned long)this_cpu_read(irq_stack_ptr);

@@ -794,6 +797,8 @@ asmlinkage void handle_bad_stack(struct pt_regs *regs)
	unsigned int esr = read_sysreg(esr_el1);
	unsigned long far = read_sysreg(far_el1);

	arm64_enter_nmi(regs);

	console_verbose();
	pr_emerg("Insufficient stack space to handle exception!");

@@ -865,24 +870,17 @@ bool arm64_is_fatal_ras_serror(struct pt_regs *regs, unsigned int esr)
	}
}

asmlinkage void do_serror(struct pt_regs *regs, unsigned int esr)
asmlinkage void noinstr do_serror(struct pt_regs *regs, unsigned int esr)
{
	nmi_enter();
	arm64_enter_nmi(regs);

	/* non-RAS errors are not containable */
	if (!arm64_is_ras_serror(esr) || arm64_is_fatal_ras_serror(regs, esr))
		arm64_serror_panic(regs, esr);

	nmi_exit();
	arm64_exit_nmi(regs);
}

asmlinkage void enter_from_user_mode(void)
{
	CT_WARN_ON(ct_state() != CONTEXT_USER);
	user_exit_irqoff();
}
NOKPROBE_SYMBOL(enter_from_user_mode);

/* GENERIC_BUG traps */

int is_valid_bugaddr(unsigned long addr)

@@ -789,25 +789,6 @@ void __init hook_debug_fault_code(int nr,
 */
static void debug_exception_enter(struct pt_regs *regs)
{
	/*
	 * Tell lockdep we disabled irqs in entry.S. Do nothing if they were
	 * already disabled to preserve the last enabled/disabled addresses.
	 */
	if (interrupts_enabled(regs))
		trace_hardirqs_off();

	if (user_mode(regs)) {
		RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
	} else {
		/*
		 * We might have interrupted pretty much anything. In
		 * fact, if we're a debug exception, we can even interrupt
		 * NMI processing. We don't want this code makes in_nmi()
		 * to return true, but we need to notify RCU.
		 */
		rcu_nmi_enter();
	}

	preempt_disable();

	/* This code is a bit fragile. Test it. */

@@ -818,12 +799,6 @@ NOKPROBE_SYMBOL(debug_exception_enter);
static void debug_exception_exit(struct pt_regs *regs)
{
	preempt_enable_no_resched();

	if (!user_mode(regs))
		rcu_nmi_exit();

	if (interrupts_enabled(regs))
		trace_hardirqs_on();
}
NOKPROBE_SYMBOL(debug_exception_exit);

@@ -152,6 +152,7 @@ config PPC
	select ARCH_USE_QUEUED_SPINLOCKS if PPC_QUEUED_SPINLOCKS
	select ARCH_WANT_IPC_PARSE_VERSION
	select ARCH_WANT_IRQS_OFF_ACTIVATE_MM
	select ARCH_WANT_LD_ORPHAN_WARN
	select ARCH_WEAK_RELEASE_ACQUIRE
	select BINFMT_ELF
	select BUILDTIME_TABLE_SORT

@@ -123,7 +123,6 @@ endif
LDFLAGS_vmlinux-y := -Bstatic
LDFLAGS_vmlinux-$(CONFIG_RELOCATABLE) := -pie
LDFLAGS_vmlinux	:= $(LDFLAGS_vmlinux-y)
LDFLAGS_vmlinux += $(call ld-option,--orphan-handling=warn)

ifdef CONFIG_PPC64
ifeq ($(call cc-option-yn,-mcmodel=medium),y)

@@ -242,6 +242,18 @@ extern void radix_init_pseries(void);
static inline void radix_init_pseries(void) { };
#endif

#ifdef CONFIG_HOTPLUG_CPU
#define arch_clear_mm_cpumask_cpu(cpu, mm)				\
	do {								\
		if (cpumask_test_cpu(cpu, mm_cpumask(mm))) {		\
			atomic_dec(&(mm)->context.active_cpus);		\
			cpumask_clear_cpu(cpu, mm_cpumask(mm));		\
		}							\
	} while (0)

void cleanup_cpu_mmu_context(void);
#endif

static inline int get_user_context(mm_context_t *ctx, unsigned long ea)
{
	int index = ea >> MAX_EA_BITS_PER_CONTEXT;

@@ -1214,12 +1214,9 @@ void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu)
static bool kvmppc_xive_vcpu_id_valid(struct kvmppc_xive *xive, u32 cpu)
{
	/* We have a block of xive->nr_servers VPs. We just need to check
	 * raw vCPU ids are below the expected limit for this guest's
	 * core stride ; kvmppc_pack_vcpu_id() will pack them down to an
	 * index that can be safely used to compute a VP id that belongs
	 * to the VP block.
	 * packed vCPU ids are below that.
	 */
	return cpu < xive->nr_servers * xive->kvm->arch.emul_smt_mode;
	return kvmppc_pack_vcpu_id(xive->kvm, cpu) < xive->nr_servers;
}

int kvmppc_xive_compute_vp_id(struct kvmppc_xive *xive, u32 cpu, u32 *vp)

@@ -68,7 +68,7 @@ static __always_inline void tlbiel_hash_set_isa300(unsigned int set, unsigned in
	rs = ((unsigned long)pid << PPC_BITLSHIFT(31));

	asm volatile(PPC_TLBIEL(%0, %1, %2, %3, %4)
		     : : "r"(rb), "r"(rs), "i"(ric), "i"(prs), "r"(r)
		     : : "r"(rb), "r"(rs), "i"(ric), "i"(prs), "i"(r)
		     : "memory");
}

@@ -92,16 +92,15 @@ static void tlbiel_all_isa300(unsigned int num_sets, unsigned int is)
	asm volatile("ptesync": : :"memory");

	/*
	 * Flush the first set of the TLB, and any caching of partition table
	 * entries. Then flush the remaining sets of the TLB. Hash mode uses
	 * partition scoped TLB translations.
	 * Flush the partition table cache if this is HV mode.
	 */
	tlbiel_hash_set_isa300(0, is, 0, 2, 0);
	for (set = 1; set < num_sets; set++)
		tlbiel_hash_set_isa300(set, is, 0, 0, 0);
	if (early_cpu_has_feature(CPU_FTR_HVMODE))
		tlbiel_hash_set_isa300(0, is, 0, 2, 0);

	/*
	 * Now invalidate the process table cache.
	 * Now invalidate the process table cache. UPRT=0 HPT modes (what
	 * current hardware implements) do not use the process table, but
	 * add the flushes anyway.
	 *
	 * From ISA v3.0B p. 1078:
	 * The following forms are invalid.

@@ -110,6 +109,14 @@ static void tlbiel_all_isa300(unsigned int num_sets, unsigned int is)
	 */
	tlbiel_hash_set_isa300(0, is, 0, 2, 1);

	/*
	 * Then flush the sets of the TLB proper. Hash mode uses
	 * partition scoped TLB translations, which may be flushed
	 * in !HV mode.
	 */
	for (set = 0; set < num_sets; set++)
		tlbiel_hash_set_isa300(set, is, 0, 0, 0);

	ppc_after_tlbiel_barrier();

	asm volatile(PPC_ISA_3_0_INVALIDATE_ERAT "; isync" : : :"memory");

@@ -17,6 +17,7 @@
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/cpu.h>

#include <asm/mmu_context.h>
#include <asm/pgalloc.h>

@@ -307,3 +308,22 @@ void radix__switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
	isync();
}
#endif

/**
 * cleanup_cpu_mmu_context - Clean up MMU details for this CPU (newly offlined)
 *
 * This clears the CPU from mm_cpumask for all processes, and then flushes the
 * local TLB to ensure TLB coherency in case the CPU is onlined again.
 *
 * KVM guest translations are not necessarily flushed here. If KVM started
 * using mm_cpumask or the Linux APIs which do, this would have to be resolved.
 */
#ifdef CONFIG_HOTPLUG_CPU
void cleanup_cpu_mmu_context(void)
{
	int cpu = smp_processor_id();

	clear_tasks_mm_cpumask(cpu);
	tlbiel_all();
}
#endif

@@ -742,8 +742,7 @@ static int __init parse_numa_properties(void)
			of_node_put(cpu);
		}

		if (likely(nid > 0))
			node_set_online(nid);
		node_set_online(nid);
	}

	get_n_mem_cells(&n_mem_addr_cells, &n_mem_size_cells);

@@ -911,6 +911,8 @@ static int smp_core99_cpu_disable(void)

	mpic_cpu_set_priority(0xf);

	cleanup_cpu_mmu_context();

	return 0;
}

@@ -211,11 +211,16 @@ static void __init pnv_init(void)
		add_preferred_console("hvc", 0, NULL);

	if (!radix_enabled()) {
		size_t size = sizeof(struct slb_entry) * mmu_slb_size;
		int i;

		/* Allocate per cpu area to save old slb contents during MCE */
		for_each_possible_cpu(i)
			paca_ptrs[i]->mce_faulty_slbs = memblock_alloc_node(mmu_slb_size, __alignof__(*paca_ptrs[i]->mce_faulty_slbs), cpu_to_node(i));
		for_each_possible_cpu(i) {
			paca_ptrs[i]->mce_faulty_slbs =
					memblock_alloc_node(size,
						__alignof__(struct slb_entry),
						cpu_to_node(i));
		}
	}
}

@@ -143,6 +143,9 @@ static int pnv_smp_cpu_disable(void)
		xive_smp_disable_cpu();
	else
		xics_migrate_irqs_away();

	cleanup_cpu_mmu_context();

	return 0;
}

@@ -90,6 +90,9 @@ static int pseries_cpu_disable(void)
		xive_smp_disable_cpu();
	else
		xics_migrate_irqs_away();

	cleanup_cpu_mmu_context();

	return 0;
}

@@ -458,7 +458,8 @@ again:
			return hwirq;
		}

		virq = irq_create_mapping(NULL, hwirq);
		virq = irq_create_mapping_affinity(NULL, hwirq,
						   entry->affinity);

		if (!virq) {
			pr_debug("rtas_msi: Failed mapping hwirq %d\n", hwirq);

@@ -763,12 +763,7 @@ ENTRY(io_int_handler)
	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
	TSTMSK	__LC_CPU_FLAGS,_CIF_IGNORE_IRQ
	jo	.Lio_restore
#if IS_ENABLED(CONFIG_TRACE_IRQFLAGS)
	tmhh	%r8,0x300
	jz	1f
	TRACE_IRQS_OFF
1:
#endif
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
.Lio_loop:
	lgr	%r2,%r11		# pass pointer to pt_regs

@@ -791,12 +786,7 @@ ENTRY(io_int_handler)
	TSTMSK	__LC_CPU_FLAGS,_CIF_WORK
	jnz	.Lio_work
.Lio_restore:
#if IS_ENABLED(CONFIG_TRACE_IRQFLAGS)
	tm	__PT_PSW(%r11),3
	jno	0f
	TRACE_IRQS_ON
0:
#endif
	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r11)
	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
	jno	.Lio_exit_kernel

@@ -976,12 +966,7 @@ ENTRY(ext_int_handler)
	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
	TSTMSK	__LC_CPU_FLAGS,_CIF_IGNORE_IRQ
	jo	.Lio_restore
#if IS_ENABLED(CONFIG_TRACE_IRQFLAGS)
	tmhh	%r8,0x300
	jz	1f
	TRACE_IRQS_OFF
1:
#endif
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lgr	%r2,%r11		# pass pointer to pt_regs
	lghi	%r3,EXT_INTERRUPT

@@ -33,7 +33,7 @@ EXPORT_SYMBOL(__delay);

static void __udelay_disabled(unsigned long long usecs)
{
	unsigned long cr0, cr0_new, psw_mask, flags;
	unsigned long cr0, cr0_new, psw_mask;
	struct s390_idle_data idle;
	u64 end;

@@ -45,9 +45,8 @@ static void __udelay_disabled(unsigned long long usecs)
	psw_mask = __extract_psw() | PSW_MASK_EXT | PSW_MASK_WAIT;
	set_clock_comparator(end);
	set_cpu_flag(CIF_IGNORE_IRQ);
	local_irq_save(flags);
	psw_idle(&idle, psw_mask);
	local_irq_restore(flags);
	trace_hardirqs_off();
	clear_cpu_flag(CIF_IGNORE_IRQ);
	set_clock_comparator(S390_lowcore.clock_comparator);
	__ctl_load(cr0, 0, 0);

@@ -103,9 +103,10 @@ static int zpci_set_irq_affinity(struct irq_data *data, const struct cpumask *de
{
	struct msi_desc *entry = irq_get_msi_desc(data->irq);
	struct msi_msg msg = entry->msg;
	int cpu_addr = smp_cpu_get_cpu_address(cpumask_first(dest));

	msg.address_lo &= 0xff0000ff;
	msg.address_lo |= (cpumask_first(dest) << 8);
	msg.address_lo |= (cpu_addr << 8);
	pci_write_msi_msg(data->irq, &msg);

	return IRQ_SET_MASK_OK;

@@ -238,6 +239,7 @@ int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
	unsigned long bit;
	struct msi_desc *msi;
	struct msi_msg msg;
	int cpu_addr;
	int rc, irq;

	zdev->aisb = -1UL;

@@ -287,9 +289,15 @@ int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
				 handle_percpu_irq);
		msg.data = hwirq - bit;
		if (irq_delivery == DIRECTED) {
			if (msi->affinity)
				cpu = cpumask_first(&msi->affinity->mask);
			else
				cpu = 0;
			cpu_addr = smp_cpu_get_cpu_address(cpu);

			msg.address_lo = zdev->msi_addr & 0xff0000ff;
			msg.address_lo |= msi->affinity ?
				(cpumask_first(&msi->affinity->mask) << 8) : 0;
			msg.address_lo |= (cpu_addr << 8);

			for_each_possible_cpu(cpu) {
				airq_iv_set_data(zpci_ibv[cpu], hwirq, irq);
			}

@@ -100,6 +100,7 @@ config X86
	select ARCH_WANT_DEFAULT_BPF_JIT if X86_64
	select ARCH_WANTS_DYNAMIC_TASK_STRUCT
	select ARCH_WANT_HUGE_PMD_SHARE
	select ARCH_WANT_LD_ORPHAN_WARN
	select ARCH_WANTS_THP_SWAP if X86_64
	select BUILDTIME_TABLE_SORT
	select CLKEVT_I8253

@@ -209,9 +209,6 @@ ifdef CONFIG_X86_64
	LDFLAGS_vmlinux += -z max-page-size=0x200000
endif

# We never want expected sections to be placed heuristically by the
# linker. All sections should be explicitly named in the linker script.
LDFLAGS_vmlinux += $(call ld-option, --orphan-handling=warn)

archscripts: scripts_basic
	$(Q)$(MAKE) $(build)=arch/x86/tools relocs

@@ -61,7 +61,9 @@ KBUILD_LDFLAGS += $(call ld-option,--no-ld-generated-unwind-info)
# Compressed kernel should be built as PIE since it may be loaded at any
# address by the bootloader.
LDFLAGS_vmlinux := -pie $(call ld-option, --no-dynamic-linker)
LDFLAGS_vmlinux += $(call ld-option, --orphan-handling=warn)
ifdef CONFIG_LD_ORPHAN_WARN
LDFLAGS_vmlinux += --orphan-handling=warn
endif
LDFLAGS_vmlinux += -T

hostprogs	:= mkpiggy

@@ -32,13 +32,12 @@ struct ghcb *boot_ghcb;
 */
static bool insn_has_rep_prefix(struct insn *insn)
{
	insn_byte_t p;
	int i;

	insn_get_prefixes(insn);

	for (i = 0; i < insn->prefixes.nbytes; i++) {
		insn_byte_t p = insn->prefixes.bytes[i];

	for_each_insn_prefix(insn, i, p) {
		if (p == 0xf2 || p == 0xf3)
			return true;
	}

@@ -1916,7 +1916,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs, struct perf_sample_d
		 * that caused the PEBS record. It's called collision.
		 * If collision happened, the record will be dropped.
		 */
		if (p->status != (1ULL << bit)) {
		if (pebs_status != (1ULL << bit)) {
			for_each_set_bit(i, (unsigned long *)&pebs_status, size)
				error[i]++;
			continue;

@@ -1940,7 +1940,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs, struct perf_sample_d
	if (error[bit]) {
		perf_log_lost_samples(event, error[bit]);

		if (perf_event_account_interrupt(event))
		if (iregs && perf_event_account_interrupt(event))
			x86_pmu_stop(event, 0);
	}

@@ -201,6 +201,21 @@ static inline int insn_offset_immediate(struct insn *insn)
	return insn_offset_displacement(insn) + insn->displacement.nbytes;
}

/**
 * for_each_insn_prefix() -- Iterate prefixes in the instruction
 * @insn: Pointer to struct insn.
 * @idx:  Index storage.
 * @prefix: Prefix byte.
 *
 * Iterate prefix bytes of given @insn. Each prefix byte is stored in @prefix
 * and the index is stored in @idx (note that this @idx is just for a cursor,
 * do not change it.)
 * Since prefixes.nbytes can be bigger than 4 if some prefixes
 * are repeated, it cannot be used for looping over the prefixes.
 */
#define for_each_insn_prefix(insn, idx, prefix)	\
	for (idx = 0; idx < ARRAY_SIZE(insn->prefixes.bytes) && (prefix = insn->prefixes.bytes[idx]) != 0; idx++)

#define POP_SS_OPCODE 0x1f
#define MOV_SREG_OPCODE 0x8e
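
For illustration, a hypothetical caller of the new iterator would look like the
converted call sites further down in this merge (this helper itself is not part
of the patch):

/* Sketch: count operand-size override (0x66) prefixes with the new
 * iterator rather than indexing prefixes.bytes[] up to nbytes, which
 * can walk past the 4-byte array when prefixes are repeated. */
static int count_66_prefixes(struct insn *insn)
{
	insn_byte_t p;
	int i, n = 0;

	insn_get_prefixes(insn);	/* decode the prefix bytes first */

	for_each_insn_prefix(insn, i, p) {
		if (p == 0x66)
			n++;
	}
	return n;
}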

@@ -161,7 +161,7 @@ static int __init early_set_hub_type(void)
	/* UV4/4A only have a revision difference */
	case UV4_HUB_PART_NUMBER:
		uv_min_hub_revision_id = node_id.s.revision
					 + UV4_HUB_REVISION_BASE;
					 + UV4_HUB_REVISION_BASE - 1;
		uv_hub_type_set(UV4);
		if (uv_min_hub_revision_id == UV4A_HUB_REVISION_BASE)
			uv_hub_type_set(UV4|UV4A);

@@ -570,6 +570,8 @@ static void domain_add_cpu(int cpu, struct rdt_resource *r)

	if (d) {
		cpumask_set_cpu(cpu, &d->cpu_mask);
		if (r->cache.arch_has_per_cpu_cfg)
			rdt_domain_reconfigure_cdp(r);
		return;
	}

@@ -923,6 +925,7 @@ static __init void rdt_init_res_defs_intel(void)
	    r->rid == RDT_RESOURCE_L2CODE) {
		r->cache.arch_has_sparse_bitmaps = false;
		r->cache.arch_has_empty_bitmaps = false;
		r->cache.arch_has_per_cpu_cfg = false;
	} else if (r->rid == RDT_RESOURCE_MBA) {
		r->msr_base = MSR_IA32_MBA_THRTL_BASE;
		r->msr_update = mba_wrmsr_intel;

@@ -943,6 +946,7 @@ static __init void rdt_init_res_defs_amd(void)
	    r->rid == RDT_RESOURCE_L2CODE) {
		r->cache.arch_has_sparse_bitmaps = true;
		r->cache.arch_has_empty_bitmaps = true;
		r->cache.arch_has_per_cpu_cfg = true;
	} else if (r->rid == RDT_RESOURCE_MBA) {
		r->msr_base = MSR_IA32_MBA_BW_BASE;
		r->msr_update = mba_wrmsr_amd;

@@ -360,6 +360,8 @@ struct msr_param {
 *			executing entities
 * @arch_has_sparse_bitmaps:	True if a bitmap like f00f is valid.
 * @arch_has_empty_bitmaps:	True if the '0' bitmap is valid.
 * @arch_has_per_cpu_cfg:	True if QOS_CFG register for this cache
 *				level has CPU scope.
 */
struct rdt_cache {
	unsigned int	cbm_len;

@@ -369,6 +371,7 @@ struct rdt_cache {
	unsigned int	shareable_bits;
	bool		arch_has_sparse_bitmaps;
	bool		arch_has_empty_bitmaps;
	bool		arch_has_per_cpu_cfg;
};

/**

@@ -1909,8 +1909,13 @@ static int set_cache_qos_cfg(int level, bool enable)

	r_l = &rdt_resources_all[level];
	list_for_each_entry(d, &r_l->domains, list) {
		/* Pick one CPU from each domain instance to update MSR */
		cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask);
		if (r_l->cache.arch_has_per_cpu_cfg)
			/* Pick all the CPUs in the domain instance */
			for_each_cpu(cpu, &d->cpu_mask)
				cpumask_set_cpu(cpu, cpu_mask);
		else
			/* Pick one CPU from each domain instance to update MSR */
			cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask);
	}
	cpu = get_cpu();
	/* Update QOS_CFG MSR on this cpu if it's in cpu_mask. */

@@ -255,12 +255,13 @@ static volatile u32 good_2byte_insns[256 / 32] = {

static bool is_prefix_bad(struct insn *insn)
{
	insn_byte_t p;
	int i;

	for (i = 0; i < insn->prefixes.nbytes; i++) {
	for_each_insn_prefix(insn, i, p) {
		insn_attr_t attr;

		attr = inat_get_opcode_attribute(insn->prefixes.bytes[i]);
		attr = inat_get_opcode_attribute(p);
		switch (attr) {
		case INAT_MAKE_PREFIX(INAT_PFX_ES):
		case INAT_MAKE_PREFIX(INAT_PFX_CS):

@@ -715,6 +716,7 @@ static const struct uprobe_xol_ops push_xol_ops = {
static int branch_setup_xol_ops(struct arch_uprobe *auprobe, struct insn *insn)
{
	u8 opc1 = OPCODE1(insn);
	insn_byte_t p;
	int i;

	switch (opc1) {

@@ -746,8 +748,8 @@ static int branch_setup_xol_ops(struct arch_uprobe *auprobe, struct insn *insn)
	 * Intel and AMD behavior differ in 64-bit mode: Intel ignores 66 prefix.
	 * No one uses these insns, reject any branch insns with such prefix.
	 */
	for (i = 0; i < insn->prefixes.nbytes; i++) {
		if (insn->prefixes.bytes[i] == 0x66)
	for_each_insn_prefix(insn, i, p) {
		if (p == 0x66)
			return -ENOTSUPP;
	}
@@ -63,13 +63,12 @@ static bool is_string_insn(struct insn *insn)
  */
 bool insn_has_rep_prefix(struct insn *insn)
 {
+	insn_byte_t p;
 	int i;
 
 	insn_get_prefixes(insn);
 
-	for (i = 0; i < insn->prefixes.nbytes; i++) {
-		insn_byte_t p = insn->prefixes.bytes[i];
-
+	for_each_insn_prefix(insn, i, p) {
 		if (p == 0xf2 || p == 0xf3)
 			return true;
 	}
@@ -95,14 +94,15 @@ static int get_seg_reg_override_idx(struct insn *insn)
 {
 	int idx = INAT_SEG_REG_DEFAULT;
 	int num_overrides = 0, i;
+	insn_byte_t p;
 
 	insn_get_prefixes(insn);
 
 	/* Look for any segment override prefixes. */
-	for (i = 0; i < insn->prefixes.nbytes; i++) {
+	for_each_insn_prefix(insn, i, p) {
 		insn_attr_t attr;
 
-		attr = inat_get_opcode_attribute(insn->prefixes.bytes[i]);
+		attr = inat_get_opcode_attribute(p);
 		switch (attr) {
 		case INAT_MAKE_PREFIX(INAT_PFX_CS):
 			idx = INAT_SEG_REG_CS;
@@ -144,7 +144,7 @@ static struct bio *blk_bio_write_same_split(struct request_queue *q,
 static inline unsigned get_max_io_size(struct request_queue *q,
 				       struct bio *bio)
 {
-	unsigned sectors = blk_max_size_offset(q, bio->bi_iter.bi_sector);
+	unsigned sectors = blk_max_size_offset(q, bio->bi_iter.bi_sector, 0);
 	unsigned max_sectors = sectors;
 	unsigned pbs = queue_physical_block_size(q) >> SECTOR_SHIFT;
 	unsigned lbs = queue_logical_block_size(q) >> SECTOR_SHIFT;
@@ -547,7 +547,10 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 
 	t->io_min = max(t->io_min, b->io_min);
 	t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);
-	t->chunk_sectors = lcm_not_zero(t->chunk_sectors, b->chunk_sectors);
+
+	/* Set non-power-of-2 compatible chunk_sectors boundary */
+	if (b->chunk_sectors)
+		t->chunk_sectors = gcd(t->chunk_sectors, b->chunk_sectors);
 
 	/* Physical block size a multiple of the logical block size? */
 	if (t->physical_block_size & (t->logical_block_size - 1)) {
@@ -145,6 +145,7 @@ obj-$(CONFIG_OF) += of/
 obj-$(CONFIG_SSB) += ssb/
 obj-$(CONFIG_BCMA) += bcma/
 obj-$(CONFIG_VHOST_RING) += vhost/
 obj-$(CONFIG_VHOST_IOTLB) += vhost/
+obj-$(CONFIG_VHOST) += vhost/
 obj-$(CONFIG_VLYNQ) += vlynq/
 obj-$(CONFIG_GREYBUS) += greybus/
@@ -47,27 +47,20 @@ static int spk_ttyio_ldisc_open(struct tty_struct *tty)
 {
 	struct spk_ldisc_data *ldisc_data;
 
+	if (tty != speakup_tty)
+		/* Somebody tried to use this line discipline outside speakup */
+		return -ENODEV;
+
 	if (!tty->ops->write)
 		return -EOPNOTSUPP;
 
-	mutex_lock(&speakup_tty_mutex);
-	if (speakup_tty) {
-		mutex_unlock(&speakup_tty_mutex);
-		return -EBUSY;
-	}
-	speakup_tty = tty;
-
 	ldisc_data = kmalloc(sizeof(*ldisc_data), GFP_KERNEL);
-	if (!ldisc_data) {
-		speakup_tty = NULL;
-		mutex_unlock(&speakup_tty_mutex);
+	if (!ldisc_data)
 		return -ENOMEM;
-	}
 
 	init_completion(&ldisc_data->completion);
 	ldisc_data->buf_free = true;
-	speakup_tty->disc_data = ldisc_data;
-	mutex_unlock(&speakup_tty_mutex);
+	tty->disc_data = ldisc_data;
 
 	return 0;
 }
@@ -191,9 +184,25 @@ static int spk_ttyio_initialise_ldisc(struct spk_synth *synth)
 
 	tty_unlock(tty);
 
+	mutex_lock(&speakup_tty_mutex);
+	speakup_tty = tty;
 	ret = tty_set_ldisc(tty, N_SPEAKUP);
 	if (ret)
-		pr_err("speakup: Failed to set N_SPEAKUP on tty\n");
+		speakup_tty = NULL;
+	mutex_unlock(&speakup_tty_mutex);
+
+	if (!ret)
+		/* Success */
+		return 0;
+
+	pr_err("speakup: Failed to set N_SPEAKUP on tty\n");
+
+	tty_lock(tty);
+	if (tty->ops->close)
+		tty->ops->close(tty, NULL);
+	tty_unlock(tty);
+
+	tty_kclose(tty);
 
 	return ret;
 }
@@ -142,6 +142,7 @@ config FPGA_DFL
 	tristate "FPGA Device Feature List (DFL) support"
 	select FPGA_BRIDGE
 	select FPGA_REGION
+	depends on HAS_IOMEM
 	help
 	  Device Feature List (DFL) defines a feature list structure that
 	  creates a linked list of feature headers within the MMIO space
@@ -1011,6 +1011,11 @@ static int vcn_v3_0_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
 	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
 	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_CNTL, tmp);
 
+	/* Stall DPG before WPTR/RPTR reset */
+	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
+		 UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
+		 ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
+
 	/* set the write pointer delay */
 	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR_CNTL, 0);
 
@@ -1033,6 +1038,10 @@ static int vcn_v3_0_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
 	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR,
 		lower_32_bits(ring->wptr));
 
+	/* Unstall DPG */
+	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
+		 0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
+
 	return 0;
 }
 
@@ -1556,8 +1565,14 @@ static int vcn_v3_0_pause_dpg_mode(struct amdgpu_device *adev,
 				UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
 				UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
 
+			/* Stall DPG before WPTR/RPTR reset */
+			WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
+				 UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
+				 ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
+
 			/* Restore */
 			ring = &adev->vcn.inst[inst_idx].ring_enc[0];
+			ring->wptr = 0;
 			WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_LO, ring->gpu_addr);
 			WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
 			WREG32_SOC15(VCN, inst_idx, mmUVD_RB_SIZE, ring->ring_size / 4);
@@ -1565,14 +1580,16 @@ static int vcn_v3_0_pause_dpg_mode(struct amdgpu_device *adev,
 			WREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
 
 			ring = &adev->vcn.inst[inst_idx].ring_enc[1];
+			ring->wptr = 0;
 			WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_LO2, ring->gpu_addr);
 			WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
 			WREG32_SOC15(VCN, inst_idx, mmUVD_RB_SIZE2, ring->ring_size / 4);
 			WREG32_SOC15(VCN, inst_idx, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
 			WREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
 
-			WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR,
-				   RREG32_SOC15(VCN, inst_idx, mmUVD_SCRATCH2) & 0x7FFFFFFF);
+			/* Unstall DPG */
+			WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
+				 0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
 
 			SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS,
 				UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON, UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
@@ -1630,10 +1647,6 @@ static void vcn_v3_0_dec_ring_set_wptr(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
 
-	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
-		WREG32_SOC15(VCN, ring->me, mmUVD_SCRATCH2,
-			lower_32_bits(ring->wptr) | 0x80000000);
-
 	if (ring->use_doorbell) {
 		adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
 		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
@@ -163,8 +163,17 @@ void rn_update_clocks(struct clk_mgr *clk_mgr_base,
 			new_clocks->dppclk_khz = 100000;
 	}
 
-	if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->base.clks.dppclk_khz)) {
-		if (clk_mgr->base.clks.dppclk_khz > new_clocks->dppclk_khz)
+	/*
+	 * Temporally ignore thew 0 cases for disp and dpp clks.
+	 * We may have a new feature that requires 0 clks in the future.
+	 */
+	if (new_clocks->dppclk_khz == 0 || new_clocks->dispclk_khz == 0) {
+		new_clocks->dppclk_khz = clk_mgr_base->clks.dppclk_khz;
+		new_clocks->dispclk_khz = clk_mgr_base->clks.dispclk_khz;
+	}
+
+	if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr_base->clks.dppclk_khz)) {
+		if (clk_mgr_base->clks.dppclk_khz > new_clocks->dppclk_khz)
 			dpp_clock_lowered = true;
 		clk_mgr_base->clks.dppclk_khz = new_clocks->dppclk_khz;
 		update_dppclk = true;
@@ -1164,7 +1164,12 @@ int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu,
 	if (ret)
 		return ret;
 
-	crystal_clock_freq = amdgpu_asic_get_xclk(adev);
+	/*
+	 * crystal_clock_freq div by 4 is required since the fan control
+	 * module refers to 25MHz
+	 */
+
+	crystal_clock_freq = amdgpu_asic_get_xclk(adev) / 4;
 	tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed);
 	WREG32_SOC15(THM, 0, mmCG_TACH_CTRL,
 		     REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_TACH_CTRL),
@@ -18021,16 +18021,6 @@ int intel_modeset_init_nogem(struct drm_i915_private *i915)
 	if (!HAS_GMCH(i915))
 		sanitize_watermarks(i915);
 
-	/*
-	 * Force all active planes to recompute their states. So that on
-	 * mode_setcrtc after probe, all the intel_plane_state variables
-	 * are already calculated and there is no assert_plane warnings
-	 * during bootup.
-	 */
-	ret = intel_initial_commit(dev);
-	if (ret)
-		drm_dbg_kms(&i915->drm, "Initial commit in probe failed.\n");
-
 	return 0;
 }
 
@@ -18039,11 +18029,21 @@ int intel_modeset_init(struct drm_i915_private *i915)
 {
 	int ret;
 
-	intel_overlay_setup(i915);
-
 	if (!HAS_DISPLAY(i915))
 		return 0;
 
+	/*
+	 * Force all active planes to recompute their states. So that on
+	 * mode_setcrtc after probe, all the intel_plane_state variables
+	 * are already calculated and there is no assert_plane warnings
+	 * during bootup.
+	 */
+	ret = intel_initial_commit(&i915->drm);
+	if (ret)
+		return ret;
+
+	intel_overlay_setup(i915);
+
 	ret = intel_fbdev_init(&i915->drm);
 	if (ret)
 		return ret;
@@ -101,18 +101,37 @@ static void __intel_breadcrumbs_disarm_irq(struct intel_breadcrumbs *b)
 	intel_gt_pm_put_async(b->irq_engine->gt);
 }
 
+static void intel_breadcrumbs_disarm_irq(struct intel_breadcrumbs *b)
+{
+	spin_lock(&b->irq_lock);
+	if (b->irq_armed)
+		__intel_breadcrumbs_disarm_irq(b);
+	spin_unlock(&b->irq_lock);
+}
+
 static void add_signaling_context(struct intel_breadcrumbs *b,
 				  struct intel_context *ce)
 {
-	intel_context_get(ce);
-	list_add_tail(&ce->signal_link, &b->signalers);
+	lockdep_assert_held(&ce->signal_lock);
+
+	spin_lock(&b->signalers_lock);
+	list_add_rcu(&ce->signal_link, &b->signalers);
+	spin_unlock(&b->signalers_lock);
 }
 
-static void remove_signaling_context(struct intel_breadcrumbs *b,
+static bool remove_signaling_context(struct intel_breadcrumbs *b,
 				     struct intel_context *ce)
 {
-	list_del(&ce->signal_link);
-	intel_context_put(ce);
+	lockdep_assert_held(&ce->signal_lock);
+
+	if (!list_empty(&ce->signals))
+		return false;
+
+	spin_lock(&b->signalers_lock);
+	list_del_rcu(&ce->signal_link);
+	spin_unlock(&b->signalers_lock);
+
+	return true;
 }
 
 static inline bool __request_completed(const struct i915_request *rq)
@@ -175,6 +194,8 @@ static void add_retire(struct intel_breadcrumbs *b, struct intel_timeline *tl)
 
 static bool __signal_request(struct i915_request *rq)
 {
+	GEM_BUG_ON(test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags));
+
 	if (!__dma_fence_signal(&rq->fence)) {
 		i915_request_put(rq);
 		return false;
@@ -195,15 +216,12 @@ static void signal_irq_work(struct irq_work *work)
 	struct intel_breadcrumbs *b = container_of(work, typeof(*b), irq_work);
 	const ktime_t timestamp = ktime_get();
 	struct llist_node *signal, *sn;
-	struct intel_context *ce, *cn;
-	struct list_head *pos, *next;
+	struct intel_context *ce;
 
 	signal = NULL;
 	if (unlikely(!llist_empty(&b->signaled_requests)))
 		signal = llist_del_all(&b->signaled_requests);
 
-	spin_lock(&b->irq_lock);
-
 	/*
 	 * Keep the irq armed until the interrupt after all listeners are gone.
 	 *
@@ -229,47 +247,44 @@ static void signal_irq_work(struct irq_work *work)
 	 * interrupt draw less ire from other users of the system and tools
 	 * like powertop.
	 */
-	if (!signal && b->irq_armed && list_empty(&b->signalers))
-		__intel_breadcrumbs_disarm_irq(b);
+	if (!signal && READ_ONCE(b->irq_armed) && list_empty(&b->signalers))
+		intel_breadcrumbs_disarm_irq(b);
 
-	list_for_each_entry_safe(ce, cn, &b->signalers, signal_link) {
-		GEM_BUG_ON(list_empty(&ce->signals));
+	rcu_read_lock();
+	list_for_each_entry_rcu(ce, &b->signalers, signal_link) {
+		struct i915_request *rq;
 
-		list_for_each_safe(pos, next, &ce->signals) {
-			struct i915_request *rq =
-				list_entry(pos, typeof(*rq), signal_link);
+		list_for_each_entry_rcu(rq, &ce->signals, signal_link) {
+			bool release;
 
 			GEM_BUG_ON(!check_signal_order(ce, rq));
 			if (!__request_completed(rq))
 				break;
 
+			if (!test_and_clear_bit(I915_FENCE_FLAG_SIGNAL,
						&rq->fence.flags))
+				break;
+
 			/*
 			 * Queue for execution after dropping the signaling
 			 * spinlock as the callback chain may end up adding
 			 * more signalers to the same context or engine.
 			 */
-			clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
+			spin_lock(&ce->signal_lock);
+			list_del_rcu(&rq->signal_link);
+			release = remove_signaling_context(b, ce);
+			spin_unlock(&ce->signal_lock);
+
 			if (__signal_request(rq))
 				/* We own signal_node now, xfer to local list */
 				signal = slist_add(&rq->signal_node, signal);
-		}
 
-		/*
-		 * We process the list deletion in bulk, only using a list_add
-		 * (not list_move) above but keeping the status of
-		 * rq->signal_link known with the I915_FENCE_FLAG_SIGNAL bit.
-		 */
-		if (!list_is_first(pos, &ce->signals)) {
-			/* Advance the list to the first incomplete request */
-			__list_del_many(&ce->signals, pos);
-			if (&ce->signals == pos) { /* now empty */
+			if (release) {
 				add_retire(b, ce->timeline);
-				remove_signaling_context(b, ce);
+				intel_context_put(ce);
 			}
 		}
 	}
-
-	spin_unlock(&b->irq_lock);
+	rcu_read_unlock();
 
 	llist_for_each_safe(signal, sn, signal) {
 		struct i915_request *rq =
@@ -298,14 +313,15 @@ intel_breadcrumbs_create(struct intel_engine_cs *irq_engine)
 	if (!b)
 		return NULL;
 
-	spin_lock_init(&b->irq_lock);
+	b->irq_engine = irq_engine;
+
+	spin_lock_init(&b->signalers_lock);
 	INIT_LIST_HEAD(&b->signalers);
 	init_llist_head(&b->signaled_requests);
 
+	spin_lock_init(&b->irq_lock);
 	init_irq_work(&b->irq_work, signal_irq_work);
 
-	b->irq_engine = irq_engine;
-
 	return b;
 }
 
@@ -347,9 +363,9 @@ void intel_breadcrumbs_free(struct intel_breadcrumbs *b)
 	kfree(b);
 }
 
-static void insert_breadcrumb(struct i915_request *rq,
-			      struct intel_breadcrumbs *b)
+static void insert_breadcrumb(struct i915_request *rq)
 {
+	struct intel_breadcrumbs *b = READ_ONCE(rq->engine)->breadcrumbs;
 	struct intel_context *ce = rq->context;
 	struct list_head *pos;
 
@@ -371,6 +387,7 @@ static void insert_breadcrumb(struct i915_request *rq,
 	}
 
 	if (list_empty(&ce->signals)) {
+		intel_context_get(ce);
 		add_signaling_context(b, ce);
 		pos = &ce->signals;
 	} else {
@@ -396,8 +413,9 @@ static void insert_breadcrumb(struct i915_request *rq,
 				break;
 		}
 	}
-	list_add(&rq->signal_link, pos);
+	list_add_rcu(&rq->signal_link, pos);
 	GEM_BUG_ON(!check_signal_order(ce, rq));
+	GEM_BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags));
 	set_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
 
 	/*
@@ -410,7 +428,7 @@ static void insert_breadcrumb(struct i915_request *rq,
 
 bool i915_request_enable_breadcrumb(struct i915_request *rq)
 {
-	struct intel_breadcrumbs *b;
+	struct intel_context *ce = rq->context;
 
 	/* Serialises with i915_request_retire() using rq->lock */
 	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags))
@@ -425,67 +443,30 @@ bool i915_request_enable_breadcrumb(struct i915_request *rq)
 	if (!test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags))
 		return true;
 
-	/*
-	 * rq->engine is locked by rq->engine->active.lock. That however
-	 * is not known until after rq->engine has been dereferenced and
-	 * the lock acquired. Hence we acquire the lock and then validate
-	 * that rq->engine still matches the lock we hold for it.
-	 *
-	 * Here, we are using the breadcrumb lock as a proxy for the
-	 * rq->engine->active.lock, and we know that since the breadcrumb
-	 * will be serialised within i915_request_submit/i915_request_unsubmit,
-	 * the engine cannot change while active as long as we hold the
-	 * breadcrumb lock on that engine.
-	 *
-	 * From the dma_fence_enable_signaling() path, we are outside of the
-	 * request submit/unsubmit path, and so we must be more careful to
-	 * acquire the right lock.
-	 */
-	b = READ_ONCE(rq->engine)->breadcrumbs;
-	spin_lock(&b->irq_lock);
-	while (unlikely(b != READ_ONCE(rq->engine)->breadcrumbs)) {
-		spin_unlock(&b->irq_lock);
-		b = READ_ONCE(rq->engine)->breadcrumbs;
-		spin_lock(&b->irq_lock);
-	}
-
 	/*
 	 * Now that we are finally serialised with request submit/unsubmit,
 	 * [with b->irq_lock] and with i915_request_retire() [via checking
 	 * SIGNALED with rq->lock] confirm the request is indeed active. If
 	 * it is no longer active, the breadcrumb will be attached upon
 	 * i915_request_submit().
	 */
+	spin_lock(&ce->signal_lock);
 	if (test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags))
-		insert_breadcrumb(rq, b);
-
-	spin_unlock(&b->irq_lock);
+		insert_breadcrumb(rq);
+	spin_unlock(&ce->signal_lock);
 
 	return true;
 }
 
 void i915_request_cancel_breadcrumb(struct i915_request *rq)
 {
-	struct intel_breadcrumbs *b = rq->engine->breadcrumbs;
+	struct intel_context *ce = rq->context;
+	bool release;
 
-	/*
-	 * We must wait for b->irq_lock so that we know the interrupt handler
-	 * has released its reference to the intel_context and has completed
-	 * the DMA_FENCE_FLAG_SIGNALED_BIT/I915_FENCE_FLAG_SIGNAL dance (if
-	 * required).
-	 */
-	spin_lock(&b->irq_lock);
-	if (test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags)) {
-		struct intel_context *ce = rq->context;
+	if (!test_and_clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags))
+		return;
 
-		list_del(&rq->signal_link);
-		if (list_empty(&ce->signals))
-			remove_signaling_context(b, ce);
+	spin_lock(&ce->signal_lock);
+	list_del_rcu(&rq->signal_link);
+	release = remove_signaling_context(rq->engine->breadcrumbs, ce);
+	spin_unlock(&ce->signal_lock);
+	if (release)
+		intel_context_put(ce);
 
-		clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
-		i915_request_put(rq);
-	}
-	spin_unlock(&b->irq_lock);
+	i915_request_put(rq);
 }
 
 static void print_signals(struct intel_breadcrumbs *b, struct drm_printer *p)
@@ -495,18 +476,17 @@ static void print_signals(struct intel_breadcrumbs *b, struct drm_printer *p)
 
 	drm_printf(p, "Signals:\n");
 
-	spin_lock_irq(&b->irq_lock);
-	list_for_each_entry(ce, &b->signalers, signal_link) {
-		list_for_each_entry(rq, &ce->signals, signal_link) {
+	rcu_read_lock();
+	list_for_each_entry_rcu(ce, &b->signalers, signal_link) {
+		list_for_each_entry_rcu(rq, &ce->signals, signal_link)
 			drm_printf(p, "\t[%llx:%llx%s] @ %dms\n",
 				   rq->fence.context, rq->fence.seqno,
 				   i915_request_completed(rq) ? "!" :
 				   i915_request_started(rq) ? "*" :
 				   "",
 				   jiffies_to_msecs(jiffies - rq->emitted_jiffies));
-		}
 	}
-	spin_unlock_irq(&b->irq_lock);
+	rcu_read_unlock();
 }
 
 void intel_engine_print_breadcrumbs(struct intel_engine_cs *engine,
@@ -29,18 +29,16 @@
  * the overhead of waking that client is much preferred.
  */
 struct intel_breadcrumbs {
-	spinlock_t irq_lock; /* protects the lists used in hardirq context */
-
 	/* Not all breadcrumbs are attached to physical HW */
 	struct intel_engine_cs *irq_engine;
 
+	spinlock_t signalers_lock; /* protects the list of signalers */
 	struct list_head signalers;
 	struct llist_head signaled_requests;
 
+	spinlock_t irq_lock; /* protects the interrupt from hardirq context */
 	struct irq_work irq_work; /* for use from inside irq_lock */
 
 	unsigned int irq_enabled;
 
 	bool irq_armed;
 };
@@ -25,9 +25,16 @@ static struct intel_context *intel_context_alloc(void)
 	return kmem_cache_zalloc(global.slab_ce, GFP_KERNEL);
 }
 
+static void rcu_context_free(struct rcu_head *rcu)
+{
+	struct intel_context *ce = container_of(rcu, typeof(*ce), rcu);
+
+	kmem_cache_free(global.slab_ce, ce);
+}
+
 void intel_context_free(struct intel_context *ce)
 {
-	kmem_cache_free(global.slab_ce, ce);
+	call_rcu(&ce->rcu, rcu_context_free);
 }
 
 struct intel_context *
@@ -356,8 +363,7 @@ static int __intel_context_active(struct i915_active *active)
 }
 
 void
-intel_context_init(struct intel_context *ce,
-		   struct intel_engine_cs *engine)
+intel_context_init(struct intel_context *ce, struct intel_engine_cs *engine)
 {
 	GEM_BUG_ON(!engine->cops);
 	GEM_BUG_ON(!engine->gt->vm);
@@ -373,7 +379,8 @@ intel_context_init(struct intel_context *ce,
 
 	ce->vm = i915_vm_get(engine->gt->vm);
 
-	INIT_LIST_HEAD(&ce->signal_link);
+	/* NB ce->signal_link/lock is used under RCU */
+	spin_lock_init(&ce->signal_lock);
 	INIT_LIST_HEAD(&ce->signals);
 
 	mutex_init(&ce->pin_mutex);
@@ -25,6 +25,7 @@ DECLARE_EWMA(runtime, 3, 8);
 struct i915_gem_context;
 struct i915_gem_ww_ctx;
 struct i915_vma;
+struct intel_breadcrumbs;
 struct intel_context;
 struct intel_ring;
 
@@ -44,7 +45,16 @@ struct intel_context_ops {
 };
 
 struct intel_context {
-	struct kref ref;
+	/*
+	 * Note: Some fields may be accessed under RCU.
+	 *
+	 * Unless otherwise noted a field can safely be assumed to be protected
+	 * by strong reference counting.
+	 */
+	union {
+		struct kref ref; /* no kref_get_unless_zero()! */
+		struct rcu_head rcu;
+	};
 
 	struct intel_engine_cs *engine;
 	struct intel_engine_cs *inflight;
@@ -54,8 +64,15 @@ struct intel_context {
 	struct i915_address_space *vm;
 	struct i915_gem_context __rcu *gem_context;
 
-	struct list_head signal_link;
-	struct list_head signals;
+	/*
+	 * @signal_lock protects the list of requests that need signaling,
+	 * @signals. While there are any requests that need signaling,
+	 * we add the context to the breadcrumbs worker, and remove it
+	 * upon completion/cancellation of the last request.
	 */
+	struct list_head signal_link; /* Accessed under RCU */
+	struct list_head signals; /* Guarded by signal_lock */
+	spinlock_t signal_lock; /* protects signals, the list of requests */
 
 	struct i915_vma *state;
 	struct intel_ring *ring;
@@ -131,7 +131,19 @@ static const struct drm_i915_mocs_entry skl_mocs_table[] = {
 	GEN9_MOCS_ENTRIES,
 	MOCS_ENTRY(I915_MOCS_CACHED,
 		   LE_3_WB | LE_TC_2_LLC_ELLC | LE_LRUM(3),
-		   L3_3_WB)
+		   L3_3_WB),
+
+	/*
+	 * mocs:63
+	 * - used by the L3 for all of its evictions.
+	 *   Thus it is expected to allow LLC cacheability to enable coherent
+	 *   flows to be maintained.
+	 * - used to force L3 uncachable cycles.
+	 *   Thus it is expected to make the surface L3 uncacheable.
+	 */
+	MOCS_ENTRY(63,
+		   LE_3_WB | LE_TC_1_LLC | LE_LRUM(3),
+		   L3_1_UC)
 };
 
 /* NOTE: the LE_TGT_CACHE is not used on Broxton */
@@ -883,6 +883,10 @@ void intel_rps_park(struct intel_rps *rps)
 		adj = -2;
 	rps->last_adj = adj;
 	rps->cur_freq = max_t(int, rps->cur_freq + adj, rps->min_freq);
+	if (rps->cur_freq < rps->efficient_freq) {
+		rps->cur_freq = rps->efficient_freq;
+		rps->last_adj = 0;
+	}
 
 	GT_TRACE(rps_to_gt(rps), "park:%x\n", rps->cur_freq);
 }
@@ -103,10 +103,13 @@ static int __shmem_rw(struct file *file, loff_t off,
 			return PTR_ERR(page);
 
 		vaddr = kmap(page);
-		if (write)
+		if (write) {
 			memcpy(vaddr + offset_in_page(off), ptr, this);
-		else
+			set_page_dirty(page);
+		} else {
 			memcpy(ptr, vaddr + offset_in_page(off), this);
+		}
 		mark_page_accessed(page);
 		kunmap(page);
 		put_page(page);
@@ -177,10 +177,8 @@ struct i915_request {
 	struct intel_ring *ring;
 	struct intel_timeline __rcu *timeline;
 
-	union {
-		struct list_head signal_link;
-		struct llist_node signal_node;
-	};
+	struct list_head signal_link;
+	struct llist_node signal_node;
 
 	/*
	 * The rcu epoch of when this request was allocated. Used to judiciously
@@ -22,6 +22,7 @@
 #include <drm/drm_fb_cma_helper.h>
 #include <drm/drm_fourcc.h>
 #include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
 #include <drm/drm_plane.h>
 #include <drm/drm_plane_helper.h>
 #include <drm/drm_vblank.h>
@@ -484,17 +485,27 @@ static void mxsfb_plane_overlay_atomic_update(struct drm_plane *plane,
 	writel(ctrl, mxsfb->base + LCDC_AS_CTRL);
 }
 
+static bool mxsfb_format_mod_supported(struct drm_plane *plane,
+				       uint32_t format,
+				       uint64_t modifier)
+{
+	return modifier == DRM_FORMAT_MOD_LINEAR;
+}
+
 static const struct drm_plane_helper_funcs mxsfb_plane_primary_helper_funcs = {
+	.prepare_fb = drm_gem_fb_prepare_fb,
 	.atomic_check = mxsfb_plane_atomic_check,
 	.atomic_update = mxsfb_plane_primary_atomic_update,
 };
 
 static const struct drm_plane_helper_funcs mxsfb_plane_overlay_helper_funcs = {
+	.prepare_fb = drm_gem_fb_prepare_fb,
 	.atomic_check = mxsfb_plane_atomic_check,
 	.atomic_update = mxsfb_plane_overlay_atomic_update,
 };
 
 static const struct drm_plane_funcs mxsfb_plane_funcs = {
+	.format_mod_supported = mxsfb_format_mod_supported,
 	.update_plane = drm_atomic_helper_update_plane,
 	.disable_plane = drm_atomic_helper_disable_plane,
 	.destroy = drm_plane_cleanup,
@@ -1214,8 +1214,8 @@ retry:
 		}
 
 		reg->bus.offset = handle;
-		ret = 0;
 	}
+	ret = 0;
 	break;
 default:
 	ret = -EINVAL;
@@ -195,8 +195,7 @@ static void sdi_bridge_mode_set(struct drm_bridge *bridge,
 	sdi->pixelclock = adjusted_mode->clock * 1000;
 }
 
-static void sdi_bridge_enable(struct drm_bridge *bridge,
-			      struct drm_bridge_state *bridge_state)
+static void sdi_bridge_enable(struct drm_bridge *bridge)
 {
 	struct sdi_device *sdi = drm_bridge_to_sdi(bridge);
 	struct dispc_clock_info dispc_cinfo;
@@ -259,8 +258,7 @@ err_get_dispc:
 	regulator_disable(sdi->vdds_sdi_reg);
 }
 
-static void sdi_bridge_disable(struct drm_bridge *bridge,
-			       struct drm_bridge_state *bridge_state)
+static void sdi_bridge_disable(struct drm_bridge *bridge)
 {
 	struct sdi_device *sdi = drm_bridge_to_sdi(bridge);
 
@@ -278,8 +276,8 @@ static const struct drm_bridge_funcs sdi_bridge_funcs = {
 	.mode_valid = sdi_bridge_mode_valid,
 	.mode_fixup = sdi_bridge_mode_fixup,
 	.mode_set = sdi_bridge_mode_set,
-	.atomic_enable = sdi_bridge_enable,
-	.atomic_disable = sdi_bridge_disable,
+	.enable = sdi_bridge_enable,
+	.disable = sdi_bridge_disable,
 };
 
 static void sdi_bridge_init(struct sdi_device *sdi)
@@ -629,7 +629,7 @@ static int acx565akm_probe(struct spi_device *spi)
 	lcd->spi = spi;
 	mutex_init(&lcd->mutex);
 
-	lcd->reset_gpio = devm_gpiod_get(&spi->dev, "reset", GPIOD_OUT_LOW);
+	lcd->reset_gpio = devm_gpiod_get(&spi->dev, "reset", GPIOD_OUT_HIGH);
 	if (IS_ERR(lcd->reset_gpio)) {
 		dev_err(&spi->dev, "failed to get reset GPIO\n");
 		return PTR_ERR(lcd->reset_gpio);
@@ -544,7 +544,7 @@ static int rockchip_lvds_bind(struct device *dev, struct device *master,
 	struct device_node *port, *endpoint;
 	int ret = 0, child_count = 0;
 	const char *name;
-	u32 endpoint_id;
+	u32 endpoint_id = 0;
 
 	lvds->drm_dev = drm_dev;
 	port = of_graph_get_port_by_id(dev->of_node, 1);
@@ -90,7 +90,7 @@ static int tegra_drm_open(struct drm_device *drm, struct drm_file *filp)
 	if (!fpriv)
 		return -ENOMEM;
 
-	idr_init(&fpriv->contexts);
+	idr_init_base(&fpriv->contexts, 1);
 	mutex_init(&fpriv->lock);
 	filp->driver_priv = fpriv;
 
@@ -129,7 +129,6 @@ int tegra_output_probe(struct tegra_output *output)
 
 		if (!output->ddc) {
 			err = -EPROBE_DEFER;
-			of_node_put(ddc);
 			return err;
 		}
 	}
@@ -397,7 +397,6 @@ struct tegra_sor;
 struct tegra_sor_ops {
 	const char *name;
 	int (*probe)(struct tegra_sor *sor);
-	int (*remove)(struct tegra_sor *sor);
 	void (*audio_enable)(struct tegra_sor *sor);
 	void (*audio_disable)(struct tegra_sor *sor);
 };
@@ -2942,6 +2941,24 @@ static const struct drm_encoder_helper_funcs tegra_sor_dp_helpers = {
 	.atomic_check = tegra_sor_encoder_atomic_check,
 };
 
+static void tegra_sor_disable_regulator(void *data)
+{
+	struct regulator *reg = data;
+
+	regulator_disable(reg);
+}
+
+static int tegra_sor_enable_regulator(struct tegra_sor *sor, struct regulator *reg)
+{
+	int err;
+
+	err = regulator_enable(reg);
+	if (err)
+		return err;
+
+	return devm_add_action_or_reset(sor->dev, tegra_sor_disable_regulator, reg);
+}
+
 static int tegra_sor_hdmi_probe(struct tegra_sor *sor)
 {
 	int err;
@@ -2953,7 +2970,7 @@ static int tegra_sor_hdmi_probe(struct tegra_sor *sor)
 		return PTR_ERR(sor->avdd_io_supply);
 	}
 
-	err = regulator_enable(sor->avdd_io_supply);
+	err = tegra_sor_enable_regulator(sor, sor->avdd_io_supply);
 	if (err < 0) {
 		dev_err(sor->dev, "failed to enable AVDD I/O supply: %d\n",
 			err);
@@ -2967,7 +2984,7 @@ static int tegra_sor_hdmi_probe(struct tegra_sor *sor)
 		return PTR_ERR(sor->vdd_pll_supply);
 	}
 
-	err = regulator_enable(sor->vdd_pll_supply);
+	err = tegra_sor_enable_regulator(sor, sor->vdd_pll_supply);
 	if (err < 0) {
 		dev_err(sor->dev, "failed to enable VDD PLL supply: %d\n",
 			err);
@@ -2981,7 +2998,7 @@ static int tegra_sor_hdmi_probe(struct tegra_sor *sor)
 		return PTR_ERR(sor->hdmi_supply);
 	}
 
-	err = regulator_enable(sor->hdmi_supply);
+	err = tegra_sor_enable_regulator(sor, sor->hdmi_supply);
 	if (err < 0) {
 		dev_err(sor->dev, "failed to enable HDMI supply: %d\n", err);
 		return err;
@@ -2992,19 +3009,9 @@ static int tegra_sor_hdmi_probe(struct tegra_sor *sor)
 	return 0;
 }
 
-static int tegra_sor_hdmi_remove(struct tegra_sor *sor)
-{
-	regulator_disable(sor->hdmi_supply);
-	regulator_disable(sor->vdd_pll_supply);
-	regulator_disable(sor->avdd_io_supply);
-
-	return 0;
-}
-
 static const struct tegra_sor_ops tegra_sor_hdmi_ops = {
 	.name = "HDMI",
 	.probe = tegra_sor_hdmi_probe,
-	.remove = tegra_sor_hdmi_remove,
 	.audio_enable = tegra_sor_hdmi_audio_enable,
 	.audio_disable = tegra_sor_hdmi_audio_disable,
 };
@@ -3017,7 +3024,7 @@ static int tegra_sor_dp_probe(struct tegra_sor *sor)
 	if (IS_ERR(sor->avdd_io_supply))
 		return PTR_ERR(sor->avdd_io_supply);
 
-	err = regulator_enable(sor->avdd_io_supply);
+	err = tegra_sor_enable_regulator(sor, sor->avdd_io_supply);
 	if (err < 0)
 		return err;
 
@@ -3025,25 +3032,16 @@ static int tegra_sor_dp_probe(struct tegra_sor *sor)
 	if (IS_ERR(sor->vdd_pll_supply))
 		return PTR_ERR(sor->vdd_pll_supply);
 
-	err = regulator_enable(sor->vdd_pll_supply);
+	err = tegra_sor_enable_regulator(sor, sor->vdd_pll_supply);
 	if (err < 0)
 		return err;
 
 	return 0;
 }
 
-static int tegra_sor_dp_remove(struct tegra_sor *sor)
-{
-	regulator_disable(sor->vdd_pll_supply);
-	regulator_disable(sor->avdd_io_supply);
-
-	return 0;
-}
-
 static const struct tegra_sor_ops tegra_sor_dp_ops = {
 	.name = "DP",
 	.probe = tegra_sor_dp_probe,
-	.remove = tegra_sor_dp_remove,
 };
 
 static int tegra_sor_init(struct host1x_client *client)
@@ -3145,6 +3143,7 @@ static int tegra_sor_init(struct host1x_client *client)
 		if (err < 0) {
 			dev_err(sor->dev, "failed to deassert SOR reset: %d\n",
 				err);
+			clk_disable_unprepare(sor->clk);
 			return err;
 		}
 
@@ -3152,12 +3151,17 @@ static int tegra_sor_init(struct host1x_client *client)
 	}
 
 	err = clk_prepare_enable(sor->clk_safe);
-	if (err < 0)
+	if (err < 0) {
+		clk_disable_unprepare(sor->clk);
 		return err;
+	}
 
 	err = clk_prepare_enable(sor->clk_dp);
-	if (err < 0)
+	if (err < 0) {
+		clk_disable_unprepare(sor->clk_safe);
+		clk_disable_unprepare(sor->clk);
 		return err;
+	}
 
 	return 0;
 }
@@ -3764,17 +3768,16 @@ static int tegra_sor_probe(struct platform_device *pdev)
 		return err;
 
 	err = tegra_output_probe(&sor->output);
-	if (err < 0) {
-		dev_err(&pdev->dev, "failed to probe output: %d\n", err);
-		return err;
-	}
+	if (err < 0)
+		return dev_err_probe(&pdev->dev, err,
+				     "failed to probe output\n");
 
 	if (sor->ops && sor->ops->probe) {
 		err = sor->ops->probe(sor);
 		if (err < 0) {
 			dev_err(&pdev->dev, "failed to probe %s: %d\n",
 				sor->ops->name, err);
-			goto output;
+			goto remove;
 		}
 	}
 
@@ -3955,9 +3958,6 @@ unregister:
 rpm_disable:
 	pm_runtime_disable(&pdev->dev);
 remove:
-	if (sor->ops && sor->ops->remove)
-		sor->ops->remove(sor);
-output:
 	tegra_output_remove(&sor->output);
 	return err;
 }
@@ -3976,12 +3976,6 @@ static int tegra_sor_remove(struct platform_device *pdev)
 
 	pm_runtime_disable(&pdev->dev);
 
-	if (sor->ops && sor->ops->remove) {
-		err = sor->ops->remove(sor);
-		if (err < 0)
-			dev_err(&pdev->dev, "failed to remove SOR: %d\n", err);
-	}
-
 	tegra_output_remove(&sor->output);
 
 	return 0;
@@ -734,6 +734,7 @@ config I2C_LPC2K
 config I2C_MLXBF
 	tristate "Mellanox BlueField I2C controller"
 	depends on MELLANOX_PLATFORM && ARM64
+	select I2C_SLAVE
 	help
 	  Enabling this option will add I2C SMBus support for Mellanox BlueField
 	  system.
@@ -412,6 +412,19 @@ static void i2c_imx_dma_free(struct imx_i2c_struct *i2c_imx)
 	dma->chan_using = NULL;
 }
 
+static void i2c_imx_clear_irq(struct imx_i2c_struct *i2c_imx, unsigned int bits)
+{
+	unsigned int temp;
+
+	/*
+	 * i2sr_clr_opcode is the value to clear all interrupts. Here we want to
+	 * clear only <bits>, so we write ~i2sr_clr_opcode with just <bits>
+	 * toggled. This is required because i.MX needs W0C and Vybrid uses W1C.
+	 */
+	temp = ~i2c_imx->hwdata->i2sr_clr_opcode ^ bits;
+	imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2SR);
+}
+
 static int i2c_imx_bus_busy(struct imx_i2c_struct *i2c_imx, int for_busy, bool atomic)
 {
 	unsigned long orig_jiffies = jiffies;
@@ -424,8 +437,7 @@ static int i2c_imx_bus_busy(struct imx_i2c_struct *i2c_imx, int for_busy, bool atomic)
 
 		/* check for arbitration lost */
 		if (temp & I2SR_IAL) {
-			temp &= ~I2SR_IAL;
-			imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2SR);
+			i2c_imx_clear_irq(i2c_imx, I2SR_IAL);
 			return -EAGAIN;
 		}
 
@@ -469,7 +481,7 @@ static int i2c_imx_trx_complete(struct imx_i2c_struct *i2c_imx, bool atomic)
		 */
 		readb_poll_timeout_atomic(addr, regval, regval & I2SR_IIF, 5, 1000 + 100);
 		i2c_imx->i2csr = regval;
-		imx_i2c_write_reg(0, i2c_imx, IMX_I2C_I2SR);
+		i2c_imx_clear_irq(i2c_imx, I2SR_IIF | I2SR_IAL);
 	} else {
 		wait_event_timeout(i2c_imx->queue, i2c_imx->i2csr & I2SR_IIF, HZ / 10);
 	}
@@ -478,6 +490,16 @@ static int i2c_imx_trx_complete(struct imx_i2c_struct *i2c_imx, bool atomic)
 		dev_dbg(&i2c_imx->adapter.dev, "<%s> Timeout\n", __func__);
 		return -ETIMEDOUT;
 	}
+
+	/* check for arbitration lost */
+	if (i2c_imx->i2csr & I2SR_IAL) {
+		dev_dbg(&i2c_imx->adapter.dev, "<%s> Arbitration lost\n", __func__);
+		i2c_imx_clear_irq(i2c_imx, I2SR_IAL);
+
+		i2c_imx->i2csr = 0;
+		return -EAGAIN;
+	}
+
 	dev_dbg(&i2c_imx->adapter.dev, "<%s> TRX complete\n", __func__);
 	i2c_imx->i2csr = 0;
 	return 0;
@@ -593,6 +615,8 @@ static void i2c_imx_stop(struct imx_i2c_struct *i2c_imx, bool atomic)
 	/* Stop I2C transaction */
 	dev_dbg(&i2c_imx->adapter.dev, "<%s>\n", __func__);
 	temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
+	if (!(temp & I2CR_MSTA))
+		i2c_imx->stopped = 1;
 	temp &= ~(I2CR_MSTA | I2CR_MTX);
 	if (i2c_imx->dma)
 		temp &= ~I2CR_DMAEN;
@@ -623,9 +647,7 @@ static irqreturn_t i2c_imx_isr(int irq, void *dev_id)
 	if (temp & I2SR_IIF) {
 		/* save status register */
 		i2c_imx->i2csr = temp;
-		temp &= ~I2SR_IIF;
-		temp |= (i2c_imx->hwdata->i2sr_clr_opcode & I2SR_IIF);
-		imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2SR);
+		i2c_imx_clear_irq(i2c_imx, I2SR_IIF);
 		wake_up(&i2c_imx->queue);
 		return IRQ_HANDLED;
 	}
@@ -758,9 +780,12 @@ static int i2c_imx_dma_read(struct imx_i2c_struct *i2c_imx,
		 */
 		dev_dbg(dev, "<%s> clear MSTA\n", __func__);
 		temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
+		if (!(temp & I2CR_MSTA))
+			i2c_imx->stopped = 1;
 		temp &= ~(I2CR_MSTA | I2CR_MTX);
 		imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR);
-		i2c_imx_bus_busy(i2c_imx, 0, false);
+		if (!i2c_imx->stopped)
+			i2c_imx_bus_busy(i2c_imx, 0, false);
 	} else {
 		/*
		 * For i2c master receiver repeat restart operation like:
@@ -885,9 +910,12 @@ static int i2c_imx_read(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs,
 			dev_dbg(&i2c_imx->adapter.dev,
 				"<%s> clear MSTA\n", __func__);
 			temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
+			if (!(temp & I2CR_MSTA))
+				i2c_imx->stopped = 1;
 			temp &= ~(I2CR_MSTA | I2CR_MTX);
 			imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR);
-			i2c_imx_bus_busy(i2c_imx, 0, atomic);
+			if (!i2c_imx->stopped)
+				i2c_imx_bus_busy(i2c_imx, 0, atomic);
 		} else {
 			/*
			 * For i2c master receiver repeat restart operation like:
@@ -1258,9 +1258,9 @@ static int mlxbf_i2c_get_gpio(struct platform_device *pdev,
 		return -EFAULT;
 
 	gpio_res->io = devm_ioremap(dev, params->start, size);
-	if (IS_ERR(gpio_res->io)) {
+	if (!gpio_res->io) {
 		devm_release_mem_region(dev, params->start, size);
-		return PTR_ERR(gpio_res->io);
+		return -ENOMEM;
 	}
 
 	return 0;
@@ -1323,9 +1323,9 @@ static int mlxbf_i2c_get_corepll(struct platform_device *pdev,
 		return -EFAULT;
 
 	corepll_res->io = devm_ioremap(dev, params->start, size);
-	if (IS_ERR(corepll_res->io)) {
+	if (!corepll_res->io) {
 		devm_release_mem_region(dev, params->start, size);
-		return PTR_ERR(corepll_res->io);
+		return -ENOMEM;
 	}
 
 	return 0;
@@ -1717,9 +1717,9 @@ static int mlxbf_i2c_init_coalesce(struct platform_device *pdev,
 		return -EFAULT;
 
 	coalesce_res->io = ioremap(params->start, size);
-	if (IS_ERR(coalesce_res->io)) {
+	if (!coalesce_res->io) {
 		release_mem_region(params->start, size);
-		return PTR_ERR(coalesce_res->io);
+		return -ENOMEM;
 	}
 
 	priv->coalesce = coalesce_res;
@@ -194,9 +194,9 @@ static irqreturn_t cci_isr(int irq, void *dev)
 	if (unlikely(val & CCI_IRQ_STATUS_0_I2C_M1_ERROR)) {
 		if (val & CCI_IRQ_STATUS_0_I2C_M1_Q0_NACK_ERR ||
 		    val & CCI_IRQ_STATUS_0_I2C_M1_Q1_NACK_ERR)
-			cci->master[0].status = -ENXIO;
+			cci->master[1].status = -ENXIO;
 		else
-			cci->master[0].status = -EIO;
+			cci->master[1].status = -EIO;
 
 		writel(CCI_HALT_REQ_I2C_M1_Q0Q1, cci->base + CCI_HALT_REQ);
 		ret = IRQ_HANDLED;
@@ -801,7 +801,8 @@ static int qup_i2c_bam_schedule_desc(struct qup_i2c_dev *qup)
 	if (ret || qup->bus_err || qup->qup_err) {
 		reinit_completion(&qup->xfer);
 
-		if (qup_i2c_change_state(qup, QUP_RUN_STATE)) {
+		ret = qup_i2c_change_state(qup, QUP_RUN_STATE);
+		if (ret) {
 			dev_err(qup->dev, "change to run state timed out");
 			goto desc_err;
 		}
@@ -1140,6 +1140,20 @@ static bool __init intel_idle_max_cstate_reached(int cstate)
 	return false;
 }
 
+static bool __init intel_idle_state_needs_timer_stop(struct cpuidle_state *state)
+{
+	unsigned long eax = flg2MWAIT(state->flags);
+
+	if (boot_cpu_has(X86_FEATURE_ARAT))
+		return false;
+
+	/*
+	 * Switch over to one-shot tick broadcast if the target C-state
+	 * is deeper than C1.
+	 */
+	return !!((eax >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK);
+}
+
 #ifdef CONFIG_ACPI_PROCESSOR_CSTATE
 #include <acpi/processor.h>
 
@@ -1210,20 +1224,6 @@ static bool __init intel_idle_acpi_cst_extract(void)
 	return false;
 }
 
-static bool __init intel_idle_state_needs_timer_stop(struct cpuidle_state *state)
-{
-	unsigned long eax = flg2MWAIT(state->flags);
-
-	if (boot_cpu_has(X86_FEATURE_ARAT))
-		return false;
-
-	/*
-	 * Switch over to one-shot tick broadcast if the target C-state
-	 * is deeper than C1.
-	 */
-	return !!((eax >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK);
-}
-
 static void __init intel_idle_init_cstates_acpi(struct cpuidle_driver *drv)
 {
 	int cstate, limit = min_t(int, CPUIDLE_STATE_MAX, acpi_state_table.count);
@@ -241,6 +241,7 @@ static const struct xpad_device {
 	{ 0x1038, 0x1430, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 },
 	{ 0x1038, 0x1431, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 },
 	{ 0x11c9, 0x55f0, "Nacon GC-100XF", 0, XTYPE_XBOX360 },
+	{ 0x1209, 0x2882, "Ardwiino Controller", 0, XTYPE_XBOX360 },
 	{ 0x12ab, 0x0004, "Honey Bee Xbox360 dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
 	{ 0x12ab, 0x0301, "PDP AFTERGLOW AX.1", 0, XTYPE_XBOX360 },
 	{ 0x12ab, 0x0303, "Mortal Kombat Klassic FightStick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
@@ -418,6 +419,7 @@ static const struct usb_device_id xpad_table[] = {
 	XPAD_XBOXONE_VENDOR(0x0f0d),		/* Hori Controllers */
 	XPAD_XBOX360_VENDOR(0x1038),		/* SteelSeries Controllers */
 	XPAD_XBOX360_VENDOR(0x11c9),		/* Nacon GC100XF */
+	XPAD_XBOX360_VENDOR(0x1209),		/* Ardwiino Controllers */
 	XPAD_XBOX360_VENDOR(0x12ab),		/* X-Box 360 dance pads */
 	XPAD_XBOX360_VENDOR(0x1430),		/* RedOctane X-Box 360 controllers */
 	XPAD_XBOX360_VENDOR(0x146b),		/* BigBen Interactive Controllers */
@@ -9,6 +9,7 @@
 #include <linux/module.h>
 #include <linux/input.h>
 #include <linux/init.h>
+#include <linux/irq.h>
 #include <linux/kernel.h>
 #include <linux/acpi.h>
 #include <linux/dmi.h>
@@ -219,6 +219,10 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
 			DMI_MATCH(DMI_SYS_VENDOR, "PEGATRON CORPORATION"),
 			DMI_MATCH(DMI_PRODUCT_NAME, "C15B"),
 		},
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "ByteSpeed LLC"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "ByteSpeed Laptop C15B"),
+		},
 	},
 	{ }
 };
@@ -1471,7 +1471,8 @@ static int __init i8042_setup_aux(void)
 	if (error)
 		goto err_free_ports;
 
-	if (aux_enable())
+	error = aux_enable();
+	if (error)
 		goto err_free_irq;
 
 	i8042_aux_irq_registered = true;
@@ -2183,11 +2183,11 @@ static int mxt_initialize(struct mxt_data *data)
 		msleep(MXT_FW_RESET_TIME);
 	}
 
-	error = mxt_acquire_irq(data);
+	error = mxt_check_retrigen(data);
 	if (error)
 		return error;
 
-	error = mxt_check_retrigen(data);
+	error = mxt_acquire_irq(data);
 	if (error)
 		return error;
 
@@ -712,10 +712,6 @@ static bool block_size_is_power_of_two(struct cache *cache)
 	return cache->sectors_per_block_shift >= 0;
 }
 
-/* gcc on ARM generates spurious references to __udivdi3 and __umoddi3 */
-#if defined(CONFIG_ARM) && __GNUC__ == 4 && __GNUC_MINOR__ <= 6
-__always_inline
-#endif
 static dm_block_t block_div(dm_block_t b, uint32_t n)
 {
 	do_div(b, n);
@@ -3462,7 +3462,7 @@ static int get_mac(struct crypto_shash **hash, struct alg_spec *a, char **error,
 	int r;
 
 	if (a->alg_string) {
-		*hash = crypto_alloc_shash(a->alg_string, 0, 0);
+		*hash = crypto_alloc_shash(a->alg_string, 0, CRYPTO_ALG_ALLOCATES_MEMORY);
 		if (IS_ERR(*hash)) {
 			*error = error_alg;
 			r = PTR_ERR(*hash);
@@ -3519,7 +3519,7 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
 	struct journal_completion comp;
 
 	comp.ic = ic;
-	ic->journal_crypt = crypto_alloc_skcipher(ic->journal_crypt_alg.alg_string, 0, 0);
+	ic->journal_crypt = crypto_alloc_skcipher(ic->journal_crypt_alg.alg_string, 0, CRYPTO_ALG_ALLOCATES_MEMORY);
 	if (IS_ERR(ic->journal_crypt)) {
 		*error = "Invalid journal cipher";
 		r = PTR_ERR(ic->journal_crypt);
@@ -18,7 +18,6 @@
 #include <linux/mutex.h>
 #include <linux/delay.h>
 #include <linux/atomic.h>
-#include <linux/lcm.h>
 #include <linux/blk-mq.h>
 #include <linux/mount.h>
 #include <linux/dax.h>
@@ -1247,12 +1246,6 @@ void dm_table_event_callback(struct dm_table *t,
 
 void dm_table_event(struct dm_table *t)
 {
-	/*
-	 * You can no longer call dm_table_event() from interrupt
-	 * context, use a bottom half instead.
-	 */
-	BUG_ON(in_interrupt());
-
 	mutex_lock(&_event_lock);
 	if (t->event_fn)
 		t->event_fn(t->event_context);
@@ -1455,10 +1448,6 @@ int dm_calculate_queue_limits(struct dm_table *table,
 			zone_sectors = ti_limits.chunk_sectors;
 		}
 
-		/* Stack chunk_sectors if target-specific splitting is required */
-		if (ti->max_io_len)
-			ti_limits.chunk_sectors = lcm_not_zero(ti->max_io_len,
-							       ti_limits.chunk_sectors);
 		/* Set I/O hints portion of queue limits */
 		if (ti->type->io_hints)
 			ti->type->io_hints(ti, &ti_limits);
@@ -319,7 +319,7 @@ err1:
 #else
 static int persistent_memory_claim(struct dm_writecache *wc)
 {
-	BUG();
+	return -EOPNOTSUPP;
 }
 #endif
 
@@ -2041,7 +2041,7 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)
 	struct wc_memory_superblock s;
 
 	static struct dm_arg _args[] = {
-		{0, 10, "Invalid number of feature args"},
+		{0, 16, "Invalid number of feature args"},
 	};
 
 	as.argc = argc;
@@ -2479,6 +2479,8 @@ static void writecache_status(struct dm_target *ti, status_type_t type,
 		extra_args += 2;
 	if (wc->autocommit_time_set)
 		extra_args += 2;
+	if (wc->max_age != MAX_AGE_UNSPECIFIED)
+		extra_args += 2;
 	if (wc->cleaner)
 		extra_args++;
 	if (wc->writeback_fua_set)
@@ -476,8 +476,10 @@ static int dm_blk_report_zones(struct gendisk *disk, sector_t sector,
 		return -EAGAIN;
 
 	map = dm_get_live_table(md, &srcu_idx);
-	if (!map)
-		return -EIO;
+	if (!map) {
+		ret = -EIO;
+		goto out;
+	}
 
 	do {
 		struct dm_target *tgt;
@@ -507,7 +509,6 @@ out:
 
 static int dm_prepare_ioctl(struct mapped_device *md, int *srcu_idx,
 			    struct block_device **bdev)
-	__acquires(md->io_barrier)
 {
 	struct dm_target *tgt;
 	struct dm_table *map;
@@ -541,7 +542,6 @@ retry:
 }
 
 static void dm_unprepare_ioctl(struct mapped_device *md, int srcu_idx)
-	__releases(md->io_barrier)
 {
 	dm_put_live_table(md, srcu_idx);
 }
@@ -1037,15 +1037,18 @@ static sector_t max_io_len(struct dm_target *ti, sector_t sector)
 	sector_t max_len;
 
 	/*
-	 * Does the target need to split even further?
-	 * - q->limits.chunk_sectors reflects ti->max_io_len so
-	 *   blk_max_size_offset() provides required splitting.
-	 * - blk_max_size_offset() also respects q->limits.max_sectors
+	 * Does the target need to split IO even further?
+	 * - varied (per target) IO splitting is a tenet of DM; this
+	 *   explains why stacked chunk_sectors based splitting via
+	 *   blk_max_size_offset() isn't possible here. So pass in
+	 *   ti->max_io_len to override stacked chunk_sectors.
	 */
-	max_len = blk_max_size_offset(ti->table->md->queue,
-				      target_offset);
-	if (len > max_len)
-		len = max_len;
+	if (ti->max_io_len) {
+		max_len = blk_max_size_offset(ti->table->md->queue,
+					      target_offset, ti->max_io_len);
+		if (len > max_len)
+			len = max_len;
+	}
 
 	return len;
 }
@@ -1196,11 +1199,9 @@ static int dm_dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
		 * ->zero_page_range() is mandatory dax operation. If we are
		 * here, something is wrong.
		 */
-		dm_put_live_table(md, srcu_idx);
 		goto out;
 	}
 	ret = ti->type->dax_zero_page_range(ti, pgoff, nr_pages);
-
 out:
 	dm_put_live_table(md, srcu_idx);
 
@@ -242,16 +242,16 @@ delete_cdev_device:
 
 static void device_cdev_sysfs_del(struct hl_device *hdev)
 {
-	/* device_release() won't be called so must free devices explicitly */
-	if (!hdev->cdev_sysfs_created) {
-		kfree(hdev->dev_ctrl);
-		kfree(hdev->dev);
-		return;
-	}
+	if (!hdev->cdev_sysfs_created)
+		goto put_devices;
 
 	hl_sysfs_fini(hdev);
 	cdev_device_del(&hdev->cdev_ctrl, hdev->dev_ctrl);
 	cdev_device_del(&hdev->cdev, hdev->dev);
+
+put_devices:
+	put_device(hdev->dev);
+	put_device(hdev->dev_ctrl);
 }
 
 static void device_hard_reset_pending(struct work_struct *work)
@@ -1430,9 +1430,9 @@ sw_fini:
 early_fini:
 	device_early_fini(hdev);
 free_dev_ctrl:
-	kfree(hdev->dev_ctrl);
+	put_device(hdev->dev_ctrl);
 free_dev:
-	kfree(hdev->dev);
+	put_device(hdev->dev);
 out_disabled:
 	hdev->disabled = true;
 	if (add_cdev_sysfs_on_err)
@@ -46,14 +46,4 @@ config INTEL_MEI_TXE
 	  Supported SoCs:
 	  Intel Bay Trail
 
-config INTEL_MEI_VIRTIO
-	tristate "Intel MEI interface emulation with virtio framework"
-	select INTEL_MEI
-	depends on X86 && PCI && VIRTIO_PCI
-	help
-	  This module implements mei hw emulation over virtio transport.
-	  The module will be called mei_virtio.
-	  Enable this if your virtual machine supports virtual mei
-	  device over virtio.
-
 source "drivers/misc/mei/hdcp/Kconfig"

@@ -22,9 +22,6 @@ obj-$(CONFIG_INTEL_MEI_TXE) += mei-txe.o
 mei-txe-objs := pci-txe.o
 mei-txe-objs += hw-txe.o
 
-obj-$(CONFIG_INTEL_MEI_VIRTIO) += mei-virtio.o
-mei-virtio-objs := hw-virtio.o
-
 mei-$(CONFIG_EVENT_TRACING) += mei-trace.o
 CFLAGS_mei-trace.o = -I$(src)
drivers/misc/mei/hw-virtio.c (entire file deleted)
@@ -1,874 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * Intel Management Engine Interface (Intel MEI) Linux driver
 * Copyright (c) 2018-2020, Intel Corporation.
 */
#include <linux/err.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ids.h>
#include <linux/atomic.h>

#include "mei_dev.h"
#include "hbm.h"
#include "client.h"

#define MEI_VIRTIO_RPM_TIMEOUT 500
/* ACRN virtio device types */
#ifndef VIRTIO_ID_MEI
#define VIRTIO_ID_MEI 0xFFFE /* virtio mei */
#endif

/**
 * struct mei_virtio_cfg - settings passed from the virtio backend
 * @buf_depth: read buffer depth in slots (4bytes)
 * @hw_ready: hw is ready for operation
 * @host_reset: synchronize reset with virtio backend
 * @reserved: reserved for alignment
 * @fw_status: FW status
 */
struct mei_virtio_cfg {
	u32 buf_depth;
	u8 hw_ready;
	u8 host_reset;
	u8 reserved[2];
	u32 fw_status[MEI_FW_STATUS_MAX];
} __packed;

struct mei_virtio_hw {
	struct mei_device mdev;
	char name[32];

	struct virtqueue *in;
	struct virtqueue *out;

	bool host_ready;
	struct work_struct intr_handler;

	u32 *recv_buf;
	u8 recv_rdy;
	size_t recv_sz;
	u32 recv_idx;
	u32 recv_len;

	/* send buffer */
	atomic_t hbuf_ready;
	const void *send_hdr;
	const void *send_buf;

	struct mei_virtio_cfg cfg;
};

#define to_virtio_hw(_dev) container_of(_dev, struct mei_virtio_hw, mdev)

/**
 * mei_virtio_fw_status() - read status register of mei
 * @dev: mei device
 * @fw_status: fw status register values
 *
 * Return: always 0
 */
static int mei_virtio_fw_status(struct mei_device *dev,
				struct mei_fw_status *fw_status)
{
	struct virtio_device *vdev = dev_to_virtio(dev->dev);

	fw_status->count = MEI_FW_STATUS_MAX;
	virtio_cread_bytes(vdev, offsetof(struct mei_virtio_cfg, fw_status),
			   fw_status->status, sizeof(fw_status->status));
	return 0;
}

/**
 * mei_virtio_pg_state() - translate internal pg state
 *	to the mei power gating state
 *	There is no power management in ACRN mode always return OFF
 * @dev: mei device
 *
 * Return:
 * * MEI_PG_OFF - if aliveness is on (always)
 * * MEI_PG_ON - (never)
 */
static inline enum mei_pg_state mei_virtio_pg_state(struct mei_device *dev)
{
	return MEI_PG_OFF;
}

/**
 * mei_virtio_hw_config() - configure hw dependent settings
 *
 * @dev: mei device
 *
 * Return: always 0
 */
static int mei_virtio_hw_config(struct mei_device *dev)
{
	return 0;
}

/**
 * mei_virtio_hbuf_empty_slots() - counts write empty slots.
 * @dev: the device structure
 *
 * Return: always return frontend buf size if buffer is ready, 0 otherwise
 */
static int mei_virtio_hbuf_empty_slots(struct mei_device *dev)
{
	struct mei_virtio_hw *hw = to_virtio_hw(dev);

	return (atomic_read(&hw->hbuf_ready) == 1) ? hw->cfg.buf_depth : 0;
}

/**
 * mei_virtio_hbuf_is_ready() - checks if write buffer is ready
 * @dev: the device structure
 *
 * Return: true if hbuf is ready
 */
static bool mei_virtio_hbuf_is_ready(struct mei_device *dev)
{
	struct mei_virtio_hw *hw = to_virtio_hw(dev);

	return atomic_read(&hw->hbuf_ready) == 1;
}

/**
 * mei_virtio_hbuf_max_depth() - returns depth of FE write buffer.
 * @dev: the device structure
 *
 * Return: size of frontend write buffer in bytes
 */
static u32 mei_virtio_hbuf_depth(const struct mei_device *dev)
{
	struct mei_virtio_hw *hw = to_virtio_hw(dev);

	return hw->cfg.buf_depth;
}

/**
 * mei_virtio_intr_clear() - clear and stop interrupts
 * @dev: the device structure
 */
static void mei_virtio_intr_clear(struct mei_device *dev)
{
	/*
	 * In our virtio solution, there are two types of interrupts,
	 * vq interrupt and config change interrupt.
	 * 1) start/reset rely on virtio config changed interrupt;
	 * 2) send/recv rely on virtio virtqueue interrupts.
	 * They are all virtual interrupts. So, we don't have corresponding
	 * operation to do here.
	 */
}

/**
 * mei_virtio_intr_enable() - enables mei BE virtqueues callbacks
 * @dev: the device structure
 */
static void mei_virtio_intr_enable(struct mei_device *dev)
{
	struct mei_virtio_hw *hw = to_virtio_hw(dev);
	struct virtio_device *vdev = dev_to_virtio(dev->dev);

	virtio_config_enable(vdev);

	virtqueue_enable_cb(hw->in);
	virtqueue_enable_cb(hw->out);
}

/**
 * mei_virtio_intr_disable() - disables mei BE virtqueues callbacks
 *
 * @dev: the device structure
 */
static void mei_virtio_intr_disable(struct mei_device *dev)
{
	struct mei_virtio_hw *hw = to_virtio_hw(dev);
	struct virtio_device *vdev = dev_to_virtio(dev->dev);

	virtio_config_disable(vdev);

	virtqueue_disable_cb(hw->in);
	virtqueue_disable_cb(hw->out);
}

/**
 * mei_virtio_synchronize_irq() - wait for pending IRQ handlers for all
 *     virtqueue
 * @dev: the device structure
 */
static void mei_virtio_synchronize_irq(struct mei_device *dev)
{
	struct mei_virtio_hw *hw = to_virtio_hw(dev);

	/*
	 * Now, all IRQ handlers are converted to workqueue.
	 * Change synchronize irq to flush this work.
	 */
	flush_work(&hw->intr_handler);
}

static void mei_virtio_free_outbufs(struct mei_virtio_hw *hw)
{
	kfree(hw->send_hdr);
	kfree(hw->send_buf);
	hw->send_hdr = NULL;
	hw->send_buf = NULL;
}

/**
 * mei_virtio_write_message() - writes a message to mei virtio back-end service.
 * @dev: the device structure
 * @hdr: mei header of message
 * @hdr_len: header length
 * @data: message payload will be written
 * @data_len: message payload length
 *
 * Return:
 * * 0: on success
 * * -EIO: if write has failed
 * * -ENOMEM: on memory allocation failure
 */
static int mei_virtio_write_message(struct mei_device *dev,
				    const void *hdr, size_t hdr_len,
				    const void *data, size_t data_len)
{
	struct mei_virtio_hw *hw = to_virtio_hw(dev);
	struct scatterlist sg[2];
	const void *hbuf, *dbuf;
	int ret;

	if (WARN_ON(!atomic_add_unless(&hw->hbuf_ready, -1, 0)))
		return -EIO;

	hbuf = kmemdup(hdr, hdr_len, GFP_KERNEL);
	hw->send_hdr = hbuf;

	dbuf = kmemdup(data, data_len, GFP_KERNEL);
	hw->send_buf = dbuf;

	if (!hbuf || !dbuf) {
		ret = -ENOMEM;
		goto fail;
	}

	sg_init_table(sg, 2);
	sg_set_buf(&sg[0], hbuf, hdr_len);
	sg_set_buf(&sg[1], dbuf, data_len);

	ret = virtqueue_add_outbuf(hw->out, sg, 2, hw, GFP_KERNEL);
	if (ret) {
		dev_err(dev->dev, "failed to add outbuf\n");
		goto fail;
	}

	virtqueue_kick(hw->out);
	return 0;
fail:

	mei_virtio_free_outbufs(hw);

	return ret;
}

/**
 * mei_virtio_count_full_read_slots() - counts read full slots.
 * @dev: the device structure
 *
 * Return: -EOVERFLOW if overflow, otherwise filled slots count
 */
static int mei_virtio_count_full_read_slots(struct mei_device *dev)
{
	struct mei_virtio_hw *hw = to_virtio_hw(dev);

	if (hw->recv_idx > hw->recv_len)
		return -EOVERFLOW;

	return hw->recv_len - hw->recv_idx;
}

/**
 * mei_virtio_read_hdr() - Reads 32bit dword from mei virtio receive buffer
 *
 * @dev: the device structure
 *
 * Return: 32bit dword of receive buffer (u32)
 */
static inline u32 mei_virtio_read_hdr(const struct mei_device *dev)
{
	struct mei_virtio_hw *hw = to_virtio_hw(dev);

	WARN_ON(hw->cfg.buf_depth < hw->recv_idx + 1);

	return hw->recv_buf[hw->recv_idx++];
}

static int mei_virtio_read(struct mei_device *dev, unsigned char *buffer,
			   unsigned long len)
{
	struct mei_virtio_hw *hw = to_virtio_hw(dev);
	u32 slots = mei_data2slots(len);

	if (WARN_ON(hw->cfg.buf_depth < hw->recv_idx + slots))
		return -EOVERFLOW;

	/*
	 * Assumption: There is only one MEI message in recv_buf each time.
	 * Backend service need follow this rule too.
	 */
	memcpy(buffer, hw->recv_buf + hw->recv_idx, len);
	hw->recv_idx += slots;

	return 0;
}

static bool mei_virtio_pg_is_enabled(struct mei_device *dev)
{
	return false;
}

static bool mei_virtio_pg_in_transition(struct mei_device *dev)
{
	return false;
}

static void mei_virtio_add_recv_buf(struct mei_virtio_hw *hw)
{
	struct scatterlist sg;

	if (hw->recv_rdy) /* not needed */
		return;

	/* refill the recv_buf to IN virtqueue to get next message */
	sg_init_one(&sg, hw->recv_buf, mei_slots2data(hw->cfg.buf_depth));
	hw->recv_len = 0;
	hw->recv_idx = 0;
	hw->recv_rdy = 1;
	virtqueue_add_inbuf(hw->in, &sg, 1, hw->recv_buf, GFP_KERNEL);
	virtqueue_kick(hw->in);
}

/**
 * mei_virtio_hw_is_ready() - check whether the BE(hw) has turned ready
 * @dev: mei device
 * Return: bool
 */
static bool mei_virtio_hw_is_ready(struct mei_device *dev)
{
	struct mei_virtio_hw *hw = to_virtio_hw(dev);
	struct virtio_device *vdev = dev_to_virtio(dev->dev);

	virtio_cread(vdev, struct mei_virtio_cfg,
		     hw_ready, &hw->cfg.hw_ready);

	dev_dbg(dev->dev, "hw ready %d\n", hw->cfg.hw_ready);

	return hw->cfg.hw_ready;
}

/**
 * mei_virtio_hw_reset - resets virtio hw.
 *
 * @dev: the device structure
 * @intr_enable: virtio use data/config callbacks
 *
 * Return: 0 on success an error code otherwise
 */
static int mei_virtio_hw_reset(struct mei_device *dev, bool intr_enable)
{
	struct mei_virtio_hw *hw = to_virtio_hw(dev);
	struct virtio_device *vdev = dev_to_virtio(dev->dev);

	dev_dbg(dev->dev, "hw reset\n");

	dev->recvd_hw_ready = false;
	hw->host_ready = false;
	atomic_set(&hw->hbuf_ready, 0);
	hw->recv_len = 0;
	hw->recv_idx = 0;

	hw->cfg.host_reset = 1;
	virtio_cwrite(vdev, struct mei_virtio_cfg,
		      host_reset, &hw->cfg.host_reset);

	mei_virtio_hw_is_ready(dev);

	if (intr_enable)
		mei_virtio_intr_enable(dev);

	return 0;
}

/**
 * mei_virtio_hw_reset_release() - release device from the reset
 * @dev: the device structure
 */
static void mei_virtio_hw_reset_release(struct mei_device *dev)
{
	struct mei_virtio_hw *hw = to_virtio_hw(dev);
	struct virtio_device *vdev = dev_to_virtio(dev->dev);

	dev_dbg(dev->dev, "hw reset release\n");
	hw->cfg.host_reset = 0;
	virtio_cwrite(vdev, struct mei_virtio_cfg,
		      host_reset, &hw->cfg.host_reset);
}

/**
 * mei_virtio_hw_ready_wait() - wait until the virtio(hw) has turned ready
 *  or timeout is reached
 * @dev: mei device
 *
 * Return: 0 on success, error otherwise
 */
static int mei_virtio_hw_ready_wait(struct mei_device *dev)
{
	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_hw_ready,
			   dev->recvd_hw_ready,
			   mei_secs_to_jiffies(MEI_HW_READY_TIMEOUT));
	mutex_lock(&dev->device_lock);
	if (!dev->recvd_hw_ready) {
		dev_err(dev->dev, "wait hw ready failed\n");
		return -ETIMEDOUT;
	}

	dev->recvd_hw_ready = false;
	return 0;
}

/**
 * mei_virtio_hw_start() - hw start routine
 * @dev: mei device
 *
 * Return: 0 on success, error otherwise
 */
static int mei_virtio_hw_start(struct mei_device *dev)
{
	struct mei_virtio_hw *hw = to_virtio_hw(dev);
	int ret;

	dev_dbg(dev->dev, "hw start\n");
	mei_virtio_hw_reset_release(dev);

	ret = mei_virtio_hw_ready_wait(dev);
	if (ret)
		return ret;

	mei_virtio_add_recv_buf(hw);
	atomic_set(&hw->hbuf_ready, 1);
	dev_dbg(dev->dev, "hw is ready\n");
	hw->host_ready = true;

	return 0;
}

/**
 * mei_virtio_host_is_ready() - check whether the FE has turned ready
 * @dev: mei device
 *
 * Return: bool
 */
static bool mei_virtio_host_is_ready(struct mei_device *dev)
{
	struct mei_virtio_hw *hw = to_virtio_hw(dev);

	dev_dbg(dev->dev, "host ready %d\n", hw->host_ready);

	return hw->host_ready;
}

/**
 * mei_virtio_data_in() - The callback of recv virtqueue of virtio mei
 * @vq: receiving virtqueue
 */
static void mei_virtio_data_in(struct virtqueue *vq)
{
	struct mei_virtio_hw *hw = vq->vdev->priv;

	/* disable interrupts (enabled again from in the interrupt worker) */
	virtqueue_disable_cb(hw->in);

	schedule_work(&hw->intr_handler);
}

/**
 * mei_virtio_data_out() - The callback of send virtqueue of virtio mei
 * @vq: transmitting virtqueue
 */
static void mei_virtio_data_out(struct virtqueue *vq)
{
	struct mei_virtio_hw *hw = vq->vdev->priv;

	schedule_work(&hw->intr_handler);
}

static void mei_virtio_intr_handler(struct work_struct *work)
{
	struct mei_virtio_hw *hw =
		container_of(work, struct mei_virtio_hw, intr_handler);
	struct mei_device *dev = &hw->mdev;
	LIST_HEAD(complete_list);
	s32 slots;
	int rets = 0;
	void *data;
	unsigned int len;

	mutex_lock(&dev->device_lock);

	if (dev->dev_state == MEI_DEV_DISABLED) {
		dev_warn(dev->dev, "Interrupt in disabled state.\n");
		mei_virtio_intr_disable(dev);
		goto end;
	}

	/* check if ME wants a reset */
	if (!mei_hw_is_ready(dev) && dev->dev_state != MEI_DEV_RESETTING) {
		dev_warn(dev->dev, "BE service not ready: resetting.\n");
		schedule_work(&dev->reset_work);
		goto end;
	}

	/* check if we need to start the dev */
	if (!mei_host_is_ready(dev)) {
		if (mei_hw_is_ready(dev)) {
			dev_dbg(dev->dev, "we need to start the dev.\n");
			dev->recvd_hw_ready = true;
			wake_up(&dev->wait_hw_ready);
		} else {
			dev_warn(dev->dev, "Spurious Interrupt\n");
		}
		goto end;
	}

	/* read */
	if (hw->recv_rdy) {
		data = virtqueue_get_buf(hw->in, &len);
		if (!data || !len) {
			dev_dbg(dev->dev, "No data %d", len);
		} else {
			dev_dbg(dev->dev, "data_in %d\n", len);
			WARN_ON(data != hw->recv_buf);
			hw->recv_len = mei_data2slots(len);
			hw->recv_rdy = 0;
		}
	}

	/* write */
	if (!atomic_read(&hw->hbuf_ready)) {
		if (!virtqueue_get_buf(hw->out, &len)) {
			dev_warn(dev->dev, "Failed to getbuf\n");
		} else {
			mei_virtio_free_outbufs(hw);
			atomic_inc(&hw->hbuf_ready);
		}
	}

	/* check slots available for reading */
	slots = mei_count_full_read_slots(dev);
	while (slots > 0) {
		dev_dbg(dev->dev, "slots to read = %08x\n", slots);
		rets = mei_irq_read_handler(dev, &complete_list, &slots);

		if (rets &&
		    (dev->dev_state != MEI_DEV_RESETTING &&
		     dev->dev_state != MEI_DEV_POWER_DOWN)) {
			dev_err(dev->dev, "mei_irq_read_handler ret = %d.\n",
				rets);
			schedule_work(&dev->reset_work);
			goto end;
		}
	}

	dev->hbuf_is_ready = mei_hbuf_is_ready(dev);

	mei_irq_write_handler(dev, &complete_list);

	dev->hbuf_is_ready = mei_hbuf_is_ready(dev);

	mei_irq_compl_handler(dev, &complete_list);

	mei_virtio_add_recv_buf(hw);

end:
	if (dev->dev_state != MEI_DEV_DISABLED) {
		if (!virtqueue_enable_cb(hw->in))
			schedule_work(&hw->intr_handler);
	}

	mutex_unlock(&dev->device_lock);
}

static void mei_virtio_config_changed(struct virtio_device *vdev)
{
	struct mei_virtio_hw *hw = vdev->priv;
	struct mei_device *dev = &hw->mdev;

	virtio_cread(vdev, struct mei_virtio_cfg,
		     hw_ready, &hw->cfg.hw_ready);

	if (dev->dev_state == MEI_DEV_DISABLED) {
		dev_dbg(dev->dev, "disabled state don't start\n");
		return;
	}

	/* Run intr handler once to handle reset notify */
	schedule_work(&hw->intr_handler);
}

static void mei_virtio_remove_vqs(struct virtio_device *vdev)
{
	struct mei_virtio_hw *hw = vdev->priv;

	virtqueue_detach_unused_buf(hw->in);
	hw->recv_len = 0;
	hw->recv_idx = 0;
	hw->recv_rdy = 0;

	virtqueue_detach_unused_buf(hw->out);

	mei_virtio_free_outbufs(hw);

	vdev->config->del_vqs(vdev);
}

/*
 * There are two virtqueues, one is for send and another is for recv.
 */
static int mei_virtio_init_vqs(struct mei_virtio_hw *hw,
			       struct virtio_device *vdev)
{
	struct virtqueue *vqs[2];

	vq_callback_t *cbs[] = {
		mei_virtio_data_in,
		mei_virtio_data_out,
	};
	static const char * const names[] = {
		"in",
		"out",
	};
	int ret;

	ret = virtio_find_vqs(vdev, 2, vqs, cbs, names, NULL);
	if (ret)
		return ret;

	hw->in = vqs[0];
	hw->out = vqs[1];

	return 0;
}

static const struct mei_hw_ops mei_virtio_ops = {
	.fw_status = mei_virtio_fw_status,
	.pg_state = mei_virtio_pg_state,

	.host_is_ready = mei_virtio_host_is_ready,

	.hw_is_ready = mei_virtio_hw_is_ready,
	.hw_reset = mei_virtio_hw_reset,
	.hw_config = mei_virtio_hw_config,
	.hw_start = mei_virtio_hw_start,

	.pg_in_transition = mei_virtio_pg_in_transition,
	.pg_is_enabled = mei_virtio_pg_is_enabled,

	.intr_clear = mei_virtio_intr_clear,
	.intr_enable = mei_virtio_intr_enable,
	.intr_disable = mei_virtio_intr_disable,
	.synchronize_irq = mei_virtio_synchronize_irq,

	.hbuf_free_slots = mei_virtio_hbuf_empty_slots,
	.hbuf_is_ready = mei_virtio_hbuf_is_ready,
	.hbuf_depth = mei_virtio_hbuf_depth,

	.write = mei_virtio_write_message,

	.rdbuf_full_slots = mei_virtio_count_full_read_slots,
	.read_hdr = mei_virtio_read_hdr,
	.read = mei_virtio_read,
};

static int mei_virtio_probe(struct virtio_device *vdev)
{
	struct mei_virtio_hw *hw;
	int ret;

	hw = devm_kzalloc(&vdev->dev, sizeof(*hw), GFP_KERNEL);
	if (!hw)
		return -ENOMEM;

	vdev->priv = hw;

	INIT_WORK(&hw->intr_handler, mei_virtio_intr_handler);

	ret = mei_virtio_init_vqs(hw, vdev);
	if (ret)
		goto vqs_failed;

	virtio_cread(vdev, struct mei_virtio_cfg,
		     buf_depth, &hw->cfg.buf_depth);

	hw->recv_buf = kzalloc(mei_slots2data(hw->cfg.buf_depth), GFP_KERNEL);
	if (!hw->recv_buf) {
		ret = -ENOMEM;
		goto hbuf_failed;
	}
	atomic_set(&hw->hbuf_ready, 0);

	virtio_device_ready(vdev);

	mei_device_init(&hw->mdev, &vdev->dev, &mei_virtio_ops);

	pm_runtime_get_noresume(&vdev->dev);
	pm_runtime_set_active(&vdev->dev);
	pm_runtime_enable(&vdev->dev);

	ret = mei_start(&hw->mdev);
	if (ret)
		goto mei_start_failed;

	pm_runtime_set_autosuspend_delay(&vdev->dev, MEI_VIRTIO_RPM_TIMEOUT);
	pm_runtime_use_autosuspend(&vdev->dev);

	ret = mei_register(&hw->mdev, &vdev->dev);
	if (ret)
		goto mei_failed;

	pm_runtime_put(&vdev->dev);

	return 0;

mei_failed:
	mei_stop(&hw->mdev);
mei_start_failed:
	mei_cancel_work(&hw->mdev);
	mei_disable_interrupts(&hw->mdev);
	kfree(hw->recv_buf);
hbuf_failed:
	vdev->config->del_vqs(vdev);
vqs_failed:
	return ret;
}

static int __maybe_unused mei_virtio_pm_runtime_idle(struct device *device)
{
	struct virtio_device *vdev = dev_to_virtio(device);
	struct mei_virtio_hw *hw = vdev->priv;

	dev_dbg(&vdev->dev, "rpm: mei_virtio : runtime_idle\n");

	if (!hw)
		return -ENODEV;

	if (mei_write_is_idle(&hw->mdev))
		pm_runtime_autosuspend(device);

	return -EBUSY;
}

static int __maybe_unused mei_virtio_pm_runtime_suspend(struct device *device)
{
	return 0;
}

static int __maybe_unused mei_virtio_pm_runtime_resume(struct device *device)
{
	return 0;
}

static int __maybe_unused mei_virtio_freeze(struct virtio_device *vdev)
{
	struct mei_virtio_hw *hw = vdev->priv;

	dev_dbg(&vdev->dev, "freeze\n");

	if (!hw)
		return -ENODEV;

	mei_stop(&hw->mdev);
	mei_disable_interrupts(&hw->mdev);
	cancel_work_sync(&hw->intr_handler);
	vdev->config->reset(vdev);
	mei_virtio_remove_vqs(vdev);

	return 0;
}

static int __maybe_unused mei_virtio_restore(struct virtio_device *vdev)
{
	struct mei_virtio_hw *hw = vdev->priv;
	int ret;

	dev_dbg(&vdev->dev, "restore\n");

	if (!hw)
		return -ENODEV;

	ret = mei_virtio_init_vqs(hw, vdev);
	if (ret)
		return ret;

	virtio_device_ready(vdev);

	ret = mei_restart(&hw->mdev);
	if (ret)
		return ret;

	/* Start timer if stopped in suspend */
	schedule_delayed_work(&hw->mdev.timer_work, HZ);

	return 0;
}

static const struct dev_pm_ops mei_virtio_pm_ops = {
	SET_RUNTIME_PM_OPS(mei_virtio_pm_runtime_suspend,
			   mei_virtio_pm_runtime_resume,
			   mei_virtio_pm_runtime_idle)
};

static void mei_virtio_remove(struct virtio_device *vdev)
{
	struct mei_virtio_hw *hw = vdev->priv;

	mei_stop(&hw->mdev);
	mei_disable_interrupts(&hw->mdev);
	cancel_work_sync(&hw->intr_handler);
	mei_deregister(&hw->mdev);
	vdev->config->reset(vdev);
	mei_virtio_remove_vqs(vdev);
	kfree(hw->recv_buf);
	pm_runtime_disable(&vdev->dev);
}

static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_MEI, VIRTIO_DEV_ANY_ID },
	{ }
};

static struct virtio_driver mei_virtio_driver = {
	.id_table = id_table,
	.probe = mei_virtio_probe,
	.remove = mei_virtio_remove,
	.config_changed = mei_virtio_config_changed,
	.driver = {
		.name = KBUILD_MODNAME,
		.owner = THIS_MODULE,
		.pm = &mei_virtio_pm_ops,
	},
#ifdef CONFIG_PM_SLEEP
	.freeze = mei_virtio_freeze,
	.restore = mei_virtio_restore,
#endif
};

module_virtio_driver(mei_virtio_driver);
MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio MEI frontend driver");
MODULE_LICENSE("GPL v2");