Merge 4.13-rc7 into char-misc-next
We want the binder fix in here as well for testing and merge issues.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
commit 9749c37275
@@ -228,7 +228,7 @@ Learning on the device port should be enabled, as well as learning_sync:
 	bridge link set dev DEV learning on self
 	bridge link set dev DEV learning_sync on self
 
-Learning_sync attribute enables syncing of the learned/forgotton FDB entry to
+Learning_sync attribute enables syncing of the learned/forgotten FDB entry to
 the bridge's FDB.  It's possible, but not optimal, to enable learning on the
 device port and on the bridge port, and disable learning_sync.
 

@@ -245,7 +245,7 @@ the responsibility of the port driver/device to age out these entries.  If the
 port device supports ageing, when the FDB entry expires, it will notify the
 driver which in turn will notify the bridge with SWITCHDEV_FDB_DEL.  If the
 device does not support ageing, the driver can simulate ageing using a
-garbage collection timer to monitor FBD entries.  Expired entries will be
+garbage collection timer to monitor FDB entries.  Expired entries will be
 notified to the bridge using SWITCHDEV_FDB_DEL.  See rocker driver for
 example of driver running ageing timer.
@@ -58,20 +58,23 @@ Symbols/Function Pointers
 	%ps	versatile_init
 	%pB	prev_fn_of_versatile_init+0x88/0x88
 
-For printing symbols and function pointers. The ``S`` and ``s`` specifiers
-result in the symbol name with (``S``) or without (``s``) offsets. Where
-this is used on a kernel without KALLSYMS - the symbol address is
-printed instead.
-The ``F`` and ``f`` specifiers are for printing function pointers,
-for example, f->func, &gettimeofday. They have the same result as
-``S`` and ``s`` specifiers. But they do an extra conversion on
-ia64, ppc64 and parisc64 architectures where the function pointers
-are actually function descriptors.
+The ``S`` and ``s`` specifiers can be used for printing symbols
+from direct addresses, for example, __builtin_return_address(0),
+(void *)regs->ip. They result in the symbol name with (``S``) or
+without (``s``) offsets. If KALLSYMS are disabled then the symbol
+address is printed instead.
 
 The ``B`` specifier results in the symbol name with offsets and should be
 used when printing stack backtraces. The specifier takes into
 consideration the effect of compiler optimisations which may occur
 when tail-call``s are used and marked with the noreturn GCC attribute.
 
+On ia64, ppc64 and parisc64 architectures function pointers are
+actually function descriptors which must first be resolved. The ``F`` and
+``f`` specifiers perform this resolution and then provide the same
+functionality as the ``S`` and ``s`` specifiers.
+
 Kernel Pointers
 ===============
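As a usage sketch (illustrative only, not part of the patch; the helper name is made up), the specifiers documented above are typically used like this:

	/* Hypothetical helper showing %pS vs. %ps on a direct address */
	#include <linux/kernel.h>

	static void demo_report_caller(void)
	{
		/* %pS prints symbol+offset, %ps prints the bare symbol name */
		pr_info("called from %pS\n", __builtin_return_address(0));
		pr_info("called from %ps\n", __builtin_return_address(0));
	}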
@@ -35,9 +35,34 @@ Table : Subdirectories in /proc/sys/net
 bpf_jit_enable
 --------------
 
-This enables Berkeley Packet Filter Just in Time compiler.
-Currently supported on x86_64 architecture, bpf_jit provides a framework
-to speed packet filtering, the one used by tcpdump/libpcap for example.
+This enables the BPF Just in Time (JIT) compiler. BPF is a flexible
+and efficient infrastructure allowing to execute bytecode at various
+hook points. It is used in a number of Linux kernel subsystems such
+as networking (e.g. XDP, tc), tracing (e.g. kprobes, uprobes, tracepoints)
+and security (e.g. seccomp). LLVM has a BPF back end that can compile
+restricted C into a sequence of BPF instructions. After program load
+through bpf(2) and passing a verifier in the kernel, a JIT will then
+translate these BPF proglets into native CPU instructions. There are
+two flavors of JITs, the newer eBPF JIT currently supported on:
+  - x86_64
+  - arm64
+  - ppc64
+  - sparc64
+  - mips64
+  - s390x
+
+And the older cBPF JIT supported on the following archs:
+  - arm
+  - mips
+  - ppc
+  - sparc
+
+eBPF JITs are a superset of cBPF JITs, meaning the kernel will
+migrate cBPF instructions into eBPF instructions and then JIT
+compile them transparently. Older cBPF JITs can only translate
+tcpdump filters, seccomp rules, etc, but not mentioned eBPF
+programs loaded through bpf(2).
+
 Values :
 	0 - disable the JIT (default value)
 	1 - enable the JIT

@@ -46,9 +71,9 @@ Values :
 bpf_jit_harden
 --------------
 
-This enables hardening for the Berkeley Packet Filter Just in Time compiler.
-Supported are eBPF JIT backends. Enabling hardening trades off performance,
-but can mitigate JIT spraying.
+This enables hardening for the BPF JIT compiler. Supported are eBPF
+JIT backends. Enabling hardening trades off performance, but can
+mitigate JIT spraying.
 Values :
 	0 - disable JIT hardening (default value)
 	1 - enable JIT hardening for unprivileged users only

@@ -57,11 +82,11 @@ Values :
 bpf_jit_kallsyms
 ----------------
 
-When Berkeley Packet Filter Just in Time compiler is enabled, then compiled
-images are unknown addresses to the kernel, meaning they neither show up in
-traces nor in /proc/kallsyms. This enables export of these addresses, which
-can be used for debugging/tracing. If bpf_jit_harden is enabled, this feature
-is disabled.
+When BPF JIT compiler is enabled, then compiled images are unknown
+addresses to the kernel, meaning they neither show up in traces nor
+in /proc/kallsyms. This enables export of these addresses, which can
+be used for debugging/tracing. If bpf_jit_harden is enabled, this
+feature is disabled.
 Values :
 	0 - disable JIT kallsyms export (default value)
 	1 - enable JIT kallsyms export for privileged users only
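As a usage sketch (illustrative only, not part of the patch), the bpf_jit_enable knob documented above can be flipped from a small C program; the path follows the documentation:

	/* Equivalent to: echo 1 > /proc/sys/net/core/bpf_jit_enable */
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/proc/sys/net/core/bpf_jit_enable", O_WRONLY);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		if (write(fd, "1", 1) != 1)	/* 0 = disable (default), 1 = enable */
			perror("write");
		close(fd);
		return 0;
	}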
@@ -7111,7 +7111,6 @@ M:	Marc Zyngier <marc.zyngier@arm.com>
 L:	linux-kernel@vger.kernel.org
 S:	Maintained
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core
-T:	git git://git.infradead.org/users/jcooper/linux.git irqchip/core
 F:	Documentation/devicetree/bindings/interrupt-controller/
 F:	drivers/irqchip/
Makefile | 13
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 13
 SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc7
 NAME = Fearless Coyote
 
 # *DOCUMENTATION*

@@ -396,7 +396,7 @@ LINUXINCLUDE    := \
 KBUILD_CPPFLAGS := -D__KERNEL__
 
 KBUILD_CFLAGS   := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
-		   -fno-strict-aliasing -fno-common \
+		   -fno-strict-aliasing -fno-common -fshort-wchar \
 		   -Werror-implicit-function-declaration \
 		   -Wno-format-security \
 		   -std=gnu89 $(call cc-option,-fno-PIE)

@@ -442,7 +442,7 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn \
 # ===========================================================================
 # Rules shared between *config targets and build targets
 
-# Basic helpers built in scripts/
+# Basic helpers built in scripts/basic/
 PHONY += scripts_basic
 scripts_basic:
 	$(Q)$(MAKE) $(build)=scripts/basic

@@ -505,7 +505,7 @@ ifeq ($(KBUILD_EXTMOD),)
 	endif
 	endif
 endif
-# install and module_install need also be processed one by one
+# install and modules_install need also be processed one by one
 ifneq ($(filter install,$(MAKECMDGOALS)),)
 ifneq ($(filter modules_install,$(MAKECMDGOALS)),)
 	mixed-targets := 1

@@ -964,7 +964,7 @@ export KBUILD_VMLINUX_MAIN := $(core-y) $(libs-y2) $(drivers-y) $(net-y) $(virt-
 export KBUILD_VMLINUX_LIBS := $(libs-y1)
 export KBUILD_LDS          := arch/$(SRCARCH)/kernel/vmlinux.lds
 export LDFLAGS_vmlinux
-# used by scripts/pacmage/Makefile
+# used by scripts/package/Makefile
 export KBUILD_ALLDIRS := $(sort $(filter-out arch/%,$(vmlinux-alldirs)) arch Documentation include samples scripts tools)
 
 vmlinux-deps := $(KBUILD_LDS) $(KBUILD_VMLINUX_INIT) $(KBUILD_VMLINUX_MAIN) $(KBUILD_VMLINUX_LIBS)

@@ -992,7 +992,7 @@ include/generated/autoksyms.h: FORCE
 ARCH_POSTLINK := $(wildcard $(srctree)/arch/$(SRCARCH)/Makefile.postlink)
 
 # Final link of vmlinux with optional arch pass after final link
-cmd_link-vmlinux = \
+cmd_link-vmlinux = \
 	$(CONFIG_SHELL) $< $(LD) $(LDFLAGS) $(LDFLAGS_vmlinux) ;    \
 	$(if $(ARCH_POSTLINK), $(MAKE) -f $(ARCH_POSTLINK) $@, true)

@@ -1184,6 +1184,7 @@ PHONY += kselftest
 kselftest:
 	$(Q)$(MAKE) -C tools/testing/selftests run_tests
 
+PHONY += kselftest-clean
 kselftest-clean:
 	$(Q)$(MAKE) -C tools/testing/selftests clean
 
@@ -96,7 +96,6 @@ menu "ARC Architecture Configuration"
 
 menu "ARC Platform/SoC/Board"
 
-source "arch/arc/plat-sim/Kconfig"
 source "arch/arc/plat-tb10x/Kconfig"
 source "arch/arc/plat-axs10x/Kconfig"
 #New platform adds here
@@ -107,7 +107,7 @@ core-y += arch/arc/
 # w/o this dtb won't embed into kernel binary
 core-y				+= arch/arc/boot/dts/
 
-core-$(CONFIG_ARC_PLAT_SIM)	+= arch/arc/plat-sim/
+core-y				+= arch/arc/plat-sim/
 core-$(CONFIG_ARC_PLAT_TB10X)	+= arch/arc/plat-tb10x/
 core-$(CONFIG_ARC_PLAT_AXS10X)	+= arch/arc/plat-axs10x/
 core-$(CONFIG_ARC_PLAT_EZNPS)	+= arch/arc/plat-eznps/
@@ -15,15 +15,15 @@
 
 / {
 	compatible = "snps,arc";
-	#address-cells = <1>;
-	#size-cells = <1>;
+	#address-cells = <2>;
+	#size-cells = <2>;
 
 	cpu_card {
 		compatible = "simple-bus";
 		#address-cells = <1>;
 		#size-cells = <1>;
 
-		ranges = <0x00000000 0xf0000000 0x10000000>;
+		ranges = <0x00000000 0x0 0xf0000000 0x10000000>;
 
 		core_clk: core_clk {
 			#clock-cells = <0>;

@@ -91,23 +91,21 @@
 	mb_intc: dw-apb-ictl@0xe0012000 {
 		#interrupt-cells = <1>;
 		compatible = "snps,dw-apb-ictl";
-		reg = < 0xe0012000 0x200 >;
+		reg = < 0x0 0xe0012000 0x0 0x200 >;
 		interrupt-controller;
 		interrupt-parent = <&core_intc>;
 		interrupts = < 7 >;
 	};
 
 	memory {
-		#address-cells = <1>;
-		#size-cells = <1>;
-		ranges = <0x00000000 0x80000000 0x20000000>;
 		device_type = "memory";
-		reg = <0x80000000 0x1b000000>;	/* (512 - 32) MiB */
+		/* CONFIG_KERNEL_RAM_BASE_ADDRESS needs to match low mem start */
+		reg = <0x0 0x80000000 0x0 0x1b000000>;	/* (512 - 32) MiB */
 	};
 
 	reserved-memory {
-		#address-cells = <1>;
-		#size-cells = <1>;
+		#address-cells = <2>;
+		#size-cells = <2>;
 		ranges;
 		/*
 		 * We just move frame buffer area to the very end of

@@ -118,7 +116,7 @@
 		 */
 		frame_buffer: frame_buffer@9e000000 {
 			compatible = "shared-dma-pool";
-			reg = <0x9e000000 0x2000000>;
+			reg = <0x0 0x9e000000 0x0 0x2000000>;
 			no-map;
 		};
 	};
@@ -14,15 +14,15 @@
 
 / {
 	compatible = "snps,arc";
-	#address-cells = <1>;
-	#size-cells = <1>;
+	#address-cells = <2>;
+	#size-cells = <2>;
 
 	cpu_card {
 		compatible = "simple-bus";
 		#address-cells = <1>;
 		#size-cells = <1>;
 
-		ranges = <0x00000000 0xf0000000 0x10000000>;
+		ranges = <0x00000000 0x0 0xf0000000 0x10000000>;
 
 		core_clk: core_clk {
 			#clock-cells = <0>;

@@ -94,30 +94,29 @@
 	mb_intc: dw-apb-ictl@0xe0012000 {
 		#interrupt-cells = <1>;
 		compatible = "snps,dw-apb-ictl";
-		reg = < 0xe0012000 0x200 >;
+		reg = < 0x0 0xe0012000 0x0 0x200 >;
 		interrupt-controller;
 		interrupt-parent = <&core_intc>;
 		interrupts = < 24 >;
 	};
 
 	memory {
-		#address-cells = <1>;
-		#size-cells = <1>;
-		ranges = <0x00000000 0x80000000 0x40000000>;
 		device_type = "memory";
-		reg = <0x80000000 0x20000000>;	/* 512MiB */
+		/* CONFIG_KERNEL_RAM_BASE_ADDRESS needs to match low mem start */
+		reg = <0x0 0x80000000 0x0 0x20000000	/* 512 MiB low mem */
+		       0x1 0xc0000000 0x0 0x40000000>;	/* 1 GiB highmem */
 	};
 
 	reserved-memory {
-		#address-cells = <1>;
-		#size-cells = <1>;
+		#address-cells = <2>;
+		#size-cells = <2>;
 		ranges;
 		/*
 		 * Move frame buffer out of IOC aperture (0x8z-0xAz).
 		 */
 		frame_buffer: frame_buffer@be000000 {
 			compatible = "shared-dma-pool";
-			reg = <0xbe000000 0x2000000>;
+			reg = <0x0 0xbe000000 0x0 0x2000000>;
 			no-map;
 		};
 	};
@@ -14,15 +14,15 @@
 
 / {
 	compatible = "snps,arc";
-	#address-cells = <1>;
-	#size-cells = <1>;
+	#address-cells = <2>;
+	#size-cells = <2>;
 
 	cpu_card {
 		compatible = "simple-bus";
 		#address-cells = <1>;
 		#size-cells = <1>;
 
-		ranges = <0x00000000 0xf0000000 0x10000000>;
+		ranges = <0x00000000 0x0 0xf0000000 0x10000000>;
 
 		core_clk: core_clk {
 			#clock-cells = <0>;

@@ -100,30 +100,29 @@
 	mb_intc: dw-apb-ictl@0xe0012000 {
 		#interrupt-cells = <1>;
 		compatible = "snps,dw-apb-ictl";
-		reg = < 0xe0012000 0x200 >;
+		reg = < 0x0 0xe0012000 0x0 0x200 >;
 		interrupt-controller;
 		interrupt-parent = <&idu_intc>;
 		interrupts = <0>;
 	};
 
 	memory {
-		#address-cells = <1>;
-		#size-cells = <1>;
-		ranges = <0x00000000 0x80000000 0x40000000>;
 		device_type = "memory";
-		reg = <0x80000000 0x20000000>;	/* 512MiB */
+		/* CONFIG_KERNEL_RAM_BASE_ADDRESS needs to match low mem start */
+		reg = <0x0 0x80000000 0x0 0x20000000	/* 512 MiB low mem */
+		       0x1 0xc0000000 0x0 0x40000000>;	/* 1 GiB highmem */
 	};
 
 	reserved-memory {
-		#address-cells = <1>;
-		#size-cells = <1>;
+		#address-cells = <2>;
+		#size-cells = <2>;
 		ranges;
 		/*
 		 * Move frame buffer out of IOC aperture (0x8z-0xAz).
 		 */
 		frame_buffer: frame_buffer@be000000 {
 			compatible = "shared-dma-pool";
-			reg = <0xbe000000 0x2000000>;
+			reg = <0x0 0xbe000000 0x0 0x2000000>;
 			no-map;
 		};
 	};
@@ -13,7 +13,7 @@
 		compatible = "simple-bus";
 		#address-cells = <1>;
 		#size-cells = <1>;
-		ranges = <0x00000000 0xe0000000 0x10000000>;
+		ranges = <0x00000000 0x0 0xe0000000 0x10000000>;
 		interrupt-parent = <&mb_intc>;
 
 		i2sclk: i2sclk@100a0 {
@@ -21,7 +21,6 @@ CONFIG_MODULES=y
 # CONFIG_BLK_DEV_BSG is not set
 # CONFIG_IOSCHED_DEADLINE is not set
 # CONFIG_IOSCHED_CFQ is not set
-CONFIG_ARC_PLAT_SIM=y
 CONFIG_ISA_ARCV2=y
 CONFIG_ARC_BUILTIN_DTB_NAME="haps_hs"
 CONFIG_PREEMPT=y
@@ -23,7 +23,6 @@ CONFIG_MODULES=y
 # CONFIG_BLK_DEV_BSG is not set
 # CONFIG_IOSCHED_DEADLINE is not set
 # CONFIG_IOSCHED_CFQ is not set
-CONFIG_ARC_PLAT_SIM=y
 CONFIG_ISA_ARCV2=y
 CONFIG_SMP=y
 CONFIG_ARC_BUILTIN_DTB_NAME="haps_hs_idu"
@@ -39,7 +39,6 @@ CONFIG_IP_PNP=y
 # CONFIG_INET_XFRM_MODE_TRANSPORT is not set
 # CONFIG_INET_XFRM_MODE_TUNNEL is not set
 # CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
 # CONFIG_INET_DIAG is not set
 # CONFIG_IPV6 is not set
 # CONFIG_WIRELESS is not set
@@ -23,7 +23,6 @@ CONFIG_MODULES=y
 # CONFIG_BLK_DEV_BSG is not set
 # CONFIG_IOSCHED_DEADLINE is not set
 # CONFIG_IOSCHED_CFQ is not set
-CONFIG_ARC_PLAT_SIM=y
 CONFIG_ARC_BUILTIN_DTB_NAME="nsim_700"
 CONFIG_PREEMPT=y
 # CONFIG_COMPACTION is not set
@@ -26,7 +26,6 @@ CONFIG_MODULE_FORCE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
 # CONFIG_IOSCHED_DEADLINE is not set
 # CONFIG_IOSCHED_CFQ is not set
-CONFIG_ARC_PLAT_SIM=y
 CONFIG_ISA_ARCV2=y
 CONFIG_ARC_BUILTIN_DTB_NAME="nsim_hs"
 CONFIG_PREEMPT=y
@@ -24,7 +24,6 @@ CONFIG_MODULE_FORCE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
 # CONFIG_IOSCHED_DEADLINE is not set
 # CONFIG_IOSCHED_CFQ is not set
-CONFIG_ARC_PLAT_SIM=y
 CONFIG_ISA_ARCV2=y
 CONFIG_SMP=y
 CONFIG_ARC_BUILTIN_DTB_NAME="nsim_hs_idu"
@@ -23,7 +23,6 @@ CONFIG_MODULES=y
 # CONFIG_BLK_DEV_BSG is not set
 # CONFIG_IOSCHED_DEADLINE is not set
 # CONFIG_IOSCHED_CFQ is not set
-CONFIG_ARC_PLAT_SIM=y
 CONFIG_ARC_BUILTIN_DTB_NAME="nsimosci"
 # CONFIG_COMPACTION is not set
 CONFIG_NET=y
@@ -23,7 +23,6 @@ CONFIG_MODULES=y
 # CONFIG_BLK_DEV_BSG is not set
 # CONFIG_IOSCHED_DEADLINE is not set
 # CONFIG_IOSCHED_CFQ is not set
-CONFIG_ARC_PLAT_SIM=y
 CONFIG_ISA_ARCV2=y
 CONFIG_ARC_BUILTIN_DTB_NAME="nsimosci_hs"
 # CONFIG_COMPACTION is not set
@@ -18,7 +18,6 @@ CONFIG_MODULES=y
 # CONFIG_BLK_DEV_BSG is not set
 # CONFIG_IOSCHED_DEADLINE is not set
 # CONFIG_IOSCHED_CFQ is not set
-CONFIG_ARC_PLAT_SIM=y
 CONFIG_ISA_ARCV2=y
 CONFIG_SMP=y
 # CONFIG_ARC_TIMERS_64BIT is not set
@@ -38,7 +38,6 @@ CONFIG_IP_MULTICAST=y
 # CONFIG_INET_XFRM_MODE_TRANSPORT is not set
 # CONFIG_INET_XFRM_MODE_TUNNEL is not set
 # CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
 # CONFIG_INET_DIAG is not set
 # CONFIG_IPV6 is not set
 # CONFIG_WIRELESS is not set
@@ -96,7 +96,9 @@ extern unsigned long perip_base, perip_end;
 #define ARC_REG_SLC_FLUSH	0x904
 #define ARC_REG_SLC_INVALIDATE	0x905
 #define ARC_REG_SLC_RGN_START	0x914
+#define ARC_REG_SLC_RGN_START1	0x915
 #define ARC_REG_SLC_RGN_END	0x916
+#define ARC_REG_SLC_RGN_END1	0x917
 
 /* Bit val in SLC_CONTROL */
 #define SLC_CTRL_DIS		0x001
@@ -94,6 +94,8 @@ static inline int is_pae40_enabled(void)
 	return IS_ENABLED(CONFIG_ARC_HAS_PAE40);
 }
 
+extern int pae40_exist_but_not_enab(void);
+
 #endif	/* !__ASSEMBLY__ */
 
 #endif
@@ -75,10 +75,13 @@ void arc_init_IRQ(void)
 	 * Set a default priority for all available interrupts to prevent
 	 * switching of register banks if Fast IRQ and multiple register banks
 	 * are supported by CPU.
+	 * Also disable all IRQ lines so faulty external hardware won't
+	 * trigger interrupt that kernel is not ready to handle.
 	 */
 	for (i = NR_EXCEPTIONS; i < irq_bcr.irqs + NR_EXCEPTIONS; i++) {
 		write_aux_reg(AUX_IRQ_SELECT, i);
 		write_aux_reg(AUX_IRQ_PRIORITY, ARCV2_IRQ_DEF_PRIO);
+		write_aux_reg(AUX_IRQ_ENABLE, 0);
 	}
 
 	/* setup status32, don't enable intr yet as kernel doesn't want */
@@ -27,7 +27,7 @@
  */
 void arc_init_IRQ(void)
 {
-	int level_mask = 0;
+	int level_mask = 0, i;
 
 	/* Is timer high priority Interrupt (Level2 in ARCompact jargon) */
 	level_mask |= IS_ENABLED(CONFIG_ARC_COMPACT_IRQ_LEVELS) << TIMER0_IRQ;

@@ -40,6 +40,18 @@ void arc_init_IRQ(void)
 
 	if (level_mask)
 		pr_info("Level-2 interrupts bitset %x\n", level_mask);
+
+	/*
+	 * Disable all IRQ lines so faulty external hardware won't
+	 * trigger interrupt that kernel is not ready to handle.
+	 */
+	for (i = TIMER0_IRQ; i < NR_CPU_IRQS; i++) {
+		unsigned int ienb;
+
+		ienb = read_aux_reg(AUX_IENABLE);
+		ienb &= ~(1 << i);
+		write_aux_reg(AUX_IENABLE, ienb);
+	}
 }
 
 /*
@@ -665,6 +665,7 @@ noinline void slc_op(phys_addr_t paddr, unsigned long sz, const int op)
 	static DEFINE_SPINLOCK(lock);
 	unsigned long flags;
 	unsigned int ctrl;
+	phys_addr_t end;
 
 	spin_lock_irqsave(&lock, flags);
 

@@ -694,8 +695,19 @@ noinline void slc_op(phys_addr_t paddr, unsigned long sz, const int op)
 	 * END needs to be setup before START (latter triggers the operation)
 	 * END can't be same as START, so add (l2_line_sz - 1) to sz
 	 */
-	write_aux_reg(ARC_REG_SLC_RGN_END, (paddr + sz + l2_line_sz - 1));
-	write_aux_reg(ARC_REG_SLC_RGN_START, paddr);
+	end = paddr + sz + l2_line_sz - 1;
+	if (is_pae40_enabled())
+		write_aux_reg(ARC_REG_SLC_RGN_END1, upper_32_bits(end));
+
+	write_aux_reg(ARC_REG_SLC_RGN_END, lower_32_bits(end));
+
+	if (is_pae40_enabled())
+		write_aux_reg(ARC_REG_SLC_RGN_START1, upper_32_bits(paddr));
+
+	write_aux_reg(ARC_REG_SLC_RGN_START, lower_32_bits(paddr));
 
 	/* Make sure "busy" bit reports correct stataus, see STAR 9001165532 */
 	read_aux_reg(ARC_REG_SLC_CTRL);
 
 	while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY);
 

@@ -1111,6 +1123,13 @@ noinline void __init arc_ioc_setup(void)
 	__dc_enable();
 }
 
+/*
+ * Cache related boot time checks/setups only needed on master CPU:
+ *  - Geometry checks (kernel build and hardware agree: e.g. L1_CACHE_BYTES)
+ *    Assume SMP only, so all cores will have same cache config. A check on
+ *    one core suffices for all
+ *  - IOC setup / dma callbacks only need to be done once
+ */
 void __init arc_cache_init_master(void)
 {
 	unsigned int __maybe_unused cpu = smp_processor_id();

@@ -1190,12 +1209,27 @@ void __ref arc_cache_init(void)
 
 	printk(arc_cache_mumbojumbo(0, str, sizeof(str)));
 
-	/*
-	 * Only master CPU needs to execute rest of function:
-	 *  - Assume SMP so all cores will have same cache config so
-	 *    any geomtry checks will be same for all
-	 *  - IOC setup / dma callbacks only need to be setup once
-	 */
 	if (!cpu)
 		arc_cache_init_master();
+
+	/*
+	 * In PAE regime, TLB and cache maintenance ops take wider addresses
+	 * And even if PAE is not enabled in kernel, the upper 32-bits still need
+	 * to be zeroed to keep the ops sane.
+	 * As an optimization for more common !PAE enabled case, zero them out
+	 * once at init, rather than checking/setting to 0 for every runtime op
+	 */
+	if (is_isa_arcv2() && pae40_exist_but_not_enab()) {
+
+		if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE))
+			write_aux_reg(ARC_REG_IC_PTAG_HI, 0);
+
+		if (IS_ENABLED(CONFIG_ARC_HAS_DCACHE))
+			write_aux_reg(ARC_REG_DC_PTAG_HI, 0);
+
+		if (l2_line_sz) {
+			write_aux_reg(ARC_REG_SLC_RGN_END1, 0);
+			write_aux_reg(ARC_REG_SLC_RGN_START1, 0);
+		}
+	}
 }
@@ -153,6 +153,19 @@ static void _dma_cache_sync(phys_addr_t paddr, size_t size,
 	}
 }
 
+/*
+ * arc_dma_map_page - map a portion of a page for streaming DMA
+ *
+ * Ensure that any data held in the cache is appropriately discarded
+ * or written back.
+ *
+ * The device owns this memory once this call has completed.  The CPU
+ * can regain ownership by calling dma_unmap_page().
+ *
+ * Note: while it takes struct page as arg, caller can "abuse" it to pass
+ * a region larger than PAGE_SIZE, provided it is physically contiguous
+ * and this still works correctly
+ */
 static dma_addr_t arc_dma_map_page(struct device *dev, struct page *page,
 		unsigned long offset, size_t size, enum dma_data_direction dir,
 		unsigned long attrs)

@@ -165,6 +178,24 @@ static dma_addr_t arc_dma_map_page(struct device *dev, struct page *page,
 	return plat_phys_to_dma(dev, paddr);
 }
 
+/*
+ * arc_dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
+ *
+ * After this call, reads by the CPU to the buffer are guaranteed to see
+ * whatever the device wrote there.
+ *
+ * Note: historically this routine was not implemented for ARC
+ */
+static void arc_dma_unmap_page(struct device *dev, dma_addr_t handle,
+			       size_t size, enum dma_data_direction dir,
+			       unsigned long attrs)
+{
+	phys_addr_t paddr = plat_dma_to_phys(dev, handle);
+
+	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		_dma_cache_sync(paddr, size, dir);
+}
+
 static int arc_dma_map_sg(struct device *dev, struct scatterlist *sg,
 	   int nents, enum dma_data_direction dir, unsigned long attrs)
 {

@@ -178,6 +209,18 @@ static int arc_dma_map_sg(struct device *dev, struct scatterlist *sg,
 	return nents;
 }
 
+static void arc_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
+			     int nents, enum dma_data_direction dir,
+			     unsigned long attrs)
+{
+	struct scatterlist *s;
+	int i;
+
+	for_each_sg(sg, s, nents, i)
+		arc_dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir,
+				   attrs);
+}
+
 static void arc_dma_sync_single_for_cpu(struct device *dev,
 		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
 {

@@ -223,7 +266,9 @@ const struct dma_map_ops arc_dma_ops = {
 	.free			= arc_dma_free,
 	.mmap			= arc_dma_mmap,
 	.map_page		= arc_dma_map_page,
+	.unmap_page		= arc_dma_unmap_page,
 	.map_sg			= arc_dma_map_sg,
+	.unmap_sg		= arc_dma_unmap_sg,
 	.sync_single_for_device	= arc_dma_sync_single_for_device,
 	.sync_single_for_cpu	= arc_dma_sync_single_for_cpu,
 	.sync_sg_for_cpu	= arc_dma_sync_sg_for_cpu,
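For reference, a sketch (assumptions: generic DMA API; demo_stream() is a made-up driver helper, not from this patch) of the streaming-DMA ownership rules the new arc_dma_map_page()/arc_dma_unmap_page() comments describe:

	#include <linux/dma-mapping.h>

	/* Hypothetical driver helper: map, let the device DMA, then unmap */
	static int demo_stream(struct device *dev, struct page *page, size_t len)
	{
		dma_addr_t handle;

		/* after a successful map, the device owns the buffer */
		handle = dma_map_page(dev, page, 0, len, DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, handle))
			return -ENOMEM;

		/* ... device writes into the buffer here ... */

		/* unmap: CPU regains ownership and sees the device's writes */
		dma_unmap_page(dev, handle, len, DMA_FROM_DEVICE);
		return 0;
	}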
@@ -104,6 +104,8 @@
 /* A copy of the ASID from the PID reg is kept in asid_cache */
 DEFINE_PER_CPU(unsigned int, asid_cache) = MM_CTXT_FIRST_CYCLE;
 
+static int __read_mostly pae_exists;
+
 /*
  * Utility Routine to erase a J-TLB entry
  * Caller needs to setup Index Reg (manually or via getIndex)

@@ -784,7 +786,7 @@ void read_decode_mmu_bcr(void)
 		mmu->u_dtlb = mmu4->u_dtlb * 4;
 		mmu->u_itlb = mmu4->u_itlb * 4;
 		mmu->sasid = mmu4->sasid;
-		mmu->pae = mmu4->pae;
+		pae_exists = mmu->pae = mmu4->pae;
 	}
 }
 

@@ -809,6 +811,11 @@ char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len)
 	return buf;
 }
 
+int pae40_exist_but_not_enab(void)
+{
+	return pae_exists && !is_pae40_enabled();
+}
+
 void arc_mmu_init(void)
 {
 	char str[256];

@@ -859,6 +866,9 @@ void arc_mmu_init(void)
 	/* swapper_pg_dir is the pgd for the kernel, used by vmalloc */
 	write_aux_reg(ARC_REG_SCRATCH_DATA0, swapper_pg_dir);
 #endif
+
+	if (pae40_exist_but_not_enab())
+		write_aux_reg(ARC_REG_TLBPD1HI, 0);
 }
 
 /*
@@ -1,13 +0,0 @@
-#
-# Copyright (C) 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-
-menuconfig ARC_PLAT_SIM
-	bool "ARC nSIM based simulation virtual platforms"
-	help
-	  Support for nSIM based ARC simulation platforms
-	  This includes the standalone nSIM (uart only) vs. System C OSCI VP
@@ -20,11 +20,14 @@
  */
 
 static const char *simulation_compat[] __initconst = {
+#ifdef CONFIG_ISA_ARCOMPACT
 	"snps,nsim",
-	"snps,nsim_hs",
 	"snps,nsimosci",
+#else
+	"snps,nsim_hs",
 	"snps,nsimosci_hs",
 	"snps,zebu_hs",
+#endif
 	NULL,
 };
 
@@ -266,6 +266,7 @@
 
 &hdmicec {
 	status = "okay";
+	needs-hpd;
 };
 
 &hsi2c_4 {
@@ -297,6 +297,7 @@
 				#address-cells = <1>;
 				#size-cells = <1>;
+				status = "disabled";
 				ranges;
 
 				adc: adc@50030800 {
 					compatible = "fsl,imx25-gcq";
@@ -507,7 +507,7 @@
 		pinctrl_pcie: pciegrp {
 			fsl,pins = <
 				/* PCIe reset */
-				MX6QDL_PAD_EIM_BCLK__GPIO6_IO31	0x030b0
+				MX6QDL_PAD_EIM_DA0__GPIO3_IO00	0x030b0
 				MX6QDL_PAD_EIM_DA4__GPIO3_IO04	0x030b0
 			>;
 		};

@@ -668,7 +668,7 @@
 &pcie {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_pcie>;
-	reset-gpio = <&gpio6 31 GPIO_ACTIVE_LOW>;
+	reset-gpio = <&gpio3 0 GPIO_ACTIVE_LOW>;
 	status = "okay";
 };
 
@@ -557,6 +557,14 @@
 		>;
 	};
 
+	pinctrl_spi4: spi4grp {
+		fsl,pins = <
+			MX7D_PAD_GPIO1_IO09__GPIO1_IO9	0x59
+			MX7D_PAD_GPIO1_IO12__GPIO1_IO12	0x59
+			MX7D_PAD_GPIO1_IO13__GPIO1_IO13	0x59
+		>;
+	};
+
 	pinctrl_tsc2046_pendown: tsc2046_pendown {
 		fsl,pins = <
 			MX7D_PAD_EPDC_BDR1__GPIO2_IO29	0x59

@@ -697,13 +705,5 @@
 		fsl,pins = <
 			MX7D_PAD_LPSR_GPIO1_IO01__PWM1_OUT	0x110b0
 		>;
-
-		pinctrl_spi4: spi4grp {
-			fsl,pins = <
-				MX7D_PAD_GPIO1_IO09__GPIO1_IO9	0x59
-				MX7D_PAD_GPIO1_IO12__GPIO1_IO12	0x59
-				MX7D_PAD_GPIO1_IO13__GPIO1_IO13	0x59
-			>;
-		};
 	};
 };
@@ -303,7 +303,7 @@
 			#size-cells = <1>;
 			atmel,smc = <&hsmc>;
 			reg = <0x10000000 0x10000000
-			       0x40000000 0x30000000>;
+			       0x60000000 0x30000000>;
 			ranges = <0x0 0x0 0x10000000 0x10000000
 				  0x1 0x0 0x60000000 0x10000000
 				  0x2 0x0 0x70000000 0x10000000

@@ -1048,18 +1048,18 @@
 			};
 
 			hsmc: hsmc@f8014000 {
-				compatible = "atmel,sama5d3-smc", "syscon", "simple-mfd";
+				compatible = "atmel,sama5d2-smc", "syscon", "simple-mfd";
 				reg = <0xf8014000 0x1000>;
-				interrupts = <5 IRQ_TYPE_LEVEL_HIGH 6>;
+				interrupts = <17 IRQ_TYPE_LEVEL_HIGH 6>;
 				clocks = <&hsmc_clk>;
 				#address-cells = <1>;
 				#size-cells = <1>;
 				ranges;
 
-				pmecc: ecc-engine@ffffc070 {
+				pmecc: ecc-engine@f8014070 {
 					compatible = "atmel,sama5d2-pmecc";
-					reg = <0xffffc070 0x490>,
-					      <0xffffc500 0x100>;
+					reg = <0xf8014070 0x490>,
+					      <0xf8014500 0x100>;
 				};
 			};
 
@@ -1,7 +1,7 @@
 menuconfig ARCH_AT91
 	bool "Atmel SoCs"
 	depends on ARCH_MULTI_V4T || ARCH_MULTI_V5 || ARCH_MULTI_V7 || ARM_SINGLE_ARMV7M
-	select ARM_CPU_SUSPEND if PM
+	select ARM_CPU_SUSPEND if PM && ARCH_MULTI_V7
 	select COMMON_CLK_AT91
 	select GPIOLIB
 	select PINCTRL
@@ -608,6 +608,9 @@ static void __init at91_pm_init(void (*pm_idle)(void))
 
 void __init at91rm9200_pm_init(void)
 {
+	if (!IS_ENABLED(CONFIG_SOC_AT91RM9200))
+		return;
+
 	at91_dt_ramc();
 
 	/*

@@ -620,18 +623,27 @@ void __init at91rm9200_pm_init(void)
 
 void __init at91sam9_pm_init(void)
 {
+	if (!IS_ENABLED(CONFIG_SOC_AT91SAM9))
+		return;
+
 	at91_dt_ramc();
 	at91_pm_init(at91sam9_idle);
 }
 
 void __init sama5_pm_init(void)
 {
+	if (!IS_ENABLED(CONFIG_SOC_SAMA5))
+		return;
+
 	at91_dt_ramc();
 	at91_pm_init(NULL);
 }
 
 void __init sama5d2_pm_init(void)
 {
+	if (!IS_ENABLED(CONFIG_SOC_SAMA5D2))
+		return;
+
 	at91_pm_backup_init();
 	sama5_pm_init();
 }
@@ -51,6 +51,7 @@
 	compatible = "sinovoip,bananapi-m64", "allwinner,sun50i-a64";
 
 	aliases {
+		ethernet0 = &emac;
 		serial0 = &uart0;
 		serial1 = &uart1;
 	};
@@ -51,6 +51,7 @@
 	compatible = "pine64,pine64", "allwinner,sun50i-a64";
 
 	aliases {
+		ethernet0 = &emac;
 		serial0 = &uart0;
 		serial1 = &uart1;
 		serial2 = &uart2;
@@ -53,6 +53,7 @@
 		     "allwinner,sun50i-a64";
 
 	aliases {
+		ethernet0 = &emac;
 		serial0 = &uart0;
 	};
 
@@ -120,5 +120,8 @@
 };
 
 &pio {
+	interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>;
 	compatible = "allwinner,sun50i-h5-pinctrl";
 };
@@ -45,7 +45,7 @@
 		stdout-path = "serial0:115200n8";
 	};
 
-	audio_clkout: audio_clkout {
+	audio_clkout: audio-clkout {
 		/*
 		 * This is same as <&rcar_sound 0>
 		 * but needed to avoid cs2000/rcar_sound probe dead-lock
@@ -65,13 +65,13 @@ DECLARE_PER_CPU(const struct arch_timer_erratum_workaround *,
 	u64 _val;							\
 	if (needs_unstable_timer_counter_workaround()) {		\
 		const struct arch_timer_erratum_workaround *wa;		\
-		preempt_disable();					\
+		preempt_disable_notrace();				\
 		wa = __this_cpu_read(timer_unstable_counter_workaround); \
 		if (wa && wa->read_##reg)				\
 			_val = wa->read_##reg();			\
 		else							\
 			_val = read_sysreg(reg);			\
-		preempt_enable();					\
+		preempt_enable_notrace();				\
 	} else {							\
 		_val = read_sysreg(reg);				\
 	}								\
@@ -114,10 +114,10 @@
 
 /*
  * This is the base location for PIE (ET_DYN with INTERP) loads. On
- * 64-bit, this is raised to 4GB to leave the entire 32-bit address
+ * 64-bit, this is above 4GB to leave the entire 32-bit address
  * space open for things that want to use the area for 32-bit pointers.
  */
-#define ELF_ET_DYN_BASE	0x100000000UL
+#define ELF_ET_DYN_BASE	(2 * TASK_SIZE_64 / 3)
 
 #ifndef __ASSEMBLY__
 
@@ -161,9 +161,11 @@ void fpsimd_flush_thread(void)
 {
 	if (!system_supports_fpsimd())
 		return;
+	preempt_disable();
 	memset(&current->thread.fpsimd_state, 0, sizeof(struct fpsimd_state));
 	fpsimd_flush_task_state(current);
 	set_thread_flag(TIF_FOREIGN_FPSTATE);
+	preempt_enable();
 }
 
 /*
@@ -354,7 +354,6 @@ __primary_switched:
 	tst	x23, ~(MIN_KIMG_ALIGN - 1)	// already running randomized?
 	b.ne	0f
 	mov	x0, x21				// pass FDT address in x0
-	mov	x1, x23				// pass modulo offset in x1
 	bl	kaslr_early_init		// parse FDT for KASLR options
 	cbz	x0, 0f				// KASLR disabled? just proceed
 	orr	x23, x23, x0			// record KASLR offset
@@ -75,7 +75,7 @@ extern void *__init __fixmap_remap_fdt(phys_addr_t dt_phys, int *size,
  * containing function pointers) to be reinitialized, and zero-initialized
  * .bss variables will be reset to 0.
  */
-u64 __init kaslr_early_init(u64 dt_phys, u64 modulo_offset)
+u64 __init kaslr_early_init(u64 dt_phys)
 {
 	void *fdt;
 	u64 seed, offset, mask, module_range;

@@ -131,15 +131,17 @@ u64 __init kaslr_early_init(u64 dt_phys, u64 modulo_offset)
 	/*
 	 * The kernel Image should not extend across a 1GB/32MB/512MB alignment
 	 * boundary (for 4KB/16KB/64KB granule kernels, respectively). If this
-	 * happens, increase the KASLR offset by the size of the kernel image
-	 * rounded up by SWAPPER_BLOCK_SIZE.
+	 * happens, round down the KASLR offset by (1 << SWAPPER_TABLE_SHIFT).
+	 *
+	 * NOTE: The references to _text and _end below will already take the
+	 *       modulo offset (the physical displacement modulo 2 MB) into
+	 *       account, given that the physical placement is controlled by
+	 *       the loader, and will not change as a result of the virtual
+	 *       mapping we choose.
 	 */
-	if ((((u64)_text + offset + modulo_offset) >> SWAPPER_TABLE_SHIFT) !=
-	    (((u64)_end + offset + modulo_offset) >> SWAPPER_TABLE_SHIFT)) {
-		u64 kimg_sz = _end - _text;
-		offset = (offset + round_up(kimg_sz, SWAPPER_BLOCK_SIZE))
-				& mask;
-	}
+	if ((((u64)_text + offset) >> SWAPPER_TABLE_SHIFT) !=
+	    (((u64)_end + offset) >> SWAPPER_TABLE_SHIFT))
+		offset = round_down(offset, 1 << SWAPPER_TABLE_SHIFT);
 
 	if (IS_ENABLED(CONFIG_KASAN))
 		/*
@@ -435,8 +435,11 @@ retry:
 	 * the mmap_sem because it would already be released
 	 * in __lock_page_or_retry in mm/filemap.c.
 	 */
-	if (fatal_signal_pending(current))
+	if (fatal_signal_pending(current)) {
+		if (!user_mode(regs))
+			goto no_context;
 		return 0;
+	}
 
 	/*
 	 * Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk of
@@ -199,7 +199,7 @@ config PPC
 	select HAVE_OPTPROBES			if PPC64
 	select HAVE_PERF_EVENTS
 	select HAVE_PERF_EVENTS_NMI		if PPC64
-	select HAVE_HARDLOCKUP_DETECTOR_PERF	if HAVE_PERF_EVENTS_NMI && !HAVE_HARDLOCKUP_DETECTOR_ARCH
+	select HAVE_HARDLOCKUP_DETECTOR_PERF	if PERF_EVENTS && HAVE_PERF_EVENTS_NMI && !HAVE_HARDLOCKUP_DETECTOR_ARCH
 	select HAVE_PERF_REGS
 	select HAVE_PERF_USER_STACK_DUMP
 	select HAVE_RCU_TABLE_FREE		if SMP
@@ -90,6 +90,24 @@ static inline void switch_mm_irqs_off(struct mm_struct *prev,
 	/* Mark this context has been used on the new CPU */
 	if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(next))) {
 		cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
+
+		/*
+		 * This full barrier orders the store to the cpumask above vs
+		 * a subsequent operation which allows this CPU to begin loading
+		 * translations for next.
+		 *
+		 * When using the radix MMU that operation is the load of the
+		 * MMU context id, which is then moved to SPRN_PID.
+		 *
+		 * For the hash MMU it is either the first load from slb_cache
+		 * in switch_slb(), and/or the store of paca->mm_ctx_id in
+		 * copy_mm_to_paca().
+		 *
+		 * On the read side the barrier is in pte_xchg(), which orders
+		 * the store to the PTE vs the load of mm_cpumask.
+		 */
+		smp_mb();
+
 		new_on_cpu = true;
 	}
 
@@ -87,6 +87,7 @@ static inline bool pte_xchg(pte_t *ptep, pte_t old, pte_t new)
 	unsigned long *p = (unsigned long *)ptep;
 	__be64 prev;
 
+	/* See comment in switch_mm_irqs_off() */
 	prev = (__force __be64)__cmpxchg_u64(p, (__force unsigned long)pte_raw(old),
 					     (__force unsigned long)pte_raw(new));
 
@@ -62,6 +62,7 @@ static inline bool pte_xchg(pte_t *ptep, pte_t old, pte_t new)
 {
 	unsigned long *p = (unsigned long *)ptep;
 
+	/* See comment in switch_mm_irqs_off() */
 	return pte_val(old) == __cmpxchg_u64(p, pte_val(old), pte_val(new));
 }
 #endif
@@ -362,7 +362,8 @@ void enable_kernel_vsx(void)
 
 	cpumsr = msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);
 
-	if (current->thread.regs && (current->thread.regs->msr & MSR_VSX)) {
+	if (current->thread.regs &&
+	    (current->thread.regs->msr & (MSR_VSX|MSR_VEC|MSR_FP))) {
 		check_if_tm_restore_required(current);
 		/*
 		 * If a thread has already been reclaimed then the

@@ -386,7 +387,7 @@ void flush_vsx_to_thread(struct task_struct *tsk)
 {
 	if (tsk->thread.regs) {
 		preempt_disable();
-		if (tsk->thread.regs->msr & MSR_VSX) {
+		if (tsk->thread.regs->msr & (MSR_VSX|MSR_VEC|MSR_FP)) {
 			BUG_ON(tsk != current);
 			giveup_vsx(tsk);
 		}
@@ -294,32 +294,26 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
 				   struct kvm_create_spapr_tce_64 *args)
 {
 	struct kvmppc_spapr_tce_table *stt = NULL;
+	struct kvmppc_spapr_tce_table *siter;
 	unsigned long npages, size;
 	int ret = -ENOMEM;
 	int i;
+	int fd = -1;
 
 	if (!args->size)
 		return -EINVAL;
 
-	/* Check this LIOBN hasn't been previously allocated */
-	list_for_each_entry(stt, &kvm->arch.spapr_tce_tables, list) {
-		if (stt->liobn == args->liobn)
-			return -EBUSY;
-	}
-
 	size = _ALIGN_UP(args->size, PAGE_SIZE >> 3);
 	npages = kvmppc_tce_pages(size);
 	ret = kvmppc_account_memlimit(kvmppc_stt_pages(npages), true);
-	if (ret) {
-		stt = NULL;
-		goto fail;
-	}
+	if (ret)
+		return ret;
 
+	ret = -ENOMEM;
 	stt = kzalloc(sizeof(*stt) + npages * sizeof(struct page *),
 		      GFP_KERNEL);
 	if (!stt)
-		goto fail;
+		goto fail_acct;
 
 	stt->liobn = args->liobn;
 	stt->page_shift = args->page_shift;

@@ -334,24 +328,42 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
 			goto fail;
 	}
 
-	kvm_get_kvm(kvm);
+	ret = fd = anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops,
+				    stt, O_RDWR | O_CLOEXEC);
+	if (ret < 0)
+		goto fail;
 
 	mutex_lock(&kvm->lock);
-	list_add_rcu(&stt->list, &kvm->arch.spapr_tce_tables);
+
+	/* Check this LIOBN hasn't been previously allocated */
+	ret = 0;
+	list_for_each_entry(siter, &kvm->arch.spapr_tce_tables, list) {
+		if (siter->liobn == args->liobn) {
+			ret = -EBUSY;
+			break;
+		}
+	}
+
+	if (!ret) {
+		list_add_rcu(&stt->list, &kvm->arch.spapr_tce_tables);
+		kvm_get_kvm(kvm);
+	}
 
 	mutex_unlock(&kvm->lock);
 
-	return anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops,
-				stt, O_RDWR | O_CLOEXEC);
+	if (!ret)
+		return fd;
 
-fail:
-	if (stt) {
-		for (i = 0; i < npages; i++)
-			if (stt->pages[i])
-				__free_page(stt->pages[i]);
+	put_unused_fd(fd);
 
-		kfree(stt);
-	}
+fail:
+	for (i = 0; i < npages; i++)
+		if (stt->pages[i])
+			__free_page(stt->pages[i]);
+
+	kfree(stt);
+fail_acct:
+	kvmppc_account_memlimit(kvmppc_stt_pages(npages), false);
 	return ret;
 }
@@ -1291,6 +1291,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
 	/* Hypervisor doorbell - exit only if host IPI flag set */
 	cmpwi	r12, BOOK3S_INTERRUPT_H_DOORBELL
 	bne	3f
+BEGIN_FTR_SECTION
+	PPC_MSGSYNC
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
 	lbz	r0, HSTATE_HOST_IPI(r13)
 	cmpwi	r0, 0
 	beq	4f
@@ -16,7 +16,22 @@ static void GLUE(X_PFX,ack_pending)(struct kvmppc_xive_vcpu *xc)
 	u8 cppr;
 	u16 ack;
 
-	/* XXX DD1 bug workaround: Check PIPR vs. CPPR first ! */
+	/*
+	 * Ensure any previous store to CPPR is ordered vs.
+	 * the subsequent loads from PIPR or ACK.
+	 */
+	eieio();
+
+	/*
+	 * DD1 bug workaround: If PIPR is less favored than CPPR
+	 * ignore the interrupt or we might incorrectly lose an IPB
+	 * bit.
+	 */
+	if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
+		u8 pipr = __x_readb(__x_tima + TM_QW1_OS + TM_PIPR);
+		if (pipr >= xc->hw_cppr)
+			return;
+	}
 
 	/* Perform the acknowledge OS to register cycle. */
 	ack = be16_to_cpu(__x_readw(__x_tima + TM_SPC_ACK_OS_REG));

@@ -235,6 +250,11 @@ skip_ipi:
 	/*
 	 * If we found an interrupt, adjust what the guest CPPR should
 	 * be as if we had just fetched that interrupt from HW.
+	 *
+	 * Note: This can only make xc->cppr smaller as the previous
+	 * loop will only exit with hirq != 0 if prio is lower than
+	 * the current xc->cppr. Thus we don't need to re-check xc->mfrr
+	 * for pending IPIs.
 	 */
 	if (hirq)
 		xc->cppr = prio;

@@ -380,6 +400,12 @@ X_STATIC int GLUE(X_PFX,h_cppr)(struct kvm_vcpu *vcpu, unsigned long cppr)
 	old_cppr = xc->cppr;
 	xc->cppr = cppr;
 
+	/*
+	 * Order the above update of xc->cppr with the subsequent
+	 * read of xc->mfrr inside push_pending_to_hw()
+	 */
+	smp_mb();
+
 	/*
 	 * We are masking less, we need to look for pending things
 	 * to deliver and set VP pending bits accordingly to trigger

@@ -420,21 +446,37 @@ X_STATIC int GLUE(X_PFX,h_eoi)(struct kvm_vcpu *vcpu, unsigned long xirr)
 	 * used to signal MFRR changes is EOId when fetched from
 	 * the queue.
 	 */
-	if (irq == XICS_IPI || irq == 0)
+	if (irq == XICS_IPI || irq == 0) {
+		/*
+		 * This barrier orders the setting of xc->cppr vs.
+		 * subsquent test of xc->mfrr done inside
+		 * scan_interrupts and push_pending_to_hw
+		 */
+		smp_mb();
 		goto bail;
+	}
 
 	/* Find interrupt source */
 	sb = kvmppc_xive_find_source(xive, irq, &src);
 	if (!sb) {
 		pr_devel(" source not found !\n");
 		rc = H_PARAMETER;
+		/* Same as above */
+		smp_mb();
 		goto bail;
 	}
 	state = &sb->irq_state[src];
 	kvmppc_xive_select_irq(state, &hw_num, &xd);
 
 	state->in_eoi = true;
-	mb();
+
+	/*
+	 * This barrier orders both setting of in_eoi above vs,
+	 * subsequent test of guest_priority, and the setting
+	 * of xc->cppr vs. subsquent test of xc->mfrr done inside
+	 * scan_interrupts and push_pending_to_hw
+	 */
+	smp_mb();
 
 again:
 	if (state->guest_priority == MASKED) {

@@ -461,6 +503,14 @@ again:
 
 	}
 
+	/*
+	 * This barrier orders the above guest_priority check
+	 * and spin_lock/unlock with clearing in_eoi below.
+	 *
+	 * It also has to be a full mb() as it must ensure
+	 * the MMIOs done in source_eoi() are completed before
+	 * state->in_eoi is visible.
+	 */
 	mb();
 	state->in_eoi = false;
 bail:

@@ -495,6 +545,18 @@ X_STATIC int GLUE(X_PFX,h_ipi)(struct kvm_vcpu *vcpu, unsigned long server,
 	/* Locklessly write over MFRR */
 	xc->mfrr = mfrr;
 
+	/*
+	 * The load of xc->cppr below and the subsequent MMIO store
+	 * to the IPI must happen after the above mfrr update is
+	 * globally visible so that:
+	 *
+	 * - Synchronize with another CPU doing an H_EOI or a H_CPPR
+	 *   updating xc->cppr then reading xc->mfrr.
+	 *
+	 * - The target of the IPI sees the xc->mfrr update
+	 */
+	mb();
+
 	/* Shoot the IPI if most favored than target cppr */
 	if (mfrr < xc->cppr)
 		__x_writeq(0, __x_trig_page(&xc->vp_ipi_data));
@@ -394,7 +394,7 @@ static int sthyi(u64 vaddr)
 		"srl     %[cc],28\n"
 		: [cc] "=d" (cc)
 		: [code] "d" (code), [addr] "a" (addr)
-		: "memory", "cc");
+		: "3", "memory", "cc");
 	return cc;
 }
 

@@ -425,7 +425,7 @@ int handle_sthyi(struct kvm_vcpu *vcpu)
 	VCPU_EVENT(vcpu, 3, "STHYI: fc: %llu addr: 0x%016llx", code, addr);
 	trace_kvm_s390_handle_sthyi(vcpu, code, addr);
 
-	if (reg1 == reg2 || reg1 & 1 || reg2 & 1 || addr & ~PAGE_MASK)
+	if (reg1 == reg2 || reg1 & 1 || reg2 & 1)
 		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
 
 	if (code & 0xffff) {

@@ -433,6 +433,9 @@ int handle_sthyi(struct kvm_vcpu *vcpu)
 		goto out;
 	}
 
+	if (addr & ~PAGE_MASK)
+		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+
 	/*
 	 * If the page has not yet been faulted in, we want to do that
 	 * now and not after all the expensive calculations.
@@ -68,6 +68,7 @@ typedef struct { unsigned long iopgprot; } iopgprot_t;
 #define iopgprot_val(x)	((x).iopgprot)
 
 #define __pte(x)	((pte_t) { (x) } )
+#define __pmd(x)	((pmd_t) { { (x) }, })
 #define __iopte(x)	((iopte_t) { (x) } )
 #define __pgd(x)	((pgd_t) { (x) } )
 #define __ctxd(x)	((ctxd_t) { (x) } )

@@ -95,6 +96,7 @@ typedef unsigned long iopgprot_t;
 #define iopgprot_val(x)		(x)
 
 #define __pte(x)	(x)
+#define __pmd(x)	((pmd_t) { { (x) }, })
 #define __iopte(x)	(x)
 #define __pgd(x)	(x)
 #define __ctxd(x)	(x)
@@ -1266,8 +1266,6 @@ static int pci_sun4v_probe(struct platform_device *op)
 			 * ATU group, but ATU hcalls won't be available.
 			 */
 			hv_atu = false;
-			pr_err(PFX "Could not register hvapi ATU err=%d\n",
-			       err);
 		} else {
 			pr_info(PFX "Registered hvapi ATU major[%lu] minor[%lu]\n",
 				vatu_major, vatu_minor);
@@ -602,7 +602,7 @@ void pcibios_fixup_bus(struct pci_bus *bus)
 {
 	struct pci_dev *dev;
 	int i, has_io, has_mem;
-	unsigned int cmd;
+	unsigned int cmd = 0;
 	struct linux_pcic *pcic;
 	/* struct linux_pbm_info* pbm = &pcic->pbm; */
 	int node;
@@ -5,26 +5,26 @@
 	.align	4
 ENTRY(__multi3) /* %o0 = u, %o1 = v */
 	mov	%o1, %g1
-	srl	%o3, 0, %g4
-	mulx	%g4, %g1, %o1
+	srl	%o3, 0, %o4
+	mulx	%o4, %g1, %o1
 	srlx	%g1, 0x20, %g3
-	mulx	%g3, %g4, %g5
-	sllx	%g5, 0x20, %o5
-	srl	%g1, 0, %g4
+	mulx	%g3, %o4, %g7
+	sllx	%g7, 0x20, %o5
+	srl	%g1, 0, %o4
 	sub	%o1, %o5, %o5
 	srlx	%o5, 0x20, %o5
-	addcc	%g5, %o5, %g5
+	addcc	%g7, %o5, %g7
 	srlx	%o3, 0x20, %o5
-	mulx	%g4, %o5, %g4
+	mulx	%o4, %o5, %o4
 	mulx	%g3, %o5, %o5
 	sethi	%hi(0x80000000), %g3
-	addcc	%g5, %g4, %g5
-	srlx	%g5, 0x20, %g5
+	addcc	%g7, %o4, %g7
+	srlx	%g7, 0x20, %g7
 	add	%g3, %g3, %g3
 	movcc	%xcc, %g0, %g3
-	addcc	%o5, %g5, %o5
-	sllx	%g4, 0x20, %g4
-	add	%o1, %g4, %o1
+	addcc	%o5, %g7, %o5
+	sllx	%o4, 0x20, %o4
+	add	%o1, %o4, %o1
 	add	%o5, %g3, %g2
 	mulx	%g1, %o2, %g1
 	add	%g1, %g2, %g1
@@ -100,6 +100,7 @@ config X86
 	select GENERIC_STRNCPY_FROM_USER
 	select GENERIC_STRNLEN_USER
 	select GENERIC_TIME_VSYSCALL
+	select HARDLOCKUP_CHECK_TIMESTAMP	if X86_64
 	select HAVE_ACPI_APEI			if ACPI
 	select HAVE_ACPI_APEI_NMI		if ACPI
 	select HAVE_ALIGNED_STRUCT_PAGE		if SLUB

@@ -163,7 +164,7 @@ config X86
 	select HAVE_PCSPKR_PLATFORM
 	select HAVE_PERF_EVENTS
 	select HAVE_PERF_EVENTS_NMI
-	select HAVE_HARDLOCKUP_DETECTOR_PERF	if HAVE_PERF_EVENTS_NMI
+	select HAVE_HARDLOCKUP_DETECTOR_PERF	if PERF_EVENTS && HAVE_PERF_EVENTS_NMI
 	select HAVE_PERF_REGS
 	select HAVE_PERF_USER_STACK_DUMP
 	select HAVE_REGS_AND_STACK_ACCESS_API
@@ -117,11 +117,10 @@
 	.set T1, REG_T1
 .endm
 
-#define K_BASE		%r8
 #define HASH_PTR	%r9
+#define BLOCKS_CTR	%r8
 #define BUFFER_PTR	%r10
+#define BUFFER_PTR2	%r13
-#define BUFFER_END	%r11
 
 #define PRECALC_BUF	%r14
 #define WK_BUF		%r15

@@ -205,14 +204,14 @@
 		 * blended AVX2 and ALU instruction scheduling
 		 * 1 vector iteration per 8 rounds
 		 */
-		vmovdqu ((i * 2) + PRECALC_OFFSET)(BUFFER_PTR), W_TMP
+		vmovdqu (i * 2)(BUFFER_PTR), W_TMP
 	.elseif ((i & 7) == 1)
-		vinsertf128 $1, (((i-1) * 2)+PRECALC_OFFSET)(BUFFER_PTR2),\
+		vinsertf128 $1, ((i-1) * 2)(BUFFER_PTR2),\
 			 WY_TMP, WY_TMP
 	.elseif ((i & 7) == 2)
 		vpshufb YMM_SHUFB_BSWAP, WY_TMP, WY
 	.elseif ((i & 7) == 4)
-		vpaddd  K_XMM(K_BASE), WY, WY_TMP
+		vpaddd  K_XMM + K_XMM_AR(%rip), WY, WY_TMP
 	.elseif ((i & 7) == 7)
 		vmovdqu  WY_TMP, PRECALC_WK(i&~7)
 

@@ -255,7 +254,7 @@
 		vpxor	WY, WY_TMP, WY_TMP
 	.elseif ((i & 7) == 7)
 		vpxor	WY_TMP2, WY_TMP, WY
-		vpaddd	K_XMM(K_BASE), WY, WY_TMP
+		vpaddd	K_XMM + K_XMM_AR(%rip), WY, WY_TMP
 		vmovdqu	WY_TMP, PRECALC_WK(i&~7)
 
 		PRECALC_ROTATE_WY

@@ -291,7 +290,7 @@
 		vpsrld	$30, WY, WY
 		vpor	WY, WY_TMP, WY
 	.elseif ((i & 7) == 7)
-		vpaddd	K_XMM(K_BASE), WY, WY_TMP
+		vpaddd	K_XMM + K_XMM_AR(%rip), WY, WY_TMP
 		vmovdqu	WY_TMP, PRECALC_WK(i&~7)
 
 		PRECALC_ROTATE_WY

@@ -446,6 +445,16 @@
 
 .endm
 
+/* Add constant only if (%2 > %3) condition met (uses RTA as temp)
+ * %1 + %2 >= %3 ? %4 : 0
+ */
+.macro ADD_IF_GE a, b, c, d
+	mov     \a, RTA
+	add     $\d, RTA
+	cmp     $\c, \b
+	cmovge  RTA, \a
+.endm
+
 /*
  * macro implements 80 rounds of SHA-1, for multiple blocks with s/w pipelining
  */

@@ -463,13 +472,16 @@
 	lea	(2*4*80+32)(%rsp), WK_BUF
 
 	# Precalc WK for first 2 blocks
-	PRECALC_OFFSET = 0
+	ADD_IF_GE BUFFER_PTR2, BLOCKS_CTR, 2, 64
 	.set i, 0
 	.rept    160
 		PRECALC i
 		.set i, i + 1
 	.endr
-	PRECALC_OFFSET = 128
+
+	/* Go to next block if needed */
+	ADD_IF_GE BUFFER_PTR, BLOCKS_CTR, 3, 128
+	ADD_IF_GE BUFFER_PTR2, BLOCKS_CTR, 4, 128
 	xchg	WK_BUF, PRECALC_BUF
 
 	.align 32

@@ -479,8 +491,8 @@ _loop:
 	 * we use K_BASE value as a signal of a last block,
	 * it is set below by: cmovae BUFFER_PTR, K_BASE
	 */
-	cmp	K_BASE, BUFFER_PTR
-	jne	_begin
+	test	BLOCKS_CTR, BLOCKS_CTR
+	jnz	_begin
 	.align 32
 	jmp	_end
 	.align 32

@@ -512,10 +524,10 @@ _loop0:
 		.set j, j+2
 	.endr
 
-	add	$(2*64), BUFFER_PTR       /* move to next odd-64-byte block */
-	cmp	BUFFER_END, BUFFER_PTR    /* is current block the last one? */
-	cmovae	K_BASE, BUFFER_PTR        /* signal the last iteration smartly */
-
+	/* Update Counter */
+	sub	$1, BLOCKS_CTR
+	/* Move to the next block only if needed*/
+	ADD_IF_GE BUFFER_PTR, BLOCKS_CTR, 4, 128
 	/*
 	 * rounds
 	 * 60,62,64,66,68

@@ -532,8 +544,8 @@ _loop0:
 	UPDATE_HASH	12(HASH_PTR), D
 	UPDATE_HASH	16(HASH_PTR), E
 
-	cmp	K_BASE, BUFFER_PTR	/* is current block the last one? */
-	je	_loop
+	test	BLOCKS_CTR, BLOCKS_CTR
+	jz	_loop
 
 	mov	TB, B
 

@@ -575,10 +587,10 @@ _loop2:
 		.set j, j+2
 	.endr
 
-	add	$(2*64), BUFFER_PTR2      /* move to next even-64-byte block */
-
-	cmp	BUFFER_END, BUFFER_PTR2   /* is current block the last one */
-	cmovae	K_BASE, BUFFER_PTR        /* signal the last iteration smartly */
+	/* update counter */
+	sub     $1, BLOCKS_CTR
+	/* Move to the next block only if needed*/
+	ADD_IF_GE BUFFER_PTR2, BLOCKS_CTR, 4, 128
 
 	jmp	_loop3
 _loop3:

@@ -641,19 +653,12 @@ _loop3:
 
 	avx2_zeroupper
 
-	lea	K_XMM_AR(%rip), K_BASE
-
 	/* Setup initial values */
 	mov	CTX, HASH_PTR
 	mov	BUF, BUFFER_PTR
-	lea	64(BUF), BUFFER_PTR2
-
-	shl	$6, CNT			/* mul by 64 */
-	add	BUF, CNT
-	add	$64, CNT
-	mov	CNT, BUFFER_END
 
-	cmp	BUFFER_END, BUFFER_PTR2
-	cmovae	K_BASE, BUFFER_PTR2
+	mov	BUF, BUFFER_PTR2
+	mov	CNT, BLOCKS_CTR
 
 	xmm_mov	BSWAP_SHUFB_CTL(%rip), YMM_SHUFB_BSWAP
 
@@ -201,7 +201,7 @@ asmlinkage void sha1_transform_avx2(u32 *digest, const char *data,
 
 static bool avx2_usable(void)
 {
-	if (false && avx_usable() && boot_cpu_has(X86_FEATURE_AVX2)
+	if (avx_usable() && boot_cpu_has(X86_FEATURE_AVX2)
 		&& boot_cpu_has(X86_FEATURE_BMI1)
 		&& boot_cpu_has(X86_FEATURE_BMI2))
 		return true;
@@ -1211,6 +1211,8 @@ ENTRY(nmi)
 	 * other IST entries.
 	 */
 
+	ASM_CLAC
+
 	/* Use %rdx as our temp variable throughout */
 	pushq	%rdx
 
@@ -2114,7 +2114,7 @@ static void refresh_pce(void *ignored)
	load_mm_cr4(this_cpu_read(cpu_tlbstate.loaded_mm));
}

static void x86_pmu_event_mapped(struct perf_event *event)
static void x86_pmu_event_mapped(struct perf_event *event, struct mm_struct *mm)
{
	if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
		return;

@@ -2129,22 +2129,20 @@ static void x86_pmu_event_mapped(struct perf_event *event)
	 * For now, this can't happen because all callers hold mmap_sem
	 * for write. If this changes, we'll need a different solution.
	 */
	lockdep_assert_held_exclusive(&current->mm->mmap_sem);
	lockdep_assert_held_exclusive(&mm->mmap_sem);

	if (atomic_inc_return(&current->mm->context.perf_rdpmc_allowed) == 1)
		on_each_cpu_mask(mm_cpumask(current->mm), refresh_pce, NULL, 1);
	if (atomic_inc_return(&mm->context.perf_rdpmc_allowed) == 1)
		on_each_cpu_mask(mm_cpumask(mm), refresh_pce, NULL, 1);
}

static void x86_pmu_event_unmapped(struct perf_event *event)
static void x86_pmu_event_unmapped(struct perf_event *event, struct mm_struct *mm)
{
	if (!current->mm)
		return;

	if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
		return;

	if (atomic_dec_and_test(&current->mm->context.perf_rdpmc_allowed))
		on_each_cpu_mask(mm_cpumask(current->mm), refresh_pce, NULL, 1);
	if (atomic_dec_and_test(&mm->context.perf_rdpmc_allowed))
		on_each_cpu_mask(mm_cpumask(mm), refresh_pce, NULL, 1);
}

static int x86_pmu_event_idx(struct perf_event *event)
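These callbacks used to dereference current->mm, but by the time the unmap side runs, current->mm is not necessarily the mm the event was mapped into; passing the mm explicitly keeps the perf_rdpmc_allowed count balanced. A loose userspace model of that refcount (stdatomic stand-ins, not the kernel API):

	#include <stdatomic.h>
	#include <stdio.h>

	struct mm {
	        atomic_int perf_rdpmc_allowed;
	};

	/* Both hooks take the mm they were invoked for, instead of
	 * trusting whatever current->mm happens to be. */
	static void event_mapped(struct mm *mm)
	{
	        if (atomic_fetch_add(&mm->perf_rdpmc_allowed, 1) + 1 == 1)
	                printf("first mapping: enable RDPMC for this mm\n");
	}

	static void event_unmapped(struct mm *mm)
	{
	        if (atomic_fetch_sub(&mm->perf_rdpmc_allowed, 1) - 1 == 0)
	                printf("last mapping gone: disable RDPMC again\n");
	}

	int main(void)
	{
	        struct mm mm = { 0 };
	        event_mapped(&mm);    /* mmap of the event's ring buffer */
	        event_unmapped(&mm);  /* teardown, possibly from another task */
	        return 0;
	}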
@@ -69,7 +69,7 @@ struct bts_buffer {
	struct bts_phys buf[0];
};

struct pmu bts_pmu;
static struct pmu bts_pmu;

static size_t buf_size(struct page *page)
{

@@ -587,7 +587,7 @@ static __initconst const u64 p4_hw_cache_event_ids
 * P4_CONFIG_ALIASABLE or bits for P4_PEBS_METRIC, they are
 * either up to date automatically or not applicable at all.
 */
struct p4_event_alias {
static struct p4_event_alias {
	u64 original;
	u64 alternative;
} p4_event_aliases[] = {

@@ -559,7 +559,7 @@ static struct attribute_group rapl_pmu_format_group = {
	.attrs = rapl_formats_attr,
};

const struct attribute_group *rapl_attr_groups[] = {
static const struct attribute_group *rapl_attr_groups[] = {
	&rapl_pmu_attr_group,
	&rapl_pmu_format_group,
	&rapl_pmu_events_group,

@@ -721,7 +721,7 @@ static struct attribute *uncore_pmu_attrs[] = {
	NULL,
};

static struct attribute_group uncore_pmu_attr_group = {
static const struct attribute_group uncore_pmu_attr_group = {
	.attrs = uncore_pmu_attrs,
};

@@ -272,7 +272,7 @@ static struct attribute *nhmex_uncore_ubox_formats_attr[] = {
	NULL,
};

static struct attribute_group nhmex_uncore_ubox_format_group = {
static const struct attribute_group nhmex_uncore_ubox_format_group = {
	.name = "format",
	.attrs = nhmex_uncore_ubox_formats_attr,
};

@@ -299,7 +299,7 @@ static struct attribute *nhmex_uncore_cbox_formats_attr[] = {
	NULL,
};

static struct attribute_group nhmex_uncore_cbox_format_group = {
static const struct attribute_group nhmex_uncore_cbox_format_group = {
	.name = "format",
	.attrs = nhmex_uncore_cbox_formats_attr,
};

@@ -407,7 +407,7 @@ static struct attribute *nhmex_uncore_bbox_formats_attr[] = {
	NULL,
};

static struct attribute_group nhmex_uncore_bbox_format_group = {
static const struct attribute_group nhmex_uncore_bbox_format_group = {
	.name = "format",
	.attrs = nhmex_uncore_bbox_formats_attr,
};

@@ -484,7 +484,7 @@ static struct attribute *nhmex_uncore_sbox_formats_attr[] = {
	NULL,
};

static struct attribute_group nhmex_uncore_sbox_format_group = {
static const struct attribute_group nhmex_uncore_sbox_format_group = {
	.name = "format",
	.attrs = nhmex_uncore_sbox_formats_attr,
};

@@ -898,7 +898,7 @@ static struct attribute *nhmex_uncore_mbox_formats_attr[] = {
	NULL,
};

static struct attribute_group nhmex_uncore_mbox_format_group = {
static const struct attribute_group nhmex_uncore_mbox_format_group = {
	.name = "format",
	.attrs = nhmex_uncore_mbox_formats_attr,
};

@@ -1163,7 +1163,7 @@ static struct attribute *nhmex_uncore_rbox_formats_attr[] = {
	NULL,
};

static struct attribute_group nhmex_uncore_rbox_format_group = {
static const struct attribute_group nhmex_uncore_rbox_format_group = {
	.name = "format",
	.attrs = nhmex_uncore_rbox_formats_attr,
};

@@ -130,7 +130,7 @@ static struct attribute *snb_uncore_formats_attr[] = {
	NULL,
};

static struct attribute_group snb_uncore_format_group = {
static const struct attribute_group snb_uncore_format_group = {
	.name = "format",
	.attrs = snb_uncore_formats_attr,
};

@@ -289,7 +289,7 @@ static struct attribute *snb_uncore_imc_formats_attr[] = {
	NULL,
};

static struct attribute_group snb_uncore_imc_format_group = {
static const struct attribute_group snb_uncore_imc_format_group = {
	.name = "format",
	.attrs = snb_uncore_imc_formats_attr,
};

@@ -769,7 +769,7 @@ static struct attribute *nhm_uncore_formats_attr[] = {
	NULL,
};

static struct attribute_group nhm_uncore_format_group = {
static const struct attribute_group nhm_uncore_format_group = {
	.name = "format",
	.attrs = nhm_uncore_formats_attr,
};

@@ -602,27 +602,27 @@ static struct uncore_event_desc snbep_uncore_qpi_events[] = {
	{ /* end: all zeroes */ },
};

static struct attribute_group snbep_uncore_format_group = {
static const struct attribute_group snbep_uncore_format_group = {
	.name = "format",
	.attrs = snbep_uncore_formats_attr,
};

static struct attribute_group snbep_uncore_ubox_format_group = {
static const struct attribute_group snbep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = snbep_uncore_ubox_formats_attr,
};

static struct attribute_group snbep_uncore_cbox_format_group = {
static const struct attribute_group snbep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = snbep_uncore_cbox_formats_attr,
};

static struct attribute_group snbep_uncore_pcu_format_group = {
static const struct attribute_group snbep_uncore_pcu_format_group = {
	.name = "format",
	.attrs = snbep_uncore_pcu_formats_attr,
};

static struct attribute_group snbep_uncore_qpi_format_group = {
static const struct attribute_group snbep_uncore_qpi_format_group = {
	.name = "format",
	.attrs = snbep_uncore_qpi_formats_attr,
};

@@ -1431,27 +1431,27 @@ static struct attribute *ivbep_uncore_qpi_formats_attr[] = {
	NULL,
};

static struct attribute_group ivbep_uncore_format_group = {
static const struct attribute_group ivbep_uncore_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_formats_attr,
};

static struct attribute_group ivbep_uncore_ubox_format_group = {
static const struct attribute_group ivbep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_ubox_formats_attr,
};

static struct attribute_group ivbep_uncore_cbox_format_group = {
static const struct attribute_group ivbep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_cbox_formats_attr,
};

static struct attribute_group ivbep_uncore_pcu_format_group = {
static const struct attribute_group ivbep_uncore_pcu_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_pcu_formats_attr,
};

static struct attribute_group ivbep_uncore_qpi_format_group = {
static const struct attribute_group ivbep_uncore_qpi_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_qpi_formats_attr,
};

@@ -1887,7 +1887,7 @@ static struct attribute *knl_uncore_ubox_formats_attr[] = {
	NULL,
};

static struct attribute_group knl_uncore_ubox_format_group = {
static const struct attribute_group knl_uncore_ubox_format_group = {
	.name = "format",
	.attrs = knl_uncore_ubox_formats_attr,
};

@@ -1927,7 +1927,7 @@ static struct attribute *knl_uncore_cha_formats_attr[] = {
	NULL,
};

static struct attribute_group knl_uncore_cha_format_group = {
static const struct attribute_group knl_uncore_cha_format_group = {
	.name = "format",
	.attrs = knl_uncore_cha_formats_attr,
};

@@ -2037,7 +2037,7 @@ static struct attribute *knl_uncore_pcu_formats_attr[] = {
	NULL,
};

static struct attribute_group knl_uncore_pcu_format_group = {
static const struct attribute_group knl_uncore_pcu_format_group = {
	.name = "format",
	.attrs = knl_uncore_pcu_formats_attr,
};

@@ -2187,7 +2187,7 @@ static struct attribute *knl_uncore_irp_formats_attr[] = {
	NULL,
};

static struct attribute_group knl_uncore_irp_format_group = {
static const struct attribute_group knl_uncore_irp_format_group = {
	.name = "format",
	.attrs = knl_uncore_irp_formats_attr,
};

@@ -2385,7 +2385,7 @@ static struct attribute *hswep_uncore_ubox_formats_attr[] = {
	NULL,
};

static struct attribute_group hswep_uncore_ubox_format_group = {
static const struct attribute_group hswep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = hswep_uncore_ubox_formats_attr,
};

@@ -2439,7 +2439,7 @@ static struct attribute *hswep_uncore_cbox_formats_attr[] = {
	NULL,
};

static struct attribute_group hswep_uncore_cbox_format_group = {
static const struct attribute_group hswep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = hswep_uncore_cbox_formats_attr,
};

@@ -2621,7 +2621,7 @@ static struct attribute *hswep_uncore_sbox_formats_attr[] = {
	NULL,
};

static struct attribute_group hswep_uncore_sbox_format_group = {
static const struct attribute_group hswep_uncore_sbox_format_group = {
	.name = "format",
	.attrs = hswep_uncore_sbox_formats_attr,
};

@@ -3314,7 +3314,7 @@ static struct attribute *skx_uncore_cha_formats_attr[] = {
	NULL,
};

static struct attribute_group skx_uncore_chabox_format_group = {
static const struct attribute_group skx_uncore_chabox_format_group = {
	.name = "format",
	.attrs = skx_uncore_cha_formats_attr,
};

@@ -3427,7 +3427,7 @@ static struct attribute *skx_uncore_iio_formats_attr[] = {
	NULL,
};

static struct attribute_group skx_uncore_iio_format_group = {
static const struct attribute_group skx_uncore_iio_format_group = {
	.name = "format",
	.attrs = skx_uncore_iio_formats_attr,
};

@@ -3484,7 +3484,7 @@ static struct attribute *skx_uncore_formats_attr[] = {
	NULL,
};

static struct attribute_group skx_uncore_format_group = {
static const struct attribute_group skx_uncore_format_group = {
	.name = "format",
	.attrs = skx_uncore_formats_attr,
};

@@ -3605,7 +3605,7 @@ static struct attribute *skx_upi_uncore_formats_attr[] = {
	NULL,
};

static struct attribute_group skx_upi_uncore_format_group = {
static const struct attribute_group skx_upi_uncore_format_group = {
	.name = "format",
	.attrs = skx_upi_uncore_formats_attr,
};
@@ -286,7 +286,7 @@
#define X86_FEATURE_PAUSEFILTER (15*32+10) /* filtered pause intercept */
#define X86_FEATURE_PFTHRESHOLD (15*32+12) /* pause filter threshold */
#define X86_FEATURE_AVIC (15*32+13) /* Virtual Interrupt Controller */
#define X86_FEATURE_VIRTUAL_VMLOAD_VMSAVE (15*32+15) /* Virtual VMLOAD VMSAVE */
#define X86_FEATURE_V_VMSAVE_VMLOAD (15*32+15) /* Virtual VMSAVE VMLOAD */

/* Intel-defined CPU features, CPUID level 0x00000007:0 (ecx), word 16 */
#define X86_FEATURE_AVX512VBMI (16*32+ 1) /* AVX512 Vector Bit Manipulation instructions*/
@@ -247,11 +247,11 @@ extern int force_personality32;

/*
 * This is the base location for PIE (ET_DYN with INTERP) loads. On
 * 64-bit, this is raised to 4GB to leave the entire 32-bit address
 * 64-bit, this is above 4GB to leave the entire 32-bit address
 * space open for things that want to use the area for 32-bit pointers.
 */
#define ELF_ET_DYN_BASE		(mmap_is_ia32() ? 0x000400000UL : \
						  0x100000000UL)
						  (TASK_SIZE / 3 * 2))

/* This yields a mask that user programs can use to figure out what
   instruction set this CPU supports. This could be done in user space,
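With 4-level paging, 64-bit TASK_SIZE is 0x7ffffffff000, so the restored expression puts the PIE base near the familiar 0x555555554000 rather than at a fixed 4 GB mark, while still staying clear of the 32-bit range. A quick check of the arithmetic (TASK_SIZE hardcoded here as an assumption, not read from the kernel):

	#include <stdio.h>

	int main(void)
	{
	        /* Assumed x86_64 4-level TASK_SIZE. */
	        unsigned long task_size = 0x00007ffffffff000UL;
	        unsigned long old_base  = 0x100000000UL;    /* fixed 4GB */
	        unsigned long new_base  = task_size / 3 * 2;

	        printf("old ELF_ET_DYN_BASE: %#lx\n", old_base);
	        printf("new ELF_ET_DYN_BASE: %#lx\n", new_base);
	        /* prints 0x555555554aaa; ELF loads round to a page */
	        return 0;
	}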
@@ -450,10 +450,10 @@ static inline int copy_fpregs_to_fpstate(struct fpu *fpu)
	return 0;
}

static inline void __copy_kernel_to_fpregs(union fpregs_state *fpstate)
static inline void __copy_kernel_to_fpregs(union fpregs_state *fpstate, u64 mask)
{
	if (use_xsave()) {
		copy_kernel_to_xregs(&fpstate->xsave, -1);
		copy_kernel_to_xregs(&fpstate->xsave, mask);
	} else {
		if (use_fxsr())
			copy_kernel_to_fxregs(&fpstate->fxsave);

@@ -477,7 +477,7 @@ static inline void copy_kernel_to_fpregs(union fpregs_state *fpstate)
		: : [addr] "m" (fpstate));
}

	__copy_kernel_to_fpregs(fpstate);
	__copy_kernel_to_fpregs(fpstate, -1);
}

extern int copy_fpstate_to_sigframe(void __user *buf, void __user *fp, int size);

@@ -492,6 +492,7 @@ struct kvm_vcpu_arch {
	unsigned long cr4;
	unsigned long cr4_guest_owned_bits;
	unsigned long cr8;
	u32 pkru;
	u32 hflags;
	u64 efer;
	u64 apic_base;
@@ -140,9 +140,7 @@ static inline int init_new_context(struct task_struct *tsk,
		mm->context.execute_only_pkey = -1;
	}
#endif
	init_new_context_ldt(tsk, mm);

	return 0;
	return init_new_context_ldt(tsk, mm);
}
static inline void destroy_context(struct mm_struct *mm)
{
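The point of this hunk is pure error propagation: init_new_context_ldt()'s result used to be discarded, so a failed LDT setup was reported as success. The bug class in miniature (illustrative names, standalone C):

	#include <errno.h>
	#include <stdio.h>

	static int init_ldt(void)            /* stand-in for a fallible sub-init */
	{
	        return -ENOMEM;              /* pretend allocation failed */
	}

	/* Buggy shape: result discarded, caller always sees success. */
	static int init_context_buggy(void)
	{
	        init_ldt();
	        return 0;
	}

	/* Fixed shape: propagate the sub-init's result. */
	static int init_context_fixed(void)
	{
	        return init_ldt();
	}

	int main(void)
	{
	        printf("buggy: %d, fixed: %d\n",
	               init_context_buggy(), init_context_fixed());
	        return 0;                    /* buggy: 0, fixed: -12 */
	}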
@@ -40,13 +40,16 @@ static void aperfmperf_snapshot_khz(void *dummy)
	struct aperfmperf_sample *s = this_cpu_ptr(&samples);
	ktime_t now = ktime_get();
	s64 time_delta = ktime_ms_delta(now, s->time);
	unsigned long flags;

	/* Don't bother re-computing within the cache threshold time. */
	if (time_delta < APERFMPERF_CACHE_THRESHOLD_MS)
		return;

	local_irq_save(flags);
	rdmsrl(MSR_IA32_APERF, aperf);
	rdmsrl(MSR_IA32_MPERF, mperf);
	local_irq_restore(flags);

	aperf_delta = aperf - s->aperf;
	mperf_delta = mperf - s->mperf;

@@ -122,7 +122,7 @@ static struct attribute *thermal_throttle_attrs[] = {
	NULL
};

static struct attribute_group thermal_attr_group = {
static const struct attribute_group thermal_attr_group = {
	.attrs = thermal_throttle_attrs,
	.name = "thermal_throttle"
};

@@ -561,7 +561,7 @@ static struct attribute *mc_default_attrs[] = {
	NULL
};

static struct attribute_group mc_attr_group = {
static const struct attribute_group mc_attr_group = {
	.attrs = mc_default_attrs,
	.name = "microcode",
};

@@ -707,7 +707,7 @@ static struct attribute *cpu_root_microcode_attrs[] = {
	NULL
};

static struct attribute_group cpu_root_microcode_group = {
static const struct attribute_group cpu_root_microcode_group = {
	.name = "microcode",
	.attrs = cpu_root_microcode_attrs,
};
@@ -237,6 +237,18 @@ set_mtrr(unsigned int reg, unsigned long base, unsigned long size, mtrr_type typ
	stop_machine(mtrr_rendezvous_handler, &data, cpu_online_mask);
}

static void set_mtrr_cpuslocked(unsigned int reg, unsigned long base,
				unsigned long size, mtrr_type type)
{
	struct set_mtrr_data data = { .smp_reg = reg,
				      .smp_base = base,
				      .smp_size = size,
				      .smp_type = type
				    };

	stop_machine_cpuslocked(mtrr_rendezvous_handler, &data, cpu_online_mask);
}

static void set_mtrr_from_inactive_cpu(unsigned int reg, unsigned long base,
				       unsigned long size, mtrr_type type)
{

@@ -370,7 +382,7 @@ int mtrr_add_page(unsigned long base, unsigned long size,
	/* Search for an empty MTRR */
	i = mtrr_if->get_free_region(base, size, replace);
	if (i >= 0) {
		set_mtrr(i, base, size, type);
		set_mtrr_cpuslocked(i, base, size, type);
		if (likely(replace < 0)) {
			mtrr_usage_table[i] = 1;
		} else {

@@ -378,7 +390,7 @@ int mtrr_add_page(unsigned long base, unsigned long size,
			if (increment)
				mtrr_usage_table[i]++;
			if (unlikely(replace != i)) {
				set_mtrr(replace, 0, 0, 0);
				set_mtrr_cpuslocked(replace, 0, 0, 0);
				mtrr_usage_table[replace] = 0;
			}
		}

@@ -506,7 +518,7 @@ int mtrr_del_page(int reg, unsigned long base, unsigned long size)
		goto out;
	}
	if (--mtrr_usage_table[reg] < 1)
		set_mtrr(reg, 0, 0, 0);
		set_mtrr_cpuslocked(reg, 0, 0, 0);
	error = reg;
out:
	mutex_unlock(&mtrr_mutex);
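mtrr_add_page()/mtrr_del_page() already run with the CPU hotplug lock held, and plain stop_machine() tries to take that lock again; the _cpuslocked variant exists for exactly this calling context. A generic sketch of the locked/unlocked pairing (pthread model; all names invented for the demo):

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t hotplug_lock = PTHREAD_MUTEX_INITIALIZER;

	/* Variant for callers that ALREADY hold hotplug_lock. */
	static void do_update_locked(const char *what)
	{
	        printf("updating %s with lock held\n", what);
	}

	/* Variant for callers that do not hold the lock. */
	static void do_update(const char *what)
	{
	        pthread_mutex_lock(&hotplug_lock);
	        do_update_locked(what);
	        pthread_mutex_unlock(&hotplug_lock);
	}

	int main(void)
	{
	        do_update("reg 0");                 /* unlocked caller */

	        pthread_mutex_lock(&hotplug_lock);  /* e.g. a hotplug path */
	        do_update_locked("reg 1");          /* must use _locked form */
	        pthread_mutex_unlock(&hotplug_lock);
	        return 0;
	}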
@@ -53,6 +53,7 @@ void __head __startup_64(unsigned long physaddr)
	pudval_t *pud;
	pmdval_t *pmd, pmd_entry;
	int i;
	unsigned int *next_pgt_ptr;

	/* Is the address too large? */
	if (physaddr >> MAX_PHYSMEM_BITS)

@@ -91,9 +92,9 @@ void __head __startup_64(unsigned long physaddr)
	 * creates a bunch of nonsense entries but that is fine --
	 * it avoids problems around wraparound.
	 */

	pud = fixup_pointer(early_dynamic_pgts[next_early_pgt++], physaddr);
	pmd = fixup_pointer(early_dynamic_pgts[next_early_pgt++], physaddr);
	next_pgt_ptr = fixup_pointer(&next_early_pgt, physaddr);
	pud = fixup_pointer(early_dynamic_pgts[(*next_pgt_ptr)++], physaddr);
	pmd = fixup_pointer(early_dynamic_pgts[(*next_pgt_ptr)++], physaddr);

	if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
		p4d = fixup_pointer(early_dynamic_pgts[next_early_pgt++], physaddr);
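__startup_64() executes before relocations are applied, so even the address of the global next_early_pgt must be run through fixup_pointer(); the bug was incrementing the global through its unadjusted link-time address. A standalone model of the rebasing arithmetic (the two-copy setup below is purely illustrative):

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	/* Rebase a link-time address onto the actual load address. */
	static void *fixup_pointer(void *ptr, uintptr_t link_base,
	                           uintptr_t load_base)
	{
	        return (void *)((uintptr_t)ptr - link_base + load_base);
	}

	struct image { unsigned int next_early_pgt; };
	static struct image linked;          /* link-time placement */

	int main(void)
	{
	        struct image loaded;         /* pretend the loader put it here */
	        memcpy(&loaded, &linked, sizeof(loaded));

	        /* The fix: touch the counter through the fixed-up address,
	         * never through the link-time symbol directly. */
	        unsigned int *p = fixup_pointer(&linked.next_early_pgt,
	                                        (uintptr_t)&linked,
	                                        (uintptr_t)&loaded);
	        (*p)++;
	        printf("loaded counter = %u\n", loaded.next_early_pgt);
	        return 0;
	}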
@@ -55,7 +55,7 @@ static struct bin_attribute *boot_params_data_attrs[] = {
	NULL,
};

static struct attribute_group boot_params_attr_group = {
static const struct attribute_group boot_params_attr_group = {
	.attrs = boot_params_version_attrs,
	.bin_attrs = boot_params_data_attrs,
};

@@ -202,7 +202,7 @@ static struct bin_attribute *setup_data_data_attrs[] = {
	NULL,
};

static struct attribute_group setup_data_attr_group = {
static const struct attribute_group setup_data_attr_group = {
	.attrs = setup_data_type_attrs,
	.bin_attrs = setup_data_data_attrs,
};

@@ -971,7 +971,8 @@ void common_cpu_up(unsigned int cpu, struct task_struct *idle)
 * Returns zero if CPU booted OK, else error code from
 * ->wakeup_secondary_cpu.
 */
static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle,
		       int *cpu0_nmi_registered)
{
	volatile u32 *trampoline_status =
		(volatile u32 *) __va(real_mode_header->trampoline_status);

@@ -979,7 +980,6 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
	unsigned long start_ip = real_mode_header->trampoline_start;

	unsigned long boot_error = 0;
	int cpu0_nmi_registered = 0;
	unsigned long timeout;

	idle->thread.sp = (unsigned long)task_pt_regs(idle);

@@ -1035,7 +1035,7 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
		boot_error = apic->wakeup_secondary_cpu(apicid, start_ip);
	else
		boot_error = wakeup_cpu_via_init_nmi(cpu, start_ip, apicid,
						     &cpu0_nmi_registered);
						     cpu0_nmi_registered);

	if (!boot_error) {
		/*

@@ -1080,12 +1080,6 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
		 */
		smpboot_restore_warm_reset_vector();
	}
	/*
	 * Clean up the nmi handler. Do this after the callin and callout sync
	 * to avoid impact of possible long unregister time.
	 */
	if (cpu0_nmi_registered)
		unregister_nmi_handler(NMI_LOCAL, "wake_cpu0");

	return boot_error;
}

@@ -1093,8 +1087,9 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	int apicid = apic->cpu_present_to_apicid(cpu);
	int cpu0_nmi_registered = 0;
	unsigned long flags;
	int err;
	int err, ret = 0;

	WARN_ON(irqs_disabled());

@@ -1131,10 +1126,11 @@ int native_cpu_up(unsigned int cpu, struct task_struct *tidle)

	common_cpu_up(cpu, tidle);

	err = do_boot_cpu(apicid, cpu, tidle);
	err = do_boot_cpu(apicid, cpu, tidle, &cpu0_nmi_registered);
	if (err) {
		pr_err("do_boot_cpu failed(%d) to wakeup CPU#%u\n", err, cpu);
		return -EIO;
		ret = -EIO;
		goto unreg_nmi;
	}

	/*

@@ -1150,7 +1146,15 @@ int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
		touch_nmi_watchdog();
	}

	return 0;
unreg_nmi:
	/*
	 * Clean up the nmi handler. Do this after the callin and callout sync
	 * to avoid impact of possible long unregister time.
	 */
	if (cpu0_nmi_registered)
		unregister_nmi_handler(NMI_LOCAL, "wake_cpu0");

	return ret;
}

/**
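Moving the NMI-handler cleanup out of do_boot_cpu() and behind a single unreg_nmi label in native_cpu_up() guarantees it runs on both the success and the failure path. The goto-cleanup shape, reduced to a runnable toy (all names invented):

	#include <errno.h>
	#include <stdbool.h>
	#include <stdio.h>

	static int boot_cpu(int cpu, bool *nmi_registered)
	{
	        *nmi_registered = true;     /* wakeup path registered a handler */
	        return cpu == 0 ? 0 : -EIO; /* pretend CPU 1 fails to boot */
	}

	static int cpu_up(int cpu)
	{
	        bool nmi_registered = false;
	        int err, ret = 0;

	        err = boot_cpu(cpu, &nmi_registered);
	        if (err) {
	                ret = -EIO;
	                goto unreg_nmi;     /* failure also reaches cleanup */
	        }
	        /* ... wait for the new CPU to come online ... */

	unreg_nmi:
	        if (nmi_registered)         /* exactly one cleanup point */
	                printf("cpu %d: handler unregistered, ret=%d\n",
	                       cpu, ret);
	        return ret;
	}

	int main(void)
	{
	        cpu_up(0);
	        cpu_up(1);
	        return 0;
	}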
@@ -469,7 +469,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
		entry->ecx &= kvm_cpuid_7_0_ecx_x86_features;
		cpuid_mask(&entry->ecx, CPUID_7_ECX);
		/* PKU is not yet implemented for shadow paging. */
		if (!tdp_enabled)
		if (!tdp_enabled || !boot_cpu_has(X86_FEATURE_OSPKE))
			entry->ecx &= ~F(PKU);
		entry->edx &= kvm_cpuid_7_0_edx_x86_features;
		entry->edx &= get_scattered_cpuid_leaf(7, 0, CPUID_EDX);

@@ -84,11 +84,6 @@ static inline u64 kvm_read_edx_eax(struct kvm_vcpu *vcpu)
		| ((u64)(kvm_register_read(vcpu, VCPU_REGS_RDX) & -1u) << 32);
}

static inline u32 kvm_read_pkru(struct kvm_vcpu *vcpu)
{
	return kvm_x86_ops->get_pkru(vcpu);
}

static inline void enter_guest_mode(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hflags |= HF_GUEST_MASK;

@@ -185,7 +185,7 @@ static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
		 * index of the protection domain, so pte_pkey * 2 is
		 * is the index of the first bit for the domain.
		 */
		pkru_bits = (kvm_read_pkru(vcpu) >> (pte_pkey * 2)) & 3;
		pkru_bits = (vcpu->arch.pkru >> (pte_pkey * 2)) & 3;

		/* clear present bit, replace PFEC.RSVD with ACC_USER_MASK. */
		offset = (pfec & ~1) +
@@ -1100,7 +1100,7 @@ static __init int svm_hardware_setup(void)

	if (vls) {
		if (!npt_enabled ||
		    !boot_cpu_has(X86_FEATURE_VIRTUAL_VMLOAD_VMSAVE) ||
		    !boot_cpu_has(X86_FEATURE_V_VMSAVE_VMLOAD) ||
		    !IS_ENABLED(CONFIG_X86_64)) {
			vls = false;
		} else {

@@ -1777,11 +1777,6 @@ static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
	to_svm(vcpu)->vmcb->save.rflags = rflags;
}

static u32 svm_get_pkru(struct kvm_vcpu *vcpu)
{
	return 0;
}

static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
{
	switch (reg) {

@@ -5413,8 +5408,6 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
	.get_rflags = svm_get_rflags,
	.set_rflags = svm_set_rflags,

	.get_pkru = svm_get_pkru,

	.tlb_flush = svm_flush_tlb,

	.run = svm_vcpu_run,
@@ -636,8 +636,6 @@ struct vcpu_vmx {

	u64 current_tsc_ratio;

	bool guest_pkru_valid;
	u32 guest_pkru;
	u32 host_pkru;

	/*

@@ -2383,11 +2381,6 @@ static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
	to_vmx(vcpu)->emulation_required = emulation_required(vcpu);
}

static u32 vmx_get_pkru(struct kvm_vcpu *vcpu)
{
	return to_vmx(vcpu)->guest_pkru;
}

static u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu)
{
	u32 interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);

@@ -9020,8 +9013,10 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
		vmx_set_interrupt_shadow(vcpu, 0);

	if (vmx->guest_pkru_valid)
		__write_pkru(vmx->guest_pkru);
	if (static_cpu_has(X86_FEATURE_PKU) &&
	    kvm_read_cr4_bits(vcpu, X86_CR4_PKE) &&
	    vcpu->arch.pkru != vmx->host_pkru)
		__write_pkru(vcpu->arch.pkru);

	atomic_switch_perf_msrs(vmx);
	debugctlmsr = get_debugctlmsr();

@@ -9169,13 +9164,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
	 * back on host, so it is safe to read guest PKRU from current
	 * XSAVE.
	 */
	if (boot_cpu_has(X86_FEATURE_OSPKE)) {
		vmx->guest_pkru = __read_pkru();
		if (vmx->guest_pkru != vmx->host_pkru) {
			vmx->guest_pkru_valid = true;
	if (static_cpu_has(X86_FEATURE_PKU) &&
	    kvm_read_cr4_bits(vcpu, X86_CR4_PKE)) {
		vcpu->arch.pkru = __read_pkru();
		if (vcpu->arch.pkru != vmx->host_pkru)
			__write_pkru(vmx->host_pkru);
	} else
		vmx->guest_pkru_valid = false;
	}

	/*

@@ -11682,8 +11675,6 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
	.get_rflags = vmx_get_rflags,
	.set_rflags = vmx_set_rflags,

	.get_pkru = vmx_get_pkru,

	.tlb_flush = vmx_flush_tlb,

	.run = vmx_vcpu_run,
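The PKRU rework drops the guest_pkru/guest_pkru_valid cache and keeps the guest value in vcpu->arch.pkru, touching the register around entry/exit only when PKU is actually in use and the values differ. A loose userspace model of that swap protocol (register and values simulated; 0x55555554 is just a plausible host default):

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t hw_pkru;                 /* stand-in for the register */
	static uint32_t read_pkru(void)    { return hw_pkru; }
	static void write_pkru(uint32_t v) { hw_pkru = v; }

	struct vcpu { uint32_t pkru; int pke_enabled; };

	static void vcpu_run(struct vcpu *v, uint32_t host_pkru)
	{
	        /* Entry: install guest PKRU only if it differs. */
	        if (v->pke_enabled && v->pkru != host_pkru)
	                write_pkru(v->pkru);

	        hw_pkru ^= 0x4;                  /* guest runs, flips a key */

	        /* Exit: save guest value, restore host value if needed. */
	        if (v->pke_enabled) {
	                v->pkru = read_pkru();
	                if (v->pkru != host_pkru)
	                        write_pkru(host_pkru);
	        }
	}

	int main(void)
	{
	        uint32_t host_pkru = 0x55555554;
	        hw_pkru = host_pkru;
	        struct vcpu v = { .pkru = 0, .pke_enabled = 1 };

	        vcpu_run(&v, host_pkru);
	        printf("guest pkru=%#x, hw pkru=%#x\n", v.pkru, hw_pkru);
	        return 0;                        /* guest 0x4, hw restored */
	}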
@@ -3245,7 +3245,12 @@ static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu)
			u32 size, offset, ecx, edx;
			cpuid_count(XSTATE_CPUID, index,
				    &size, &offset, &ecx, &edx);
			if (feature == XFEATURE_MASK_PKRU)
				memcpy(dest + offset, &vcpu->arch.pkru,
				       sizeof(vcpu->arch.pkru));
			else
				memcpy(dest + offset, src, size);

		}

		valid -= feature;

@@ -3283,6 +3288,10 @@ static void load_xsave(struct kvm_vcpu *vcpu, u8 *src)
			u32 size, offset, ecx, edx;
			cpuid_count(XSTATE_CPUID, index,
				    &size, &offset, &ecx, &edx);
			if (feature == XFEATURE_MASK_PKRU)
				memcpy(&vcpu->arch.pkru, src + offset,
				       sizeof(vcpu->arch.pkru));
			else
				memcpy(dest, src + offset, size);
		}

@@ -7633,7 +7642,9 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
	 */
	vcpu->guest_fpu_loaded = 1;
	__kernel_fpu_begin();
	__copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state);
	/* PKRU is separately restored in kvm_x86_ops->run. */
	__copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state,
				~XFEATURE_MASK_PKRU);
	trace_kvm_fpu(1);
}
@@ -50,8 +50,7 @@ unsigned long tasksize_64bit(void)
static unsigned long stack_maxrandom_size(unsigned long task_size)
{
	unsigned long max = 0;
	if ((current->flags & PF_RANDOMIZE) &&
	    !(current->personality & ADDR_NO_RANDOMIZE)) {
	if (current->flags & PF_RANDOMIZE) {
		max = (-1UL) & __STACK_RND_MASK(task_size == tasksize_32bit());
		max <<= PAGE_SHIFT;
	}

@@ -79,13 +78,13 @@ static int mmap_is_legacy(void)

static unsigned long arch_rnd(unsigned int rndbits)
{
	if (!(current->flags & PF_RANDOMIZE))
		return 0;
	return (get_random_long() & ((1UL << rndbits) - 1)) << PAGE_SHIFT;
}

unsigned long arch_mmap_rnd(void)
{
	if (!(current->flags & PF_RANDOMIZE))
		return 0;
	return arch_rnd(mmap_is_ia32() ? mmap32_rnd_bits : mmap64_rnd_bits);
}

@@ -26,7 +26,7 @@
static struct bau_operations ops __ro_after_init;

/* timeouts in nanoseconds (indexed by UVH_AGING_PRESCALE_SEL urgency7 30:28) */
static int timeout_base_ns[] = {
static const int timeout_base_ns[] = {
	20,
	160,
	1280,

@@ -1216,7 +1216,7 @@ static struct bau_pq_entry *find_another_by_swack(struct bau_pq_entry *msg,
 * set a bit in the UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE register.
 * Such a message must be ignored.
 */
void process_uv2_message(struct msg_desc *mdp, struct bau_control *bcp)
static void process_uv2_message(struct msg_desc *mdp, struct bau_control *bcp)
{
	unsigned long mmr_image;
	unsigned char swack_vec;
@@ -75,6 +75,8 @@ static const char *const blk_queue_flag_name[] = {
	QUEUE_FLAG_NAME(STATS),
	QUEUE_FLAG_NAME(POLL_STATS),
	QUEUE_FLAG_NAME(REGISTERED),
	QUEUE_FLAG_NAME(SCSI_PASSTHROUGH),
	QUEUE_FLAG_NAME(QUIESCED),
};
#undef QUEUE_FLAG_NAME

@@ -265,6 +267,7 @@ static const char *const cmd_flag_name[] = {
	CMD_FLAG_NAME(RAHEAD),
	CMD_FLAG_NAME(BACKGROUND),
	CMD_FLAG_NAME(NOUNMAP),
	CMD_FLAG_NAME(NOWAIT),
};
#undef CMD_FLAG_NAME
@@ -36,12 +36,18 @@ int blk_mq_pci_map_queues(struct blk_mq_tag_set *set, struct pci_dev *pdev)
	for (queue = 0; queue < set->nr_hw_queues; queue++) {
		mask = pci_irq_get_affinity(pdev, queue);
		if (!mask)
			return -EINVAL;
			goto fallback;

		for_each_cpu(cpu, mask)
			set->mq_map[cpu] = queue;
	}

	return 0;

fallback:
	WARN_ON_ONCE(set->nr_hw_queues > 1);
	for_each_possible_cpu(cpu)
		set->mq_map[cpu] = 0;
	return 0;
}
EXPORT_SYMBOL_GPL(blk_mq_pci_map_queues);
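Instead of failing blk_mq_pci_map_queues() outright when a vector has no affinity mask, the fallback now maps every possible CPU to hardware queue 0. A toy version of that shape (mask layout and names invented for the demo):

	#include <stdio.h>

	#define NR_CPUS 8

	/* Toy: per-queue CPU mask, 0 meaning "no affinity available". */
	static unsigned int queue_affinity(int queue)
	{
	        return queue == 1 ? 0 : 0x0fu << (queue * 4);
	}

	static int map_queues(int nr_hw_queues, int mq_map[NR_CPUS])
	{
	        for (int queue = 0; queue < nr_hw_queues; queue++) {
	                unsigned int mask = queue_affinity(queue);
	                if (!mask)
	                        goto fallback;        /* was: return -EINVAL */
	                for (int cpu = 0; cpu < NR_CPUS; cpu++)
	                        if (mask & (1u << cpu))
	                                mq_map[cpu] = queue;
	        }
	        return 0;

	fallback:
	        for (int cpu = 0; cpu < NR_CPUS; cpu++)
	                mq_map[cpu] = 0;              /* everything on queue 0 */
	        return 0;
	}

	int main(void)
	{
	        int mq_map[NR_CPUS];
	        map_queues(2, mq_map);                /* queue 1 "fails" */
	        for (int cpu = 0; cpu < NR_CPUS; cpu++)
	                printf("cpu%d -> hwq%d\n", cpu, mq_map[cpu]);
	        return 0;
	}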
@@ -360,12 +360,12 @@ struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
		return ERR_PTR(ret);

	rq = blk_mq_get_request(q, NULL, op, &alloc_data);
	blk_queue_exit(q);

	if (!rq)
		return ERR_PTR(-EWOULDBLOCK);

	blk_mq_put_ctx(alloc_data.ctx);
	blk_queue_exit(q);

	rq->__data_len = 0;
	rq->__sector = (sector_t) -1;

@@ -411,12 +411,11 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
	alloc_data.ctx = __blk_mq_get_ctx(q, cpu);

	rq = blk_mq_get_request(q, NULL, op, &alloc_data);
	blk_queue_exit(q);

	if (!rq)
		return ERR_PTR(-EWOULDBLOCK);

	blk_queue_exit(q);

	return rq;
}
EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);
@@ -382,6 +382,14 @@ static unsigned int tg_iops_limit(struct throtl_grp *tg, int rw)
	}								\
} while (0)

static inline unsigned int throtl_bio_data_size(struct bio *bio)
{
	/* assume it's one sector */
	if (unlikely(bio_op(bio) == REQ_OP_DISCARD))
		return 512;
	return bio->bi_iter.bi_size;
}

static void throtl_qnode_init(struct throtl_qnode *qn, struct throtl_grp *tg)
{
	INIT_LIST_HEAD(&qn->node);

@@ -934,6 +942,7 @@ static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
	bool rw = bio_data_dir(bio);
	u64 bytes_allowed, extra_bytes, tmp;
	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
	unsigned int bio_size = throtl_bio_data_size(bio);

	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];

@@ -947,14 +956,14 @@ static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
	do_div(tmp, HZ);
	bytes_allowed = tmp;

	if (tg->bytes_disp[rw] + bio->bi_iter.bi_size <= bytes_allowed) {
	if (tg->bytes_disp[rw] + bio_size <= bytes_allowed) {
		if (wait)
			*wait = 0;
		return true;
	}

	/* Calc approx time to dispatch */
	extra_bytes = tg->bytes_disp[rw] + bio->bi_iter.bi_size - bytes_allowed;
	extra_bytes = tg->bytes_disp[rw] + bio_size - bytes_allowed;
	jiffy_wait = div64_u64(extra_bytes * HZ, tg_bps_limit(tg, rw));

	if (!jiffy_wait)

@@ -1034,11 +1043,12 @@ static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
{
	bool rw = bio_data_dir(bio);
	unsigned int bio_size = throtl_bio_data_size(bio);

	/* Charge the bio to the group */
	tg->bytes_disp[rw] += bio->bi_iter.bi_size;
	tg->bytes_disp[rw] += bio_size;
	tg->io_disp[rw]++;
	tg->last_bytes_disp[rw] += bio->bi_iter.bi_size;
	tg->last_bytes_disp[rw] += bio_size;
	tg->last_io_disp[rw]++;

	/*
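A discard bio may describe gigabytes while transferring essentially no payload, so charging bi_iter.bi_size against a bytes-per-second limit would stall discards; the new helper charges each discard as one 512-byte sector. The helper's effect, runnable standalone (struct layout simplified):

	#include <stdint.h>
	#include <stdio.h>

	enum bio_op { OP_READ, OP_WRITE, OP_DISCARD };

	struct bio { enum bio_op op; uint32_t bi_size; };

	/* Mirror of throtl_bio_data_size(): discards count as one sector. */
	static uint32_t throtl_bio_data_size(const struct bio *bio)
	{
	        if (bio->op == OP_DISCARD)
	                return 512;
	        return bio->bi_size;
	}

	int main(void)
	{
	        struct bio write_bio   = { OP_WRITE,   1u << 20 };  /* 1 MiB */
	        struct bio discard_bio = { OP_DISCARD, 1u << 30 };  /* 1 GiB */

	        printf("write charged:   %u bytes\n",
	               throtl_bio_data_size(&write_bio));          /* 1048576 */
	        printf("discard charged: %u bytes\n",
	               throtl_bio_data_size(&discard_bio));        /* 512 */
	        return 0;
	}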
@@ -29,26 +29,25 @@
#include <scsi/scsi_cmnd.h>

/**
 * bsg_destroy_job - routine to teardown/delete a bsg job
 * bsg_teardown_job - routine to teardown a bsg job
 * @job: bsg_job that is to be torn down
 */
static void bsg_destroy_job(struct kref *kref)
static void bsg_teardown_job(struct kref *kref)
{
	struct bsg_job *job = container_of(kref, struct bsg_job, kref);
	struct request *rq = job->req;

	blk_end_request_all(rq, BLK_STS_OK);

	put_device(job->dev);	/* release reference for the request */

	kfree(job->request_payload.sg_list);
	kfree(job->reply_payload.sg_list);
	kfree(job);

	blk_end_request_all(rq, BLK_STS_OK);
}

void bsg_job_put(struct bsg_job *job)
{
	kref_put(&job->kref, bsg_destroy_job);
	kref_put(&job->kref, bsg_teardown_job);
}
EXPORT_SYMBOL_GPL(bsg_job_put);

@@ -100,7 +99,7 @@ EXPORT_SYMBOL_GPL(bsg_job_done);
 */
static void bsg_softirq_done(struct request *rq)
{
	struct bsg_job *job = rq->special;
	struct bsg_job *job = blk_mq_rq_to_pdu(rq);

	bsg_job_put(job);
}

@@ -122,33 +121,20 @@ static int bsg_map_buffer(struct bsg_buffer *buf, struct request *req)
}

/**
 * bsg_create_job - create the bsg_job structure for the bsg request
 * bsg_prepare_job - create the bsg_job structure for the bsg request
 * @dev: device that is being sent the bsg request
 * @req: BSG request that needs a job structure
 */
static int bsg_create_job(struct device *dev, struct request *req)
static int bsg_prepare_job(struct device *dev, struct request *req)
{
	struct request *rsp = req->next_rq;
	struct request_queue *q = req->q;
	struct scsi_request *rq = scsi_req(req);
	struct bsg_job *job;
	struct bsg_job *job = blk_mq_rq_to_pdu(req);
	int ret;

	BUG_ON(req->special);

	job = kzalloc(sizeof(struct bsg_job) + q->bsg_job_size, GFP_KERNEL);
	if (!job)
		return -ENOMEM;

	req->special = job;
	job->req = req;
	if (q->bsg_job_size)
		job->dd_data = (void *)&job[1];
	job->request = rq->cmd;
	job->request_len = rq->cmd_len;
	job->reply = rq->sense;
	job->reply_len = SCSI_SENSE_BUFFERSIZE;	/* Size of sense buffer
						 * allocated */

	if (req->bio) {
		ret = bsg_map_buffer(&job->request_payload, req);
		if (ret)

@@ -187,7 +173,6 @@ static void bsg_request_fn(struct request_queue *q)
{
	struct device *dev = q->queuedata;
	struct request *req;
	struct bsg_job *job;
	int ret;

	if (!get_device(dev))

@@ -199,7 +184,7 @@ static void bsg_request_fn(struct request_queue *q)
			break;
		spin_unlock_irq(q->queue_lock);

		ret = bsg_create_job(dev, req);
		ret = bsg_prepare_job(dev, req);
		if (ret) {
			scsi_req(req)->result = ret;
			blk_end_request_all(req, BLK_STS_OK);

@@ -207,8 +192,7 @@ static void bsg_request_fn(struct request_queue *q)
			continue;
		}

		job = req->special;
		ret = q->bsg_job_fn(job);
		ret = q->bsg_job_fn(blk_mq_rq_to_pdu(req));
		spin_lock_irq(q->queue_lock);
		if (ret)
			break;

@@ -219,6 +203,35 @@ static void bsg_request_fn(struct request_queue *q)
	spin_lock_irq(q->queue_lock);
}

static int bsg_init_rq(struct request_queue *q, struct request *req, gfp_t gfp)
{
	struct bsg_job *job = blk_mq_rq_to_pdu(req);
	struct scsi_request *sreq = &job->sreq;

	memset(job, 0, sizeof(*job));

	scsi_req_init(sreq);
	sreq->sense_len = SCSI_SENSE_BUFFERSIZE;
	sreq->sense = kzalloc(sreq->sense_len, gfp);
	if (!sreq->sense)
		return -ENOMEM;

	job->req = req;
	job->reply = sreq->sense;
	job->reply_len = sreq->sense_len;
	job->dd_data = job + 1;

	return 0;
}

static void bsg_exit_rq(struct request_queue *q, struct request *req)
{
	struct bsg_job *job = blk_mq_rq_to_pdu(req);
	struct scsi_request *sreq = &job->sreq;

	kfree(sreq->sense);
}

/**
 * bsg_setup_queue - Create and add the bsg hooks so we can receive requests
 * @dev: device to attach bsg device to

@@ -235,7 +248,9 @@ struct request_queue *bsg_setup_queue(struct device *dev, char *name,
	q = blk_alloc_queue(GFP_KERNEL);
	if (!q)
		return ERR_PTR(-ENOMEM);
	q->cmd_size = sizeof(struct scsi_request);
	q->cmd_size = sizeof(struct bsg_job) + dd_job_size;
	q->init_rq_fn = bsg_init_rq;
	q->exit_rq_fn = bsg_exit_rq;
	q->request_fn = bsg_request_fn;

	ret = blk_init_allocated_queue(q);

@@ -243,7 +258,6 @@ struct request_queue *bsg_setup_queue(struct device *dev, char *name,
		goto out_cleanup_queue;

	q->queuedata = dev;
	q->bsg_job_size = dd_job_size;
	q->bsg_job_fn = job_fn;
	queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q);
	queue_flag_set_unlocked(QUEUE_FLAG_SCSI_PASSTHROUGH, q);
@@ -100,10 +100,14 @@ acpi_evaluate_object_typed(acpi_handle handle,
		free_buffer_on_error = TRUE;
	}

	if (pathname) {
		status = acpi_get_handle(handle, pathname, &target_handle);
		if (ACPI_FAILURE(status)) {
			return_ACPI_STATUS(status);
		}
	} else {
		target_handle = handle;
	}

	full_pathname = acpi_ns_get_external_pathname(target_handle);
	if (!full_pathname) {

@@ -1741,7 +1741,7 @@ error:
 * functioning ECDT EC first in order to handle the events.
 * https://bugzilla.kernel.org/show_bug.cgi?id=115021
 */
int __init acpi_ec_ecdt_start(void)
static int __init acpi_ec_ecdt_start(void)
{
	acpi_handle handle;

@@ -2003,20 +2003,17 @@ static inline void acpi_ec_query_exit(void)
int __init acpi_ec_init(void)
{
	int result;
	int ecdt_fail, dsdt_fail;

	/* register workqueue for _Qxx evaluations */
	result = acpi_ec_query_init();
	if (result)
		goto err_exit;
	/* Now register the driver for the EC */
	result = acpi_bus_register_driver(&acpi_ec_driver);
	if (result)
		goto err_exit;

err_exit:
	if (result)
		acpi_ec_query_exit();
	return result;

	/* Drivers must be started after acpi_ec_query_init() */
	ecdt_fail = acpi_ec_ecdt_start();
	dsdt_fail = acpi_bus_register_driver(&acpi_ec_driver);
	return ecdt_fail && dsdt_fail ? -ENODEV : 0;
}

/* EC driver currently not unloadable */
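The reworked acpi_ec_init() treats the ECDT-described and the DSDT-enumerated embedded controller as alternatives: initialization only fails when both probe paths fail, so a machine with either kind of EC description still gets a working controller. The return expression in miniature:

	#include <stdio.h>

	#define ENODEV 19

	/* Succeed when at least one of the two probe paths worked. */
	static int ec_init(int ecdt_fail, int dsdt_fail)
	{
	        return ecdt_fail && dsdt_fail ? -ENODEV : 0;
	}

	int main(void)
	{
	        printf("both ok:   %d\n", ec_init(0, 0));   /*  0 */
	        printf("ecdt only: %d\n", ec_init(0, 1));   /*  0 */
	        printf("dsdt only: %d\n", ec_init(1, 0));   /*  0 */
	        printf("both fail: %d\n", ec_init(1, 1));   /* -19 */
	        return 0;
	}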
Some files were not shown because too many files have changed in this diff.