commit c59400a68c
Author: Jakub Kicinski <kuba@kernel.org>
Date:   2022-02-03 17:36:16 -08:00

    Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

    No conflicts.

    Signed-off-by: Jakub Kicinski <kuba@kernel.org>

405 changed files with 4031 additions and 2044 deletions


@@ -80,6 +80,9 @@ Chris Chiu <chris.chiu@canonical.com> <chiu@endlessos.org>
 Christian Borntraeger <borntraeger@linux.ibm.com> <borntraeger@de.ibm.com>
 Christian Borntraeger <borntraeger@linux.ibm.com> <cborntra@de.ibm.com>
 Christian Borntraeger <borntraeger@linux.ibm.com> <borntrae@de.ibm.com>
+Christian Brauner <brauner@kernel.org> <christian@brauner.io>
+Christian Brauner <brauner@kernel.org> <christian.brauner@canonical.com>
+Christian Brauner <brauner@kernel.org> <christian.brauner@ubuntu.com>
 Christophe Ricard <christophe.ricard@gmail.com>
 Christoph Hellwig <hch@lst.de>
 Colin Ian King <colin.king@intel.com> <colin.king@canonical.com>


@@ -10,6 +10,7 @@ gpio
    gpio-aggregator
    sysfs
    gpio-mockup
+   gpio-sim

 .. only:: subproject and html


@@ -266,10 +266,12 @@ Avanta family
 -------------

 Flavors:
+  - 88F6500
   - 88F6510
   - 88F6530P
   - 88F6550
   - 88F6560
+  - 88F6601

 Homepage:
   https://web.archive.org/web/20181005145041/http://www.marvell.com/broadband/


@ -52,6 +52,12 @@ stable kernels.
| Allwinner | A64/R18 | UNKNOWN1 | SUN50I_ERRATUM_UNKNOWN1 | | Allwinner | A64/R18 | UNKNOWN1 | SUN50I_ERRATUM_UNKNOWN1 |
+----------------+-----------------+-----------------+-----------------------------+ +----------------+-----------------+-----------------+-----------------------------+
+----------------+-----------------+-----------------+-----------------------------+ +----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A510 | #2064142 | ARM64_ERRATUM_2064142 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A510 | #2038923 | ARM64_ERRATUM_2038923 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A510 | #1902691 | ARM64_ERRATUM_1902691 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A53 | #826319 | ARM64_ERRATUM_826319 | | ARM | Cortex-A53 | #826319 | ARM64_ERRATUM_826319 |
+----------------+-----------------+-----------------+-----------------------------+ +----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A53 | #827319 | ARM64_ERRATUM_827319 | | ARM | Cortex-A53 | #827319 | ARM64_ERRATUM_827319 |
@ -92,12 +98,18 @@ stable kernels.
+----------------+-----------------+-----------------+-----------------------------+ +----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A77 | #1508412 | ARM64_ERRATUM_1508412 | | ARM | Cortex-A77 | #1508412 | ARM64_ERRATUM_1508412 |
+----------------+-----------------+-----------------+-----------------------------+ +----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A510 | #2051678 | ARM64_ERRATUM_2051678 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A710 | #2119858 | ARM64_ERRATUM_2119858 | | ARM | Cortex-A710 | #2119858 | ARM64_ERRATUM_2119858 |
+----------------+-----------------+-----------------+-----------------------------+ +----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A710 | #2054223 | ARM64_ERRATUM_2054223 | | ARM | Cortex-A710 | #2054223 | ARM64_ERRATUM_2054223 |
+----------------+-----------------+-----------------+-----------------------------+ +----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A710 | #2224489 | ARM64_ERRATUM_2224489 | | ARM | Cortex-A710 | #2224489 | ARM64_ERRATUM_2224489 |
+----------------+-----------------+-----------------+-----------------------------+ +----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-X2 | #2119858 | ARM64_ERRATUM_2119858 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-X2 | #2224489 | ARM64_ERRATUM_2224489 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Neoverse-N1 | #1188873,1418040| ARM64_ERRATUM_1418040 | | ARM | Neoverse-N1 | #1188873,1418040| ARM64_ERRATUM_1418040 |
+----------------+-----------------+-----------------+-----------------------------+ +----------------+-----------------+-----------------+-----------------------------+
| ARM | Neoverse-N1 | #1349291 | N/A | | ARM | Neoverse-N1 | #1349291 | N/A |


@@ -7,6 +7,14 @@ directory. These are intended to be small tests to exercise individual code
 paths in the kernel. Tests are intended to be run after building, installing
 and booting a kernel.

+Kselftest from mainline can be run on older stable kernels. Running tests
+from mainline offers the best coverage. Several test rings run mainline
+kselftest suite on stable releases. The reason is that when a new test
+gets added to test existing code to regression test a bug, we should be
+able to run that test on an older kernel. Hence, it is important to keep
+code that can still test an older kernel and make sure it skips the test
+gracefully on newer releases.
+
 You can find additional information on Kselftest framework, how to
 write new tests using the framework on Kselftest wiki:

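For context, a minimal sketch (not part of this commit) of the graceful-skip pattern the added paragraph describes, using the kselftest.h helpers; the probed sysfs path is hypothetical and the include path assumes the usual selftest directory layout:

/* Hypothetical illustration: skip cleanly when the kernel lacks the feature. */
#include <unistd.h>
#include "../kselftest.h"

int main(void)
{
	ksft_print_header();
	ksft_set_plan(1);

	/* Placeholder feature probe: an interface newer kernels expose. */
	if (access("/sys/kernel/some_new_feature", F_OK) != 0)
		ksft_test_result_skip("feature not present on this kernel\n");
	else
		ksft_test_result_pass("feature interface exists\n");

	return ksft_exit_pass();
}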

@@ -36,6 +36,7 @@ properties:
           - renesas,intc-ex-r8a77980    # R-Car V3H
           - renesas,intc-ex-r8a77990    # R-Car E3
           - renesas,intc-ex-r8a77995    # R-Car D3
+          - renesas,intc-ex-r8a779a0    # R-Car V3U
       - const: renesas,irqc

 '#interrupt-cells':


@@ -62,6 +62,7 @@ properties:
   interrupts-extended:
     minItems: 1
+    maxItems: 15872
     description:
       Specifies which contexts are connected to the PLIC, with "-1" specifying
       that a context is not present. Each node pointed to should be a
@@ -90,12 +91,11 @@ examples:
       #interrupt-cells = <1>;
       compatible = "sifive,fu540-c000-plic", "sifive,plic-1.0.0";
       interrupt-controller;
-      interrupts-extended = <
-        &cpu0_intc 11
-        &cpu1_intc 11 &cpu1_intc 9
-        &cpu2_intc 11 &cpu2_intc 9
-        &cpu3_intc 11 &cpu3_intc 9
-        &cpu4_intc 11 &cpu4_intc 9>;
+      interrupts-extended = <&cpu0_intc 11>,
+                            <&cpu1_intc 11>, <&cpu1_intc 9>,
+                            <&cpu2_intc 11>, <&cpu2_intc 9>,
+                            <&cpu3_intc 11>, <&cpu3_intc 9>,
+                            <&cpu4_intc 11>, <&cpu4_intc 9>;
       reg = <0xc000000 0x4000000>;
       riscv,ndev = <10>;
     };


@@ -107,6 +107,10 @@ properties:
       - const: imem
       - const: config

+  qcom,qmp:
+    $ref: /schemas/types.yaml#/definitions/phandle
+    description: phandle to the AOSS side-channel message RAM
+
   qcom,smem-states:
     $ref: /schemas/types.yaml#/definitions/phandle-array
     description: State bits used in by the AP to signal the modem.
@@ -222,6 +226,8 @@ examples:
                       "imem",
                       "config";

+        qcom,qmp = <&aoss_qmp>;
+
         qcom,smem-states = <&ipa_smp2p_out 0>,
                            <&ipa_smp2p_out 1>;
         qcom,smem-state-names = "ipa-clock-enabled-valid",


@ -23,8 +23,9 @@ properties:
minItems: 1 minItems: 1
maxItems: 256 maxItems: 256
items: items:
minimum: 0 items:
maximum: 256 - minimum: 0
maximum: 256
description: description:
Chip select used by the device. Chip select used by the device.


@@ -166,6 +166,7 @@ to ReStructured Text format, or are simply too old.
 .. toctree::
    :maxdepth: 2

+   tools/index
    staging/index
    watch_queue


@@ -295,7 +295,7 @@ Pete Zaitcev gives the following summary:
 -  If you are in a process context (any syscall) and want to lock other
    process out, use a mutex. You can take a mutex and sleep
-   (``copy_from_user*(`` or ``kmalloc(x,GFP_KERNEL)``).
+   (``copy_from_user()`` or ``kmalloc(x,GFP_KERNEL)``).

 -  Otherwise (== data can be touched in an interrupt), use
    spin_lock_irqsave() and

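The summary the hunk above corrects maps to code along these lines (a minimal sketch, not part of this commit; the data structures are illustrative):

#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>

static DEFINE_MUTEX(cfg_lock);    /* process context only: holder may sleep */
static DEFINE_SPINLOCK(irq_lock); /* data also touched from interrupt context */

static int update_cfg(void __user *ubuf, char *kbuf, size_t len)
{
	int ret;

	mutex_lock(&cfg_lock);
	/* Sleeping is fine here, e.g. copy_from_user() or kmalloc(x, GFP_KERNEL). */
	ret = copy_from_user(kbuf, ubuf, len) ? -EFAULT : 0;
	mutex_unlock(&cfg_lock);
	return ret;
}

static void touch_irq_data(unsigned int *counter)
{
	unsigned long flags;

	/* Data can be touched in an interrupt: take the lock with IRQs disabled. */
	spin_lock_irqsave(&irq_lock, flags);
	(*counter)++;
	spin_unlock_irqrestore(&irq_lock, flags);
}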

@@ -0,0 +1,20 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+============
+Kernel tools
+============
+
+This book covers user-space tools that are shipped with the kernel source;
+more additions are needed here:
+
+.. toctree::
+   :maxdepth: 1
+
+   rtla/index
+
+.. only:: subproject and html
+
+   Indices
+   =======
+
+   * :ref:`genindex`


@@ -0,0 +1,26 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+================================
+The realtime Linux analysis tool
+================================
+
+RTLA provides a set of tools for the analysis of the kernel's realtime
+behavior on specific hardware.
+
+.. toctree::
+   :maxdepth: 1
+
+   rtla
+   rtla-osnoise
+   rtla-osnoise-hist
+   rtla-osnoise-top
+   rtla-timerlat
+   rtla-timerlat-hist
+   rtla-timerlat-top
+
+.. only:: subproject and html
+
+   Indices
+   =======
+
+   * :ref:`genindex`


@@ -3268,6 +3268,7 @@ number.

 :Capability: KVM_CAP_DEVICE_CTRL, KVM_CAP_VM_ATTRIBUTES for vm device,
              KVM_CAP_VCPU_ATTRIBUTES for vcpu device
+             KVM_CAP_SYS_ATTRIBUTES for system (/dev/kvm) device (no set)
 :Type: device ioctl, vm ioctl, vcpu ioctl
 :Parameters: struct kvm_device_attr
 :Returns: 0 on success, -1 on error
@@ -3302,7 +3303,8 @@ transferred is defined by the particular attribute.
 ------------------------

 :Capability: KVM_CAP_DEVICE_CTRL, KVM_CAP_VM_ATTRIBUTES for vm device,
              KVM_CAP_VCPU_ATTRIBUTES for vcpu device
+             KVM_CAP_SYS_ATTRIBUTES for system (/dev/kvm) device
 :Type: device ioctl, vm ioctl, vcpu ioctl
 :Parameters: struct kvm_device_attr
 :Returns: 0 on success, -1 on error

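For orientation, a user-space sketch of driving these ioctls against the new system-wide target (illustrative only; the group and attribute ids are placeholders, and real callers would check KVM_CAP_SYS_ATTRIBUTES via KVM_CHECK_EXTENSION first):

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Read one system-wide attribute from /dev/kvm via KVM_GET_DEVICE_ATTR. */
static int get_sys_attr(__u32 group, __u64 attr, __u64 *value)
{
	struct kvm_device_attr da;
	int kvm_fd, ret;

	kvm_fd = open("/dev/kvm", O_RDWR);
	if (kvm_fd < 0)
		return -1;

	memset(&da, 0, sizeof(da));
	da.group = group;                       /* placeholder attribute group */
	da.attr  = attr;                        /* placeholder attribute id */
	da.addr  = (__u64)(unsigned long)value; /* result is written here */

	ret = ioctl(kvm_fd, KVM_GET_DEVICE_ATTR, &da);
	close(kvm_fd);
	return ret;                             /* 0 on success, -1 on error */
}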

@@ -9,7 +9,7 @@ Page Table Check
 Introduction
 ============

-Page table check allows to hardern the kernel by ensuring that some types of
+Page table check allows to harden the kernel by ensuring that some types of
 the memory corruptions are prevented.

 Page table check performs extra verifications at the time when new pages become


@@ -4157,9 +4157,8 @@ N:	csky
 K:	csky

 CA8210 IEEE-802.15.4 RADIO DRIVER
-M:	Harry Morris <h.morris@cascoda.com>
 L:	linux-wpan@vger.kernel.org
-S:	Maintained
+S:	Orphan
 W:	https://github.com/Cascoda/ca8210-linux.git
 F:	Documentation/devicetree/bindings/net/ieee802154/ca8210.txt
 F:	drivers/net/ieee802154/ca8210.c


@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 17
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc2
 NAME = Gobble Gobble

 # *DOCUMENTATION*


@@ -83,6 +83,7 @@ config ARM
 	select HAVE_EBPF_JIT if !CPU_ENDIAN_BE32
 	select HAVE_CONTEXT_TRACKING
 	select HAVE_C_RECORDMCOUNT
+	select HAVE_BUILDTIME_MCOUNT_SORT
 	select HAVE_DEBUG_KMEMLEAK if !XIP_KERNEL
 	select HAVE_DMA_CONTIGUOUS if MMU
 	select HAVE_DYNAMIC_FTRACE if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU


@@ -670,15 +670,25 @@ config ARM64_ERRATUM_1508412
 config ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE
 	bool

+config ARM64_ERRATUM_2051678
+	bool "Cortex-A510: 2051678: disable Hardware Update of the page table dirty bit"
+	help
+	  This option adds the workaround for ARM Cortex-A510 erratum 2051678.
+	  Affected Cortex-A510 cores might not respect the ordering rules for
+	  hardware update of the page table's dirty bit. The workaround
+	  is to not enable the feature on affected CPUs.
+
+	  If unsure, say Y.
+
 config ARM64_ERRATUM_2119858
-	bool "Cortex-A710: 2119858: workaround TRBE overwriting trace data in FILL mode"
+	bool "Cortex-A710/X2: 2119858: workaround TRBE overwriting trace data in FILL mode"
 	default y
 	depends on CORESIGHT_TRBE
 	select ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE
 	help
-	  This option adds the workaround for ARM Cortex-A710 erratum 2119858.
+	  This option adds the workaround for ARM Cortex-A710/X2 erratum 2119858.

-	  Affected Cortex-A710 cores could overwrite up to 3 cache lines of trace
+	  Affected Cortex-A710/X2 cores could overwrite up to 3 cache lines of trace
 	  data at the base of the buffer (pointed to by TRBASER_EL1) in FILL mode in
 	  the event of a WRAP event.
@@ -761,14 +771,14 @@ config ARM64_ERRATUM_2253138
 	  If unsure, say Y.

 config ARM64_ERRATUM_2224489
-	bool "Cortex-A710: 2224489: workaround TRBE writing to address out-of-range"
+	bool "Cortex-A710/X2: 2224489: workaround TRBE writing to address out-of-range"
 	depends on CORESIGHT_TRBE
 	default y
 	select ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE
 	help
-	  This option adds the workaround for ARM Cortex-A710 erratum 2224489.
+	  This option adds the workaround for ARM Cortex-A710/X2 erratum 2224489.

-	  Affected Cortex-A710 cores might write to an out-of-range address, not reserved
+	  Affected Cortex-A710/X2 cores might write to an out-of-range address, not reserved
 	  for TRBE. Under some conditions, the TRBE might generate a write to the next
 	  virtually addressed page following the last page of the TRBE address space
 	  (i.e., the TRBLIMITR_EL1.LIMIT), instead of wrapping around to the base.
@@ -778,6 +788,65 @@ config ARM64_ERRATUM_2224489

 	  If unsure, say Y.

+config ARM64_ERRATUM_2064142
+	bool "Cortex-A510: 2064142: workaround TRBE register writes while disabled"
+	depends on COMPILE_TEST # Until the CoreSight TRBE driver changes are in
+	default y
+	help
+	  This option adds the workaround for ARM Cortex-A510 erratum 2064142.
+
+	  Affected Cortex-A510 cores might fail to write into system registers after the
+	  TRBE has been disabled. Under some conditions after the TRBE has been disabled,
+	  writes into TRBE registers TRBLIMITR_EL1, TRBPTR_EL1, TRBBASER_EL1, TRBSR_EL1,
+	  and TRBTRG_EL1 will be ignored and will not take effect.
+
+	  Work around this in the driver by executing TSB CSYNC and DSB after collection
+	  is stopped and before performing a system register write to one of the affected
+	  registers.
+
+	  If unsure, say Y.
+
+config ARM64_ERRATUM_2038923
+	bool "Cortex-A510: 2038923: workaround TRBE corruption with enable"
+	depends on COMPILE_TEST # Until the CoreSight TRBE driver changes are in
+	default y
+	help
+	  This option adds the workaround for ARM Cortex-A510 erratum 2038923.
+
+	  Affected Cortex-A510 cores might cause an inconsistent view on whether trace is
+	  prohibited within the CPU. As a result, the trace buffer or trace buffer state
+	  might be corrupted. This happens after the TRBE buffer has been enabled by
+	  setting TRBLIMITR_EL1.E, followed by just a single context synchronization
+	  event before execution changes from a context in which trace is prohibited
+	  to one where it isn't, or vice versa. Under these conditions, the view of
+	  whether trace is prohibited is inconsistent between parts of the CPU, and
+	  the trace buffer or the trace buffer state might be corrupted.
+
+	  Work around this in the driver by preventing an inconsistent view of whether the
+	  trace is prohibited or not based on TRBLIMITR_EL1.E by immediately following a
+	  change to TRBLIMITR_EL1.E with at least one ISB instruction before an ERET, or
+	  two ISB instructions if no ERET is to take place.
+
+	  If unsure, say Y.
+
+config ARM64_ERRATUM_1902691
+	bool "Cortex-A510: 1902691: workaround TRBE trace corruption"
+	depends on COMPILE_TEST # Until the CoreSight TRBE driver changes are in
+	default y
+	help
+	  This option adds the workaround for ARM Cortex-A510 erratum 1902691.
+
+	  Affected Cortex-A510 cores might corrupt trace data as it is written to
+	  memory. Effectively TRBE is broken and hence cannot be used to capture
+	  trace data.
+
+	  Work around this problem in the driver by just preventing TRBE initialization on
+	  affected CPUs. The firmware must have disabled the access to TRBE for the kernel
+	  on such implementations. This will cover the kernel for any firmware that doesn't
+	  do this already.
+
+	  If unsure, say Y.
+
 config CAVIUM_ERRATUM_22375
 	bool "Cavium erratum 22375, 24313"
 	default y


@@ -73,7 +73,9 @@
 #define ARM_CPU_PART_CORTEX_A76		0xD0B
 #define ARM_CPU_PART_NEOVERSE_N1	0xD0C
 #define ARM_CPU_PART_CORTEX_A77		0xD0D
+#define ARM_CPU_PART_CORTEX_A510	0xD46
 #define ARM_CPU_PART_CORTEX_A710	0xD47
+#define ARM_CPU_PART_CORTEX_X2		0xD48
 #define ARM_CPU_PART_NEOVERSE_N2	0xD49

 #define APM_CPU_PART_POTENZA		0x000
@@ -115,7 +117,9 @@
 #define MIDR_CORTEX_A76	MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A76)
 #define MIDR_NEOVERSE_N1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N1)
 #define MIDR_CORTEX_A77	MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A77)
+#define MIDR_CORTEX_A510 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A510)
 #define MIDR_CORTEX_A710 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A710)
+#define MIDR_CORTEX_X2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X2)
 #define MIDR_NEOVERSE_N2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N2)
 #define MIDR_THUNDERX	MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
 #define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)


@@ -347,6 +347,7 @@ static const struct midr_range trbe_overwrite_fill_mode_cpus[] = {
 #endif
 #ifdef CONFIG_ARM64_ERRATUM_2119858
 	MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
+	MIDR_RANGE(MIDR_CORTEX_X2, 0, 0, 2, 0),
 #endif
 	{},
 };
@@ -371,6 +372,7 @@ static struct midr_range trbe_write_out_of_range_cpus[] = {
 #endif
 #ifdef CONFIG_ARM64_ERRATUM_2224489
 	MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
+	MIDR_RANGE(MIDR_CORTEX_X2, 0, 0, 2, 0),
 #endif
 	{},
 };
@@ -597,6 +599,33 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
 		CAP_MIDR_RANGE_LIST(trbe_write_out_of_range_cpus),
 	},
+#endif
+#ifdef CONFIG_ARM64_ERRATUM_2064142
+	{
+		.desc = "ARM erratum 2064142",
+		.capability = ARM64_WORKAROUND_2064142,
+		/* Cortex-A510 r0p0 - r0p2 */
+		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 2)
+	},
+#endif
+#ifdef CONFIG_ARM64_ERRATUM_2038923
+	{
+		.desc = "ARM erratum 2038923",
+		.capability = ARM64_WORKAROUND_2038923,
+		/* Cortex-A510 r0p0 - r0p2 */
+		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 2)
+	},
+#endif
+#ifdef CONFIG_ARM64_ERRATUM_1902691
+	{
+		.desc = "ARM erratum 1902691",
+		.capability = ARM64_WORKAROUND_1902691,
+		/* Cortex-A510 r0p0 - r0p1 */
+		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 1)
+	},
 #endif
 	{
 	}


@@ -1645,6 +1645,9 @@ static bool cpu_has_broken_dbm(void)
 	MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
 	/* Kryo4xx Silver (rdpe => r1p0) */
 	MIDR_REV(MIDR_QCOM_KRYO_4XX_SILVER, 0xd, 0xe),
+#endif
+#ifdef CONFIG_ARM64_ERRATUM_2051678
+	MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 2),
 #endif
 	{},
 };


@@ -33,8 +33,8 @@
  */
-static void start_backtrace(struct stackframe *frame, unsigned long fp,
-			    unsigned long pc)
+static notrace void start_backtrace(struct stackframe *frame, unsigned long fp,
+				    unsigned long pc)
 {
 	frame->fp = fp;
 	frame->pc = pc;
@@ -55,6 +55,7 @@ static void start_backtrace(struct stackframe *frame, unsigned long fp,
 	frame->prev_fp = 0;
 	frame->prev_type = STACK_TYPE_UNKNOWN;
 }
+NOKPROBE_SYMBOL(start_backtrace);

 /*
  * Unwind from one frame record (A) to the next frame record (B).


@@ -29,8 +29,11 @@ ldflags-y := -shared -soname=linux-vdso.so.1 --hash-style=sysv \
 ccflags-y := -fno-common -fno-builtin -fno-stack-protector -ffixed-x18
 ccflags-y += -DDISABLE_BRANCH_PROFILING -DBUILD_VDSO

+# -Wmissing-prototypes and -Wmissing-declarations are removed from
+# the CFLAGS of vgettimeofday.c to make it possible to build the
+# kernel with CONFIG_WERROR enabled.
 CFLAGS_REMOVE_vgettimeofday.o = $(CC_FLAGS_FTRACE) -Os $(CC_FLAGS_SCS) $(GCC_PLUGINS_CFLAGS) \
-				$(CC_FLAGS_LTO)
+				$(CC_FLAGS_LTO) -Wmissing-prototypes -Wmissing-declarations
 KASAN_SANITIZE := n
 KCSAN_SANITIZE := n
 UBSAN_SANITIZE := n


@@ -38,7 +38,10 @@ static inline void __vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg)

 static void __vcpu_write_spsr(struct kvm_vcpu *vcpu, u64 val)
 {
-	write_sysreg_el1(val, SYS_SPSR);
+	if (has_vhe())
+		write_sysreg_el1(val, SYS_SPSR);
+	else
+		__vcpu_sys_reg(vcpu, SPSR_EL1) = val;
 }

 static void __vcpu_write_spsr_abt(struct kvm_vcpu *vcpu, u64 val)


@@ -983,13 +983,9 @@ static int stage2_unmap_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
 	 */
 	stage2_put_pte(ptep, mmu, addr, level, mm_ops);

-	if (need_flush) {
-		kvm_pte_t *pte_follow = kvm_pte_follow(pte, mm_ops);
-
-		dcache_clean_inval_poc((unsigned long)pte_follow,
-				       (unsigned long)pte_follow +
-					       kvm_granule_size(level));
-	}
+	if (need_flush && mm_ops->dcache_clean_inval_poc)
+		mm_ops->dcache_clean_inval_poc(kvm_pte_follow(pte, mm_ops),
+					       kvm_granule_size(level));

 	if (childp)
 		mm_ops->put_page(childp);
@@ -1151,15 +1147,13 @@ static int stage2_flush_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
 	struct kvm_pgtable *pgt = arg;
 	struct kvm_pgtable_mm_ops *mm_ops = pgt->mm_ops;
 	kvm_pte_t pte = *ptep;
-	kvm_pte_t *pte_follow;

 	if (!kvm_pte_valid(pte) || !stage2_pte_cacheable(pgt, pte))
 		return 0;

-	pte_follow = kvm_pte_follow(pte, mm_ops);
-	dcache_clean_inval_poc((unsigned long)pte_follow,
-			       (unsigned long)pte_follow +
-				       kvm_granule_size(level));
+	if (mm_ops->dcache_clean_inval_poc)
+		mm_ops->dcache_clean_inval_poc(kvm_pte_follow(pte, mm_ops),
+					       kvm_granule_size(level));
 	return 0;
 }


@@ -983,6 +983,9 @@ static void __vgic_v3_read_ctlr(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
 	val = ((vtr >> 29) & 7) << ICC_CTLR_EL1_PRI_BITS_SHIFT;
 	/* IDbits */
 	val |= ((vtr >> 23) & 7) << ICC_CTLR_EL1_ID_BITS_SHIFT;
+	/* SEIS */
+	if (kvm_vgic_global_state.ich_vtr_el2 & ICH_VTR_SEIS_MASK)
+		val |= BIT(ICC_CTLR_EL1_SEIS_SHIFT);
 	/* A3V */
 	val |= ((vtr >> 21) & 1) << ICC_CTLR_EL1_A3V_SHIFT;
 	/* EOImode */


@@ -609,6 +609,18 @@ static int __init early_gicv4_enable(char *buf)
 }
 early_param("kvm-arm.vgic_v4_enable", early_gicv4_enable);

+static const struct midr_range broken_seis[] = {
+	MIDR_ALL_VERSIONS(MIDR_APPLE_M1_ICESTORM),
+	MIDR_ALL_VERSIONS(MIDR_APPLE_M1_FIRESTORM),
+	{},
+};
+
+static bool vgic_v3_broken_seis(void)
+{
+	return ((kvm_vgic_global_state.ich_vtr_el2 & ICH_VTR_SEIS_MASK) &&
+		is_midr_in_range_list(read_cpuid_id(), broken_seis));
+}
+
 /**
  * vgic_v3_probe - probe for a VGICv3 compatible interrupt controller
  * @info:	pointer to the GIC description
@@ -676,9 +688,10 @@ int vgic_v3_probe(const struct gic_kvm_info *info)
 		group1_trap = true;
 	}

-	if (kvm_vgic_global_state.ich_vtr_el2 & ICH_VTR_SEIS_MASK) {
-		kvm_info("GICv3 with locally generated SEI\n");
+	if (vgic_v3_broken_seis()) {
+		kvm_info("GICv3 with broken locally generated SEI\n");

+		kvm_vgic_global_state.ich_vtr_el2 &= ~ICH_VTR_SEIS_MASK;
 		group0_trap = true;
 		group1_trap = true;
 		if (ich_vtr_el2 & ICH_VTR_TDS_MASK)


@@ -40,8 +40,8 @@ static bool
 ex_handler_load_unaligned_zeropad(const struct exception_table_entry *ex,
 				  struct pt_regs *regs)
 {
-	int reg_data = FIELD_GET(EX_DATA_REG_DATA, ex->type);
-	int reg_addr = FIELD_GET(EX_DATA_REG_ADDR, ex->type);
+	int reg_data = FIELD_GET(EX_DATA_REG_DATA, ex->data);
+	int reg_addr = FIELD_GET(EX_DATA_REG_ADDR, ex->data);
 	unsigned long data, addr, offset;

 	addr = pt_regs_read_reg(regs, reg_addr);

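The one-line nature of this fix is easier to see with FIELD_GET()'s semantics in mind; a stand-alone sketch follows (the field masks here are illustrative, not the real arm64 extable layout):

#include <linux/bitfield.h>

/* Illustrative masks: two 5-bit register indices packed into ex->data. */
#define EX_DATA_REG_DATA	GENMASK(4, 0)
#define EX_DATA_REG_ADDR	GENMASK(9, 5)

static void decode_fixup(unsigned long data)
{
	/* FIELD_GET() masks out and right-shifts the field the mask describes. */
	int reg_data = FIELD_GET(EX_DATA_REG_DATA, data);
	int reg_addr = FIELD_GET(EX_DATA_REG_ADDR, data);

	/*
	 * The bug fixed above: these fields live in ex->data, but the handler
	 * was decoding ex->type, so both register indices came out wrong.
	 */
	(void)reg_data;
	(void)reg_addr;
}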

@@ -55,6 +55,9 @@ WORKAROUND_1418040
 WORKAROUND_1463225
 WORKAROUND_1508412
 WORKAROUND_1542419
+WORKAROUND_2064142
+WORKAROUND_2038923
+WORKAROUND_1902691
 WORKAROUND_TRBE_OVERWRITE_FILL_MODE
 WORKAROUND_TSB_FLUSH_FAILURE
 WORKAROUND_TRBE_WRITE_OUT_OF_RANGE


@@ -318,7 +318,7 @@ config ARCH_PROC_KCORE_TEXT
 	depends on PROC_KCORE

 config IA64_MCA_RECOVERY
-	tristate "MCA recovery from errors other than TLB."
+	bool "MCA recovery from errors other than TLB."

 config IA64_PALINFO
 	tristate "/proc/pal support"


@@ -76,5 +76,5 @@ static void pci_fixup_video(struct pci_dev *pdev)
 		}
 	}
 }
-DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID,
+DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_ANY_ID, PCI_ANY_ID,
 				PCI_CLASS_DISPLAY_VGA, 8, pci_fixup_video);


@@ -74,7 +74,7 @@
 #define EXC(inst_reg,addr,handler)		\
 9:	inst_reg, addr;				\
 	.section __ex_table,"a";		\
-	PTR	9b, handler;			\
+	PTR_WD	9b, handler;			\
 	.previous

 /*


@@ -285,7 +285,7 @@ symbol		=	value
 #define PTR_SCALESHIFT	2

-#define PTR		.word
+#define PTR_WD		.word
 #define PTRSIZE		4
 #define PTRLOG		2
 #endif
@@ -310,7 +310,7 @@ symbol		=	value
 #define PTR_SCALESHIFT	3

-#define PTR		.dword
+#define PTR_WD		.dword
 #define PTRSIZE		8
 #define PTRLOG		3
 #endif


@@ -32,7 +32,7 @@ do {						\
 	".previous\n"				\
 						\
 	".section\t__ex_table,\"a\"\n\t"	\
-	STR(PTR) "\t1b, 3b\n\t"			\
+	STR(PTR_WD) "\t1b, 3b\n\t"		\
 	".previous\n"				\
 						\
 	: [tmp_dst] "=&r" (dst), [tmp_err] "=r" (error)\
@@ -54,7 +54,7 @@ do {						\
 	".previous\n"				\
 						\
 	".section\t__ex_table,\"a\"\n\t"	\
-	STR(PTR) "\t1b, 3b\n\t"			\
+	STR(PTR_WD) "\t1b, 3b\n\t"		\
 	".previous\n"				\
 						\
 	: [tmp_err] "=r" (error)		\


@@ -119,7 +119,7 @@ static inline void flush_scache_line(unsigned long addr)
 	"	j	2b			\n"	\
 	"	.previous			\n"	\
 	"	.section __ex_table,\"a\"	\n"	\
-	"	"STR(PTR)"	1b, 3b		\n"	\
+	"	"STR(PTR_WD)"	1b, 3b		\n"	\
 	"	.previous"				\
 	: "+r" (__err)					\
 	: "i" (op), "r" (addr), "i" (-EFAULT));		\
@@ -142,7 +142,7 @@ static inline void flush_scache_line(unsigned long addr)
 	"	j	2b			\n"	\
 	"	.previous			\n"	\
 	"	.section __ex_table,\"a\"	\n"	\
-	"	"STR(PTR)"	1b, 3b		\n"	\
+	"	"STR(PTR_WD)"	1b, 3b		\n"	\
 	"	.previous"				\
 	: "+r" (__err)					\
 	: "i" (op), "r" (addr), "i" (-EFAULT));		\


@@ -20,8 +20,8 @@ do {						\
 		"j\t3b\n\t"				\
 		".previous\n\t"				\
 		".section\t__ex_table,\"a\"\n\t"	\
-		STR(PTR)"\t1b, 4b\n\t"			\
-		STR(PTR)"\t2b, 4b\n\t"			\
+		STR(PTR_WD)"\t1b, 4b\n\t"		\
+		STR(PTR_WD)"\t2b, 4b\n\t"		\
 		".previous"				\
 		: "=&r" (value), "=r" (res)		\
 		: "r" (addr), "i" (-EFAULT));		\
@@ -41,8 +41,8 @@ do {						\
 		"j\t3b\n\t"				\
 		".previous\n\t"				\
 		".section\t__ex_table,\"a\"\n\t"	\
-		STR(PTR)"\t1b, 4b\n\t"			\
-		STR(PTR)"\t2b, 4b\n\t"			\
+		STR(PTR_WD)"\t1b, 4b\n\t"		\
+		STR(PTR_WD)"\t2b, 4b\n\t"		\
 		".previous"				\
 		: "=&r" (value), "=r" (res)		\
 		: "r" (addr), "i" (-EFAULT));		\
@@ -74,10 +74,10 @@ do {						\
 		"j\t10b\n\t"				\
 		".previous\n\t"				\
 		".section\t__ex_table,\"a\"\n\t"	\
-		STR(PTR)"\t1b, 11b\n\t"			\
-		STR(PTR)"\t2b, 11b\n\t"			\
-		STR(PTR)"\t3b, 11b\n\t"			\
-		STR(PTR)"\t4b, 11b\n\t"			\
+		STR(PTR_WD)"\t1b, 11b\n\t"		\
+		STR(PTR_WD)"\t2b, 11b\n\t"		\
+		STR(PTR_WD)"\t3b, 11b\n\t"		\
+		STR(PTR_WD)"\t4b, 11b\n\t"		\
 		".previous"				\
 		: "=&r" (value), "=r" (res)		\
 		: "r" (addr), "i" (-EFAULT));		\
@@ -102,8 +102,8 @@ do {						\
 		"j\t3b\n\t"				\
 		".previous\n\t"				\
 		".section\t__ex_table,\"a\"\n\t"	\
-		STR(PTR)"\t1b, 4b\n\t"			\
-		STR(PTR)"\t2b, 4b\n\t"			\
+		STR(PTR_WD)"\t1b, 4b\n\t"		\
+		STR(PTR_WD)"\t2b, 4b\n\t"		\
 		".previous"				\
 		: "=&r" (value), "=r" (res)		\
 		: "r" (addr), "i" (-EFAULT));		\
@@ -125,8 +125,8 @@ do {						\
 		"j\t3b\n\t"				\
 		".previous\n\t"				\
 		".section\t__ex_table,\"a\"\n\t"	\
-		STR(PTR)"\t1b, 4b\n\t"			\
-		STR(PTR)"\t2b, 4b\n\t"			\
+		STR(PTR_WD)"\t1b, 4b\n\t"		\
+		STR(PTR_WD)"\t2b, 4b\n\t"		\
 		".previous"				\
 		: "=&r" (value), "=r" (res)		\
 		: "r" (addr), "i" (-EFAULT));		\
@@ -145,8 +145,8 @@ do {						\
 		"j\t3b\n\t"				\
 		".previous\n\t"				\
 		".section\t__ex_table,\"a\"\n\t"	\
-		STR(PTR)"\t1b, 4b\n\t"			\
-		STR(PTR)"\t2b, 4b\n\t"			\
+		STR(PTR_WD)"\t1b, 4b\n\t"		\
+		STR(PTR_WD)"\t2b, 4b\n\t"		\
 		".previous"				\
 		: "=&r" (value), "=r" (res)		\
 		: "r" (addr), "i" (-EFAULT));		\
@@ -178,10 +178,10 @@ do {						\
 		"j\t10b\n\t"				\
 		".previous\n\t"				\
 		".section\t__ex_table,\"a\"\n\t"	\
-		STR(PTR)"\t1b, 11b\n\t"			\
-		STR(PTR)"\t2b, 11b\n\t"			\
-		STR(PTR)"\t3b, 11b\n\t"			\
-		STR(PTR)"\t4b, 11b\n\t"			\
+		STR(PTR_WD)"\t1b, 11b\n\t"		\
+		STR(PTR_WD)"\t2b, 11b\n\t"		\
+		STR(PTR_WD)"\t3b, 11b\n\t"		\
+		STR(PTR_WD)"\t4b, 11b\n\t"		\
 		".previous"				\
 		: "=&r" (value), "=r" (res)		\
 		: "r" (addr), "i" (-EFAULT));		\
@@ -223,14 +223,14 @@ do {						\
 		"j\t10b\n\t"				\
 		".previous\n\t"				\
 		".section\t__ex_table,\"a\"\n\t"	\
-		STR(PTR)"\t1b, 11b\n\t"			\
-		STR(PTR)"\t2b, 11b\n\t"			\
-		STR(PTR)"\t3b, 11b\n\t"			\
-		STR(PTR)"\t4b, 11b\n\t"			\
-		STR(PTR)"\t5b, 11b\n\t"			\
-		STR(PTR)"\t6b, 11b\n\t"			\
-		STR(PTR)"\t7b, 11b\n\t"			\
-		STR(PTR)"\t8b, 11b\n\t"			\
+		STR(PTR_WD)"\t1b, 11b\n\t"		\
+		STR(PTR_WD)"\t2b, 11b\n\t"		\
+		STR(PTR_WD)"\t3b, 11b\n\t"		\
+		STR(PTR_WD)"\t4b, 11b\n\t"		\
+		STR(PTR_WD)"\t5b, 11b\n\t"		\
+		STR(PTR_WD)"\t6b, 11b\n\t"		\
+		STR(PTR_WD)"\t7b, 11b\n\t"		\
+		STR(PTR_WD)"\t8b, 11b\n\t"		\
 		".previous"				\
 		: "=&r" (value), "=r" (res)		\
 		: "r" (addr), "i" (-EFAULT));		\
@@ -255,8 +255,8 @@ do {						\
 		"j\t3b\n\t"				\
 		".previous\n\t"				\
 		".section\t__ex_table,\"a\"\n\t"	\
-		STR(PTR)"\t1b, 4b\n\t"			\
-		STR(PTR)"\t2b, 4b\n\t"			\
+		STR(PTR_WD)"\t1b, 4b\n\t"		\
+		STR(PTR_WD)"\t2b, 4b\n\t"		\
 		".previous"				\
 		: "=r" (res)				\
 		: "r" (value), "r" (addr), "i" (-EFAULT));\
@@ -276,8 +276,8 @@ do {						\
 		"j\t3b\n\t"				\
 		".previous\n\t"				\
 		".section\t__ex_table,\"a\"\n\t"	\
-		STR(PTR)"\t1b, 4b\n\t"			\
-		STR(PTR)"\t2b, 4b\n\t"			\
+		STR(PTR_WD)"\t1b, 4b\n\t"		\
+		STR(PTR_WD)"\t2b, 4b\n\t"		\
 		".previous"				\
 		: "=r" (res)				\
 		: "r" (value), "r" (addr), "i" (-EFAULT));	\
@@ -296,8 +296,8 @@ do {						\
 		"j\t3b\n\t"				\
 		".previous\n\t"				\
 		".section\t__ex_table,\"a\"\n\t"	\
-		STR(PTR)"\t1b, 4b\n\t"			\
-		STR(PTR)"\t2b, 4b\n\t"			\
+		STR(PTR_WD)"\t1b, 4b\n\t"		\
+		STR(PTR_WD)"\t2b, 4b\n\t"		\
 		".previous"				\
 		: "=r" (res)				\
 		: "r" (value), "r" (addr), "i" (-EFAULT));	\
@@ -325,10 +325,10 @@ do {						\
 		"j\t10b\n\t"				\
 		".previous\n\t"				\
 		".section\t__ex_table,\"a\"\n\t"	\
-		STR(PTR)"\t1b, 11b\n\t"			\
-		STR(PTR)"\t2b, 11b\n\t"			\
-		STR(PTR)"\t3b, 11b\n\t"			\
-		STR(PTR)"\t4b, 11b\n\t"			\
+		STR(PTR_WD)"\t1b, 11b\n\t"		\
+		STR(PTR_WD)"\t2b, 11b\n\t"		\
+		STR(PTR_WD)"\t3b, 11b\n\t"		\
+		STR(PTR_WD)"\t4b, 11b\n\t"		\
 		".previous"				\
 		: "=&r" (res)				\
 		: "r" (value), "r" (addr), "i" (-EFAULT)	\
@@ -365,14 +365,14 @@ do {						\
 		"j\t10b\n\t"				\
 		".previous\n\t"				\
 		".section\t__ex_table,\"a\"\n\t"	\
-		STR(PTR)"\t1b, 11b\n\t"			\
-		STR(PTR)"\t2b, 11b\n\t"			\
-		STR(PTR)"\t3b, 11b\n\t"			\
-		STR(PTR)"\t4b, 11b\n\t"			\
-		STR(PTR)"\t5b, 11b\n\t"			\
-		STR(PTR)"\t6b, 11b\n\t"			\
-		STR(PTR)"\t7b, 11b\n\t"			\
-		STR(PTR)"\t8b, 11b\n\t"			\
+		STR(PTR_WD)"\t1b, 11b\n\t"		\
+		STR(PTR_WD)"\t2b, 11b\n\t"		\
+		STR(PTR_WD)"\t3b, 11b\n\t"		\
+		STR(PTR_WD)"\t4b, 11b\n\t"		\
+		STR(PTR_WD)"\t5b, 11b\n\t"		\
+		STR(PTR_WD)"\t6b, 11b\n\t"		\
+		STR(PTR_WD)"\t7b, 11b\n\t"		\
+		STR(PTR_WD)"\t8b, 11b\n\t"		\
 		".previous"				\
 		: "=&r" (res)				\
 		: "r" (value), "r" (addr), "i" (-EFAULT)	\
@@ -398,8 +398,8 @@ do {						\
 		"j\t3b\n\t"				\
 		".previous\n\t"				\
 		".section\t__ex_table,\"a\"\n\t"	\
-		STR(PTR)"\t1b, 4b\n\t"			\
-		STR(PTR)"\t2b, 4b\n\t"			\
+		STR(PTR_WD)"\t1b, 4b\n\t"		\
+		STR(PTR_WD)"\t2b, 4b\n\t"		\
 		".previous"				\
 		: "=&r" (value), "=r" (res)		\
 		: "r" (addr), "i" (-EFAULT));		\
@@ -419,8 +419,8 @@ do {						\
 		"j\t3b\n\t"				\
 		".previous\n\t"				\
 		".section\t__ex_table,\"a\"\n\t"	\
-		STR(PTR)"\t1b, 4b\n\t"			\
-		STR(PTR)"\t2b, 4b\n\t"			\
+		STR(PTR_WD)"\t1b, 4b\n\t"		\
+		STR(PTR_WD)"\t2b, 4b\n\t"		\
 		".previous"				\
 		: "=&r" (value), "=r" (res)		\
 		: "r" (addr), "i" (-EFAULT));		\
@@ -452,10 +452,10 @@ do {						\
 		"j\t10b\n\t"				\
 		".previous\n\t"				\
 		".section\t__ex_table,\"a\"\n\t"	\
-		STR(PTR)"\t1b, 11b\n\t"			\
-		STR(PTR)"\t2b, 11b\n\t"			\
-		STR(PTR)"\t3b, 11b\n\t"			\
-		STR(PTR)"\t4b, 11b\n\t"			\
+		STR(PTR_WD)"\t1b, 11b\n\t"		\
+		STR(PTR_WD)"\t2b, 11b\n\t"		\
+		STR(PTR_WD)"\t3b, 11b\n\t"		\
+		STR(PTR_WD)"\t4b, 11b\n\t"		\
 		".previous"				\
 		: "=&r" (value), "=r" (res)		\
 		: "r" (addr), "i" (-EFAULT));		\
@@ -481,8 +481,8 @@ do {						\
 		"j\t3b\n\t"				\
 		".previous\n\t"				\
 		".section\t__ex_table,\"a\"\n\t"	\
-		STR(PTR)"\t1b, 4b\n\t"			\
-		STR(PTR)"\t2b, 4b\n\t"			\
+		STR(PTR_WD)"\t1b, 4b\n\t"		\
+		STR(PTR_WD)"\t2b, 4b\n\t"		\
 		".previous"				\
 		: "=&r" (value), "=r" (res)		\
 		: "r" (addr), "i" (-EFAULT));		\
@@ -504,8 +504,8 @@ do {						\
 		"j\t3b\n\t"				\
 		".previous\n\t"				\
 		".section\t__ex_table,\"a\"\n\t"	\
-		STR(PTR)"\t1b, 4b\n\t"			\
-		STR(PTR)"\t2b, 4b\n\t"			\
+		STR(PTR_WD)"\t1b, 4b\n\t"		\
+		STR(PTR_WD)"\t2b, 4b\n\t"		\
 		".previous"				\
 		: "=&r" (value), "=r" (res)		\
 		: "r" (addr), "i" (-EFAULT));		\
@@ -524,8 +524,8 @@ do {						\
 		"j\t3b\n\t"				\
 		".previous\n\t"				\
 		".section\t__ex_table,\"a\"\n\t"	\
-		STR(PTR)"\t1b, 4b\n\t"			\
-		STR(PTR)"\t2b, 4b\n\t"			\
+		STR(PTR_WD)"\t1b, 4b\n\t"		\
+		STR(PTR_WD)"\t2b, 4b\n\t"		\
 		".previous"				\
 		: "=&r" (value), "=r" (res)		\
 		: "r" (addr), "i" (-EFAULT));		\
@@ -557,10 +557,10 @@ do {						\
 		"j\t10b\n\t"				\
 		".previous\n\t"				\
 		".section\t__ex_table,\"a\"\n\t"	\
-		STR(PTR)"\t1b, 11b\n\t"			\
-		STR(PTR)"\t2b, 11b\n\t"			\
-		STR(PTR)"\t3b, 11b\n\t"			\
-		STR(PTR)"\t4b, 11b\n\t"			\
+		STR(PTR_WD)"\t1b, 11b\n\t"		\
+		STR(PTR_WD)"\t2b, 11b\n\t"		\
+		STR(PTR_WD)"\t3b, 11b\n\t"		\
+		STR(PTR_WD)"\t4b, 11b\n\t"		\
 		".previous"				\
 		: "=&r" (value), "=r" (res)		\
 		: "r" (addr), "i" (-EFAULT));		\
@@ -602,14 +602,14 @@ do {						\
 		"j\t10b\n\t"				\
 		".previous\n\t"				\
 		".section\t__ex_table,\"a\"\n\t"	\
-		STR(PTR)"\t1b, 11b\n\t"			\
-		STR(PTR)"\t2b, 11b\n\t"			\
-		STR(PTR)"\t3b, 11b\n\t"			\
-		STR(PTR)"\t4b, 11b\n\t"			\
-		STR(PTR)"\t5b, 11b\n\t"			\
-		STR(PTR)"\t6b, 11b\n\t"			\
-		STR(PTR)"\t7b, 11b\n\t"			\
-		STR(PTR)"\t8b, 11b\n\t"			\
+		STR(PTR_WD)"\t1b, 11b\n\t"		\
+		STR(PTR_WD)"\t2b, 11b\n\t"		\
+		STR(PTR_WD)"\t3b, 11b\n\t"		\
+		STR(PTR_WD)"\t4b, 11b\n\t"		\
+		STR(PTR_WD)"\t5b, 11b\n\t"		\
+		STR(PTR_WD)"\t6b, 11b\n\t"		\
+		STR(PTR_WD)"\t7b, 11b\n\t"		\
+		STR(PTR_WD)"\t8b, 11b\n\t"		\
 		".previous"				\
 		: "=&r" (value), "=r" (res)		\
 		: "r" (addr), "i" (-EFAULT));		\
@@ -632,8 +632,8 @@ do {						\
 		"j\t3b\n\t"				\
 		".previous\n\t"				\
 		".section\t__ex_table,\"a\"\n\t"	\
-		STR(PTR)"\t1b, 4b\n\t"			\
-		STR(PTR)"\t2b, 4b\n\t"			\
+		STR(PTR_WD)"\t1b, 4b\n\t"		\
+		STR(PTR_WD)"\t2b, 4b\n\t"		\
 		".previous"				\
 		: "=r" (res)				\
 		: "r" (value), "r" (addr), "i" (-EFAULT));\
@@ -653,8 +653,8 @@ do {						\
 		"j\t3b\n\t"				\
 		".previous\n\t"				\
 		".section\t__ex_table,\"a\"\n\t"	\
-		STR(PTR)"\t1b, 4b\n\t"			\
-		STR(PTR)"\t2b, 4b\n\t"			\
+		STR(PTR_WD)"\t1b, 4b\n\t"		\
+		STR(PTR_WD)"\t2b, 4b\n\t"		\
 		".previous"				\
 		: "=r" (res)				\
 		: "r" (value), "r" (addr), "i" (-EFAULT));	\
@@ -673,8 +673,8 @@ do {						\
 		"j\t3b\n\t"				\
 		".previous\n\t"				\
 		".section\t__ex_table,\"a\"\n\t"	\
-		STR(PTR)"\t1b, 4b\n\t"			\
-		STR(PTR)"\t2b, 4b\n\t"			\
+		STR(PTR_WD)"\t1b, 4b\n\t"		\
+		STR(PTR_WD)"\t2b, 4b\n\t"		\
 		".previous"				\
 		: "=r" (res)				\
 		: "r" (value), "r" (addr), "i" (-EFAULT));	\
@@ -703,10 +703,10 @@ do {						\
 		"j\t10b\n\t"				\
 		".previous\n\t"				\
 		".section\t__ex_table,\"a\"\n\t"	\
-		STR(PTR)"\t1b, 11b\n\t"			\
-		STR(PTR)"\t2b, 11b\n\t"			\
-		STR(PTR)"\t3b, 11b\n\t"			\
-		STR(PTR)"\t4b, 11b\n\t"			\
+		STR(PTR_WD)"\t1b, 11b\n\t"		\
+		STR(PTR_WD)"\t2b, 11b\n\t"		\
+		STR(PTR_WD)"\t3b, 11b\n\t"		\
+		STR(PTR_WD)"\t4b, 11b\n\t"		\
 		".previous"				\
 		: "=&r" (res)				\
 		: "r" (value), "r" (addr), "i" (-EFAULT)	\
@@ -743,14 +743,14 @@ do {						\
 		"j\t10b\n\t"				\
 		".previous\n\t"				\
 		".section\t__ex_table,\"a\"\n\t"	\
-		STR(PTR)"\t1b, 11b\n\t"			\
-		STR(PTR)"\t2b, 11b\n\t"			\
-		STR(PTR)"\t3b, 11b\n\t"			\
-		STR(PTR)"\t4b, 11b\n\t"			\
-		STR(PTR)"\t5b, 11b\n\t"			\
-		STR(PTR)"\t6b, 11b\n\t"			\
-		STR(PTR)"\t7b, 11b\n\t"			\
-		STR(PTR)"\t8b, 11b\n\t"			\
+		STR(PTR_WD)"\t1b, 11b\n\t"		\
+		STR(PTR_WD)"\t2b, 11b\n\t"		\
+		STR(PTR_WD)"\t3b, 11b\n\t"		\
+		STR(PTR_WD)"\t4b, 11b\n\t"		\
+		STR(PTR_WD)"\t5b, 11b\n\t"		\
+		STR(PTR_WD)"\t6b, 11b\n\t"		\
+		STR(PTR_WD)"\t7b, 11b\n\t"		\
+		STR(PTR_WD)"\t8b, 11b\n\t"		\
 		".previous"				\
 		: "=&r" (res)				\
 		: "r" (value), "r" (addr), "i" (-EFAULT)	\


@@ -1258,10 +1258,10 @@ fpu_emul:
 		"	j	10b\n"
 		"	.previous\n"
 		"	.section	__ex_table,\"a\"\n"
-		STR(PTR) " 1b,8b\n"
-		STR(PTR) " 2b,8b\n"
-		STR(PTR) " 3b,8b\n"
-		STR(PTR) " 4b,8b\n"
+		STR(PTR_WD) " 1b,8b\n"
+		STR(PTR_WD) " 2b,8b\n"
+		STR(PTR_WD) " 3b,8b\n"
+		STR(PTR_WD) " 4b,8b\n"
 		"	.previous\n"
 		"	.set	pop\n"
 		: "+&r"(rt), "=&r"(rs),
@@ -1333,10 +1333,10 @@ fpu_emul:
 		"	j	10b\n"
 		"	.previous\n"
 		"	.section	__ex_table,\"a\"\n"
-		STR(PTR) " 1b,8b\n"
-		STR(PTR) " 2b,8b\n"
-		STR(PTR) " 3b,8b\n"
-		STR(PTR) " 4b,8b\n"
+		STR(PTR_WD) " 1b,8b\n"
+		STR(PTR_WD) " 2b,8b\n"
+		STR(PTR_WD) " 3b,8b\n"
+		STR(PTR_WD) " 4b,8b\n"
 		"	.previous\n"
 		"	.set	pop\n"
 		: "+&r"(rt), "=&r"(rs),
@@ -1404,10 +1404,10 @@ fpu_emul:
 		"	j	9b\n"
 		"	.previous\n"
 		"	.section	__ex_table,\"a\"\n"
-		STR(PTR) " 1b,8b\n"
-		STR(PTR) " 2b,8b\n"
-		STR(PTR) " 3b,8b\n"
-		STR(PTR) " 4b,8b\n"
+		STR(PTR_WD) " 1b,8b\n"
+		STR(PTR_WD) " 2b,8b\n"
+		STR(PTR_WD) " 3b,8b\n"
+		STR(PTR_WD) " 4b,8b\n"
 		"	.previous\n"
 		"	.set	pop\n"
 		: "+&r"(rt), "=&r"(rs),
@@ -1474,10 +1474,10 @@ fpu_emul:
 		"	j	9b\n"
 		"	.previous\n"
 		"	.section	__ex_table,\"a\"\n"
-		STR(PTR) " 1b,8b\n"
-		STR(PTR) " 2b,8b\n"
-		STR(PTR) " 3b,8b\n"
-		STR(PTR) " 4b,8b\n"
+		STR(PTR_WD) " 1b,8b\n"
+		STR(PTR_WD) " 2b,8b\n"
+		STR(PTR_WD) " 3b,8b\n"
+		STR(PTR_WD) " 4b,8b\n"
 		"	.previous\n"
 		"	.set	pop\n"
 		: "+&r"(rt), "=&r"(rs),
@@ -1589,14 +1589,14 @@ fpu_emul:
 		"	j	9b\n"
 		"	.previous\n"
 		"	.section	__ex_table,\"a\"\n"
-		STR(PTR) " 1b,8b\n"
-		STR(PTR) " 2b,8b\n"
-		STR(PTR) " 3b,8b\n"
-		STR(PTR) " 4b,8b\n"
-		STR(PTR) " 5b,8b\n"
-		STR(PTR) " 6b,8b\n"
-		STR(PTR) " 7b,8b\n"
-		STR(PTR) " 0b,8b\n"
+		STR(PTR_WD) " 1b,8b\n"
+		STR(PTR_WD) " 2b,8b\n"
+		STR(PTR_WD) " 3b,8b\n"
+		STR(PTR_WD) " 4b,8b\n"
+		STR(PTR_WD) " 5b,8b\n"
+		STR(PTR_WD) " 6b,8b\n"
+		STR(PTR_WD) " 7b,8b\n"
+		STR(PTR_WD) " 0b,8b\n"
 		"	.previous\n"
 		"	.set	pop\n"
 		: "+&r"(rt), "=&r"(rs),
@@ -1708,14 +1708,14 @@ fpu_emul:
 		"	j	9b\n"
 		"	.previous\n"
 		"	.section	__ex_table,\"a\"\n"
-		STR(PTR) " 1b,8b\n"
-		STR(PTR) " 2b,8b\n"
-		STR(PTR) " 3b,8b\n"
-		STR(PTR) " 4b,8b\n"
-		STR(PTR) " 5b,8b\n"
-		STR(PTR) " 6b,8b\n"
-		STR(PTR) " 7b,8b\n"
-		STR(PTR) " 0b,8b\n"
+		STR(PTR_WD) " 1b,8b\n"
+		STR(PTR_WD) " 2b,8b\n"
+		STR(PTR_WD) " 3b,8b\n"
+		STR(PTR_WD) " 4b,8b\n"
+		STR(PTR_WD) " 5b,8b\n"
+		STR(PTR_WD) " 6b,8b\n"
+		STR(PTR_WD) " 7b,8b\n"
+		STR(PTR_WD) " 0b,8b\n"
 		"	.previous\n"
 		"	.set	pop\n"
 		: "+&r"(rt), "=&r"(rs),
@@ -1827,14 +1827,14 @@ fpu_emul:
 		"	j	9b\n"
 		"	.previous\n"
 		"	.section	__ex_table,\"a\"\n"
-		STR(PTR) " 1b,8b\n"
-		STR(PTR) " 2b,8b\n"
-		STR(PTR) " 3b,8b\n"
-		STR(PTR) " 4b,8b\n"
-		STR(PTR) " 5b,8b\n"
-		STR(PTR) " 6b,8b\n"
-		STR(PTR) " 7b,8b\n"
-		STR(PTR) " 0b,8b\n"
+		STR(PTR_WD) " 1b,8b\n"
+		STR(PTR_WD) " 2b,8b\n"
+		STR(PTR_WD) " 3b,8b\n"
+		STR(PTR_WD) " 4b,8b\n"
+		STR(PTR_WD) " 5b,8b\n"
+		STR(PTR_WD) " 6b,8b\n"
+		STR(PTR_WD) " 7b,8b\n"
+		STR(PTR_WD) " 0b,8b\n"
 		"	.previous\n"
 		"	.set	pop\n"
 		: "+&r"(rt), "=&r"(rs),
@@ -1945,14 +1945,14 @@ fpu_emul:
 		"	j	9b\n"
 		"	.previous\n"
 		"	.section	__ex_table,\"a\"\n"
-		STR(PTR) " 1b,8b\n"
-		STR(PTR) " 2b,8b\n"
-		STR(PTR) " 3b,8b\n"
-		STR(PTR) " 4b,8b\n"
-		STR(PTR) " 5b,8b\n"
-		STR(PTR) " 6b,8b\n"
-		STR(PTR) " 7b,8b\n"
-		STR(PTR) " 0b,8b\n"
+		STR(PTR_WD) " 1b,8b\n"
+		STR(PTR_WD) " 2b,8b\n"
+		STR(PTR_WD) " 3b,8b\n"
+		STR(PTR_WD) " 4b,8b\n"
+		STR(PTR_WD) " 5b,8b\n"
+		STR(PTR_WD) " 6b,8b\n"
+		STR(PTR_WD) " 7b,8b\n"
+		STR(PTR_WD) " 0b,8b\n"
 		"	.previous\n"
 		"	.set	pop\n"
 		: "+&r"(rt), "=&r"(rs),
@@ -2007,7 +2007,7 @@ fpu_emul:
 		"j	2b\n"
 		".previous\n"
 		".section	__ex_table,\"a\"\n"
-		STR(PTR) " 1b,3b\n"
+		STR(PTR_WD) " 1b,3b\n"
 		".previous\n"
 		: "=&r"(res), "+&r"(err)
 		: "r"(vaddr), "i"(SIGSEGV)
@@ -2065,7 +2065,7 @@ fpu_emul:
 		"j	2b\n"
 		".previous\n"
 		".section	__ex_table,\"a\"\n"
-		STR(PTR) " 1b,3b\n"
+		STR(PTR_WD) " 1b,3b\n"
 		".previous\n"
 		: "+&r"(res), "+&r"(err)
 		: "r"(vaddr), "i"(SIGSEGV));
@@ -2126,7 +2126,7 @@ fpu_emul:
 		"j	2b\n"
 		".previous\n"
 		".section	__ex_table,\"a\"\n"
-		STR(PTR) " 1b,3b\n"
+		STR(PTR_WD) " 1b,3b\n"
 		".previous\n"
 		: "=&r"(res), "+&r"(err)
 		: "r"(vaddr), "i"(SIGSEGV)
@@ -2189,7 +2189,7 @@ fpu_emul:
 		"j	2b\n"
 		".previous\n"
 		".section	__ex_table,\"a\"\n"
-		STR(PTR) " 1b,3b\n"
+		STR(PTR_WD) " 1b,3b\n"
 		".previous\n"
 		: "+&r"(res), "+&r"(err)
 		: "r"(vaddr), "i"(SIGSEGV));


@@ -23,14 +23,14 @@
 #define EX(a,b)						\
 9:	a,##b;						\
 	.section __ex_table,"a";			\
-	PTR	9b,fault;				\
+	PTR_WD	9b,fault;				\
 	.previous

 #define EX2(a,b)					\
 9:	a,##b;						\
 	.section __ex_table,"a";			\
-	PTR	9b,fault;				\
-	PTR	9b+4,fault;				\
+	PTR_WD	9b,fault;				\
+	PTR_WD	9b+4,fault;				\
 	.previous

 	.set	mips1


@@ -31,7 +31,7 @@
 .ex\@:	\insn	\reg, \src
 	.set	pop
 	.section __ex_table,"a"
-	PTR	.ex\@, fault
+	PTR_WD	.ex\@, fault
 	.previous
 	.endm


@@ -147,10 +147,10 @@ LEAF(kexec_smp_wait)

 kexec_args:
 	EXPORT(kexec_args)
-arg0:	PTR	0x0
-arg1:	PTR	0x0
-arg2:	PTR	0x0
-arg3:	PTR	0x0
+arg0:	PTR_WD	0x0
+arg1:	PTR_WD	0x0
+arg2:	PTR_WD	0x0
+arg3:	PTR_WD	0x0
 	.size	kexec_args,PTRSIZE*4

 #ifdef CONFIG_SMP
@@ -161,10 +161,10 @@ arg3:	PTR_WD	0x0
  */
 secondary_kexec_args:
 	EXPORT(secondary_kexec_args)
-s_arg0:	PTR	0x0
-s_arg1:	PTR	0x0
-s_arg2:	PTR	0x0
-s_arg3:	PTR	0x0
+s_arg0:	PTR_WD	0x0
+s_arg1:	PTR_WD	0x0
+s_arg2:	PTR_WD	0x0
+s_arg3:	PTR_WD	0x0
 	.size	secondary_kexec_args,PTRSIZE*4
 kexec_flag:
 	LONG	0x1
@@ -173,17 +173,17 @@ kexec_flag:

 kexec_start_address:
 	EXPORT(kexec_start_address)
-	PTR	0x0
+	PTR_WD	0x0
 	.size	kexec_start_address, PTRSIZE

 kexec_indirection_page:
 	EXPORT(kexec_indirection_page)
-	PTR	0
+	PTR_WD	0
 	.size	kexec_indirection_page, PTRSIZE

 relocate_new_kernel_end:

 relocate_new_kernel_size:
 	EXPORT(relocate_new_kernel_size)
-	PTR	relocate_new_kernel_end - relocate_new_kernel
+	PTR_WD	relocate_new_kernel_end - relocate_new_kernel
 	.size	relocate_new_kernel_size, PTRSIZE


@@ -72,10 +72,10 @@ loads_done:
 	.set	pop

 	.section __ex_table,"a"
-	PTR	load_a4, bad_stack_a4
-	PTR	load_a5, bad_stack_a5
-	PTR	load_a6, bad_stack_a6
-	PTR	load_a7, bad_stack_a7
+	PTR_WD	load_a4, bad_stack_a4
+	PTR_WD	load_a5, bad_stack_a5
+	PTR_WD	load_a6, bad_stack_a6
+	PTR_WD	load_a7, bad_stack_a7
 	.previous

 	lw	t0, TI_FLAGS($28)	# syscall tracing enabled?
@@ -216,7 +216,7 @@ einval:	li	v0, -ENOSYS
 #endif /* CONFIG_MIPS_MT_FPAFF */

 #define __SYSCALL_WITH_COMPAT(nr, native, compat)	__SYSCALL(nr, native)
-#define __SYSCALL(nr, entry)	PTR entry
+#define __SYSCALL(nr, entry)	PTR_WD entry
 	.align	2
 	.type	sys_call_table, @object
 EXPORT(sys_call_table)


@@ -101,7 +101,7 @@ not_n32_scall:

 	END(handle_sysn32)

-#define __SYSCALL(nr, entry)	PTR entry
+#define __SYSCALL(nr, entry)	PTR_WD entry
 	.type	sysn32_call_table, @object
 EXPORT(sysn32_call_table)
 #include <asm/syscall_table_n32.h>


@@ -109,7 +109,7 @@ illegal_syscall:
 	j	n64_syscall_exit
 	END(handle_sys64)

-#define __SYSCALL(nr, entry)	PTR entry
+#define __SYSCALL(nr, entry)	PTR_WD entry
 	.align	3
 	.type	sys_call_table, @object
 EXPORT(sys_call_table)


@@ -73,10 +73,10 @@ load_a7: lw	a7, 28(t0)		# argument #8 from usp
 loads_done:

 	.section __ex_table,"a"
-	PTR	load_a4, bad_stack_a4
-	PTR	load_a5, bad_stack_a5
-	PTR	load_a6, bad_stack_a6
-	PTR	load_a7, bad_stack_a7
+	PTR_WD	load_a4, bad_stack_a4
+	PTR_WD	load_a5, bad_stack_a5
+	PTR_WD	load_a6, bad_stack_a6
+	PTR_WD	load_a7, bad_stack_a7
 	.previous

 	li	t1, _TIF_WORK_SYSCALL_ENTRY
@@ -214,7 +214,7 @@ einval:	li	v0, -ENOSYS
 	END(sys32_syscall)

 #define __SYSCALL_WITH_COMPAT(nr, native, compat)	__SYSCALL(nr, compat)
-#define __SYSCALL(nr, entry)	PTR entry
+#define __SYSCALL(nr, entry)	PTR_WD entry
 	.align	3
 	.type	sys32_call_table,@object
 EXPORT(sys32_call_table)


@@ -122,8 +122,8 @@ static inline int mips_atomic_set(unsigned long addr, unsigned long new)
 		"	j	3b				\n"
 		"	.previous				\n"
 		"	.section __ex_table,\"a\"		\n"
-		"	"STR(PTR)"	1b, 4b			\n"
-		"	"STR(PTR)"	2b, 4b			\n"
+		"	"STR(PTR_WD)"	1b, 4b			\n"
+		"	"STR(PTR_WD)"	2b, 4b			\n"
 		"	.previous				\n"
 		"	.set	pop				\n"
 		: [old] "=&r" (old),
@@ -152,8 +152,8 @@ static inline int mips_atomic_set(unsigned long addr, unsigned long new)
 		"	j	3b				\n"
 		"	.previous				\n"
 		"	.section __ex_table,\"a\"		\n"
-		"	"STR(PTR)"	1b, 5b			\n"
-		"	"STR(PTR)"	2b, 5b			\n"
+		"	"STR(PTR_WD)"	1b, 5b			\n"
+		"	"STR(PTR_WD)"	2b, 5b			\n"
 		"	.previous				\n"
 		"	.set	pop				\n"
 		: [old] "=&r" (old),


@@ -458,8 +458,8 @@ void kvm_vz_acquire_htimer(struct kvm_vcpu *vcpu)
 /**
  * _kvm_vz_save_htimer() - Switch to software emulation of guest timer.
  * @vcpu:	Virtual CPU.
- * @compare:	Pointer to write compare value to.
- * @cause:	Pointer to write cause value to.
+ * @out_compare: Pointer to write compare value to.
+ * @out_cause:	Pointer to write cause value to.
  *
  * Save VZ guest timer state and switch to software emulation of guest CP0
  * timer. The hard timer must already be in use, so preemption should be
@@ -1541,11 +1541,14 @@ static int kvm_trap_vz_handle_guest_exit(struct kvm_vcpu *vcpu)
 }

 /**
- * kvm_trap_vz_handle_cop_unusuable() - Guest used unusable coprocessor.
+ * kvm_trap_vz_handle_cop_unusable() - Guest used unusable coprocessor.
  * @vcpu:	Virtual CPU context.
  *
  * Handle when the guest attempts to use a coprocessor which hasn't been allowed
  * by the root context.
+ *
+ * Return: value indicating whether to resume the host or the guest
+ *         (RESUME_HOST or RESUME_GUEST)
  */
 static int kvm_trap_vz_handle_cop_unusable(struct kvm_vcpu *vcpu)
 {
@@ -1592,6 +1595,9 @@ static int kvm_trap_vz_handle_cop_unusable(struct kvm_vcpu *vcpu)
  *
  * Handle when the guest attempts to use MSA when it is disabled in the root
  * context.
+ *
+ * Return: value indicating whether to resume the host or the guest
+ *         (RESUME_HOST or RESUME_GUEST)
  */
 static int kvm_trap_vz_handle_msa_disabled(struct kvm_vcpu *vcpu)
 {

View File

@ -347,7 +347,7 @@ EXPORT_SYMBOL(csum_partial)
.if \mode == LEGACY_MODE; \ .if \mode == LEGACY_MODE; \
9: insn reg, addr; \ 9: insn reg, addr; \
.section __ex_table,"a"; \ .section __ex_table,"a"; \
PTR 9b, .L_exc; \ PTR_WD 9b, .L_exc; \
.previous; \ .previous; \
/* This is enabled in EVA mode */ \ /* This is enabled in EVA mode */ \
.else; \ .else; \
@ -356,7 +356,7 @@ EXPORT_SYMBOL(csum_partial)
((\to == USEROP) && (type == ST_INSN)); \ ((\to == USEROP) && (type == ST_INSN)); \
9: __BUILD_EVA_INSN(insn##e, reg, addr); \ 9: __BUILD_EVA_INSN(insn##e, reg, addr); \
.section __ex_table,"a"; \ .section __ex_table,"a"; \
PTR 9b, .L_exc; \ PTR_WD 9b, .L_exc; \
.previous; \ .previous; \
.else; \ .else; \
/* EVA without exception */ \ /* EVA without exception */ \

View File

@ -116,7 +116,7 @@
.if \mode == LEGACY_MODE; \ .if \mode == LEGACY_MODE; \
9: insn reg, addr; \ 9: insn reg, addr; \
.section __ex_table,"a"; \ .section __ex_table,"a"; \
PTR 9b, handler; \ PTR_WD 9b, handler; \
.previous; \ .previous; \
/* This is assembled in EVA mode */ \ /* This is assembled in EVA mode */ \
.else; \ .else; \
@ -125,7 +125,7 @@
((\to == USEROP) && (type == ST_INSN)); \ ((\to == USEROP) && (type == ST_INSN)); \
9: __BUILD_EVA_INSN(insn##e, reg, addr); \ 9: __BUILD_EVA_INSN(insn##e, reg, addr); \
.section __ex_table,"a"; \ .section __ex_table,"a"; \
PTR 9b, handler; \ PTR_WD 9b, handler; \
.previous; \ .previous; \
.else; \ .else; \
/* \ /* \

View File

@ -52,7 +52,7 @@
9: ___BUILD_EVA_INSN(insn, reg, addr); \ 9: ___BUILD_EVA_INSN(insn, reg, addr); \
.endif; \ .endif; \
.section __ex_table,"a"; \ .section __ex_table,"a"; \
PTR 9b, handler; \ PTR_WD 9b, handler; \
.previous .previous
.macro f_fill64 dst, offset, val, fixup, mode .macro f_fill64 dst, offset, val, fixup, mode

View File

@ -15,7 +15,7 @@
#define EX(insn,reg,addr,handler) \ #define EX(insn,reg,addr,handler) \
9: insn reg, addr; \ 9: insn reg, addr; \
.section __ex_table,"a"; \ .section __ex_table,"a"; \
PTR 9b, handler; \ PTR_WD 9b, handler; \
.previous .previous
/* /*
@ -59,7 +59,7 @@ LEAF(__strncpy_from_user_asm)
jr ra jr ra
.section __ex_table,"a" .section __ex_table,"a"
PTR 1b, .Lfault PTR_WD 1b, .Lfault
.previous .previous
EXPORT_SYMBOL(__strncpy_from_user_asm) EXPORT_SYMBOL(__strncpy_from_user_asm)

View File

@ -14,7 +14,7 @@
#define EX(insn,reg,addr,handler) \ #define EX(insn,reg,addr,handler) \
9: insn reg, addr; \ 9: insn reg, addr; \
.section __ex_table,"a"; \ .section __ex_table,"a"; \
PTR 9b, handler; \ PTR_WD 9b, handler; \
.previous .previous
/* /*

View File

@ -3,7 +3,7 @@
#include <linux/pci.h> #include <linux/pci.h>
#include <loongson.h> #include <loongson.h>
static void pci_fixup_radeon(struct pci_dev *pdev) static void pci_fixup_video(struct pci_dev *pdev)
{ {
struct resource *res = &pdev->resource[PCI_ROM_RESOURCE]; struct resource *res = &pdev->resource[PCI_ROM_RESOURCE];
@ -22,8 +22,7 @@ static void pci_fixup_radeon(struct pci_dev *pdev)
res->flags = IORESOURCE_MEM | IORESOURCE_ROM_SHADOW | res->flags = IORESOURCE_MEM | IORESOURCE_ROM_SHADOW |
IORESOURCE_PCI_FIXED; IORESOURCE_PCI_FIXED;
dev_info(&pdev->dev, "BAR %d: assigned %pR for Radeon ROM\n", dev_info(&pdev->dev, "Video device with shadowed ROM at %pR\n", res);
PCI_ROM_RESOURCE, res);
} }
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_ATI, 0x9615, DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_ATI, 0x9615,
PCI_CLASS_DISPLAY_VGA, 8, pci_fixup_radeon); PCI_CLASS_DISPLAY_VGA, 8, pci_fixup_video);
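Two changes land together here: the quirk is generalized from a Radeon-specific fixup to pci_fixup_video, and it moves from the FINAL to the HEADER fixup stage. As I read the quirk machinery, HEADER-stage fixups run while the device's config header is parsed during enumeration, before resource assignment, whereas FINAL-stage fixups run much later, so the shadowed ROM resource is now claimed before the PCI core starts handing out resources.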

View File

@ -178,6 +178,7 @@ static inline bool pte_user(pte_t pte)
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot); int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);
void unmap_kernel_page(unsigned long va);
#endif /* !__ASSEMBLY__ */ #endif /* !__ASSEMBLY__ */

View File

@ -1082,6 +1082,8 @@ static inline int map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t p
return hash__map_kernel_page(ea, pa, prot); return hash__map_kernel_page(ea, pa, prot);
} }
void unmap_kernel_page(unsigned long va);
static inline int __meminit vmemmap_create_mapping(unsigned long start, static inline int __meminit vmemmap_create_mapping(unsigned long start,
unsigned long page_size, unsigned long page_size,
unsigned long phys) unsigned long phys)

View File

@ -111,8 +111,10 @@ static inline void __set_fixmap(enum fixed_addresses idx,
BUILD_BUG_ON(idx >= __end_of_fixed_addresses); BUILD_BUG_ON(idx >= __end_of_fixed_addresses);
else if (WARN_ON(idx >= __end_of_fixed_addresses)) else if (WARN_ON(idx >= __end_of_fixed_addresses))
return; return;
if (pgprot_val(flags))
map_kernel_page(__fix_to_virt(idx), phys, flags); map_kernel_page(__fix_to_virt(idx), phys, flags);
else
unmap_kernel_page(__fix_to_virt(idx));
} }
#define __early_set_fixmap __set_fixmap #define __early_set_fixmap __set_fixmap
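With this hunk __set_fixmap() gains a teardown path: an empty pgprot removes the translation instead of installing a PTE with no access bits. A hedged usage sketch (FIX_EXAMPLE is an invented slot name; real callers use entries from enum fixed_addresses):

    __set_fixmap(FIX_EXAMPLE, phys_addr, PAGE_KERNEL); /* installs a PTE   */
    __set_fixmap(FIX_EXAMPLE, 0, __pgprot(0));         /* now unmaps again */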

View File

@ -39,7 +39,6 @@ struct kvm_nested_guest {
pgd_t *shadow_pgtable; /* our page table for this guest */ pgd_t *shadow_pgtable; /* our page table for this guest */
u64 l1_gr_to_hr; /* L1's addr of part'n-scoped table */ u64 l1_gr_to_hr; /* L1's addr of part'n-scoped table */
u64 process_table; /* process table entry for this guest */ u64 process_table; /* process table entry for this guest */
u64 hfscr; /* HFSCR that the L1 requested for this nested guest */
long refcnt; /* number of pointers to this struct */ long refcnt; /* number of pointers to this struct */
struct mutex tlb_lock; /* serialize page faults and tlbies */ struct mutex tlb_lock; /* serialize page faults and tlbies */
struct kvm_nested_guest *next; struct kvm_nested_guest *next;

View File

@ -818,6 +818,7 @@ struct kvm_vcpu_arch {
/* For support of nested guests */ /* For support of nested guests */
struct kvm_nested_guest *nested; struct kvm_nested_guest *nested;
u64 nested_hfscr; /* HFSCR that the L1 requested for the nested guest */
u32 nested_vcpu_id; u32 nested_vcpu_id;
gpa_t nested_io_gpr; gpa_t nested_io_gpr;
#endif #endif

View File

@ -64,6 +64,7 @@ extern int icache_44x_need_flush;
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot); int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);
void unmap_kernel_page(unsigned long va);
#endif /* !__ASSEMBLY__ */ #endif /* !__ASSEMBLY__ */

View File

@ -308,6 +308,7 @@ static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
#define __swp_entry_to_pte(x) __pte((x).val) #define __swp_entry_to_pte(x) __pte((x).val)
int map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot); int map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot);
void unmap_kernel_page(unsigned long va);
extern int __meminit vmemmap_create_mapping(unsigned long start, extern int __meminit vmemmap_create_mapping(unsigned long start,
unsigned long page_size, unsigned long page_size,
unsigned long phys); unsigned long phys);

View File

@ -649,8 +649,9 @@ DEFINE_INTERRUPT_HANDLER_ASYNC(timer_interrupt)
__this_cpu_inc(irq_stat.timer_irqs_event); __this_cpu_inc(irq_stat.timer_irqs_event);
} else { } else {
now = *next_tb - now; now = *next_tb - now;
if (now <= decrementer_max) if (now > decrementer_max)
set_dec_or_work(now); now = decrementer_max;
set_dec_or_work(now);
__this_cpu_inc(irq_stat.timer_irqs_others); __this_cpu_inc(irq_stat.timer_irqs_others);
} }
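The rewrite closes a gap in the old logic: when the next event was further away than the decrementer can count, the old code skipped set_dec_or_work() entirely and left the decrementer unprogrammed; clamping the delta to decrementer_max means the timer is always re-armed.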

View File

@ -1816,7 +1816,6 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
static int kvmppc_handle_nested_exit(struct kvm_vcpu *vcpu) static int kvmppc_handle_nested_exit(struct kvm_vcpu *vcpu)
{ {
struct kvm_nested_guest *nested = vcpu->arch.nested;
int r; int r;
int srcu_idx; int srcu_idx;
@ -1922,7 +1921,7 @@ static int kvmppc_handle_nested_exit(struct kvm_vcpu *vcpu)
* it into a HEAI. * it into a HEAI.
*/ */
if (!(vcpu->arch.hfscr_permitted & (1UL << cause)) || if (!(vcpu->arch.hfscr_permitted & (1UL << cause)) ||
(nested->hfscr & (1UL << cause))) { (vcpu->arch.nested_hfscr & (1UL << cause))) {
vcpu->arch.trap = BOOK3S_INTERRUPT_H_EMUL_ASSIST; vcpu->arch.trap = BOOK3S_INTERRUPT_H_EMUL_ASSIST;
/* /*

View File

@ -363,7 +363,7 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
/* set L1 state to L2 state */ /* set L1 state to L2 state */
vcpu->arch.nested = l2; vcpu->arch.nested = l2;
vcpu->arch.nested_vcpu_id = l2_hv.vcpu_token; vcpu->arch.nested_vcpu_id = l2_hv.vcpu_token;
l2->hfscr = l2_hv.hfscr; vcpu->arch.nested_hfscr = l2_hv.hfscr;
vcpu->arch.regs = l2_regs; vcpu->arch.regs = l2_regs;
/* Guest must always run with ME enabled, HV disabled. */ /* Guest must always run with ME enabled, HV disabled. */
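Together with the two header hunks above, this moves the L1-requested HFSCR out of the shared per-L2 kvm_nested_guest structure into per-vCPU state, so each vCPU keeps its own copy instead of every vCPU entering the same L2 racing on a single field.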

View File

@ -206,6 +206,15 @@ void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
__set_pte_at(mm, addr, ptep, pte, 0); __set_pte_at(mm, addr, ptep, pte, 0);
} }
void unmap_kernel_page(unsigned long va)
{
pmd_t *pmdp = pmd_off_k(va);
pte_t *ptep = pte_offset_kernel(pmdp, va);
pte_clear(&init_mm, va, ptep);
flush_tlb_kernel_range(va, va + PAGE_SIZE);
}
/* /*
* This is called when relaxing access to a PTE. It's also called in the page * This is called when relaxing access to a PTE. It's also called in the page
* fault path when we don't hit any of the major fault cases, ie, a minor * fault path when we don't hit any of the major fault cases, ie, a minor
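Note the pairing inside the new unmap_kernel_page() helper: pte_clear() only edits the page table, so the flush_tlb_kernel_range() call over the single page is what actually drops any stale translation from the TLB.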

View File

@ -1355,9 +1355,20 @@ static void power_pmu_disable(struct pmu *pmu)
* Otherwise provide a warning if there is PMI pending, but * Otherwise provide a warning if there is PMI pending, but
* no counter is found overflown. * no counter is found overflown.
*/ */
if (any_pmc_overflown(cpuhw)) if (any_pmc_overflown(cpuhw)) {
clear_pmi_irq_pending(); /*
else * Since power_pmu_disable runs under local_irq_save, it
* could happen that code hits a PMC overflow without PMI
* pending in paca. Hence only clear PMI pending if it was
* set.
*
* If a PMI is pending, then MSR[EE] must be disabled (because
* the masked PMI handler disables EE). So it is safe to
* call clear_pmi_irq_pending().
*/
if (pmi_irq_pending())
clear_pmi_irq_pending();
} else
WARN_ON(pmi_irq_pending()); WARN_ON(pmi_irq_pending());
val = mmcra = cpuhw->mmcr.mmcra; val = mmcra = cpuhw->mmcr.mmcra;

View File

@ -945,6 +945,9 @@ config S390_GUEST
endmenu endmenu
config S390_MODULES_SANITY_TEST_HELPERS
def_bool n
menu "Selftests" menu "Selftests"
config S390_UNWIND_SELFTEST config S390_UNWIND_SELFTEST
@ -971,4 +974,16 @@ config S390_KPROBES_SANITY_TEST
Say N if you are unsure. Say N if you are unsure.
config S390_MODULES_SANITY_TEST
def_tristate n
depends on KUNIT
default KUNIT_ALL_TESTS
prompt "Enable s390 specific modules tests"
select S390_MODULES_SANITY_TEST_HELPERS
help
This option enables an s390-specific modules test. It is
not useful for distributions or general kernels, but only for
kernel developers working on architecture code.
Say N if you are unsure.
endmenu endmenu

View File

@ -63,6 +63,7 @@ CONFIG_APPLDATA_BASE=y
CONFIG_KVM=m CONFIG_KVM=m
CONFIG_S390_UNWIND_SELFTEST=m CONFIG_S390_UNWIND_SELFTEST=m
CONFIG_S390_KPROBES_SANITY_TEST=m CONFIG_S390_KPROBES_SANITY_TEST=m
CONFIG_S390_MODULES_SANITY_TEST=m
CONFIG_KPROBES=y CONFIG_KPROBES=y
CONFIG_JUMP_LABEL=y CONFIG_JUMP_LABEL=y
CONFIG_STATIC_KEYS_SELFTEST=y CONFIG_STATIC_KEYS_SELFTEST=y
@ -96,7 +97,6 @@ CONFIG_MEMORY_HOTPLUG=y
CONFIG_MEMORY_HOTREMOVE=y CONFIG_MEMORY_HOTREMOVE=y
CONFIG_KSM=y CONFIG_KSM=y
CONFIG_TRANSPARENT_HUGEPAGE=y CONFIG_TRANSPARENT_HUGEPAGE=y
CONFIG_FRONTSWAP=y
CONFIG_CMA_DEBUG=y CONFIG_CMA_DEBUG=y
CONFIG_CMA_DEBUGFS=y CONFIG_CMA_DEBUGFS=y
CONFIG_CMA_SYSFS=y CONFIG_CMA_SYSFS=y
@ -109,6 +109,7 @@ CONFIG_DEFERRED_STRUCT_PAGE_INIT=y
CONFIG_IDLE_PAGE_TRACKING=y CONFIG_IDLE_PAGE_TRACKING=y
CONFIG_PERCPU_STATS=y CONFIG_PERCPU_STATS=y
CONFIG_GUP_TEST=y CONFIG_GUP_TEST=y
CONFIG_ANON_VMA_NAME=y
CONFIG_NET=y CONFIG_NET=y
CONFIG_PACKET=y CONFIG_PACKET=y
CONFIG_PACKET_DIAG=m CONFIG_PACKET_DIAG=m
@ -116,7 +117,6 @@ CONFIG_UNIX=y
CONFIG_UNIX_DIAG=m CONFIG_UNIX_DIAG=m
CONFIG_XFRM_USER=m CONFIG_XFRM_USER=m
CONFIG_NET_KEY=m CONFIG_NET_KEY=m
CONFIG_NET_SWITCHDEV=y
CONFIG_SMC=m CONFIG_SMC=m
CONFIG_SMC_DIAG=m CONFIG_SMC_DIAG=m
CONFIG_INET=y CONFIG_INET=y
@ -185,7 +185,6 @@ CONFIG_NF_CT_NETLINK_TIMEOUT=m
CONFIG_NF_TABLES=m CONFIG_NF_TABLES=m
CONFIG_NF_TABLES_INET=y CONFIG_NF_TABLES_INET=y
CONFIG_NFT_CT=m CONFIG_NFT_CT=m
CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m CONFIG_NFT_LIMIT=m
CONFIG_NFT_NAT=m CONFIG_NFT_NAT=m
@ -391,6 +390,7 @@ CONFIG_OPENVSWITCH=m
CONFIG_VSOCKETS=m CONFIG_VSOCKETS=m
CONFIG_VIRTIO_VSOCKETS=m CONFIG_VIRTIO_VSOCKETS=m
CONFIG_NETLINK_DIAG=m CONFIG_NETLINK_DIAG=m
CONFIG_NET_SWITCHDEV=y
CONFIG_CGROUP_NET_PRIO=y CONFIG_CGROUP_NET_PRIO=y
CONFIG_NET_PKTGEN=m CONFIG_NET_PKTGEN=m
CONFIG_PCI=y CONFIG_PCI=y
@ -400,6 +400,7 @@ CONFIG_PCI_IOV=y
CONFIG_HOTPLUG_PCI=y CONFIG_HOTPLUG_PCI=y
CONFIG_HOTPLUG_PCI_S390=y CONFIG_HOTPLUG_PCI_S390=y
CONFIG_DEVTMPFS=y CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_SAFE=y
CONFIG_CONNECTOR=y CONFIG_CONNECTOR=y
CONFIG_ZRAM=y CONFIG_ZRAM=y
CONFIG_BLK_DEV_LOOP=m CONFIG_BLK_DEV_LOOP=m
@ -501,6 +502,7 @@ CONFIG_NLMON=m
# CONFIG_NET_VENDOR_DEC is not set # CONFIG_NET_VENDOR_DEC is not set
# CONFIG_NET_VENDOR_DLINK is not set # CONFIG_NET_VENDOR_DLINK is not set
# CONFIG_NET_VENDOR_EMULEX is not set # CONFIG_NET_VENDOR_EMULEX is not set
# CONFIG_NET_VENDOR_ENGLEDER is not set
# CONFIG_NET_VENDOR_EZCHIP is not set # CONFIG_NET_VENDOR_EZCHIP is not set
# CONFIG_NET_VENDOR_GOOGLE is not set # CONFIG_NET_VENDOR_GOOGLE is not set
# CONFIG_NET_VENDOR_HUAWEI is not set # CONFIG_NET_VENDOR_HUAWEI is not set
@ -511,7 +513,6 @@ CONFIG_NLMON=m
CONFIG_MLX4_EN=m CONFIG_MLX4_EN=m
CONFIG_MLX5_CORE=m CONFIG_MLX5_CORE=m
CONFIG_MLX5_CORE_EN=y CONFIG_MLX5_CORE_EN=y
CONFIG_MLX5_ESWITCH=y
# CONFIG_NET_VENDOR_MICREL is not set # CONFIG_NET_VENDOR_MICREL is not set
# CONFIG_NET_VENDOR_MICROCHIP is not set # CONFIG_NET_VENDOR_MICROCHIP is not set
# CONFIG_NET_VENDOR_MICROSEMI is not set # CONFIG_NET_VENDOR_MICROSEMI is not set
@ -542,6 +543,7 @@ CONFIG_MLX5_ESWITCH=y
# CONFIG_NET_VENDOR_SYNOPSYS is not set # CONFIG_NET_VENDOR_SYNOPSYS is not set
# CONFIG_NET_VENDOR_TEHUTI is not set # CONFIG_NET_VENDOR_TEHUTI is not set
# CONFIG_NET_VENDOR_TI is not set # CONFIG_NET_VENDOR_TI is not set
# CONFIG_NET_VENDOR_VERTEXCOM is not set
# CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set # CONFIG_NET_VENDOR_WIZNET is not set
# CONFIG_NET_VENDOR_XILINX is not set # CONFIG_NET_VENDOR_XILINX is not set
@ -592,6 +594,7 @@ CONFIG_VIRTIO_BALLOON=m
CONFIG_VIRTIO_INPUT=y CONFIG_VIRTIO_INPUT=y
CONFIG_VHOST_NET=m CONFIG_VHOST_NET=m
CONFIG_VHOST_VSOCK=m CONFIG_VHOST_VSOCK=m
# CONFIG_SURFACE_PLATFORMS is not set
CONFIG_S390_CCW_IOMMU=y CONFIG_S390_CCW_IOMMU=y
CONFIG_S390_AP_IOMMU=y CONFIG_S390_AP_IOMMU=y
CONFIG_EXT4_FS=y CONFIG_EXT4_FS=y
@ -756,9 +759,6 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_USER_API_RNG=m CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m CONFIG_CRYPTO_USER_API_AEAD=m
CONFIG_CRYPTO_STATS=y CONFIG_CRYPTO_STATS=y
CONFIG_CRYPTO_LIB_BLAKE2S=m
CONFIG_CRYPTO_LIB_CURVE25519=m
CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m
CONFIG_ZCRYPT=m CONFIG_ZCRYPT=m
CONFIG_PKEY=m CONFIG_PKEY=m
CONFIG_CRYPTO_PAES_S390=m CONFIG_CRYPTO_PAES_S390=m
@ -774,6 +774,8 @@ CONFIG_CRYPTO_GHASH_S390=m
CONFIG_CRYPTO_CRC32_S390=y CONFIG_CRYPTO_CRC32_S390=y
CONFIG_CRYPTO_DEV_VIRTIO=m CONFIG_CRYPTO_DEV_VIRTIO=m
CONFIG_CORDIC=m CONFIG_CORDIC=m
CONFIG_CRYPTO_LIB_CURVE25519=m
CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m
CONFIG_CRC32_SELFTEST=y CONFIG_CRC32_SELFTEST=y
CONFIG_CRC4=m CONFIG_CRC4=m
CONFIG_CRC7=m CONFIG_CRC7=m
@ -807,7 +809,6 @@ CONFIG_SLUB_DEBUG_ON=y
CONFIG_SLUB_STATS=y CONFIG_SLUB_STATS=y
CONFIG_DEBUG_STACK_USAGE=y CONFIG_DEBUG_STACK_USAGE=y
CONFIG_DEBUG_VM=y CONFIG_DEBUG_VM=y
CONFIG_DEBUG_VM_VMACACHE=y
CONFIG_DEBUG_VM_PGFLAGS=y CONFIG_DEBUG_VM_PGFLAGS=y
CONFIG_DEBUG_MEMORY_INIT=y CONFIG_DEBUG_MEMORY_INIT=y
CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m
@ -819,12 +820,11 @@ CONFIG_PANIC_ON_OOPS=y
CONFIG_DETECT_HUNG_TASK=y CONFIG_DETECT_HUNG_TASK=y
CONFIG_WQ_WATCHDOG=y CONFIG_WQ_WATCHDOG=y
CONFIG_TEST_LOCKUP=m CONFIG_TEST_LOCKUP=m
CONFIG_DEBUG_TIMEKEEPING=y
CONFIG_PROVE_LOCKING=y CONFIG_PROVE_LOCKING=y
CONFIG_LOCK_STAT=y CONFIG_LOCK_STAT=y
CONFIG_DEBUG_LOCKDEP=y
CONFIG_DEBUG_ATOMIC_SLEEP=y CONFIG_DEBUG_ATOMIC_SLEEP=y
CONFIG_DEBUG_LOCKING_API_SELFTESTS=y CONFIG_DEBUG_LOCKING_API_SELFTESTS=y
CONFIG_DEBUG_IRQFLAGS=y
CONFIG_DEBUG_SG=y CONFIG_DEBUG_SG=y
CONFIG_DEBUG_NOTIFIERS=y CONFIG_DEBUG_NOTIFIERS=y
CONFIG_BUG_ON_DATA_CORRUPTION=y CONFIG_BUG_ON_DATA_CORRUPTION=y

View File

@ -61,6 +61,7 @@ CONFIG_APPLDATA_BASE=y
CONFIG_KVM=m CONFIG_KVM=m
CONFIG_S390_UNWIND_SELFTEST=m CONFIG_S390_UNWIND_SELFTEST=m
CONFIG_S390_KPROBES_SANITY_TEST=m CONFIG_S390_KPROBES_SANITY_TEST=m
CONFIG_S390_MODULES_SANITY_TEST=m
CONFIG_KPROBES=y CONFIG_KPROBES=y
CONFIG_JUMP_LABEL=y CONFIG_JUMP_LABEL=y
# CONFIG_GCC_PLUGINS is not set # CONFIG_GCC_PLUGINS is not set
@ -91,7 +92,6 @@ CONFIG_MEMORY_HOTPLUG=y
CONFIG_MEMORY_HOTREMOVE=y CONFIG_MEMORY_HOTREMOVE=y
CONFIG_KSM=y CONFIG_KSM=y
CONFIG_TRANSPARENT_HUGEPAGE=y CONFIG_TRANSPARENT_HUGEPAGE=y
CONFIG_FRONTSWAP=y
CONFIG_CMA_SYSFS=y CONFIG_CMA_SYSFS=y
CONFIG_CMA_AREAS=7 CONFIG_CMA_AREAS=7
CONFIG_MEM_SOFT_DIRTY=y CONFIG_MEM_SOFT_DIRTY=y
@ -101,6 +101,7 @@ CONFIG_ZSMALLOC_STAT=y
CONFIG_DEFERRED_STRUCT_PAGE_INIT=y CONFIG_DEFERRED_STRUCT_PAGE_INIT=y
CONFIG_IDLE_PAGE_TRACKING=y CONFIG_IDLE_PAGE_TRACKING=y
CONFIG_PERCPU_STATS=y CONFIG_PERCPU_STATS=y
CONFIG_ANON_VMA_NAME=y
CONFIG_NET=y CONFIG_NET=y
CONFIG_PACKET=y CONFIG_PACKET=y
CONFIG_PACKET_DIAG=m CONFIG_PACKET_DIAG=m
@ -108,7 +109,6 @@ CONFIG_UNIX=y
CONFIG_UNIX_DIAG=m CONFIG_UNIX_DIAG=m
CONFIG_XFRM_USER=m CONFIG_XFRM_USER=m
CONFIG_NET_KEY=m CONFIG_NET_KEY=m
CONFIG_NET_SWITCHDEV=y
CONFIG_SMC=m CONFIG_SMC=m
CONFIG_SMC_DIAG=m CONFIG_SMC_DIAG=m
CONFIG_INET=y CONFIG_INET=y
@ -177,7 +177,6 @@ CONFIG_NF_CT_NETLINK_TIMEOUT=m
CONFIG_NF_TABLES=m CONFIG_NF_TABLES=m
CONFIG_NF_TABLES_INET=y CONFIG_NF_TABLES_INET=y
CONFIG_NFT_CT=m CONFIG_NFT_CT=m
CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m CONFIG_NFT_LIMIT=m
CONFIG_NFT_NAT=m CONFIG_NFT_NAT=m
@ -382,6 +381,7 @@ CONFIG_OPENVSWITCH=m
CONFIG_VSOCKETS=m CONFIG_VSOCKETS=m
CONFIG_VIRTIO_VSOCKETS=m CONFIG_VIRTIO_VSOCKETS=m
CONFIG_NETLINK_DIAG=m CONFIG_NETLINK_DIAG=m
CONFIG_NET_SWITCHDEV=y
CONFIG_CGROUP_NET_PRIO=y CONFIG_CGROUP_NET_PRIO=y
CONFIG_NET_PKTGEN=m CONFIG_NET_PKTGEN=m
CONFIG_PCI=y CONFIG_PCI=y
@ -391,6 +391,7 @@ CONFIG_HOTPLUG_PCI=y
CONFIG_HOTPLUG_PCI_S390=y CONFIG_HOTPLUG_PCI_S390=y
CONFIG_UEVENT_HELPER=y CONFIG_UEVENT_HELPER=y
CONFIG_DEVTMPFS=y CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_SAFE=y
CONFIG_CONNECTOR=y CONFIG_CONNECTOR=y
CONFIG_ZRAM=y CONFIG_ZRAM=y
CONFIG_BLK_DEV_LOOP=m CONFIG_BLK_DEV_LOOP=m
@ -492,6 +493,7 @@ CONFIG_NLMON=m
# CONFIG_NET_VENDOR_DEC is not set # CONFIG_NET_VENDOR_DEC is not set
# CONFIG_NET_VENDOR_DLINK is not set # CONFIG_NET_VENDOR_DLINK is not set
# CONFIG_NET_VENDOR_EMULEX is not set # CONFIG_NET_VENDOR_EMULEX is not set
# CONFIG_NET_VENDOR_ENGLEDER is not set
# CONFIG_NET_VENDOR_EZCHIP is not set # CONFIG_NET_VENDOR_EZCHIP is not set
# CONFIG_NET_VENDOR_GOOGLE is not set # CONFIG_NET_VENDOR_GOOGLE is not set
# CONFIG_NET_VENDOR_HUAWEI is not set # CONFIG_NET_VENDOR_HUAWEI is not set
@ -502,7 +504,6 @@ CONFIG_NLMON=m
CONFIG_MLX4_EN=m CONFIG_MLX4_EN=m
CONFIG_MLX5_CORE=m CONFIG_MLX5_CORE=m
CONFIG_MLX5_CORE_EN=y CONFIG_MLX5_CORE_EN=y
CONFIG_MLX5_ESWITCH=y
# CONFIG_NET_VENDOR_MICREL is not set # CONFIG_NET_VENDOR_MICREL is not set
# CONFIG_NET_VENDOR_MICROCHIP is not set # CONFIG_NET_VENDOR_MICROCHIP is not set
# CONFIG_NET_VENDOR_MICROSEMI is not set # CONFIG_NET_VENDOR_MICROSEMI is not set
@ -533,6 +534,7 @@ CONFIG_MLX5_ESWITCH=y
# CONFIG_NET_VENDOR_SYNOPSYS is not set # CONFIG_NET_VENDOR_SYNOPSYS is not set
# CONFIG_NET_VENDOR_TEHUTI is not set # CONFIG_NET_VENDOR_TEHUTI is not set
# CONFIG_NET_VENDOR_TI is not set # CONFIG_NET_VENDOR_TI is not set
# CONFIG_NET_VENDOR_VERTEXCOM is not set
# CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set # CONFIG_NET_VENDOR_WIZNET is not set
# CONFIG_NET_VENDOR_XILINX is not set # CONFIG_NET_VENDOR_XILINX is not set
@ -582,6 +584,7 @@ CONFIG_VIRTIO_BALLOON=m
CONFIG_VIRTIO_INPUT=y CONFIG_VIRTIO_INPUT=y
CONFIG_VHOST_NET=m CONFIG_VHOST_NET=m
CONFIG_VHOST_VSOCK=m CONFIG_VHOST_VSOCK=m
# CONFIG_SURFACE_PLATFORMS is not set
CONFIG_S390_CCW_IOMMU=y CONFIG_S390_CCW_IOMMU=y
CONFIG_S390_AP_IOMMU=y CONFIG_S390_AP_IOMMU=y
CONFIG_EXT4_FS=y CONFIG_EXT4_FS=y
@ -743,9 +746,6 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_USER_API_RNG=m CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m CONFIG_CRYPTO_USER_API_AEAD=m
CONFIG_CRYPTO_STATS=y CONFIG_CRYPTO_STATS=y
CONFIG_CRYPTO_LIB_BLAKE2S=m
CONFIG_CRYPTO_LIB_CURVE25519=m
CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m
CONFIG_ZCRYPT=m CONFIG_ZCRYPT=m
CONFIG_PKEY=m CONFIG_PKEY=m
CONFIG_CRYPTO_PAES_S390=m CONFIG_CRYPTO_PAES_S390=m
@ -762,6 +762,8 @@ CONFIG_CRYPTO_CRC32_S390=y
CONFIG_CRYPTO_DEV_VIRTIO=m CONFIG_CRYPTO_DEV_VIRTIO=m
CONFIG_CORDIC=m CONFIG_CORDIC=m
CONFIG_PRIME_NUMBERS=m CONFIG_PRIME_NUMBERS=m
CONFIG_CRYPTO_LIB_CURVE25519=m
CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m
CONFIG_CRC4=m CONFIG_CRC4=m
CONFIG_CRC7=m CONFIG_CRC7=m
CONFIG_CRC8=m CONFIG_CRC8=m

View File

@ -1,6 +1,7 @@
# CONFIG_SWAP is not set # CONFIG_SWAP is not set
CONFIG_NO_HZ_IDLE=y CONFIG_NO_HZ_IDLE=y
CONFIG_HIGH_RES_TIMERS=y CONFIG_HIGH_RES_TIMERS=y
CONFIG_BPF_SYSCALL=y
# CONFIG_CPU_ISOLATION is not set # CONFIG_CPU_ISOLATION is not set
# CONFIG_UTS_NS is not set # CONFIG_UTS_NS is not set
# CONFIG_TIME_NS is not set # CONFIG_TIME_NS is not set
@ -34,6 +35,7 @@ CONFIG_NET=y
# CONFIG_PCPU_DEV_REFCNT is not set # CONFIG_PCPU_DEV_REFCNT is not set
# CONFIG_ETHTOOL_NETLINK is not set # CONFIG_ETHTOOL_NETLINK is not set
CONFIG_DEVTMPFS=y CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_SAFE=y
CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM=y
# CONFIG_DCSSBLK is not set # CONFIG_DCSSBLK is not set
# CONFIG_DASD is not set # CONFIG_DASD is not set
@ -58,6 +60,7 @@ CONFIG_ZFCP=y
# CONFIG_HID is not set # CONFIG_HID is not set
# CONFIG_VIRTIO_MENU is not set # CONFIG_VIRTIO_MENU is not set
# CONFIG_VHOST_MENU is not set # CONFIG_VHOST_MENU is not set
# CONFIG_SURFACE_PLATFORMS is not set
# CONFIG_IOMMU_SUPPORT is not set # CONFIG_IOMMU_SUPPORT is not set
# CONFIG_DNOTIFY is not set # CONFIG_DNOTIFY is not set
# CONFIG_INOTIFY_USER is not set # CONFIG_INOTIFY_USER is not set

View File

@ -20,6 +20,7 @@
static char local_guest[] = " "; static char local_guest[] = " ";
static char all_guests[] = "* "; static char all_guests[] = "* ";
static char *all_groups = all_guests;
static char *guest_query; static char *guest_query;
struct diag2fc_data { struct diag2fc_data {
@ -62,10 +63,11 @@ static int diag2fc(int size, char* query, void *addr)
memcpy(parm_list.userid, query, NAME_LEN); memcpy(parm_list.userid, query, NAME_LEN);
ASCEBC(parm_list.userid, NAME_LEN); ASCEBC(parm_list.userid, NAME_LEN);
parm_list.addr = (unsigned long) addr ; memcpy(parm_list.aci_grp, all_groups, NAME_LEN);
ASCEBC(parm_list.aci_grp, NAME_LEN);
parm_list.addr = (unsigned long)addr;
parm_list.size = size; parm_list.size = size;
parm_list.fmt = 0x02; parm_list.fmt = 0x02;
memset(parm_list.aci_grp, 0x40, NAME_LEN);
rc = -1; rc = -1;
diag_stat_inc(DIAG_STAT_X2FC); diag_stat_inc(DIAG_STAT_X2FC);
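The replaced memset() filled the ACI group field with 0x40 bytes, the EBCDIC space; the new code instead copies the "* " wildcard and converts it with ASCEBC(), mirroring how the user ID query is prepared a few lines earlier, which, as the all_guests/all_groups naming suggests, widens the query to guests in any access control group rather than only those with a blank one.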

View File

@ -47,8 +47,6 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n);
int __put_user_bad(void) __attribute__((noreturn)); int __put_user_bad(void) __attribute__((noreturn));
int __get_user_bad(void) __attribute__((noreturn)); int __get_user_bad(void) __attribute__((noreturn));
#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
union oac { union oac {
unsigned int val; unsigned int val;
struct { struct {
@ -71,6 +69,8 @@ union oac {
}; };
}; };
#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
#define __put_get_user_asm(to, from, size, oac_spec) \ #define __put_get_user_asm(to, from, size, oac_spec) \
({ \ ({ \
int __rc; \ int __rc; \

View File

@ -33,7 +33,7 @@
#define DEBUGP(fmt , ...) #define DEBUGP(fmt , ...)
#endif #endif
#define PLT_ENTRY_SIZE 20 #define PLT_ENTRY_SIZE 22
void *module_alloc(unsigned long size) void *module_alloc(unsigned long size)
{ {
@ -341,27 +341,26 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
case R_390_PLTOFF32: /* 32 bit offset from GOT to PLT. */ case R_390_PLTOFF32: /* 32 bit offset from GOT to PLT. */
case R_390_PLTOFF64: /* 64 bit offset from GOT to PLT. */ case R_390_PLTOFF64: /* 64 bit offset from GOT to PLT. */
if (info->plt_initialized == 0) { if (info->plt_initialized == 0) {
unsigned int insn[5]; unsigned char insn[PLT_ENTRY_SIZE];
unsigned int *ip = me->core_layout.base + char *plt_base;
me->arch.plt_offset + char *ip;
info->plt_offset;
insn[0] = 0x0d10e310; /* basr 1,0 */ plt_base = me->core_layout.base + me->arch.plt_offset;
insn[1] = 0x100a0004; /* lg 1,10(1) */ ip = plt_base + info->plt_offset;
*(int *)insn = 0x0d10e310; /* basr 1,0 */
*(int *)&insn[4] = 0x100c0004; /* lg 1,12(1) */
if (IS_ENABLED(CONFIG_EXPOLINE) && !nospec_disable) { if (IS_ENABLED(CONFIG_EXPOLINE) && !nospec_disable) {
unsigned int *ij; char *jump_r1;
ij = me->core_layout.base +
me->arch.plt_offset + jump_r1 = plt_base + me->arch.plt_size -
me->arch.plt_size - PLT_ENTRY_SIZE; PLT_ENTRY_SIZE;
insn[2] = 0xa7f40000 + /* j __jump_r1 */ /* brcl 0xf,__jump_r1 */
(unsigned int)(u16) *(short *)&insn[8] = 0xc0f4;
(((unsigned long) ij - 8 - *(int *)&insn[10] = (jump_r1 - (ip + 8)) / 2;
(unsigned long) ip) / 2);
} else { } else {
insn[2] = 0x07f10000; /* br %r1 */ *(int *)&insn[8] = 0x07f10000; /* br %r1 */
} }
insn[3] = (unsigned int) (val >> 32); *(long *)&insn[14] = val;
insn[4] = (unsigned int) val;
write(ip, insn, sizeof(insn)); write(ip, insn, sizeof(insn));
info->plt_initialized = 1; info->plt_initialized = 1;
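For orientation, the 22-byte entry assembled above lays out as follows; the offsets are my reading of the writes in this hunk, not something quoted from a header:

    /*
     * One s390 PLT entry after this change (PLT_ENTRY_SIZE = 22):
     *
     *   0: 0d 10               basr %r1,0         base address into %r1
     *   2: e3 10 10 0c 00 04   lg   %r1,12(%r1)   load the target below
     *   8: c0 f4 <rel32>       brcl 0xf,__jump_r1 (expoline case), or
     *      07 f1 ...           br   %r1           (direct case)
     *  14: <8-byte target>     literal read by the lg
     */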

View File

@ -264,7 +264,14 @@ static int notrace s390_validate_registers(union mci mci, int umode)
/* Validate vector registers */ /* Validate vector registers */
union ctlreg0 cr0; union ctlreg0 cr0;
if (!mci.vr) { /*
* The vector validity must only be checked if not running a
* KVM guest. For KVM guests the machine check is forwarded by
* KVM and it is the responsibility of the guest to take
* appropriate actions. The host vector or FPU values have been
* saved by KVM and will be restored by KVM.
*/
if (!mci.vr && !test_cpu_flag(CIF_MCCK_GUEST)) {
/* /*
* Vector registers can't be restored. If the kernel * Vector registers can't be restored. If the kernel
* currently uses vector registers the system is * currently uses vector registers the system is
@ -307,11 +314,21 @@ static int notrace s390_validate_registers(union mci mci, int umode)
if (cr2.gse) { if (cr2.gse) {
if (!mci.gs) { if (!mci.gs) {
/* /*
* Guarded storage register can't be restored and * 2 cases:
* the current processes uses guarded storage. * - machine check in kernel or userspace
* It has to be terminated. * - machine check while running SIE (KVM guest)
* For kernel or userspace the userspace values of
* guarded storage control can not be recreated, the
* process must be terminated.
* For SIE the guest values of guarded storage can not
* be recreated. This is either due to a bug or due to
* GS being disabled in the guest. The guest will be
* notified by KVM code and the guest's machine check
* handling must take care of this. The host values
* are saved by KVM and are not affected.
*/ */
kill_task = 1; if (!test_cpu_flag(CIF_MCCK_GUEST))
kill_task = 1;
} else { } else {
load_gs_cb((struct gs_cb *)mcesa->guarded_storage_save_area); load_gs_cb((struct gs_cb *)mcesa->guarded_storage_save_area);
} }

View File

@ -17,4 +17,7 @@ KASAN_SANITIZE_uaccess.o := n
obj-$(CONFIG_S390_UNWIND_SELFTEST) += test_unwind.o obj-$(CONFIG_S390_UNWIND_SELFTEST) += test_unwind.o
CFLAGS_test_unwind.o += -fno-optimize-sibling-calls CFLAGS_test_unwind.o += -fno-optimize-sibling-calls
obj-$(CONFIG_S390_MODULES_SANITY_TEST) += test_modules.o
obj-$(CONFIG_S390_MODULES_SANITY_TEST_HELPERS) += test_modules_helpers.o
lib-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o lib-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o

View File

@ -0,0 +1,35 @@
// SPDX-License-Identifier: GPL-2.0+
#include <kunit/test.h>
#include <linux/module.h>
#include "test_modules.h"
#define DECLARE_RETURN(i) int test_modules_return_ ## i(void)
REPEAT_10000(DECLARE_RETURN);
/*
* Test that modules with many relocations are loaded properly.
*/
static void test_modules_many_vmlinux_relocs(struct kunit *test)
{
int result = 0;
#define CALL_RETURN(i) result += test_modules_return_ ## i()
REPEAT_10000(CALL_RETURN);
KUNIT_ASSERT_EQ(test, result, 49995000);
}
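The expected value checks out: each generated helper returns its own four-digit index (1 ## i - 10000 == i, per the helper definitions further down), so the sum over i = 0..9999 is 9999 * 10000 / 2 = 49995000.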
static struct kunit_case modules_testcases[] = {
KUNIT_CASE(test_modules_many_vmlinux_relocs),
{}
};
static struct kunit_suite modules_test_suite = {
.name = "modules_test_s390",
.test_cases = modules_testcases,
};
kunit_test_suites(&modules_test_suite);
MODULE_LICENSE("GPL");

View File

@ -0,0 +1,50 @@
/* SPDX-License-Identifier: GPL-2.0+ */
#ifndef TEST_MODULES_H
#define TEST_MODULES_H
#define __REPEAT_10000_3(f, x) \
f(x ## 0); \
f(x ## 1); \
f(x ## 2); \
f(x ## 3); \
f(x ## 4); \
f(x ## 5); \
f(x ## 6); \
f(x ## 7); \
f(x ## 8); \
f(x ## 9)
#define __REPEAT_10000_2(f, x) \
__REPEAT_10000_3(f, x ## 0); \
__REPEAT_10000_3(f, x ## 1); \
__REPEAT_10000_3(f, x ## 2); \
__REPEAT_10000_3(f, x ## 3); \
__REPEAT_10000_3(f, x ## 4); \
__REPEAT_10000_3(f, x ## 5); \
__REPEAT_10000_3(f, x ## 6); \
__REPEAT_10000_3(f, x ## 7); \
__REPEAT_10000_3(f, x ## 8); \
__REPEAT_10000_3(f, x ## 9)
#define __REPEAT_10000_1(f, x) \
__REPEAT_10000_2(f, x ## 0); \
__REPEAT_10000_2(f, x ## 1); \
__REPEAT_10000_2(f, x ## 2); \
__REPEAT_10000_2(f, x ## 3); \
__REPEAT_10000_2(f, x ## 4); \
__REPEAT_10000_2(f, x ## 5); \
__REPEAT_10000_2(f, x ## 6); \
__REPEAT_10000_2(f, x ## 7); \
__REPEAT_10000_2(f, x ## 8); \
__REPEAT_10000_2(f, x ## 9)
#define REPEAT_10000(f) \
__REPEAT_10000_1(f, 0); \
__REPEAT_10000_1(f, 1); \
__REPEAT_10000_1(f, 2); \
__REPEAT_10000_1(f, 3); \
__REPEAT_10000_1(f, 4); \
__REPEAT_10000_1(f, 5); \
__REPEAT_10000_1(f, 6); \
__REPEAT_10000_1(f, 7); \
__REPEAT_10000_1(f, 8); \
__REPEAT_10000_1(f, 9)
#endif
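Each __REPEAT_10000_N level pastes one more decimal digit onto x, so REPEAT_10000(f) expands to f(0000); f(0001); ... f(9999) - exactly 10,000 invocations, each with a distinct four-digit token.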

View File

@ -0,0 +1,13 @@
// SPDX-License-Identifier: GPL-2.0+
#include <linux/export.h>
#include "test_modules.h"
#define DEFINE_RETURN(i) \
int test_modules_return_ ## i(void) \
{ \
return 1 ## i - 10000; \
} \
EXPORT_SYMBOL_GPL(test_modules_return_ ## i)
REPEAT_10000(DEFINE_RETURN);

View File

@ -186,6 +186,7 @@ config X86
select HAVE_CONTEXT_TRACKING_OFFSTACK if HAVE_CONTEXT_TRACKING select HAVE_CONTEXT_TRACKING_OFFSTACK if HAVE_CONTEXT_TRACKING
select HAVE_C_RECORDMCOUNT select HAVE_C_RECORDMCOUNT
select HAVE_OBJTOOL_MCOUNT if STACK_VALIDATION select HAVE_OBJTOOL_MCOUNT if STACK_VALIDATION
select HAVE_BUILDTIME_MCOUNT_SORT
select HAVE_DEBUG_KMEMLEAK select HAVE_DEBUG_KMEMLEAK
select HAVE_DMA_CONTIGUOUS select HAVE_DMA_CONTIGUOUS
select HAVE_DYNAMIC_FTRACE select HAVE_DYNAMIC_FTRACE

View File

@ -1483,7 +1483,8 @@ struct kvm_x86_ops {
int (*get_msr_feature)(struct kvm_msr_entry *entry); int (*get_msr_feature)(struct kvm_msr_entry *entry);
bool (*can_emulate_instruction)(struct kvm_vcpu *vcpu, void *insn, int insn_len); bool (*can_emulate_instruction)(struct kvm_vcpu *vcpu, int emul_type,
void *insn, int insn_len);
bool (*apic_init_signal_blocked)(struct kvm_vcpu *vcpu); bool (*apic_init_signal_blocked)(struct kvm_vcpu *vcpu);
int (*enable_direct_tlbflush)(struct kvm_vcpu *vcpu); int (*enable_direct_tlbflush)(struct kvm_vcpu *vcpu);
@ -1496,6 +1497,7 @@ struct kvm_x86_ops {
}; };
struct kvm_x86_nested_ops { struct kvm_x86_nested_ops {
void (*leave_nested)(struct kvm_vcpu *vcpu);
int (*check_events)(struct kvm_vcpu *vcpu); int (*check_events)(struct kvm_vcpu *vcpu);
bool (*hv_timer_pending)(struct kvm_vcpu *vcpu); bool (*hv_timer_pending)(struct kvm_vcpu *vcpu);
void (*triple_fault)(struct kvm_vcpu *vcpu); void (*triple_fault)(struct kvm_vcpu *vcpu);
@ -1861,7 +1863,6 @@ int kvm_cpu_has_extint(struct kvm_vcpu *v);
int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu); int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu);
int kvm_cpu_get_interrupt(struct kvm_vcpu *v); int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event); void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event);
void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu);
int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low, int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low,
unsigned long ipi_bitmap_high, u32 min, unsigned long ipi_bitmap_high, u32 min,

View File

@ -452,6 +452,9 @@ struct kvm_sync_regs {
#define KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE 0x00000001 #define KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE 0x00000001
/* attributes for system fd (group 0) */
#define KVM_X86_XCOMP_GUEST_SUPP 0
struct kvm_vmx_nested_state_data { struct kvm_vmx_nested_state_data {
__u8 vmcs12[KVM_STATE_NESTED_VMX_VMCS_SIZE]; __u8 vmcs12[KVM_STATE_NESTED_VMX_VMCS_SIZE];
__u8 shadow_vmcs12[KVM_STATE_NESTED_VMX_VMCS_SIZE]; __u8 shadow_vmcs12[KVM_STATE_NESTED_VMX_VMCS_SIZE];

View File

@ -423,7 +423,7 @@ static void threshold_restart_bank(void *_tr)
u32 hi, lo; u32 hi, lo;
/* sysfs write might race against an offline operation */ /* sysfs write might race against an offline operation */
if (this_cpu_read(threshold_banks)) if (!this_cpu_read(threshold_banks) && !tr->set_lvt_off)
return; return;
rdmsr(tr->b->address, lo, hi); rdmsr(tr->b->address, lo, hi);

View File

@ -486,6 +486,7 @@ static void intel_ppin_init(struct cpuinfo_x86 *c)
case INTEL_FAM6_BROADWELL_X: case INTEL_FAM6_BROADWELL_X:
case INTEL_FAM6_SKYLAKE_X: case INTEL_FAM6_SKYLAKE_X:
case INTEL_FAM6_ICELAKE_X: case INTEL_FAM6_ICELAKE_X:
case INTEL_FAM6_ICELAKE_D:
case INTEL_FAM6_SAPPHIRERAPIDS_X: case INTEL_FAM6_SAPPHIRERAPIDS_X:
case INTEL_FAM6_XEON_PHI_KNL: case INTEL_FAM6_XEON_PHI_KNL:
case INTEL_FAM6_XEON_PHI_KNM: case INTEL_FAM6_XEON_PHI_KNM:

View File

@ -133,6 +133,7 @@ static int kvm_cpuid_check_equal(struct kvm_vcpu *vcpu, struct kvm_cpuid_entry2
orig = &vcpu->arch.cpuid_entries[i]; orig = &vcpu->arch.cpuid_entries[i];
if (e2[i].function != orig->function || if (e2[i].function != orig->function ||
e2[i].index != orig->index || e2[i].index != orig->index ||
e2[i].flags != orig->flags ||
e2[i].eax != orig->eax || e2[i].ebx != orig->ebx || e2[i].eax != orig->eax || e2[i].ebx != orig->ebx ||
e2[i].ecx != orig->ecx || e2[i].edx != orig->edx) e2[i].ecx != orig->ecx || e2[i].edx != orig->edx)
return -EINVAL; return -EINVAL;
@ -196,10 +197,26 @@ void kvm_update_pv_runtime(struct kvm_vcpu *vcpu)
vcpu->arch.pv_cpuid.features = best->eax; vcpu->arch.pv_cpuid.features = best->eax;
} }
/*
* Calculate guest's supported XCR0 taking into account guest CPUID data and
* supported_xcr0 (comprised of host configuration and KVM_SUPPORTED_XCR0).
*/
static u64 cpuid_get_supported_xcr0(struct kvm_cpuid_entry2 *entries, int nent)
{
struct kvm_cpuid_entry2 *best;
best = cpuid_entry2_find(entries, nent, 0xd, 0);
if (!best)
return 0;
return (best->eax | ((u64)best->edx << 32)) & supported_xcr0;
}
static void __kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu, struct kvm_cpuid_entry2 *entries, static void __kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu, struct kvm_cpuid_entry2 *entries,
int nent) int nent)
{ {
struct kvm_cpuid_entry2 *best; struct kvm_cpuid_entry2 *best;
u64 guest_supported_xcr0 = cpuid_get_supported_xcr0(entries, nent);
best = cpuid_entry2_find(entries, nent, 1, 0); best = cpuid_entry2_find(entries, nent, 1, 0);
if (best) { if (best) {
@ -238,6 +255,21 @@ static void __kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu, struct kvm_cpuid_e
vcpu->arch.ia32_misc_enable_msr & vcpu->arch.ia32_misc_enable_msr &
MSR_IA32_MISC_ENABLE_MWAIT); MSR_IA32_MISC_ENABLE_MWAIT);
} }
/*
* Bits 127:0 of the allowed SECS.ATTRIBUTES (CPUID.0x12.0x1) enumerate
* the supported XSAVE Feature Request Mask (XFRM), i.e. the enclave's
* requested XCR0 value. The enclave's XFRM must be a subset of XCR0
* at the time of EENTER, thus adjust the allowed XFRM by the guest's
* supported XCR0. Similar to XCR0 handling, FP and SSE are forced to
* '1' even on CPUs that don't support XSAVE.
*/
best = cpuid_entry2_find(entries, nent, 0x12, 0x1);
if (best) {
best->ecx &= guest_supported_xcr0 & 0xffffffff;
best->edx &= guest_supported_xcr0 >> 32;
best->ecx |= XFEATURE_MASK_FPSSE;
}
} }
void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu) void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu)
@ -261,27 +293,8 @@ static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
kvm_apic_set_version(vcpu); kvm_apic_set_version(vcpu);
} }
best = kvm_find_cpuid_entry(vcpu, 0xD, 0); vcpu->arch.guest_supported_xcr0 =
if (!best) cpuid_get_supported_xcr0(vcpu->arch.cpuid_entries, vcpu->arch.cpuid_nent);
vcpu->arch.guest_supported_xcr0 = 0;
else
vcpu->arch.guest_supported_xcr0 =
(best->eax | ((u64)best->edx << 32)) & supported_xcr0;
/*
* Bits 127:0 of the allowed SECS.ATTRIBUTES (CPUID.0x12.0x1) enumerate
* the supported XSAVE Feature Request Mask (XFRM), i.e. the enclave's
* requested XCR0 value. The enclave's XFRM must be a subset of XCR0
* at the time of EENTER, thus adjust the allowed XFRM by the guest's
* supported XCR0. Similar to XCR0 handling, FP and SSE are forced to
* '1' even on CPUs that don't support XSAVE.
*/
best = kvm_find_cpuid_entry(vcpu, 0x12, 0x1);
if (best) {
best->ecx &= vcpu->arch.guest_supported_xcr0 & 0xffffffff;
best->edx &= vcpu->arch.guest_supported_xcr0 >> 32;
best->ecx |= XFEATURE_MASK_FPSSE;
}
kvm_update_pv_runtime(vcpu); kvm_update_pv_runtime(vcpu);
@ -346,8 +359,14 @@ static int kvm_set_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid_entry2 *e2,
* KVM_SET_CPUID{,2} again. To support this legacy behavior, check * KVM_SET_CPUID{,2} again. To support this legacy behavior, check
* whether the supplied CPUID data is equal to what's already set. * whether the supplied CPUID data is equal to what's already set.
*/ */
if (vcpu->arch.last_vmentry_cpu != -1) if (vcpu->arch.last_vmentry_cpu != -1) {
return kvm_cpuid_check_equal(vcpu, e2, nent); r = kvm_cpuid_check_equal(vcpu, e2, nent);
if (r)
return r;
kvfree(e2);
return 0;
}
r = kvm_check_cpuid(vcpu, e2, nent); r = kvm_check_cpuid(vcpu, e2, nent);
if (r) if (r)
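Besides propagating the comparison result, the rewritten branch plugs a leak on this legacy path: e2 is never installed into the vCPU here, so returning success without kvfree(e2) abandoned the buffer whose ownership the caller handed over.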
@ -887,13 +906,14 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
} }
break; break;
case 0xd: { case 0xd: {
u64 guest_perm = xstate_get_guest_group_perm(); u64 permitted_xcr0 = supported_xcr0 & xstate_get_guest_group_perm();
u64 permitted_xss = supported_xss;
entry->eax &= supported_xcr0 & guest_perm; entry->eax &= permitted_xcr0;
entry->ebx = xstate_required_size(supported_xcr0, false); entry->ebx = xstate_required_size(permitted_xcr0, false);
entry->ecx = entry->ebx; entry->ecx = entry->ebx;
entry->edx &= (supported_xcr0 & guest_perm) >> 32; entry->edx &= permitted_xcr0 >> 32;
if (!supported_xcr0) if (!permitted_xcr0)
break; break;
entry = do_host_cpuid(array, function, 1); entry = do_host_cpuid(array, function, 1);
@ -902,20 +922,20 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
cpuid_entry_override(entry, CPUID_D_1_EAX); cpuid_entry_override(entry, CPUID_D_1_EAX);
if (entry->eax & (F(XSAVES)|F(XSAVEC))) if (entry->eax & (F(XSAVES)|F(XSAVEC)))
entry->ebx = xstate_required_size(supported_xcr0 | supported_xss, entry->ebx = xstate_required_size(permitted_xcr0 | permitted_xss,
true); true);
else { else {
WARN_ON_ONCE(supported_xss != 0); WARN_ON_ONCE(permitted_xss != 0);
entry->ebx = 0; entry->ebx = 0;
} }
entry->ecx &= supported_xss; entry->ecx &= permitted_xss;
entry->edx &= supported_xss >> 32; entry->edx &= permitted_xss >> 32;
for (i = 2; i < 64; ++i) { for (i = 2; i < 64; ++i) {
bool s_state; bool s_state;
if (supported_xcr0 & BIT_ULL(i)) if (permitted_xcr0 & BIT_ULL(i))
s_state = false; s_state = false;
else if (supported_xss & BIT_ULL(i)) else if (permitted_xss & BIT_ULL(i))
s_state = true; s_state = true;
else else
continue; continue;
@ -929,7 +949,7 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
* invalid sub-leafs. Only valid sub-leafs should * invalid sub-leafs. Only valid sub-leafs should
* reach this point, and they should have a non-zero * reach this point, and they should have a non-zero
* save state size. Furthermore, check whether the * save state size. Furthermore, check whether the
* processor agrees with supported_xcr0/supported_xss * processor agrees with permitted_xcr0/permitted_xss
* on whether this is an XCR0- or IA32_XSS-managed area. * on whether this is an XCR0- or IA32_XSS-managed area.
*/ */
if (WARN_ON_ONCE(!entry->eax || (entry->ecx & 0x1) != s_state)) { if (WARN_ON_ONCE(!entry->eax || (entry->ecx & 0x1) != s_state)) {

View File

@ -2629,7 +2629,7 @@ int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
kvm_apic_set_version(vcpu); kvm_apic_set_version(vcpu);
apic_update_ppr(apic); apic_update_ppr(apic);
hrtimer_cancel(&apic->lapic_timer.timer); cancel_apic_timer(apic);
apic->lapic_timer.expired_tscdeadline = 0; apic->lapic_timer.expired_tscdeadline = 0;
apic_update_lvtt(apic); apic_update_lvtt(apic);
apic_manage_nmi_watchdog(apic, kvm_lapic_get_reg(apic, APIC_LVT0)); apic_manage_nmi_watchdog(apic, kvm_lapic_get_reg(apic, APIC_LVT0));

View File

@ -983,9 +983,9 @@ void svm_free_nested(struct vcpu_svm *svm)
/* /*
* Forcibly leave nested mode in order to be able to reset the VCPU later on. * Forcibly leave nested mode in order to be able to reset the VCPU later on.
*/ */
void svm_leave_nested(struct vcpu_svm *svm) void svm_leave_nested(struct kvm_vcpu *vcpu)
{ {
struct kvm_vcpu *vcpu = &svm->vcpu; struct vcpu_svm *svm = to_svm(vcpu);
if (is_guest_mode(vcpu)) { if (is_guest_mode(vcpu)) {
svm->nested.nested_run_pending = 0; svm->nested.nested_run_pending = 0;
@ -1411,7 +1411,7 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
return -EINVAL; return -EINVAL;
if (!(kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE)) { if (!(kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE)) {
svm_leave_nested(svm); svm_leave_nested(vcpu);
svm_set_gif(svm, !!(kvm_state->flags & KVM_STATE_NESTED_GIF_SET)); svm_set_gif(svm, !!(kvm_state->flags & KVM_STATE_NESTED_GIF_SET));
return 0; return 0;
} }
@ -1478,7 +1478,7 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
*/ */
if (is_guest_mode(vcpu)) if (is_guest_mode(vcpu))
svm_leave_nested(svm); svm_leave_nested(vcpu);
else else
svm->nested.vmcb02.ptr->save = svm->vmcb01.ptr->save; svm->nested.vmcb02.ptr->save = svm->vmcb01.ptr->save;
@ -1532,6 +1532,7 @@ static bool svm_get_nested_state_pages(struct kvm_vcpu *vcpu)
} }
struct kvm_x86_nested_ops svm_nested_ops = { struct kvm_x86_nested_ops svm_nested_ops = {
.leave_nested = svm_leave_nested,
.check_events = svm_check_nested_events, .check_events = svm_check_nested_events,
.triple_fault = nested_svm_triple_fault, .triple_fault = nested_svm_triple_fault,
.get_nested_state_pages = svm_get_nested_state_pages, .get_nested_state_pages = svm_get_nested_state_pages,

View File

@ -2100,8 +2100,13 @@ void __init sev_hardware_setup(void)
if (!sev_enabled || !npt_enabled) if (!sev_enabled || !npt_enabled)
goto out; goto out;
/* Does the CPU support SEV? */ /*
if (!boot_cpu_has(X86_FEATURE_SEV)) * SEV must obviously be supported in hardware. Sanity check that the
* CPU supports decode assists, which is mandatory for SEV guests to
* support instruction emulation.
*/
if (!boot_cpu_has(X86_FEATURE_SEV) ||
WARN_ON_ONCE(!boot_cpu_has(X86_FEATURE_DECODEASSISTS)))
goto out; goto out;
/* Retrieve SEV CPUID information */ /* Retrieve SEV CPUID information */
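This pairs with the svm_can_emulate_instruction() rework further down: SEV emulation leans entirely on the instruction bytes that decode assists deposit in the VMCB, so a CPU advertising SEV without decode assists could never emulate on an SEV guest's behalf.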

View File

@ -290,7 +290,7 @@ int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
if ((old_efer & EFER_SVME) != (efer & EFER_SVME)) { if ((old_efer & EFER_SVME) != (efer & EFER_SVME)) {
if (!(efer & EFER_SVME)) { if (!(efer & EFER_SVME)) {
svm_leave_nested(svm); svm_leave_nested(vcpu);
svm_set_gif(svm, true); svm_set_gif(svm, true);
/* #GP intercept is still needed for vmware backdoor */ /* #GP intercept is still needed for vmware backdoor */
if (!enable_vmware_backdoor) if (!enable_vmware_backdoor)
@ -312,7 +312,11 @@ int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
return ret; return ret;
} }
if (svm_gp_erratum_intercept) /*
* Never intercept #GP for SEV guests, KVM can't
* decrypt guest memory to work around the erratum.
*/
if (svm_gp_erratum_intercept && !sev_guest(vcpu->kvm))
set_exception_intercept(svm, GP_VECTOR); set_exception_intercept(svm, GP_VECTOR);
} }
} }
@ -1010,9 +1014,10 @@ static void init_vmcb(struct kvm_vcpu *vcpu)
* Guest access to VMware backdoor ports could legitimately * Guest access to VMware backdoor ports could legitimately
* trigger #GP because of TSS I/O permission bitmap. * trigger #GP because of TSS I/O permission bitmap.
* We intercept those #GP and allow access to them anyway * We intercept those #GP and allow access to them anyway
* as VMware does. * as VMware does. Don't intercept #GP for SEV guests as KVM can't
* decrypt guest memory to decode the faulting instruction.
*/ */
if (enable_vmware_backdoor) if (enable_vmware_backdoor && !sev_guest(vcpu->kvm))
set_exception_intercept(svm, GP_VECTOR); set_exception_intercept(svm, GP_VECTOR);
svm_set_intercept(svm, INTERCEPT_INTR); svm_set_intercept(svm, INTERCEPT_INTR);
@ -2091,10 +2096,6 @@ static int gp_interception(struct kvm_vcpu *vcpu)
if (error_code) if (error_code)
goto reinject; goto reinject;
/* All SVM instructions expect page aligned RAX */
if (svm->vmcb->save.rax & ~PAGE_MASK)
goto reinject;
/* Decode the instruction for usage later */ /* Decode the instruction for usage later */
if (x86_decode_emulated_instruction(vcpu, 0, NULL, 0) != EMULATION_OK) if (x86_decode_emulated_instruction(vcpu, 0, NULL, 0) != EMULATION_OK)
goto reinject; goto reinject;
@ -2112,8 +2113,13 @@ static int gp_interception(struct kvm_vcpu *vcpu)
if (!is_guest_mode(vcpu)) if (!is_guest_mode(vcpu))
return kvm_emulate_instruction(vcpu, return kvm_emulate_instruction(vcpu,
EMULTYPE_VMWARE_GP | EMULTYPE_NO_DECODE); EMULTYPE_VMWARE_GP | EMULTYPE_NO_DECODE);
} else } else {
/* All SVM instructions expect page aligned RAX */
if (svm->vmcb->save.rax & ~PAGE_MASK)
goto reinject;
return emulate_svm_instr(vcpu, opcode); return emulate_svm_instr(vcpu, opcode);
}
reinject: reinject:
kvm_queue_exception_e(vcpu, GP_VECTOR, error_code); kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
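Moving the alignment test narrows it to the branch that actually emulates SVM instructions, whose RAX operand is the physical address of a VMCB or save area and must be page aligned per the APM; the VMware-backdoor path above no longer trips over it.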
@ -4252,79 +4258,140 @@ static void svm_enable_smi_window(struct kvm_vcpu *vcpu)
} }
} }
static bool svm_can_emulate_instruction(struct kvm_vcpu *vcpu, void *insn, int insn_len) static bool svm_can_emulate_instruction(struct kvm_vcpu *vcpu, int emul_type,
void *insn, int insn_len)
{ {
bool smep, smap, is_user; bool smep, smap, is_user;
unsigned long cr4; unsigned long cr4;
u64 error_code;
/* Emulation is always possible when KVM has access to all guest state. */
if (!sev_guest(vcpu->kvm))
return true;
/* #UD and #GP should never be intercepted for SEV guests. */
WARN_ON_ONCE(emul_type & (EMULTYPE_TRAP_UD |
EMULTYPE_TRAP_UD_FORCED |
EMULTYPE_VMWARE_GP));
/* /*
* When the guest is an SEV-ES guest, emulation is not possible. * Emulation is impossible for SEV-ES guests as KVM doesn't have access
* to guest register state.
*/ */
if (sev_es_guest(vcpu->kvm)) if (sev_es_guest(vcpu->kvm))
return false; return false;
/* /*
* Detect and workaround Errata 1096 Fam_17h_00_0Fh. * Emulation is possible if the instruction is already decoded, e.g.
* * when completing I/O after returning from userspace.
* Errata:
* When CPU raise #NPF on guest data access and vCPU CR4.SMAP=1, it is
* possible that CPU microcode implementing DecodeAssist will fail
* to read bytes of instruction which caused #NPF. In this case,
* GuestIntrBytes field of the VMCB on a VMEXIT will incorrectly
* return 0 instead of the correct guest instruction bytes.
*
* This happens because CPU microcode reading instruction bytes
* uses a special opcode which attempts to read data using CPL=0
* privileges. The microcode reads CS:RIP and if it hits a SMAP
* fault, it gives up and returns no instruction bytes.
*
* Detection:
* We reach here in case CPU supports DecodeAssist, raised #NPF and
* returned 0 in GuestIntrBytes field of the VMCB.
* First, errata can only be triggered in case vCPU CR4.SMAP=1.
* Second, if vCPU CR4.SMEP=1, errata could only be triggered
* in case vCPU CPL==3 (Because otherwise guest would have triggered
* a SMEP fault instead of #NPF).
* Otherwise, vCPU CR4.SMEP=0, errata could be triggered by any vCPU CPL.
* As most guests enable SMAP if they have also enabled SMEP, use above
* logic in order to attempt minimize false-positive of detecting errata
* while still preserving all cases semantic correctness.
*
* Workaround:
* To determine what instruction the guest was executing, the hypervisor
* will have to decode the instruction at the instruction pointer.
*
* In non SEV guest, hypervisor will be able to read the guest
* memory to decode the instruction pointer when insn_len is zero
* so we return true to indicate that decoding is possible.
*
* But in the SEV guest, the guest memory is encrypted with the
* guest specific key and hypervisor will not be able to decode the
* instruction pointer so we will not able to workaround it. Lets
* print the error and request to kill the guest.
*/ */
if (likely(!insn || insn_len)) if (emul_type & EMULTYPE_NO_DECODE)
return true; return true;
/* /*
* If RIP is invalid, go ahead with emulation which will cause an * Emulation is possible for SEV guests if and only if a prefilled
* internal error exit. * buffer containing the bytes of the intercepted instruction is
* available. SEV guest memory is encrypted with a guest specific key
* and cannot be decrypted by KVM, i.e. KVM would read ciphertext and
* decode garbage.
*
* Inject #UD if KVM reached this point without an instruction buffer.
* In practice, this path should never be hit by a well-behaved guest,
* e.g. KVM doesn't intercept #UD or #GP for SEV guests, but this path
* is still theoretically reachable, e.g. via unaccelerated fault-like
* AVIC access, and needs to be handled by KVM to avoid putting the
* guest into an infinite loop. Injecting #UD is somewhat arbitrary,
* but it's the least awful option given lack of insight into the guest.
*/ */
if (!kvm_vcpu_gfn_to_memslot(vcpu, kvm_rip_read(vcpu) >> PAGE_SHIFT)) if (unlikely(!insn)) {
kvm_queue_exception(vcpu, UD_VECTOR);
return false;
}
/*
* Emulate for SEV guests if the insn buffer is not empty. The buffer
* will be empty if the DecodeAssist microcode cannot fetch bytes for
* the faulting instruction because the code fetch itself faulted, e.g.
* the guest attempted to fetch from emulated MMIO or a guest page
* table used to translate CS:RIP resides in emulated MMIO.
*/
if (likely(insn_len))
return true; return true;
/*
* Detect and work around Errata 1096 Fam_17h_00_0Fh.
*
* Errata:
* When CPU raises #NPF on guest data access and vCPU CR4.SMAP=1, it is
* possible that CPU microcode implementing DecodeAssist will fail to
* read guest memory at CS:RIP and vmcb.GuestIntrBytes will incorrectly
* be '0'. This happens because microcode reads CS:RIP using a _data_
* load uop with CPL=0 privileges. If the load hits a SMAP #PF, ucode
* gives up and does not fill the instruction bytes buffer.
*
* As above, KVM reaches this point iff the VM is an SEV guest, the CPU
* supports DecodeAssist, a #NPF was raised, KVM's page fault handler
* triggered emulation (e.g. for MMIO), and the CPU returned 0 in the
* GuestIntrBytes field of the VMCB.
*
* This does _not_ mean that the erratum has been encountered, as the
* DecodeAssist will also fail if the load for CS:RIP hits a legitimate
* #PF, e.g. if the guest attempted to execute from emulated MMIO and
* encountered a reserved/not-present #PF.
*
* To hit the erratum, the following conditions must be true:
* 1. CR4.SMAP=1 (obviously).
* 2. CR4.SMEP=0 || CPL=3. If SMEP=1 and CPL<3, the erratum cannot
* have been hit as the guest would have encountered a SMEP
* violation #PF, not a #NPF.
* 3. The #NPF is not due to a code fetch, in which case failure to
* retrieve the instruction bytes is legitimate (see above).
*
* In addition, don't apply the erratum workaround if the #NPF occurred
* while translating guest page tables (see below).
*/
error_code = to_svm(vcpu)->vmcb->control.exit_info_1;
if (error_code & (PFERR_GUEST_PAGE_MASK | PFERR_FETCH_MASK))
goto resume_guest;
cr4 = kvm_read_cr4(vcpu); cr4 = kvm_read_cr4(vcpu);
smep = cr4 & X86_CR4_SMEP; smep = cr4 & X86_CR4_SMEP;
smap = cr4 & X86_CR4_SMAP; smap = cr4 & X86_CR4_SMAP;
is_user = svm_get_cpl(vcpu) == 3; is_user = svm_get_cpl(vcpu) == 3;
if (smap && (!smep || is_user)) { if (smap && (!smep || is_user)) {
if (!sev_guest(vcpu->kvm))
return true;
pr_err_ratelimited("KVM: SEV Guest triggered AMD Erratum 1096\n"); pr_err_ratelimited("KVM: SEV Guest triggered AMD Erratum 1096\n");
kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
/*
* If the fault occurred in userspace, arbitrarily inject #GP
* to avoid killing the guest and to hopefully avoid confusing
* the guest kernel too much, e.g. injecting #PF would not be
* coherent with respect to the guest's page tables. Request
* triple fault if the fault occurred in the kernel as there's
* no fault that KVM can inject without confusing the guest.
* In practice, the triple fault is moot as no sane SEV kernel
* will execute from user memory while also running with SMAP=1.
*/
if (is_user)
kvm_inject_gp(vcpu, 0);
else
kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
} }
resume_guest:
/*
* If the erratum was not hit, simply resume the guest and let it fault
* again. While awful, e.g. the vCPU may get stuck in an infinite loop
* if the fault is at CPL=0, it's the lesser of all evils. Exiting to
* userspace will kill the guest, and letting the emulator read garbage
* will yield random behavior and potentially corrupt the guest.
*
* Simply resuming the guest is technically not a violation of the SEV
* architecture. AMD's APM states that all code fetches and page table
* accesses for SEV guest are encrypted, regardless of the C-Bit. The
* APM also states that encrypted accesses to MMIO are "ignored", but
* doesn't explicitly define "ignored", i.e. doing nothing and letting
* the guest spin is technically "ignoring" the access.
*/
return false; return false;
} }

View File

@ -304,11 +304,6 @@ static inline void vmcb_mark_all_clean(struct vmcb *vmcb)
& ~VMCB_ALWAYS_DIRTY_MASK; & ~VMCB_ALWAYS_DIRTY_MASK;
} }
static inline bool vmcb_is_clean(struct vmcb *vmcb, int bit)
{
return (vmcb->control.clean & (1 << bit));
}
static inline void vmcb_mark_dirty(struct vmcb *vmcb, int bit) static inline void vmcb_mark_dirty(struct vmcb *vmcb, int bit)
{ {
vmcb->control.clean &= ~(1 << bit); vmcb->control.clean &= ~(1 << bit);
@ -525,7 +520,7 @@ static inline bool nested_exit_on_nmi(struct vcpu_svm *svm)
int enter_svm_guest_mode(struct kvm_vcpu *vcpu, int enter_svm_guest_mode(struct kvm_vcpu *vcpu,
u64 vmcb_gpa, struct vmcb *vmcb12, bool from_vmrun); u64 vmcb_gpa, struct vmcb *vmcb12, bool from_vmrun);
void svm_leave_nested(struct vcpu_svm *svm); void svm_leave_nested(struct kvm_vcpu *vcpu);
void svm_free_nested(struct vcpu_svm *svm); void svm_free_nested(struct vcpu_svm *svm);
int svm_allocate_nested(struct vcpu_svm *svm); int svm_allocate_nested(struct vcpu_svm *svm);
int nested_svm_vmrun(struct kvm_vcpu *vcpu); int nested_svm_vmrun(struct kvm_vcpu *vcpu);

View File

@ -46,6 +46,9 @@ static inline void svm_hv_init_vmcb(struct vmcb *vmcb)
if (npt_enabled && if (npt_enabled &&
ms_hyperv.nested_features & HV_X64_NESTED_ENLIGHTENED_TLB) ms_hyperv.nested_features & HV_X64_NESTED_ENLIGHTENED_TLB)
hve->hv_enlightenments_control.enlightened_npt_tlb = 1; hve->hv_enlightenments_control.enlightened_npt_tlb = 1;
if (ms_hyperv.nested_features & HV_X64_NESTED_MSR_BITMAP)
hve->hv_enlightenments_control.msr_bitmap = 1;
} }
static inline void svm_hv_hardware_setup(void) static inline void svm_hv_hardware_setup(void)
@ -83,14 +86,7 @@ static inline void svm_hv_vmcb_dirty_nested_enlightenments(
struct hv_enlightenments *hve = struct hv_enlightenments *hve =
(struct hv_enlightenments *)vmcb->control.reserved_sw; (struct hv_enlightenments *)vmcb->control.reserved_sw;
/* if (hve->hv_enlightenments_control.msr_bitmap)
* vmcb can be NULL if called during early vcpu init.
* And it's okay not to mark vmcb dirty during vcpu init
* as we mark it dirty unconditionally towards end of vcpu
* init phase.
*/
if (vmcb_is_clean(vmcb, VMCB_HV_NESTED_ENLIGHTENMENTS) &&
hve->hv_enlightenments_control.msr_bitmap)
vmcb_mark_dirty(vmcb, VMCB_HV_NESTED_ENLIGHTENMENTS); vmcb_mark_dirty(vmcb, VMCB_HV_NESTED_ENLIGHTENMENTS);
} }

View File

@ -54,7 +54,6 @@ struct nested_vmx_msrs {
struct vmcs_config { struct vmcs_config {
int size; int size;
int order;
u32 basic_cap; u32 basic_cap;
u32 revision_id; u32 revision_id;
u32 pin_based_exec_ctrl; u32 pin_based_exec_ctrl;

View File

@ -12,8 +12,6 @@
DEFINE_STATIC_KEY_FALSE(enable_evmcs); DEFINE_STATIC_KEY_FALSE(enable_evmcs);
#if IS_ENABLED(CONFIG_HYPERV)
#define EVMCS1_OFFSET(x) offsetof(struct hv_enlightened_vmcs, x) #define EVMCS1_OFFSET(x) offsetof(struct hv_enlightened_vmcs, x)
#define EVMCS1_FIELD(number, name, clean_field)[ROL16(number, 6)] = \ #define EVMCS1_FIELD(number, name, clean_field)[ROL16(number, 6)] = \
{EVMCS1_OFFSET(name), clean_field} {EVMCS1_OFFSET(name), clean_field}
@ -296,6 +294,7 @@ const struct evmcs_field vmcs_field_to_evmcs_1[] = {
}; };
const unsigned int nr_evmcs_1_fields = ARRAY_SIZE(vmcs_field_to_evmcs_1); const unsigned int nr_evmcs_1_fields = ARRAY_SIZE(vmcs_field_to_evmcs_1);
#if IS_ENABLED(CONFIG_HYPERV)
__init void evmcs_sanitize_exec_ctrls(struct vmcs_config *vmcs_conf) __init void evmcs_sanitize_exec_ctrls(struct vmcs_config *vmcs_conf)
{ {
vmcs_conf->pin_based_exec_ctrl &= ~EVMCS1_UNSUPPORTED_PINCTRL; vmcs_conf->pin_based_exec_ctrl &= ~EVMCS1_UNSUPPORTED_PINCTRL;
@ -362,6 +361,7 @@ void nested_evmcs_filter_control_msr(u32 msr_index, u64 *pdata)
case MSR_IA32_VMX_PROCBASED_CTLS2: case MSR_IA32_VMX_PROCBASED_CTLS2:
ctl_high &= ~EVMCS1_UNSUPPORTED_2NDEXEC; ctl_high &= ~EVMCS1_UNSUPPORTED_2NDEXEC;
break; break;
case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
case MSR_IA32_VMX_PINBASED_CTLS: case MSR_IA32_VMX_PINBASED_CTLS:
ctl_high &= ~EVMCS1_UNSUPPORTED_PINCTRL; ctl_high &= ~EVMCS1_UNSUPPORTED_PINCTRL;
break; break;
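The TRUE_PINBASED addition matters because both MSRs share one layout: allowed-0 settings in the low 32 bits, allowed-1 settings in the high 32 bits, so the same high-half mask applies. A minimal sketch of that masking (the mask value is made up, not the real EVMCS1_UNSUPPORTED_PINCTRL):

#include <stdint.h>

#define TOY_UNSUPPORTED_PINCTRL	0x00000060u	/* illustrative mask */

/* Clear the allowed-1 bits for features an eVMCS cannot express, so a
 * nested hypervisor never tries to enable them. */
uint64_t toy_filter_pinbased(uint64_t msr_val)
{
	uint32_t lo = (uint32_t)msr_val;
	uint32_t hi = (uint32_t)(msr_val >> 32) & ~TOY_UNSUPPORTED_PINCTRL;

	return ((uint64_t)hi << 32) | lo;
}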

View File

@ -59,12 +59,12 @@ DECLARE_STATIC_KEY_FALSE(enable_evmcs);
SECONDARY_EXEC_SHADOW_VMCS | \ SECONDARY_EXEC_SHADOW_VMCS | \
SECONDARY_EXEC_TSC_SCALING | \ SECONDARY_EXEC_TSC_SCALING | \
SECONDARY_EXEC_PAUSE_LOOP_EXITING) SECONDARY_EXEC_PAUSE_LOOP_EXITING)
#define EVMCS1_UNSUPPORTED_VMEXIT_CTRL (VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL) #define EVMCS1_UNSUPPORTED_VMEXIT_CTRL \
(VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | \
VM_EXIT_SAVE_VMX_PREEMPTION_TIMER)
#define EVMCS1_UNSUPPORTED_VMENTRY_CTRL (VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) #define EVMCS1_UNSUPPORTED_VMENTRY_CTRL (VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL)
#define EVMCS1_UNSUPPORTED_VMFUNC (VMX_VMFUNC_EPTP_SWITCHING) #define EVMCS1_UNSUPPORTED_VMFUNC (VMX_VMFUNC_EPTP_SWITCHING)
#if IS_ENABLED(CONFIG_HYPERV)
struct evmcs_field { struct evmcs_field {
u16 offset; u16 offset;
u16 clean_field; u16 clean_field;
@ -73,26 +73,56 @@ struct evmcs_field {
extern const struct evmcs_field vmcs_field_to_evmcs_1[]; extern const struct evmcs_field vmcs_field_to_evmcs_1[];
extern const unsigned int nr_evmcs_1_fields; extern const unsigned int nr_evmcs_1_fields;
static __always_inline int get_evmcs_offset(unsigned long field, static __always_inline int evmcs_field_offset(unsigned long field,
u16 *clean_field) u16 *clean_field)
{ {
unsigned int index = ROL16(field, 6); unsigned int index = ROL16(field, 6);
const struct evmcs_field *evmcs_field; const struct evmcs_field *evmcs_field;
if (unlikely(index >= nr_evmcs_1_fields)) { if (unlikely(index >= nr_evmcs_1_fields))
WARN_ONCE(1, "KVM: accessing unsupported EVMCS field %lx\n",
field);
return -ENOENT; return -ENOENT;
}
evmcs_field = &vmcs_field_to_evmcs_1[index]; evmcs_field = &vmcs_field_to_evmcs_1[index];
/*
* Use offset=0 to detect holes in eVMCS. This offset belongs to
* 'revision_id' but this field has no encoding and is supposed to
* be accessed directly.
*/
if (unlikely(!evmcs_field->offset))
return -ENOENT;
if (clean_field) if (clean_field)
*clean_field = evmcs_field->clean_field; *clean_field = evmcs_field->clean_field;
return evmcs_field->offset; return evmcs_field->offset;
} }
static inline u64 evmcs_read_any(struct hv_enlightened_vmcs *evmcs,
unsigned long field, u16 offset)
{
/*
* vmcs12_read_any() doesn't care whether the supplied structure
* is 'struct vmcs12' or 'struct hv_enlightened_vmcs' as it takes
* the exact offset of the required field, use it for convenience
* here.
*/
return vmcs12_read_any((void *)evmcs, field, offset);
}
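The comment's point, that vmcs12_read_any() only needs a base pointer and a byte offset, is easy to see in isolation. A minimal sketch (fixed at 64 bits, ignoring the per-field widths the kernel helper derives from the field encoding):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Read a 64-bit field at a byte offset from an opaque structure; the
 * caller guarantees the offset came from a matching layout table. */
uint64_t toy_read_any(const void *base, size_t offset)
{
	uint64_t v;

	memcpy(&v, (const char *)base + offset, sizeof(v));
	return v;
}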
#if IS_ENABLED(CONFIG_HYPERV)
static __always_inline int get_evmcs_offset(unsigned long field,
u16 *clean_field)
{
int offset = evmcs_field_offset(field, clean_field);
WARN_ONCE(offset < 0, "KVM: accessing unsupported EVMCS field %lx\n",
field);
return offset;
}
static __always_inline void evmcs_write64(unsigned long field, u64 value) static __always_inline void evmcs_write64(unsigned long field, u64 value)
{ {
u16 clean_field; u16 clean_field;

View File

@ -7,6 +7,7 @@
#include <asm/mmu_context.h> #include <asm/mmu_context.h>
#include "cpuid.h" #include "cpuid.h"
#include "evmcs.h"
#include "hyperv.h" #include "hyperv.h"
#include "mmu.h" #include "mmu.h"
#include "nested.h" #include "nested.h"
@ -4851,18 +4852,20 @@ static struct vmcs *alloc_shadow_vmcs(struct kvm_vcpu *vcpu)
struct loaded_vmcs *loaded_vmcs = vmx->loaded_vmcs; struct loaded_vmcs *loaded_vmcs = vmx->loaded_vmcs;
/* /*
* We should allocate a shadow vmcs for vmcs01 only when L1 * KVM allocates a shadow VMCS only when L1 executes VMXON and frees it
* executes VMXON and free it when L1 executes VMXOFF. * when L1 executes VMXOFF or the vCPU is forced out of nested
* As it is invalid to execute VMXON twice, we shouldn't reach * operation. VMXON faults if the CPU is already post-VMXON, so it
* here when vmcs01 already has an allocated shadow vmcs. * should be impossible to already have an allocated shadow VMCS. KVM
* doesn't support virtualization of VMCS shadowing, so vmcs01 should
* always be the loaded VMCS.
*/ */
WARN_ON(loaded_vmcs == &vmx->vmcs01 && loaded_vmcs->shadow_vmcs); if (WARN_ON(loaded_vmcs != &vmx->vmcs01 || loaded_vmcs->shadow_vmcs))
return loaded_vmcs->shadow_vmcs;
loaded_vmcs->shadow_vmcs = alloc_vmcs(true);
if (loaded_vmcs->shadow_vmcs)
vmcs_clear(loaded_vmcs->shadow_vmcs);
if (!loaded_vmcs->shadow_vmcs) {
loaded_vmcs->shadow_vmcs = alloc_vmcs(true);
if (loaded_vmcs->shadow_vmcs)
vmcs_clear(loaded_vmcs->shadow_vmcs);
}
return loaded_vmcs->shadow_vmcs; return loaded_vmcs->shadow_vmcs;
} }
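The rewritten helper turns the old WARN-and-continue into a guard clause: if the precondition is violated or the object already exists, bail out with whatever is there rather than allocating again. The shape, with stand-in types:

#include <stdio.h>
#include <stdlib.h>

struct toy_vmcs { char page[4096]; };

struct toy_vmcs *toy_get_or_alloc_shadow(struct toy_vmcs **slot,
					 int precondition_ok)
{
	/* Guard clause: never allocate in an unexpected state, and
	 * never leak or replace an existing allocation. */
	if (!precondition_ok || *slot) {
		fprintf(stderr, "toy: unexpected shadow VMCS state\n");
		return *slot;
	}
	*slot = calloc(1, sizeof(**slot));
	return *slot;
}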
@ -5099,27 +5102,49 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
if (!nested_vmx_check_permission(vcpu)) if (!nested_vmx_check_permission(vcpu))
return 1; return 1;
/*
* In VMX non-root operation, when the VMCS-link pointer is INVALID_GPA,
* any VMREAD sets the ALU flags for VMfailInvalid.
*/
if (vmx->nested.current_vmptr == INVALID_GPA ||
(is_guest_mode(vcpu) &&
get_vmcs12(vcpu)->vmcs_link_pointer == INVALID_GPA))
return nested_vmx_failInvalid(vcpu);
/* Decode instruction info and find the field to read */ /* Decode instruction info and find the field to read */
field = kvm_register_read(vcpu, (((instr_info) >> 28) & 0xf)); field = kvm_register_read(vcpu, (((instr_info) >> 28) & 0xf));
offset = vmcs_field_to_offset(field); if (!evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) {
if (offset < 0) /*
return nested_vmx_fail(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT); * In VMX non-root operation, when the VMCS-link pointer is INVALID_GPA,
* any VMREAD sets the ALU flags for VMfailInvalid.
*/
if (vmx->nested.current_vmptr == INVALID_GPA ||
(is_guest_mode(vcpu) &&
get_vmcs12(vcpu)->vmcs_link_pointer == INVALID_GPA))
return nested_vmx_failInvalid(vcpu);
if (!is_guest_mode(vcpu) && is_vmcs12_ext_field(field)) offset = get_vmcs12_field_offset(field);
copy_vmcs02_to_vmcs12_rare(vcpu, vmcs12); if (offset < 0)
return nested_vmx_fail(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
/* Read the field, zero-extended to a u64 value */ if (!is_guest_mode(vcpu) && is_vmcs12_ext_field(field))
value = vmcs12_read_any(vmcs12, field, offset); copy_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
/* Read the field, zero-extended to a u64 value */
value = vmcs12_read_any(vmcs12, field, offset);
} else {
/*
* Hyper-V TLFS (as of 6.0b) explicitly states that while an
* enlightened VMCS is active, VMREAD/VMWRITE instructions are
* unsupported. Unfortunately, certain versions of Windows 11
* don't comply with this requirement, which is not enforced in
* genuine Hyper-V. Allow VMREAD from an enlightened VMCS as a
* workaround, as misbehaving guests would otherwise panic on VM-Fail.
* Note, enlightened VMCS is incompatible with shadow VMCS, so
* all VMREADs from L2 should go to L1.
*/
if (WARN_ON_ONCE(is_guest_mode(vcpu)))
return nested_vmx_failInvalid(vcpu);
offset = evmcs_field_offset(field, NULL);
if (offset < 0)
return nested_vmx_fail(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
/* Read the field, zero-extended to a u64 value */
value = evmcs_read_any(vmx->nested.hv_evmcs, field, offset);
}
/* /*
* Now copy part of this value to register or memory, as requested. * Now copy part of this value to register or memory, as requested.
@ -5214,7 +5239,7 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
field = kvm_register_read(vcpu, (((instr_info) >> 28) & 0xf)); field = kvm_register_read(vcpu, (((instr_info) >> 28) & 0xf));
offset = vmcs_field_to_offset(field); offset = get_vmcs12_field_offset(field);
if (offset < 0) if (offset < 0)
return nested_vmx_fail(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT); return nested_vmx_fail(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
@ -6462,7 +6487,7 @@ static u64 nested_vmx_calc_vmcs_enum_msr(void)
max_idx = 0; max_idx = 0;
for (i = 0; i < nr_vmcs12_fields; i++) { for (i = 0; i < nr_vmcs12_fields; i++) {
/* The vmcs12 table is very, very sparsely populated. */ /* The vmcs12 table is very, very sparsely populated. */
if (!vmcs_field_to_offset_table[i]) if (!vmcs12_field_offsets[i])
continue; continue;
idx = vmcs_field_index(VMCS12_IDX_TO_ENC(i)); idx = vmcs_field_index(VMCS12_IDX_TO_ENC(i));
@ -6771,6 +6796,7 @@ __init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *))
} }
struct kvm_x86_nested_ops vmx_nested_ops = { struct kvm_x86_nested_ops vmx_nested_ops = {
.leave_nested = vmx_leave_nested,
.check_events = vmx_check_nested_events, .check_events = vmx_check_nested_events,
.hv_timer_pending = nested_vmx_preemption_timer_pending, .hv_timer_pending = nested_vmx_preemption_timer_pending,
.triple_fault = nested_vmx_triple_fault, .triple_fault = nested_vmx_triple_fault,

View File

@ -8,7 +8,7 @@
FIELD(number, name), \ FIELD(number, name), \
[ROL16(number##_HIGH, 6)] = VMCS12_OFFSET(name) + sizeof(u32) [ROL16(number##_HIGH, 6)] = VMCS12_OFFSET(name) + sizeof(u32)
const unsigned short vmcs_field_to_offset_table[] = { const unsigned short vmcs12_field_offsets[] = {
FIELD(VIRTUAL_PROCESSOR_ID, virtual_processor_id), FIELD(VIRTUAL_PROCESSOR_ID, virtual_processor_id),
FIELD(POSTED_INTR_NV, posted_intr_nv), FIELD(POSTED_INTR_NV, posted_intr_nv),
FIELD(GUEST_ES_SELECTOR, guest_es_selector), FIELD(GUEST_ES_SELECTOR, guest_es_selector),
@ -151,4 +151,4 @@ const unsigned short vmcs_field_to_offset_table[] = {
FIELD(HOST_RSP, host_rsp), FIELD(HOST_RSP, host_rsp),
FIELD(HOST_RIP, host_rip), FIELD(HOST_RIP, host_rip),
}; };
const unsigned int nr_vmcs12_fields = ARRAY_SIZE(vmcs_field_to_offset_table); const unsigned int nr_vmcs12_fields = ARRAY_SIZE(vmcs12_field_offsets);

View File

@ -361,10 +361,10 @@ static inline void vmx_check_vmcs12_offsets(void)
CHECK_OFFSET(guest_pml_index, 996); CHECK_OFFSET(guest_pml_index, 996);
} }
extern const unsigned short vmcs_field_to_offset_table[]; extern const unsigned short vmcs12_field_offsets[];
extern const unsigned int nr_vmcs12_fields; extern const unsigned int nr_vmcs12_fields;
static inline short vmcs_field_to_offset(unsigned long field) static inline short get_vmcs12_field_offset(unsigned long field)
{ {
unsigned short offset; unsigned short offset;
unsigned int index; unsigned int index;
@ -377,7 +377,7 @@ static inline short vmcs_field_to_offset(unsigned long field)
return -ENOENT; return -ENOENT;
index = array_index_nospec(index, nr_vmcs12_fields); index = array_index_nospec(index, nr_vmcs12_fields);
offset = vmcs_field_to_offset_table[index]; offset = vmcs12_field_offsets[index];
if (offset == 0) if (offset == 0)
return -ENOENT; return -ENOENT;
return offset; return offset;
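get_vmcs12_field_offset() relies on two conventions visible above: out-of-range indices fail, and offset 0 marks a hole because offset 0 belongs to a directly-accessed header field with no encoding. A compact model (table contents invented; the kernel version additionally clamps the index with array_index_nospec() to block speculative out-of-bounds reads, which this sketch omits):

#include <errno.h>
#include <stdint.h>

static const uint16_t toy_offsets[] = { 0, 8, 0, 24, 32 };

int toy_field_offset(unsigned int index)
{
	if (index >= sizeof(toy_offsets) / sizeof(toy_offsets[0]))
		return -ENOENT;
	if (!toy_offsets[index])	/* 0 == hole in the sparse table */
		return -ENOENT;
	return toy_offsets[index];
}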

View File

@ -1487,11 +1487,12 @@ static int vmx_rtit_ctl_check(struct kvm_vcpu *vcpu, u64 data)
return 0; return 0;
} }
static bool vmx_can_emulate_instruction(struct kvm_vcpu *vcpu, void *insn, int insn_len) static bool vmx_can_emulate_instruction(struct kvm_vcpu *vcpu, int emul_type,
void *insn, int insn_len)
{ {
/* /*
* Emulation of instructions in SGX enclaves is impossible as RIP does * Emulation of instructions in SGX enclaves is impossible as RIP does
* not point tthe failing instruction, and even if it did, the code * not point at the failing instruction, and even if it did, the code
* stream is inaccessible. Inject #UD instead of exiting to userspace * stream is inaccessible. Inject #UD instead of exiting to userspace
* so that guest userspace can't DoS the guest simply by triggering * so that guest userspace can't DoS the guest simply by triggering
* emulation (enclaves are CPL3 only). * emulation (enclaves are CPL3 only).
@ -2603,7 +2604,6 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf,
return -EIO; return -EIO;
vmcs_conf->size = vmx_msr_high & 0x1fff; vmcs_conf->size = vmx_msr_high & 0x1fff;
vmcs_conf->order = get_order(vmcs_conf->size);
vmcs_conf->basic_cap = vmx_msr_high & ~0x1fff; vmcs_conf->basic_cap = vmx_msr_high & ~0x1fff;
vmcs_conf->revision_id = vmx_msr_low; vmcs_conf->revision_id = vmx_msr_low;
@ -2628,7 +2628,7 @@ struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu, gfp_t flags)
struct page *pages; struct page *pages;
struct vmcs *vmcs; struct vmcs *vmcs;
pages = __alloc_pages_node(node, flags, vmcs_config.order); pages = __alloc_pages_node(node, flags, 0);
if (!pages) if (!pages)
return NULL; return NULL;
vmcs = page_address(pages); vmcs = page_address(pages);
@ -2647,7 +2647,7 @@ struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu, gfp_t flags)
void free_vmcs(struct vmcs *vmcs) void free_vmcs(struct vmcs *vmcs)
{ {
free_pages((unsigned long)vmcs, vmcs_config.order); free_page((unsigned long)vmcs);
} }
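Dropping vmcs_config.order works because the region size reported by IA32_VMX_BASIC (masked with 0x1fff above) is architecturally capped at 4 KiB, so an order-0 page always suffices. As a plain-C stand-in for the paired helpers:

#include <stdlib.h>

#define TOY_PAGE_SIZE	4096

/* One page is always enough for a VMCS, so no order bookkeeping. */
void *toy_alloc_vmcs(void)
{
	return aligned_alloc(TOY_PAGE_SIZE, TOY_PAGE_SIZE);
}

void toy_free_vmcs(void *vmcs)
{
	free(vmcs);
}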
/* /*
@ -4094,10 +4094,14 @@ void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
vmcs_write32(HOST_IA32_SYSENTER_CS, low32); vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
/* /*
* If 32-bit syscall is enabled, vmx_vcpu_load_vmcs rewrites * SYSENTER is used for 32-bit system calls on either 32-bit or
* HOST_IA32_SYSENTER_ESP. * 64-bit kernels. It is always zero if neither is allowed; otherwise
* vmx_vcpu_load_vmcs loads it with the per-CPU entry stack (and may
* have already done so!).
*/ */
vmcs_writel(HOST_IA32_SYSENTER_ESP, 0); if (!IS_ENABLED(CONFIG_IA32_EMULATION) && !IS_ENABLED(CONFIG_X86_32))
vmcs_writel(HOST_IA32_SYSENTER_ESP, 0);
rdmsrl(MSR_IA32_SYSENTER_EIP, tmpl); rdmsrl(MSR_IA32_SYSENTER_EIP, tmpl);
vmcs_writel(HOST_IA32_SYSENTER_EIP, tmpl); /* 22.2.3 */ vmcs_writel(HOST_IA32_SYSENTER_EIP, tmpl); /* 22.2.3 */
@ -4901,8 +4905,33 @@ static int handle_exception_nmi(struct kvm_vcpu *vcpu)
dr6 = vmx_get_exit_qual(vcpu); dr6 = vmx_get_exit_qual(vcpu);
if (!(vcpu->guest_debug & if (!(vcpu->guest_debug &
(KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) { (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) {
/*
* If the #DB was due to ICEBP, a.k.a. INT1, skip the
* instruction. ICEBP generates a trap-like #DB, but
* despite its interception control being tied to #DB,
* is an instruction intercept, i.e. the VM-Exit occurs
* on the ICEBP itself. Note, skipping ICEBP also
* clears STI and MOVSS blocking.
*
* For all other #DBs, set vmcs.PENDING_DBG_EXCEPTIONS.BS
* if single-step is enabled in RFLAGS and STI or MOVSS
* blocking is active, as the CPU doesn't set the bit
* on VM-Exit due to #DB interception. VM-Entry has a
* consistency check that a single-step #DB is pending
* in this scenario as the previous instruction cannot
* have toggled RFLAGS.TF 0=>1 (because STI and POP/MOV
* don't modify RFLAGS), therefore the one instruction
* delay when activating single-step breakpoints must
* have already expired. Note, the CPU sets/clears BS
* as appropriate for all other VM-Exits types.
*/
if (is_icebp(intr_info)) if (is_icebp(intr_info))
WARN_ON(!skip_emulated_instruction(vcpu)); WARN_ON(!skip_emulated_instruction(vcpu));
else if ((vmx_get_rflags(vcpu) & X86_EFLAGS_TF) &&
(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS)))
vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS,
vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS) | DR6_BS);
kvm_queue_exception_p(vcpu, DB_VECTOR, dr6); kvm_queue_exception_p(vcpu, DB_VECTOR, dr6);
return 1; return 1;
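The new else-if condition is dense; isolated, it is a three-flag predicate (the constants below are stand-ins for RFLAGS.TF and the VMCS interruptibility-state bits, not the real encodings):

#include <stdbool.h>
#include <stdint.h>

#define TOY_RFLAGS_TF	(1u << 8)
#define TOY_BLK_STI	(1u << 0)
#define TOY_BLK_MOV_SS	(1u << 1)

/* A single-step #DB must be marked pending when TF is set and the CPU
 * was in an STI/MOV-SS interruptibility shadow at the VM-Exit, since
 * the CPU doesn't set PENDING_DBG.BS itself on #DB interception. */
bool toy_needs_pending_bs(uint32_t rflags, uint32_t intr_state)
{
	return (rflags & TOY_RFLAGS_TF) &&
	       (intr_state & (TOY_BLK_STI | TOY_BLK_MOV_SS));
}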
@ -5397,7 +5426,7 @@ static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
{ {
gpa_t gpa; gpa_t gpa;
if (!vmx_can_emulate_instruction(vcpu, NULL, 0)) if (!vmx_can_emulate_instruction(vcpu, EMULTYPE_PF, NULL, 0))
return 1; return 1;
/* /*

View File

@ -3535,6 +3535,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
if (data & ~supported_xss) if (data & ~supported_xss)
return 1; return 1;
vcpu->arch.ia32_xss = data; vcpu->arch.ia32_xss = data;
kvm_update_cpuid_runtime(vcpu);
break; break;
case MSR_SMI_COUNT: case MSR_SMI_COUNT:
if (!msr_info->host_initiated) if (!msr_info->host_initiated)
@ -4229,6 +4230,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_SREGS2: case KVM_CAP_SREGS2:
case KVM_CAP_EXIT_ON_EMULATION_FAILURE: case KVM_CAP_EXIT_ON_EMULATION_FAILURE:
case KVM_CAP_VCPU_ATTRIBUTES: case KVM_CAP_VCPU_ATTRIBUTES:
case KVM_CAP_SYS_ATTRIBUTES:
r = 1; r = 1;
break; break;
case KVM_CAP_EXIT_HYPERCALL: case KVM_CAP_EXIT_HYPERCALL:
@ -4331,7 +4333,49 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
break; break;
} }
return r; return r;
}
static inline void __user *kvm_get_attr_addr(struct kvm_device_attr *attr)
{
void __user *uaddr = (void __user*)(unsigned long)attr->addr;
if ((u64)(unsigned long)uaddr != attr->addr)
return ERR_PTR(-EFAULT);
return uaddr;
}
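kvm_get_attr_addr() guards against a 64-bit attr->addr that does not survive the cast to a native pointer, which is possible on 32-bit kernels. The round-trip check in isolation (NULL standing in for ERR_PTR(-EFAULT)):

#include <stdint.h>

void *toy_attr_addr(uint64_t addr)
{
	void *uaddr = (void *)(unsigned long)addr;

	/* If truncation occurred, the cast back won't match. */
	if ((uint64_t)(unsigned long)uaddr != addr)
		return NULL;
	return uaddr;
}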
static int kvm_x86_dev_get_attr(struct kvm_device_attr *attr)
{
u64 __user *uaddr = kvm_get_attr_addr(attr);
if (attr->group)
return -ENXIO;
if (IS_ERR(uaddr))
return PTR_ERR(uaddr);
switch (attr->attr) {
case KVM_X86_XCOMP_GUEST_SUPP:
if (put_user(supported_xcr0, uaddr))
return -EFAULT;
return 0;
default:
return -ENXIO;
}
}
static int kvm_x86_dev_has_attr(struct kvm_device_attr *attr)
{
if (attr->group)
return -ENXIO;
switch (attr->attr) {
case KVM_X86_XCOMP_GUEST_SUPP:
return 0;
default:
return -ENXIO;
}
} }
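Seen from userspace, these two handlers back system-scoped KVM_HAS_DEVICE_ATTR/KVM_GET_DEVICE_ATTR ioctls on /dev/kvm. A hedged usage sketch, assuming a kernel and <linux/kvm.h> that carry this series (including KVM_X86_XCOMP_GUEST_SUPP):

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/kvm.h>

int main(void)
{
	uint64_t xcomp = 0;
	struct kvm_device_attr attr = {
		.group = 0,
		.attr  = KVM_X86_XCOMP_GUEST_SUPP,
		.addr  = (uint64_t)(unsigned long)&xcomp,
	};
	int kvm = open("/dev/kvm", O_RDONLY);

	if (kvm < 0) {
		perror("/dev/kvm");
		return 1;
	}
	/* HAS returns 0 iff the attribute exists; GET fills *addr. */
	if (ioctl(kvm, KVM_HAS_DEVICE_ATTR, &attr) == 0 &&
	    ioctl(kvm, KVM_GET_DEVICE_ATTR, &attr) == 0)
		printf("guest-supported XCR0: %#llx\n",
		       (unsigned long long)xcomp);
	close(kvm);
	return 0;
}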
long kvm_arch_dev_ioctl(struct file *filp, long kvm_arch_dev_ioctl(struct file *filp,
@ -4422,6 +4466,22 @@ long kvm_arch_dev_ioctl(struct file *filp,
case KVM_GET_SUPPORTED_HV_CPUID: case KVM_GET_SUPPORTED_HV_CPUID:
r = kvm_ioctl_get_supported_hv_cpuid(NULL, argp); r = kvm_ioctl_get_supported_hv_cpuid(NULL, argp);
break; break;
case KVM_GET_DEVICE_ATTR: {
struct kvm_device_attr attr;
r = -EFAULT;
if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
break;
r = kvm_x86_dev_get_attr(&attr);
break;
}
case KVM_HAS_DEVICE_ATTR: {
struct kvm_device_attr attr;
r = -EFAULT;
if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
break;
r = kvm_x86_dev_has_attr(&attr);
break;
}
default: default:
r = -EINVAL; r = -EINVAL;
break; break;
@ -4860,8 +4920,10 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
vcpu->arch.apic->sipi_vector = events->sipi_vector; vcpu->arch.apic->sipi_vector = events->sipi_vector;
if (events->flags & KVM_VCPUEVENT_VALID_SMM) { if (events->flags & KVM_VCPUEVENT_VALID_SMM) {
if (!!(vcpu->arch.hflags & HF_SMM_MASK) != events->smi.smm) if (!!(vcpu->arch.hflags & HF_SMM_MASK) != events->smi.smm) {
kvm_x86_ops.nested_ops->leave_nested(vcpu);
kvm_smm_changed(vcpu, events->smi.smm); kvm_smm_changed(vcpu, events->smi.smm);
}
vcpu->arch.smi_pending = events->smi.pending; vcpu->arch.smi_pending = events->smi.pending;
@ -5022,11 +5084,11 @@ static int kvm_arch_tsc_has_attr(struct kvm_vcpu *vcpu,
static int kvm_arch_tsc_get_attr(struct kvm_vcpu *vcpu, static int kvm_arch_tsc_get_attr(struct kvm_vcpu *vcpu,
struct kvm_device_attr *attr) struct kvm_device_attr *attr)
{ {
u64 __user *uaddr = (u64 __user *)(unsigned long)attr->addr; u64 __user *uaddr = kvm_get_attr_addr(attr);
int r; int r;
if ((u64)(unsigned long)uaddr != attr->addr) if (IS_ERR(uaddr))
return -EFAULT; return PTR_ERR(uaddr);
switch (attr->attr) { switch (attr->attr) {
case KVM_VCPU_TSC_OFFSET: case KVM_VCPU_TSC_OFFSET:
@ -5045,12 +5107,12 @@ static int kvm_arch_tsc_get_attr(struct kvm_vcpu *vcpu,
static int kvm_arch_tsc_set_attr(struct kvm_vcpu *vcpu, static int kvm_arch_tsc_set_attr(struct kvm_vcpu *vcpu,
struct kvm_device_attr *attr) struct kvm_device_attr *attr)
{ {
u64 __user *uaddr = (u64 __user *)(unsigned long)attr->addr; u64 __user *uaddr = kvm_get_attr_addr(attr);
struct kvm *kvm = vcpu->kvm; struct kvm *kvm = vcpu->kvm;
int r; int r;
if ((u64)(unsigned long)uaddr != attr->addr) if (IS_ERR(uaddr))
return -EFAULT; return PTR_ERR(uaddr);
switch (attr->attr) { switch (attr->attr) {
case KVM_VCPU_TSC_OFFSET: { case KVM_VCPU_TSC_OFFSET: {
@ -6810,6 +6872,13 @@ int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu, gva_t addr, void *val,
} }
EXPORT_SYMBOL_GPL(kvm_write_guest_virt_system); EXPORT_SYMBOL_GPL(kvm_write_guest_virt_system);
static int kvm_can_emulate_insn(struct kvm_vcpu *vcpu, int emul_type,
void *insn, int insn_len)
{
return static_call(kvm_x86_can_emulate_instruction)(vcpu, emul_type,
insn, insn_len);
}
int handle_ud(struct kvm_vcpu *vcpu) int handle_ud(struct kvm_vcpu *vcpu)
{ {
static const char kvm_emulate_prefix[] = { __KVM_EMULATE_PREFIX }; static const char kvm_emulate_prefix[] = { __KVM_EMULATE_PREFIX };
@ -6817,7 +6886,7 @@ int handle_ud(struct kvm_vcpu *vcpu)
char sig[5]; /* ud2; .ascii "kvm" */ char sig[5]; /* ud2; .ascii "kvm" */
struct x86_exception e; struct x86_exception e;
if (unlikely(!static_call(kvm_x86_can_emulate_instruction)(vcpu, NULL, 0))) if (unlikely(!kvm_can_emulate_insn(vcpu, emul_type, NULL, 0)))
return 1; return 1;
if (force_emulation_prefix && if (force_emulation_prefix &&
@ -8193,7 +8262,7 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
bool writeback = true; bool writeback = true;
bool write_fault_to_spt; bool write_fault_to_spt;
if (unlikely(!static_call(kvm_x86_can_emulate_instruction)(vcpu, insn, insn_len))) if (unlikely(!kvm_can_emulate_insn(vcpu, emulation_type, insn, insn_len)))
return 1; return 1;
vcpu->arch.l1tf_flush_l1d = true; vcpu->arch.l1tf_flush_l1d = true;
@ -9706,7 +9775,7 @@ void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
kvm_make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD); kvm_make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD);
} }
void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu) static void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
{ {
if (!lapic_in_kernel(vcpu)) if (!lapic_in_kernel(vcpu))
return; return;
@ -11209,7 +11278,8 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
vcpu->arch.msr_misc_features_enables = 0; vcpu->arch.msr_misc_features_enables = 0;
vcpu->arch.xcr0 = XFEATURE_MASK_FP; __kvm_set_xcr(vcpu, 0, XFEATURE_MASK_FP);
__kvm_set_msr(vcpu, MSR_IA32_XSS, 0, true);
} }
/* All GPRs except RDX (handled below) are zeroed on RESET/INIT. */ /* All GPRs except RDX (handled below) are zeroed on RESET/INIT. */
@ -11226,8 +11296,6 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
cpuid_0x1 = kvm_find_cpuid_entry(vcpu, 1, 0); cpuid_0x1 = kvm_find_cpuid_entry(vcpu, 1, 0);
kvm_rdx_write(vcpu, cpuid_0x1 ? cpuid_0x1->eax : 0x600); kvm_rdx_write(vcpu, cpuid_0x1 ? cpuid_0x1->eax : 0x600);
vcpu->arch.ia32_xss = 0;
static_call(kvm_x86_vcpu_reset)(vcpu, init_event); static_call(kvm_x86_vcpu_reset)(vcpu, init_event);
kvm_set_rflags(vcpu, X86_EFLAGS_FIXED); kvm_set_rflags(vcpu, X86_EFLAGS_FIXED);

View File

@ -316,10 +316,7 @@ int __kvm_xen_has_interrupt(struct kvm_vcpu *v)
"\tnotq %0\n" "\tnotq %0\n"
"\t" LOCK_PREFIX "andq %0, %2\n" "\t" LOCK_PREFIX "andq %0, %2\n"
"2:\n" "2:\n"
"\t.section .fixup,\"ax\"\n" _ASM_EXTABLE_UA(1b, 2b)
"3:\tjmp\t2b\n"
"\t.previous\n"
_ASM_EXTABLE_UA(1b, 3b)
: "=r" (evtchn_pending_sel), : "=r" (evtchn_pending_sel),
"+m" (vi->evtchn_pending_sel), "+m" (vi->evtchn_pending_sel),
"+m" (v->arch.xen.evtchn_pending_sel) "+m" (v->arch.xen.evtchn_pending_sel)
@ -335,10 +332,7 @@ int __kvm_xen_has_interrupt(struct kvm_vcpu *v)
"\tnotl %0\n" "\tnotl %0\n"
"\t" LOCK_PREFIX "andl %0, %2\n" "\t" LOCK_PREFIX "andl %0, %2\n"
"2:\n" "2:\n"
"\t.section .fixup,\"ax\"\n" _ASM_EXTABLE_UA(1b, 2b)
"3:\tjmp\t2b\n"
"\t.previous\n"
_ASM_EXTABLE_UA(1b, 3b)
: "=r" (evtchn_pending_sel32), : "=r" (evtchn_pending_sel32),
"+m" (vi->evtchn_pending_sel), "+m" (vi->evtchn_pending_sel),
"+m" (v->arch.xen.evtchn_pending_sel) "+m" (v->arch.xen.evtchn_pending_sel)

View File

@ -353,8 +353,8 @@ static void pci_fixup_video(struct pci_dev *pdev)
} }
} }
} }
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID, DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_ANY_ID, PCI_ANY_ID,
PCI_CLASS_DISPLAY_VGA, 8, pci_fixup_video); PCI_CLASS_DISPLAY_VGA, 8, pci_fixup_video);
static const struct dmi_system_id msi_k8t_dmi_table[] = { static const struct dmi_system_id msi_k8t_dmi_table[] = {

View File

@ -1061,21 +1061,33 @@ again:
} }
static unsigned long __part_start_io_acct(struct block_device *part, static unsigned long __part_start_io_acct(struct block_device *part,
unsigned int sectors, unsigned int op) unsigned int sectors, unsigned int op,
unsigned long start_time)
{ {
const int sgrp = op_stat_group(op); const int sgrp = op_stat_group(op);
unsigned long now = READ_ONCE(jiffies);
part_stat_lock(); part_stat_lock();
update_io_ticks(part, now, false); update_io_ticks(part, start_time, false);
part_stat_inc(part, ios[sgrp]); part_stat_inc(part, ios[sgrp]);
part_stat_add(part, sectors[sgrp], sectors); part_stat_add(part, sectors[sgrp], sectors);
part_stat_local_inc(part, in_flight[op_is_write(op)]); part_stat_local_inc(part, in_flight[op_is_write(op)]);
part_stat_unlock(); part_stat_unlock();
return now; return start_time;
} }
/**
* bio_start_io_acct_time - start I/O accounting for bio based drivers
* @bio: bio to start account for
* @start_time: start time that should be passed back to bio_end_io_acct().
*/
void bio_start_io_acct_time(struct bio *bio, unsigned long start_time)
{
__part_start_io_acct(bio->bi_bdev, bio_sectors(bio),
bio_op(bio), start_time);
}
EXPORT_SYMBOL_GPL(bio_start_io_acct_time);
/** /**
* bio_start_io_acct - start I/O accounting for bio based drivers * bio_start_io_acct - start I/O accounting for bio based drivers
* @bio: bio to start account for * @bio: bio to start account for
@ -1084,14 +1096,15 @@ static unsigned long __part_start_io_acct(struct block_device *part,
*/ */
unsigned long bio_start_io_acct(struct bio *bio) unsigned long bio_start_io_acct(struct bio *bio)
{ {
return __part_start_io_acct(bio->bi_bdev, bio_sectors(bio), bio_op(bio)); return __part_start_io_acct(bio->bi_bdev, bio_sectors(bio),
bio_op(bio), jiffies);
} }
EXPORT_SYMBOL_GPL(bio_start_io_acct); EXPORT_SYMBOL_GPL(bio_start_io_acct);
unsigned long disk_start_io_acct(struct gendisk *disk, unsigned int sectors, unsigned long disk_start_io_acct(struct gendisk *disk, unsigned int sectors,
unsigned int op) unsigned int op)
{ {
return __part_start_io_acct(disk->part0, sectors, op); return __part_start_io_acct(disk->part0, sectors, op, jiffies);
} }
EXPORT_SYMBOL(disk_start_io_acct); EXPORT_SYMBOL(disk_start_io_acct);
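The new export exists for bio-based drivers (device-mapper is the in-tree motivation) that must fix a bio's accounting start time before the bio is actually accounted. A hedged in-kernel sketch; the function name is invented, and it assumes the declarations land in <linux/blkdev.h> as in this series:

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/jiffies.h>

/* Record the start time once, account with that same value, and pass
 * it again at completion so the measured duration stays consistent
 * even if accounting starts well after submission. */
static void toy_account_bio(struct bio *bio)
{
	unsigned long start = jiffies;

	bio_start_io_acct_time(bio, start);
	/* ... clone/queue the bio; completion may run much later ... */
	bio_end_io_acct(bio, start);
}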

Some files were not shown because too many files have changed in this diff