Merge 5.13-rc4 into char-misc-next

We need the char/misc fixes in here as well.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Greg Kroah-Hartman 2021-05-31 09:03:10 +02:00
commit 2d06954e23
702 changed files with 8271 additions and 3497 deletions


@ -1458,11 +1458,22 @@ unprivileged_bpf_disabled
=========================
Writing 1 to this entry will disable unprivileged calls to ``bpf()``;
once disabled, calling ``bpf()`` without ``CAP_SYS_ADMIN`` will return
``-EPERM``.
once disabled, calling ``bpf()`` without ``CAP_SYS_ADMIN`` or ``CAP_BPF``
will return ``-EPERM``. Once set to 1, this can't be cleared from the
running kernel anymore.
Once set, this can't be cleared.
Writing 2 to this entry will also disable unprivileged calls to ``bpf()``;
however, an admin can still change this setting later on, if needed, by
writing 0 or 1 to this entry.
If ``BPF_UNPRIV_DEFAULT_OFF`` is enabled in the kernel config, then this
entry will default to 2 instead of 0.
= =============================================================
0 Unprivileged calls to ``bpf()`` are enabled
1 Unprivileged calls to ``bpf()`` are disabled without recovery
2 Unprivileged calls to ``bpf()`` are disabled
= =============================================================
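As an illustrative sketch (not part of this diff), an unprivileged program
can observe the setting by attempting a trivial ``bpf()`` call and checking
for ``-EPERM``; the map parameters below are arbitrary:

#include <errno.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/bpf.h>

int main(void)
{
	union bpf_attr attr = {
		.map_type    = BPF_MAP_TYPE_ARRAY,
		.key_size    = 4,
		.value_size  = 4,
		.max_entries = 1,
	};

	/* Fails with EPERM when unprivileged_bpf_disabled is 1 or 2 */
	if (syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr)) < 0 &&
	    errno == EPERM)
		printf("unprivileged bpf() is disabled\n");
	return 0;
}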
watchdog
========


@ -60,7 +60,6 @@ properties:
maxItems: 2
idt,xtal-load-femtofarads:
$ref: /schemas/types.yaml#/definitions/uint32
minimum: 9000
maximum: 22760
description: Optional load capacitor for XTAL1 and XTAL2
@ -84,7 +83,6 @@ patternProperties:
enum: [ 1800000, 2500000, 3300000 ]
idt,slew-percent:
description: The Slew rate control for CMOS single-ended.
$ref: /schemas/types.yaml#/definitions/uint32
enum: [ 80, 85, 90, 100 ]
required:


@ -46,6 +46,13 @@ properties:
description: |
I2C bus timeout in microseconds
fsl,i2c-erratum-a004447:
$ref: /schemas/types.yaml#/definitions/flag
description: |
Indicates the presence of QorIQ erratum A-004447, which
says that the standard I2C recovery mechanism does not
work and an alternate implementation is needed.
required:
- compatible
- reg


@ -102,7 +102,6 @@ patternProperties:
st,adc-channel-names:
description: List of single-ended channel names.
$ref: /schemas/types.yaml#/definitions/string-array
st,filter-order:
description: |


@ -38,6 +38,5 @@ properties:
Duration in seconds for which the key should be kept pressed for the
device to reset automatically. Devices with the key-pressed reset
feature can specify this property.
$ref: /schemas/types.yaml#/definitions/uint32
additionalProperties: true


@ -92,7 +92,6 @@ properties:
this interconnect to send RPMh commands.
qcom,bcm-voter-names:
$ref: /schemas/types.yaml#/definitions/string-array
description: |
Names for each of the qcom,bcm-voters specified.


@ -4,8 +4,8 @@ This controller is present on BCM6318, BCM6328, BCM6362 and BCM63268.
In these SoCs, LEDs can be controlled either as GPIOs or by hardware.
However, on some devices there are Serial LEDs (LEDs connected to a 74x164
controller), which can either be controlled by software (exporting the 74x164
as spi-gpio. See Documentation/devicetree/bindings/gpio/gpio-74x164.txt), or
by hardware using this driver.
as spi-gpio. See Documentation/devicetree/bindings/gpio/fairchild,74hc595.yaml),
or by hardware using this driver.
Some of these Serial LEDs are hardware controlled (e.g. ethernet LEDs), and
exporting the 74x164 as spi-gpio prevents those LEDs from being hardware
controlled, so the only way to keep them working is to use this driver.


@ -3,7 +3,7 @@ LEDs connected to Broadcom BCM6358 controller
This controller is present on BCM6358 and BCM6368.
In these SoCs there are Serial LEDs (LEDs connected to a 74x164 controller),
which can either be controlled by software (exporting the 74x164 as spi-gpio.
See Documentation/devicetree/bindings/gpio/gpio-74x164.txt), or
See Documentation/devicetree/bindings/gpio/fairchild,74hc595.yaml), or
by hardware using this driver.
Required properties:


@ -99,32 +99,26 @@ properties:
Indicates that the channel acts as primary among the bonded channels.
port:
type: object
$ref: /schemas/graph.yaml#/properties/port
unevaluatedProperties: false
description:
Child port node corresponding to the data input, in accordance with the
video interface bindings defined in
Documentation/devicetree/bindings/media/video-interfaces.txt.
The port node must contain at least one endpoint.
Child port node corresponding to the data input. The port node must
contain at least one endpoint.
properties:
endpoint:
type: object
$ref: /schemas/graph.yaml#/$defs/endpoint-base
unevaluatedProperties: false
properties:
remote-endpoint:
description:
A phandle to the remote tuner endpoint subnode in remote node
port.
sync-active:
$ref: /schemas/types.yaml#/definitions/uint32
enum: [0, 1]
description:
Indicates sync signal polarity, 0/1 for low/high respectively.
This property maps to SYNCAC bit in the hardware manual. The
default is 1 (active high).
additionalProperties: false
required:
- compatible
- reg


@ -105,7 +105,6 @@ properties:
- description: Whether the IPA clock is enabled (if valid)
qcom,smem-state-names:
$ref: /schemas/types.yaml#/definitions/string-array
description: The names of the state bits used for SMP2P output
items:
- const: ipa-clock-enabled-valid


@ -10,7 +10,7 @@ allOf:
- $ref: ethernet-controller.yaml#
maintainers:
- Sergei Shtylyov <sergei.shtylyov@cogentembedded.com>
- Sergei Shtylyov <sergei.shtylyov@gmail.com>
properties:
compatible:


@ -9,7 +9,6 @@ Required properties:
"mediatek,mt8173-efuse" or "mediatek,efuse": for MT8173
"mediatek,mt8192-efuse", "mediatek,efuse": for MT8192
"mediatek,mt8516-efuse", "mediatek,efuse": for MT8516
"mediatek,mt8192-efuse", "mediatek,efuse": for MT8192
- reg: Should contain registers location and length
= Data cells =


@ -118,7 +118,7 @@ patternProperties:
description:
Specifies the Spread Spectrum Clocking mode used. It can be NO_SSC,
EXTERNAL_SSC or INTERNAL_SSC.
Refer include/dt-bindings/phy/phy-cadence-torrent.h for the constants to be used.
Refer include/dt-bindings/phy/phy-cadence.h for the constants to be used.
$ref: /schemas/types.yaml#/definitions/uint32
enum: [0, 1, 2]
default: 0


@ -20,7 +20,7 @@ properties:
maxItems: 1
phys:
$ref: /schemas/types.yaml#/definitions/phandle
maxItems: 1
description: phandle to the USB phy
monitored-battery:


@ -49,7 +49,7 @@ properties:
maxItems: 1
memory-region:
$ref: /schemas/types.yaml#/definitions/phandle
maxItems: 1
description:
phandle to a node describing reserved memory (System RAM memory)
The M core can't access all of the DDR memory space on some platforms,


@ -72,7 +72,7 @@ examples:
mux-controls = <&mux>;
spi-flash@0 {
flash@0 {
compatible = "jedec,spi-nor";
reg = <0>;
spi-max-frequency = <40000000>;


@ -123,6 +123,8 @@ are in ``drivers/usb/common/common.c``.
In addition, some functions useful for creating debugging output are
defined in ``drivers/usb/common/debug.c``.
.. _usb_header:
Host-Side Data Types and Macros
===============================


@ -109,6 +109,16 @@ auxiliary vector.
scv 0 syscalls will always behave as PPC_FEATURE2_HTM_NOSC.
ptrace
------
When ptracing system calls (PTRACE_SYSCALL), the pt_regs.trap value contains
the system call type, which can be used to distinguish between sc and scv 0
system calls so that the different register conventions can be accounted for.
If the value of (pt_regs.trap & 0xfff0) is 0xc00 then the system call was
performed with the sc instruction, if it is 0x3000 then the system call was
performed with the scv 0 instruction.
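For illustration only (not part of this patch), a tracer might classify the
two flavours as below; ``classify_syscall()`` is a hypothetical helper and
the sketch assumes a 64-bit powerpc tracee stopped at a syscall:

#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <asm/ptrace.h>		/* struct pt_regs with the trap field */

static void classify_syscall(pid_t pid)
{
	struct pt_regs regs;

	ptrace(PTRACE_GETREGS, pid, NULL, &regs);
	if ((regs.trap & 0xfff0) == 0xc00)
		printf("sc system call\n");
	else if ((regs.trap & 0xfff0) == 0x3000)
		printf("scv 0 system call\n");
}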
vsyscall
========


@ -250,14 +250,14 @@ Users can read via ``ioctl(SECCOMP_IOCTL_NOTIF_RECV)`` (or ``poll()``) on a
seccomp notification fd to receive a ``struct seccomp_notif``, which contains
five members: the input length of the structure, a unique-per-filter ``id``,
the ``pid`` of the task which triggered this request (which may be 0 if the
task is in a pid ns not visible from the listener's pid namespace), a ``flags``
member which for now only has ``SECCOMP_NOTIF_FLAG_SIGNALED``, representing
whether or not the notification is a result of a non-fatal signal, and the
``data`` passed to seccomp. Userspace can then make a decision based on this
information about what to do, and ``ioctl(SECCOMP_IOCTL_NOTIF_SEND)`` a
response, indicating what should be returned to userspace. The ``id`` member of
``struct seccomp_notif_resp`` should be the same ``id`` as in ``struct
seccomp_notif``.
task is in a pid ns not visible from the listener's pid namespace). The
notification also contains the ``data`` passed to seccomp, and a filters flag.
The structure should be zeroed out prior to calling the ioctl.
Userspace can then make a decision based on this information about what to do,
and ``ioctl(SECCOMP_IOCTL_NOTIF_SEND)`` a response, indicating what should be
returned to userspace. The ``id`` member of ``struct seccomp_notif_resp`` should
be the same ``id`` as in ``struct seccomp_notif``.
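A minimal listener-side sketch of that request/response cycle (an
illustration, not text from the patch; error handling omitted):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/seccomp.h>

static void handle_one_notification(int notify_fd)
{
	struct seccomp_notif req;
	struct seccomp_notif_resp resp;

	memset(&req, 0, sizeof(req));	/* must be zeroed before RECV */
	ioctl(notify_fd, SECCOMP_IOCTL_NOTIF_RECV, &req);

	memset(&resp, 0, sizeof(resp));
	resp.id = req.id;	/* must match the id from the request */
	resp.val = 0;		/* return value seen by the tracee */
	resp.error = 0;		/* or a negative errno to fail the call */
	ioctl(notify_fd, SECCOMP_IOCTL_NOTIF_SEND, &resp);
}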
It is worth noting that ``struct seccomp_data`` contains the values of register
arguments to the syscall, but does not contain pointers to memory. The task's


@ -118,10 +118,12 @@ KVM_REQ_MMU_RELOAD
necessary to inform each VCPU to completely refresh the tables. This
request is used for that.
KVM_REQ_PENDING_TIMER
KVM_REQ_UNBLOCK
This request may be made from a timer handler run on the host on behalf
of a VCPU. It informs the VCPU thread to inject a timer interrupt.
This request informs the vCPU to exit kvm_vcpu_block. It is used for
example from timer handlers that run on the host on behalf of a vCPU,
or in order to update the interrupt routing and ensure that assigned
devices will wake up the vCPU.
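As a hedged kernel-side sketch (not from this diff), a host-side handler
would typically pair the request with a kick so a blocked vCPU actually
wakes up; ``example_timer_fired()`` is a hypothetical caller:

static void example_timer_fired(struct kvm_vcpu *vcpu)
{
	/* Ask the vCPU to exit kvm_vcpu_block()... */
	kvm_make_request(KVM_REQ_UNBLOCK, vcpu);
	/* ...and wake it if it is currently blocked. */
	kvm_vcpu_kick(vcpu);
}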
KVM_REQ_UNHALT


@ -1618,8 +1618,8 @@ F: Documentation/devicetree/bindings/sound/amlogic*
F: sound/soc/meson/
ARM/Amlogic Meson SoC support
M: Neil Armstrong <narmstrong@baylibre.com>
M: Kevin Hilman <khilman@baylibre.com>
R: Neil Armstrong <narmstrong@baylibre.com>
R: Jerome Brunet <jbrunet@baylibre.com>
R: Martin Blumenstingl <martin.blumenstingl@googlemail.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@ -4138,6 +4138,14 @@ S: Odd Fixes
F: Documentation/devicetree/bindings/arm/cavium-thunder2.txt
F: arch/arm64/boot/dts/cavium/thunder2-99xx*
CBS/ETF/TAPRIO QDISCS
M: Vinicius Costa Gomes <vinicius.gomes@intel.com>
S: Maintained
L: netdev@vger.kernel.org
F: net/sched/sch_cbs.c
F: net/sched/sch_etf.c
F: net/sched/sch_taprio.c
CC2520 IEEE-802.15.4 RADIO DRIVER
M: Varka Bhadram <varkabhadram@gmail.com>
L: linux-wpan@vger.kernel.org
@ -5569,7 +5577,6 @@ F: drivers/soc/fsl/dpio
DPAA2 ETHERNET DRIVER
M: Ioana Ciornei <ioana.ciornei@nxp.com>
M: Ioana Radulescu <ruxandra.radulescu@nxp.com>
L: netdev@vger.kernel.org
S: Maintained
F: Documentation/networking/device_drivers/ethernet/freescale/dpaa2/ethernet-driver.rst
@ -12180,6 +12187,7 @@ F: drivers/platform/surface/surfacepro3_button.c
MICROSOFT SURFACE SYSTEM AGGREGATOR SUBSYSTEM
M: Maximilian Luz <luzmaximilian@gmail.com>
L: platform-driver-x86@vger.kernel.org
S: Maintained
W: https://github.com/linux-surface/surface-aggregator-module
C: irc://chat.freenode.net/##linux-surface
@ -12680,9 +12688,9 @@ F: drivers/rtc/rtc-ntxec.c
F: include/linux/mfd/ntxec.h
NETRONOME ETHERNET DRIVERS
M: Simon Horman <simon.horman@netronome.com>
M: Simon Horman <simon.horman@corigine.com>
R: Jakub Kicinski <kuba@kernel.org>
L: oss-drivers@netronome.com
L: oss-drivers@corigine.com
S: Maintained
F: drivers/net/ethernet/netronome/
@ -12709,7 +12717,6 @@ M: "David S. Miller" <davem@davemloft.net>
M: Jakub Kicinski <kuba@kernel.org>
L: netdev@vger.kernel.org
S: Maintained
W: http://www.linuxfoundation.org/en/Net
Q: https://patchwork.kernel.org/project/netdevbpf/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next.git
@ -12754,7 +12761,6 @@ M: "David S. Miller" <davem@davemloft.net>
M: Jakub Kicinski <kuba@kernel.org>
L: netdev@vger.kernel.org
S: Maintained
W: http://www.linuxfoundation.org/en/Net
Q: https://patchwork.kernel.org/project/netdevbpf/list/
B: mailto:netdev@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git
@ -12896,8 +12902,10 @@ F: include/uapi/linux/nexthop.h
F: net/ipv4/nexthop.c
NFC SUBSYSTEM
M: Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
L: linux-nfc@lists.01.org (moderated for non-subscribers)
L: netdev@vger.kernel.org
S: Orphan
S: Maintained
F: Documentation/devicetree/bindings/net/nfc/
F: drivers/nfc/
F: include/linux/platform_data/nfcmrvl.h
@ -13205,7 +13213,6 @@ F: Documentation/devicetree/bindings/sound/tfa9879.txt
F: sound/soc/codecs/tfa9879*
NXP-NCI NFC DRIVER
M: Clément Perrochaud <clement.perrochaud@effinnov.com>
R: Charles Gorand <charles.gorand@effinnov.com>
L: linux-nfc@lists.01.org (moderated for non-subscribers)
S: Supported
@ -14317,10 +14324,12 @@ PER-CPU MEMORY ALLOCATOR
M: Dennis Zhou <dennis@kernel.org>
M: Tejun Heo <tj@kernel.org>
M: Christoph Lameter <cl@linux.com>
L: linux-mm@kvack.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/dennis/percpu.git
F: arch/*/include/asm/percpu.h
F: include/linux/percpu*.h
F: lib/percpu*.c
F: mm/percpu*.c
PER-TASK DELAY ACCOUNTING
@ -14734,7 +14743,6 @@ W: https://wireless.wiki.kernel.org/en/users/Drivers/p54
F: drivers/net/wireless/intersil/prism54/
PROC FILESYSTEM
R: Alexey Dobriyan <adobriyan@gmail.com>
L: linux-kernel@vger.kernel.org
L: linux-fsdevel@vger.kernel.org
S: Maintained
@ -15945,6 +15953,7 @@ S390 IUCV NETWORK LAYER
M: Julian Wiedmann <jwi@linux.ibm.com>
M: Karsten Graul <kgraul@linux.ibm.com>
L: linux-s390@vger.kernel.org
L: netdev@vger.kernel.org
S: Supported
W: http://www.ibm.com/developerworks/linux/linux390/
F: drivers/s390/net/*iucv*
@ -15955,6 +15964,7 @@ S390 NETWORK DRIVERS
M: Julian Wiedmann <jwi@linux.ibm.com>
M: Karsten Graul <kgraul@linux.ibm.com>
L: linux-s390@vger.kernel.org
L: netdev@vger.kernel.org
S: Supported
W: http://www.ibm.com/developerworks/linux/linux390/
F: drivers/s390/net/
@ -17304,6 +17314,12 @@ L: linux-i2c@vger.kernel.org
S: Maintained
F: drivers/i2c/busses/i2c-stm32*
ST STM32 SPI DRIVER
M: Alain Volmat <alain.volmat@foss.st.com>
L: linux-spi@vger.kernel.org
S: Maintained
F: drivers/spi/spi-stm32.c
ST STPDDC60 DRIVER
M: Daniel Nilsson <daniel.nilsson@flex.com>
L: linux-hwmon@vger.kernel.org
@ -17662,7 +17678,6 @@ R: Mika Westerberg <mika.westerberg@linux.intel.com>
L: linux-i2c@vger.kernel.org
S: Maintained
F: drivers/i2c/busses/i2c-designware-*
F: include/linux/platform_data/i2c-designware.h
SYNOPSYS DESIGNWARE MMC/SD/SDIO DRIVER
M: Jaehoon Chung <jh80.chung@samsung.com>
@ -19998,6 +20013,7 @@ F: arch/x86/xen/*swiotlb*
F: drivers/xen/*swiotlb*
XFS FILESYSTEM
C: irc://irc.oftc.net/xfs
M: Darrick J. Wong <djwong@kernel.org>
M: linux-xfs@vger.kernel.org
L: linux-xfs@vger.kernel.org


@ -2,7 +2,7 @@
VERSION = 5
PATCHLEVEL = 13
SUBLEVEL = 0
EXTRAVERSION = -rc2
EXTRAVERSION = -rc4
NAME = Frozen Wasteland
# *DOCUMENTATION*
@ -928,6 +928,11 @@ CC_FLAGS_LTO += -fvisibility=hidden
# Limit inlining across translation units to reduce binary size
KBUILD_LDFLAGS += -mllvm -import-instr-limit=5
# Check for frame size exceeding threshold during prolog/epilog insertion.
ifneq ($(CONFIG_FRAME_WARN),0)
KBUILD_LDFLAGS += -plugin-opt=-warn-stack-size=$(CONFIG_FRAME_WARN)
endif
endif
ifdef CONFIG_LTO


@ -10,6 +10,7 @@ config ARCH_WPCM450
bool "Support for WPCM450 BMC (Hermon)"
depends on ARCH_MULTI_V5
select CPU_ARM926T
select WPCM450_AIC
select NPCM7XX_TIMER
help
General support for WPCM450 BMC (Hermon).


@ -121,8 +121,13 @@ static int cplds_probe(struct platform_device *pdev)
return fpga->irq;
base_irq = platform_get_irq(pdev, 1);
if (base_irq < 0)
if (base_irq < 0) {
base_irq = 0;
} else {
ret = devm_irq_alloc_descs(&pdev->dev, base_irq, base_irq, CPLDS_NB_IRQ, 0);
if (ret < 0)
return ret;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
fpga->base = devm_ioremap_resource(&pdev->dev, res);


@ -1,6 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-y += kernel/ mm/
obj-$(CONFIG_NET) += net/
obj-y += kernel/ mm/ net/
obj-$(CONFIG_KVM) += kvm/
obj-$(CONFIG_XEN) += xen/
obj-$(CONFIG_CRYPTO) += crypto/


@ -14,7 +14,6 @@
ports {
port@0 {
reg = <0>;
csi20_in: endpoint {
clock-lanes = <0>;
data-lanes = <1 2>;
@ -29,7 +28,6 @@
ports {
port@0 {
reg = <0>;
csi40_in: endpoint {
clock-lanes = <0>;
data-lanes = <1 2>;


@ -2573,6 +2573,10 @@
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
};
port@1 {
#address-cells = <1>;
#size-cells = <0>;
@ -2628,6 +2632,10 @@
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
};
port@1 {
#address-cells = <1>;
#size-cells = <0>;


@ -2419,6 +2419,10 @@
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
};
port@1 {
#address-cells = <1>;
#size-cells = <0>;
@ -2474,6 +2478,10 @@
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
};
port@1 {
#address-cells = <1>;
#size-cells = <0>;


@ -33,7 +33,7 @@
status = "okay";
ports {
port {
port@0 {
csi40_in: endpoint {
clock-lanes = <0>;
data-lanes = <1 2>;


@ -1823,6 +1823,10 @@
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
};
port@1 {
#address-cells = <1>;
#size-cells = <0>;


@ -2709,6 +2709,10 @@
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
};
port@1 {
#address-cells = <1>;
#size-cells = <0>;
@ -2764,6 +2768,10 @@
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
};
port@1 {
#address-cells = <1>;
#size-cells = <0>;


@ -192,6 +192,10 @@
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
};
port@1 {
#address-cells = <1>;
#size-cells = <0>;


@ -3097,6 +3097,10 @@
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
};
port@1 {
#address-cells = <1>;
#size-cells = <0>;
@ -3152,6 +3156,10 @@
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
};
port@1 {
#address-cells = <1>;
#size-cells = <0>;
@ -3191,6 +3199,10 @@
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
};
port@1 {
#address-cells = <1>;
#size-cells = <0>;


@ -2761,6 +2761,10 @@
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
};
port@1 {
#address-cells = <1>;
#size-cells = <0>;
@ -2816,6 +2820,10 @@
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
};
port@1 {
#address-cells = <1>;
#size-cells = <0>;


@ -2499,6 +2499,10 @@
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
};
port@1 {
#address-cells = <1>;
#size-cells = <0>;
@ -2554,6 +2558,10 @@
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
};
port@1 {
#address-cells = <1>;
#size-cells = <0>;


@ -2575,6 +2575,10 @@
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
};
port@1 {
#address-cells = <1>;
#size-cells = <0>;
@ -2630,6 +2634,10 @@
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
};
port@1 {
#address-cells = <1>;
#size-cells = <0>;


@ -1106,6 +1106,10 @@
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
};
port@1 {
#address-cells = <1>;
#size-cells = <0>;


@ -1439,6 +1439,10 @@
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
};
port@1 {
#address-cells = <1>;
#size-cells = <0>;
@ -1478,6 +1482,10 @@
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
};
port@1 {
#address-cells = <1>;
#size-cells = <0>;


@ -298,8 +298,6 @@
ports {
port@0 {
reg = <0>;
csi40_in: endpoint {
clock-lanes = <0>;
data-lanes = <1 2>;


@ -1970,6 +1970,10 @@
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
};
port@1 {
#address-cells = <1>;
#size-cells = <0>;


@ -349,7 +349,6 @@
ports {
port@0 {
reg = <0>;
csi20_in: endpoint {
clock-lanes = <0>;
data-lanes = <1>;
@ -364,8 +363,6 @@
ports {
port@0 {
reg = <0>;
csi40_in: endpoint {
clock-lanes = <0>;
data-lanes = <1 2 3 4>;


@ -74,7 +74,7 @@ static inline unsigned long array_index_mask_nospec(unsigned long idx,
* This insanity brought to you by speculative system register reads,
* out-of-order memory accesses, sequence locks and Thomas Gleixner.
*
* http://lists.infradead.org/pipermail/linux-arm-kernel/2019-February/631195.html
* https://lore.kernel.org/r/alpine.DEB.2.21.1902081950260.1662@nanos.tec.linutronix.de/
*/
#define arch_counter_enforce_ordering(val) do { \
u64 tmp, _val = (val); \


@ -63,6 +63,7 @@
#define __KVM_HOST_SMCCC_FUNC___pkvm_cpu_set_vector 18
#define __KVM_HOST_SMCCC_FUNC___pkvm_prot_finalize 19
#define __KVM_HOST_SMCCC_FUNC___pkvm_mark_hyp 20
#define __KVM_HOST_SMCCC_FUNC___kvm_adjust_pc 21
#ifndef __ASSEMBLY__
@ -201,6 +202,8 @@ extern void __kvm_timer_set_cntvoff(u64 cntvoff);
extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
extern void __kvm_adjust_pc(struct kvm_vcpu *vcpu);
extern u64 __vgic_v3_get_gic_config(void);
extern u64 __vgic_v3_read_vmcr(void);
extern void __vgic_v3_write_vmcr(u32 vmcr);


@ -463,4 +463,9 @@ static __always_inline void kvm_incr_pc(struct kvm_vcpu *vcpu)
vcpu->arch.flags |= KVM_ARM64_INCREMENT_PC;
}
static inline bool vcpu_has_feature(struct kvm_vcpu *vcpu, int feature)
{
return test_bit(feature, vcpu->arch.features);
}
#endif /* __ARM64_KVM_EMULATE_H__ */


@ -720,11 +720,13 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
return ret;
}
if (run->immediate_exit)
return -EINTR;
vcpu_load(vcpu);
if (run->immediate_exit) {
ret = -EINTR;
goto out;
}
kvm_sigset_activate(vcpu);
ret = 1;
@ -897,6 +899,18 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
kvm_sigset_deactivate(vcpu);
out:
/*
* In the unlikely event that we are returning to userspace
* with pending exceptions or PC adjustment, commit these
* adjustments in order to give userspace a consistent view of
* the vcpu state. Note that this relies on __kvm_adjust_pc()
* being preempt-safe on VHE.
*/
if (unlikely(vcpu->arch.flags & (KVM_ARM64_PENDING_EXCEPTION |
KVM_ARM64_INCREMENT_PC)))
kvm_call_hyp(__kvm_adjust_pc, vcpu);
vcpu_put(vcpu);
return ret;
}


@ -296,7 +296,7 @@ static void enter_exception32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
*vcpu_pc(vcpu) = vect_offset;
}
void kvm_inject_exception(struct kvm_vcpu *vcpu)
static void kvm_inject_exception(struct kvm_vcpu *vcpu)
{
if (vcpu_el1_is_32bit(vcpu)) {
switch (vcpu->arch.flags & KVM_ARM64_EXCEPT_MASK) {
@ -329,3 +329,19 @@ void kvm_inject_exception(struct kvm_vcpu *vcpu)
}
}
}
/*
* Adjust the guest PC (and potentially exception state) depending on
* flags provided by the emulation code.
*/
void __kvm_adjust_pc(struct kvm_vcpu *vcpu)
{
if (vcpu->arch.flags & KVM_ARM64_PENDING_EXCEPTION) {
kvm_inject_exception(vcpu);
vcpu->arch.flags &= ~(KVM_ARM64_PENDING_EXCEPTION |
KVM_ARM64_EXCEPT_MASK);
} else if (vcpu->arch.flags & KVM_ARM64_INCREMENT_PC) {
kvm_skip_instr(vcpu);
vcpu->arch.flags &= ~KVM_ARM64_INCREMENT_PC;
}
}


@ -13,8 +13,6 @@
#include <asm/kvm_emulate.h>
#include <asm/kvm_host.h>
void kvm_inject_exception(struct kvm_vcpu *vcpu);
static inline void kvm_skip_instr(struct kvm_vcpu *vcpu)
{
if (vcpu_mode_is_32bit(vcpu)) {
@ -43,22 +41,6 @@ static inline void __kvm_skip_instr(struct kvm_vcpu *vcpu)
write_sysreg_el2(*vcpu_pc(vcpu), SYS_ELR);
}
/*
* Adjust the guest PC on entry, depending on flags provided by EL1
* for the purpose of emulation (MMIO, sysreg) or exception injection.
*/
static inline void __adjust_pc(struct kvm_vcpu *vcpu)
{
if (vcpu->arch.flags & KVM_ARM64_PENDING_EXCEPTION) {
kvm_inject_exception(vcpu);
vcpu->arch.flags &= ~(KVM_ARM64_PENDING_EXCEPTION |
KVM_ARM64_EXCEPT_MASK);
} else if (vcpu->arch.flags & KVM_ARM64_INCREMENT_PC) {
kvm_skip_instr(vcpu);
vcpu->arch.flags &= ~KVM_ARM64_INCREMENT_PC;
}
}
/*
* Skip an instruction while host sysregs are live.
* Assumes host is always 64-bit.


@ -28,6 +28,13 @@ static void handle___kvm_vcpu_run(struct kvm_cpu_context *host_ctxt)
cpu_reg(host_ctxt, 1) = __kvm_vcpu_run(kern_hyp_va(vcpu));
}
static void handle___kvm_adjust_pc(struct kvm_cpu_context *host_ctxt)
{
DECLARE_REG(struct kvm_vcpu *, vcpu, host_ctxt, 1);
__kvm_adjust_pc(kern_hyp_va(vcpu));
}
static void handle___kvm_flush_vm_context(struct kvm_cpu_context *host_ctxt)
{
__kvm_flush_vm_context();
@ -170,6 +177,7 @@ typedef void (*hcall_t)(struct kvm_cpu_context *);
static const hcall_t host_hcall[] = {
HANDLE_FUNC(__kvm_vcpu_run),
HANDLE_FUNC(__kvm_adjust_pc),
HANDLE_FUNC(__kvm_flush_vm_context),
HANDLE_FUNC(__kvm_tlb_flush_vmid_ipa),
HANDLE_FUNC(__kvm_tlb_flush_vmid),


@ -23,8 +23,8 @@
extern unsigned long hyp_nr_cpus;
struct host_kvm host_kvm;
struct hyp_pool host_s2_mem;
struct hyp_pool host_s2_dev;
static struct hyp_pool host_s2_mem;
static struct hyp_pool host_s2_dev;
/*
* Copies of the host's CPU features registers holding sanitized values.


@ -17,7 +17,6 @@
#include <nvhe/trap_handler.h>
struct hyp_pool hpool;
struct kvm_pgtable_mm_ops pkvm_pgtable_mm_ops;
unsigned long hyp_nr_cpus;
#define hyp_percpu_size ((unsigned long)__per_cpu_end - \
@ -27,6 +26,7 @@ static void *vmemmap_base;
static void *hyp_pgt_base;
static void *host_s2_mem_pgt_base;
static void *host_s2_dev_pgt_base;
static struct kvm_pgtable_mm_ops pkvm_pgtable_mm_ops;
static int divide_memory_pool(void *virt, unsigned long size)
{


@ -4,7 +4,6 @@
* Author: Marc Zyngier <marc.zyngier@arm.com>
*/
#include <hyp/adjust_pc.h>
#include <hyp/switch.h>
#include <hyp/sysreg-sr.h>
@ -201,7 +200,7 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
*/
__debug_save_host_buffers_nvhe(vcpu);
__adjust_pc(vcpu);
__kvm_adjust_pc(vcpu);
/*
* We must restore the 32-bit state before the sysregs, thanks


@ -4,7 +4,6 @@
* Author: Marc Zyngier <marc.zyngier@arm.com>
*/
#include <hyp/adjust_pc.h>
#include <hyp/switch.h>
#include <linux/arm-smccc.h>
@ -132,7 +131,7 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
__load_guest_stage2(vcpu->arch.hw_mmu);
__activate_traps(vcpu);
__adjust_pc(vcpu);
__kvm_adjust_pc(vcpu);
sysreg_restore_guest_state_vhe(guest_ctxt);
__debug_switch_to_guest(vcpu);


@ -1156,13 +1156,13 @@ out_unlock:
bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
{
if (!kvm->arch.mmu.pgt)
return 0;
return false;
__unmap_stage2_range(&kvm->arch.mmu, range->start << PAGE_SHIFT,
(range->end - range->start) << PAGE_SHIFT,
range->may_block);
return 0;
return false;
}
bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
@ -1170,7 +1170,7 @@ bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
kvm_pfn_t pfn = pte_pfn(range->pte);
if (!kvm->arch.mmu.pgt)
return 0;
return false;
WARN_ON(range->end - range->start != 1);
@ -1190,7 +1190,7 @@ bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
PAGE_SIZE, __pfn_to_phys(pfn),
KVM_PGTABLE_PROT_R, NULL);
return 0;
return false;
}
bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
@ -1200,7 +1200,7 @@ bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
pte_t pte;
if (!kvm->arch.mmu.pgt)
return 0;
return false;
WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE);
@ -1213,7 +1213,7 @@ bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
if (!kvm->arch.mmu.pgt)
return 0;
return false;
return kvm_pgtable_stage2_is_young(kvm->arch.mmu.pgt,
range->start << PAGE_SHIFT);


@ -166,6 +166,25 @@ static int kvm_vcpu_enable_ptrauth(struct kvm_vcpu *vcpu)
return 0;
}
static bool vcpu_allowed_register_width(struct kvm_vcpu *vcpu)
{
struct kvm_vcpu *tmp;
bool is32bit;
int i;
is32bit = vcpu_has_feature(vcpu, KVM_ARM_VCPU_EL1_32BIT);
if (!cpus_have_const_cap(ARM64_HAS_32BIT_EL1) && is32bit)
return false;
/* Check that the vcpus are either all 32bit or all 64bit */
kvm_for_each_vcpu(i, tmp, vcpu->kvm) {
if (vcpu_has_feature(tmp, KVM_ARM_VCPU_EL1_32BIT) != is32bit)
return false;
}
return true;
}
/**
* kvm_reset_vcpu - sets core registers and sys_regs to reset value
* @vcpu: The VCPU pointer
@ -217,13 +236,14 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
}
}
if (!vcpu_allowed_register_width(vcpu)) {
ret = -EINVAL;
goto out;
}
switch (vcpu->arch.target) {
default:
if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) {
if (!cpus_have_const_cap(ARM64_HAS_32BIT_EL1)) {
ret = -EINVAL;
goto out;
}
pstate = VCPU_RESET_PSTATE_SVC;
} else {
pstate = VCPU_RESET_PSTATE_EL1;


@ -399,14 +399,14 @@ static bool trap_bvr(struct kvm_vcpu *vcpu,
struct sys_reg_params *p,
const struct sys_reg_desc *rd)
{
u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];
if (p->is_write)
reg_to_dbg(vcpu, p, rd, dbg_reg);
else
dbg_to_reg(vcpu, p, rd, dbg_reg);
trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);
return true;
}
@ -414,7 +414,7 @@ static bool trap_bvr(struct kvm_vcpu *vcpu,
static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
const struct kvm_one_reg *reg, void __user *uaddr)
{
__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];
if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
return -EFAULT;
@ -424,7 +424,7 @@ static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
const struct kvm_one_reg *reg, void __user *uaddr)
{
__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];
if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
return -EFAULT;
@ -434,21 +434,21 @@ static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
static void reset_bvr(struct kvm_vcpu *vcpu,
const struct sys_reg_desc *rd)
{
vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg] = rd->val;
vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm] = rd->val;
}
static bool trap_bcr(struct kvm_vcpu *vcpu,
struct sys_reg_params *p,
const struct sys_reg_desc *rd)
{
u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm];
if (p->is_write)
reg_to_dbg(vcpu, p, rd, dbg_reg);
else
dbg_to_reg(vcpu, p, rd, dbg_reg);
trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);
return true;
}
@ -456,7 +456,7 @@ static bool trap_bcr(struct kvm_vcpu *vcpu,
static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
const struct kvm_one_reg *reg, void __user *uaddr)
{
__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm];
if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
return -EFAULT;
@ -467,7 +467,7 @@ static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
const struct kvm_one_reg *reg, void __user *uaddr)
{
__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm];
if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
return -EFAULT;
@ -477,22 +477,22 @@ static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
static void reset_bcr(struct kvm_vcpu *vcpu,
const struct sys_reg_desc *rd)
{
vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg] = rd->val;
vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm] = rd->val;
}
static bool trap_wvr(struct kvm_vcpu *vcpu,
struct sys_reg_params *p,
const struct sys_reg_desc *rd)
{
u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm];
if (p->is_write)
reg_to_dbg(vcpu, p, rd, dbg_reg);
else
dbg_to_reg(vcpu, p, rd, dbg_reg);
trace_trap_reg(__func__, rd->reg, p->is_write,
vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg]);
trace_trap_reg(__func__, rd->CRm, p->is_write,
vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm]);
return true;
}
@ -500,7 +500,7 @@ static bool trap_wvr(struct kvm_vcpu *vcpu,
static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
const struct kvm_one_reg *reg, void __user *uaddr)
{
__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm];
if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
return -EFAULT;
@ -510,7 +510,7 @@ static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
const struct kvm_one_reg *reg, void __user *uaddr)
{
__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm];
if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
return -EFAULT;
@ -520,21 +520,21 @@ static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
static void reset_wvr(struct kvm_vcpu *vcpu,
const struct sys_reg_desc *rd)
{
vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg] = rd->val;
vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm] = rd->val;
}
static bool trap_wcr(struct kvm_vcpu *vcpu,
struct sys_reg_params *p,
const struct sys_reg_desc *rd)
{
u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm];
if (p->is_write)
reg_to_dbg(vcpu, p, rd, dbg_reg);
else
dbg_to_reg(vcpu, p, rd, dbg_reg);
trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);
return true;
}
@ -542,7 +542,7 @@ static bool trap_wcr(struct kvm_vcpu *vcpu,
static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
const struct kvm_one_reg *reg, void __user *uaddr)
{
__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm];
if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
return -EFAULT;
@ -552,7 +552,7 @@ static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
const struct kvm_one_reg *reg, void __user *uaddr)
{
__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm];
if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
return -EFAULT;
@ -562,7 +562,7 @@ static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
static void reset_wcr(struct kvm_vcpu *vcpu,
const struct sys_reg_desc *rd)
{
vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg] = rd->val;
vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm] = rd->val;
}
static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)


@ -515,7 +515,8 @@ static void __init map_mem(pgd_t *pgdp)
*/
BUILD_BUG_ON(pgd_index(direct_map_end - 1) == pgd_index(direct_map_end));
if (rodata_full || crash_mem_map || debug_pagealloc_enabled())
if (rodata_full || crash_mem_map || debug_pagealloc_enabled() ||
IS_ENABLED(CONFIG_KFENCE))
flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
/*


@ -623,7 +623,8 @@ static inline void siginfo_build_tests(void)
BUILD_BUG_ON(offsetof(siginfo_t, si_pkey) != 0x12);
/* _sigfault._perf */
BUILD_BUG_ON(offsetof(siginfo_t, si_perf) != 0x10);
BUILD_BUG_ON(offsetof(siginfo_t, si_perf_data) != 0x10);
BUILD_BUG_ON(offsetof(siginfo_t, si_perf_type) != 0x14);
/* _sigpoll */
BUILD_BUG_ON(offsetof(siginfo_t, si_band) != 0x0c);


@ -18,6 +18,7 @@
#include <asm/reboot.h>
#include <asm/setup.h>
#include <asm/mach-au1x00/au1000.h>
#include <asm/mach-au1x00/gpio-au1000.h>
#include <prom.h>
const char *get_system_type(void)


@ -3,6 +3,9 @@
*
*/
#ifndef _ASM_MIPS_BOARDS_LAUNCH_H
#define _ASM_MIPS_BOARDS_LAUNCH_H
#ifndef _ASSEMBLER_
struct cpulaunch {
@ -34,3 +37,5 @@ struct cpulaunch {
/* Polling period in count cycles for secondary CPU's */
#define LAUNCHPERIOD 10000
#endif /* _ASM_MIPS_BOARDS_LAUNCH_H */


@ -37,7 +37,7 @@
*/
notrace void arch_local_irq_disable(void)
{
preempt_disable();
preempt_disable_notrace();
__asm__ __volatile__(
" .set push \n"
@ -53,7 +53,7 @@ notrace void arch_local_irq_disable(void)
: /* no inputs */
: "memory");
preempt_enable();
preempt_enable_notrace();
}
EXPORT_SYMBOL(arch_local_irq_disable);
@ -61,7 +61,7 @@ notrace unsigned long arch_local_irq_save(void)
{
unsigned long flags;
preempt_disable();
preempt_disable_notrace();
__asm__ __volatile__(
" .set push \n"
@ -78,7 +78,7 @@ notrace unsigned long arch_local_irq_save(void)
: /* no inputs */
: "memory");
preempt_enable();
preempt_enable_notrace();
return flags;
}
@ -88,7 +88,7 @@ notrace void arch_local_irq_restore(unsigned long flags)
{
unsigned long __tmp1;
preempt_disable();
preempt_disable_notrace();
__asm__ __volatile__(
" .set push \n"
@ -106,7 +106,7 @@ notrace void arch_local_irq_restore(unsigned long flags)
: "0" (flags)
: "memory");
preempt_enable();
preempt_enable_notrace();
}
EXPORT_SYMBOL(arch_local_irq_restore);


@ -8,6 +8,7 @@
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/sizes.h>
#include <linux/of_fdt.h>
@ -25,6 +26,7 @@
__iomem void *rt_sysc_membase;
__iomem void *rt_memc_membase;
EXPORT_SYMBOL_GPL(rt_sysc_membase);
__iomem void *plat_of_remap_node(const char *node)
{


@ -0,0 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_BARRIER_H
#define __ASM_BARRIER_H
#define mb() asm volatile ("l.msync" ::: "memory")
#include <asm-generic/barrier.h>
#endif /* __ASM_BARRIER_H */


@ -278,6 +278,8 @@ void calibrate_delay(void)
pr_cont("%lu.%02lu BogoMIPS (lpj=%lu)\n",
loops_per_jiffy / (500000 / HZ),
(loops_per_jiffy / (5000 / HZ)) % 100, loops_per_jiffy);
of_node_put(cpu);
}
void __init setup_arch(char **cmdline_p)


@ -75,7 +75,6 @@ static void __init map_ram(void)
/* These mark extents of read-only kernel pages...
* ...from vmlinux.lds.S
*/
struct memblock_region *region;
v = PAGE_OFFSET;
@ -121,7 +120,7 @@ static void __init map_ram(void)
}
printk(KERN_INFO "%s: Memory: 0x%x-0x%x\n", __func__,
region->base, region->base + region->size);
start, end);
}
}
@ -129,7 +128,6 @@ void __init paging_init(void)
{
extern void tlb_init(void);
unsigned long end;
int i;
printk(KERN_INFO "Setting up paging and PTEs.\n");
@ -145,8 +143,6 @@ void __init paging_init(void)
*/
current_pgd[smp_processor_id()] = init_mm.pgd;
end = (unsigned long)__va(max_low_pfn * PAGE_SIZE);
map_ram();
zone_sizes_init();


@ -122,7 +122,15 @@
};
/include/ "pq3-i2c-0.dtsi"
i2c@3000 {
fsl,i2c-erratum-a004447;
};
/include/ "pq3-i2c-1.dtsi"
i2c@3100 {
fsl,i2c-erratum-a004447;
};
/include/ "pq3-duart-0.dtsi"
/include/ "pq3-espi-0.dtsi"
spi0: spi@7000 {


@ -371,7 +371,23 @@
};
/include/ "qoriq-i2c-0.dtsi"
i2c@118000 {
fsl,i2c-erratum-a004447;
};
i2c@118100 {
fsl,i2c-erratum-a004447;
};
/include/ "qoriq-i2c-1.dtsi"
i2c@119000 {
fsl,i2c-erratum-a004447;
};
i2c@119100 {
fsl,i2c-erratum-a004447;
};
/include/ "qoriq-duart-0.dtsi"
/include/ "qoriq-duart-1.dtsi"
/include/ "qoriq-gpio-0.dtsi"


@ -51,6 +51,7 @@
/* PPC-specific vcpu->requests bit members */
#define KVM_REQ_WATCHDOG KVM_ARCH_REQ(0)
#define KVM_REQ_EPR_EXIT KVM_ARCH_REQ(1)
#define KVM_REQ_PENDING_TIMER KVM_ARCH_REQ(2)
#include <linux/mmu_notifier.h>


@ -19,6 +19,7 @@
#ifndef _ASM_POWERPC_PTRACE_H
#define _ASM_POWERPC_PTRACE_H
#include <linux/err.h>
#include <uapi/asm/ptrace.h>
#include <asm/asm-const.h>
@ -152,25 +153,6 @@ extern unsigned long profile_pc(struct pt_regs *regs);
long do_syscall_trace_enter(struct pt_regs *regs);
void do_syscall_trace_leave(struct pt_regs *regs);
#define kernel_stack_pointer(regs) ((regs)->gpr[1])
static inline int is_syscall_success(struct pt_regs *regs)
{
return !(regs->ccr & 0x10000000);
}
static inline long regs_return_value(struct pt_regs *regs)
{
if (is_syscall_success(regs))
return regs->gpr[3];
else
return -regs->gpr[3];
}
static inline void regs_set_return_value(struct pt_regs *regs, unsigned long rc)
{
regs->gpr[3] = rc;
}
#ifdef __powerpc64__
#define user_mode(regs) ((((regs)->msr) >> MSR_PR_LG) & 0x1)
#else
@ -235,6 +217,31 @@ static __always_inline void set_trap_norestart(struct pt_regs *regs)
regs->trap |= 0x1;
}
#define kernel_stack_pointer(regs) ((regs)->gpr[1])
static inline int is_syscall_success(struct pt_regs *regs)
{
if (trap_is_scv(regs))
return !IS_ERR_VALUE((unsigned long)regs->gpr[3]);
else
return !(regs->ccr & 0x10000000);
}
static inline long regs_return_value(struct pt_regs *regs)
{
if (trap_is_scv(regs))
return regs->gpr[3];
if (is_syscall_success(regs))
return regs->gpr[3];
else
return -regs->gpr[3];
}
static inline void regs_set_return_value(struct pt_regs *regs, unsigned long rc)
{
regs->gpr[3] = rc;
}
#define arch_has_single_step() (1)
#define arch_has_block_step() (true)
#define ARCH_HAS_USER_SINGLE_STEP_REPORT


@ -41,11 +41,17 @@ static inline void syscall_rollback(struct task_struct *task,
static inline long syscall_get_error(struct task_struct *task,
struct pt_regs *regs)
{
/*
* If the system call failed,
* regs->gpr[3] contains a positive ERRORCODE.
*/
return (regs->ccr & 0x10000000UL) ? -regs->gpr[3] : 0;
if (trap_is_scv(regs)) {
unsigned long error = regs->gpr[3];
return IS_ERR_VALUE(error) ? error : 0;
} else {
/*
* If the system call failed,
* regs->gpr[3] contains a positive ERRORCODE.
*/
return (regs->ccr & 0x10000000UL) ? -regs->gpr[3] : 0;
}
}
static inline long syscall_get_return_value(struct task_struct *task,
@ -58,18 +64,22 @@ static inline void syscall_set_return_value(struct task_struct *task,
struct pt_regs *regs,
int error, long val)
{
/*
* In the general case it's not obvious that we must deal with CCR
* here, as the syscall exit path will also do that for us. However
* there are some places, eg. the signal code, which check ccr to
* decide if the value in r3 is actually an error.
*/
if (error) {
regs->ccr |= 0x10000000L;
regs->gpr[3] = error;
if (trap_is_scv(regs)) {
regs->gpr[3] = (long) error ?: val;
} else {
regs->ccr &= ~0x10000000L;
regs->gpr[3] = val;
/*
* In the general case it's not obvious that we must deal with
* CCR here, as the syscall exit path will also do that for us.
* However there are some places, eg. the signal code, which
* check ccr to decide if the value in r3 is actually an error.
*/
if (error) {
regs->ccr |= 0x10000000L;
regs->gpr[3] = error;
} else {
regs->ccr &= ~0x10000000L;
regs->gpr[3] = val;
}
}
}


@ -369,11 +369,11 @@ void __init early_setup(unsigned long dt_ptr)
apply_feature_fixups();
setup_feature_keys();
early_ioremap_setup();
/* Initialize the hash table or TLB handling */
early_init_mmu();
early_ioremap_setup();
/*
* After firmware and early platform setup code has set things up,
* we note the SPR values for configurable control/performance


@ -3936,7 +3936,7 @@ static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
break;
}
cur = ktime_get();
} while (single_task_running() && ktime_before(cur, stop));
} while (kvm_vcpu_can_poll(cur, stop));
spin_lock(&vc->lock);
vc->vcore_state = VCORE_INACTIVE;


@ -34,6 +34,7 @@ config RISCV
select ARCH_OPTIONAL_KERNEL_RWX if ARCH_HAS_STRICT_KERNEL_RWX
select ARCH_OPTIONAL_KERNEL_RWX_DEFAULT
select ARCH_SUPPORTS_HUGETLBFS if MMU
select ARCH_USE_MEMTEST
select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU
select ARCH_WANT_FRAME_POINTERS
select ARCH_WANT_HUGE_PMD_SHARE if 64BIT


@ -42,8 +42,8 @@ struct kimage_arch {
unsigned long fdt_addr;
};
const extern unsigned char riscv_kexec_relocate[];
const extern unsigned int riscv_kexec_relocate_size;
extern const unsigned char riscv_kexec_relocate[];
extern const unsigned int riscv_kexec_relocate_size;
typedef void (*riscv_kexec_method)(unsigned long first_ind_entry,
unsigned long jump_addr,


@ -14,8 +14,9 @@
#include <asm/set_memory.h> /* For set_memory_x() */
#include <linux/compiler.h> /* For unreachable() */
#include <linux/cpu.h> /* For cpu_down() */
#include <linux/reboot.h>
/**
/*
* kexec_image_info - Print received image details
*/
static void
@ -39,7 +40,7 @@ kexec_image_info(const struct kimage *image)
}
}
/**
/*
* machine_kexec_prepare - Initialize kexec
*
* This function is called from do_kexec_load, when the user has
@ -100,7 +101,7 @@ machine_kexec_prepare(struct kimage *image)
}
/**
/*
* machine_kexec_cleanup - Cleanup any leftovers from
* machine_kexec_prepare
*
@ -135,7 +136,7 @@ void machine_shutdown(void)
#endif
}
/**
/*
* machine_crash_shutdown - Prepare to kexec after a kernel crash
*
* This function is called by crash_kexec just before machine_kexec
@ -151,7 +152,7 @@ machine_crash_shutdown(struct pt_regs *regs)
pr_info("Starting crashdump kernel...\n");
}
/**
/*
* machine_kexec - Jump to the loaded kimage
*
* This function is called by kernel_kexec which is called by the


@ -84,6 +84,7 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
return 0;
}
#ifdef CONFIG_MMU
void *alloc_insn_page(void)
{
return __vmalloc_node_range(PAGE_SIZE, 1, VMALLOC_START, VMALLOC_END,
@ -91,6 +92,7 @@ void *alloc_insn_page(void)
VM_FLUSH_RESET_PERMS, NUMA_NO_NODE,
__builtin_return_address(0));
}
#endif
/* install breakpoint in text */
void __kprobes arch_arm_kprobe(struct kprobe *p)


@ -27,10 +27,10 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
fp = frame_pointer(regs);
sp = user_stack_pointer(regs);
pc = instruction_pointer(regs);
} else if (task == NULL || task == current) {
fp = (unsigned long)__builtin_frame_address(0);
sp = sp_in_global;
pc = (unsigned long)walk_stackframe;
} else if (task == current) {
fp = (unsigned long)__builtin_frame_address(1);
sp = (unsigned long)__builtin_frame_address(0);
pc = (unsigned long)__builtin_return_address(0);
} else {
/* task blocked in __switch_to */
fp = task->thread.s[0];
@ -106,15 +106,15 @@ static bool print_trace_address(void *arg, unsigned long pc)
return true;
}
void dump_backtrace(struct pt_regs *regs, struct task_struct *task,
noinline void dump_backtrace(struct pt_regs *regs, struct task_struct *task,
const char *loglvl)
{
pr_cont("%sCall Trace:\n", loglvl);
walk_stackframe(task, regs, print_trace_address, (void *)loglvl);
}
void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
{
pr_cont("%sCall Trace:\n", loglvl);
dump_backtrace(NULL, task, loglvl);
}
@ -139,7 +139,7 @@ unsigned long get_wchan(struct task_struct *task)
#ifdef CONFIG_STACKTRACE
void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
noinline void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
struct task_struct *task, struct pt_regs *regs)
{
walk_stackframe(task, regs, consume_entry, cookie);


@ -178,11 +178,6 @@ ifeq ($(ACCUMULATE_OUTGOING_ARGS), 1)
KBUILD_CFLAGS += $(call cc-option,-maccumulate-outgoing-args,)
endif
ifdef CONFIG_LTO_CLANG
KBUILD_LDFLAGS += -plugin-opt=-code-model=kernel \
-plugin-opt=-stack-alignment=$(if $(CONFIG_X86_32),4,8)
endif
# Workaround for a gcc prelease that unfortunately was shipped in a suse release
KBUILD_CFLAGS += -Wno-sign-compare
#
@ -202,7 +197,12 @@ ifdef CONFIG_RETPOLINE
endif
endif
KBUILD_LDFLAGS := -m elf_$(UTS_MACHINE)
KBUILD_LDFLAGS += -m elf_$(UTS_MACHINE)
ifdef CONFIG_LTO_CLANG
KBUILD_LDFLAGS += -plugin-opt=-code-model=kernel \
-plugin-opt=-stack-alignment=$(if $(CONFIG_X86_32),4,8)
endif
ifdef CONFIG_X86_NEED_RELOCS
LDFLAGS_vmlinux := --emit-relocs --discard-none


@ -396,10 +396,12 @@ int x86_reserve_hardware(void)
if (!atomic_inc_not_zero(&pmc_refcount)) {
mutex_lock(&pmc_reserve_mutex);
if (atomic_read(&pmc_refcount) == 0) {
if (!reserve_pmc_hardware())
if (!reserve_pmc_hardware()) {
err = -EBUSY;
else
} else {
reserve_ds_buffers();
reserve_lbr_buffers();
}
}
if (!err)
atomic_inc(&pmc_refcount);


@ -6253,7 +6253,7 @@ __init int intel_pmu_init(void)
* Check all LBR MSRs here.
* Disable LBR access if any LBR MSRs can not be accessed.
*/
if (x86_pmu.lbr_nr && !check_msr(x86_pmu.lbr_tos, 0x3UL))
if (x86_pmu.lbr_tos && !check_msr(x86_pmu.lbr_tos, 0x3UL))
x86_pmu.lbr_nr = 0;
for (i = 0; i < x86_pmu.lbr_nr; i++) {
if (!(check_msr(x86_pmu.lbr_from + i, 0xffffUL) &&


@ -658,7 +658,6 @@ static inline bool branch_user_callstack(unsigned br_sel)
void intel_pmu_lbr_add(struct perf_event *event)
{
struct kmem_cache *kmem_cache = event->pmu->task_ctx_cache;
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
if (!x86_pmu.lbr_nr)
@ -696,11 +695,6 @@ void intel_pmu_lbr_add(struct perf_event *event)
perf_sched_cb_inc(event->ctx->pmu);
if (!cpuc->lbr_users++ && !event->total_time_running)
intel_pmu_lbr_reset();
if (static_cpu_has(X86_FEATURE_ARCH_LBR) &&
kmem_cache && !cpuc->lbr_xsave &&
(cpuc->lbr_users != cpuc->lbr_pebs_users))
cpuc->lbr_xsave = kmem_cache_alloc(kmem_cache, GFP_KERNEL);
}
void release_lbr_buffers(void)
@ -722,6 +716,26 @@ void release_lbr_buffers(void)
}
}
void reserve_lbr_buffers(void)
{
struct kmem_cache *kmem_cache;
struct cpu_hw_events *cpuc;
int cpu;
if (!static_cpu_has(X86_FEATURE_ARCH_LBR))
return;
for_each_possible_cpu(cpu) {
cpuc = per_cpu_ptr(&cpu_hw_events, cpu);
kmem_cache = x86_get_pmu(cpu)->task_ctx_cache;
if (!kmem_cache || cpuc->lbr_xsave)
continue;
cpuc->lbr_xsave = kmem_cache_alloc_node(kmem_cache, GFP_KERNEL,
cpu_to_node(cpu));
}
}
void intel_pmu_lbr_del(struct perf_event *event)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);


@ -1244,6 +1244,8 @@ void reserve_ds_buffers(void);
void release_lbr_buffers(void);
void reserve_lbr_buffers(void);
extern struct event_constraint bts_constraint;
extern struct event_constraint vlbr_constraint;
@ -1393,6 +1395,10 @@ static inline void release_lbr_buffers(void)
{
}
static inline void reserve_lbr_buffers(void)
{
}
static inline int intel_pmu_init(void)
{
return 0;


@ -99,6 +99,7 @@ KVM_X86_OP_NULL(post_block)
KVM_X86_OP_NULL(vcpu_blocking)
KVM_X86_OP_NULL(vcpu_unblocking)
KVM_X86_OP_NULL(update_pi_irte)
KVM_X86_OP_NULL(start_assignment)
KVM_X86_OP_NULL(apicv_post_state_restore)
KVM_X86_OP_NULL(dy_apicv_has_pending_interrupt)
KVM_X86_OP_NULL(set_hv_timer)


@ -1352,6 +1352,7 @@ struct kvm_x86_ops {
int (*update_pi_irte)(struct kvm *kvm, unsigned int host_irq,
uint32_t guest_irq, bool set);
void (*start_assignment)(struct kvm *kvm);
void (*apicv_post_state_restore)(struct kvm_vcpu *vcpu);
bool (*dy_apicv_has_pending_interrupt)(struct kvm_vcpu *vcpu);


@ -63,6 +63,7 @@ static bool sev_es_negotiate_protocol(void)
static __always_inline void vc_ghcb_invalidate(struct ghcb *ghcb)
{
ghcb->save.sw_exit_code = 0;
memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap));
}


@ -203,8 +203,18 @@ static __always_inline struct ghcb *sev_es_get_ghcb(struct ghcb_state *state)
if (unlikely(data->ghcb_active)) {
/* GHCB is already in use - save its contents */
if (unlikely(data->backup_ghcb_active))
return NULL;
if (unlikely(data->backup_ghcb_active)) {
/*
* Backup-GHCB is also already in use. There is no way
* to continue here so just kill the machine. To make
* panic() work, mark GHCBs inactive so that messages
* can be printed out.
*/
data->ghcb_active = false;
data->backup_ghcb_active = false;
panic("Unable to handle #VC exception! GHCB and Backup GHCB are already in use");
}
/* Mark backup_ghcb active before writing to it */
data->backup_ghcb_active = true;
@ -221,24 +231,6 @@ static __always_inline struct ghcb *sev_es_get_ghcb(struct ghcb_state *state)
return ghcb;
}
static __always_inline void sev_es_put_ghcb(struct ghcb_state *state)
{
struct sev_es_runtime_data *data;
struct ghcb *ghcb;
data = this_cpu_read(runtime_data);
ghcb = &data->ghcb_page;
if (state->ghcb) {
/* Restore GHCB from Backup */
*ghcb = *state->ghcb;
data->backup_ghcb_active = false;
state->ghcb = NULL;
} else {
data->ghcb_active = false;
}
}
/* Needed in vc_early_forward_exception */
void do_early_exception(struct pt_regs *regs, int trapnr);
@ -323,31 +315,44 @@ static enum es_result vc_write_mem(struct es_em_ctxt *ctxt,
u16 d2;
u8 d1;
/* If instruction ran in kernel mode and the I/O buffer is in kernel space */
if (!user_mode(ctxt->regs) && !access_ok(target, size)) {
memcpy(dst, buf, size);
return ES_OK;
}
/*
* This function uses __put_user() independent of whether kernel or user
* memory is accessed. This works fine because __put_user() does no
* sanity checks of the pointer being accessed. All that it does is
* to report when the access failed.
*
* Also, this function runs in atomic context, so __put_user() is not
* allowed to sleep. The page-fault handler detects that it is running
* in atomic context and will not try to take mmap_sem and handle the
* fault, so additional pagefault_enable()/disable() calls are not
* needed.
*
* The access can't be done via copy_to_user() here because
* vc_write_mem() must not use string instructions to access unsafe
* memory. The reason is that MOVS is emulated by the #VC handler by
* splitting the move up into a read and a write and taking a nested #VC
* exception on whatever of them is the MMIO access. Using string
* instructions here would cause infinite nesting.
*/
switch (size) {
case 1:
memcpy(&d1, buf, 1);
if (put_user(d1, target))
if (__put_user(d1, target))
goto fault;
break;
case 2:
memcpy(&d2, buf, 2);
if (put_user(d2, target))
if (__put_user(d2, target))
goto fault;
break;
case 4:
memcpy(&d4, buf, 4);
if (put_user(d4, target))
if (__put_user(d4, target))
goto fault;
break;
case 8:
memcpy(&d8, buf, 8);
if (put_user(d8, target))
if (__put_user(d8, target))
goto fault;
break;
default:
@ -378,30 +383,43 @@ static enum es_result vc_read_mem(struct es_em_ctxt *ctxt,
u16 d2;
u8 d1;
/* If instruction ran in kernel mode and the I/O buffer is in kernel space */
if (!user_mode(ctxt->regs) && !access_ok(s, size)) {
memcpy(buf, src, size);
return ES_OK;
}
/*
* This function uses __get_user() independent of whether kernel or user
* memory is accessed. This works fine because __get_user() does no
* sanity checks of the pointer being accessed. All that it does is
* to report when the access failed.
*
* Also, this function runs in atomic context, so __get_user() is not
* allowed to sleep. The page-fault handler detects that it is running
* in atomic context and will not try to take mmap_sem and handle the
* fault, so additional pagefault_enable()/disable() calls are not
* needed.
*
* The access can't be done via copy_from_user() here because
* vc_read_mem() must not use string instructions to access unsafe
* memory. The reason is that MOVS is emulated by the #VC handler by
* splitting the move up into a read and a write and taking a nested #VC
* exception on whatever of them is the MMIO access. Using string
* instructions here would cause infinite nesting.
*/
switch (size) {
case 1:
if (get_user(d1, s))
if (__get_user(d1, s))
goto fault;
memcpy(buf, &d1, 1);
break;
case 2:
if (get_user(d2, s))
if (__get_user(d2, s))
goto fault;
memcpy(buf, &d2, 2);
break;
case 4:
if (get_user(d4, s))
if (__get_user(d4, s))
goto fault;
memcpy(buf, &d4, 4);
break;
case 8:
if (get_user(d8, s))
if (__get_user(d8, s))
goto fault;
memcpy(buf, &d8, 8);
break;
@ -461,6 +479,29 @@ static enum es_result vc_slow_virt_to_phys(struct ghcb *ghcb, struct es_em_ctxt
/* Include code shared with pre-decompression boot stage */
#include "sev-shared.c"
static __always_inline void sev_es_put_ghcb(struct ghcb_state *state)
{
struct sev_es_runtime_data *data;
struct ghcb *ghcb;
data = this_cpu_read(runtime_data);
ghcb = &data->ghcb_page;
if (state->ghcb) {
/* Restore GHCB from Backup */
*ghcb = *state->ghcb;
data->backup_ghcb_active = false;
state->ghcb = NULL;
} else {
/*
* Invalidate the GHCB so a VMGEXIT instruction issued
* from userspace won't appear to be valid.
*/
vc_ghcb_invalidate(ghcb);
data->ghcb_active = false;
}
}
void noinstr __sev_es_nmi_complete(void)
{
struct ghcb_state state;
@ -1255,6 +1296,10 @@ static __always_inline void vc_forward_exception(struct es_em_ctxt *ctxt)
case X86_TRAP_UD:
exc_invalid_op(ctxt->regs);
break;
case X86_TRAP_PF:
write_cr2(ctxt->fi.cr2);
exc_page_fault(ctxt->regs, error_code);
break;
case X86_TRAP_AC:
exc_alignment_check(ctxt->regs, error_code);
break;
@ -1284,7 +1329,6 @@ static __always_inline bool on_vc_fallback_stack(struct pt_regs *regs)
*/
DEFINE_IDTENTRY_VC_SAFE_STACK(exc_vmm_communication)
{
struct sev_es_runtime_data *data = this_cpu_read(runtime_data);
irqentry_state_t irq_state;
struct ghcb_state state;
struct es_em_ctxt ctxt;
@ -1310,16 +1354,6 @@ DEFINE_IDTENTRY_VC_SAFE_STACK(exc_vmm_communication)
*/
ghcb = sev_es_get_ghcb(&state);
if (!ghcb) {
/*
* Mark GHCBs inactive so that panic() is able to print the
* message.
*/
data->ghcb_active = false;
data->backup_ghcb_active = false;
panic("Unable to handle #VC exception! GHCB and Backup GHCB are already in use");
}
vc_ghcb_invalidate(ghcb);
result = vc_init_em_ctxt(&ctxt, regs, error_code);

View File

@ -127,6 +127,9 @@ static inline void signal_compat_build_tests(void)
BUILD_BUG_ON(offsetof(siginfo_t, si_addr) != 0x10);
BUILD_BUG_ON(offsetof(compat_siginfo_t, si_addr) != 0x0C);
BUILD_BUG_ON(offsetof(siginfo_t, si_trapno) != 0x18);
BUILD_BUG_ON(offsetof(compat_siginfo_t, si_trapno) != 0x10);
BUILD_BUG_ON(offsetof(siginfo_t, si_addr_lsb) != 0x18);
BUILD_BUG_ON(offsetof(compat_siginfo_t, si_addr_lsb) != 0x10);
@ -138,8 +141,10 @@ static inline void signal_compat_build_tests(void)
BUILD_BUG_ON(offsetof(siginfo_t, si_pkey) != 0x20);
BUILD_BUG_ON(offsetof(compat_siginfo_t, si_pkey) != 0x14);
BUILD_BUG_ON(offsetof(siginfo_t, si_perf) != 0x18);
BUILD_BUG_ON(offsetof(compat_siginfo_t, si_perf) != 0x10);
BUILD_BUG_ON(offsetof(siginfo_t, si_perf_data) != 0x18);
BUILD_BUG_ON(offsetof(siginfo_t, si_perf_type) != 0x20);
BUILD_BUG_ON(offsetof(compat_siginfo_t, si_perf_data) != 0x10);
BUILD_BUG_ON(offsetof(compat_siginfo_t, si_perf_type) != 0x14);
CHECK_CSI_OFFSET(_sigpoll);
CHECK_CSI_SIZE (_sigpoll, 2*sizeof(int));
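The BUILD_BUG_ON(offsetof(...)) checks above are compile-time layout assertions; a rough standalone analogue in C11 looks as follows (the struct and messages are made up for illustration):

#include <stddef.h>

struct demo {
	char tag;
	int  value;
};

/* Fails the build, not the run, if the layout ever drifts. */
_Static_assert(offsetof(struct demo, tag) == 0, "tag must stay first");
_Static_assert(sizeof(struct demo) >= sizeof(int) + 1, "unexpected packing");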

View File

@ -5111,7 +5111,7 @@ done:
return rc;
}
int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len, int emulation_type)
{
int rc = X86EMUL_CONTINUE;
int mode = ctxt->mode;
@ -5322,7 +5322,8 @@ done_prefixes:
ctxt->execute = opcode.u.execute;
if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD)))
if (unlikely(emulation_type & EMULTYPE_TRAP_UD) &&
likely(!(ctxt->d & EmulateOnUD)))
return EMULATION_FAILED;
if (unlikely(ctxt->d &

View File

@ -1172,6 +1172,7 @@ void kvm_hv_invalidate_tsc_page(struct kvm *kvm)
{
struct kvm_hv *hv = to_kvm_hv(kvm);
u64 gfn;
int idx;
if (hv->hv_tsc_page_status == HV_TSC_PAGE_BROKEN ||
hv->hv_tsc_page_status == HV_TSC_PAGE_UNSET ||
@ -1190,9 +1191,16 @@ void kvm_hv_invalidate_tsc_page(struct kvm *kvm)
gfn = hv->hv_tsc_page >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;
hv->tsc_ref.tsc_sequence = 0;
/*
* Take the srcu lock as memslots will be accessed to check the gfn
* cache generation against the memslots generation.
*/
idx = srcu_read_lock(&kvm->srcu);
if (kvm_write_guest(kvm, gfn_to_gpa(gfn),
&hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence)))
hv->hv_tsc_page_status = HV_TSC_PAGE_BROKEN;
srcu_read_unlock(&kvm->srcu, idx);
out_unlock:
mutex_unlock(&hv->hv_lock);
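For reference, the SRCU usage introduced above reduces to a lock/deref/unlock triple around anything that may consult memslots; a sketch using the same kernel APIs, with buf/len standing in for the real arguments:

int idx, ret;

idx = srcu_read_lock(&kvm->srcu);           /* pin the memslot generation */
ret = kvm_write_guest(kvm, gfn_to_gpa(gfn), buf, len);
srcu_read_unlock(&kvm->srcu, idx);          /* pass the same index back */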

View File

@ -314,7 +314,6 @@ struct x86_emulate_ctxt {
int interruptibility;
bool perm_ok; /* do not check permissions if true */
bool ud; /* inject an #UD if host doesn't support insn */
bool tf; /* TF value before instruction (after for syscall/sysret) */
bool have_exception;
@ -491,7 +490,7 @@ enum x86_intercept {
#define X86EMUL_MODE_HOST X86EMUL_MODE_PROT64
#endif
int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len);
int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len, int emulation_type);
bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt);
#define EMULATION_FAILED -1
#define EMULATION_OK 0

View File

@ -1598,11 +1598,19 @@ static void __kvm_wait_lapic_expire(struct kvm_vcpu *vcpu)
guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
apic->lapic_timer.advance_expire_delta = guest_tsc - tsc_deadline;
if (lapic_timer_advance_dynamic) {
adjust_lapic_timer_advance(vcpu, apic->lapic_timer.advance_expire_delta);
/*
* If the timer fired early, reread the TSC to account for the
* overhead of the above adjustment to avoid waiting longer
* than is necessary.
*/
if (guest_tsc < tsc_deadline)
guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
}
if (guest_tsc < tsc_deadline)
__wait_lapic_expire(vcpu, tsc_deadline - guest_tsc);
if (lapic_timer_advance_dynamic)
adjust_lapic_timer_advance(vcpu, apic->lapic_timer.advance_expire_delta);
}
void kvm_wait_lapic_expire(struct kvm_vcpu *vcpu)
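The control flow added in the hunk above can be restated in isolation: tune first, then re-sample the clock so the cost of tuning is not added to the remaining wait. All names in this sketch are hypothetical:

extern unsigned long long read_clock(void);
extern void adjust_advance(long long observed_delta);
extern void busy_wait(unsigned long long cycles);

static void wait_until(unsigned long long deadline, int dynamic_tuning)
{
	unsigned long long now = read_clock();

	if (dynamic_tuning) {
		adjust_advance((long long)(now - deadline));
		/* The adjustment itself burned cycles; re-sample so the
		 * wait below is not overestimated. */
		if (now < deadline)
			now = read_clock();
	}
	if (now < deadline)
		busy_wait(deadline - now);
}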
@ -1661,7 +1669,7 @@ static void apic_timer_expired(struct kvm_lapic *apic, bool from_timer_fn)
}
atomic_inc(&apic->lapic_timer.pending);
kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
kvm_make_request(KVM_REQ_UNBLOCK, vcpu);
if (from_timer_fn)
kvm_vcpu_kick(vcpu);
}

View File

@ -1192,9 +1192,9 @@ bool kvm_tdp_mmu_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
}
/*
* Remove write access from all the SPTEs mapping GFNs [start, end). If
* skip_4k is set, SPTEs that map 4k pages will not be write-protected.
* Returns true if an SPTE has been changed and the TLBs need to be flushed.
* Remove write access from all SPTEs at or above min_level that map GFNs
* [start, end). Returns true if an SPTE has been changed and the TLBs need to
* be flushed.
*/
static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
gfn_t start, gfn_t end, int min_level)

View File

@ -28,10 +28,8 @@
#include "svm.h"
/* enable / disable AVIC */
int avic;
#ifdef CONFIG_X86_LOCAL_APIC
module_param(avic, int, S_IRUGO);
#endif
bool avic;
module_param(avic, bool, S_IRUGO);
#define SVM_AVIC_DOORBELL 0xc001011b
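With the int-to-bool change above, the module parameter core now accepts only boolean tokens for 'avic'. A generic sketch of such a parameter (names hypothetical):

static bool my_feature;                      /* defaults to false */
module_param(my_feature, bool, S_IRUGO);     /* world-readable via sysfs */
MODULE_PARM_DESC(my_feature, "enable the illustrative feature");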

View File

@ -1010,9 +1010,7 @@ static __init int svm_hardware_setup(void)
}
if (avic) {
if (!npt_enabled ||
!boot_cpu_has(X86_FEATURE_AVIC) ||
!IS_ENABLED(CONFIG_X86_LOCAL_APIC)) {
if (!npt_enabled || !boot_cpu_has(X86_FEATURE_AVIC)) {
avic = false;
} else {
pr_info("AVIC enabled\n");

View File

@ -480,7 +480,7 @@ extern struct kvm_x86_nested_ops svm_nested_ops;
#define VMCB_AVIC_APIC_BAR_MASK 0xFFFFFFFFFF000ULL
extern int avic;
extern bool avic;
static inline void avic_update_vapic_bar(struct vcpu_svm *svm, u64 data)
{

View File

@ -90,8 +90,7 @@ static inline bool cpu_has_vmx_preemption_timer(void)
static inline bool cpu_has_vmx_posted_intr(void)
{
return IS_ENABLED(CONFIG_X86_LOCAL_APIC) &&
vmcs_config.pin_based_exec_ctrl & PIN_BASED_POSTED_INTR;
return vmcs_config.pin_based_exec_ctrl & PIN_BASED_POSTED_INTR;
}
static inline bool cpu_has_load_ia32_efer(void)

View File

@ -237,6 +237,20 @@ bool pi_has_pending_interrupt(struct kvm_vcpu *vcpu)
}
/*
* Bail out of the block loop if the VM has an assigned
* device, but the blocking vCPU didn't reconfigure the
* PI.NV to the wakeup vector, i.e. the assigned device
* came along after the initial check in pi_pre_block().
*/
void vmx_pi_start_assignment(struct kvm *kvm)
{
if (!irq_remapping_cap(IRQ_POSTING_CAP))
return;
kvm_make_all_cpus_request(kvm, KVM_REQ_UNBLOCK);
}
/*
* pi_update_irte - set IRTE for Posted-Interrupts
*

View File

@ -95,5 +95,6 @@ void __init pi_init_cpu(int cpu);
bool pi_has_pending_interrupt(struct kvm_vcpu *vcpu);
int pi_update_irte(struct kvm *kvm, unsigned int host_irq, uint32_t guest_irq,
bool set);
void vmx_pi_start_assignment(struct kvm *kvm);
#endif /* __KVM_X86_VMX_POSTED_INTR_H */

View File

@ -4843,7 +4843,7 @@ static int handle_exception_nmi(struct kvm_vcpu *vcpu)
struct vcpu_vmx *vmx = to_vmx(vcpu);
struct kvm_run *kvm_run = vcpu->run;
u32 intr_info, ex_no, error_code;
unsigned long cr2, rip, dr6;
unsigned long cr2, dr6;
u32 vect_info;
vect_info = vmx->idt_vectoring_info;
@ -4933,8 +4933,7 @@ static int handle_exception_nmi(struct kvm_vcpu *vcpu)
vmx->vcpu.arch.event_exit_inst_len =
vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
kvm_run->exit_reason = KVM_EXIT_DEBUG;
rip = kvm_rip_read(vcpu);
kvm_run->debug.arch.pc = vmcs_readl(GUEST_CS_BASE) + rip;
kvm_run->debug.arch.pc = kvm_get_linear_rip(vcpu);
kvm_run->debug.arch.exception = ex_no;
break;
case AC_VECTOR:
@ -7721,6 +7720,7 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
.nested_ops = &vmx_nested_ops,
.update_pi_irte = pi_update_irte,
.start_assignment = vmx_pi_start_assignment,
#ifdef CONFIG_X86_64
.set_hv_timer = vmx_set_hv_timer,

View File

@ -3105,6 +3105,8 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
st->preempted & KVM_VCPU_FLUSH_TLB);
if (xchg(&st->preempted, 0) & KVM_VCPU_FLUSH_TLB)
kvm_vcpu_flush_tlb_guest(vcpu);
} else {
st->preempted = 0;
}
vcpu->arch.st.preempted = 0;
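The xchg() in the branch above is a fetch-and-clear: the flags are read and zeroed in one atomic step, so a flag set concurrently cannot be lost between the test and the clear. A userspace C11 sketch of the same idiom (constants and names are illustrative):

#include <stdatomic.h>

#define FLUSH_TLB 0x1u                  /* stand-in for KVM_VCPU_FLUSH_TLB */

static _Atomic unsigned int preempted;

static void ack_preempted(void)
{
	unsigned int old = atomic_exchange(&preempted, 0);

	if (old & FLUSH_TLB)
		;  /* the TLB flush would go here */
}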
@ -7226,6 +7228,11 @@ static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
BUILD_BUG_ON(HF_SMM_MASK != X86EMUL_SMM_MASK);
BUILD_BUG_ON(HF_SMM_INSIDE_NMI_MASK != X86EMUL_SMM_INSIDE_NMI_MASK);
ctxt->interruptibility = 0;
ctxt->have_exception = false;
ctxt->exception.vector = -1;
ctxt->perm_ok = false;
init_decode_cache(ctxt);
vcpu->arch.emulate_regs_need_sync_from_vcpu = false;
}
@ -7561,14 +7568,7 @@ int x86_decode_emulated_instruction(struct kvm_vcpu *vcpu, int emulation_type,
kvm_vcpu_check_breakpoint(vcpu, &r))
return r;
ctxt->interruptibility = 0;
ctxt->have_exception = false;
ctxt->exception.vector = -1;
ctxt->perm_ok = false;
ctxt->ud = emulation_type & EMULTYPE_TRAP_UD;
r = x86_decode_insn(ctxt, insn, insn_len);
r = x86_decode_insn(ctxt, insn, insn_len, emulation_type);
trace_kvm_emulate_insn_start(vcpu);
++vcpu->stat.insn_emulation;
@ -8360,6 +8360,9 @@ static void kvm_sched_yield(struct kvm_vcpu *vcpu, unsigned long dest_id)
vcpu->stat.directed_yield_attempted++;
if (single_task_running())
goto no_yield;
rcu_read_lock();
map = rcu_dereference(vcpu->kvm->arch.apic_map);
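The two lines added above are the front half of the canonical RCU read side; the dereferenced pointer stays valid only until the matching unlock. A sketch of the full shape (the body is an assumption about how the map is then used, not part of this hunk):

rcu_read_lock();
map = rcu_dereference(vcpu->kvm->arch.apic_map);
if (map && dest_id <= map->max_apic_id && map->phys_map[dest_id])
	target = map->phys_map[dest_id]->vcpu;
rcu_read_unlock();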
@ -9496,7 +9499,7 @@ static int vcpu_run(struct kvm_vcpu *vcpu)
if (r <= 0)
break;
kvm_clear_request(KVM_REQ_PENDING_TIMER, vcpu);
kvm_clear_request(KVM_REQ_UNBLOCK, vcpu);
if (kvm_cpu_has_pending_timer(vcpu))
kvm_inject_pending_timer_irqs(vcpu);
@ -10115,8 +10118,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
kvm_update_dr7(vcpu);
if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
vcpu->arch.singlestep_rip = kvm_rip_read(vcpu) +
get_segment_base(vcpu, VCPU_SREG_CS);
vcpu->arch.singlestep_rip = kvm_get_linear_rip(vcpu);
/*
* Trigger an rflags update that will inject or remove the trace
@ -11499,7 +11501,8 @@ bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu)
void kvm_arch_start_assignment(struct kvm *kvm)
{
atomic_inc(&kvm->arch.assigned_device_count);
if (atomic_inc_return(&kvm->arch.assigned_device_count) == 1)
static_call_cond(kvm_x86_start_assignment)(kvm);
}
EXPORT_SYMBOL_GPL(kvm_arch_start_assignment);
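The atomic_inc_return() == 1 test above fires the vendor hook exactly once, on the 0 to 1 transition of the assigned-device count. The same idiom in isolation (on_first_user() is hypothetical):

/* Run the hook only for the caller that moves the count off zero;
 * later increments observe a value greater than 1 and skip it. */
if (atomic_inc_return(&count) == 1)
	on_first_user();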

View File

@ -1273,16 +1273,16 @@ asmlinkage __visible void __init xen_start_kernel(void)
/* Get mfn list */
xen_build_dynamic_phys_to_machine();
/* Work out if we support NX */
get_cpu_cap(&boot_cpu_data);
x86_configure_nx();
/*
* Set up kernel GDT and segment registers, mainly so that
* -fstack-protector code can be executed.
*/
xen_setup_gdt(0);
/* Work out if we support NX */
get_cpu_cap(&boot_cpu_data);
x86_configure_nx();
/* Determine virtual and physical address sizes */
get_cpu_address_sizes(&boot_cpu_data);

Some files were not shown because too many files have changed in this diff.