Merge tag 'kvm-s390-next-5.7-1' of git://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux into HEAD

KVM: s390: Features and Enhancements for 5.7 part1

1. Allow disabling the GISA
2. Protected virtual machines
  Protected VMs (PVMs) are KVM VMs where KVM can no longer access the
  VM's state, such as guest memory and guest registers. Instead, the
  PVMs are mostly managed by a new entity called the Ultravisor (UV),
  which provides an API through which KVM and the PVM can request
  management actions.

  PVMs are encrypted at rest and protected from hypervisor access
  while running.  They switch from normal operation into protected
  mode, so the standard boot process can still be used to load an
  encrypted blob and then move it into protected mode.

  Rebooting is only possible by passing through unprotected/normal
  mode and switching to protected mode again.

  One mm-related patch will go via Andrew's mm tree (mm/gup/writeback:
  add callbacks for inaccessible pages).
Paolo Bonzini 2020-03-16 18:19:34 +01:00
commit 1c482452d5
362 changed files with 6434 additions and 2589 deletions


@ -3795,6 +3795,11 @@
before loading.
See Documentation/admin-guide/blockdev/ramdisk.rst.
prot_virt= [S390] enable hosting protected virtual machines
isolated from the hypervisor (if hardware supports
that).
Format: <bool>
psi= [KNL] Enable or disable pressure stall information
tracking.
Format: <bool>


@ -1,9 +1,10 @@
Ilitek ILI210x/ILI2117/ILI251x touchscreen controller
Ilitek ILI210x/ILI2117/ILI2120/ILI251x touchscreen controller
Required properties:
- compatible:
ilitek,ili210x for ILI210x
ilitek,ili2117 for ILI2117
ilitek,ili2120 for ILI2120
ilitek,ili251x for ILI251x
- reg: The I2C address of the device


@ -71,9 +71,13 @@ b) Example for device tree::
ipmb@10 {
compatible = "ipmb-dev";
reg = <0x10>;
i2c-protocol;
};
};
If transmission of the data is to be done using raw I2C block transfers instead of SMBus,
then "i2c-protocol" needs to be defined as above.
2) Manually from Linux::
modprobe ipmb-dev-int


@ -2110,7 +2110,8 @@ Errors:
====== ============================================================
 ENOENT   no such register
 EINVAL   invalid register ID, or no such register
 EINVAL   invalid register ID, or no such register or used with VMs in
protected virtualization mode on s390
 EPERM    (arm64) register access not allowed before vcpu finalization
====== ============================================================
@ -2545,7 +2546,8 @@ Errors include:
======== ============================================================
 ENOENT   no such register
 EINVAL   invalid register ID, or no such register
 EINVAL   invalid register ID, or no such register or used with VMs in
protected virtualization mode on s390
 EPERM    (arm64) register access not allowed before vcpu finalization
======== ============================================================
@ -4635,6 +4637,54 @@ the clear cpu reset definition in the POP. However, the cpu is not put
into ESA mode. This reset is a superset of the initial reset.
4.125 KVM_S390_PV_COMMAND
-------------------------
:Capability: KVM_CAP_S390_PROTECTED
:Architectures: s390
:Type: vm ioctl
:Parameters: struct kvm_pv_cmd
:Returns: 0 on success, < 0 on error
::
struct kvm_pv_cmd {
__u32 cmd; /* Command to be executed */
__u16 rc; /* Ultravisor return code */
__u16 rrc; /* Ultravisor return reason code */
__u64 data; /* Data or address */
__u32 flags; /* flags for future extensions. Must be 0 for now */
__u32 reserved[3];
};
cmd values:
KVM_PV_ENABLE
Allocate memory and register the VM with the Ultravisor, thereby
donating memory to the Ultravisor that will become inaccessible to
KVM. All existing CPUs are converted to protected ones. After this
command has succeeded, any CPU added via hotplug will become
protected during its creation as well.
KVM_PV_DISABLE
Deregister the VM from the Ultravisor and reclaim the memory that
had been donated to the Ultravisor, making it usable by the kernel
again. All registered VCPUs are converted back to non-protected
ones.
KVM_PV_VM_SET_SEC_PARMS
Pass the image header from VM memory to the Ultravisor in
preparation of image unpacking and verification.
KVM_PV_VM_UNPACK
Unpack (protect and decrypt) a page of the encrypted boot image.
KVM_PV_VM_VERIFY
Verify the integrity of the unpacked image. Only if this succeeds is
KVM allowed to start protected VCPUs.
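
A minimal host-side sketch of driving this ioctl, assuming a VM file
descriptor ``vm_fd`` obtained from KVM_CREATE_VM and assuming the
KVM_PV_* values and struct kvm_pv_cmd above are exported through
<linux/kvm.h>; error handling is trimmed and the later unpack/verify
steps are only hinted at in comments::

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Convert an existing VM (vm_fd from KVM_CREATE_VM) into a protected VM. */
static int make_vm_protected(int vm_fd)
{
        struct kvm_pv_cmd cmd;

        memset(&cmd, 0, sizeof(cmd));   /* flags and reserved[] must be 0 */
        cmd.cmd = KVM_PV_ENABLE;        /* donate memory, convert all VCPUs */

        if (ioctl(vm_fd, KVM_S390_PV_COMMAND, &cmd) < 0) {
                fprintf(stderr, "KVM_PV_ENABLE failed: rc=0x%x rrc=0x%x\n",
                        cmd.rc, cmd.rrc);
                return -1;
        }

        /*
         * A real loader would continue with KVM_PV_VM_SET_SEC_PARMS,
         * one KVM_PV_VM_UNPACK per encrypted page, and finally
         * KVM_PV_VM_VERIFY before starting any protected VCPU.
         */
        return 0;
}

When the ioctl fails, the rc and rrc fields carry the Ultravisor return
and reason codes for diagnosis.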
5. The kvm_run structure
========================
@ -6025,3 +6075,14 @@ Architectures: s390
This capability indicates that the KVM_S390_NORMAL_RESET and
KVM_S390_CLEAR_RESET ioctls are available.
8.23 KVM_CAP_S390_PROTECTED
Architecture: s390
This capability indicates that the Ultravisor has been initialized and
KVM can therefore start protected VMs.
This capability governs the KVM_S390_PV_COMMAND ioctl and the
KVM_MP_STATE_LOAD MP_STATE. KVM_SET_MP_STATE can fail for protected
guests when the state change is invalid.
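
A short sketch of how userspace might probe for this capability before
issuing the command described in 4.125; KVM_CHECK_EXTENSION is the
standard KVM mechanism, while the helper name and the use of the
/dev/kvm system fd are illustrative::

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Returns 1 if protected VMs can be started on this host, 0 otherwise. */
static int host_supports_protected_vms(void)
{
        int kvm_fd, ret;

        kvm_fd = open("/dev/kvm", O_RDWR);
        if (kvm_fd < 0)
                return 0;

        /* Non-zero only if the Ultravisor was initialized at boot. */
        ret = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_S390_PROTECTED);
        close(kvm_fd);
        return ret > 0;
}

Only when this returns non-zero should the KVM_S390_PV_COMMAND ioctl
from section 4.125 be attempted.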


@ -108,16 +108,9 @@ Groups:
mask or unmask the adapter, as specified in mask
KVM_S390_IO_ADAPTER_MAP
perform a gmap translation for the guest address provided in addr,
pin a userspace page for the translated address and add it to the
list of mappings
.. note:: A new mapping will be created unconditionally; therefore,
the calling code should avoid making duplicate mappings.
This is now a no-op. The mapping is purely done by the irq route.
KVM_S390_IO_ADAPTER_UNMAP
release a userspace page for the translated address specified in addr
from the list of mappings
This is now a no-op. The mapping is purely done by the irq route.
KVM_DEV_FLIC_AISM
modify the adapter-interruption-suppression mode for a given isc if the


@ -18,6 +18,8 @@ KVM
nested-vmx
ppc-pv
s390-diag
s390-pv
s390-pv-boot
timekeeping
vcpu-requests


@ -0,0 +1,84 @@
.. SPDX-License-Identifier: GPL-2.0
======================================
s390 (IBM Z) Boot/IPL of Protected VMs
======================================
Summary
-------
The memory of Protected Virtual Machines (PVMs) is not accessible to
I/O or the hypervisor. In those cases where the hypervisor needs to
access the memory of a PVM, that memory must be made accessible.
Memory made accessible to the hypervisor will be encrypted. See
:doc:`s390-pv` for details.
On IPL (boot) a small plaintext bootloader is started, which provides
information about the encrypted components and necessary metadata to
KVM to decrypt the protected virtual machine.
Based on this data, KVM will make the protected virtual machine known
to the Ultravisor (UV) and instruct it to secure the memory of the
PVM, decrypt the components and verify the data and address list
hashes, to ensure integrity. Afterwards KVM can run the PVM via the
SIE instruction which the UV will intercept and execute on KVM's
behalf.
As the guest image is just like an opaque kernel image that does the
switch into PV mode itself, the user can load encrypted guest
executables and data via every available method (network, dasd, scsi,
direct kernel, ...) without the need to change the boot process.
Diag308
-------
This diagnose instruction is the basic mechanism to handle IPL and
related operations for virtual machines. The VM can set and retrieve
IPL information blocks, that specify the IPL method/devices and
request VM memory and subsystem resets, as well as IPLs.
For PVMs this concept has been extended with new subcodes:
Subcode 8: Set an IPL Information Block of type 5 (information block
for PVMs)
Subcode 9: Store the saved block in guest memory
Subcode 10: Move into Protected Virtualization mode
The new PV load-device-specific-parameters field specifies all data
that is necessary to move into PV mode, as sketched below:
* PV Header origin
* PV Header length
* List of Components composed of
* AES-XTS Tweak prefix
* Origin
* Size
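
As a rough reading aid, the data listed above could be pictured as the
C layout below. This is purely an illustrative sketch; it is not the
architected diag 308 type-5 block format, and all struct and field
names are invented::

#include <stdint.h>

/* Illustrative only -- one entry per encrypted component. */
struct pv_component {
        uint64_t tweak_prefix;  /* AES-XTS tweak prefix for this component */
        uint64_t origin;        /* guest-physical start address */
        uint64_t size;          /* length in bytes */
};

/* Illustrative only -- the PV load-device-specific parameters. */
struct pv_load_parameters {
        uint64_t pv_header_origin;        /* where the PV header resides */
        uint64_t pv_header_length;
        struct pv_component components[]; /* kernel, parmfile, initrd, ... */
};

The Ultravisor consumes this information to locate the PV header and to
decrypt and place each component.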
The PV header contains the keys and hashes, which the UV will use to
decrypt and verify the PV, as well as control flags and a start PSW.
The components are for instance an encrypted kernel, kernel parameters
and initrd. The components are decrypted by the UV.
After the initial import of the encrypted data, all defined pages will
contain the guest content. All non-specified pages will start out as
zero pages on first access.
When running in protected virtualization mode, some subcodes will result in
exceptions or return error codes.
Subcodes 4 and 7, which specify operations that do not clear the guest
memory, will result in specification exceptions. This is because the
UV will clear all memory when a secure VM is removed, and therefore
non-clearing IPL subcodes are not allowed.
Subcodes 8, 9, 10 will result in specification exceptions.
Re-IPL into protected mode is only possible via a detour into
non-protected mode.
Keys
----
Every CEC will have a unique public key to enable tooling to build
encrypted images.
See `s390-tools <https://github.com/ibm-s390-tools/s390-tools/>`_
for the tooling.


@ -0,0 +1,116 @@
.. SPDX-License-Identifier: GPL-2.0
=========================================
s390 (IBM Z) Ultravisor and Protected VMs
=========================================
Summary
-------
Protected virtual machines (PVM) are KVM VMs that do not allow KVM to
access VM state like guest memory or guest registers. Instead, the
PVMs are mostly managed by a new entity called Ultravisor (UV). The UV
provides an API that can be used by PVMs and KVM to request management
actions.
Each guest starts in non-protected mode and then may make a request to
transition into protected mode. On transition, KVM registers the guest
and its VCPUs with the Ultravisor and prepares everything for running
it.
The Ultravisor will secure and decrypt the guest's boot memory
(i.e. kernel/initrd). It will safeguard state changes like VCPU
starts/stops and injected interrupts while the guest is running.
As access to the guest's state, such as the SIE state description, is
normally needed to be able to run a VM, some changes have been made in
the behavior of the SIE instruction. A new format 4 state description
has been introduced, where some fields have different meanings for a
PVM. SIE exits are minimized as much as possible to improve speed and
reduce exposed guest state.
Interrupt injection
-------------------
Interrupt injection is safeguarded by the Ultravisor. As KVM doesn't
have access to the VCPUs' lowcores, injection is handled via the
format 4 state description.
Machine check, external, IO and restart interruptions each can be
injected on SIE entry via a bit in the interrupt injection control
field (offset 0x54). If the guest cpu is not enabled for the interrupt
at the time of injection, a validity interception is recognized. The
format 4 state description contains fields in the interception data
block where data associated with the interrupt can be transported.
Program and Service Call exceptions have another layer of
safeguarding; they can only be injected for instructions that have
been intercepted into KVM. The exceptions need to be a valid outcome
of an instruction emulation by KVM, e.g. we can never inject an
addressing exception as they are reported by SIE since KVM has no
access to the guest memory.
Mask notification interceptions
-------------------------------
KVM cannot intercept lctl(g) and lpsw(e) anymore in order to be
notified when a PVM enables a certain class of interrupt. As a
replacement, two new interception codes have been introduced: One
indicating that the contents of CRs 0, 6, or 14 have been changed,
indicating different interruption subclasses; and one indicating that
PSW bit 13 has been changed, indicating that a machine check
intervention was requested and those are now enabled.
Instruction emulation
---------------------
With the format 4 state description for PVMs, the SIE instruction already
interprets more instructions than it does with format 2. It is not able
to interpret every instruction, but needs to hand some tasks to KVM;
therefore, the SIE and the ultravisor safeguard emulation inputs and outputs.
The control structures associated with SIE provide the Secure
Instruction Data Area (SIDA), the Interception Parameters (IP) and the
Secure Interception General Register Save Area. Guest GRs and most of
the instruction data, such as I/O data structures, are filtered.
Instruction data is copied to and from the SIDA when needed. Guest
GRs are put into / retrieved from the Secure Interception General
Register Save Area.
Only GR values needed to emulate an instruction will be copied into this
save area and the real register numbers will be hidden.
The Interception Parameters state description field still contains
the bytes of the instruction text, but with pre-set register values
instead of the actual ones. I.e. each instruction always uses the same
instruction text, in order not to leak guest instruction text.
This also implies that the register content that a guest had in r<n>
may be in r<m> from the hypervisor's point of view.
The Secure Instruction Data Area contains instruction storage
data. Instruction data, i.e. data being referenced by an instruction
like the SCCB for sclp, is moved via the SIDA. When an instruction is
intercepted, the SIE will only allow data and program interrupts for
this instruction to be moved to the guest via the two data areas
discussed before. Other data is either ignored or results in validity
interceptions.
Instruction emulation interceptions
-----------------------------------
There are two types of SIE secure instruction intercepts: the normal
and the notification type. Normal secure instruction intercepts will
make the guest pending for instruction completion of the intercepted
instruction type, i.e. on SIE entry it is attempted to complete
emulation of the instruction with the data provided by KVM. That might
be a program exception or instruction completion.
The notification type intercepts inform KVM about guest environment
changes due to guest instruction interpretation. Such an interception
is recognized, for example, for the store prefix instruction to provide
the new lowcore location. On SIE reentry, any KVM data in the data areas
is ignored and execution continues as if the guest instruction had
completed. For that reason KVM is not allowed to inject a program
interrupt.
Links
-----
`KVM Forum 2019 presentation <https://static.sched.com/hosted_files/kvmforum2019/3b/ibm_protected_vms_s390x.pdf>`_


@ -2796,11 +2796,11 @@ F: drivers/block/aoe/
ATHEROS 71XX/9XXX GPIO DRIVER
M: Alban Bedel <albeu@free.fr>
S: Maintained
W: https://github.com/AlbanBedel/linux
T: git git://github.com/AlbanBedel/linux
S: Maintained
F: drivers/gpio/gpio-ath79.c
F: Documentation/devicetree/bindings/gpio/gpio-ath79.txt
F: drivers/gpio/gpio-ath79.c
ATHEROS 71XX/9XXX USB PHY DRIVER
M: Alban Bedel <albeu@free.fr>
@ -3422,8 +3422,8 @@ BROADCOM BRCMSTB GPIO DRIVER
M: Gregory Fong <gregory.0xf0@gmail.com>
L: bcm-kernel-feedback-list@broadcom.com
S: Supported
F: drivers/gpio/gpio-brcmstb.c
F: Documentation/devicetree/bindings/gpio/brcm,brcmstb-gpio.txt
F: drivers/gpio/gpio-brcmstb.c
BROADCOM BRCMSTB I2C DRIVER
M: Kamal Dasu <kdasu.kdev@gmail.com>
@ -3481,8 +3481,8 @@ BROADCOM KONA GPIO DRIVER
M: Ray Jui <rjui@broadcom.com>
L: bcm-kernel-feedback-list@broadcom.com
S: Supported
F: drivers/gpio/gpio-bcm-kona.c
F: Documentation/devicetree/bindings/gpio/brcm,kona-gpio.txt
F: drivers/gpio/gpio-bcm-kona.c
BROADCOM NETXTREME-E ROCE DRIVER
M: Selvin Xavier <selvin.xavier@broadcom.com>
@ -3597,8 +3597,8 @@ F: sound/pci/bt87x.c
BT8XXGPIO DRIVER
M: Michael Buesch <m@bues.ch>
W: http://bu3sch.de/btgpio.php
S: Maintained
W: http://bu3sch.de/btgpio.php
F: drivers/gpio/gpio-bt8xx.c
BTRFS FILE SYSTEM
@ -7143,18 +7143,18 @@ GPIO SUBSYSTEM
M: Linus Walleij <linus.walleij@linaro.org>
M: Bartosz Golaszewski <bgolaszewski@baylibre.com>
L: linux-gpio@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-gpio.git
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-gpio.git
F: Documentation/ABI/obsolete/sysfs-gpio
F: Documentation/ABI/testing/gpio-cdev
F: Documentation/admin-guide/gpio/
F: Documentation/devicetree/bindings/gpio/
F: Documentation/driver-api/gpio/
F: Documentation/admin-guide/gpio/
F: Documentation/ABI/testing/gpio-cdev
F: Documentation/ABI/obsolete/sysfs-gpio
F: drivers/gpio/
F: include/asm-generic/gpio.h
F: include/linux/gpio/
F: include/linux/gpio.h
F: include/linux/of_gpio.h
F: include/asm-generic/gpio.h
F: include/uapi/linux/gpio.h
F: tools/gpio/
@ -8055,8 +8055,8 @@ F: drivers/scsi/ips.*
ICH LPC AND GPIO DRIVER
M: Peter Tyser <ptyser@xes-inc.com>
S: Maintained
F: drivers/mfd/lpc_ich.c
F: drivers/gpio/gpio-ich.c
F: drivers/mfd/lpc_ich.c
ICY I2C DRIVER
M: Max Staudt <max@enpas.org>
@ -9209,6 +9209,7 @@ L: kvm@vger.kernel.org
W: http://www.ibm.com/developerworks/linux/linux390/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux.git
S: Supported
F: Documentation/virt/kvm/s390*
F: arch/s390/include/uapi/asm/kvm*
F: arch/s390/include/asm/gmap.h
F: arch/s390/include/asm/kvm*
@ -16075,8 +16076,8 @@ F: Documentation/devicetree/bindings/reset/snps,axs10x-reset.txt
SYNOPSYS CREG GPIO DRIVER
M: Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>
S: Maintained
F: drivers/gpio/gpio-creg-snps.c
F: Documentation/devicetree/bindings/gpio/snps,creg-gpio.txt
F: drivers/gpio/gpio-creg-snps.c
SYNOPSYS DESIGNWARE 8250 UART DRIVER
R: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
@ -16087,8 +16088,8 @@ SYNOPSYS DESIGNWARE APB GPIO DRIVER
M: Hoan Tran <hoan@os.amperecomputing.com>
L: linux-gpio@vger.kernel.org
S: Maintained
F: drivers/gpio/gpio-dwapb.c
F: Documentation/devicetree/bindings/gpio/snps-dwapb-gpio.txt
F: drivers/gpio/gpio-dwapb.c
SYNOPSYS DESIGNWARE AXI DMAC DRIVER
M: Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>
@ -18414,8 +18415,8 @@ M: Nandor Han <nandor.han@ge.com>
M: Semi Malinen <semi.malinen@ge.com>
L: linux-gpio@vger.kernel.org
S: Maintained
F: drivers/gpio/gpio-xra1403.c
F: Documentation/devicetree/bindings/gpio/gpio-xra1403.txt
F: drivers/gpio/gpio-xra1403.c
XTENSA XTFPGA PLATFORM SUPPORT
M: Max Filippov <jcmvbkbc@gmail.com>


@ -2,7 +2,7 @@
VERSION = 5
PATCHLEVEL = 6
SUBLEVEL = 0
EXTRAVERSION = -rc1
EXTRAVERSION = -rc2
NAME = Kleptomaniac Octopus
# *DOCUMENTATION*


@ -178,9 +178,6 @@
phy-mode = "rgmii";
pinctrl-0 = <&pinctrl_rgmii1 &pinctrl_rgmii1_mdio_1>;
snps,phy-bus-name = "stmmac";
snps,phy-bus-id = <0>;
snps,phy-addr = <0>;
snps,reset-gpio = <&pio0 7 0>;
snps,reset-active-low;
snps,reset-delays-us = <0 10000 1000000>;


@ -46,7 +46,7 @@
/* DAC */
format = "i2s";
mclk-fs = <256>;
frame-inversion = <1>;
frame-inversion;
cpu {
sound-dai = <&sti_uni_player2>;
};


@ -11,8 +11,6 @@ CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set
CONFIG_ARCH_PXA=y
CONFIG_ARCH_GUMSTIX=y
CONFIG_PCCARD=y


@ -25,7 +25,6 @@ CONFIG_EMBEDDED=y
CONFIG_PROFILING=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_IOSCHED_DEADLINE is not set
CONFIG_ARCH_AXXIA=y
CONFIG_GPIO_PCA953X=y
CONFIG_ARM_LPAE=y


@ -7,7 +7,6 @@ CONFIG_EMBEDDED=y
CONFIG_SLOB=y
CONFIG_JUMP_LABEL=y
CONFIG_PARTITION_ADVANCED=y
# CONFIG_IOSCHED_CFQ is not set
CONFIG_ARCH_CLPS711X=y
CONFIG_ARCH_AUTCPU12=y
CONFIG_ARCH_CDB89712=y


@ -17,7 +17,7 @@ CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODVERSIONS=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_IOSCHED_CFQ=m
CONFIG_IOSCHED_BFQ=m
CONFIG_ARCH_MULTI_V6=y
#CONFIG_ARCH_MULTI_V7 is not set
CONFIG_ARCH_CNS3XXX=y


@ -43,7 +43,6 @@ CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
CONFIG_USB_MON=y
CONFIG_USB_STORAGE=y
CONFIG_MMC=y
# CONFIG_MMC_BLOCK_BOUNCE is not set
CONFIG_MMC_PXA=y
CONFIG_EXT3_FS=y
CONFIG_NFS_FS=y


@ -7,8 +7,6 @@ CONFIG_EXPERT=y
# CONFIG_BASE_FULL is not set
# CONFIG_EPOLL is not set
CONFIG_SLOB=y
# CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set
CONFIG_ARCH_SA1100=y
CONFIG_SA1100_COLLIE=y
CONFIG_PCCARD=y


@ -15,8 +15,6 @@ CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODVERSIONS=y
CONFIG_PARTITION_ADVANCED=y
# CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set
CONFIG_ARCH_MULTIPLATFORM=y
CONFIG_ARCH_MULTI_V7=n
CONFIG_ARCH_MULTI_V5=y


@ -12,8 +12,6 @@ CONFIG_EMBEDDED=y
# CONFIG_VM_EVENT_COUNTERS is not set
# CONFIG_SLUB_DEBUG is not set
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set
# CONFIG_MMU is not set
CONFIG_ARM_SINGLE_ARMV7M=y
CONFIG_ARCH_EFM32=y


@ -11,7 +11,6 @@ CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_PARTITION_ADVANCED=y
# CONFIG_IOSCHED_CFQ is not set
CONFIG_ARCH_EP93XX=y
CONFIG_CRUNCH=y
CONFIG_MACH_ADSSPHERE=y


@ -9,8 +9,6 @@ CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set
CONFIG_ARCH_PXA=y
CONFIG_ARCH_PXA_ESERIES=y
# CONFIG_ARM_THUMB is not set


@ -14,7 +14,6 @@ CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODVERSIONS=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_CFQ is not set
CONFIG_ARCH_PXA=y
CONFIG_PXA_EZX=y
CONFIG_NO_HZ=y


@ -5,8 +5,6 @@ CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
CONFIG_MODULES=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set
CONFIG_ARCH_SA1100=y
CONFIG_SA1100_H3600=y
CONFIG_PCCARD=y


@ -10,7 +10,6 @@ CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_CFQ is not set
CONFIG_ARCH_PXA=y
CONFIG_MACH_H5000=y
CONFIG_AEABI=y


@ -13,7 +13,6 @@ CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODVERSIONS=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_CFQ is not set
CONFIG_ARCH_PXA=y
CONFIG_MACH_INTELMOTE2=y
CONFIG_NO_HZ=y


@ -32,8 +32,6 @@ CONFIG_KPROBES=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y


@ -1,4 +1,3 @@
CONFIG_CROSS_COMPILE="arm-linux-gnueabihf-"
CONFIG_HIGH_RES_TIMERS=y
CONFIG_PREEMPT=y
CONFIG_BLK_DEV_INITRD=y
@ -28,10 +27,7 @@ CONFIG_FLASH_SIZE=0x00080000
CONFIG_ZBOOT_ROM_TEXT=0x0
CONFIG_ZBOOT_ROM_BSS=0x0
CONFIG_ARM_APPENDED_DTB=y
# CONFIG_LBDAF is not set
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set
CONFIG_BINFMT_FLAT=y
CONFIG_BINFMT_ZFLAT=y
CONFIG_BINFMT_SHARED_FLAT=y


@ -9,8 +9,6 @@ CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set
CONFIG_ARCH_PXA=y
CONFIG_MACH_H4700=y
CONFIG_MACH_MAGICIAN=y


@ -15,7 +15,6 @@ CONFIG_EMBEDDED=y
# CONFIG_SLUB_DEBUG is not set
# CONFIG_COMPAT_BRK is not set
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
CONFIG_ARCH_MULTI_V4=y
# CONFIG_ARCH_MULTI_V7 is not set
CONFIG_ARCH_MOXART=y


@ -25,8 +25,6 @@ CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODVERSIONS=y
CONFIG_BLK_DEV_INTEGRITY=y
# CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y


@ -18,8 +18,6 @@ CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set
CONFIG_ARCH_OMAP=y
CONFIG_ARCH_OMAP1=y
CONFIG_OMAP_RESET_CLOCKS=y


@ -7,8 +7,6 @@ CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set
CONFIG_ARCH_PXA=y
CONFIG_ARCH_PXA_PALM=y
# CONFIG_MACH_PALMTX is not set


@ -13,8 +13,6 @@ CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set
CONFIG_ARCH_PXA=y
CONFIG_MACH_PCM027=y
CONFIG_MACH_PCM990_BASEBOARD=y


@ -6,8 +6,6 @@ CONFIG_EXPERT=y
# CONFIG_HOTPLUG is not set
# CONFIG_SHMEM is not set
CONFIG_MODULES=y
# CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set
CONFIG_ARCH_SA1100=y
CONFIG_SA1100_PLEB=y
CONFIG_ZBOOT_ROM_TEXT=0x0


@ -8,7 +8,6 @@ CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_CFQ is not set
CONFIG_ARCH_MULTI_V6=y
CONFIG_ARCH_REALVIEW=y
CONFIG_MACH_REALVIEW_EB=y


@ -14,8 +14,6 @@ CONFIG_MODULE_FORCE_LOAD=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set
CONFIG_ARCH_AT91=y
CONFIG_SOC_SAMA5D2=y
CONFIG_SOC_SAMA5D3=y
@ -182,7 +180,6 @@ CONFIG_USB_GADGET=y
CONFIG_USB_ATMEL_USBA=y
CONFIG_USB_G_SERIAL=y
CONFIG_MMC=y
# CONFIG_MMC_BLOCK_BOUNCE is not set
CONFIG_MMC_SDHCI=y
CONFIG_MMC_SDHCI_PLTFM=y
CONFIG_MMC_SDHCI_OF_AT91=y


@ -14,8 +14,6 @@ CONFIG_EMBEDDED=y
# CONFIG_VM_EVENT_COUNTERS is not set
# CONFIG_SLUB_DEBUG is not set
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set
# CONFIG_MMU is not set
CONFIG_ARCH_STM32=y
CONFIG_CPU_V7M_NUM_IRQ=240


@ -85,6 +85,7 @@ CONFIG_BATTERY_AXP20X=y
CONFIG_AXP20X_POWER=y
CONFIG_THERMAL=y
CONFIG_CPU_THERMAL=y
CONFIG_SUN8I_THERMAL=y
CONFIG_WATCHDOG=y
CONFIG_SUNXI_WATCHDOG=y
CONFIG_MFD_AC100=y


@ -11,7 +11,6 @@ CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_PARTITION_ADVANCED=y
# CONFIG_IOSCHED_CFQ is not set
# CONFIG_ARCH_MULTI_V7 is not set
CONFIG_ARCH_U300=y
CONFIG_MACH_U300_SPIDUMMY=y
@ -46,7 +45,6 @@ CONFIG_FB=y
CONFIG_BACKLIGHT_CLASS_DEVICE=y
# CONFIG_USB_SUPPORT is not set
CONFIG_MMC=y
# CONFIG_MMC_BLOCK_BOUNCE is not set
CONFIG_MMC_ARMMMCI=y
CONFIG_RTC_CLASS=y
# CONFIG_RTC_HCTOSYS is not set


@ -15,8 +15,6 @@ CONFIG_OPROFILE=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set
CONFIG_ARCH_VEXPRESS=y
CONFIG_ARCH_VEXPRESS_DCSCB=y
CONFIG_ARCH_VEXPRESS_TC2_PM=y


@ -9,7 +9,6 @@ CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_CFQ is not set
CONFIG_ARCH_PXA=y
CONFIG_ARCH_VIPER=y
CONFIG_IWMMXT=y


@ -4,7 +4,6 @@ CONFIG_LOG_BUF_SHIFT=13
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_CFQ is not set
CONFIG_ARCH_PXA=y
CONFIG_MACH_ARCOM_ZEUS=y
CONFIG_PCCARD=m
@ -137,7 +136,6 @@ CONFIG_USB_MASS_STORAGE=m
CONFIG_USB_G_SERIAL=m
CONFIG_USB_G_PRINTER=m
CONFIG_MMC=y
# CONFIG_MMC_BLOCK_BOUNCE is not set
CONFIG_MMC_PXA=y
CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=m


@ -16,7 +16,6 @@ CONFIG_EMBEDDED=y
CONFIG_PERF_EVENTS=y
CONFIG_SLAB=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_CFQ is not set
CONFIG_ARCH_ZX=y
CONFIG_SOC_ZX296702=y
# CONFIG_SWP_EMULATE is not set


@ -78,13 +78,10 @@ static int ftrace_modify_code(unsigned long pc, unsigned long old,
{
unsigned long replaced;
if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
if (IS_ENABLED(CONFIG_THUMB2_KERNEL))
old = __opcode_to_mem_thumb32(old);
new = __opcode_to_mem_thumb32(new);
} else {
else
old = __opcode_to_mem_arm(old);
new = __opcode_to_mem_arm(new);
}
if (validate) {
if (probe_kernel_read(&replaced, (void *)pc, MCOUNT_INSN_SIZE))


@ -16,10 +16,10 @@ struct patch {
unsigned int insn;
};
#ifdef CONFIG_MMU
static DEFINE_RAW_SPINLOCK(patch_lock);
static void __kprobes *patch_map(void *addr, int fixmap, unsigned long *flags)
__acquires(&patch_lock)
{
unsigned int uintaddr = (uintptr_t) addr;
bool module = !core_kernel_text(uintaddr);
@ -34,8 +34,6 @@ static void __kprobes *patch_map(void *addr, int fixmap, unsigned long *flags)
if (flags)
raw_spin_lock_irqsave(&patch_lock, *flags);
else
__acquire(&patch_lock);
set_fixmap(fixmap, page_to_phys(page));
@ -43,15 +41,19 @@ static void __kprobes *patch_map(void *addr, int fixmap, unsigned long *flags)
}
static void __kprobes patch_unmap(int fixmap, unsigned long *flags)
__releases(&patch_lock)
{
clear_fixmap(fixmap);
if (flags)
raw_spin_unlock_irqrestore(&patch_lock, *flags);
else
__release(&patch_lock);
}
#else
static void __kprobes *patch_map(void *addr, int fixmap, unsigned long *flags)
{
return addr;
}
static void __kprobes patch_unmap(int fixmap, unsigned long *flags) { }
#endif
void __kprobes __patch_text_real(void *addr, unsigned int insn, bool remap)
{
@ -64,8 +66,6 @@ void __kprobes __patch_text_real(void *addr, unsigned int insn, bool remap)
if (remap)
waddr = patch_map(addr, FIX_TEXT_POKE0, &flags);
else
__acquire(&patch_lock);
if (thumb2 && __opcode_is_thumb16(insn)) {
*(u16 *)waddr = __opcode_to_mem_thumb16(insn);
@ -102,8 +102,7 @@ void __kprobes __patch_text_real(void *addr, unsigned int insn, bool remap)
if (waddr != addr) {
flush_kernel_vmap_range(waddr, twopage ? size / 2 : size);
patch_unmap(FIX_TEXT_POKE0, &flags);
} else
__release(&patch_lock);
}
flush_icache_range((uintptr_t)(addr),
(uintptr_t)(addr) + size);


@ -11,7 +11,7 @@ config ARCH_NPCM7XX
depends on ARCH_MULTI_V7
select PINCTRL_NPCM7XX
select NPCM7XX_TIMER
select ARCH_REQUIRE_GPIOLIB
select GPIOLIB
select CACHE_L2X0
select ARM_GIC
select HAVE_ARM_TWD if SMP


@ -161,10 +161,10 @@
bus-range = <0x0 0x1>;
reg = <0x0 0x40000000 0x0 0x10000000>;
ranges = <0x2000000 0x0 0x50000000 0x0 0x50000000 0x0 0x10000000>;
interrupt-map = <0 0 0 1 &gic GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>,
<0 0 0 2 &gic GIC_SPI 169 IRQ_TYPE_LEVEL_HIGH>,
<0 0 0 3 &gic GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
<0 0 0 4 &gic GIC_SPI 171 IRQ_TYPE_LEVEL_HIGH>;
interrupt-map = <0 0 0 1 &gic 0 0 GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>,
<0 0 0 2 &gic 0 0 GIC_SPI 169 IRQ_TYPE_LEVEL_HIGH>,
<0 0 0 3 &gic 0 0 GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
<0 0 0 4 &gic 0 0 GIC_SPI 171 IRQ_TYPE_LEVEL_HIGH>;
interrupt-map-mask = <0x0 0x0 0x0 0x7>;
msi-map = <0x0 &its 0x0 0x10000>;
iommu-map = <0x0 &smmu 0x0 0x10000>;


@ -452,6 +452,7 @@ CONFIG_THERMAL_GOV_POWER_ALLOCATOR=y
CONFIG_CPU_THERMAL=y
CONFIG_THERMAL_EMULATION=y
CONFIG_QORIQ_THERMAL=m
CONFIG_SUN8I_THERMAL=y
CONFIG_ROCKCHIP_THERMAL=m
CONFIG_RCAR_THERMAL=y
CONFIG_RCAR_GEN3_THERMAL=y
@ -547,6 +548,7 @@ CONFIG_ROCKCHIP_DW_MIPI_DSI=y
CONFIG_ROCKCHIP_INNO_HDMI=y
CONFIG_DRM_RCAR_DU=m
CONFIG_DRM_SUN4I=m
CONFIG_DRM_SUN6I_DSI=m
CONFIG_DRM_SUN8I_DW_HDMI=m
CONFIG_DRM_SUN8I_MIXER=m
CONFIG_DRM_MSM=m
@ -681,7 +683,7 @@ CONFIG_RTC_DRV_SNVS=m
CONFIG_RTC_DRV_IMX_SC=m
CONFIG_RTC_DRV_XGENE=y
CONFIG_DMADEVICES=y
CONFIG_DMA_BCM2835=m
CONFIG_DMA_BCM2835=y
CONFIG_DMA_SUN6I=m
CONFIG_FSL_EDMA=y
CONFIG_IMX_SDMA=y


@ -33,7 +33,6 @@ static inline u32 disr_to_esr(u64 disr)
asmlinkage void enter_from_user_mode(void);
void do_mem_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs);
void do_sp_pc_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs);
void do_undefinstr(struct pt_regs *regs);
asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr);
void do_debug_exception(unsigned long addr_if_watchpoint, unsigned int esr,
@ -47,7 +46,4 @@ void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr);
void do_cp15instr(unsigned int esr, struct pt_regs *regs);
void do_el0_svc(struct pt_regs *regs);
void do_el0_svc_compat(struct pt_regs *regs);
void do_el0_ia_bp_hardening(unsigned long addr, unsigned int esr,
struct pt_regs *regs);
#endif /* __ASM_EXCEPTION_H */


@ -18,6 +18,10 @@
* See:
* https://lore.kernel.org/lkml/20200110100612.GC2827@hirez.programming.kicks-ass.net
*/
#define vcpu_is_preempted(cpu) false
#define vcpu_is_preempted vcpu_is_preempted
static inline bool vcpu_is_preempted(int cpu)
{
return false;
}
#endif /* __ASM_SPINLOCK_H */


@ -11,6 +11,7 @@
#include <linux/sched.h>
#include <linux/types.h>
#include <asm/archrandom.h>
#include <asm/cacheflush.h>
#include <asm/fixmap.h>
#include <asm/kernel-pgtable.h>


@ -466,6 +466,13 @@ static void ssbs_thread_switch(struct task_struct *next)
if (unlikely(next->flags & PF_KTHREAD))
return;
/*
* If all CPUs implement the SSBS extension, then we just need to
* context-switch the PSTATE field.
*/
if (cpu_have_feature(cpu_feature(SSBS)))
return;
/* If the mitigation is enabled, then we leave SSBS clear. */
if ((arm64_get_ssbd_state() == ARM64_SSBD_FORCE_ENABLE) ||
test_tsk_thread_flag(next, TIF_SSBD))
@ -608,8 +615,6 @@ long get_tagged_addr_ctrl(void)
* only prevents the tagged address ABI enabling via prctl() and does not
* disable it for tasks that already opted in to the relaxed ABI.
*/
static int zero;
static int one = 1;
static struct ctl_table tagged_addr_sysctl_table[] = {
{
@ -618,8 +623,8 @@ static struct ctl_table tagged_addr_sysctl_table[] = {
.data = &tagged_addr_disabled,
.maxlen = sizeof(int),
.proc_handler = proc_dointvec_minmax,
.extra1 = &zero,
.extra2 = &one,
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_ONE,
},
{ }
};


@ -23,7 +23,7 @@
#include <linux/irq.h>
#include <linux/delay.h>
#include <linux/clocksource.h>
#include <linux/clk-provider.h>
#include <linux/of_clk.h>
#include <linux/acpi.h>
#include <clocksource/arm_arch_timer.h>


@ -37,7 +37,7 @@ CFLAGS_sclp_early_core.o += -I$(srctree)/drivers/s390/char
obj-y := head.o als.o startup.o mem_detect.o ipl_parm.o ipl_report.o
obj-y += string.o ebcdic.o sclp_early_core.o mem.o ipl_vmparm.o cmdline.o
obj-y += version.o pgm_check_info.o ctype.o text_dma.o
obj-$(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) += uv.o
obj-$(findstring y, $(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) $(CONFIG_PGSTE)) += uv.o
obj-$(CONFIG_RELOCATABLE) += machine_kexec_reloc.o
obj-$(CONFIG_RANDOMIZE_BASE) += kaslr.o
targets := bzImage startup.a section_cmp.boot.data section_cmp.boot.preserved.data $(obj-y)


@ -3,7 +3,13 @@
#include <asm/facility.h>
#include <asm/sections.h>
/* will be used in arch/s390/kernel/uv.c */
#ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
int __bootdata_preserved(prot_virt_guest);
#endif
#if IS_ENABLED(CONFIG_KVM)
struct uv_info __bootdata_preserved(uv_info);
#endif
void uv_query_info(void)
{
@ -15,10 +21,25 @@ void uv_query_info(void)
if (!test_facility(158))
return;
if (uv_call(0, (uint64_t)&uvcb))
/* rc==0x100 means that there is additional data we do not process */
if (uv_call(0, (uint64_t)&uvcb) && uvcb.header.rc != 0x100)
return;
if (IS_ENABLED(CONFIG_KVM)) {
memcpy(uv_info.inst_calls_list, uvcb.inst_calls_list, sizeof(uv_info.inst_calls_list));
uv_info.uv_base_stor_len = uvcb.uv_base_stor_len;
uv_info.guest_base_stor_len = uvcb.conf_base_phys_stor_len;
uv_info.guest_virt_base_stor_len = uvcb.conf_base_virt_stor_len;
uv_info.guest_virt_var_stor_len = uvcb.conf_virt_var_stor_len;
uv_info.guest_cpu_stor_len = uvcb.cpu_stor_len;
uv_info.max_sec_stor_addr = ALIGN(uvcb.max_guest_stor_addr, PAGE_SIZE);
uv_info.max_num_sec_conf = uvcb.max_num_sec_conf;
uv_info.max_guest_cpus = uvcb.max_guest_cpus;
}
#ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
if (test_bit_inv(BIT_UVC_CMD_SET_SHARED_ACCESS, (unsigned long *)uvcb.inst_calls_list) &&
test_bit_inv(BIT_UVC_CMD_REMOVE_SHARED_ACCESS, (unsigned long *)uvcb.inst_calls_list))
prot_virt_guest = 1;
#endif
}


@ -9,6 +9,7 @@
#ifndef _ASM_S390_GMAP_H
#define _ASM_S390_GMAP_H
#include <linux/radix-tree.h>
#include <linux/refcount.h>
/* Generic bits for GMAP notification on DAT table entry changes. */
@ -31,6 +32,7 @@
* @table: pointer to the page directory
* @asce: address space control element for gmap page table
* @pfault_enabled: defines if pfaults are applicable for the guest
* @guest_handle: protected virtual machine handle for the ultravisor
* @host_to_rmap: radix tree with gmap_rmap lists
* @children: list of shadow gmap structures
* @pt_list: list of all page tables used in the shadow guest address space
@ -54,6 +56,8 @@ struct gmap {
unsigned long asce_end;
void *private;
bool pfault_enabled;
/* only set for protected virtual machines */
unsigned long guest_handle;
/* Additional data for shadow guest address spaces */
struct radix_tree_root host_to_rmap;
struct list_head children;
@ -144,4 +148,6 @@ int gmap_mprotect_notify(struct gmap *, unsigned long start,
void gmap_sync_dirty_log_pmd(struct gmap *gmap, unsigned long dirty_bitmap[4],
unsigned long gaddr, unsigned long vmaddr);
int gmap_mark_unmergeable(void);
void s390_reset_acc(struct mm_struct *mm);
#endif /* _ASM_S390_GMAP_H */


@ -127,6 +127,12 @@ struct mcck_volatile_info {
#define CR14_INITIAL_MASK (CR14_UNUSED_32 | CR14_UNUSED_33 | \
CR14_EXTERNAL_DAMAGE_SUBMASK)
#define SIDAD_SIZE_MASK 0xff
#define sida_origin(sie_block) \
((sie_block)->sidad & PAGE_MASK)
#define sida_size(sie_block) \
((((sie_block)->sidad & SIDAD_SIZE_MASK) + 1) * PAGE_SIZE)
#define CPUSTAT_STOPPED 0x80000000
#define CPUSTAT_WAIT 0x10000000
#define CPUSTAT_ECALL_PEND 0x08000000
@ -160,7 +166,13 @@ struct kvm_s390_sie_block {
__u8 reserved08[4]; /* 0x0008 */
#define PROG_IN_SIE (1<<0)
__u32 prog0c; /* 0x000c */
__u8 reserved10[16]; /* 0x0010 */
union {
__u8 reserved10[16]; /* 0x0010 */
struct {
__u64 pv_handle_cpu;
__u64 pv_handle_config;
};
};
#define PROG_BLOCK_SIE (1<<0)
#define PROG_REQUEST (1<<1)
atomic_t prog20; /* 0x0020 */
@ -209,10 +221,23 @@ struct kvm_s390_sie_block {
#define ICPT_PARTEXEC 0x38
#define ICPT_IOINST 0x40
#define ICPT_KSS 0x5c
#define ICPT_MCHKREQ 0x60
#define ICPT_INT_ENABLE 0x64
#define ICPT_PV_INSTR 0x68
#define ICPT_PV_NOTIFY 0x6c
#define ICPT_PV_PREF 0x70
__u8 icptcode; /* 0x0050 */
__u8 icptstatus; /* 0x0051 */
__u16 ihcpu; /* 0x0052 */
__u8 reserved54[2]; /* 0x0054 */
__u8 reserved54; /* 0x0054 */
#define IICTL_CODE_NONE 0x00
#define IICTL_CODE_MCHK 0x01
#define IICTL_CODE_EXT 0x02
#define IICTL_CODE_IO 0x03
#define IICTL_CODE_RESTART 0x04
#define IICTL_CODE_SPECIFICATION 0x10
#define IICTL_CODE_OPERAND 0x11
__u8 iictl; /* 0x0055 */
__u16 ipa; /* 0x0056 */
__u32 ipb; /* 0x0058 */
__u32 scaoh; /* 0x005c */
@ -233,7 +258,7 @@ struct kvm_s390_sie_block {
#define ECB3_RI 0x01
__u8 ecb3; /* 0x0063 */
__u32 scaol; /* 0x0064 */
__u8 reserved68; /* 0x0068 */
__u8 sdf; /* 0x0068 */
__u8 epdx; /* 0x0069 */
__u8 reserved6a[2]; /* 0x006a */
__u32 todpr; /* 0x006c */
@ -249,31 +274,58 @@ struct kvm_s390_sie_block {
#define HPID_KVM 0x4
#define HPID_VSIE 0x5
__u8 hpid; /* 0x00b8 */
__u8 reservedb9[11]; /* 0x00b9 */
__u16 extcpuaddr; /* 0x00c4 */
__u16 eic; /* 0x00c6 */
__u8 reservedb9[7]; /* 0x00b9 */
union {
struct {
__u32 eiparams; /* 0x00c0 */
__u16 extcpuaddr; /* 0x00c4 */
__u16 eic; /* 0x00c6 */
};
__u64 mcic; /* 0x00c0 */
} __packed;
__u32 reservedc8; /* 0x00c8 */
__u16 pgmilc; /* 0x00cc */
__u16 iprcc; /* 0x00ce */
__u32 dxc; /* 0x00d0 */
__u16 mcn; /* 0x00d4 */
__u8 perc; /* 0x00d6 */
__u8 peratmid; /* 0x00d7 */
union {
struct {
__u16 pgmilc; /* 0x00cc */
__u16 iprcc; /* 0x00ce */
};
__u32 edc; /* 0x00cc */
} __packed;
union {
struct {
__u32 dxc; /* 0x00d0 */
__u16 mcn; /* 0x00d4 */
__u8 perc; /* 0x00d6 */
__u8 peratmid; /* 0x00d7 */
};
__u64 faddr; /* 0x00d0 */
} __packed;
__u64 peraddr; /* 0x00d8 */
__u8 eai; /* 0x00e0 */
__u8 peraid; /* 0x00e1 */
__u8 oai; /* 0x00e2 */
__u8 armid; /* 0x00e3 */
__u8 reservede4[4]; /* 0x00e4 */
__u64 tecmc; /* 0x00e8 */
__u8 reservedf0[12]; /* 0x00f0 */
union {
__u64 tecmc; /* 0x00e8 */
struct {
__u16 subchannel_id; /* 0x00e8 */
__u16 subchannel_nr; /* 0x00ea */
__u32 io_int_parm; /* 0x00ec */
__u32 io_int_word; /* 0x00f0 */
};
} __packed;
__u8 reservedf4[8]; /* 0x00f4 */
#define CRYCB_FORMAT_MASK 0x00000003
#define CRYCB_FORMAT0 0x00000000
#define CRYCB_FORMAT1 0x00000001
#define CRYCB_FORMAT2 0x00000003
__u32 crycbd; /* 0x00fc */
__u64 gcr[16]; /* 0x0100 */
__u64 gbea; /* 0x0180 */
union {
__u64 gbea; /* 0x0180 */
__u64 sidad;
};
__u8 reserved188[8]; /* 0x0188 */
__u64 sdnxo; /* 0x0190 */
__u8 reserved198[8]; /* 0x0198 */
@ -301,7 +353,9 @@ struct kvm_s390_itdb {
struct sie_page {
struct kvm_s390_sie_block sie_block;
struct mcck_volatile_info mcck_info; /* 0x0200 */
__u8 reserved218[1000]; /* 0x0218 */
__u8 reserved218[360]; /* 0x0218 */
__u64 pv_grregs[16]; /* 0x0380 */
__u8 reserved400[512]; /* 0x0400 */
struct kvm_s390_itdb itdb; /* 0x0600 */
__u8 reserved700[2304]; /* 0x0700 */
};
@ -476,6 +530,7 @@ enum irq_types {
IRQ_PEND_PFAULT_INIT,
IRQ_PEND_EXT_HOST,
IRQ_PEND_EXT_SERVICE,
IRQ_PEND_EXT_SERVICE_EV,
IRQ_PEND_EXT_TIMING,
IRQ_PEND_EXT_CPU_TIMER,
IRQ_PEND_EXT_CLOCK_COMP,
@ -520,6 +575,7 @@ enum irq_types {
(1UL << IRQ_PEND_EXT_TIMING) | \
(1UL << IRQ_PEND_EXT_HOST) | \
(1UL << IRQ_PEND_EXT_SERVICE) | \
(1UL << IRQ_PEND_EXT_SERVICE_EV) | \
(1UL << IRQ_PEND_VIRTIO) | \
(1UL << IRQ_PEND_PFAULT_INIT) | \
(1UL << IRQ_PEND_PFAULT_DONE))
@ -536,6 +592,13 @@ enum irq_types {
#define IRQ_PEND_MCHK_MASK ((1UL << IRQ_PEND_MCHK_REP) | \
(1UL << IRQ_PEND_MCHK_EX))
#define IRQ_PEND_EXT_II_MASK ((1UL << IRQ_PEND_EXT_CPU_TIMER) | \
(1UL << IRQ_PEND_EXT_CLOCK_COMP) | \
(1UL << IRQ_PEND_EXT_EMERGENCY) | \
(1UL << IRQ_PEND_EXT_EXTERNAL) | \
(1UL << IRQ_PEND_EXT_SERVICE) | \
(1UL << IRQ_PEND_EXT_SERVICE_EV))
struct kvm_s390_interrupt_info {
struct list_head list;
u64 type;
@ -594,6 +657,7 @@ struct kvm_s390_local_interrupt {
struct kvm_s390_float_interrupt {
unsigned long pending_irqs;
unsigned long masked_irqs;
spinlock_t lock;
struct list_head lists[FIRQ_LIST_COUNT];
int counters[FIRQ_MAX_COUNT];
@ -645,6 +709,11 @@ struct kvm_guestdbg_info_arch {
unsigned long last_bp;
};
struct kvm_s390_pv_vcpu {
u64 handle;
unsigned long stor_base;
};
struct kvm_vcpu_arch {
struct kvm_s390_sie_block *sie_block;
/* if vsie is active, currently executed shadow sie control block */
@ -673,6 +742,7 @@ struct kvm_vcpu_arch {
__u64 cputm_start;
bool gs_enabled;
bool skey_enabled;
struct kvm_s390_pv_vcpu pv;
};
struct kvm_vm_stat {
@ -701,9 +771,6 @@ struct s390_io_adapter {
bool masked;
bool swap;
bool suppressible;
struct rw_semaphore maps_lock;
struct list_head maps;
atomic_t nr_maps;
};
#define MAX_S390_IO_ADAPTERS ((MAX_ISC + 1) * 8)
@ -846,6 +913,13 @@ struct kvm_s390_gisa_interrupt {
DECLARE_BITMAP(kicked_mask, KVM_MAX_VCPUS);
};
struct kvm_s390_pv {
u64 handle;
u64 guest_len;
unsigned long stor_base;
void *stor_var;
};
struct kvm_arch{
void *sca;
int use_esca;
@ -881,6 +955,7 @@ struct kvm_arch{
DECLARE_BITMAP(cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
DECLARE_BITMAP(idle_mask, KVM_MAX_VCPUS);
struct kvm_s390_gisa_interrupt gisa_int;
struct kvm_s390_pv pv;
};
#define KVM_HVA_ERR_BAD (-1UL)


@ -16,6 +16,8 @@ typedef struct {
unsigned long asce;
unsigned long asce_limit;
unsigned long vdso_base;
/* The mmu context belongs to a secure guest. */
atomic_t is_protected;
/*
* The following bitfields need a down_write on the mm
* semaphore when they are written to. As they are only


@ -23,6 +23,7 @@ static inline int init_new_context(struct task_struct *tsk,
INIT_LIST_HEAD(&mm->context.gmap_list);
cpumask_clear(&mm->context.cpu_attach_mask);
atomic_set(&mm->context.flush_count, 0);
atomic_set(&mm->context.is_protected, 0);
mm->context.gmap_asce = 0;
mm->context.flush_mm = 0;
mm->context.compat_mm = test_thread_flag(TIF_31BIT);


@ -153,6 +153,11 @@ static inline int devmem_is_allowed(unsigned long pfn)
#define HAVE_ARCH_FREE_PAGE
#define HAVE_ARCH_ALLOC_PAGE
#if IS_ENABLED(CONFIG_PGSTE)
int arch_make_page_accessible(struct page *page);
#define HAVE_ARCH_MAKE_PAGE_ACCESSIBLE
#endif
#endif /* !__ASSEMBLY__ */
#define __PAGE_OFFSET 0x0UL


@ -19,6 +19,7 @@
#include <linux/atomic.h>
#include <asm/bug.h>
#include <asm/page.h>
#include <asm/uv.h>
extern pgd_t swapper_pg_dir[];
extern void paging_init(void);
@ -520,6 +521,15 @@ static inline int mm_has_pgste(struct mm_struct *mm)
return 0;
}
static inline int mm_is_protected(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
if (unlikely(atomic_read(&mm->context.is_protected)))
return 1;
#endif
return 0;
}
static inline int mm_alloc_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
@ -1061,7 +1071,12 @@ static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
return ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
pte_t res;
res = ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
if (mm_is_protected(mm) && pte_present(res))
uv_convert_from_secure(pte_val(res) & PAGE_MASK);
return res;
}
#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
@ -1073,7 +1088,12 @@ void ptep_modify_prot_commit(struct vm_area_struct *, unsigned long,
static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep)
{
return ptep_xchg_direct(vma->vm_mm, addr, ptep, __pte(_PAGE_INVALID));
pte_t res;
res = ptep_xchg_direct(vma->vm_mm, addr, ptep, __pte(_PAGE_INVALID));
if (mm_is_protected(vma->vm_mm) && pte_present(res))
uv_convert_from_secure(pte_val(res) & PAGE_MASK);
return res;
}
/*
@ -1088,12 +1108,17 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
unsigned long addr,
pte_t *ptep, int full)
{
pte_t res;
if (full) {
pte_t pte = *ptep;
res = *ptep;
*ptep = __pte(_PAGE_INVALID);
return pte;
} else {
res = ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
}
return ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
if (mm_is_protected(mm) && pte_present(res))
uv_convert_from_secure(pte_val(res) & PAGE_MASK);
return res;
}
#define __HAVE_ARCH_PTEP_SET_WRPROTECT


@ -155,7 +155,7 @@ static inline void get_tod_clock_ext(char *clk)
static inline unsigned long long get_tod_clock(void)
{
unsigned char clk[STORE_CLOCK_EXT_SIZE];
char clk[STORE_CLOCK_EXT_SIZE];
get_tod_clock_ext(clk);
return *((unsigned long long *)&clk[1]);


@ -14,23 +14,62 @@
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/bug.h>
#include <linux/sched.h>
#include <asm/page.h>
#include <asm/gmap.h>
#define UVC_RC_EXECUTED 0x0001
#define UVC_RC_INV_CMD 0x0002
#define UVC_RC_INV_STATE 0x0003
#define UVC_RC_INV_LEN 0x0005
#define UVC_RC_NO_RESUME 0x0007
#define UVC_RC_NEED_DESTROY 0x8000
#define UVC_CMD_QUI 0x0001
#define UVC_CMD_INIT_UV 0x000f
#define UVC_CMD_CREATE_SEC_CONF 0x0100
#define UVC_CMD_DESTROY_SEC_CONF 0x0101
#define UVC_CMD_CREATE_SEC_CPU 0x0120
#define UVC_CMD_DESTROY_SEC_CPU 0x0121
#define UVC_CMD_CONV_TO_SEC_STOR 0x0200
#define UVC_CMD_CONV_FROM_SEC_STOR 0x0201
#define UVC_CMD_SET_SEC_CONF_PARAMS 0x0300
#define UVC_CMD_UNPACK_IMG 0x0301
#define UVC_CMD_VERIFY_IMG 0x0302
#define UVC_CMD_CPU_RESET 0x0310
#define UVC_CMD_CPU_RESET_INITIAL 0x0311
#define UVC_CMD_PREPARE_RESET 0x0320
#define UVC_CMD_CPU_RESET_CLEAR 0x0321
#define UVC_CMD_CPU_SET_STATE 0x0330
#define UVC_CMD_SET_UNSHARE_ALL 0x0340
#define UVC_CMD_PIN_PAGE_SHARED 0x0341
#define UVC_CMD_UNPIN_PAGE_SHARED 0x0342
#define UVC_CMD_SET_SHARED_ACCESS 0x1000
#define UVC_CMD_REMOVE_SHARED_ACCESS 0x1001
/* Bits in installed uv calls */
enum uv_cmds_inst {
BIT_UVC_CMD_QUI = 0,
BIT_UVC_CMD_INIT_UV = 1,
BIT_UVC_CMD_CREATE_SEC_CONF = 2,
BIT_UVC_CMD_DESTROY_SEC_CONF = 3,
BIT_UVC_CMD_CREATE_SEC_CPU = 4,
BIT_UVC_CMD_DESTROY_SEC_CPU = 5,
BIT_UVC_CMD_CONV_TO_SEC_STOR = 6,
BIT_UVC_CMD_CONV_FROM_SEC_STOR = 7,
BIT_UVC_CMD_SET_SHARED_ACCESS = 8,
BIT_UVC_CMD_REMOVE_SHARED_ACCESS = 9,
BIT_UVC_CMD_SET_SEC_PARMS = 11,
BIT_UVC_CMD_UNPACK_IMG = 13,
BIT_UVC_CMD_VERIFY_IMG = 14,
BIT_UVC_CMD_CPU_RESET = 15,
BIT_UVC_CMD_CPU_RESET_INITIAL = 16,
BIT_UVC_CMD_CPU_SET_STATE = 17,
BIT_UVC_CMD_PREPARE_RESET = 18,
BIT_UVC_CMD_CPU_PERFORM_CLEAR_RESET = 19,
BIT_UVC_CMD_UNSHARE_ALL = 20,
BIT_UVC_CMD_PIN_PAGE_SHARED = 21,
BIT_UVC_CMD_UNPIN_PAGE_SHARED = 22,
};
struct uv_cb_header {
@ -40,13 +79,127 @@ struct uv_cb_header {
u16 rrc; /* Return Reason Code */
} __packed __aligned(8);
/* Query Ultravisor Information */
struct uv_cb_qui {
struct uv_cb_header header;
u64 reserved08;
u64 inst_calls_list[4];
u64 reserved30[15];
u64 reserved30[2];
u64 uv_base_stor_len;
u64 reserved48;
u64 conf_base_phys_stor_len;
u64 conf_base_virt_stor_len;
u64 conf_virt_var_stor_len;
u64 cpu_stor_len;
u32 reserved70[3];
u32 max_num_sec_conf;
u64 max_guest_stor_addr;
u8 reserved88[158 - 136];
u16 max_guest_cpus;
u8 reserveda0[200 - 160];
} __packed __aligned(8);
/* Initialize Ultravisor */
struct uv_cb_init {
struct uv_cb_header header;
u64 reserved08[2];
u64 stor_origin;
u64 stor_len;
u64 reserved28[4];
} __packed __aligned(8);
/* Create Guest Configuration */
struct uv_cb_cgc {
struct uv_cb_header header;
u64 reserved08[2];
u64 guest_handle;
u64 conf_base_stor_origin;
u64 conf_virt_stor_origin;
u64 reserved30;
u64 guest_stor_origin;
u64 guest_stor_len;
u64 guest_sca;
u64 guest_asce;
u64 reserved58[5];
} __packed __aligned(8);
/* Create Secure CPU */
struct uv_cb_csc {
struct uv_cb_header header;
u64 reserved08[2];
u64 cpu_handle;
u64 guest_handle;
u64 stor_origin;
u8 reserved30[6];
u16 num;
u64 state_origin;
u64 reserved40[4];
} __packed __aligned(8);
/* Convert to Secure */
struct uv_cb_cts {
struct uv_cb_header header;
u64 reserved08[2];
u64 guest_handle;
u64 gaddr;
} __packed __aligned(8);
/* Convert from Secure / Pin Page Shared */
struct uv_cb_cfs {
struct uv_cb_header header;
u64 reserved08[2];
u64 paddr;
} __packed __aligned(8);
/* Set Secure Config Parameter */
struct uv_cb_ssc {
struct uv_cb_header header;
u64 reserved08[2];
u64 guest_handle;
u64 sec_header_origin;
u32 sec_header_len;
u32 reserved2c;
u64 reserved30[4];
} __packed __aligned(8);
/* Unpack */
struct uv_cb_unp {
struct uv_cb_header header;
u64 reserved08[2];
u64 guest_handle;
u64 gaddr;
u64 tweak[2];
u64 reserved38[3];
} __packed __aligned(8);
#define PV_CPU_STATE_OPR 1
#define PV_CPU_STATE_STP 2
#define PV_CPU_STATE_CHKSTP 3
#define PV_CPU_STATE_OPR_LOAD 5
struct uv_cb_cpu_set_state {
struct uv_cb_header header;
u64 reserved08[2];
u64 cpu_handle;
u8 reserved20[7];
u8 state;
u64 reserved28[5];
};
/*
* A common UV call struct for calls that take no payload
* Examples:
* Destroy cpu/config
* Verify
*/
struct uv_cb_nodata {
struct uv_cb_header header;
u64 reserved08[2];
u64 handle;
u64 reserved20[4];
} __packed __aligned(8);
/* Set Shared Access */
struct uv_cb_share {
struct uv_cb_header header;
u64 reserved08[3];
@ -54,21 +207,76 @@ struct uv_cb_share {
u64 reserved28;
} __packed __aligned(8);
static inline int uv_call(unsigned long r1, unsigned long r2)
static inline int __uv_call(unsigned long r1, unsigned long r2)
{
int cc;
asm volatile(
"0: .insn rrf,0xB9A40000,%[r1],%[r2],0,0\n"
" brc 3,0b\n"
" ipm %[cc]\n"
" srl %[cc],28\n"
" .insn rrf,0xB9A40000,%[r1],%[r2],0,0\n"
" ipm %[cc]\n"
" srl %[cc],28\n"
: [cc] "=d" (cc)
: [r1] "a" (r1), [r2] "a" (r2)
: "memory", "cc");
return cc;
}
static inline int uv_call(unsigned long r1, unsigned long r2)
{
int cc;
do {
cc = __uv_call(r1, r2);
} while (cc > 1);
return cc;
}
/* Low level uv_call that avoids stalls for long running busy conditions */
static inline int uv_call_sched(unsigned long r1, unsigned long r2)
{
int cc;
do {
cc = __uv_call(r1, r2);
cond_resched();
} while (cc > 1);
return cc;
}
/*
* special variant of uv_call that only transports the cpu or guest
* handle and the command, like destroy or verify.
*/
static inline int uv_cmd_nodata(u64 handle, u16 cmd, u16 *rc, u16 *rrc)
{
struct uv_cb_nodata uvcb = {
.header.cmd = cmd,
.header.len = sizeof(uvcb),
.handle = handle,
};
int cc;
WARN(!handle, "No handle provided to Ultravisor call cmd %x\n", cmd);
cc = uv_call_sched(0, (u64)&uvcb);
*rc = uvcb.header.rc;
*rrc = uvcb.header.rrc;
return cc ? -EINVAL : 0;
}
struct uv_info {
unsigned long inst_calls_list[4];
unsigned long uv_base_stor_len;
unsigned long guest_base_stor_len;
unsigned long guest_virt_base_stor_len;
unsigned long guest_virt_var_stor_len;
unsigned long guest_cpu_stor_len;
unsigned long max_sec_stor_addr;
unsigned int max_num_sec_conf;
unsigned short max_guest_cpus;
};
extern struct uv_info uv_info;
#ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
extern int prot_virt_guest;
@ -121,11 +329,40 @@ static inline int uv_remove_shared(unsigned long addr)
return share(addr, UVC_CMD_REMOVE_SHARED_ACCESS);
}
void uv_query_info(void);
#else
#define is_prot_virt_guest() 0
static inline int uv_set_shared(unsigned long addr) { return 0; }
static inline int uv_remove_shared(unsigned long addr) { return 0; }
#endif
#if IS_ENABLED(CONFIG_KVM)
extern int prot_virt_host;
static inline int is_prot_virt_host(void)
{
return prot_virt_host;
}
int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb);
int uv_convert_from_secure(unsigned long paddr);
int gmap_convert_to_secure(struct gmap *gmap, unsigned long gaddr);
void setup_uv(void);
void adjust_to_uv_max(unsigned long *vmax);
#else
#define is_prot_virt_host() 0
static inline void setup_uv(void) {}
static inline void adjust_to_uv_max(unsigned long *vmax) {}
static inline int uv_convert_from_secure(unsigned long paddr)
{
return 0;
}
#endif
#if defined(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) || IS_ENABLED(CONFIG_KVM)
void uv_query_info(void);
#else
static inline void uv_query_info(void) {}
#endif


@ -78,6 +78,7 @@ obj-$(CONFIG_PERF_EVENTS) += perf_cpum_cf_events.o perf_regs.o
obj-$(CONFIG_PERF_EVENTS) += perf_cpum_cf_diag.o
obj-$(CONFIG_TRACEPOINTS) += trace.o
obj-$(findstring y, $(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) $(CONFIG_PGSTE)) += uv.o
# vdso
obj-y += vdso64/


@ -24,6 +24,8 @@ asmlinkage void do_syscall_trace_exit(struct pt_regs *regs);
void do_protection_exception(struct pt_regs *regs);
void do_dat_exception(struct pt_regs *regs);
void do_secure_storage_access(struct pt_regs *regs);
void do_non_secure_storage_access(struct pt_regs *regs);
void addressing_exception(struct pt_regs *regs);
void data_exception(struct pt_regs *regs);


@ -78,8 +78,8 @@ PGM_CHECK(do_dat_exception) /* 39 */
PGM_CHECK(do_dat_exception) /* 3a */
PGM_CHECK(do_dat_exception) /* 3b */
PGM_CHECK_DEFAULT /* 3c */
PGM_CHECK_DEFAULT /* 3d */
PGM_CHECK_DEFAULT /* 3e */
PGM_CHECK(do_secure_storage_access) /* 3d */
PGM_CHECK(do_non_secure_storage_access) /* 3e */
PGM_CHECK_DEFAULT /* 3f */
PGM_CHECK(monitor_event_exception) /* 40 */
PGM_CHECK_DEFAULT /* 41 */


@ -92,10 +92,6 @@ char elf_platform[ELF_PLATFORM_SIZE];
unsigned long int_hwcap = 0;
#ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
int __bootdata_preserved(prot_virt_guest);
#endif
int __bootdata(noexec_disabled);
int __bootdata(memory_end_set);
unsigned long __bootdata(memory_end);
@ -564,6 +560,9 @@ static void __init setup_memory_end(void)
vmax = _REGION1_SIZE; /* 4-level kernel page table */
}
if (is_prot_virt_host())
adjust_to_uv_max(&vmax);
/* module area is at the end of the kernel address space. */
MODULES_END = vmax;
MODULES_VADDR = MODULES_END - MODULES_LEN;
@ -1138,6 +1137,8 @@ void __init setup_arch(char **cmdline_p)
*/
memblock_trim_memory(1UL << (MAX_ORDER - 1 + PAGE_SHIFT));
if (is_prot_virt_host())
setup_uv();
setup_memory_end();
setup_memory();
dma_contiguous_reserve(memory_end);

arch/s390/kernel/uv.c (new file, 414 lines)

@ -0,0 +1,414 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Common Ultravisor functions and initialization
*
* Copyright IBM Corp. 2019, 2020
*/
#define KMSG_COMPONENT "prot_virt"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/sizes.h>
#include <linux/bitmap.h>
#include <linux/memblock.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <asm/facility.h>
#include <asm/sections.h>
#include <asm/uv.h>
/* the bootdata_preserved fields come from ones in arch/s390/boot/uv.c */
#ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
int __bootdata_preserved(prot_virt_guest);
#endif
#if IS_ENABLED(CONFIG_KVM)
int prot_virt_host;
EXPORT_SYMBOL(prot_virt_host);
struct uv_info __bootdata_preserved(uv_info);
EXPORT_SYMBOL(uv_info);
static int __init prot_virt_setup(char *val)
{
bool enabled;
int rc;
rc = kstrtobool(val, &enabled);
if (!rc && enabled)
prot_virt_host = 1;
if (is_prot_virt_guest() && prot_virt_host) {
prot_virt_host = 0;
pr_warn("Protected virtualization not available in protected guests.");
}
if (prot_virt_host && !test_facility(158)) {
prot_virt_host = 0;
pr_warn("Protected virtualization not supported by the hardware.");
}
return rc;
}
early_param("prot_virt", prot_virt_setup);
static int __init uv_init(unsigned long stor_base, unsigned long stor_len)
{
struct uv_cb_init uvcb = {
.header.cmd = UVC_CMD_INIT_UV,
.header.len = sizeof(uvcb),
.stor_origin = stor_base,
.stor_len = stor_len,
};
if (uv_call(0, (uint64_t)&uvcb)) {
pr_err("Ultravisor init failed with rc: 0x%x rrc: 0%x\n",
uvcb.header.rc, uvcb.header.rrc);
return -1;
}
return 0;
}
void __init setup_uv(void)
{
unsigned long uv_stor_base;
uv_stor_base = (unsigned long)memblock_alloc_try_nid(
uv_info.uv_base_stor_len, SZ_1M, SZ_2G,
MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
if (!uv_stor_base) {
pr_warn("Failed to reserve %lu bytes for ultravisor base storage\n",
uv_info.uv_base_stor_len);
goto fail;
}
if (uv_init(uv_stor_base, uv_info.uv_base_stor_len)) {
memblock_free(uv_stor_base, uv_info.uv_base_stor_len);
goto fail;
}
pr_info("Reserving %luMB as ultravisor base storage\n",
uv_info.uv_base_stor_len >> 20);
return;
fail:
pr_info("Disabling support for protected virtualization");
prot_virt_host = 0;
}
void adjust_to_uv_max(unsigned long *vmax)
{
*vmax = min_t(unsigned long, *vmax, uv_info.max_sec_stor_addr);
}
/*
* Requests the Ultravisor to pin the page in the shared state. This will
* cause an intercept when the guest attempts to unshare the pinned page.
*/
static int uv_pin_shared(unsigned long paddr)
{
struct uv_cb_cfs uvcb = {
.header.cmd = UVC_CMD_PIN_PAGE_SHARED,
.header.len = sizeof(uvcb),
.paddr = paddr,
};
if (uv_call(0, (u64)&uvcb))
return -EINVAL;
return 0;
}
/*
* Requests the Ultravisor to encrypt a guest page and make it
* accessible to the host for paging (export).
*
* @paddr: Absolute host address of page to be exported
*/
int uv_convert_from_secure(unsigned long paddr)
{
struct uv_cb_cfs uvcb = {
.header.cmd = UVC_CMD_CONV_FROM_SEC_STOR,
.header.len = sizeof(uvcb),
.paddr = paddr
};
if (uv_call(0, (u64)&uvcb))
return -EINVAL;
return 0;
}
/*
* Calculate the expected ref_count for a page that would otherwise have no
* further pins. This was cribbed from similar functions in other places in
* the kernel, but with some slight modifications. We know that a secure
* page can not be a huge page for example.
*/
static int expected_page_refs(struct page *page)
{
int res;
res = page_mapcount(page);
if (PageSwapCache(page)) {
res++;
} else if (page_mapping(page)) {
res++;
if (page_has_private(page))
res++;
}
return res;
}
static int make_secure_pte(pte_t *ptep, unsigned long addr,
struct page *exp_page, struct uv_cb_header *uvcb)
{
pte_t entry = READ_ONCE(*ptep);
struct page *page;
int expected, rc = 0;
if (!pte_present(entry))
return -ENXIO;
if (pte_val(entry) & _PAGE_INVALID)
return -ENXIO;
page = pte_page(entry);
if (page != exp_page)
return -ENXIO;
if (PageWriteback(page))
return -EAGAIN;
expected = expected_page_refs(page);
if (!page_ref_freeze(page, expected))
return -EBUSY;
set_bit(PG_arch_1, &page->flags);
rc = uv_call(0, (u64)uvcb);
page_ref_unfreeze(page, expected);
/* Return -ENXIO if the page was not mapped, -EINVAL otherwise */
if (rc)
rc = uvcb->rc == 0x10a ? -ENXIO : -EINVAL;
return rc;
}
/*
* Requests the Ultravisor to make a page accessible to a guest.
* If it's brought in the first time, it will be cleared. If
* it has been exported before, it will be decrypted and integrity
* checked.
*/
int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb)
{
struct vm_area_struct *vma;
bool local_drain = false;
spinlock_t *ptelock;
unsigned long uaddr;
struct page *page;
pte_t *ptep;
int rc;
again:
rc = -EFAULT;
down_read(&gmap->mm->mmap_sem);
uaddr = __gmap_translate(gmap, gaddr);
if (IS_ERR_VALUE(uaddr))
goto out;
vma = find_vma(gmap->mm, uaddr);
if (!vma)
goto out;
/*
* Secure pages cannot be huge and userspace should not combine both.
* In case userspace does it anyway this will result in an -EFAULT for
* the unpack. The guest thus never reaches secure mode. If
* userspace plays dirty tricks with mapping huge pages later
* on, this will result in a segmentation fault.
*/
if (is_vm_hugetlb_page(vma))
goto out;
rc = -ENXIO;
page = follow_page(vma, uaddr, FOLL_WRITE);
if (IS_ERR_OR_NULL(page))
goto out;
lock_page(page);
ptep = get_locked_pte(gmap->mm, uaddr, &ptelock);
rc = make_secure_pte(ptep, uaddr, page, uvcb);
pte_unmap_unlock(ptep, ptelock);
unlock_page(page);
out:
up_read(&gmap->mm->mmap_sem);
if (rc == -EAGAIN) {
wait_on_page_writeback(page);
} else if (rc == -EBUSY) {
/*
* If we have tried a local drain and the page refcount
* still does not match our expected safe value, try with a
* system wide drain. This is needed if the pagevecs holding
* the page are on a different CPU.
*/
if (local_drain) {
lru_add_drain_all();
/* We give up here, and let the caller try again */
return -EAGAIN;
}
/*
* We are here if the page refcount does not match the
* expected safe value. The main culprits are usually
* pagevecs. With lru_add_drain() we drain the pagevecs
* on the local CPU so that hopefully the refcount will
* reach the expected safe value.
*/
lru_add_drain();
local_drain = true;
/* And now we try again immediately after draining */
goto again;
} else if (rc == -ENXIO) {
if (gmap_fault(gmap, gaddr, FAULT_FLAG_WRITE))
return -EFAULT;
return -EAGAIN;
}
return rc;
}
EXPORT_SYMBOL_GPL(gmap_make_secure);
int gmap_convert_to_secure(struct gmap *gmap, unsigned long gaddr)
{
struct uv_cb_cts uvcb = {
.header.cmd = UVC_CMD_CONV_TO_SEC_STOR,
.header.len = sizeof(uvcb),
.guest_handle = gmap->guest_handle,
.gaddr = gaddr,
};
return gmap_make_secure(gmap, gaddr, &uvcb);
}
EXPORT_SYMBOL_GPL(gmap_convert_to_secure);
/*
* To be called with the page locked or with an extra reference! This will
* prevent gmap_make_secure from touching the page concurrently. Having two
* parallel make_page_accessible calls is fine, as the UV calls will become a
* no-op if the page is already exported.
*/
int arch_make_page_accessible(struct page *page)
{
int rc = 0;
/* Hugepage cannot be protected, so nothing to do */
if (PageHuge(page))
return 0;
/*
* PG_arch_1 is used in 3 places:
* 1. for kernel page tables during early boot
* 2. for storage keys of huge pages and KVM
* 3. As an indication that this page might be secure. This can
* overindicate, e.g. we set the bit before calling
* convert_to_secure.
* As secure pages are never huge, all 3 variants can co-exist.
*/
if (!test_bit(PG_arch_1, &page->flags))
return 0;
rc = uv_pin_shared(page_to_phys(page));
if (!rc) {
clear_bit(PG_arch_1, &page->flags);
return 0;
}
rc = uv_convert_from_secure(page_to_phys(page));
if (!rc) {
clear_bit(PG_arch_1, &page->flags);
return 0;
}
return rc;
}
EXPORT_SYMBOL_GPL(arch_make_page_accessible);
#endif
#if defined(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) || IS_ENABLED(CONFIG_KVM)
static ssize_t uv_query_facilities(struct kobject *kobj,
struct kobj_attribute *attr, char *page)
{
return snprintf(page, PAGE_SIZE, "%lx\n%lx\n%lx\n%lx\n",
uv_info.inst_calls_list[0],
uv_info.inst_calls_list[1],
uv_info.inst_calls_list[2],
uv_info.inst_calls_list[3]);
}
static struct kobj_attribute uv_query_facilities_attr =
__ATTR(facilities, 0444, uv_query_facilities, NULL);
static ssize_t uv_query_max_guest_cpus(struct kobject *kobj,
struct kobj_attribute *attr, char *page)
{
return snprintf(page, PAGE_SIZE, "%d\n",
uv_info.max_guest_cpus);
}
static struct kobj_attribute uv_query_max_guest_cpus_attr =
__ATTR(max_cpus, 0444, uv_query_max_guest_cpus, NULL);
static ssize_t uv_query_max_guest_vms(struct kobject *kobj,
struct kobj_attribute *attr, char *page)
{
return snprintf(page, PAGE_SIZE, "%d\n",
uv_info.max_num_sec_conf);
}
static struct kobj_attribute uv_query_max_guest_vms_attr =
__ATTR(max_guests, 0444, uv_query_max_guest_vms, NULL);
static ssize_t uv_query_max_guest_addr(struct kobject *kobj,
struct kobj_attribute *attr, char *page)
{
return snprintf(page, PAGE_SIZE, "%lx\n",
uv_info.max_sec_stor_addr);
}
static struct kobj_attribute uv_query_max_guest_addr_attr =
__ATTR(max_address, 0444, uv_query_max_guest_addr, NULL);
static struct attribute *uv_query_attrs[] = {
&uv_query_facilities_attr.attr,
&uv_query_max_guest_cpus_attr.attr,
&uv_query_max_guest_vms_attr.attr,
&uv_query_max_guest_addr_attr.attr,
NULL,
};
static struct attribute_group uv_query_attr_group = {
.attrs = uv_query_attrs,
};
static struct kset *uv_query_kset;
static struct kobject *uv_kobj;
static int __init uv_info_init(void)
{
int rc = -ENOMEM;
if (!test_facility(158))
return 0;
uv_kobj = kobject_create_and_add("uv", firmware_kobj);
if (!uv_kobj)
return -ENOMEM;
uv_query_kset = kset_create_and_add("query", NULL, uv_kobj);
if (!uv_query_kset)
goto out_kobj;
rc = sysfs_create_group(&uv_query_kset->kobj, &uv_query_attr_group);
if (!rc)
return 0;
kset_unregister(uv_query_kset);
out_kobj:
kobject_del(uv_kobj);
kobject_put(uv_kobj);
return rc;
}
device_initcall(uv_info_init);
#endif
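
Editor's note: the query interface above exposes the UV limits to userspace as sysfs files under /sys/firmware/uv/query/ (the "uv" kobject hangs off firmware_kobj, the "query" kset below it, and the file names come from the __ATTR definitions). A minimal userspace sketch, not part of the patch, that reads one of these attributes; the path is derived from the code above and assumes a kernel with this series applied and facility 158 present:

#include <stdio.h>

int main(void)
{
        unsigned long max_addr;
        /* attribute created by uv_query_max_guest_addr_attr above */
        FILE *f = fopen("/sys/firmware/uv/query/max_address", "r");

        if (!f)
                return 1;       /* no UV support, or an older kernel */
        if (fscanf(f, "%lx", &max_addr) == 1)
                printf("max secure guest storage address: 0x%lx\n", max_addr);
        fclose(f);
        return 0;
}

The other attributes (facilities, max_cpus, max_guests) can be read the same way.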


@ -9,6 +9,6 @@ common-objs = $(KVM)/kvm_main.o $(KVM)/eventfd.o $(KVM)/async_pf.o $(KVM)/irqch
ccflags-y := -Ivirt/kvm -Iarch/s390/kvm
kvm-objs := $(common-objs) kvm-s390.o intercept.o interrupt.o priv.o sigp.o
kvm-objs += diag.o gaccess.o guestdbg.o vsie.o
kvm-objs += diag.o gaccess.o guestdbg.o vsie.o pv.o
obj-$(CONFIG_KVM) += kvm.o


@ -2,7 +2,7 @@
/*
* handling diagnose instructions
*
* Copyright IBM Corp. 2008, 2011
* Copyright IBM Corp. 2008, 2020
*
* Author(s): Carsten Otte <cotte@de.ibm.com>
* Christian Borntraeger <borntraeger@de.ibm.com>
@ -201,6 +201,10 @@ static int __diag_ipl_functions(struct kvm_vcpu *vcpu)
return -EOPNOTSUPP;
}
/*
* no need to check the return value of vcpu_stop as it can only have
* an error for protvirt, but protvirt means user cpu state
*/
if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
kvm_s390_vcpu_stop(vcpu);
vcpu->run->s390_reset_flags |= KVM_S390_RESET_SUBSYSTEM;


@ -2,7 +2,7 @@
/*
* in-kernel handling for sie intercepts
*
* Copyright IBM Corp. 2008, 2014
* Copyright IBM Corp. 2008, 2020
*
* Author(s): Carsten Otte <cotte@de.ibm.com>
* Christian Borntraeger <borntraeger@de.ibm.com>
@ -15,6 +15,7 @@
#include <asm/asm-offsets.h>
#include <asm/irq.h>
#include <asm/sysinfo.h>
#include <asm/uv.h>
#include "kvm-s390.h"
#include "gaccess.h"
@ -78,6 +79,10 @@ static int handle_stop(struct kvm_vcpu *vcpu)
return rc;
}
/*
* no need to check the return value of vcpu_stop as it can only have
* an error for protvirt, but protvirt means user cpu state
*/
if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
kvm_s390_vcpu_stop(vcpu);
return -EOPNOTSUPP;
@ -230,6 +235,13 @@ static int handle_prog(struct kvm_vcpu *vcpu)
vcpu->stat.exit_program_interruption++;
/*
* Intercept 8 indicates a loop of specification exceptions
* for protected guests.
*/
if (kvm_s390_pv_cpu_is_protected(vcpu))
return -EOPNOTSUPP;
if (guestdbg_enabled(vcpu) && per_event(vcpu)) {
rc = kvm_s390_handle_per_event(vcpu);
if (rc)
@ -383,7 +395,7 @@ int handle_sthyi(struct kvm_vcpu *vcpu)
goto out;
}
if (addr & ~PAGE_MASK)
if (!kvm_s390_pv_cpu_is_protected(vcpu) && (addr & ~PAGE_MASK))
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
sctns = (void *)get_zeroed_page(GFP_KERNEL);
@ -394,10 +406,15 @@ int handle_sthyi(struct kvm_vcpu *vcpu)
out:
if (!cc) {
r = write_guest(vcpu, addr, reg2, sctns, PAGE_SIZE);
if (r) {
free_page((unsigned long)sctns);
return kvm_s390_inject_prog_cond(vcpu, r);
if (kvm_s390_pv_cpu_is_protected(vcpu)) {
memcpy((void *)(sida_origin(vcpu->arch.sie_block)),
sctns, PAGE_SIZE);
} else {
r = write_guest(vcpu, addr, reg2, sctns, PAGE_SIZE);
if (r) {
free_page((unsigned long)sctns);
return kvm_s390_inject_prog_cond(vcpu, r);
}
}
}
@ -443,6 +460,77 @@ static int handle_operexc(struct kvm_vcpu *vcpu)
return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
}
static int handle_pv_spx(struct kvm_vcpu *vcpu)
{
u32 pref = *(u32 *)vcpu->arch.sie_block->sidad;
kvm_s390_set_prefix(vcpu, pref);
trace_kvm_s390_handle_prefix(vcpu, 1, pref);
return 0;
}
static int handle_pv_sclp(struct kvm_vcpu *vcpu)
{
struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
spin_lock(&fi->lock);
/*
* 2 cases:
* a: an sccb answering interrupt was already pending or in flight.
* As the sccb value is not known we can simply set some value to
* trigger delivery of a saved SCCB. UV will then use its saved
* copy of the SCCB value.
* b: an error SCCB interrupt needs to be injected so we also inject
* a fake SCCB address. Firmware will use the proper one.
* This makes sure that both errors and real sccb returns will only
* be delivered after a notification intercept (instruction has
* finished) but not after others.
*/
fi->srv_signal.ext_params |= 0x43000;
set_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs);
clear_bit(IRQ_PEND_EXT_SERVICE, &fi->masked_irqs);
spin_unlock(&fi->lock);
return 0;
}
static int handle_pv_uvc(struct kvm_vcpu *vcpu)
{
struct uv_cb_share *guest_uvcb = (void *)vcpu->arch.sie_block->sidad;
struct uv_cb_cts uvcb = {
.header.cmd = UVC_CMD_UNPIN_PAGE_SHARED,
.header.len = sizeof(uvcb),
.guest_handle = kvm_s390_pv_get_handle(vcpu->kvm),
.gaddr = guest_uvcb->paddr,
};
int rc;
if (guest_uvcb->header.cmd != UVC_CMD_REMOVE_SHARED_ACCESS) {
WARN_ONCE(1, "Unexpected notification intercept for UVC 0x%x\n",
guest_uvcb->header.cmd);
return 0;
}
rc = gmap_make_secure(vcpu->arch.gmap, uvcb.gaddr, &uvcb);
/*
* If the unpin did not succeed, the guest will exit again for the UVC
* and we will retry the unpin.
*/
if (rc == -EINVAL)
return 0;
return rc;
}
static int handle_pv_notification(struct kvm_vcpu *vcpu)
{
if (vcpu->arch.sie_block->ipa == 0xb210)
return handle_pv_spx(vcpu);
if (vcpu->arch.sie_block->ipa == 0xb220)
return handle_pv_sclp(vcpu);
if (vcpu->arch.sie_block->ipa == 0xb9a4)
return handle_pv_uvc(vcpu);
return handle_instruction(vcpu);
}
int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu)
{
int rc, per_rc = 0;
@ -479,6 +567,28 @@ int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu)
case ICPT_KSS:
rc = kvm_s390_skey_check_enable(vcpu);
break;
case ICPT_MCHKREQ:
case ICPT_INT_ENABLE:
/*
* PSW bit 13 or a CR (0, 6, 14) changed and we might
* now be able to deliver interrupts. The pre-run code
* will take care of this.
*/
rc = 0;
break;
case ICPT_PV_INSTR:
rc = handle_instruction(vcpu);
break;
case ICPT_PV_NOTIFY:
rc = handle_pv_notification(vcpu);
break;
case ICPT_PV_PREF:
rc = 0;
gmap_convert_to_secure(vcpu->arch.gmap,
kvm_s390_get_prefix(vcpu));
gmap_convert_to_secure(vcpu->arch.gmap,
kvm_s390_get_prefix(vcpu) + PAGE_SIZE);
break;
default:
return -EOPNOTSUPP;
}


@ -2,7 +2,7 @@
/*
* handling kvm guest interrupts
*
* Copyright IBM Corp. 2008, 2015
* Copyright IBM Corp. 2008, 2020
*
* Author(s): Carsten Otte <cotte@de.ibm.com>
*/
@ -324,8 +324,11 @@ static inline int gisa_tac_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc)
static inline unsigned long pending_irqs_no_gisa(struct kvm_vcpu *vcpu)
{
return vcpu->kvm->arch.float_int.pending_irqs |
vcpu->arch.local_int.pending_irqs;
unsigned long pending = vcpu->kvm->arch.float_int.pending_irqs |
vcpu->arch.local_int.pending_irqs;
pending &= ~vcpu->kvm->arch.float_int.masked_irqs;
return pending;
}
static inline unsigned long pending_irqs(struct kvm_vcpu *vcpu)
@ -383,10 +386,18 @@ static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu)
__clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &active_mask);
if (!(vcpu->arch.sie_block->gcr[0] & CR0_CPU_TIMER_SUBMASK))
__clear_bit(IRQ_PEND_EXT_CPU_TIMER, &active_mask);
if (!(vcpu->arch.sie_block->gcr[0] & CR0_SERVICE_SIGNAL_SUBMASK))
if (!(vcpu->arch.sie_block->gcr[0] & CR0_SERVICE_SIGNAL_SUBMASK)) {
__clear_bit(IRQ_PEND_EXT_SERVICE, &active_mask);
__clear_bit(IRQ_PEND_EXT_SERVICE_EV, &active_mask);
}
if (psw_mchk_disabled(vcpu))
active_mask &= ~IRQ_PEND_MCHK_MASK;
/* PV guest cpus can have a single interruption injected at a time. */
if (kvm_s390_pv_cpu_is_protected(vcpu) &&
vcpu->arch.sie_block->iictl != IICTL_CODE_NONE)
active_mask &= ~(IRQ_PEND_EXT_II_MASK |
IRQ_PEND_IO_MASK |
IRQ_PEND_MCHK_MASK);
/*
* Check both floating and local interrupt's cr14 because
* bit IRQ_PEND_MCHK_REP could be set in both cases.
@ -479,19 +490,23 @@ static void set_intercept_indicators(struct kvm_vcpu *vcpu)
static int __must_check __deliver_cpu_timer(struct kvm_vcpu *vcpu)
{
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
int rc;
int rc = 0;
vcpu->stat.deliver_cputm++;
trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER,
0, 0);
rc = put_guest_lc(vcpu, EXT_IRQ_CPU_TIMER,
(u16 *)__LC_EXT_INT_CODE);
rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
&vcpu->arch.sie_block->gpsw, sizeof(psw_t));
rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
&vcpu->arch.sie_block->gpsw, sizeof(psw_t));
if (kvm_s390_pv_cpu_is_protected(vcpu)) {
vcpu->arch.sie_block->iictl = IICTL_CODE_EXT;
vcpu->arch.sie_block->eic = EXT_IRQ_CPU_TIMER;
} else {
rc = put_guest_lc(vcpu, EXT_IRQ_CPU_TIMER,
(u16 *)__LC_EXT_INT_CODE);
rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
&vcpu->arch.sie_block->gpsw, sizeof(psw_t));
rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
&vcpu->arch.sie_block->gpsw, sizeof(psw_t));
}
clear_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
return rc ? -EFAULT : 0;
}
@ -499,19 +514,23 @@ static int __must_check __deliver_cpu_timer(struct kvm_vcpu *vcpu)
static int __must_check __deliver_ckc(struct kvm_vcpu *vcpu)
{
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
int rc;
int rc = 0;
vcpu->stat.deliver_ckc++;
trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP,
0, 0);
rc = put_guest_lc(vcpu, EXT_IRQ_CLK_COMP,
(u16 __user *)__LC_EXT_INT_CODE);
rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
&vcpu->arch.sie_block->gpsw, sizeof(psw_t));
rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
&vcpu->arch.sie_block->gpsw, sizeof(psw_t));
if (kvm_s390_pv_cpu_is_protected(vcpu)) {
vcpu->arch.sie_block->iictl = IICTL_CODE_EXT;
vcpu->arch.sie_block->eic = EXT_IRQ_CLK_COMP;
} else {
rc = put_guest_lc(vcpu, EXT_IRQ_CLK_COMP,
(u16 __user *)__LC_EXT_INT_CODE);
rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
&vcpu->arch.sie_block->gpsw, sizeof(psw_t));
rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
&vcpu->arch.sie_block->gpsw, sizeof(psw_t));
}
clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
return rc ? -EFAULT : 0;
}
@ -553,6 +572,20 @@ static int __write_machine_check(struct kvm_vcpu *vcpu,
union mci mci;
int rc;
/*
* All other possible payload for a machine check (e.g. the register
* contents in the save area) will be handled by the ultravisor, as
* the hypervisor does not have the needed information for
* protected guests.
*/
if (kvm_s390_pv_cpu_is_protected(vcpu)) {
vcpu->arch.sie_block->iictl = IICTL_CODE_MCHK;
vcpu->arch.sie_block->mcic = mchk->mcic;
vcpu->arch.sie_block->faddr = mchk->failing_storage_address;
vcpu->arch.sie_block->edc = mchk->ext_damage_code;
return 0;
}
mci.val = mchk->mcic;
/* take care of lazy register loading */
save_fpu_regs();
@ -696,17 +729,21 @@ static int __must_check __deliver_machine_check(struct kvm_vcpu *vcpu)
static int __must_check __deliver_restart(struct kvm_vcpu *vcpu)
{
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
int rc;
int rc = 0;
VCPU_EVENT(vcpu, 3, "%s", "deliver: cpu restart");
vcpu->stat.deliver_restart_signal++;
trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0);
rc = write_guest_lc(vcpu,
offsetof(struct lowcore, restart_old_psw),
&vcpu->arch.sie_block->gpsw, sizeof(psw_t));
rc |= read_guest_lc(vcpu, offsetof(struct lowcore, restart_psw),
&vcpu->arch.sie_block->gpsw, sizeof(psw_t));
if (kvm_s390_pv_cpu_is_protected(vcpu)) {
vcpu->arch.sie_block->iictl = IICTL_CODE_RESTART;
} else {
rc = write_guest_lc(vcpu,
offsetof(struct lowcore, restart_old_psw),
&vcpu->arch.sie_block->gpsw, sizeof(psw_t));
rc |= read_guest_lc(vcpu, offsetof(struct lowcore, restart_psw),
&vcpu->arch.sie_block->gpsw, sizeof(psw_t));
}
clear_bit(IRQ_PEND_RESTART, &li->pending_irqs);
return rc ? -EFAULT : 0;
}
@ -748,6 +785,12 @@ static int __must_check __deliver_emergency_signal(struct kvm_vcpu *vcpu)
vcpu->stat.deliver_emergency_signal++;
trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
cpu_addr, 0);
if (kvm_s390_pv_cpu_is_protected(vcpu)) {
vcpu->arch.sie_block->iictl = IICTL_CODE_EXT;
vcpu->arch.sie_block->eic = EXT_IRQ_EMERGENCY_SIG;
vcpu->arch.sie_block->extcpuaddr = cpu_addr;
return 0;
}
rc = put_guest_lc(vcpu, EXT_IRQ_EMERGENCY_SIG,
(u16 *)__LC_EXT_INT_CODE);
@ -776,6 +819,12 @@ static int __must_check __deliver_external_call(struct kvm_vcpu *vcpu)
trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
KVM_S390_INT_EXTERNAL_CALL,
extcall.code, 0);
if (kvm_s390_pv_cpu_is_protected(vcpu)) {
vcpu->arch.sie_block->iictl = IICTL_CODE_EXT;
vcpu->arch.sie_block->eic = EXT_IRQ_EXTERNAL_CALL;
vcpu->arch.sie_block->extcpuaddr = extcall.code;
return 0;
}
rc = put_guest_lc(vcpu, EXT_IRQ_EXTERNAL_CALL,
(u16 *)__LC_EXT_INT_CODE);
@ -787,6 +836,21 @@ static int __must_check __deliver_external_call(struct kvm_vcpu *vcpu)
return rc ? -EFAULT : 0;
}
static int __deliver_prog_pv(struct kvm_vcpu *vcpu, u16 code)
{
switch (code) {
case PGM_SPECIFICATION:
vcpu->arch.sie_block->iictl = IICTL_CODE_SPECIFICATION;
break;
case PGM_OPERAND:
vcpu->arch.sie_block->iictl = IICTL_CODE_OPERAND;
break;
default:
return -EINVAL;
}
return 0;
}
static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
{
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
@ -807,6 +871,10 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
pgm_info.code, 0);
/* PER is handled by the ultravisor */
if (kvm_s390_pv_cpu_is_protected(vcpu))
return __deliver_prog_pv(vcpu, pgm_info.code & ~PGM_PER);
switch (pgm_info.code & ~PGM_PER) {
case PGM_AFX_TRANSLATION:
case PGM_ASX_TRANSLATION:
@ -902,20 +970,49 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
return rc ? -EFAULT : 0;
}
#define SCCB_MASK 0xFFFFFFF8
#define SCCB_EVENT_PENDING 0x3
static int write_sclp(struct kvm_vcpu *vcpu, u32 parm)
{
int rc;
if (kvm_s390_pv_cpu_get_handle(vcpu)) {
vcpu->arch.sie_block->iictl = IICTL_CODE_EXT;
vcpu->arch.sie_block->eic = EXT_IRQ_SERVICE_SIG;
vcpu->arch.sie_block->eiparams = parm;
return 0;
}
rc = put_guest_lc(vcpu, EXT_IRQ_SERVICE_SIG, (u16 *)__LC_EXT_INT_CODE);
rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
&vcpu->arch.sie_block->gpsw, sizeof(psw_t));
rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
&vcpu->arch.sie_block->gpsw, sizeof(psw_t));
rc |= put_guest_lc(vcpu, parm,
(u32 *)__LC_EXT_PARAMS);
return rc ? -EFAULT : 0;
}
static int __must_check __deliver_service(struct kvm_vcpu *vcpu)
{
struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
struct kvm_s390_ext_info ext;
int rc = 0;
spin_lock(&fi->lock);
if (!(test_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs))) {
if (test_bit(IRQ_PEND_EXT_SERVICE, &fi->masked_irqs) ||
!(test_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs))) {
spin_unlock(&fi->lock);
return 0;
}
ext = fi->srv_signal;
memset(&fi->srv_signal, 0, sizeof(ext));
clear_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs);
clear_bit(IRQ_PEND_EXT_SERVICE_EV, &fi->pending_irqs);
if (kvm_s390_pv_cpu_is_protected(vcpu))
set_bit(IRQ_PEND_EXT_SERVICE, &fi->masked_irqs);
spin_unlock(&fi->lock);
VCPU_EVENT(vcpu, 4, "deliver: sclp parameter 0x%x",
@ -924,16 +1021,31 @@ static int __must_check __deliver_service(struct kvm_vcpu *vcpu)
trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_SERVICE,
ext.ext_params, 0);
rc = put_guest_lc(vcpu, EXT_IRQ_SERVICE_SIG, (u16 *)__LC_EXT_INT_CODE);
rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
&vcpu->arch.sie_block->gpsw, sizeof(psw_t));
rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
&vcpu->arch.sie_block->gpsw, sizeof(psw_t));
rc |= put_guest_lc(vcpu, ext.ext_params,
(u32 *)__LC_EXT_PARAMS);
return write_sclp(vcpu, ext.ext_params);
}
return rc ? -EFAULT : 0;
static int __must_check __deliver_service_ev(struct kvm_vcpu *vcpu)
{
struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
struct kvm_s390_ext_info ext;
spin_lock(&fi->lock);
if (!(test_bit(IRQ_PEND_EXT_SERVICE_EV, &fi->pending_irqs))) {
spin_unlock(&fi->lock);
return 0;
}
ext = fi->srv_signal;
/* only clear the event bit */
fi->srv_signal.ext_params &= ~SCCB_EVENT_PENDING;
clear_bit(IRQ_PEND_EXT_SERVICE_EV, &fi->pending_irqs);
spin_unlock(&fi->lock);
VCPU_EVENT(vcpu, 4, "%s", "deliver: sclp parameter event");
vcpu->stat.deliver_service_signal++;
trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_SERVICE,
ext.ext_params, 0);
return write_sclp(vcpu, SCCB_EVENT_PENDING);
}
static int __must_check __deliver_pfault_done(struct kvm_vcpu *vcpu)
@ -1028,6 +1140,15 @@ static int __do_deliver_io(struct kvm_vcpu *vcpu, struct kvm_s390_io_info *io)
{
int rc;
if (kvm_s390_pv_cpu_is_protected(vcpu)) {
vcpu->arch.sie_block->iictl = IICTL_CODE_IO;
vcpu->arch.sie_block->subchannel_id = io->subchannel_id;
vcpu->arch.sie_block->subchannel_nr = io->subchannel_nr;
vcpu->arch.sie_block->io_int_parm = io->io_int_parm;
vcpu->arch.sie_block->io_int_word = io->io_int_word;
return 0;
}
rc = put_guest_lc(vcpu, io->subchannel_id, (u16 *)__LC_SUBCHANNEL_ID);
rc |= put_guest_lc(vcpu, io->subchannel_nr, (u16 *)__LC_SUBCHANNEL_NR);
rc |= put_guest_lc(vcpu, io->io_int_parm, (u32 *)__LC_IO_INT_PARM);
@ -1329,6 +1450,9 @@ int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
case IRQ_PEND_EXT_SERVICE:
rc = __deliver_service(vcpu);
break;
case IRQ_PEND_EXT_SERVICE_EV:
rc = __deliver_service_ev(vcpu);
break;
case IRQ_PEND_PFAULT_DONE:
rc = __deliver_pfault_done(vcpu);
break;
@ -1421,7 +1545,7 @@ static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
if (kvm_get_vcpu_by_id(vcpu->kvm, src_id) == NULL)
return -EINVAL;
if (sclp.has_sigpif)
if (sclp.has_sigpif && !kvm_s390_pv_cpu_get_handle(vcpu))
return sca_inject_ext_call(vcpu, src_id);
if (test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs))
@ -1681,9 +1805,6 @@ out:
return inti;
}
#define SCCB_MASK 0xFFFFFFF8
#define SCCB_EVENT_PENDING 0x3
static int __inject_service(struct kvm *kvm,
struct kvm_s390_interrupt_info *inti)
{
@ -1692,6 +1813,11 @@ static int __inject_service(struct kvm *kvm,
kvm->stat.inject_service_signal++;
spin_lock(&fi->lock);
fi->srv_signal.ext_params |= inti->ext.ext_params & SCCB_EVENT_PENDING;
/* We always allow events, track them separately from the sccb ints */
if (fi->srv_signal.ext_params & SCCB_EVENT_PENDING)
set_bit(IRQ_PEND_EXT_SERVICE_EV, &fi->pending_irqs);
/*
* Early versions of the QEMU s390 bios will inject several
* service interrupts after another without handling a
@ -1773,7 +1899,14 @@ static int __inject_io(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
kvm->stat.inject_io++;
isc = int_word_to_isc(inti->io.io_int_word);
if (gi->origin && inti->type & KVM_S390_INT_IO_AI_MASK) {
/*
* Do not make use of gisa in protected mode. We do not use the lock
* checking variant as this is just a performance optimization and we
* do not hold the lock here. This is ok as the code will pick
* interrupts from both "lists" for delivery.
*/
if (!kvm_s390_pv_get_handle(kvm) &&
gi->origin && inti->type & KVM_S390_INT_IO_AI_MASK) {
VM_EVENT(kvm, 4, "%s isc %1u", "inject: I/O (AI/gisa)", isc);
gisa_set_ipm_gisc(gi->origin, isc);
kfree(inti);
@ -1834,7 +1967,8 @@ static void __floating_irq_kick(struct kvm *kvm, u64 type)
break;
case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
if (!(type & KVM_S390_INT_IO_AI_MASK &&
kvm->arch.gisa_int.origin))
kvm->arch.gisa_int.origin) ||
kvm_s390_pv_cpu_get_handle(dst_vcpu))
kvm_s390_set_cpuflags(dst_vcpu, CPUSTAT_IO_INT);
break;
default:
@ -2080,6 +2214,10 @@ void kvm_s390_clear_float_irqs(struct kvm *kvm)
struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
int i;
mutex_lock(&kvm->lock);
if (!kvm_s390_pv_is_protected(kvm))
fi->masked_irqs = 0;
mutex_unlock(&kvm->lock);
spin_lock(&fi->lock);
fi->pending_irqs = 0;
memset(&fi->srv_signal, 0, sizeof(fi->srv_signal));
@ -2146,7 +2284,8 @@ static int get_all_floating_irqs(struct kvm *kvm, u8 __user *usrbuf, u64 len)
n++;
}
}
if (test_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs)) {
if (test_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs) ||
test_bit(IRQ_PEND_EXT_SERVICE_EV, &fi->pending_irqs)) {
if (n == max_irqs) {
/* signal userspace to try again */
ret = -ENOMEM;
@ -2327,9 +2466,6 @@ static int register_io_adapter(struct kvm_device *dev,
if (!adapter)
return -ENOMEM;
INIT_LIST_HEAD(&adapter->maps);
init_rwsem(&adapter->maps_lock);
atomic_set(&adapter->nr_maps, 0);
adapter->id = adapter_info.id;
adapter->isc = adapter_info.isc;
adapter->maskable = adapter_info.maskable;
@ -2354,87 +2490,12 @@ int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked)
return ret;
}
static int kvm_s390_adapter_map(struct kvm *kvm, unsigned int id, __u64 addr)
{
struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
struct s390_map_info *map;
int ret;
if (!adapter || !addr)
return -EINVAL;
map = kzalloc(sizeof(*map), GFP_KERNEL);
if (!map) {
ret = -ENOMEM;
goto out;
}
INIT_LIST_HEAD(&map->list);
map->guest_addr = addr;
map->addr = gmap_translate(kvm->arch.gmap, addr);
if (map->addr == -EFAULT) {
ret = -EFAULT;
goto out;
}
ret = get_user_pages_fast(map->addr, 1, FOLL_WRITE, &map->page);
if (ret < 0)
goto out;
BUG_ON(ret != 1);
down_write(&adapter->maps_lock);
if (atomic_inc_return(&adapter->nr_maps) < MAX_S390_ADAPTER_MAPS) {
list_add_tail(&map->list, &adapter->maps);
ret = 0;
} else {
put_page(map->page);
ret = -EINVAL;
}
up_write(&adapter->maps_lock);
out:
if (ret)
kfree(map);
return ret;
}
static int kvm_s390_adapter_unmap(struct kvm *kvm, unsigned int id, __u64 addr)
{
struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
struct s390_map_info *map, *tmp;
int found = 0;
if (!adapter || !addr)
return -EINVAL;
down_write(&adapter->maps_lock);
list_for_each_entry_safe(map, tmp, &adapter->maps, list) {
if (map->guest_addr == addr) {
found = 1;
atomic_dec(&adapter->nr_maps);
list_del(&map->list);
put_page(map->page);
kfree(map);
break;
}
}
up_write(&adapter->maps_lock);
return found ? 0 : -EINVAL;
}
void kvm_s390_destroy_adapters(struct kvm *kvm)
{
int i;
struct s390_map_info *map, *tmp;
for (i = 0; i < MAX_S390_IO_ADAPTERS; i++) {
if (!kvm->arch.adapters[i])
continue;
list_for_each_entry_safe(map, tmp,
&kvm->arch.adapters[i]->maps, list) {
list_del(&map->list);
put_page(map->page);
kfree(map);
}
for (i = 0; i < MAX_S390_IO_ADAPTERS; i++)
kfree(kvm->arch.adapters[i]);
}
}
static int modify_io_adapter(struct kvm_device *dev,
@ -2456,11 +2517,14 @@ static int modify_io_adapter(struct kvm_device *dev,
if (ret > 0)
ret = 0;
break;
/*
* The following operations are no longer needed and therefore no-ops.
* The gpa to hva translation is done when an IRQ route is set up. The
* set_irq code uses get_user_pages_remote() to do the actual write.
*/
case KVM_S390_IO_ADAPTER_MAP:
ret = kvm_s390_adapter_map(dev->kvm, req.id, req.addr);
break;
case KVM_S390_IO_ADAPTER_UNMAP:
ret = kvm_s390_adapter_unmap(dev->kvm, req.id, req.addr);
ret = 0;
break;
default:
ret = -EINVAL;
@ -2699,19 +2763,15 @@ static unsigned long get_ind_bit(__u64 addr, unsigned long bit_nr, bool swap)
return swap ? (bit ^ (BITS_PER_LONG - 1)) : bit;
}
static struct s390_map_info *get_map_info(struct s390_io_adapter *adapter,
u64 addr)
static struct page *get_map_page(struct kvm *kvm, u64 uaddr)
{
struct s390_map_info *map;
struct page *page = NULL;
if (!adapter)
return NULL;
list_for_each_entry(map, &adapter->maps, list) {
if (map->guest_addr == addr)
return map;
}
return NULL;
down_read(&kvm->mm->mmap_sem);
get_user_pages_remote(NULL, kvm->mm, uaddr, 1, FOLL_WRITE,
&page, NULL, NULL);
up_read(&kvm->mm->mmap_sem);
return page;
}
static int adapter_indicators_set(struct kvm *kvm,
@ -2720,30 +2780,35 @@ static int adapter_indicators_set(struct kvm *kvm,
{
unsigned long bit;
int summary_set, idx;
struct s390_map_info *info;
struct page *ind_page, *summary_page;
void *map;
info = get_map_info(adapter, adapter_int->ind_addr);
if (!info)
ind_page = get_map_page(kvm, adapter_int->ind_addr);
if (!ind_page)
return -1;
map = page_address(info->page);
bit = get_ind_bit(info->addr, adapter_int->ind_offset, adapter->swap);
set_bit(bit, map);
idx = srcu_read_lock(&kvm->srcu);
mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT);
set_page_dirty_lock(info->page);
info = get_map_info(adapter, adapter_int->summary_addr);
if (!info) {
srcu_read_unlock(&kvm->srcu, idx);
summary_page = get_map_page(kvm, adapter_int->summary_addr);
if (!summary_page) {
put_page(ind_page);
return -1;
}
map = page_address(info->page);
bit = get_ind_bit(info->addr, adapter_int->summary_offset,
adapter->swap);
idx = srcu_read_lock(&kvm->srcu);
map = page_address(ind_page);
bit = get_ind_bit(adapter_int->ind_addr,
adapter_int->ind_offset, adapter->swap);
set_bit(bit, map);
mark_page_dirty(kvm, adapter_int->ind_addr >> PAGE_SHIFT);
set_page_dirty_lock(ind_page);
map = page_address(summary_page);
bit = get_ind_bit(adapter_int->summary_addr,
adapter_int->summary_offset, adapter->swap);
summary_set = test_and_set_bit(bit, map);
mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT);
set_page_dirty_lock(info->page);
mark_page_dirty(kvm, adapter_int->summary_addr >> PAGE_SHIFT);
set_page_dirty_lock(summary_page);
srcu_read_unlock(&kvm->srcu, idx);
put_page(ind_page);
put_page(summary_page);
return summary_set ? 0 : 1;
}
@ -2765,9 +2830,7 @@ static int set_adapter_int(struct kvm_kernel_irq_routing_entry *e,
adapter = get_io_adapter(kvm, e->adapter.adapter_id);
if (!adapter)
return -1;
down_read(&adapter->maps_lock);
ret = adapter_indicators_set(kvm, adapter, &e->adapter);
up_read(&adapter->maps_lock);
if ((ret > 0) && !adapter->masked) {
ret = kvm_s390_inject_airq(kvm, adapter);
if (ret == 0)
@ -2818,23 +2881,27 @@ int kvm_set_routing_entry(struct kvm *kvm,
struct kvm_kernel_irq_routing_entry *e,
const struct kvm_irq_routing_entry *ue)
{
int ret;
u64 uaddr;
switch (ue->type) {
/* we store the userspace addresses instead of the guest addresses */
case KVM_IRQ_ROUTING_S390_ADAPTER:
e->set = set_adapter_int;
e->adapter.summary_addr = ue->u.adapter.summary_addr;
e->adapter.ind_addr = ue->u.adapter.ind_addr;
uaddr = gmap_translate(kvm->arch.gmap, ue->u.adapter.summary_addr);
if (uaddr == -EFAULT)
return -EFAULT;
e->adapter.summary_addr = uaddr;
uaddr = gmap_translate(kvm->arch.gmap, ue->u.adapter.ind_addr);
if (uaddr == -EFAULT)
return -EFAULT;
e->adapter.ind_addr = uaddr;
e->adapter.summary_offset = ue->u.adapter.summary_offset;
e->adapter.ind_offset = ue->u.adapter.ind_offset;
e->adapter.adapter_id = ue->u.adapter.adapter_id;
ret = 0;
break;
return 0;
default:
ret = -EINVAL;
return -EINVAL;
}
return ret;
}
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm,


@ -2,7 +2,7 @@
/*
* hosting IBM Z kernel virtual machines (s390x)
*
* Copyright IBM Corp. 2008, 2018
* Copyright IBM Corp. 2008, 2020
*
* Author(s): Carsten Otte <cotte@de.ibm.com>
* Christian Borntraeger <borntraeger@de.ibm.com>
@ -44,6 +44,7 @@
#include <asm/cpacf.h>
#include <asm/timex.h>
#include <asm/ap.h>
#include <asm/uv.h>
#include "kvm-s390.h"
#include "gaccess.h"
@ -184,6 +185,11 @@ static u8 halt_poll_max_steal = 10;
module_param(halt_poll_max_steal, byte, 0644);
MODULE_PARM_DESC(halt_poll_max_steal, "Maximum percentage of steal time to allow polling");
/* if set to true, the GISA will be initialized and used if available */
static bool use_gisa = true;
module_param(use_gisa, bool, 0644);
MODULE_PARM_DESC(use_gisa, "Use the GISA if the host supports it.");
/*
* For now we handle at most 16 double words as this is what the s390 base
* kernel handles and stores in the prefix page. If we ever need to go beyond
@ -220,6 +226,7 @@ static struct kvm_s390_vm_cpu_subfunc kvm_s390_available_subfunc;
static struct gmap_notifier gmap_notifier;
static struct gmap_notifier vsie_gmap_notifier;
debug_info_t *kvm_s390_dbf;
debug_info_t *kvm_s390_dbf_uv;
/* Section: not file related */
int kvm_arch_hardware_enable(void)
@ -233,8 +240,10 @@ int kvm_arch_check_processor_compat(void)
return 0;
}
/* forward declarations */
static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
unsigned long end);
static int sca_switch_to_extended(struct kvm *kvm);
static void kvm_clock_sync_scb(struct kvm_s390_sie_block *scb, u64 delta)
{
@ -460,7 +469,12 @@ int kvm_arch_init(void *opaque)
if (!kvm_s390_dbf)
return -ENOMEM;
if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view))
kvm_s390_dbf_uv = debug_register("kvm-uv", 32, 1, 7 * sizeof(long));
if (!kvm_s390_dbf_uv)
goto out;
if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view) ||
debug_register_view(kvm_s390_dbf_uv, &debug_sprintf_view))
goto out;
kvm_s390_cpu_feat_init();
@ -487,6 +501,7 @@ void kvm_arch_exit(void)
{
kvm_s390_gib_destroy();
debug_unregister(kvm_s390_dbf);
debug_unregister(kvm_s390_dbf_uv);
}
/* Section: device related */
@ -564,6 +579,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_S390_BPB:
r = test_facility(82);
break;
case KVM_CAP_S390_PROTECTED:
r = is_prot_virt_host();
break;
default:
r = 0;
}
@ -2149,6 +2167,194 @@ out:
return r;
}
static int kvm_s390_cpus_from_pv(struct kvm *kvm, u16 *rcp, u16 *rrcp)
{
struct kvm_vcpu *vcpu;
u16 rc, rrc;
int ret = 0;
int i;
/*
* We ignore failures and try to destroy as many CPUs as possible.
* At the same time we must not free the assigned resources when
* this fails, as the ultravisor still has access to that memory.
* So kvm_s390_pv_destroy_cpu can leave a "wanted" memory leak
* behind.
* We want to return the first failure rc and rrc, though.
*/
kvm_for_each_vcpu(i, vcpu, kvm) {
mutex_lock(&vcpu->mutex);
if (kvm_s390_pv_destroy_cpu(vcpu, &rc, &rrc) && !ret) {
*rcp = rc;
*rrcp = rrc;
ret = -EIO;
}
mutex_unlock(&vcpu->mutex);
}
return ret;
}
static int kvm_s390_cpus_to_pv(struct kvm *kvm, u16 *rc, u16 *rrc)
{
int i, r = 0;
u16 dummy;
struct kvm_vcpu *vcpu;
kvm_for_each_vcpu(i, vcpu, kvm) {
mutex_lock(&vcpu->mutex);
r = kvm_s390_pv_create_cpu(vcpu, rc, rrc);
mutex_unlock(&vcpu->mutex);
if (r)
break;
}
if (r)
kvm_s390_cpus_from_pv(kvm, &dummy, &dummy);
return r;
}
static int kvm_s390_handle_pv(struct kvm *kvm, struct kvm_pv_cmd *cmd)
{
int r = 0;
u16 dummy;
void __user *argp = (void __user *)cmd->data;
switch (cmd->cmd) {
case KVM_PV_ENABLE: {
r = -EINVAL;
if (kvm_s390_pv_is_protected(kvm))
break;
/*
* FMT 4 SIE needs esca. As we never switch back to bsca from
* esca, we need no cleanup in the error cases below
*/
r = sca_switch_to_extended(kvm);
if (r)
break;
down_write(&current->mm->mmap_sem);
r = gmap_mark_unmergeable();
up_write(&current->mm->mmap_sem);
if (r)
break;
r = kvm_s390_pv_init_vm(kvm, &cmd->rc, &cmd->rrc);
if (r)
break;
r = kvm_s390_cpus_to_pv(kvm, &cmd->rc, &cmd->rrc);
if (r)
kvm_s390_pv_deinit_vm(kvm, &dummy, &dummy);
/* we need to block service interrupts from now on */
set_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs);
break;
}
case KVM_PV_DISABLE: {
r = -EINVAL;
if (!kvm_s390_pv_is_protected(kvm))
break;
r = kvm_s390_cpus_from_pv(kvm, &cmd->rc, &cmd->rrc);
/*
* If a CPU could not be destroyed, destroy VM will also fail.
* There is no point in trying to destroy it. Instead return
* the rc and rrc from the first CPU that failed destroying.
*/
if (r)
break;
r = kvm_s390_pv_deinit_vm(kvm, &cmd->rc, &cmd->rrc);
/* no need to block service interrupts any more */
clear_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs);
break;
}
case KVM_PV_SET_SEC_PARMS: {
struct kvm_s390_pv_sec_parm parms = {};
void *hdr;
r = -EINVAL;
if (!kvm_s390_pv_is_protected(kvm))
break;
r = -EFAULT;
if (copy_from_user(&parms, argp, sizeof(parms)))
break;
/* Currently restricted to 8KB */
r = -EINVAL;
if (parms.length > PAGE_SIZE * 2)
break;
r = -ENOMEM;
hdr = vmalloc(parms.length);
if (!hdr)
break;
r = -EFAULT;
if (!copy_from_user(hdr, (void __user *)parms.origin,
parms.length))
r = kvm_s390_pv_set_sec_parms(kvm, hdr, parms.length,
&cmd->rc, &cmd->rrc);
vfree(hdr);
break;
}
case KVM_PV_UNPACK: {
struct kvm_s390_pv_unp unp = {};
r = -EINVAL;
if (!kvm_s390_pv_is_protected(kvm))
break;
r = -EFAULT;
if (copy_from_user(&unp, argp, sizeof(unp)))
break;
r = kvm_s390_pv_unpack(kvm, unp.addr, unp.size, unp.tweak,
&cmd->rc, &cmd->rrc);
break;
}
case KVM_PV_VERIFY: {
r = -EINVAL;
if (!kvm_s390_pv_is_protected(kvm))
break;
r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
UVC_CMD_VERIFY_IMG, &cmd->rc, &cmd->rrc);
KVM_UV_EVENT(kvm, 3, "PROTVIRT VERIFY: rc %x rrc %x", cmd->rc,
cmd->rrc);
break;
}
case KVM_PV_PREP_RESET: {
r = -EINVAL;
if (!kvm_s390_pv_is_protected(kvm))
break;
r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
UVC_CMD_PREPARE_RESET, &cmd->rc, &cmd->rrc);
KVM_UV_EVENT(kvm, 3, "PROTVIRT PREP RESET: rc %x rrc %x",
cmd->rc, cmd->rrc);
break;
}
case KVM_PV_UNSHARE_ALL: {
r = -EINVAL;
if (!kvm_s390_pv_is_protected(kvm))
break;
r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
UVC_CMD_SET_UNSHARE_ALL, &cmd->rc, &cmd->rrc);
KVM_UV_EVENT(kvm, 3, "PROTVIRT UNSHARE: rc %x rrc %x",
cmd->rc, cmd->rrc);
break;
}
default:
r = -ENOTTY;
}
return r;
}
long kvm_arch_vm_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{
@ -2246,6 +2452,33 @@ long kvm_arch_vm_ioctl(struct file *filp,
mutex_unlock(&kvm->slots_lock);
break;
}
case KVM_S390_PV_COMMAND: {
struct kvm_pv_cmd args;
/* protvirt means user sigp */
kvm->arch.user_cpu_state_ctrl = 1;
r = 0;
if (!is_prot_virt_host()) {
r = -EINVAL;
break;
}
if (copy_from_user(&args, argp, sizeof(args))) {
r = -EFAULT;
break;
}
if (args.flags) {
r = -EINVAL;
break;
}
mutex_lock(&kvm->lock);
r = kvm_s390_handle_pv(kvm, &args);
mutex_unlock(&kvm->lock);
if (copy_to_user(argp, &args, sizeof(args))) {
r = -EFAULT;
break;
}
break;
}
default:
r = -ENOTTY;
}
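
Editor's note: the new KVM_S390_PV_COMMAND ioctl is the entry point a VMM uses to drive the transitions handled by kvm_s390_handle_pv() above. A hedged sketch, not part of the patch, of switching a freshly created VM into protected mode; it assumes the uapi definitions for struct kvm_pv_cmd and KVM_PV_ENABLE from this series and elides error recovery:

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* vm_fd is the file descriptor returned by KVM_CREATE_VM */
static int pv_enable(int vm_fd)
{
        struct kvm_pv_cmd cmd = {
                .cmd = KVM_PV_ENABLE,
                /* flags must be 0, the ioctl rejects anything else */
        };
        int r = ioctl(vm_fd, KVM_S390_PV_COMMAND, &cmd);

        if (r)
                fprintf(stderr, "KVM_PV_ENABLE failed: rc 0x%x rrc 0x%x\n",
                        cmd.rc, cmd.rrc);
        return r;
}

In practice KVM_PV_SET_SEC_PARMS, KVM_PV_UNPACK and KVM_PV_VERIFY would follow to load and validate the encrypted image before the guest is started.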
@ -2495,7 +2728,8 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
kvm->arch.use_skf = sclp.has_skey;
spin_lock_init(&kvm->arch.start_stop_lock);
kvm_s390_vsie_init(kvm);
kvm_s390_gisa_init(kvm);
if (use_gisa)
kvm_s390_gisa_init(kvm);
KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid);
return 0;
@ -2509,6 +2743,8 @@ out_err:
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
u16 rc, rrc;
VCPU_EVENT(vcpu, 3, "%s", "free cpu");
trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
kvm_s390_clear_local_irqs(vcpu);
@ -2521,6 +2757,9 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
if (vcpu->kvm->arch.use_cmma)
kvm_s390_vcpu_unsetup_cmma(vcpu);
/* We can not hold the vcpu mutex here, we are already dying */
if (kvm_s390_pv_cpu_get_handle(vcpu))
kvm_s390_pv_destroy_cpu(vcpu, &rc, &rrc);
free_page((unsigned long)(vcpu->arch.sie_block));
}
@ -2542,10 +2781,20 @@ static void kvm_free_vcpus(struct kvm *kvm)
void kvm_arch_destroy_vm(struct kvm *kvm)
{
u16 rc, rrc;
kvm_free_vcpus(kvm);
sca_dispose(kvm);
debug_unregister(kvm->arch.dbf);
kvm_s390_gisa_destroy(kvm);
/*
* We are already at the end of life and kvm->lock is not taken.
* This is ok as the file descriptor is closed by now and nobody
* can mess with the pv state. To avoid lockdep_assert_held from
* complaining we do not use kvm_s390_pv_is_protected.
*/
if (kvm_s390_pv_get_handle(kvm))
kvm_s390_pv_deinit_vm(kvm, &rc, &rrc);
debug_unregister(kvm->arch.dbf);
free_page((unsigned long)kvm->arch.sie_page2);
if (!kvm_is_ucontrol(kvm))
gmap_remove(kvm->arch.gmap);
@ -2641,6 +2890,9 @@ static int sca_switch_to_extended(struct kvm *kvm)
unsigned int vcpu_idx;
u32 scaol, scaoh;
if (kvm->arch.use_esca)
return 0;
new_sca = alloc_pages_exact(sizeof(*new_sca), GFP_KERNEL|__GFP_ZERO);
if (!new_sca)
return -ENOMEM;
@ -2892,6 +3144,7 @@ static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
static int kvm_s390_vcpu_setup(struct kvm_vcpu *vcpu)
{
int rc = 0;
u16 uvrc, uvrrc;
atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
CPUSTAT_SM |
@ -2959,6 +3212,14 @@ static int kvm_s390_vcpu_setup(struct kvm_vcpu *vcpu)
kvm_s390_vcpu_crypto_setup(vcpu);
mutex_lock(&vcpu->kvm->lock);
if (kvm_s390_pv_is_protected(vcpu->kvm)) {
rc = kvm_s390_pv_create_cpu(vcpu, &uvrc, &uvrrc);
if (rc)
kvm_s390_vcpu_unsetup_cmma(vcpu);
}
mutex_unlock(&vcpu->kvm->lock);
return rc;
}
@ -3265,14 +3526,21 @@ static void kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
kvm_s390_set_prefix(vcpu, 0);
kvm_s390_set_cpu_timer(vcpu, 0);
vcpu->arch.sie_block->ckc = 0;
vcpu->arch.sie_block->todpr = 0;
memset(vcpu->arch.sie_block->gcr, 0, sizeof(vcpu->arch.sie_block->gcr));
vcpu->arch.sie_block->gcr[0] = CR0_INITIAL_MASK;
vcpu->arch.sie_block->gcr[14] = CR14_INITIAL_MASK;
vcpu->run->s.regs.fpc = 0;
vcpu->arch.sie_block->gbea = 1;
vcpu->arch.sie_block->pp = 0;
vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
/*
* Do not reset these registers in the protected case, as some of
* them are overlaid and they are not accessible in this case
* anyway.
*/
if (!kvm_s390_pv_cpu_is_protected(vcpu)) {
vcpu->arch.sie_block->gbea = 1;
vcpu->arch.sie_block->pp = 0;
vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
vcpu->arch.sie_block->todpr = 0;
}
}
static void kvm_arch_vcpu_ioctl_clear_reset(struct kvm_vcpu *vcpu)
@ -3462,12 +3730,18 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
switch (mp_state->mp_state) {
case KVM_MP_STATE_STOPPED:
kvm_s390_vcpu_stop(vcpu);
rc = kvm_s390_vcpu_stop(vcpu);
break;
case KVM_MP_STATE_OPERATING:
kvm_s390_vcpu_start(vcpu);
rc = kvm_s390_vcpu_start(vcpu);
break;
case KVM_MP_STATE_LOAD:
if (!kvm_s390_pv_cpu_is_protected(vcpu)) {
rc = -ENXIO;
break;
}
rc = kvm_s390_pv_set_cpu_state(vcpu, PV_CPU_STATE_OPR_LOAD);
break;
case KVM_MP_STATE_CHECK_STOP:
/* fall through - CHECK_STOP and LOAD are not supported yet */
default:
@ -3819,9 +4093,11 @@ static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
return vcpu_post_run_fault_in_sie(vcpu);
}
#define PSW_INT_MASK (PSW_MASK_EXT | PSW_MASK_IO | PSW_MASK_MCHECK)
static int __vcpu_run(struct kvm_vcpu *vcpu)
{
int rc, exit_reason;
struct sie_page *sie_page = (struct sie_page *)vcpu->arch.sie_block;
/*
* We try to hold kvm->srcu during most of vcpu_run (except when run-
@ -3843,8 +4119,28 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
guest_enter_irqoff();
__disable_cpu_timer_accounting(vcpu);
local_irq_enable();
if (kvm_s390_pv_cpu_is_protected(vcpu)) {
memcpy(sie_page->pv_grregs,
vcpu->run->s.regs.gprs,
sizeof(sie_page->pv_grregs));
}
exit_reason = sie64a(vcpu->arch.sie_block,
vcpu->run->s.regs.gprs);
if (kvm_s390_pv_cpu_is_protected(vcpu)) {
memcpy(vcpu->run->s.regs.gprs,
sie_page->pv_grregs,
sizeof(sie_page->pv_grregs));
/*
* We're not allowed to inject interrupts on intercepts
* that leave the guest state in an "in-between" state
* where the next SIE entry will do a continuation.
* Fence interrupts in our "internal" PSW.
*/
if (vcpu->arch.sie_block->icptcode == ICPT_PV_INSTR ||
vcpu->arch.sie_block->icptcode == ICPT_PV_PREF) {
vcpu->arch.sie_block->gpsw.mask &= ~PSW_INT_MASK;
}
}
local_irq_disable();
__enable_cpu_timer_accounting(vcpu);
guest_exit_irqoff();
@ -3858,7 +4154,7 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
return rc;
}
static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
static void sync_regs_fmt2(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
struct runtime_instr_cb *riccb;
struct gs_cb *gscb;
@ -3867,16 +4163,7 @@ static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
gscb = (struct gs_cb *) &kvm_run->s.regs.gscb;
vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
/* some control register changes require a tlb flush */
kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
}
if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm);
vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
@ -3917,20 +4204,6 @@ static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
vcpu->arch.sie_block->fpf |= kvm_run->s.regs.bpbc ? FPF_BPBC : 0;
}
save_access_regs(vcpu->arch.host_acrs);
restore_access_regs(vcpu->run->s.regs.acrs);
/* save host (userspace) fprs/vrs */
save_fpu_regs();
vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;
if (MACHINE_HAS_VX)
current->thread.fpu.regs = vcpu->run->s.regs.vrs;
else
current->thread.fpu.regs = vcpu->run->s.regs.fprs;
current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
if (test_fp_ctl(current->thread.fpu.fpc))
/* User space provided an invalid FPC, let's clear it */
current->thread.fpu.fpc = 0;
if (MACHINE_HAS_GS) {
preempt_disable();
__ctl_set_bit(2, 4);
@ -3946,33 +4219,63 @@ static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
preempt_enable();
}
/* SIE will load etoken directly from SDNX and therefore kvm_run */
}
static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
/* some control register changes require a tlb flush */
kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
}
if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm);
vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
}
save_access_regs(vcpu->arch.host_acrs);
restore_access_regs(vcpu->run->s.regs.acrs);
/* save host (userspace) fprs/vrs */
save_fpu_regs();
vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;
if (MACHINE_HAS_VX)
current->thread.fpu.regs = vcpu->run->s.regs.vrs;
else
current->thread.fpu.regs = vcpu->run->s.regs.fprs;
current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
if (test_fp_ctl(current->thread.fpu.fpc))
/* User space provided an invalid FPC, let's clear it */
current->thread.fpu.fpc = 0;
/* Sync fmt2 only data */
if (likely(!kvm_s390_pv_cpu_is_protected(vcpu))) {
sync_regs_fmt2(vcpu, kvm_run);
} else {
/*
* In several places we have to modify our internal view to
* not do things that are disallowed by the ultravisor. For
* example we must not inject interrupts after specific exits
* (e.g. 112 prefix page not secure). We do this by turning
* off the machine check, external and I/O interrupt bits
* of our PSW copy. To avoid getting validity intercepts, we
* only accept the condition code from userspace.
*/
vcpu->arch.sie_block->gpsw.mask &= ~PSW_MASK_CC;
vcpu->arch.sie_block->gpsw.mask |= kvm_run->psw_mask &
PSW_MASK_CC;
}
kvm_run->kvm_dirty_regs = 0;
}
static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
static void store_regs_fmt2(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu);
kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
kvm_run->s.regs.pft = vcpu->arch.pfault_token;
kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC;
save_access_regs(vcpu->run->s.regs.acrs);
restore_access_regs(vcpu->arch.host_acrs);
/* Save guest register state */
save_fpu_regs();
vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
/* Restore will be done lazily at return */
current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;
if (MACHINE_HAS_GS) {
__ctl_set_bit(2, 4);
if (vcpu->arch.gs_enabled)
@ -3988,6 +4291,29 @@ static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
/* SIE will save etoken directly into SDNX and therefore kvm_run */
}
static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu);
kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
kvm_run->s.regs.pft = vcpu->arch.pfault_token;
kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
save_access_regs(vcpu->run->s.regs.acrs);
restore_access_regs(vcpu->arch.host_acrs);
/* Save guest register state */
save_fpu_regs();
vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
/* Restore will be done lazily at return */
current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;
if (likely(!kvm_s390_pv_cpu_is_protected(vcpu)))
store_regs_fmt2(vcpu, kvm_run);
}
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
int rc;
@ -4009,6 +4335,10 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
kvm_sigset_activate(vcpu);
/*
* no need to check the return value of vcpu_start as it can only have
* an error for protvirt, but protvirt means user cpu state
*/
if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
kvm_s390_vcpu_start(vcpu);
} else if (is_vcpu_stopped(vcpu)) {
@ -4146,18 +4476,27 @@ static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
}
void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
int kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
{
int i, online_vcpus, started_vcpus = 0;
int i, online_vcpus, r = 0, started_vcpus = 0;
if (!is_vcpu_stopped(vcpu))
return;
return 0;
trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
/* Only one cpu at a time may enter/leave the STOPPED state. */
spin_lock(&vcpu->kvm->arch.start_stop_lock);
online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
/* Let's tell the UV that we want to change into the operating state */
if (kvm_s390_pv_cpu_is_protected(vcpu)) {
r = kvm_s390_pv_set_cpu_state(vcpu, PV_CPU_STATE_OPR);
if (r) {
spin_unlock(&vcpu->kvm->arch.start_stop_lock);
return r;
}
}
for (i = 0; i < online_vcpus; i++) {
if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
started_vcpus++;
@ -4176,28 +4515,44 @@ void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
}
kvm_s390_clear_cpuflags(vcpu, CPUSTAT_STOPPED);
/*
* The real PSW might have changed due to a RESTART interpreted by the
* ultravisor. We block all interrupts and let the next sie exit
* refresh our view.
*/
if (kvm_s390_pv_cpu_is_protected(vcpu))
vcpu->arch.sie_block->gpsw.mask &= ~PSW_INT_MASK;
/*
* Another VCPU might have used IBS while we were offline.
* Let's play safe and flush the VCPU at startup.
*/
kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
spin_unlock(&vcpu->kvm->arch.start_stop_lock);
return;
return 0;
}
void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
int kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
{
int i, online_vcpus, started_vcpus = 0;
int i, online_vcpus, r = 0, started_vcpus = 0;
struct kvm_vcpu *started_vcpu = NULL;
if (is_vcpu_stopped(vcpu))
return;
return 0;
trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
/* Only one cpu at a time may enter/leave the STOPPED state. */
spin_lock(&vcpu->kvm->arch.start_stop_lock);
online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
/* Let's tell the UV that we want to change into the stopped state */
if (kvm_s390_pv_cpu_is_protected(vcpu)) {
r = kvm_s390_pv_set_cpu_state(vcpu, PV_CPU_STATE_STP);
if (r) {
spin_unlock(&vcpu->kvm->arch.start_stop_lock);
return r;
}
}
/* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
kvm_s390_clear_stop_irq(vcpu);
@ -4220,7 +4575,7 @@ void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
}
spin_unlock(&vcpu->kvm->arch.start_stop_lock);
return;
return 0;
}
static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
@ -4247,12 +4602,40 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
return r;
}
static long kvm_s390_guest_sida_op(struct kvm_vcpu *vcpu,
struct kvm_s390_mem_op *mop)
{
void __user *uaddr = (void __user *)mop->buf;
int r = 0;
if (mop->flags || !mop->size)
return -EINVAL;
if (mop->size + mop->sida_offset < mop->size)
return -EINVAL;
if (mop->size + mop->sida_offset > sida_size(vcpu->arch.sie_block))
return -E2BIG;
switch (mop->op) {
case KVM_S390_MEMOP_SIDA_READ:
if (copy_to_user(uaddr, (void *)(sida_origin(vcpu->arch.sie_block) +
mop->sida_offset), mop->size))
r = -EFAULT;
break;
case KVM_S390_MEMOP_SIDA_WRITE:
if (copy_from_user((void *)(sida_origin(vcpu->arch.sie_block) +
mop->sida_offset), uaddr, mop->size))
r = -EFAULT;
break;
}
return r;
}
static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
struct kvm_s390_mem_op *mop)
{
void __user *uaddr = (void __user *)mop->buf;
void *tmpbuf = NULL;
int r, srcu_idx;
int r = 0;
const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
| KVM_S390_MEMOP_F_CHECK_ONLY;
@ -4262,14 +4645,15 @@ static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
if (mop->size > MEM_OP_MAX_SIZE)
return -E2BIG;
if (kvm_s390_pv_cpu_is_protected(vcpu))
return -EINVAL;
if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
tmpbuf = vmalloc(mop->size);
if (!tmpbuf)
return -ENOMEM;
}
srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
switch (mop->op) {
case KVM_S390_MEMOP_LOGICAL_READ:
if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
@ -4295,12 +4679,8 @@ static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
}
r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
break;
default:
r = -EINVAL;
}
srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
@ -4308,6 +4688,31 @@ static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
return r;
}
static long kvm_s390_guest_memsida_op(struct kvm_vcpu *vcpu,
struct kvm_s390_mem_op *mop)
{
int r, srcu_idx;
srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
switch (mop->op) {
case KVM_S390_MEMOP_LOGICAL_READ:
case KVM_S390_MEMOP_LOGICAL_WRITE:
r = kvm_s390_guest_mem_op(vcpu, mop);
break;
case KVM_S390_MEMOP_SIDA_READ:
case KVM_S390_MEMOP_SIDA_WRITE:
/* we are locked against sida going away by the vcpu->mutex */
r = kvm_s390_guest_sida_op(vcpu, mop);
break;
default:
r = -EINVAL;
}
srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
return r;
}
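
For orientation, a minimal userspace sketch of driving the new SIDA path through the existing KVM_S390_MEM_OP vcpu ioctl; the kvm_s390_mem_op layout with a sida_offset member and the KVM_S390_MEMOP_SIDA_* op codes are assumed from this series' uapi additions, which are not part of the hunks shown here:

    /* illustrative only; error handling trimmed */
    #include <linux/kvm.h>
    #include <string.h>
    #include <sys/ioctl.h>

    static int read_sida(int vcpu_fd, void *dst, __u32 len, __u32 offset)
    {
            struct kvm_s390_mem_op op;

            memset(&op, 0, sizeof(op));          /* flags must be 0 for SIDA ops */
            op.op = KVM_S390_MEMOP_SIDA_READ;    /* valid for protected guests only */
            op.buf = (__u64)(unsigned long)dst;  /* userspace destination buffer */
            op.size = len;                       /* checked against the SIDA size */
            op.sida_offset = offset;             /* offset into the SIDA */

            return ioctl(vcpu_fd, KVM_S390_MEM_OP, &op);
    }

For protected guests the plain LOGICAL_READ/WRITE ops are now rejected with -EINVAL, as the hunk above shows.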
long kvm_arch_vcpu_async_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{
@ -4343,6 +4748,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
void __user *argp = (void __user *)arg;
int idx;
long r;
u16 rc, rrc;
vcpu_load(vcpu);
@ -4364,18 +4770,40 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
case KVM_S390_CLEAR_RESET:
r = 0;
kvm_arch_vcpu_ioctl_clear_reset(vcpu);
if (kvm_s390_pv_cpu_is_protected(vcpu)) {
r = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu),
UVC_CMD_CPU_RESET_CLEAR, &rc, &rrc);
VCPU_EVENT(vcpu, 3, "PROTVIRT RESET CLEAR VCPU: rc %x rrc %x",
rc, rrc);
}
break;
case KVM_S390_INITIAL_RESET:
r = 0;
kvm_arch_vcpu_ioctl_initial_reset(vcpu);
if (kvm_s390_pv_cpu_is_protected(vcpu)) {
r = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu),
UVC_CMD_CPU_RESET_INITIAL,
&rc, &rrc);
VCPU_EVENT(vcpu, 3, "PROTVIRT RESET INITIAL VCPU: rc %x rrc %x",
rc, rrc);
}
break;
case KVM_S390_NORMAL_RESET:
r = 0;
kvm_arch_vcpu_ioctl_normal_reset(vcpu);
if (kvm_s390_pv_cpu_is_protected(vcpu)) {
r = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu),
UVC_CMD_CPU_RESET, &rc, &rrc);
VCPU_EVENT(vcpu, 3, "PROTVIRT RESET NORMAL VCPU: rc %x rrc %x",
rc, rrc);
}
break;
case KVM_SET_ONE_REG:
case KVM_GET_ONE_REG: {
struct kvm_one_reg reg;
r = -EINVAL;
if (kvm_s390_pv_cpu_is_protected(vcpu))
break;
r = -EFAULT;
if (copy_from_user(&reg, argp, sizeof(reg)))
break;
@ -4438,7 +4866,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
struct kvm_s390_mem_op mem_op;
if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
r = kvm_s390_guest_mem_op(vcpu, &mem_op);
r = kvm_s390_guest_memsida_op(vcpu, &mem_op);
else
r = -EFAULT;
break;
@ -4518,6 +4946,9 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
if (mem->guest_phys_addr + mem->memory_size > kvm->arch.mem_limit)
return -EINVAL;
/* When we are protected, we should not change the memory slots */
if (kvm_s390_pv_get_handle(kvm))
return -EINVAL;
return 0;
}


@ -2,7 +2,7 @@
/*
* definition for kvm on s390
*
* Copyright IBM Corp. 2008, 2009
* Copyright IBM Corp. 2008, 2020
*
* Author(s): Carsten Otte <cotte@de.ibm.com>
* Christian Borntraeger <borntraeger@de.ibm.com>
@ -15,6 +15,7 @@
#include <linux/hrtimer.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/lockdep.h>
#include <asm/facility.h>
#include <asm/processor.h>
#include <asm/sclp.h>
@ -25,6 +26,17 @@
#define IS_ITDB_VALID(vcpu) ((*(char *)vcpu->arch.sie_block->itdba == TDB_FORMAT1))
extern debug_info_t *kvm_s390_dbf;
extern debug_info_t *kvm_s390_dbf_uv;
#define KVM_UV_EVENT(d_kvm, d_loglevel, d_string, d_args...)\
do { \
debug_sprintf_event((d_kvm)->arch.dbf, d_loglevel, d_string "\n", \
d_args); \
debug_sprintf_event(kvm_s390_dbf_uv, d_loglevel, \
"%d: " d_string "\n", (d_kvm)->userspace_pid, \
d_args); \
} while (0)
#define KVM_EVENT(d_loglevel, d_string, d_args...)\
do { \
debug_sprintf_event(kvm_s390_dbf, d_loglevel, d_string "\n", \
@ -196,6 +208,39 @@ static inline int kvm_s390_user_cpu_state_ctrl(struct kvm *kvm)
return kvm->arch.user_cpu_state_ctrl != 0;
}
/* implemented in pv.c */
int kvm_s390_pv_destroy_cpu(struct kvm_vcpu *vcpu, u16 *rc, u16 *rrc);
int kvm_s390_pv_create_cpu(struct kvm_vcpu *vcpu, u16 *rc, u16 *rrc);
int kvm_s390_pv_deinit_vm(struct kvm *kvm, u16 *rc, u16 *rrc);
int kvm_s390_pv_init_vm(struct kvm *kvm, u16 *rc, u16 *rrc);
int kvm_s390_pv_set_sec_parms(struct kvm *kvm, void *hdr, u64 length, u16 *rc,
u16 *rrc);
int kvm_s390_pv_unpack(struct kvm *kvm, unsigned long addr, unsigned long size,
unsigned long tweak, u16 *rc, u16 *rrc);
int kvm_s390_pv_set_cpu_state(struct kvm_vcpu *vcpu, u8 state);
static inline u64 kvm_s390_pv_get_handle(struct kvm *kvm)
{
return kvm->arch.pv.handle;
}
static inline u64 kvm_s390_pv_cpu_get_handle(struct kvm_vcpu *vcpu)
{
return vcpu->arch.pv.handle;
}
static inline bool kvm_s390_pv_is_protected(struct kvm *kvm)
{
lockdep_assert_held(&kvm->lock);
return !!kvm_s390_pv_get_handle(kvm);
}
static inline bool kvm_s390_pv_cpu_is_protected(struct kvm_vcpu *vcpu)
{
lockdep_assert_held(&vcpu->mutex);
return !!kvm_s390_pv_cpu_get_handle(vcpu);
}
/* implemented in interrupt.c */
int kvm_s390_handle_wait(struct kvm_vcpu *vcpu);
void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu);
@ -286,8 +331,8 @@ void kvm_s390_set_tod_clock(struct kvm *kvm,
long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable);
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr);
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu);
void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu);
int kvm_s390_vcpu_start(struct kvm_vcpu *vcpu);
int kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu);
void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu);
bool kvm_s390_vcpu_sie_inhibited(struct kvm_vcpu *vcpu);


@ -2,7 +2,7 @@
/*
* handling privileged instructions
*
* Copyright IBM Corp. 2008, 2018
* Copyright IBM Corp. 2008, 2020
*
* Author(s): Carsten Otte <cotte@de.ibm.com>
* Christian Borntraeger <borntraeger@de.ibm.com>
@ -872,7 +872,7 @@ static int handle_stsi(struct kvm_vcpu *vcpu)
operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);
if (operand2 & 0xfff)
if (!kvm_s390_pv_cpu_is_protected(vcpu) && (operand2 & 0xfff))
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
switch (fc) {
@ -893,8 +893,13 @@ static int handle_stsi(struct kvm_vcpu *vcpu)
handle_stsi_3_2_2(vcpu, (void *) mem);
break;
}
rc = write_guest(vcpu, operand2, ar, (void *)mem, PAGE_SIZE);
if (kvm_s390_pv_cpu_is_protected(vcpu)) {
memcpy((void *)sida_origin(vcpu->arch.sie_block), (void *)mem,
PAGE_SIZE);
rc = 0;
} else {
rc = write_guest(vcpu, operand2, ar, (void *)mem, PAGE_SIZE);
}
if (rc) {
rc = kvm_s390_inject_prog_cond(vcpu, rc);
goto out;

arch/s390/kvm/pv.c (new file, 303 lines)

@ -0,0 +1,303 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Hosting Protected Virtual Machines
*
* Copyright IBM Corp. 2019, 2020
* Author(s): Janosch Frank <frankja@linux.ibm.com>
*/
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/pagemap.h>
#include <linux/sched/signal.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/uv.h>
#include <asm/mman.h>
#include "kvm-s390.h"
int kvm_s390_pv_destroy_cpu(struct kvm_vcpu *vcpu, u16 *rc, u16 *rrc)
{
int cc = 0;
if (kvm_s390_pv_cpu_get_handle(vcpu)) {
cc = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu),
UVC_CMD_DESTROY_SEC_CPU, rc, rrc);
KVM_UV_EVENT(vcpu->kvm, 3,
"PROTVIRT DESTROY VCPU %d: rc %x rrc %x",
vcpu->vcpu_id, *rc, *rrc);
WARN_ONCE(cc, "protvirt destroy cpu failed rc %x rrc %x",
*rc, *rrc);
}
/* Intended memory leak for something that should never happen. */
if (!cc)
free_pages(vcpu->arch.pv.stor_base,
get_order(uv_info.guest_cpu_stor_len));
free_page(sida_origin(vcpu->arch.sie_block));
vcpu->arch.sie_block->pv_handle_cpu = 0;
vcpu->arch.sie_block->pv_handle_config = 0;
memset(&vcpu->arch.pv, 0, sizeof(vcpu->arch.pv));
vcpu->arch.sie_block->sdf = 0;
/*
* The sidad field (for sdf == 2) is now the gbea field (for sdf == 0).
* Use the reset value of gbea to avoid leaking the kernel pointer of
* the just freed sida.
*/
vcpu->arch.sie_block->gbea = 1;
kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
return cc ? -EIO : 0;
}
int kvm_s390_pv_create_cpu(struct kvm_vcpu *vcpu, u16 *rc, u16 *rrc)
{
struct uv_cb_csc uvcb = {
.header.cmd = UVC_CMD_CREATE_SEC_CPU,
.header.len = sizeof(uvcb),
};
int cc;
if (kvm_s390_pv_cpu_get_handle(vcpu))
return -EINVAL;
vcpu->arch.pv.stor_base = __get_free_pages(GFP_KERNEL,
get_order(uv_info.guest_cpu_stor_len));
if (!vcpu->arch.pv.stor_base)
return -ENOMEM;
/* Input */
uvcb.guest_handle = kvm_s390_pv_get_handle(vcpu->kvm);
uvcb.num = vcpu->arch.sie_block->icpua;
uvcb.state_origin = (u64)vcpu->arch.sie_block;
uvcb.stor_origin = (u64)vcpu->arch.pv.stor_base;
/* Alloc Secure Instruction Data Area Designation */
vcpu->arch.sie_block->sidad = __get_free_page(GFP_KERNEL | __GFP_ZERO);
if (!vcpu->arch.sie_block->sidad) {
free_pages(vcpu->arch.pv.stor_base,
get_order(uv_info.guest_cpu_stor_len));
return -ENOMEM;
}
cc = uv_call(0, (u64)&uvcb);
*rc = uvcb.header.rc;
*rrc = uvcb.header.rrc;
KVM_UV_EVENT(vcpu->kvm, 3,
"PROTVIRT CREATE VCPU: cpu %d handle %llx rc %x rrc %x",
vcpu->vcpu_id, uvcb.cpu_handle, uvcb.header.rc,
uvcb.header.rrc);
if (cc) {
u16 dummy;
kvm_s390_pv_destroy_cpu(vcpu, &dummy, &dummy);
return -EIO;
}
/* Output */
vcpu->arch.pv.handle = uvcb.cpu_handle;
vcpu->arch.sie_block->pv_handle_cpu = uvcb.cpu_handle;
vcpu->arch.sie_block->pv_handle_config = kvm_s390_pv_get_handle(vcpu->kvm);
vcpu->arch.sie_block->sdf = 2;
kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
return 0;
}
/* only free resources when the destroy was successful */
static void kvm_s390_pv_dealloc_vm(struct kvm *kvm)
{
vfree(kvm->arch.pv.stor_var);
free_pages(kvm->arch.pv.stor_base,
get_order(uv_info.guest_base_stor_len));
memset(&kvm->arch.pv, 0, sizeof(kvm->arch.pv));
}
static int kvm_s390_pv_alloc_vm(struct kvm *kvm)
{
unsigned long base = uv_info.guest_base_stor_len;
unsigned long virt = uv_info.guest_virt_var_stor_len;
unsigned long npages = 0, vlen = 0;
struct kvm_memory_slot *memslot;
kvm->arch.pv.stor_var = NULL;
kvm->arch.pv.stor_base = __get_free_pages(GFP_KERNEL, get_order(base));
if (!kvm->arch.pv.stor_base)
return -ENOMEM;
/*
* Calculate current guest storage for allocation of the
* variable storage, which is based on the length in MB.
*
* Slots are sorted by GFN
*/
mutex_lock(&kvm->slots_lock);
memslot = kvm_memslots(kvm)->memslots;
npages = memslot->base_gfn + memslot->npages;
mutex_unlock(&kvm->slots_lock);
kvm->arch.pv.guest_len = npages * PAGE_SIZE;
/* Allocate variable storage */
vlen = ALIGN(virt * ((npages * PAGE_SIZE) / HPAGE_SIZE), PAGE_SIZE);
vlen += uv_info.guest_virt_base_stor_len;
kvm->arch.pv.stor_var = vzalloc(vlen);
if (!kvm->arch.pv.stor_var)
goto out_err;
return 0;
out_err:
kvm_s390_pv_dealloc_vm(kvm);
return -ENOMEM;
}
/* this should not fail, but if it does, we must not free the donated memory */
int kvm_s390_pv_deinit_vm(struct kvm *kvm, u16 *rc, u16 *rrc)
{
int cc;
/* make all pages accessible before destroying the guest */
s390_reset_acc(kvm->mm);
cc = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
UVC_CMD_DESTROY_SEC_CONF, rc, rrc);
WRITE_ONCE(kvm->arch.gmap->guest_handle, 0);
atomic_set(&kvm->mm->context.is_protected, 0);
KVM_UV_EVENT(kvm, 3, "PROTVIRT DESTROY VM: rc %x rrc %x", *rc, *rrc);
WARN_ONCE(cc, "protvirt destroy vm failed rc %x rrc %x", *rc, *rrc);
/* Intended memory leak on "impossible" error */
if (!cc)
kvm_s390_pv_dealloc_vm(kvm);
return cc ? -EIO : 0;
}
int kvm_s390_pv_init_vm(struct kvm *kvm, u16 *rc, u16 *rrc)
{
struct uv_cb_cgc uvcb = {
.header.cmd = UVC_CMD_CREATE_SEC_CONF,
.header.len = sizeof(uvcb)
};
int cc, ret;
u16 dummy;
ret = kvm_s390_pv_alloc_vm(kvm);
if (ret)
return ret;
/* Inputs */
uvcb.guest_stor_origin = 0; /* MSO is 0 for KVM */
uvcb.guest_stor_len = kvm->arch.pv.guest_len;
uvcb.guest_asce = kvm->arch.gmap->asce;
uvcb.guest_sca = (unsigned long)kvm->arch.sca;
uvcb.conf_base_stor_origin = (u64)kvm->arch.pv.stor_base;
uvcb.conf_virt_stor_origin = (u64)kvm->arch.pv.stor_var;
cc = uv_call(0, (u64)&uvcb);
*rc = uvcb.header.rc;
*rrc = uvcb.header.rrc;
KVM_UV_EVENT(kvm, 3, "PROTVIRT CREATE VM: handle %llx len %llx rc %x rrc %x",
uvcb.guest_handle, uvcb.guest_stor_len, *rc, *rrc);
/* Outputs */
kvm->arch.pv.handle = uvcb.guest_handle;
if (cc) {
if (uvcb.header.rc & UVC_RC_NEED_DESTROY)
kvm_s390_pv_deinit_vm(kvm, &dummy, &dummy);
else
kvm_s390_pv_dealloc_vm(kvm);
return -EIO;
}
kvm->arch.gmap->guest_handle = uvcb.guest_handle;
atomic_set(&kvm->mm->context.is_protected, 1);
return 0;
}
int kvm_s390_pv_set_sec_parms(struct kvm *kvm, void *hdr, u64 length, u16 *rc,
u16 *rrc)
{
struct uv_cb_ssc uvcb = {
.header.cmd = UVC_CMD_SET_SEC_CONF_PARAMS,
.header.len = sizeof(uvcb),
.sec_header_origin = (u64)hdr,
.sec_header_len = length,
.guest_handle = kvm_s390_pv_get_handle(kvm),
};
int cc = uv_call(0, (u64)&uvcb);
*rc = uvcb.header.rc;
*rrc = uvcb.header.rrc;
KVM_UV_EVENT(kvm, 3, "PROTVIRT VM SET PARMS: rc %x rrc %x",
*rc, *rrc);
return cc ? -EINVAL : 0;
}
static int unpack_one(struct kvm *kvm, unsigned long addr, u64 tweak,
u64 offset, u16 *rc, u16 *rrc)
{
struct uv_cb_unp uvcb = {
.header.cmd = UVC_CMD_UNPACK_IMG,
.header.len = sizeof(uvcb),
.guest_handle = kvm_s390_pv_get_handle(kvm),
.gaddr = addr,
.tweak[0] = tweak,
.tweak[1] = offset,
};
int ret = gmap_make_secure(kvm->arch.gmap, addr, &uvcb);
*rc = uvcb.header.rc;
*rrc = uvcb.header.rrc;
if (ret && ret != -EAGAIN)
KVM_UV_EVENT(kvm, 3, "PROTVIRT VM UNPACK: failed addr %llx with rc %x rrc %x",
uvcb.gaddr, *rc, *rrc);
return ret;
}
int kvm_s390_pv_unpack(struct kvm *kvm, unsigned long addr, unsigned long size,
unsigned long tweak, u16 *rc, u16 *rrc)
{
u64 offset = 0;
int ret = 0;
if (addr & ~PAGE_MASK || !size || size & ~PAGE_MASK)
return -EINVAL;
KVM_UV_EVENT(kvm, 3, "PROTVIRT VM UNPACK: start addr %lx size %lx",
addr, size);
while (offset < size) {
ret = unpack_one(kvm, addr, tweak, offset, rc, rrc);
if (ret == -EAGAIN) {
cond_resched();
if (fatal_signal_pending(current))
break;
continue;
}
if (ret)
break;
addr += PAGE_SIZE;
offset += PAGE_SIZE;
}
if (!ret)
KVM_UV_EVENT(kvm, 3, "%s", "PROTVIRT VM UNPACK: successful");
return ret;
}
int kvm_s390_pv_set_cpu_state(struct kvm_vcpu *vcpu, u8 state)
{
struct uv_cb_cpu_set_state uvcb = {
.header.cmd = UVC_CMD_CPU_SET_STATE,
.header.len = sizeof(uvcb),
.cpu_handle = kvm_s390_pv_cpu_get_handle(vcpu),
.state = state,
};
int cc;
cc = uv_call(0, (u64)&uvcb);
KVM_UV_EVENT(vcpu->kvm, 3, "PROTVIRT SET CPU %d STATE %d rc %x rrc %x",
vcpu->vcpu_id, state, uvcb.header.rc, uvcb.header.rrc);
if (cc)
return -EINVAL;
return 0;
}
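
kvm_s390_pv_unpack() above is the kernel side of the VM-level unpack command: userspace supplies guest address, size and tweak, and the loop feeds the encrypted image to the Ultravisor one page at a time, rescheduling on -EAGAIN. A hedged sketch of the calling side; the struct and command names (kvm_pv_cmd, kvm_s390_pv_unp, KVM_PV_UNPACK, KVM_S390_PV_COMMAND) are taken to be this series' uapi additions and are not defined in this file:

    /* illustrative only */
    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    static int pv_unpack(int vm_fd, __u64 gaddr, __u64 size, __u64 tweak)
    {
            struct kvm_s390_pv_unp unp = {
                    .addr  = gaddr,   /* guest address of the encrypted image */
                    .size  = size,    /* page aligned, checked by kvm_s390_pv_unpack() */
                    .tweak = tweak,   /* tweak taken from the SE header */
            };
            struct kvm_pv_cmd cmd = {
                    .cmd  = KVM_PV_UNPACK,
                    .data = (__u64)(unsigned long)&unp,
            };

            return ioctl(vm_fd, KVM_S390_PV_COMMAND, &cmd);
    }

On failure the rc/rrc fields of the command block should carry the same Ultravisor return codes that the KVM_UV_EVENT lines above log on the kernel side.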


@ -38,6 +38,7 @@
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/facility.h>
#include <asm/uv.h>
#include "../kernel/entry.h"
#define __FAIL_ADDR_MASK -4096L
@ -816,3 +817,80 @@ out_extint:
early_initcall(pfault_irq_init);
#endif /* CONFIG_PFAULT */
#if IS_ENABLED(CONFIG_PGSTE)
void do_secure_storage_access(struct pt_regs *regs)
{
unsigned long addr = regs->int_parm_long & __FAIL_ADDR_MASK;
struct vm_area_struct *vma;
struct mm_struct *mm;
struct page *page;
int rc;
switch (get_fault_type(regs)) {
case USER_FAULT:
mm = current->mm;
down_read(&mm->mmap_sem);
vma = find_vma(mm, addr);
if (!vma) {
up_read(&mm->mmap_sem);
do_fault_error(regs, VM_READ | VM_WRITE, VM_FAULT_BADMAP);
break;
}
page = follow_page(vma, addr, FOLL_WRITE | FOLL_GET);
if (IS_ERR_OR_NULL(page)) {
up_read(&mm->mmap_sem);
break;
}
if (arch_make_page_accessible(page))
send_sig(SIGSEGV, current, 0);
put_page(page);
up_read(&mm->mmap_sem);
break;
case KERNEL_FAULT:
page = phys_to_page(addr);
if (unlikely(!try_get_page(page)))
break;
rc = arch_make_page_accessible(page);
put_page(page);
if (rc)
BUG();
break;
case VDSO_FAULT:
/* fallthrough */
case GMAP_FAULT:
/* fallthrough */
default:
do_fault_error(regs, VM_READ | VM_WRITE, VM_FAULT_BADMAP);
WARN_ON_ONCE(1);
}
}
NOKPROBE_SYMBOL(do_secure_storage_access);
void do_non_secure_storage_access(struct pt_regs *regs)
{
unsigned long gaddr = regs->int_parm_long & __FAIL_ADDR_MASK;
struct gmap *gmap = (struct gmap *)S390_lowcore.gmap;
if (get_fault_type(regs) != GMAP_FAULT) {
do_fault_error(regs, VM_READ | VM_WRITE, VM_FAULT_BADMAP);
WARN_ON_ONCE(1);
return;
}
if (gmap_convert_to_secure(gmap, gaddr) == -EINVAL)
send_sig(SIGSEGV, current, 0);
}
NOKPROBE_SYMBOL(do_non_secure_storage_access);
#else
void do_secure_storage_access(struct pt_regs *regs)
{
default_trap_handler(regs);
}
void do_non_secure_storage_access(struct pt_regs *regs)
{
default_trap_handler(regs);
}
#endif


@ -2548,6 +2548,22 @@ int s390_enable_sie(void)
}
EXPORT_SYMBOL_GPL(s390_enable_sie);
int gmap_mark_unmergeable(void)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
for (vma = mm->mmap; vma; vma = vma->vm_next) {
if (ksm_madvise(vma, vma->vm_start, vma->vm_end,
MADV_UNMERGEABLE, &vma->vm_flags)) {
return -ENOMEM;
}
}
mm->def_flags &= ~VM_MERGEABLE;
return 0;
}
EXPORT_SYMBOL_GPL(gmap_mark_unmergeable);
/*
* Enable storage key handling from now on and initialize the storage
* keys with the default key.
@ -2593,7 +2609,6 @@ static const struct mm_walk_ops enable_skey_walk_ops = {
int s390_enable_skey(void)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
int rc = 0;
down_write(&mm->mmap_sem);
@ -2601,16 +2616,11 @@ int s390_enable_skey(void)
goto out_up;
mm->context.uses_skeys = 1;
for (vma = mm->mmap; vma; vma = vma->vm_next) {
if (ksm_madvise(vma, vma->vm_start, vma->vm_end,
MADV_UNMERGEABLE, &vma->vm_flags)) {
mm->context.uses_skeys = 0;
rc = -ENOMEM;
goto out_up;
}
rc = gmap_mark_unmergeable();
if (rc) {
mm->context.uses_skeys = 0;
goto out_up;
}
mm->def_flags &= ~VM_MERGEABLE;
walk_page_range(mm, 0, TASK_SIZE, &enable_skey_walk_ops, NULL);
out_up:
@ -2640,3 +2650,38 @@ void s390_reset_cmma(struct mm_struct *mm)
up_write(&mm->mmap_sem);
}
EXPORT_SYMBOL_GPL(s390_reset_cmma);
/*
* make inaccessible pages accessible again
*/
static int __s390_reset_acc(pte_t *ptep, unsigned long addr,
unsigned long next, struct mm_walk *walk)
{
pte_t pte = READ_ONCE(*ptep);
if (pte_present(pte))
WARN_ON_ONCE(uv_convert_from_secure(pte_val(pte) & PAGE_MASK));
return 0;
}
static const struct mm_walk_ops reset_acc_walk_ops = {
.pte_entry = __s390_reset_acc,
};
#include <linux/sched/mm.h>
void s390_reset_acc(struct mm_struct *mm)
{
/*
* we might be called during
* reset: we walk the pages and clear
* close of all kvm file descriptors: we walk the pages and clear
* exit of process on fd closure: vma already gone, do nothing
*/
if (!mmget_not_zero(mm))
return;
down_read(&mm->mmap_sem);
walk_page_range(mm, 0, TASK_SIZE, &reset_acc_walk_ops, NULL);
up_read(&mm->mmap_sem);
mmput(mm);
}
EXPORT_SYMBOL_GPL(s390_reset_acc);


@ -250,6 +250,7 @@ static const u64 amd_f17h_perfmon_event_map[PERF_COUNT_HW_MAX] =
[PERF_COUNT_HW_CPU_CYCLES] = 0x0076,
[PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
[PERF_COUNT_HW_CACHE_REFERENCES] = 0xff60,
[PERF_COUNT_HW_CACHE_MISSES] = 0x0964,
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c2,
[PERF_COUNT_HW_BRANCH_MISSES] = 0x00c3,
[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x0287,


@ -4765,6 +4765,7 @@ __init int intel_pmu_init(void)
break;
case INTEL_FAM6_ATOM_TREMONT_D:
case INTEL_FAM6_ATOM_TREMONT:
x86_pmu.late_ack = true;
memcpy(hw_cache_event_ids, glp_hw_cache_event_ids,
sizeof(hw_cache_event_ids));


@ -40,17 +40,18 @@
* Model specific counters:
* MSR_CORE_C1_RES: CORE C1 Residency Counter
* perf code: 0x00
* Available model: SLM,AMT,GLM,CNL
* Available model: SLM,AMT,GLM,CNL,TNT
* Scope: Core (each processor core has a MSR)
* MSR_CORE_C3_RESIDENCY: CORE C3 Residency Counter
* perf code: 0x01
* Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,GLM,
* CNL,KBL,CML
* CNL,KBL,CML,TNT
* Scope: Core
* MSR_CORE_C6_RESIDENCY: CORE C6 Residency Counter
* perf code: 0x02
* Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,
* SKL,KNL,GLM,CNL,KBL,CML,ICL,TGL
* SKL,KNL,GLM,CNL,KBL,CML,ICL,TGL,
* TNT
* Scope: Core
* MSR_CORE_C7_RESIDENCY: CORE C7 Residency Counter
* perf code: 0x03
@ -60,17 +61,18 @@
* MSR_PKG_C2_RESIDENCY: Package C2 Residency Counter.
* perf code: 0x00
* Available model: SNB,IVB,HSW,BDW,SKL,KNL,GLM,CNL,
* KBL,CML,ICL,TGL
* KBL,CML,ICL,TGL,TNT
* Scope: Package (physical package)
* MSR_PKG_C3_RESIDENCY: Package C3 Residency Counter.
* perf code: 0x01
* Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,KNL,
* GLM,CNL,KBL,CML,ICL,TGL
* GLM,CNL,KBL,CML,ICL,TGL,TNT
* Scope: Package (physical package)
* MSR_PKG_C6_RESIDENCY: Package C6 Residency Counter.
* perf code: 0x02
* Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW
* SKL,KNL,GLM,CNL,KBL,CML,ICL,TGL
* Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,
* SKL,KNL,GLM,CNL,KBL,CML,ICL,TGL,
* TNT
* Scope: Package (physical package)
* MSR_PKG_C7_RESIDENCY: Package C7 Residency Counter.
* perf code: 0x03
@ -87,7 +89,8 @@
* Scope: Package (physical package)
* MSR_PKG_C10_RESIDENCY: Package C10 Residency Counter.
* perf code: 0x06
* Available model: HSW ULT,KBL,GLM,CNL,CML,ICL,TGL
* Available model: HSW ULT,KBL,GLM,CNL,CML,ICL,TGL,
* TNT
* Scope: Package (physical package)
*
*/
@ -640,8 +643,9 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = {
X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT, glm_cstates),
X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT_D, glm_cstates),
X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT_PLUS, glm_cstates),
X86_CSTATES_MODEL(INTEL_FAM6_ATOM_TREMONT_D, glm_cstates),
X86_CSTATES_MODEL(INTEL_FAM6_ATOM_TREMONT, glm_cstates),
X86_CSTATES_MODEL(INTEL_FAM6_ICELAKE_L, icl_cstates),
X86_CSTATES_MODEL(INTEL_FAM6_ICELAKE, icl_cstates),


@ -1714,6 +1714,8 @@ intel_pmu_save_and_restart_reload(struct perf_event *event, int count)
old = ((s64)(prev_raw_count << shift) >> shift);
local64_add(new - old + count * period, &event->count);
local64_set(&hwc->period_left, -new);
perf_event_update_userpage(event);
return 0;


@ -75,8 +75,9 @@ static bool test_intel(int idx, void *data)
case INTEL_FAM6_ATOM_GOLDMONT:
case INTEL_FAM6_ATOM_GOLDMONT_D:
case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
case INTEL_FAM6_ATOM_TREMONT_D:
case INTEL_FAM6_ATOM_TREMONT:
case INTEL_FAM6_XEON_PHI_KNL:
case INTEL_FAM6_XEON_PHI_KNM:


@ -136,8 +136,6 @@ config CRYPTO_USER
Userspace configuration for cryptographic instantiations such as
cbc(aes).
if CRYPTO_MANAGER2
config CRYPTO_MANAGER_DISABLE_TESTS
bool "Disable run-time self tests"
default y
@ -155,8 +153,6 @@ config CRYPTO_MANAGER_EXTRA_TESTS
This is intended for developer use only, as these tests take much
longer to run than the normal self tests.
endif # if CRYPTO_MANAGER2
config CRYPTO_GF128MUL
tristate


@ -4436,6 +4436,15 @@ static const struct alg_test_desc alg_test_descs[] = {
.cipher = __VECS(tf_cbc_tv_template)
},
}, {
#if IS_ENABLED(CONFIG_CRYPTO_PAES_S390)
.alg = "cbc-paes-s390",
.fips_allowed = 1,
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(aes_cbc_tv_template)
}
}, {
#endif
.alg = "cbcmac(aes)",
.fips_allowed = 1,
.test = alg_test_hash,
@ -4587,6 +4596,15 @@ static const struct alg_test_desc alg_test_descs[] = {
.cipher = __VECS(tf_ctr_tv_template)
}
}, {
#if IS_ENABLED(CONFIG_CRYPTO_PAES_S390)
.alg = "ctr-paes-s390",
.fips_allowed = 1,
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(aes_ctr_tv_template)
}
}, {
#endif
.alg = "cts(cbc(aes))",
.test = alg_test_skcipher,
.fips_allowed = 1,
@ -4879,6 +4897,15 @@ static const struct alg_test_desc alg_test_descs[] = {
.cipher = __VECS(xtea_tv_template)
}
}, {
#if IS_ENABLED(CONFIG_CRYPTO_PAES_S390)
.alg = "ecb-paes-s390",
.fips_allowed = 1,
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(aes_tv_template)
}
}, {
#endif
.alg = "ecdh",
.test = alg_test_kpp,
.fips_allowed = 1,
@ -5465,6 +5492,15 @@ static const struct alg_test_desc alg_test_descs[] = {
.cipher = __VECS(tf_xts_tv_template)
}
}, {
#if IS_ENABLED(CONFIG_CRYPTO_PAES_S390)
.alg = "xts-paes-s390",
.fips_allowed = 1,
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(aes_xts_tv_template)
}
}, {
#endif
.alg = "xts4096(paes)",
.test = alg_test_null,
.fips_allowed = 1,


@ -101,6 +101,8 @@ acpi_status acpi_hw_enable_all_runtime_gpes(void);
acpi_status acpi_hw_enable_all_wakeup_gpes(void);
u8 acpi_hw_check_all_gpes(void);
acpi_status
acpi_hw_enable_runtime_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
struct acpi_gpe_block_info *gpe_block,


@ -795,6 +795,38 @@ acpi_status acpi_enable_all_wakeup_gpes(void)
ACPI_EXPORT_SYMBOL(acpi_enable_all_wakeup_gpes)
/******************************************************************************
*
* FUNCTION: acpi_any_gpe_status_set
*
* PARAMETERS: None
*
* RETURN: Whether or not the status bit is set for any GPE
*
* DESCRIPTION: Check the status bits of all enabled GPEs and return TRUE if any
* of them is set or FALSE otherwise.
*
******************************************************************************/
u32 acpi_any_gpe_status_set(void)
{
acpi_status status;
u8 ret;
ACPI_FUNCTION_TRACE(acpi_any_gpe_status_set);
status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
if (ACPI_FAILURE(status)) {
return (FALSE);
}
ret = acpi_hw_check_all_gpes();
(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
return (ret);
}
ACPI_EXPORT_SYMBOL(acpi_any_gpe_status_set)
/*******************************************************************************
*
* FUNCTION: acpi_install_gpe_block


@ -444,6 +444,53 @@ acpi_hw_enable_wakeup_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
return (AE_OK);
}
/******************************************************************************
*
* FUNCTION: acpi_hw_get_gpe_block_status
*
* PARAMETERS: gpe_xrupt_info - GPE Interrupt info
* gpe_block - Gpe Block info
*
* RETURN: Success
*
* DESCRIPTION: Produce a combined GPE status bits mask for the given block.
*
******************************************************************************/
static acpi_status
acpi_hw_get_gpe_block_status(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
struct acpi_gpe_block_info *gpe_block,
void *ret_ptr)
{
struct acpi_gpe_register_info *gpe_register_info;
u64 in_enable, in_status;
acpi_status status;
u8 *ret = ret_ptr;
u32 i;
/* Examine each GPE Register within the block */
for (i = 0; i < gpe_block->register_count; i++) {
gpe_register_info = &gpe_block->register_info[i];
status = acpi_hw_read(&in_enable,
&gpe_register_info->enable_address);
if (ACPI_FAILURE(status)) {
continue;
}
status = acpi_hw_read(&in_status,
&gpe_register_info->status_address);
if (ACPI_FAILURE(status)) {
continue;
}
*ret |= in_enable & in_status;
}
return (AE_OK);
}
/******************************************************************************
*
* FUNCTION: acpi_hw_disable_all_gpes
@ -510,4 +557,28 @@ acpi_status acpi_hw_enable_all_wakeup_gpes(void)
return_ACPI_STATUS(status);
}
/******************************************************************************
*
* FUNCTION: acpi_hw_check_all_gpes
*
* PARAMETERS: None
*
* RETURN: Combined status of all GPEs
*
* DESCRIPTION: Check all enabled GPEs in all GPE blocks and return TRUE if the
* status bit is set for at least one of them or FALSE otherwise.
*
******************************************************************************/
u8 acpi_hw_check_all_gpes(void)
{
u8 ret = 0;
ACPI_FUNCTION_TRACE(acpi_hw_check_all_gpes);
(void)acpi_ev_walk_gpe_list(acpi_hw_get_gpe_block_status, &ret);
return (ret != 0);
}
#endif /* !ACPI_REDUCED_HARDWARE */


@ -179,6 +179,7 @@ EXPORT_SYMBOL(first_ec);
static struct acpi_ec *boot_ec;
static bool boot_ec_is_ecdt = false;
static struct workqueue_struct *ec_wq;
static struct workqueue_struct *ec_query_wq;
static int EC_FLAGS_QUERY_HANDSHAKE; /* Needs QR_EC issued when SCI_EVT set */
@ -469,7 +470,7 @@ static void acpi_ec_submit_query(struct acpi_ec *ec)
ec_dbg_evt("Command(%s) submitted/blocked",
acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY));
ec->nr_pending_queries++;
schedule_work(&ec->work);
queue_work(ec_wq, &ec->work);
}
}
@ -535,7 +536,7 @@ static void acpi_ec_enable_event(struct acpi_ec *ec)
#ifdef CONFIG_PM_SLEEP
static void __acpi_ec_flush_work(void)
{
flush_scheduled_work(); /* flush ec->work */
drain_workqueue(ec_wq); /* flush ec->work */
flush_workqueue(ec_query_wq); /* flush queries */
}
@ -556,8 +557,8 @@ static void acpi_ec_disable_event(struct acpi_ec *ec)
void acpi_ec_flush_work(void)
{
/* Without ec_query_wq there is nothing to flush. */
if (!ec_query_wq)
/* Without ec_wq there is nothing to flush. */
if (!ec_wq)
return;
__acpi_ec_flush_work();
@ -2107,25 +2108,33 @@ static struct acpi_driver acpi_ec_driver = {
.drv.pm = &acpi_ec_pm,
};
static inline int acpi_ec_query_init(void)
static void acpi_ec_destroy_workqueues(void)
{
if (!ec_query_wq) {
ec_query_wq = alloc_workqueue("kec_query", 0,
ec_max_queries);
if (!ec_query_wq)
return -ENODEV;
if (ec_wq) {
destroy_workqueue(ec_wq);
ec_wq = NULL;
}
return 0;
}
static inline void acpi_ec_query_exit(void)
{
if (ec_query_wq) {
destroy_workqueue(ec_query_wq);
ec_query_wq = NULL;
}
}
static int acpi_ec_init_workqueues(void)
{
if (!ec_wq)
ec_wq = alloc_ordered_workqueue("kec", 0);
if (!ec_query_wq)
ec_query_wq = alloc_workqueue("kec_query", 0, ec_max_queries);
if (!ec_wq || !ec_query_wq) {
acpi_ec_destroy_workqueues();
return -ENODEV;
}
return 0;
}
static const struct dmi_system_id acpi_ec_no_wakeup[] = {
{
.ident = "Thinkpad X1 Carbon 6th",
@ -2156,8 +2165,7 @@ int __init acpi_ec_init(void)
int result;
int ecdt_fail, dsdt_fail;
/* register workqueue for _Qxx evaluations */
result = acpi_ec_query_init();
result = acpi_ec_init_workqueues();
if (result)
return result;
@ -2188,6 +2196,6 @@ static void __exit acpi_ec_exit(void)
{
acpi_bus_unregister_driver(&acpi_ec_driver);
acpi_ec_query_exit();
acpi_ec_destroy_workqueues();
}
#endif /* 0 */


@ -990,21 +990,34 @@ static void acpi_s2idle_sync(void)
acpi_os_wait_events_complete(); /* synchronize Notify handling */
}
static void acpi_s2idle_wake(void)
static bool acpi_s2idle_wake(void)
{
/*
* If IRQD_WAKEUP_ARMED is set for the SCI at this point, the SCI has
* not triggered while suspended, so bail out.
*/
if (!acpi_sci_irq_valid() ||
irqd_is_wakeup_armed(irq_get_irq_data(acpi_sci_irq)))
return;
if (!acpi_sci_irq_valid())
return pm_wakeup_pending();
while (pm_wakeup_pending()) {
/*
* If IRQD_WAKEUP_ARMED is set for the SCI at this point, the
* SCI has not triggered while suspended, so bail out (the
* wakeup is pending anyway and the SCI is not the source of
* it).
*/
if (irqd_is_wakeup_armed(irq_get_irq_data(acpi_sci_irq)))
return true;
/*
* If there are no EC events to process and at least one of the
* other enabled GPEs is active, the wakeup is regarded as a
* genuine one.
*
* Note that the checks below must be carried out in this order
* to avoid returning prematurely due to a change of the EC GPE
* status bit from unset to set between the checks with the
* status bits of all the other GPEs unset.
*/
if (acpi_any_gpe_status_set() && !acpi_ec_dispatch_gpe())
return true;
/*
* If there are EC events to process, the wakeup may be a spurious one
* coming from the EC.
*/
if (acpi_ec_dispatch_gpe()) {
/*
* Cancel the wakeup and process all pending events in case
* there are any wakeup ones in there.
@ -1017,8 +1030,19 @@ static void acpi_s2idle_wake(void)
acpi_s2idle_sync();
/*
* The SCI is in the "suspended" state now and it cannot produce
* new wakeup events till the rearming below, so if any of them
* are pending here, they must be resulting from the processing
* of EC events above or coming from somewhere else.
*/
if (pm_wakeup_pending())
return true;
rearm_wake_irq(acpi_sci_irq);
}
return false;
}
static void acpi_s2idle_restore_early(void)


@ -465,7 +465,7 @@ static ssize_t input_read(struct file *file, char __user *buf, size_t len,
{
struct moxtet *moxtet = file->private_data;
u8 bin[TURRIS_MOX_MAX_MODULES];
u8 hex[sizeof(buf) * 2 + 1];
u8 hex[sizeof(bin) * 2 + 1];
int ret, n;
ret = moxtet_spi_read(moxtet, bin);
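
The one-line moxtet fix above is the classic sizeof-on-a-parameter pitfall: inside input_read(), buf is the char __user * argument, so sizeof(buf) is only the size of a pointer, while bin is a real array whose sizeof is TURRIS_MOX_MAX_MODULES, which is what the hex scratch buffer actually has to cover. A standalone illustration, independent of the driver:

    #include <stdio.h>

    static void show(const char *buf)
    {
            char bin[24];

            /* sizeof(buf) is the pointer size, not the pointed-to buffer */
            printf("%zu vs %zu\n", sizeof(buf), sizeof(bin)); /* e.g. 8 vs 24 */
    }

    int main(void)
    {
            show("anything");
            return 0;
    }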


@ -19,7 +19,7 @@
#include <linux/spinlock.h>
#include <linux/wait.h>
#define MAX_MSG_LEN 128
#define MAX_MSG_LEN 240
#define IPMB_REQUEST_LEN_MIN 7
#define NETFN_RSP_BIT_MASK 0x4
#define REQUEST_QUEUE_MAX_LEN 256
@ -63,6 +63,7 @@ struct ipmb_dev {
spinlock_t lock;
wait_queue_head_t wait_queue;
struct mutex file_mutex;
bool is_i2c_protocol;
};
static inline struct ipmb_dev *to_ipmb_dev(struct file *file)
@ -112,6 +113,25 @@ static ssize_t ipmb_read(struct file *file, char __user *buf, size_t count,
return ret < 0 ? ret : count;
}
static int ipmb_i2c_write(struct i2c_client *client, u8 *msg, u8 addr)
{
struct i2c_msg i2c_msg;
/*
* subtract 1 byte (rq_sa) from the length of the msg passed to
* raw i2c_transfer
*/
i2c_msg.len = msg[IPMB_MSG_LEN_IDX] - 1;
/* Assign message to buffer except first 2 bytes (length and address) */
i2c_msg.buf = msg + 2;
i2c_msg.addr = addr;
i2c_msg.flags = client->flags & I2C_CLIENT_PEC;
return i2c_transfer(client->adapter, &i2c_msg, 1);
}
static ssize_t ipmb_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
@ -133,6 +153,12 @@ static ssize_t ipmb_write(struct file *file, const char __user *buf,
rq_sa = GET_7BIT_ADDR(msg[RQ_SA_8BIT_IDX]);
netf_rq_lun = msg[NETFN_LUN_IDX];
/* Check i2c block transfer vs smbus */
if (ipmb_dev->is_i2c_protocol) {
ret = ipmb_i2c_write(ipmb_dev->client, msg, rq_sa);
return (ret == 1) ? count : ret;
}
/*
* subtract rq_sa and netf_rq_lun from the length of the msg passed to
* i2c_smbus_xfer
@ -253,7 +279,7 @@ static int ipmb_slave_cb(struct i2c_client *client,
break;
case I2C_SLAVE_WRITE_RECEIVED:
if (ipmb_dev->msg_idx >= sizeof(struct ipmb_msg))
if (ipmb_dev->msg_idx >= sizeof(struct ipmb_msg) - 1)
break;
buf[++ipmb_dev->msg_idx] = *val;
@ -302,6 +328,9 @@ static int ipmb_probe(struct i2c_client *client,
if (ret)
return ret;
ipmb_dev->is_i2c_protocol
= device_property_read_bool(&client->dev, "i2c-protocol");
ipmb_dev->client = client;
i2c_set_clientdata(client, ipmb_dev);
ret = i2c_slave_register(client, ipmb_slave_cb);
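
Read together with the driver's index defines, the buffer userspace passes to ipmb_write() starts with: byte 0 = length of everything from byte 1 onward, byte 1 = the responder's 8-bit I2C address, byte 2 = netfn/LUN, followed by the rest of the IPMB frame. That is why ipmb_i2c_write() above sends msg[0] - 1 bytes starting at msg + 2 to rq_sa. A hedged sketch of a writer matching that framing; the layout is inferred from the driver code, and build_ipmb_request() is a hypothetical helper:

    /* illustrative only: build the buffer ipmb_write() expects */
    #include <stdint.h>
    #include <string.h>

    static size_t build_ipmb_request(uint8_t *msg, uint8_t rs_sa_8bit,
                                     uint8_t netfn_lun, const uint8_t *body,
                                     size_t body_len)
    {
            /* body = checksum, requester address, seq, cmd, data, checksum;
             * the total length must stay below the driver's MAX_MSG_LEN */
            msg[1] = rs_sa_8bit;              /* responder slave address (8-bit) */
            msg[2] = netfn_lun;               /* netfn + LUN */
            memcpy(msg + 3, body, body_len);
            msg[0] = (uint8_t)(2 + body_len); /* bytes counted from msg[1] onward */

            return 1 + msg[0];                /* total number of bytes to write() */
    }

With the "i2c-protocol" property set the request goes out as one raw i2c_transfer(); without it the driver keeps the original SMBus path.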


@ -775,10 +775,14 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
msg = ssif_info->curr_msg;
if (msg) {
if (data) {
if (len > IPMI_MAX_MSG_LENGTH)
len = IPMI_MAX_MSG_LENGTH;
memcpy(msg->rsp, data, len);
} else {
len = 0;
}
msg->rsp_size = len;
if (msg->rsp_size > IPMI_MAX_MSG_LENGTH)
msg->rsp_size = IPMI_MAX_MSG_LENGTH;
memcpy(msg->rsp, data, msg->rsp_size);
ssif_info->curr_msg = NULL;
}


@ -105,6 +105,8 @@ bool have_governor_per_policy(void)
}
EXPORT_SYMBOL_GPL(have_governor_per_policy);
static struct kobject *cpufreq_global_kobject;
struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
{
if (have_governor_per_policy())
@ -2745,9 +2747,6 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver)
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
struct kobject *cpufreq_global_kobject;
EXPORT_SYMBOL(cpufreq_global_kobject);
static int __init cpufreq_core_init(void)
{
if (cpufreq_disabled())


@ -61,7 +61,7 @@ struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev)
{
if (!blk_queue_dax(bdev->bd_queue))
return NULL;
return fs_dax_get_by_host(bdev->bd_disk->disk_name);
return dax_get_by_host(bdev->bd_disk->disk_name);
}
EXPORT_SYMBOL_GPL(fs_dax_get_by_bdev);
#endif


@ -505,16 +505,10 @@ void edac_mc_free(struct mem_ctl_info *mci)
{
edac_dbg(1, "\n");
/* If we're not yet registered with sysfs free only what was allocated
* in edac_mc_alloc().
*/
if (!device_is_registered(&mci->dev)) {
_edac_mc_free(mci);
return;
}
if (device_is_registered(&mci->dev))
edac_unregister_sysfs(mci);
/* the mci instance is freed here, when the sysfs object is dropped */
edac_unregister_sysfs(mci);
_edac_mc_free(mci);
}
EXPORT_SYMBOL_GPL(edac_mc_free);


@ -276,10 +276,7 @@ static const struct attribute_group *csrow_attr_groups[] = {
static void csrow_attr_release(struct device *dev)
{
struct csrow_info *csrow = container_of(dev, struct csrow_info, dev);
edac_dbg(1, "device %s released\n", dev_name(dev));
kfree(csrow);
/* release device with _edac_mc_free() */
}
static const struct device_type csrow_attr_type = {
@ -447,8 +444,7 @@ error:
csrow = mci->csrows[i];
if (!nr_pages_per_csrow(csrow))
continue;
device_del(&mci->csrows[i]->dev);
device_unregister(&mci->csrows[i]->dev);
}
return err;
@ -608,10 +604,7 @@ static const struct attribute_group *dimm_attr_groups[] = {
static void dimm_attr_release(struct device *dev)
{
struct dimm_info *dimm = container_of(dev, struct dimm_info, dev);
edac_dbg(1, "device %s released\n", dev_name(dev));
kfree(dimm);
/* release device with _edac_mc_free() */
}
static const struct device_type dimm_attr_type = {
@ -893,10 +886,7 @@ static const struct attribute_group *mci_attr_groups[] = {
static void mci_attr_release(struct device *dev)
{
struct mem_ctl_info *mci = container_of(dev, struct mem_ctl_info, dev);
edac_dbg(1, "device %s released\n", dev_name(dev));
kfree(mci);
/* release device with _edac_mc_free() */
}
static const struct device_type mci_attr_type = {


@ -10,16 +10,6 @@
#define GPIO_OUT_REG(off) (BD71828_REG_GPIO_CTRL1 + (off))
#define HALL_GPIO_OFFSET 3
/*
* These defines can be removed when
* "gpio: Add definition for GPIO direction"
* (9208b1e77d6e8e9776f34f46ef4079ecac9c3c25 in GPIO tree) gets merged,
*/
#ifndef GPIO_LINE_DIRECTION_IN
#define GPIO_LINE_DIRECTION_IN 1
#define GPIO_LINE_DIRECTION_OUT 0
#endif
struct bd71828_gpio {
struct rohm_regmap_dev chip;
struct gpio_chip gpio;


@ -35,7 +35,7 @@ struct sifive_gpio {
void __iomem *base;
struct gpio_chip gc;
struct regmap *regs;
u32 irq_state;
unsigned long irq_state;
unsigned int trigger[SIFIVE_GPIO_MAX];
unsigned int irq_parent[SIFIVE_GPIO_MAX];
};
@ -94,7 +94,7 @@ static void sifive_gpio_irq_enable(struct irq_data *d)
spin_unlock_irqrestore(&gc->bgpio_lock, flags);
/* Enable interrupts */
assign_bit(offset, (unsigned long *)&chip->irq_state, 1);
assign_bit(offset, &chip->irq_state, 1);
sifive_gpio_set_ie(chip, offset);
}
@ -104,7 +104,7 @@ static void sifive_gpio_irq_disable(struct irq_data *d)
struct sifive_gpio *chip = gpiochip_get_data(gc);
int offset = irqd_to_hwirq(d) % SIFIVE_GPIO_MAX;
assign_bit(offset, (unsigned long *)&chip->irq_state, 0);
assign_bit(offset, &chip->irq_state, 0);
sifive_gpio_set_ie(chip, offset);
irq_chip_disable_parent(d);
}

Some files were not shown because too many files have changed in this diff.