commit 2b25385761
Merge tag 'kvm-s390-next-20150306' of git://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux into queue

KVM: s390: Features and Fixes for 4.1 (kvm/next)

1. Several fixes and enhancements
---------------------------------
- These 3 patches are cc'ed to stable:
  b75f4c9 KVM: s390: Zero out current VMDB of STSI before including level3 data.
  261520d KVM: s390: fix handling of write errors in the tpi handler
  15462e3 KVM: s390: reinjection of irqs can fail in the tpi handler

2. SIMD support, the kernel part (introduced with z13)
------------------------------------------------------
- Two KVM-generic changes in kvm.h:
  1. A new capability that can be enabled: KVM_CAP_S390_VECTOR_REGISTERS
  2. Increased padding size for sync regs in struct kvm_run, to clarify
     that sync regs can be larger than 1k. This is fine because it is the
     last element in the structure.
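For context, a minimal user-space sketch of how the new capability is meant to be probed and enabled on a VM file descriptor. It is not part of this series; the /dev/kvm open, error handling, and ordering are simplified, and the headers must be new enough to define KVM_CAP_S390_VECTOR_REGISTERS:

```c
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Probe for z13 vector support and enable it for a freshly created VM. */
int enable_s390_vector_regs(void)
{
	int kvm = open("/dev/kvm", O_RDWR);
	int vm  = ioctl(kvm, KVM_CREATE_VM, 0);

	/* The extension check reports whether the host machine has VX. */
	if (ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_S390_VECTOR_REGISTERS) <= 0) {
		fprintf(stderr, "vector registers not supported\n");
		return -1;
	}

	/*
	 * Enable before creating vcpus so their SIE blocks are set up with
	 * vector support; the kernel returns -EINVAL without the VX facility.
	 */
	struct kvm_enable_cap cap;
	memset(&cap, 0, sizeof(cap));
	cap.cap = KVM_CAP_S390_VECTOR_REGISTERS;
	return ioctl(vm, KVM_ENABLE_CAP, &cap);
}
```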
Documentation/virtual/kvm/api.txt

@@ -3248,3 +3248,13 @@ All other orders will be handled completely in user space.
 Only privileged operation exceptions will be checked for in the kernel (or even
 in the hardware prior to interception). If this capability is not enabled, the
 old way of handling SIGP orders is used (partially in kernel and user space).
+
+7.3 KVM_CAP_S390_VECTOR_REGISTERS
+
+Architectures: s390
+Parameters: none
+Returns: 0 on success, negative value on error
+
+Allows use of the vector registers introduced with z13 processor, and
+provides for the synchronization between host and user space. Will
+return -EINVAL if the machine does not support vectors.
arch/s390/include/asm/kvm_host.h

@@ -172,7 +172,9 @@ struct kvm_s390_sie_block {
 	__u32	fac;			/* 0x01a0 */
 	__u8	reserved1a4[20];	/* 0x01a4 */
 	__u64	cbrlo;			/* 0x01b8 */
-	__u8	reserved1c0[30];	/* 0x01c0 */
+	__u8	reserved1c0[8];		/* 0x01c0 */
+	__u32	ecd;			/* 0x01c8 */
+	__u8	reserved1cc[18];	/* 0x01cc */
 	__u64	pp;			/* 0x01de */
 	__u8	reserved1e6[2];		/* 0x01e6 */
 	__u64	itdba;			/* 0x01e8 */
@@ -183,11 +185,17 @@ struct kvm_s390_itdb {
 	__u8	data[256];
 } __packed;
 
+struct kvm_s390_vregs {
+	__vector128 vrs[32];
+	__u8	reserved200[512];	/* for future vector expansion */
+} __packed;
+
 struct sie_page {
 	struct kvm_s390_sie_block sie_block;
 	__u8 reserved200[1024];		/* 0x0200 */
 	struct kvm_s390_itdb itdb;	/* 0x0600 */
-	__u8 reserved700[2304];		/* 0x0700 */
+	__u8 reserved700[1280];		/* 0x0700 */
+	struct kvm_s390_vregs vregs;	/* 0x0c00 */
 } __packed;
 
 struct kvm_vcpu_stat {
@@ -238,6 +246,7 @@ struct kvm_vcpu_stat {
 	u32 instruction_sigp_stop;
 	u32 instruction_sigp_stop_store_status;
 	u32 instruction_sigp_store_status;
+	u32 instruction_sigp_store_adtl_status;
 	u32 instruction_sigp_arch;
 	u32 instruction_sigp_prefix;
 	u32 instruction_sigp_restart;
@@ -270,6 +279,7 @@ struct kvm_vcpu_stat {
 #define PGM_SPECIAL_OPERATION		0x13
 #define PGM_OPERAND			0x15
 #define PGM_TRACE_TABEL			0x16
+#define PGM_VECTOR_PROCESSING		0x1b
 #define PGM_SPACE_SWITCH		0x1c
 #define PGM_HFP_SQUARE_ROOT		0x1d
 #define PGM_PC_TRANSLATION_SPEC		0x1f
@@ -465,6 +475,7 @@ struct kvm_vcpu_arch {
 	s390_fp_regs      host_fpregs;
 	unsigned int      host_acrs[NUM_ACRS];
 	s390_fp_regs      guest_fpregs;
+	struct kvm_s390_vregs	*host_vregs;
 	struct kvm_s390_local_interrupt local_int;
 	struct hrtimer    ckc_timer;
 	struct kvm_s390_pgm_info pgm;
@@ -551,6 +562,7 @@ struct kvm_arch{
 	int css_support;
 	int use_irqchip;
 	int use_cmma;
+	int use_vectors;
 	int user_cpu_state_ctrl;
 	int user_sigp;
 	struct s390_io_adapter *adapters[MAX_S390_IO_ADAPTERS];
arch/s390/include/uapi/asm/kvm.h

@@ -150,6 +150,7 @@ struct kvm_guest_debug_arch {
 #define KVM_SYNC_CRS    (1UL << 3)
 #define KVM_SYNC_ARCH0  (1UL << 4)
 #define KVM_SYNC_PFAULT (1UL << 5)
+#define KVM_SYNC_VRS    (1UL << 6)
 /* definition of registers in kvm_run */
 struct kvm_sync_regs {
 	__u64 prefix;	/* prefix register */
@@ -164,6 +165,9 @@ struct kvm_sync_regs {
 	__u64 pft;	/* pfault token [PFAULT] */
 	__u64 pfs;	/* pfault select [PFAULT] */
 	__u64 pfc;	/* pfault compare [PFAULT] */
+	__u64 vrs[32][2];	/* vector registers */
+	__u8  reserved[512];	/* for future vector expansion */
+	__u32 fpc;		/* only valid with vector registers */
 };
 
 #define KVM_REG_S390_TODPR	(KVM_REG_S390 | KVM_REG_SIZE_U32 | 0x1)
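Given the KVM_SYNC_VRS bit and the vrs field added above, a hedged user-space sketch of reading the guest's vector registers out of the synced register area after KVM_RUN. This only builds against s390 headers that contain these fields, and it assumes the capability was enabled and the kernel advertised KVM_SYNC_VRS in kvm_valid_regs:

```c
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/kvm.h>

/* Dump vector registers 0..31 of a vcpu after a KVM_RUN exit (s390 only). */
void dump_guest_vrs(int kvm_fd, int vcpu_fd)
{
	long sz = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
	struct kvm_run *run = mmap(NULL, sz, PROT_READ | PROT_WRITE,
				   MAP_SHARED, vcpu_fd, 0);

	ioctl(vcpu_fd, KVM_RUN, 0);

	/* vrs[i] holds the 128-bit register i as two 64-bit words. */
	if (run->kvm_valid_regs & KVM_SYNC_VRS) {
		for (int i = 0; i < 32; i++)
			printf("V%-2d = %016llx%016llx\n", i,
			       (unsigned long long)run->s.regs.vrs[i][0],
			       (unsigned long long)run->s.regs.vrs[i][1]);
	}

	munmap(run, sz);
}
```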
arch/s390/include/uapi/asm/sie.h

@@ -230,7 +230,7 @@
  * and returns a key, which can be used to find a mnemonic name
  * of the instruction in the icpt_insn_codes table.
  */
-#define icpt_insn_decoder(insn) \
+#define icpt_insn_decoder(insn) (	\
 	INSN_DECODE_IPA0(0x01, insn, 48, 0xff)	\
 	INSN_DECODE_IPA0(0xaa, insn, 48, 0x0f)	\
 	INSN_DECODE_IPA0(0xb2, insn, 48, 0xff)	\
@@ -239,6 +239,6 @@
 	INSN_DECODE_IPA0(0xe5, insn, 48, 0xff)	\
 	INSN_DECODE_IPA0(0xeb, insn, 16, 0xff)	\
 	INSN_DECODE_IPA0(0xc8, insn, 48, 0x0f)	\
-	INSN_DECODE(insn)
+	INSN_DECODE(insn))
 
 #endif /* _UAPI_ASM_S390_SIE_H */
arch/s390/kernel/asm-offsets.c

@@ -171,6 +171,7 @@ int main(void)
 #else /* CONFIG_32BIT */
 	DEFINE(__LC_DATA_EXC_CODE, offsetof(struct _lowcore, data_exc_code));
 	DEFINE(__LC_MCCK_FAIL_STOR_ADDR, offsetof(struct _lowcore, failing_storage_address));
+	DEFINE(__LC_VX_SAVE_AREA_ADDR, offsetof(struct _lowcore, vector_save_area_addr));
 	DEFINE(__LC_EXT_PARAMS2, offsetof(struct _lowcore, ext_params2));
 	DEFINE(SAVE_AREA_BASE, offsetof(struct _lowcore, floating_pt_save_area));
 	DEFINE(__LC_PASTE, offsetof(struct _lowcore, paste));
arch/s390/kvm/gaccess.c

@@ -333,7 +333,7 @@ static int deref_table(struct kvm *kvm, unsigned long gpa, unsigned long *val)
  * @write: indicates if access is a write access
  *
  * Translate a guest virtual address into a guest absolute address by means
- * of dynamic address translation as specified by the architecuture.
+ * of dynamic address translation as specified by the architecture.
  * If the resulting absolute address is not available in the configuration
  * an addressing exception is indicated and @gpa will not be changed.
  *
arch/s390/kvm/guestdbg.c

@@ -191,8 +191,8 @@ static int __import_wp_info(struct kvm_vcpu *vcpu,
 	if (!wp_info->old_data)
 		return -ENOMEM;
 	/* try to backup the original value */
-	ret = read_guest(vcpu, wp_info->phys_addr, wp_info->old_data,
-			 wp_info->len);
+	ret = read_guest_abs(vcpu, wp_info->phys_addr, wp_info->old_data,
+			     wp_info->len);
 	if (ret) {
 		kfree(wp_info->old_data);
 		wp_info->old_data = NULL;
@@ -362,8 +362,8 @@ static struct kvm_hw_wp_info_arch *any_wp_changed(struct kvm_vcpu *vcpu)
 			continue;
 
 		/* refetch the wp data and compare it to the old value */
-		if (!read_guest(vcpu, wp_info->phys_addr, temp,
-				wp_info->len)) {
+		if (!read_guest_abs(vcpu, wp_info->phys_addr, temp,
+				    wp_info->len)) {
 			if (memcmp(temp, wp_info->old_data, wp_info->len)) {
 				kfree(temp);
 				return wp_info;
arch/s390/kvm/intercept.c

@@ -165,6 +165,7 @@ static void __extract_prog_irq(struct kvm_vcpu *vcpu,
 		pgm_info->mon_class_nr = vcpu->arch.sie_block->mcn;
 		pgm_info->mon_code = vcpu->arch.sie_block->tecmc;
 		break;
+	case PGM_VECTOR_PROCESSING:
 	case PGM_DATA:
 		pgm_info->data_exc_code = vcpu->arch.sie_block->dxc;
 		break;
arch/s390/kvm/interrupt.c

@@ -1,7 +1,7 @@
 /*
  * handling kvm guest interrupts
  *
- * Copyright IBM Corp. 2008,2014
+ * Copyright IBM Corp. 2008, 2015
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License (version 2 only)
@@ -18,6 +18,7 @@
 #include <linux/slab.h>
 #include <linux/bitmap.h>
 #include <asm/asm-offsets.h>
+#include <asm/dis.h>
 #include <asm/uaccess.h>
 #include <asm/sclp.h>
 #include "kvm-s390.h"
@@ -265,8 +266,6 @@ static void __set_intercept_indicator(struct kvm_vcpu *vcpu,
 
 static u16 get_ilc(struct kvm_vcpu *vcpu)
 {
-	const unsigned short table[] = { 2, 4, 4, 6 };
-
 	switch (vcpu->arch.sie_block->icptcode) {
 	case ICPT_INST:
 	case ICPT_INSTPROGI:
@@ -274,7 +273,7 @@ static u16 get_ilc(struct kvm_vcpu *vcpu)
 	case ICPT_PARTEXEC:
 	case ICPT_IOINST:
 		/* last instruction only stored for these icptcodes */
-		return table[vcpu->arch.sie_block->ipa >> 14];
+		return insn_length(vcpu->arch.sie_block->ipa >> 8);
 	case ICPT_PROGI:
 		return vcpu->arch.sie_block->pgmilc;
 	default:
@@ -352,6 +351,7 @@ static int __must_check __deliver_machine_check(struct kvm_vcpu *vcpu)
 {
 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
 	struct kvm_s390_mchk_info mchk;
+	unsigned long adtl_status_addr;
 	int rc;
 
 	spin_lock(&li->lock);
@@ -372,6 +372,9 @@ static int __must_check __deliver_machine_check(struct kvm_vcpu *vcpu)
 		   mchk.cr14, mchk.mcic);
 
 	rc  = kvm_s390_vcpu_store_status(vcpu, KVM_S390_STORE_STATUS_PREFIXED);
+	rc |= read_guest_lc(vcpu, __LC_VX_SAVE_AREA_ADDR,
+			    &adtl_status_addr, sizeof(unsigned long));
+	rc |= kvm_s390_vcpu_store_adtl_status(vcpu, adtl_status_addr);
 	rc |= put_guest_lc(vcpu, mchk.mcic,
 			   (u64 __user *) __LC_MCCK_CODE);
 	rc |= put_guest_lc(vcpu, mchk.failing_storage_address,
@@ -484,7 +487,7 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
 {
 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
 	struct kvm_s390_pgm_info pgm_info;
-	int rc = 0;
+	int rc = 0, nullifying = false;
 	u16 ilc = get_ilc(vcpu);
 
 	spin_lock(&li->lock);
@@ -509,6 +512,8 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
 	case PGM_LX_TRANSLATION:
 	case PGM_PRIMARY_AUTHORITY:
 	case PGM_SECONDARY_AUTHORITY:
+		nullifying = true;
+		/* fall through */
 	case PGM_SPACE_SWITCH:
 		rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
 				  (u64 *)__LC_TRANS_EXC_CODE);
@@ -521,6 +526,7 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
 	case PGM_EXTENDED_AUTHORITY:
 		rc = put_guest_lc(vcpu, pgm_info.exc_access_id,
 				  (u8 *)__LC_EXC_ACCESS_ID);
+		nullifying = true;
 		break;
 	case PGM_ASCE_TYPE:
 	case PGM_PAGE_TRANSLATION:
@@ -534,6 +540,7 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
 				  (u8 *)__LC_EXC_ACCESS_ID);
 		rc |= put_guest_lc(vcpu, pgm_info.op_access_id,
 				   (u8 *)__LC_OP_ACCESS_ID);
+		nullifying = true;
 		break;
 	case PGM_MONITOR:
 		rc = put_guest_lc(vcpu, pgm_info.mon_class_nr,
@@ -541,6 +548,7 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
 		rc |= put_guest_lc(vcpu, pgm_info.mon_code,
 				   (u64 *)__LC_MON_CODE);
 		break;
+	case PGM_VECTOR_PROCESSING:
 	case PGM_DATA:
 		rc = put_guest_lc(vcpu, pgm_info.data_exc_code,
 				  (u32 *)__LC_DATA_EXC_CODE);
@@ -551,6 +559,15 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
 		rc |= put_guest_lc(vcpu, pgm_info.exc_access_id,
 				   (u8 *)__LC_EXC_ACCESS_ID);
 		break;
+	case PGM_STACK_FULL:
+	case PGM_STACK_EMPTY:
+	case PGM_STACK_SPECIFICATION:
+	case PGM_STACK_TYPE:
+	case PGM_STACK_OPERATION:
+	case PGM_TRACE_TABEL:
+	case PGM_CRYPTO_OPERATION:
+		nullifying = true;
+		break;
 	}
 
 	if (pgm_info.code & PGM_PER) {
@@ -564,6 +581,9 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
 				   (u8 *) __LC_PER_ACCESS_ID);
 	}
 
+	if (nullifying && vcpu->arch.sie_block->icptcode == ICPT_INST)
+		kvm_s390_rewind_psw(vcpu, ilc);
+
 	rc |= put_guest_lc(vcpu, ilc, (u16 *) __LC_PGM_ILC);
 	rc |= put_guest_lc(vcpu, pgm_info.code,
 			   (u16 *)__LC_PGM_INT_CODE);
@@ -1332,10 +1352,10 @@ int kvm_s390_inject_vm(struct kvm *kvm,
 	return rc;
 }
 
-void kvm_s390_reinject_io_int(struct kvm *kvm,
-			      struct kvm_s390_interrupt_info *inti)
+int kvm_s390_reinject_io_int(struct kvm *kvm,
+			     struct kvm_s390_interrupt_info *inti)
 {
-	__inject_vm(kvm, inti);
+	return __inject_vm(kvm, inti);
 }
 
 int s390int_to_s390irq(struct kvm_s390_interrupt *s390int,
arch/s390/kvm/kvm-s390.c

@@ -87,6 +87,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
 	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
 	{ "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
 	{ "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
+	{ "instruction_sigp_store_adtl_status", VCPU_STAT(instruction_sigp_store_adtl_status) },
 	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
 	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
 	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
@@ -103,6 +104,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
 unsigned long kvm_s390_fac_list_mask[] = {
 	0xff82fffbf4fc2000UL,
 	0x005c000000000000UL,
+	0x4000000000000000UL,
 };
 
 unsigned long kvm_s390_fac_list_mask_size(void)
@@ -185,6 +187,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 	case KVM_CAP_S390_COW:
 		r = MACHINE_HAS_ESOP;
 		break;
+	case KVM_CAP_S390_VECTOR_REGISTERS:
+		r = MACHINE_HAS_VX;
+		break;
 	default:
 		r = 0;
 	}
@@ -265,6 +270,10 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
 		kvm->arch.user_sigp = 1;
 		r = 0;
 		break;
+	case KVM_CAP_S390_VECTOR_REGISTERS:
+		kvm->arch.use_vectors = MACHINE_HAS_VX;
+		r = MACHINE_HAS_VX ? 0 : -EINVAL;
+		break;
 	default:
 		r = -EINVAL;
 		break;
@@ -942,6 +951,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 
 	kvm->arch.css_support = 0;
 	kvm->arch.use_irqchip = 0;
+	kvm->arch.use_vectors = 0;
 	kvm->arch.epoch = 0;
 
 	spin_lock_init(&kvm->arch.start_stop_lock);
@@ -1035,6 +1045,8 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 				    KVM_SYNC_CRS |
 				    KVM_SYNC_ARCH0 |
 				    KVM_SYNC_PFAULT;
+	if (test_kvm_facility(vcpu->kvm, 129))
+		vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
 
 	if (kvm_is_ucontrol(vcpu->kvm))
 		return __kvm_ucontrol_vcpu_init(vcpu);
@@ -1045,10 +1057,18 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
 	save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
-	save_fp_regs(vcpu->arch.host_fpregs.fprs);
+	if (vcpu->kvm->arch.use_vectors)
+		save_vx_regs((__vector128 *)&vcpu->arch.host_vregs->vrs);
+	else
+		save_fp_regs(vcpu->arch.host_fpregs.fprs);
 	save_access_regs(vcpu->arch.host_acrs);
-	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
-	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
+	if (vcpu->kvm->arch.use_vectors) {
+		restore_fp_ctl(&vcpu->run->s.regs.fpc);
+		restore_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
+	} else {
+		restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
+		restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
+	}
 	restore_access_regs(vcpu->run->s.regs.acrs);
 	gmap_enable(vcpu->arch.gmap);
 	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
@@ -1058,11 +1078,19 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 {
 	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
 	gmap_disable(vcpu->arch.gmap);
-	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
-	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
+	if (vcpu->kvm->arch.use_vectors) {
+		save_fp_ctl(&vcpu->run->s.regs.fpc);
+		save_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
+	} else {
+		save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
+		save_fp_regs(vcpu->arch.guest_fpregs.fprs);
+	}
 	save_access_regs(vcpu->run->s.regs.acrs);
 	restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
-	restore_fp_regs(vcpu->arch.host_fpregs.fprs);
+	if (vcpu->kvm->arch.use_vectors)
+		restore_vx_regs((__vector128 *)&vcpu->arch.host_vregs->vrs);
+	else
+		restore_fp_regs(vcpu->arch.host_fpregs.fprs);
 	restore_access_regs(vcpu->arch.host_acrs);
 }
 
@@ -1130,6 +1158,15 @@ int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
 	return 0;
 }
 
+static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
+{
+	struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;
+
+	vcpu->arch.cpu_id = model->cpu_id;
+	vcpu->arch.sie_block->ibc = model->ibc;
+	vcpu->arch.sie_block->fac = (int) (long) model->fac->list;
+}
+
 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 {
 	int rc = 0;
@@ -1138,6 +1175,8 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 						    CPUSTAT_SM |
 						    CPUSTAT_STOPPED |
 						    CPUSTAT_GED);
+	kvm_s390_vcpu_setup_model(vcpu);
+
 	vcpu->arch.sie_block->ecb   = 6;
 	if (test_kvm_facility(vcpu->kvm, 50) && test_kvm_facility(vcpu->kvm, 73))
 		vcpu->arch.sie_block->ecb |= 0x10;
@@ -1148,8 +1187,11 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 		vcpu->arch.sie_block->eca |= 1;
 	if (sclp_has_sigpif())
 		vcpu->arch.sie_block->eca |= 0x10000000U;
-	vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE |
-				      ICTL_TPROT;
+	if (vcpu->kvm->arch.use_vectors) {
+		vcpu->arch.sie_block->eca |= 0x00020000;
+		vcpu->arch.sie_block->ecd |= 0x20000000;
+	}
+	vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
 
 	if (kvm_s390_cmma_enabled(vcpu->kvm)) {
 		rc = kvm_s390_vcpu_setup_cmma(vcpu);
@@ -1159,11 +1201,6 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
 
-	mutex_lock(&vcpu->kvm->lock);
-	vcpu->arch.cpu_id = vcpu->kvm->arch.model.cpu_id;
-	vcpu->arch.sie_block->ibc = vcpu->kvm->arch.model.ibc;
-	mutex_unlock(&vcpu->kvm->lock);
-
 	kvm_s390_vcpu_crypto_setup(vcpu);
 
 	return rc;
@@ -1191,6 +1228,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
 
 	vcpu->arch.sie_block = &sie_page->sie_block;
 	vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
+	vcpu->arch.host_vregs = &sie_page->vregs;
 
 	vcpu->arch.sie_block->icpua = id;
 	if (!kvm_is_ucontrol(kvm)) {
@@ -1206,7 +1244,6 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
 		vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
 		set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
 	}
-	vcpu->arch.sie_block->fac = (int) (long) kvm->arch.model.fac->list;
 
 	spin_lock_init(&vcpu->arch.local_int.lock);
 	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
@@ -1726,6 +1763,31 @@ static int vcpu_pre_run(struct kvm_vcpu *vcpu)
 	return 0;
 }
 
+static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
+{
+	psw_t *psw = &vcpu->arch.sie_block->gpsw;
+	u8 opcode;
+	int rc;
+
+	VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
+	trace_kvm_s390_sie_fault(vcpu);
+
+	/*
+	 * We want to inject an addressing exception, which is defined as a
+	 * suppressing or terminating exception. However, since we came here
+	 * by a DAT access exception, the PSW still points to the faulting
+	 * instruction since DAT exceptions are nullifying. So we've got
+	 * to look up the current opcode to get the length of the instruction
+	 * to be able to forward the PSW.
+	 */
+	rc = read_guest(vcpu, psw->addr, &opcode, 1);
+	if (rc)
+		return kvm_s390_inject_prog_cond(vcpu, rc);
+	psw->addr = __rewind_psw(*psw, -insn_length(opcode));
+
+	return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+}
+
 static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
 {
 	int rc = -1;
@@ -1757,11 +1819,8 @@ static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
 		}
 	}
 
-	if (rc == -1) {
-		VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
-		trace_kvm_s390_sie_fault(vcpu);
-		rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
-	}
+	if (rc == -1)
+		rc = vcpu_post_run_fault_in_sie(vcpu);
 
 	memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
 
@@ -1977,6 +2036,35 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
 	return kvm_s390_store_status_unloaded(vcpu, addr);
 }
 
+/*
+ * store additional status at address
+ */
+int kvm_s390_store_adtl_status_unloaded(struct kvm_vcpu *vcpu,
+					unsigned long gpa)
+{
+	/* Only bits 0-53 are used for address formation */
+	if (!(gpa & ~0x3ff))
+		return 0;
+
+	return write_guest_abs(vcpu, gpa & ~0x3ff,
+			       (void *)&vcpu->run->s.regs.vrs, 512);
+}
+
+int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr)
+{
+	if (!test_kvm_facility(vcpu->kvm, 129))
+		return 0;
+
+	/*
+	 * The guest VXRS are in the host VXRs due to the lazy
+	 * copying in vcpu load/put. Let's update our copies before we save
+	 * it into the save area.
+	 */
+	save_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
+
+	return kvm_s390_store_adtl_status_unloaded(vcpu, addr);
+}
+
 static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
 {
 	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
arch/s390/kvm/kvm-s390.h

@@ -151,8 +151,8 @@ int __must_check kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
 int __must_check kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code);
 struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
 						    u64 cr6, u64 schid);
-void kvm_s390_reinject_io_int(struct kvm *kvm,
-			      struct kvm_s390_interrupt_info *inti);
+int kvm_s390_reinject_io_int(struct kvm *kvm,
+			     struct kvm_s390_interrupt_info *inti);
 int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked);
 
 /* implemented in intercept.c */
@@ -177,7 +177,10 @@ int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu);
 /* implemented in kvm-s390.c */
 long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable);
 int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr);
+int kvm_s390_store_adtl_status_unloaded(struct kvm_vcpu *vcpu,
+					unsigned long addr);
 int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr);
+int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr);
 void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu);
 void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu);
 void s390_vcpu_block(struct kvm_vcpu *vcpu);
arch/s390/kvm/priv.c

@@ -229,18 +229,19 @@ static int handle_tpi(struct kvm_vcpu *vcpu)
 	struct kvm_s390_interrupt_info *inti;
 	unsigned long len;
 	u32 tpi_data[3];
-	int cc, rc;
+	int rc;
 	u64 addr;
 
-	rc = 0;
 	addr = kvm_s390_get_base_disp_s(vcpu);
 	if (addr & 3)
 		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
-	cc = 0;
+
 	inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->arch.sie_block->gcr[6], 0);
-	if (!inti)
-		goto no_interrupt;
-	cc = 1;
+	if (!inti) {
+		kvm_s390_set_psw_cc(vcpu, 0);
+		return 0;
+	}
+
 	tpi_data[0] = inti->io.subchannel_id << 16 | inti->io.subchannel_nr;
 	tpi_data[1] = inti->io.io_int_parm;
 	tpi_data[2] = inti->io.io_int_word;
@@ -251,30 +252,38 @@ static int handle_tpi(struct kvm_vcpu *vcpu)
 		 */
 		len = sizeof(tpi_data) - 4;
 		rc = write_guest(vcpu, addr, &tpi_data, len);
-		if (rc)
-			return kvm_s390_inject_prog_cond(vcpu, rc);
+		if (rc) {
+			rc = kvm_s390_inject_prog_cond(vcpu, rc);
+			goto reinject_interrupt;
+		}
 	} else {
 		/*
 		 * Store the three-word I/O interruption code into
 		 * the appropriate lowcore area.
 		 */
 		len = sizeof(tpi_data);
-		if (write_guest_lc(vcpu, __LC_SUBCHANNEL_ID, &tpi_data, len))
-			rc = -EFAULT;
+		if (write_guest_lc(vcpu, __LC_SUBCHANNEL_ID, &tpi_data, len)) {
+			/* failed writes to the low core are not recoverable */
+			rc = -EFAULT;
+			goto reinject_interrupt;
+		}
 	}
+
+	/* irq was successfully handed to the guest */
+	kfree(inti);
+	kvm_s390_set_psw_cc(vcpu, 1);
+	return 0;
+reinject_interrupt:
 	/*
 	 * If we encounter a problem storing the interruption code, the
 	 * instruction is suppressed from the guest's view: reinject the
 	 * interrupt.
 	 */
-	if (!rc)
-		kfree(inti);
-	else
-		kvm_s390_reinject_io_int(vcpu->kvm, inti);
-no_interrupt:
-	/* Set condition code and we're done. */
-	if (!rc)
-		kvm_s390_set_psw_cc(vcpu, cc);
+	if (kvm_s390_reinject_io_int(vcpu->kvm, inti)) {
+		kfree(inti);
+		rc = -EFAULT;
+	}
+	/* don't set the cc, a pgm irq was injected or we drop to user space */
 	return rc ? -EFAULT : 0;
 }
 
@@ -467,6 +476,7 @@ static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
 	for (n = mem->count - 1; n > 0 ; n--)
 		memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0]));
 
+	memset(&mem->vm[0], 0, sizeof(mem->vm[0]));
 	mem->vm[0].cpus_total = cpus;
 	mem->vm[0].cpus_configured = cpus;
 	mem->vm[0].cpus_standby = 0;
arch/s390/kvm/sigp.c

@@ -393,6 +393,9 @@ static int handle_sigp_order_in_user_space(struct kvm_vcpu *vcpu, u8 order_code)
 	case SIGP_STORE_STATUS_AT_ADDRESS:
 		vcpu->stat.instruction_sigp_store_status++;
 		break;
+	case SIGP_STORE_ADDITIONAL_STATUS:
+		vcpu->stat.instruction_sigp_store_adtl_status++;
+		break;
 	case SIGP_SET_PREFIX:
 		vcpu->stat.instruction_sigp_prefix++;
 		break;
include/uapi/linux/kvm.h

@@ -324,7 +324,7 @@
 	__u64 kvm_dirty_regs;
 	union {
 		struct kvm_sync_regs regs;
-		char padding[1024];
+		char padding[2048];
 	} s;
 };
 
@@ -760,6 +760,7 @@ struct kvm_ppc_smmu_info {
 #define KVM_CAP_PPC_ENABLE_HCALL 104
 #define KVM_CAP_CHECK_EXTENSION_VM 105
 #define KVM_CAP_S390_USER_SIGP 106
+#define KVM_CAP_S390_VECTOR_REGISTERS 107
 
 #ifdef KVM_CAP_IRQ_ROUTING