// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 */

#define pr_fmt(fmt) "SVM: " fmt

#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/kernel.h>

#include <asm/msr-index.h>
#include <asm/debugreg.h>

#include "kvm_emulate.h"
#include "trace.h"
#include "mmu.h"
#include "x86.h"
#include "smm.h"
#include "cpuid.h"
#include "lapic.h"
#include "svm.h"
#include "hyperv.h"

#define CC KVM_NESTED_VMENTER_CONSISTENCY_CHECK

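/*
 * Reflect a nested page fault encountered while running L2 into L1 as an
 * SVM_EXIT_NPF vmexit: if the current exit isn't already an NPF, synthesize
 * the exit code and fault address, then stuff the fault's error code into
 * the low bits of exit_info_1 and emulate the vmexit.  Installed as the
 * nested MMU's inject_page_fault callback.
 */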
static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
                                       struct x86_exception *fault)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        struct vmcb *vmcb = svm->vmcb;

        if (vmcb->control.exit_code != SVM_EXIT_NPF) {
                /*
                 * TODO: track the cause of the nested page fault, and
                 * correctly fill in the high bits of exit_info_1.
                 */
                vmcb->control.exit_code = SVM_EXIT_NPF;
                vmcb->control.exit_code_hi = 0;
                vmcb->control.exit_info_1 = (1ULL << 32);
                vmcb->control.exit_info_2 = fault->address;
        }

        vmcb->control.exit_info_1 &= ~0xffffffffULL;
        vmcb->control.exit_info_1 |= fault->error_code;

        nested_svm_vmexit(svm);
}

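/*
 * Read one of L2's PDPTEs from L1's nested page table, which is rooted at
 * the nested_cr3 that L1 provided in vmcb12.  Returns 0 if the read from
 * guest memory fails.
 */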
static u64 nested_svm_get_tdp_pdptr(struct kvm_vcpu *vcpu, int index)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        u64 cr3 = svm->nested.ctl.nested_cr3;
        u64 pdpte;
        int ret;

        ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(cr3), &pdpte,
                                       offset_in_page(cr3) + index * 8, 8);
        if (ret)
                return 0;
        return pdpte;
}

static unsigned long nested_svm_get_tdp_cr3(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        return svm->nested.ctl.nested_cr3;
}

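/*
 * Switch vcpu->arch.mmu to the guest_mmu and initialize it to shadow L1's
 * nested page table (rooted at vmcb12's nested_cr3, using L1's CR4/EFER
 * from vmcb01); walk_mmu is pointed at the nested_mmu for L2 translations.
 */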
static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        WARN_ON(mmu_is_nested(vcpu));

        vcpu->arch.mmu = &vcpu->arch.guest_mmu;

        /*
         * The NPT format depends on L1's CR4 and EFER, which is in vmcb01.  Note,
         * when called via KVM_SET_NESTED_STATE, that state may _not_ match current
         * vCPU state.  CR0.WP is explicitly ignored, while CR0.PG is required.
         */
        kvm_init_shadow_npt_mmu(vcpu, X86_CR0_PG, svm->vmcb01.ptr->save.cr4,
                                svm->vmcb01.ptr->save.efer,
                                svm->nested.ctl.nested_cr3);
        vcpu->arch.mmu->get_guest_pgd     = nested_svm_get_tdp_cr3;
        vcpu->arch.mmu->get_pdptr         = nested_svm_get_tdp_pdptr;
        vcpu->arch.mmu->inject_page_fault = nested_svm_inject_npf_exit;
        vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
}

static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
{
        vcpu->arch.mmu = &vcpu->arch.root_mmu;
        vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
}

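/*
 * Returns true if KVM must intercept VMLOAD/VMSAVE while running L2, i.e.
 * if Virtual VMLOAD/VMSAVE can't be used: the feature isn't enabled for
 * this vCPU, NPT isn't enabled for L2, or L1 didn't set the enable bit in
 * virt_ext.
 */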
static bool nested_vmcb_needs_vls_intercept(struct vcpu_svm *svm)
{
        if (!svm->v_vmload_vmsave_enabled)
                return true;

        if (!nested_npt_enabled(svm))
                return true;

        if (!(svm->nested.ctl.virt_ext & VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK))
                return true;

        return false;
}

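/*
 * Recompute the effective (vmcb02) intercepts for running L2: start from
 * L1's vmcb01 intercepts, apply KVM's own adjustments, and OR in the
 * intercepts that L1 requested in vmcb12.
 */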
void recalc_intercepts(struct vcpu_svm *svm)
{
        struct vmcb_control_area *c, *h;
        struct vmcb_ctrl_area_cached *g;
        unsigned int i;

        vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);

        if (!is_guest_mode(&svm->vcpu))
                return;

        c = &svm->vmcb->control;
        h = &svm->vmcb01.ptr->control;
        g = &svm->nested.ctl;

        for (i = 0; i < MAX_INTERCEPT; i++)
                c->intercepts[i] = h->intercepts[i];

        if (g->int_ctl & V_INTR_MASKING_MASK) {
                /* We only want the cr8 intercept bits of L1 */
                vmcb_clr_intercept(c, INTERCEPT_CR8_READ);
                vmcb_clr_intercept(c, INTERCEPT_CR8_WRITE);

                /*
                 * Once running L2 with HF_VINTR_MASK, EFLAGS.IF does not
                 * affect any interrupt we may want to inject; therefore,
                 * interrupt window vmexits are irrelevant to L0.
                 */
                vmcb_clr_intercept(c, INTERCEPT_VINTR);
        }

        /*
         * We want to see VMMCALLs from a nested guest only when Hyper-V L2 TLB
         * flush feature is enabled.
         */
        if (!nested_svm_l2_tlb_flush_enabled(&svm->vcpu))
                vmcb_clr_intercept(c, INTERCEPT_VMMCALL);

        for (i = 0; i < MAX_INTERCEPT; i++)
                c->intercepts[i] |= g->intercepts[i];

        /* If SMI is not intercepted, ignore guest SMI intercept as well */
        if (!intercept_smi)
                vmcb_clr_intercept(c, INTERCEPT_SMI);

        if (nested_vmcb_needs_vls_intercept(svm)) {
                /*
                 * If the virtual VMLOAD/VMSAVE is not enabled for the L2,
                 * we must intercept these instructions to correctly
                 * emulate them in case L1 doesn't intercept them.
                 */
                vmcb_set_intercept(c, INTERCEPT_VMLOAD);
                vmcb_set_intercept(c, INTERCEPT_VMSAVE);
        } else {
                WARN_ON(!(c->virt_ext & VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK));
        }
}

/*
 * Merge L0's (KVM) and L1's (Nested VMCB) MSR permission bitmaps. The function
 * is optimized in that it only merges the parts where KVM MSR permission bitmap
 * may contain zero bits.
 */
static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
{
        struct hv_vmcb_enlightenments *hve = &svm->nested.ctl.hv_enlightenments;
        int i;

        /*
         * MSR bitmap update can be skipped when:
         * - MSR bitmap for L1 hasn't changed.
         * - Nested hypervisor (L1) is attempting to launch the same L2 as
         *   before.
         * - Nested hypervisor (L1) is using Hyper-V emulation interface and
         *   tells KVM (L0) there were no changes in MSR bitmap for L2.
         */
        if (!svm->nested.force_msr_bitmap_recalc &&
            kvm_hv_hypercall_enabled(&svm->vcpu) &&
            hve->hv_enlightenments_control.msr_bitmap &&
            (svm->nested.ctl.clean & BIT(HV_VMCB_NESTED_ENLIGHTENMENTS)))
                goto set_msrpm_base_pa;

        if (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT)))
                return true;

        for (i = 0; i < MSRPM_OFFSETS; i++) {
                u32 value, p;
                u64 offset;

                if (msrpm_offsets[i] == 0xffffffff)
                        break;

                p      = msrpm_offsets[i];

                /* x2apic msrs are intercepted always for the nested guest */
                if (is_x2apic_msrpm_offset(p))
                        continue;

                offset = svm->nested.ctl.msrpm_base_pa + (p * 4);

                if (kvm_vcpu_read_guest(&svm->vcpu, offset, &value, 4))
                        return false;

                svm->nested.msrpm[p] = svm->msrpm[p] | value;
        }

        svm->nested.force_msr_bitmap_recalc = false;

set_msrpm_base_pa:
        svm->vmcb->control.msrpm_base_pa = __sme_set(__pa(svm->nested.msrpm));

        return true;
}

/*
 * Bits 11:0 of bitmap address are ignored by hardware
 */
static bool nested_svm_check_bitmap_pa(struct kvm_vcpu *vcpu, u64 pa, u32 size)
{
        u64 addr = PAGE_ALIGN(pa);

        return kvm_vcpu_is_legal_gpa(vcpu, addr) &&
            kvm_vcpu_is_legal_gpa(vcpu, addr + size - 1);
}

static bool nested_svm_check_tlb_ctl(struct kvm_vcpu *vcpu, u8 tlb_ctl)
{
        /* Nested FLUSHBYASID is not supported yet.  */
        switch(tlb_ctl) {
                case TLB_CONTROL_DO_NOTHING:
                case TLB_CONTROL_FLUSH_ALL_ASID:
                        return true;
                default:
                        return false;
        }
}

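/*
 * Consistency checks on the cached vmcb12 control area; each CC() that
 * fires flags a failed nested VM-Enter consistency check.
 */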
static bool __nested_vmcb_check_controls(struct kvm_vcpu *vcpu,
                                         struct vmcb_ctrl_area_cached *control)
{
        if (CC(!vmcb12_is_intercept(control, INTERCEPT_VMRUN)))
                return false;

        if (CC(control->asid == 0))
                return false;

        if (CC((control->nested_ctl & SVM_NESTED_CTL_NP_ENABLE) && !npt_enabled))
                return false;

        if (CC(!nested_svm_check_bitmap_pa(vcpu, control->msrpm_base_pa,
                                           MSRPM_SIZE)))
                return false;
        if (CC(!nested_svm_check_bitmap_pa(vcpu, control->iopm_base_pa,
                                           IOPM_SIZE)))
                return false;

        if (CC(!nested_svm_check_tlb_ctl(vcpu, control->tlb_ctl)))
                return false;

        return true;
}

/* Common checks that apply to both L1 and L2 state.  */
static bool __nested_vmcb_check_save(struct kvm_vcpu *vcpu,
                                     struct vmcb_save_area_cached *save)
{
        if (CC(!(save->efer & EFER_SVME)))
                return false;

        if (CC((save->cr0 & X86_CR0_CD) == 0 && (save->cr0 & X86_CR0_NW)) ||
            CC(save->cr0 & ~0xffffffffULL))
                return false;

        if (CC(!kvm_dr6_valid(save->dr6)) || CC(!kvm_dr7_valid(save->dr7)))
                return false;

        /*
         * These checks are also performed by KVM_SET_SREGS,
         * except that EFER.LMA is not checked by SVM against
         * CR0.PG && EFER.LME.
         */
        if ((save->efer & EFER_LME) && (save->cr0 & X86_CR0_PG)) {
                if (CC(!(save->cr4 & X86_CR4_PAE)) ||
                    CC(!(save->cr0 & X86_CR0_PE)) ||
                    CC(kvm_vcpu_is_illegal_gpa(vcpu, save->cr3)))
                        return false;
        }

        /* Note, SVM doesn't have any additional restrictions on CR4. */
        if (CC(!__kvm_is_valid_cr4(vcpu, save->cr4)))
                return false;

        if (CC(!kvm_valid_efer(vcpu, save->efer)))
                return false;

        return true;
}

static bool nested_vmcb_check_save(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        struct vmcb_save_area_cached *save = &svm->nested.save;

        return __nested_vmcb_check_save(vcpu, save);
}

static bool nested_vmcb_check_controls(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        struct vmcb_ctrl_area_cached *ctl = &svm->nested.ctl;

        return __nested_vmcb_check_controls(vcpu, ctl);
}

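/*
 * Snapshot the vmcb12 control fields that KVM consumes into the nested
 * control cache so that later emulation works on a stable copy instead of
 * re-reading guest memory.
 */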
static
void __nested_copy_vmcb_control_to_cache(struct kvm_vcpu *vcpu,
                                         struct vmcb_ctrl_area_cached *to,
                                         struct vmcb_control_area *from)
{
        unsigned int i;

        for (i = 0; i < MAX_INTERCEPT; i++)
                to->intercepts[i] = from->intercepts[i];

        to->iopm_base_pa        = from->iopm_base_pa;
        to->msrpm_base_pa       = from->msrpm_base_pa;
        to->tsc_offset          = from->tsc_offset;
        to->tlb_ctl             = from->tlb_ctl;
        to->int_ctl             = from->int_ctl;
        to->int_vector          = from->int_vector;
        to->int_state           = from->int_state;
        to->exit_code           = from->exit_code;
        to->exit_code_hi        = from->exit_code_hi;
        to->exit_info_1         = from->exit_info_1;
        to->exit_info_2         = from->exit_info_2;
        to->exit_int_info       = from->exit_int_info;
        to->exit_int_info_err   = from->exit_int_info_err;
        to->nested_ctl          = from->nested_ctl;
        to->event_inj           = from->event_inj;
        to->event_inj_err       = from->event_inj_err;
        to->next_rip            = from->next_rip;
        to->nested_cr3          = from->nested_cr3;
        to->virt_ext            = from->virt_ext;
        to->pause_filter_count  = from->pause_filter_count;
        to->pause_filter_thresh = from->pause_filter_thresh;

        /* Copy asid here because nested_vmcb_check_controls will check it.  */
        to->asid           = from->asid;
        to->msrpm_base_pa &= ~0x0fffULL;
        to->iopm_base_pa  &= ~0x0fffULL;

        /* Hyper-V extensions (Enlightened VMCB) */
        if (kvm_hv_hypercall_enabled(vcpu)) {
                to->clean = from->clean;
                memcpy(&to->hv_enlightenments, &from->hv_enlightenments,
                       sizeof(to->hv_enlightenments));
        }
}

void nested_copy_vmcb_control_to_cache(struct vcpu_svm *svm,
                                       struct vmcb_control_area *control)
{
        __nested_copy_vmcb_control_to_cache(&svm->vcpu, &svm->nested.ctl, control);
}

static void __nested_copy_vmcb_save_to_cache(struct vmcb_save_area_cached *to,
                                             struct vmcb_save_area *from)
{
        /*
         * Copy only fields that are validated, as we need them
         * to avoid TOC/TOU races.
         */
        to->efer = from->efer;
        to->cr0 = from->cr0;
        to->cr3 = from->cr3;
        to->cr4 = from->cr4;

        to->dr6 = from->dr6;
        to->dr7 = from->dr7;
}

void nested_copy_vmcb_save_to_cache(struct vcpu_svm *svm,
                                    struct vmcb_save_area *save)
{
        __nested_copy_vmcb_save_to_cache(&svm->nested.save, save);
}

/*
 * Synchronize fields that are written by the processor, so that
 * they can be copied back into the vmcb12.
 */
void nested_sync_control_from_vmcb02(struct vcpu_svm *svm)
{
        u32 mask;
        svm->nested.ctl.event_inj      = svm->vmcb->control.event_inj;
        svm->nested.ctl.event_inj_err  = svm->vmcb->control.event_inj_err;

        /* Only a few fields of int_ctl are written by the processor.  */
        mask = V_IRQ_MASK | V_TPR_MASK;
        if (!(svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK) &&
            svm_is_intercept(svm, INTERCEPT_VINTR)) {
                /*
                 * In order to request an interrupt window, L0 is usurping
                 * svm->vmcb->control.int_ctl and possibly setting V_IRQ
                 * even if it was clear in L1's VMCB. Restoring it would be
                 * wrong.  However, in this case V_IRQ will remain true until
                 * interrupt_window_interception calls svm_clear_vintr and
                 * restores int_ctl.  We can just leave it aside.
                 */
                mask &= ~V_IRQ_MASK;
        }

        if (nested_vgif_enabled(svm))
                mask |= V_GIF_MASK;

        svm->nested.ctl.int_ctl        &= ~mask;
        svm->nested.ctl.int_ctl        |= svm->vmcb->control.int_ctl & mask;
}

/*
 * Transfer any event that L0 or L1 wanted to inject into L2 to
 * EXIT_INT_INFO.
 */
static void nested_save_pending_event_to_vmcb12(struct vcpu_svm *svm,
                                                struct vmcb *vmcb12)
{
        struct kvm_vcpu *vcpu = &svm->vcpu;
        u32 exit_int_info = 0;
        unsigned int nr;

        if (vcpu->arch.exception.injected) {
                nr = vcpu->arch.exception.vector;
                exit_int_info = nr | SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_EXEPT;

                if (vcpu->arch.exception.has_error_code) {
                        exit_int_info |= SVM_EVTINJ_VALID_ERR;
                        vmcb12->control.exit_int_info_err =
                                vcpu->arch.exception.error_code;
                }

        } else if (vcpu->arch.nmi_injected) {
                exit_int_info = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;

        } else if (vcpu->arch.interrupt.injected) {
                nr = vcpu->arch.interrupt.nr;
                exit_int_info = nr | SVM_EVTINJ_VALID;

                if (vcpu->arch.interrupt.soft)
                        exit_int_info |= SVM_EVTINJ_TYPE_SOFT;
                else
                        exit_int_info |= SVM_EVTINJ_TYPE_INTR;
        }

        vmcb12->control.exit_int_info = exit_int_info;
}

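/* Request the TLB flushes / MMU syncs needed on an L1 <-> L2 transition. */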
static void nested_svm_transition_tlb_flush(struct kvm_vcpu *vcpu)
{
        /*
         * KVM_REQ_HV_TLB_FLUSH flushes entries from either L1's VP_ID or
         * L2's VP_ID upon request from the guest. Make sure we check for
         * pending entries in the right FIFO upon L1/L2 transition as these
         * requests are put by other vCPUs asynchronously.
         */
        if (to_hv_vcpu(vcpu) && npt_enabled)
                kvm_make_request(KVM_REQ_HV_TLB_FLUSH, vcpu);

        /*
         * TODO: optimize unconditional TLB flush/MMU sync.  A partial list of
         * things to fix before this can be conditional:
         *
         *  - Flush TLBs for both L1 and L2 remote TLB flush
         *  - Honor L1's request to flush an ASID on nested VMRUN
         *  - Sync nested NPT MMU on VMRUN that flushes L2's ASID[*]
         *  - Don't crush a pending TLB flush in vmcb02 on nested VMRUN
         *  - Flush L1's ASID on KVM_REQ_TLB_FLUSH_GUEST
         *
         * [*] Unlike nested EPT, SVM's ASID management can invalidate nested
         *     NPT guest-physical mappings on VMRUN.
         */
        kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
        kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
}

/*
 * Load guest's/host's cr3 on nested vmentry or vmexit. @nested_npt is true
 * if we are emulating VM-Entry into a guest with NPT enabled.
 */
static int nested_svm_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3,
                               bool nested_npt, bool reload_pdptrs)
{
        if (CC(kvm_vcpu_is_illegal_gpa(vcpu, cr3)))
                return -EINVAL;

        if (reload_pdptrs && !nested_npt && is_pae_paging(vcpu) &&
            CC(!load_pdptrs(vcpu, cr3)))
                return -EINVAL;

        vcpu->arch.cr3 = cr3;

        /* Re-initialize the MMU, e.g. to pick up CR4 MMU role changes. */
        kvm_init_mmu(vcpu);

        if (!nested_npt)
                kvm_mmu_new_pgd(vcpu, cr3);

        return 0;
}

void nested_vmcb02_compute_g_pat(struct vcpu_svm *svm)
{
        if (!svm->nested.vmcb02.ptr)
                return;

        /* FIXME: merge g_pat from vmcb01 and vmcb12.  */
        svm->nested.vmcb02.ptr->save.g_pat = svm->vmcb01.ptr->save.g_pat;
}

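/*
 * Load the L2 guest state described by vmcb12 (plus the cached, validated
 * save fields) into vmcb02 and the vCPU.  Segment, descriptor-table and
 * debug state is skipped when vmcb12's clean bits show it is unchanged and
 * the same vmcb12 is being re-entered.
 */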
static void nested_vmcb02_prepare_save(struct vcpu_svm *svm, struct vmcb *vmcb12)
{
        bool new_vmcb12 = false;
        struct vmcb *vmcb01 = svm->vmcb01.ptr;
        struct vmcb *vmcb02 = svm->nested.vmcb02.ptr;

        nested_vmcb02_compute_g_pat(svm);

        /* Load the nested guest state */
        if (svm->nested.vmcb12_gpa != svm->nested.last_vmcb12_gpa) {
                new_vmcb12 = true;
                svm->nested.last_vmcb12_gpa = svm->nested.vmcb12_gpa;
                svm->nested.force_msr_bitmap_recalc = true;
        }

        if (unlikely(new_vmcb12 || vmcb_is_dirty(vmcb12, VMCB_SEG))) {
                vmcb02->save.es = vmcb12->save.es;
                vmcb02->save.cs = vmcb12->save.cs;
                vmcb02->save.ss = vmcb12->save.ss;
                vmcb02->save.ds = vmcb12->save.ds;
                vmcb02->save.cpl = vmcb12->save.cpl;
                vmcb_mark_dirty(vmcb02, VMCB_SEG);
        }

        if (unlikely(new_vmcb12 || vmcb_is_dirty(vmcb12, VMCB_DT))) {
                vmcb02->save.gdtr = vmcb12->save.gdtr;
                vmcb02->save.idtr = vmcb12->save.idtr;
                vmcb_mark_dirty(vmcb02, VMCB_DT);
        }

        kvm_set_rflags(&svm->vcpu, vmcb12->save.rflags | X86_EFLAGS_FIXED);

        svm_set_efer(&svm->vcpu, svm->nested.save.efer);

        svm_set_cr0(&svm->vcpu, svm->nested.save.cr0);
        svm_set_cr4(&svm->vcpu, svm->nested.save.cr4);

        svm->vcpu.arch.cr2 = vmcb12->save.cr2;

        kvm_rax_write(&svm->vcpu, vmcb12->save.rax);
        kvm_rsp_write(&svm->vcpu, vmcb12->save.rsp);
        kvm_rip_write(&svm->vcpu, vmcb12->save.rip);

        /* In case we don't even reach vcpu_run, the fields are not updated */
        vmcb02->save.rax = vmcb12->save.rax;
        vmcb02->save.rsp = vmcb12->save.rsp;
        vmcb02->save.rip = vmcb12->save.rip;

        /* These bits will be set properly on the first execution when new_vmcb12 is true */
        if (unlikely(new_vmcb12 || vmcb_is_dirty(vmcb12, VMCB_DR))) {
                vmcb02->save.dr7 = svm->nested.save.dr7 | DR7_FIXED_1;
                svm->vcpu.arch.dr6  = svm->nested.save.dr6 | DR6_ACTIVE_LOW;
                vmcb_mark_dirty(vmcb02, VMCB_DR);
        }

        if (unlikely(svm->lbrv_enabled && (svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK))) {
                /*
                 * Reserved bits of DEBUGCTL are ignored.  Be consistent with
                 * svm_set_msr's definition of reserved bits.
                 */
                svm_copy_lbrs(vmcb02, vmcb12);
                vmcb02->save.dbgctl &= ~DEBUGCTL_RESERVED_BITS;
                svm_update_lbrv(&svm->vcpu);

        } else if (unlikely(vmcb01->control.virt_ext & LBR_CTL_ENABLE_MASK)) {
                svm_copy_lbrs(vmcb02, vmcb01);
        }
}

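/*
 * Returns true if the EVENTINJ field describes a "soft" event, i.e. a
 * software interrupt or a software exception (INT3/INTO), for which
 * next_rip must be provided as the return address on re-injection.
 */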
static inline bool is_evtinj_soft(u32 evtinj)
{
        u32 type = evtinj & SVM_EVTINJ_TYPE_MASK;
        u8 vector = evtinj & SVM_EVTINJ_VEC_MASK;

        if (!(evtinj & SVM_EVTINJ_VALID))
                return false;

        if (type == SVM_EVTINJ_TYPE_SOFT)
                return true;

        return type == SVM_EVTINJ_TYPE_EXEPT && kvm_exception_is_soft(vector);
}

static bool is_evtinj_nmi(u32 evtinj)
{
        u32 type = evtinj & SVM_EVTINJ_TYPE_MASK;

        if (!(evtinj & SVM_EVTINJ_VALID))
                return false;

        return type == SVM_EVTINJ_TYPE_NMI;
}

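/*
 * Build the vmcb02 control area for running L2: combine L1's vmcb01
 * settings with the controls L1 requested in vmcb12 (cached in
 * svm->nested.ctl), and set up nested NPT, the TSC offset, event
 * injection state and PAUSE filtering accordingly.
 */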
static void nested_vmcb02_prepare_control(struct vcpu_svm *svm,
                                          unsigned long vmcb12_rip,
                                          unsigned long vmcb12_csbase)
{
        u32 int_ctl_vmcb01_bits = V_INTR_MASKING_MASK;
        u32 int_ctl_vmcb12_bits = V_TPR_MASK | V_IRQ_INJECTION_BITS_MASK;

        struct kvm_vcpu *vcpu = &svm->vcpu;
        struct vmcb *vmcb01 = svm->vmcb01.ptr;
        struct vmcb *vmcb02 = svm->nested.vmcb02.ptr;
        u32 pause_count12;
        u32 pause_thresh12;

        /*
         * Filled at exit: exit_code, exit_code_hi, exit_info_1, exit_info_2,
         * exit_int_info, exit_int_info_err, next_rip, insn_len, insn_bytes.
         */

        if (svm->vgif_enabled && (svm->nested.ctl.int_ctl & V_GIF_ENABLE_MASK))
                int_ctl_vmcb12_bits |= (V_GIF_MASK | V_GIF_ENABLE_MASK);
        else
                int_ctl_vmcb01_bits |= (V_GIF_MASK | V_GIF_ENABLE_MASK);

        /* Copied from vmcb01.  msrpm_base can be overwritten later.  */
        vmcb02->control.nested_ctl = vmcb01->control.nested_ctl;
        vmcb02->control.iopm_base_pa = vmcb01->control.iopm_base_pa;
        vmcb02->control.msrpm_base_pa = vmcb01->control.msrpm_base_pa;

        /* Done at vmrun: asid.  */

        /* Also overwritten later if necessary.  */
        vmcb02->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;

        /* nested_cr3.  */
        if (nested_npt_enabled(svm))
                nested_svm_init_mmu_context(vcpu);

        vcpu->arch.tsc_offset = kvm_calc_nested_tsc_offset(
                        vcpu->arch.l1_tsc_offset,
                        svm->nested.ctl.tsc_offset,
                        svm->tsc_ratio_msr);

        vmcb02->control.tsc_offset = vcpu->arch.tsc_offset;

        if (svm->tsc_ratio_msr != kvm_caps.default_tsc_scaling_ratio) {
                WARN_ON(!svm->tsc_scaling_enabled);
                nested_svm_update_tsc_ratio_msr(vcpu);
        }

        vmcb02->control.int_ctl             =
                (svm->nested.ctl.int_ctl & int_ctl_vmcb12_bits) |
                (vmcb01->control.int_ctl & int_ctl_vmcb01_bits);

        vmcb02->control.int_vector          = svm->nested.ctl.int_vector;
        vmcb02->control.int_state           = svm->nested.ctl.int_state;
        vmcb02->control.event_inj           = svm->nested.ctl.event_inj;
        vmcb02->control.event_inj_err       = svm->nested.ctl.event_inj_err;

        /*
         * next_rip is consumed on VMRUN as the return address pushed on the
         * stack for injected soft exceptions/interrupts.  If nrips is exposed
         * to L1, take it verbatim from vmcb12.  If nrips is supported in
         * hardware but not exposed to L1, stuff the actual L2 RIP to emulate
         * what a nrips=0 CPU would do (L1 is responsible for advancing RIP
         * prior to injecting the event).
         */
        if (svm->nrips_enabled)
                vmcb02->control.next_rip    = svm->nested.ctl.next_rip;
        else if (boot_cpu_has(X86_FEATURE_NRIPS))
                vmcb02->control.next_rip    = vmcb12_rip;

        svm->nmi_l1_to_l2 = is_evtinj_nmi(vmcb02->control.event_inj);
        if (is_evtinj_soft(vmcb02->control.event_inj)) {
                svm->soft_int_injected = true;
                svm->soft_int_csbase = vmcb12_csbase;
                svm->soft_int_old_rip = vmcb12_rip;
                if (svm->nrips_enabled)
                        svm->soft_int_next_rip = svm->nested.ctl.next_rip;
                else
                        svm->soft_int_next_rip = vmcb12_rip;
        }

        vmcb02->control.virt_ext            = vmcb01->control.virt_ext &
                                              LBR_CTL_ENABLE_MASK;
        if (svm->lbrv_enabled)
                vmcb02->control.virt_ext  |=
                        (svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK);

        if (!nested_vmcb_needs_vls_intercept(svm))
                vmcb02->control.virt_ext |= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK;

        pause_count12 = svm->pause_filter_enabled ? svm->nested.ctl.pause_filter_count : 0;
        pause_thresh12 = svm->pause_threshold_enabled ? svm->nested.ctl.pause_filter_thresh : 0;
        if (kvm_pause_in_guest(svm->vcpu.kvm)) {
                /* use guest values since host doesn't intercept PAUSE */
                vmcb02->control.pause_filter_count = pause_count12;
                vmcb02->control.pause_filter_thresh = pause_thresh12;

        } else {
                /* start from host values otherwise */
                vmcb02->control.pause_filter_count = vmcb01->control.pause_filter_count;
                vmcb02->control.pause_filter_thresh = vmcb01->control.pause_filter_thresh;

                /* ... but ensure filtering is disabled if so requested.  */
                if (vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_PAUSE)) {
                        if (!pause_count12)
                                vmcb02->control.pause_filter_count = 0;
                        if (!pause_thresh12)
                                vmcb02->control.pause_filter_thresh = 0;
                }
        }

        nested_svm_transition_tlb_flush(vcpu);

        /* Enter Guest-Mode */
        enter_guest_mode(vcpu);

        /*
         * Merge guest and host intercepts - must be called with vcpu in
         * guest-mode to take effect.
         */
        recalc_intercepts(svm);
}

static void nested_svm_copy_common_state(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
{
        /*
         * Some VMCB state is shared between L1 and L2 and thus has to be
         * moved at the time of nested vmrun and vmexit.
         *
         * VMLOAD/VMSAVE state would also belong in this category, but KVM
         * always performs VMLOAD and VMSAVE from the VMCB01.
         */
        to_vmcb->save.spec_ctrl = from_vmcb->save.spec_ctrl;
}

int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb12_gpa,
|
2021-09-13 22:09:51 +08:00
|
|
|
struct vmcb *vmcb12, bool from_vmrun)
|
2020-05-18 22:56:43 +08:00
|
|
|
{
|
2021-03-03 03:40:39 +08:00
|
|
|
struct vcpu_svm *svm = to_svm(vcpu);
|
2020-07-10 22:11:55 +08:00
|
|
|
int ret;
|
|
|
|
|
2022-08-26 06:57:53 +08:00
|
|
|
trace_kvm_nested_vmenter(svm->vmcb->save.rip,
|
|
|
|
vmcb12_gpa,
|
|
|
|
vmcb12->save.rip,
|
|
|
|
vmcb12->control.int_ctl,
|
|
|
|
vmcb12->control.event_inj,
|
|
|
|
vmcb12->control.nested_ctl,
|
2022-08-26 06:57:55 +08:00
|
|
|
vmcb12->control.nested_cr3,
|
|
|
|
vmcb12->save.cr3,
|
2022-08-26 06:57:53 +08:00
|
|
|
KVM_ISA_SVM);
|
2021-02-17 22:57:13 +08:00
|
|
|
|
|
|
|
trace_kvm_nested_intercepts(vmcb12->control.intercepts[INTERCEPT_CR] & 0xffff,
|
|
|
|
vmcb12->control.intercepts[INTERCEPT_CR] >> 16,
|
|
|
|
vmcb12->control.intercepts[INTERCEPT_EXCEPTION],
|
|
|
|
vmcb12->control.intercepts[INTERCEPT_WORD3],
|
|
|
|
vmcb12->control.intercepts[INTERCEPT_WORD4],
|
|
|
|
vmcb12->control.intercepts[INTERCEPT_WORD5]);
|
|
|
|
|
|
|
|
|
2020-08-28 01:11:39 +08:00
|
|
|
svm->nested.vmcb12_gpa = vmcb12_gpa;
|
2021-01-13 20:07:52 +08:00
|
|
|
|
|
|
|
WARN_ON(svm->vmcb == svm->nested.vmcb02.ptr);
|
|
|
|
|
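        /*
         * Copy the L1/L2-shared state into vmcb02 and switch to it, so that
         * L2 runs on its own VMCB while vmcb01 keeps holding the L1 state.
         */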
        nested_svm_copy_common_state(svm->vmcb01.ptr, svm->nested.vmcb02.ptr);

        svm_switch_vmcb(svm, &svm->nested.vmcb02);
        nested_vmcb02_prepare_control(svm, vmcb12->save.rip, vmcb12->save.cs.base);
        nested_vmcb02_prepare_save(svm, vmcb12);

        ret = nested_svm_load_cr3(&svm->vcpu, svm->nested.save.cr3,
                                  nested_npt_enabled(svm), from_vmrun);
        if (ret)
                return ret;

        if (!from_vmrun)
                kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);

        svm_set_gif(svm, true);

        if (kvm_vcpu_apicv_active(vcpu))
                kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu);

        nested_svm_hv_update_vm_vp_ids(vcpu);

        return 0;
}

int nested_svm_vmrun(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        int ret;
        struct vmcb *vmcb12;
        struct kvm_host_map map;
        u64 vmcb12_gpa;
        struct vmcb *vmcb01 = svm->vmcb01.ptr;

        if (!svm->nested.hsave_msr) {
                kvm_inject_gp(vcpu, 0);
                return 1;
        }

        if (is_smm(vcpu)) {
                kvm_queue_exception(vcpu, UD_VECTOR);
                return 1;
        }

        /* This fails when VP assist page is enabled but the supplied GPA is bogus */
        ret = kvm_hv_verify_vp_assist(vcpu);
        if (ret) {
                kvm_inject_gp(vcpu, 0);
                return ret;
        }

        vmcb12_gpa = svm->vmcb->save.rax;
        ret = kvm_vcpu_map(vcpu, gpa_to_gfn(vmcb12_gpa), &map);
        if (ret == -EINVAL) {
                kvm_inject_gp(vcpu, 0);
                return 1;
        } else if (ret) {
                return kvm_skip_emulated_instruction(vcpu);
        }

        ret = kvm_skip_emulated_instruction(vcpu);

        vmcb12 = map.hva;

        if (WARN_ON_ONCE(!svm->nested.initialized))
                return -EINVAL;

        nested_copy_vmcb_control_to_cache(svm, &vmcb12->control);
        nested_copy_vmcb_save_to_cache(svm, &vmcb12->save);

        if (!nested_vmcb_check_save(vcpu) ||
            !nested_vmcb_check_controls(vcpu)) {
                vmcb12->control.exit_code    = SVM_EXIT_ERR;
                vmcb12->control.exit_code_hi = 0;
                vmcb12->control.exit_info_1  = 0;
                vmcb12->control.exit_info_2  = 0;
                goto out;
        }

        /*
         * Since vmcb01 is not in use, we can use it to store some of the L1
         * state.
         */
        vmcb01->save.efer   = vcpu->arch.efer;
        vmcb01->save.cr0    = kvm_read_cr0(vcpu);
        vmcb01->save.cr4    = vcpu->arch.cr4;
        vmcb01->save.rflags = kvm_get_rflags(vcpu);
        vmcb01->save.rip    = kvm_rip_read(vcpu);

        if (!npt_enabled)
                vmcb01->save.cr3 = kvm_read_cr3(vcpu);

        svm->nested.nested_run_pending = 1;

        if (enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12, true))
                goto out_exit_err;

        if (nested_svm_vmrun_msrpm(svm))
                goto out;

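        /*
         * The nested entry failed after the vCPU was already committed to
         * guest mode; synthesize a failed VMRUN (SVM_EXIT_ERR) and exit
         * back to L1.
         */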
out_exit_err:
        svm->nested.nested_run_pending = 0;
        svm->nmi_l1_to_l2 = false;
        svm->soft_int_injected = false;

        svm->vmcb->control.exit_code    = SVM_EXIT_ERR;
        svm->vmcb->control.exit_code_hi = 0;
        svm->vmcb->control.exit_info_1  = 0;
        svm->vmcb->control.exit_info_2  = 0;

        nested_svm_vmexit(svm);

out:
        kvm_vcpu_unmap(vcpu, &map, true);

        return ret;
}

/* Copy state save area fields which are handled by VMRUN */
void svm_copy_vmrun_state(struct vmcb_save_area *to_save,
                          struct vmcb_save_area *from_save)
{
        to_save->es = from_save->es;
        to_save->cs = from_save->cs;
        to_save->ss = from_save->ss;
        to_save->ds = from_save->ds;
        to_save->gdtr = from_save->gdtr;
        to_save->idtr = from_save->idtr;
        to_save->rflags = from_save->rflags | X86_EFLAGS_FIXED;
        to_save->efer = from_save->efer;
        to_save->cr0 = from_save->cr0;
        to_save->cr3 = from_save->cr3;
        to_save->cr4 = from_save->cr4;
        to_save->rax = from_save->rax;
        to_save->rsp = from_save->rsp;
        to_save->rip = from_save->rip;
        to_save->cpl = 0;
}

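/* Copy the save area fields that VMLOAD/VMSAVE move between the VMCB and the CPU. */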
void svm_copy_vmloadsave_state(struct vmcb *to_vmcb, struct vmcb *from_vmcb)
{
        to_vmcb->save.fs = from_vmcb->save.fs;
        to_vmcb->save.gs = from_vmcb->save.gs;
        to_vmcb->save.tr = from_vmcb->save.tr;
        to_vmcb->save.ldtr = from_vmcb->save.ldtr;
        to_vmcb->save.kernel_gs_base = from_vmcb->save.kernel_gs_base;
        to_vmcb->save.star = from_vmcb->save.star;
        to_vmcb->save.lstar = from_vmcb->save.lstar;
        to_vmcb->save.cstar = from_vmcb->save.cstar;
        to_vmcb->save.sfmask = from_vmcb->save.sfmask;
        to_vmcb->save.sysenter_cs = from_vmcb->save.sysenter_cs;
        to_vmcb->save.sysenter_esp = from_vmcb->save.sysenter_esp;
        to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip;
}

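/*
 * Emulate a #VMEXIT from L2 to L1: copy the L2 state that the CPU would
 * save into vmcb12, then switch back to vmcb01 and restore the L1 state
 * that was stashed there by nested_svm_vmrun().
 */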
int nested_svm_vmexit(struct vcpu_svm *svm)
{
        struct kvm_vcpu *vcpu = &svm->vcpu;
        struct vmcb *vmcb01 = svm->vmcb01.ptr;
        struct vmcb *vmcb02 = svm->nested.vmcb02.ptr;
        struct vmcb *vmcb12;
        struct kvm_host_map map;
        int rc;

        rc = kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.vmcb12_gpa), &map);
        if (rc) {
                if (rc == -EINVAL)
                        kvm_inject_gp(vcpu, 0);
                return 1;
        }

        vmcb12 = map.hva;

        /* Exit Guest-Mode */
        leave_guest_mode(vcpu);
        svm->nested.vmcb12_gpa = 0;
        WARN_ON_ONCE(svm->nested.nested_run_pending);

        kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);

        /* in case we halted in L2 */
        svm->vcpu.arch.mp_state = KVM_MP_STATE_RUNNABLE;

        /* Give the current vmcb to the guest */

        vmcb12->save.es     = vmcb02->save.es;
        vmcb12->save.cs     = vmcb02->save.cs;
        vmcb12->save.ss     = vmcb02->save.ss;
        vmcb12->save.ds     = vmcb02->save.ds;
        vmcb12->save.gdtr   = vmcb02->save.gdtr;
        vmcb12->save.idtr   = vmcb02->save.idtr;
        vmcb12->save.efer   = svm->vcpu.arch.efer;
        vmcb12->save.cr0    = kvm_read_cr0(vcpu);
        vmcb12->save.cr3    = kvm_read_cr3(vcpu);
        vmcb12->save.cr2    = vmcb02->save.cr2;
        vmcb12->save.cr4    = svm->vcpu.arch.cr4;
        vmcb12->save.rflags = kvm_get_rflags(vcpu);
        vmcb12->save.rip    = kvm_rip_read(vcpu);
        vmcb12->save.rsp    = kvm_rsp_read(vcpu);
        vmcb12->save.rax    = kvm_rax_read(vcpu);
        vmcb12->save.dr7    = vmcb02->save.dr7;
        vmcb12->save.dr6    = svm->vcpu.arch.dr6;
        vmcb12->save.cpl    = vmcb02->save.cpl;

        vmcb12->control.int_state    = vmcb02->control.int_state;
        vmcb12->control.exit_code    = vmcb02->control.exit_code;
        vmcb12->control.exit_code_hi = vmcb02->control.exit_code_hi;
        vmcb12->control.exit_info_1  = vmcb02->control.exit_info_1;
        vmcb12->control.exit_info_2  = vmcb02->control.exit_info_2;

        if (vmcb12->control.exit_code != SVM_EXIT_ERR)
                nested_save_pending_event_to_vmcb12(svm, vmcb12);

        if (svm->nrips_enabled)
                vmcb12->control.next_rip = vmcb02->control.next_rip;

        vmcb12->control.int_ctl       = svm->nested.ctl.int_ctl;
        vmcb12->control.tlb_ctl       = svm->nested.ctl.tlb_ctl;
        vmcb12->control.event_inj     = svm->nested.ctl.event_inj;
        vmcb12->control.event_inj_err = svm->nested.ctl.event_inj_err;

        if (!kvm_pause_in_guest(vcpu->kvm)) {
                vmcb01->control.pause_filter_count = vmcb02->control.pause_filter_count;
                vmcb_mark_dirty(vmcb01, VMCB_INTERCEPTS);
        }

        nested_svm_copy_common_state(svm->nested.vmcb02.ptr, svm->vmcb01.ptr);

        svm_switch_vmcb(svm, &svm->vmcb01);

        if (unlikely(svm->lbrv_enabled && (svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK))) {
                svm_copy_lbrs(vmcb12, vmcb02);
                svm_update_lbrv(vcpu);
        } else if (unlikely(vmcb01->control.virt_ext & LBR_CTL_ENABLE_MASK)) {
                svm_copy_lbrs(vmcb01, vmcb02);
                svm_update_lbrv(vcpu);
        }

        /*
         * On vmexit the GIF is set to false and
         * no event can be injected in L1.
         */
        svm_set_gif(svm, false);
        vmcb01->control.exit_int_info = 0;

        svm->vcpu.arch.tsc_offset = svm->vcpu.arch.l1_tsc_offset;
        if (vmcb01->control.tsc_offset != svm->vcpu.arch.tsc_offset) {
                vmcb01->control.tsc_offset = svm->vcpu.arch.tsc_offset;
                vmcb_mark_dirty(vmcb01, VMCB_INTERCEPTS);
        }

        if (svm->tsc_ratio_msr != kvm_caps.default_tsc_scaling_ratio) {
                WARN_ON(!svm->tsc_scaling_enabled);
                vcpu->arch.tsc_scaling_ratio = vcpu->arch.l1_tsc_scaling_ratio;
                __svm_write_tsc_multiplier(vcpu->arch.tsc_scaling_ratio);
        }

        svm->nested.ctl.nested_cr3 = 0;

        /*
         * Restore processor state that had been saved in vmcb01
         */
        kvm_set_rflags(vcpu, vmcb01->save.rflags);
        svm_set_efer(vcpu, vmcb01->save.efer);
        svm_set_cr0(vcpu, vmcb01->save.cr0 | X86_CR0_PE);
        svm_set_cr4(vcpu, vmcb01->save.cr4);
        kvm_rax_write(vcpu, vmcb01->save.rax);
        kvm_rsp_write(vcpu, vmcb01->save.rsp);
        kvm_rip_write(vcpu, vmcb01->save.rip);

        svm->vcpu.arch.dr7 = DR7_FIXED_1;
        kvm_update_dr7(&svm->vcpu);

        trace_kvm_nested_vmexit_inject(vmcb12->control.exit_code,
                                       vmcb12->control.exit_info_1,
                                       vmcb12->control.exit_info_2,
                                       vmcb12->control.exit_int_info,
                                       vmcb12->control.exit_int_info_err,
                                       KVM_ISA_SVM);

        kvm_vcpu_unmap(vcpu, &map, true);

        nested_svm_transition_tlb_flush(vcpu);

        nested_svm_uninit_mmu_context(vcpu);

        rc = nested_svm_load_cr3(vcpu, vmcb01->save.cr3, false, true);
        if (rc)
                return 1;

        /*
         * Drop what we picked up for L2 via svm_complete_interrupts() so it
         * doesn't end up in L1.
         */
        svm->vcpu.arch.nmi_injected = false;
        kvm_clear_exception_queue(vcpu);
        kvm_clear_interrupt_queue(vcpu);

        /*
         * If we are here following the completion of a VMRUN that
         * is being single-stepped, queue the pending #DB intercept
         * right now so that it can be accounted for before we execute
         * L1's next instruction.
         */
        if (unlikely(vmcb01->save.rflags & X86_EFLAGS_TF))
                kvm_queue_exception(&(svm->vcpu), DB_VECTOR);

        /*
         * Un-inhibit the AVIC right away, so that other vCPUs can start
         * to benefit from it right away.
         */
        if (kvm_apicv_activated(vcpu->kvm))
                kvm_vcpu_update_apicv(vcpu);

        return 0;
}

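/* Reflect a triple fault into L1 as a SHUTDOWN #VMEXIT if L1 intercepts it. */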
static void nested_svm_triple_fault(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        if (!vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_SHUTDOWN))
                return;

        kvm_clear_request(KVM_REQ_TRIPLE_FAULT, vcpu);
        nested_svm_simple_vmexit(to_svm(vcpu), SVM_EXIT_SHUTDOWN);
}

int svm_allocate_nested(struct vcpu_svm *svm)
{
        struct page *vmcb02_page;

        if (svm->nested.initialized)
                return 0;

        vmcb02_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
        if (!vmcb02_page)
                return -ENOMEM;
        svm->nested.vmcb02.ptr = page_address(vmcb02_page);
        svm->nested.vmcb02.pa = __sme_set(page_to_pfn(vmcb02_page) << PAGE_SHIFT);

        svm->nested.msrpm = svm_vcpu_alloc_msrpm();
        if (!svm->nested.msrpm)
                goto err_free_vmcb02;
        svm_vcpu_init_msrpm(&svm->vcpu, svm->nested.msrpm);

        svm->nested.initialized = true;
        return 0;

err_free_vmcb02:
        __free_page(vmcb02_page);
        return -ENOMEM;
}

void svm_free_nested(struct vcpu_svm *svm)
{
        if (!svm->nested.initialized)
                return;

        if (WARN_ON_ONCE(svm->vmcb != svm->vmcb01.ptr))
                svm_switch_vmcb(svm, &svm->vmcb01);

        svm_vcpu_free_msrpm(svm->nested.msrpm);
        svm->nested.msrpm = NULL;

        __free_page(virt_to_page(svm->nested.vmcb02.ptr));
        svm->nested.vmcb02.ptr = NULL;

        /*
         * When last_vmcb12_gpa matches the current vmcb12 gpa,
         * some vmcb12 fields are not loaded if they are marked clean
         * in the vmcb12, since in this case they are up to date already.
         *
         * When the vmcb02 is freed, this optimization becomes invalid.
         */
        svm->nested.last_vmcb12_gpa = INVALID_GPA;

        svm->nested.initialized = false;
}

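/*
 * Forcibly leave nested mode, e.g. when userspace toggles SMM state via
 * KVM_SET_VCPU_EVENTS while the vCPU is in guest mode.
 */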
void svm_leave_nested(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        if (is_guest_mode(vcpu)) {
                svm->nested.nested_run_pending = 0;
                svm->nested.vmcb12_gpa = INVALID_GPA;

                leave_guest_mode(vcpu);

                svm_switch_vmcb(svm, &svm->vmcb01);

                nested_svm_uninit_mmu_context(vcpu);
                vmcb_mark_all_dirty(svm->vmcb);
        }

        kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
}

static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
{
        u32 offset, msr, value;
        int write, mask;

        if (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT)))
                return NESTED_EXIT_HOST;

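        /*
         * The MSR permission map uses two bits per MSR (one for reads, one
         * for writes), sixteen MSRs per 32-bit word; compute the bit that
         * corresponds to this access within L1's bitmap.
         */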
        msr    = svm->vcpu.arch.regs[VCPU_REGS_RCX];
        offset = svm_msrpm_offset(msr);
        write  = svm->vmcb->control.exit_info_1 & 1;
        mask   = 1 << ((2 * (msr & 0xf)) + write);

        if (offset == MSR_INVALID)
                return NESTED_EXIT_DONE;

        /* Offset is in 32 bit units but we need it in 8 bit units */
        offset *= 4;

        if (kvm_vcpu_read_guest(&svm->vcpu, svm->nested.ctl.msrpm_base_pa + offset, &value, 4))
                return NESTED_EXIT_DONE;

        return (value & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
}

static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
{
        unsigned port, size, iopm_len;
        u16 val, mask;
        u8 start_bit;
        u64 gpa;

        if (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_IOIO_PROT)))
                return NESTED_EXIT_HOST;

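        /*
         * The I/O permission map has one bit per port; an access of 'size'
         * bytes may straddle a byte boundary, in which case two bytes of
         * the bitmap must be read.
         */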
        port = svm->vmcb->control.exit_info_1 >> 16;
        size = (svm->vmcb->control.exit_info_1 & SVM_IOIO_SIZE_MASK) >>
                SVM_IOIO_SIZE_SHIFT;
        gpa  = svm->nested.ctl.iopm_base_pa + (port / 8);
        start_bit = port % 8;
        iopm_len = (start_bit + size > 8) ? 2 : 1;
        mask = (0xf >> (4 - size)) << start_bit;
        val = 0;

        if (kvm_vcpu_read_guest(&svm->vcpu, gpa, &val, iopm_len))
                return NESTED_EXIT_DONE;

        return (val & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
}

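/*
 * Decide whether a given #VMEXIT should be handled by L0 (NESTED_EXIT_HOST)
 * or reflected to L1 (NESTED_EXIT_DONE) based on L1's intercept settings.
 */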
static int nested_svm_intercept(struct vcpu_svm *svm)
{
        u32 exit_code = svm->vmcb->control.exit_code;
        int vmexit = NESTED_EXIT_HOST;

        switch (exit_code) {
        case SVM_EXIT_MSR:
                vmexit = nested_svm_exit_handled_msr(svm);
                break;
        case SVM_EXIT_IOIO:
                vmexit = nested_svm_intercept_ioio(svm);
                break;
        case SVM_EXIT_READ_CR0 ... SVM_EXIT_WRITE_CR8: {
                if (vmcb12_is_intercept(&svm->nested.ctl, exit_code))
                        vmexit = NESTED_EXIT_DONE;
                break;
        }
        case SVM_EXIT_READ_DR0 ... SVM_EXIT_WRITE_DR7: {
                if (vmcb12_is_intercept(&svm->nested.ctl, exit_code))
                        vmexit = NESTED_EXIT_DONE;
                break;
        }
        case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
                /*
                 * Host-intercepted exceptions have been checked already in
                 * nested_svm_exit_special. There is nothing to do here,
                 * the vmexit is injected by svm_check_nested_events.
                 */
                vmexit = NESTED_EXIT_DONE;
                break;
        }
        case SVM_EXIT_ERR: {
                vmexit = NESTED_EXIT_DONE;
                break;
        }
        default: {
                if (vmcb12_is_intercept(&svm->nested.ctl, exit_code))
                        vmexit = NESTED_EXIT_DONE;
        }
        }

        return vmexit;
}

int nested_svm_exit_handled(struct vcpu_svm *svm)
{
        int vmexit;

        vmexit = nested_svm_intercept(svm);

        if (vmexit == NESTED_EXIT_DONE)
                nested_svm_vmexit(svm);

        return vmexit;
}

int nested_svm_check_permissions(struct kvm_vcpu *vcpu)
{
        if (!(vcpu->arch.efer & EFER_SVME) || !is_paging(vcpu)) {
                kvm_queue_exception(vcpu, UD_VECTOR);
                return 1;
        }

        if (to_svm(vcpu)->vmcb->save.cpl) {
                kvm_inject_gp(vcpu, 0);
                return 1;
        }

        return 0;
}

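/* Return true if L1 wants to intercept an exception with the given vector. */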
static bool nested_svm_is_exception_vmexit(struct kvm_vcpu *vcpu, u8 vector,
                                           u32 error_code)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        return (svm->nested.ctl.intercepts[INTERCEPT_EXCEPTION] & BIT(vector));
}

static void nested_svm_inject_exception_vmexit(struct kvm_vcpu *vcpu)
{
        struct kvm_queued_exception *ex = &vcpu->arch.exception_vmexit;
        struct vcpu_svm *svm = to_svm(vcpu);
        struct vmcb *vmcb = svm->vmcb;

        vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + ex->vector;
        vmcb->control.exit_code_hi = 0;

        if (ex->has_error_code)
                vmcb->control.exit_info_1 = ex->error_code;

        /*
         * EXITINFO2 is undefined for all exception intercepts other
         * than #PF.
         */
        if (ex->vector == PF_VECTOR) {
2022-08-31 07:16:08 +08:00
|
|
|
if (ex->has_payload)
|
2022-08-31 07:16:01 +08:00
|
|
|
vmcb->control.exit_info_2 = ex->payload;
|
2020-05-16 20:42:28 +08:00
|
|
|
else
|
2022-08-31 07:16:01 +08:00
|
|
|
vmcb->control.exit_info_2 = vcpu->arch.cr2;
|
|
|
|
} else if (ex->vector == DB_VECTOR) {
|
2022-08-31 07:16:11 +08:00
|
|
|
/* See kvm_check_and_inject_events(). */
|
2022-08-31 07:16:01 +08:00
|
|
|
kvm_deliver_exception_payload(vcpu, ex);
|
|
|
|
|
|
|
|
if (vcpu->arch.dr7 & DR7_GD) {
|
|
|
|
vcpu->arch.dr7 &= ~DR7_GD;
|
|
|
|
kvm_update_dr7(vcpu);
|
2020-05-16 20:42:28 +08:00
|
|
|
}
|
2022-08-31 07:16:01 +08:00
|
|
|
} else {
|
|
|
|
WARN_ON(ex->has_payload);
|
|
|
|
}
|
2020-03-24 17:41:52 +08:00
|
|
|
|
2020-05-16 20:42:28 +08:00
|
|
|
nested_svm_vmexit(svm);
|
2020-03-24 17:41:52 +08:00
|
|
|
}
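For illustration only: a minimal sketch of the queue-time "morph" described in the commit message above. The helpers l1_intercepts_exception(), queue_exception_vmexit() and queue_exception() are hypothetical stand-ins for KVM's real queueing paths, not actual KVM functions; only is_guest_mode() is real.

static void sketch_queue_exception(struct kvm_vcpu *vcpu, u8 vector,
				   bool has_error_code, u32 error_code)
{
	/*
	 * Decide at queue time whether L1 intercepts the exception.  If it
	 * does, pend an exception VM-Exit (consumed above by
	 * nested_svm_inject_exception_vmexit() via svm_check_nested_events())
	 * instead of pending the exception for L2, so injected+pending
	 * combinations are never escalated to #DF behind L1's back.
	 */
	if (is_guest_mode(vcpu) && l1_intercepts_exception(vcpu, vector))
		queue_exception_vmexit(vcpu, vector, has_error_code, error_code);
	else
		queue_exception(vcpu, vector, has_error_code, error_code);
}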
|
|
|
|
|
2020-05-16 20:50:35 +08:00
|
|
|
static inline bool nested_exit_on_init(struct vcpu_svm *svm)
|
|
|
|
{
|
2021-11-03 22:05:26 +08:00
|
|
|
return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_INIT);
|
2020-05-16 20:50:35 +08:00
|
|
|
}
|
|
|
|
|
2020-04-17 22:24:18 +08:00
|
|
|
static int svm_check_nested_events(struct kvm_vcpu *vcpu)
|
2020-03-24 17:41:52 +08:00
|
|
|
{
|
2020-05-16 20:50:35 +08:00
|
|
|
struct kvm_lapic *apic = vcpu->arch.apic;
|
2022-08-31 07:16:02 +08:00
|
|
|
struct vcpu_svm *svm = to_svm(vcpu);
|
|
|
|
/*
|
|
|
|
* Only a pending nested run blocks a pending exception. If there is a
|
|
|
|
* previously injected event, the pending exception occurred while said
|
|
|
|
* event was being delivered and thus needs to be handled.
|
|
|
|
*/
|
|
|
|
bool block_nested_exceptions = svm->nested.nested_run_pending;
|
|
|
|
/*
|
|
|
|
* New events (not exceptions) are only recognized at instruction
|
|
|
|
* boundaries. If an event needs reinjection, then KVM is handling a
|
|
|
|
* VM-Exit that occurred _during_ instruction execution; new events are
|
|
|
|
* blocked until the instruction completes.
|
|
|
|
*/
|
|
|
|
bool block_nested_events = block_nested_exceptions ||
|
|
|
|
kvm_event_needs_reinjection(vcpu);
|
2020-05-16 20:50:35 +08:00
|
|
|
|
|
|
|
if (lapic_in_kernel(vcpu) &&
|
|
|
|
test_bit(KVM_APIC_INIT, &apic->pending_events)) {
|
|
|
|
if (block_nested_events)
|
|
|
|
return -EBUSY;
|
|
|
|
if (!nested_exit_on_init(svm))
|
|
|
|
return 0;
|
2021-03-03 01:45:15 +08:00
|
|
|
nested_svm_simple_vmexit(svm, SVM_EXIT_INIT);
|
2020-05-16 20:50:35 +08:00
|
|
|
return 0;
|
|
|
|
}
|
2020-03-24 17:41:52 +08:00
|
|
|
|
2022-08-31 07:16:08 +08:00
|
|
|
if (vcpu->arch.exception_vmexit.pending) {
|
2022-08-31 07:16:02 +08:00
|
|
|
if (block_nested_exceptions)
|
2020-05-16 20:42:28 +08:00
|
|
|
return -EBUSY;
|
2022-08-31 07:16:01 +08:00
|
|
|
nested_svm_inject_exception_vmexit(vcpu);
|
2020-05-16 20:42:28 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2022-08-31 07:16:08 +08:00
|
|
|
if (vcpu->arch.exception.pending) {
|
|
|
|
if (block_nested_exceptions)
|
|
|
|
return -EBUSY;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2022-09-30 01:20:14 +08:00
|
|
|
#ifdef CONFIG_KVM_SMM
|
2020-04-23 20:13:10 +08:00
|
|
|
if (vcpu->arch.smi_pending && !svm_smi_blocked(vcpu)) {
|
2020-04-23 20:17:28 +08:00
|
|
|
if (block_nested_events)
|
|
|
|
return -EBUSY;
|
2020-04-23 20:13:10 +08:00
|
|
|
if (!nested_exit_on_smi(svm))
|
|
|
|
return 0;
|
2021-03-03 01:45:15 +08:00
|
|
|
nested_svm_simple_vmexit(svm, SVM_EXIT_SMI);
|
2020-04-23 20:17:28 +08:00
|
|
|
return 0;
|
|
|
|
}
|
2022-09-30 01:20:14 +08:00
|
|
|
#endif
|
2020-04-23 20:17:28 +08:00
|
|
|
|
2020-04-23 20:13:10 +08:00
|
|
|
if (vcpu->arch.nmi_pending && !svm_nmi_blocked(vcpu)) {
|
2020-04-15 04:11:06 +08:00
|
|
|
if (block_nested_events)
|
|
|
|
return -EBUSY;
|
2020-04-23 20:13:10 +08:00
|
|
|
if (!nested_exit_on_nmi(svm))
|
|
|
|
return 0;
|
2021-03-03 01:45:15 +08:00
|
|
|
nested_svm_simple_vmexit(svm, SVM_EXIT_NMI);
|
2020-04-15 04:11:06 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2020-04-23 20:13:10 +08:00
|
|
|
if (kvm_cpu_has_interrupt(vcpu) && !svm_interrupt_blocked(vcpu)) {
|
2020-03-24 17:41:52 +08:00
|
|
|
if (block_nested_events)
|
|
|
|
return -EBUSY;
|
2020-04-23 20:13:10 +08:00
|
|
|
if (!nested_exit_on_intr(svm))
|
|
|
|
return 0;
|
2021-03-03 01:45:15 +08:00
|
|
|
trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip);
|
|
|
|
nested_svm_simple_vmexit(svm, SVM_EXIT_INTR);
|
2020-03-24 17:41:52 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
int nested_svm_exit_special(struct vcpu_svm *svm)
|
|
|
|
{
|
|
|
|
u32 exit_code = svm->vmcb->control.exit_code;
|
2022-11-01 22:54:06 +08:00
|
|
|
struct kvm_vcpu *vcpu = &svm->vcpu;
|
2020-03-24 17:41:52 +08:00
|
|
|
|
|
|
|
switch (exit_code) {
|
|
|
|
case SVM_EXIT_INTR:
|
|
|
|
case SVM_EXIT_NMI:
|
|
|
|
case SVM_EXIT_NPF:
|
2020-05-16 20:42:28 +08:00
|
|
|
return NESTED_EXIT_HOST;
|
|
|
|
case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
|
|
|
|
u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);
|
|
|
|
|
2021-01-13 20:07:52 +08:00
|
|
|
if (svm->vmcb01.ptr->control.intercepts[INTERCEPT_EXCEPTION] &
|
|
|
|
excp_bits)
|
2020-03-24 17:41:52 +08:00
|
|
|
return NESTED_EXIT_HOST;
|
2020-05-16 20:42:28 +08:00
|
|
|
else if (exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR &&
|
2020-05-25 22:41:17 +08:00
|
|
|
svm->vcpu.arch.apf.host_apf_flags)
|
2020-05-16 20:42:28 +08:00
|
|
|
/* Trap async PF even if not shadowing */
|
2020-03-24 17:41:52 +08:00
|
|
|
return NESTED_EXIT_HOST;
|
|
|
|
break;
|
2020-05-16 20:42:28 +08:00
|
|
|
}
|
2022-11-01 22:54:06 +08:00
|
|
|
case SVM_EXIT_VMMCALL:
|
|
|
|
/* Hyper-V L2 TLB flush hypercall is handled by L0 */
|
|
|
|
if (guest_hv_cpuid_has_l2_tlb_flush(vcpu) &&
|
|
|
|
nested_svm_l2_tlb_flush_enabled(vcpu) &&
|
|
|
|
kvm_hv_is_tlb_flush_hcall(vcpu))
|
|
|
|
return NESTED_EXIT_HOST;
|
|
|
|
break;
|
2020-03-24 17:41:52 +08:00
|
|
|
default:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
return NESTED_EXIT_CONTINUE;
|
|
|
|
}
|
2020-04-17 22:24:18 +08:00
|
|
|
|
2021-09-14 23:48:24 +08:00
|
|
|
void nested_svm_update_tsc_ratio_msr(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
|
|
|
struct vcpu_svm *svm = to_svm(vcpu);
|
|
|
|
|
|
|
|
vcpu->arch.tsc_scaling_ratio =
|
|
|
|
kvm_calc_nested_tsc_multiplier(vcpu->arch.l1_tsc_scaling_ratio,
|
|
|
|
svm->tsc_ratio_msr);
|
2022-06-07 02:11:49 +08:00
|
|
|
__svm_write_tsc_multiplier(vcpu->arch.tsc_scaling_ratio);
|
2021-09-14 23:48:24 +08:00
|
|
|
}
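For reference, the combined ratio computed by kvm_calc_nested_tsc_multiplier() is a fixed-point multiply of the L1 and L2 ratios. A minimal standalone sketch, assuming SVM's TSC_RATIO format of 32 fractional bits (8.32 fixed point); the helper name is hypothetical:

static inline u64 combine_tsc_ratios(u64 l1_ratio, u64 l2_ratio)
{
	/* Product of two 8.32 values, shifted right by 32 to stay in 8.32. */
	return (u64)(((unsigned __int128)l1_ratio * l2_ratio) >> 32);
}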
|
|
|
|
|
2021-11-03 22:05:26 +08:00
|
|
|
/* Inverse operation of nested_copy_vmcb_control_to_cache(). asid is copied too. */
|
|
|
|
static void nested_copy_vmcb_cache_to_control(struct vmcb_control_area *dst,
|
|
|
|
struct vmcb_ctrl_area_cached *from)
|
|
|
|
{
|
|
|
|
unsigned int i;
|
|
|
|
|
|
|
|
memset(dst, 0, sizeof(struct vmcb_control_area));
|
|
|
|
|
|
|
|
for (i = 0; i < MAX_INTERCEPT; i++)
|
|
|
|
dst->intercepts[i] = from->intercepts[i];
|
|
|
|
|
|
|
|
dst->iopm_base_pa = from->iopm_base_pa;
|
|
|
|
dst->msrpm_base_pa = from->msrpm_base_pa;
|
|
|
|
dst->tsc_offset = from->tsc_offset;
|
|
|
|
dst->asid = from->asid;
|
|
|
|
dst->tlb_ctl = from->tlb_ctl;
|
|
|
|
dst->int_ctl = from->int_ctl;
|
|
|
|
dst->int_vector = from->int_vector;
|
|
|
|
dst->int_state = from->int_state;
|
|
|
|
dst->exit_code = from->exit_code;
|
|
|
|
dst->exit_code_hi = from->exit_code_hi;
|
|
|
|
dst->exit_info_1 = from->exit_info_1;
|
|
|
|
dst->exit_info_2 = from->exit_info_2;
|
|
|
|
dst->exit_int_info = from->exit_int_info;
|
|
|
|
dst->exit_int_info_err = from->exit_int_info_err;
|
|
|
|
dst->nested_ctl = from->nested_ctl;
|
|
|
|
dst->event_inj = from->event_inj;
|
|
|
|
dst->event_inj_err = from->event_inj_err;
|
2022-05-02 06:07:25 +08:00
|
|
|
dst->next_rip = from->next_rip;
|
2021-11-03 22:05:26 +08:00
|
|
|
dst->nested_cr3 = from->nested_cr3;
|
|
|
|
dst->virt_ext = from->virt_ext;
|
|
|
|
dst->pause_filter_count = from->pause_filter_count;
|
|
|
|
dst->pause_filter_thresh = from->pause_filter_thresh;
|
2022-11-01 22:53:41 +08:00
|
|
|
/* 'clean' and 'hv_enlightenments' are not changed by KVM */
|
2021-11-03 22:05:26 +08:00
|
|
|
}
|
|
|
|
|
2020-05-14 01:36:32 +08:00
|
|
|
static int svm_get_nested_state(struct kvm_vcpu *vcpu,
|
|
|
|
struct kvm_nested_state __user *user_kvm_nested_state,
|
|
|
|
u32 user_data_size)
|
|
|
|
{
|
|
|
|
struct vcpu_svm *svm;
|
2021-11-03 22:05:26 +08:00
|
|
|
struct vmcb_control_area *ctl;
|
|
|
|
unsigned long r;
|
2020-05-14 01:36:32 +08:00
|
|
|
struct kvm_nested_state kvm_state = {
|
|
|
|
.flags = 0,
|
|
|
|
.format = KVM_STATE_NESTED_FORMAT_SVM,
|
|
|
|
.size = sizeof(kvm_state),
|
|
|
|
};
|
|
|
|
struct vmcb __user *user_vmcb = (struct vmcb __user *)
|
|
|
|
&user_kvm_nested_state->data.svm[0];
|
|
|
|
|
|
|
|
if (!vcpu)
|
|
|
|
return kvm_state.size + KVM_STATE_NESTED_SVM_VMCB_SIZE;
|
|
|
|
|
|
|
|
svm = to_svm(vcpu);
|
|
|
|
|
|
|
|
if (user_data_size < kvm_state.size)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
/* First fill in the header and copy it out. */
|
|
|
|
if (is_guest_mode(vcpu)) {
|
2020-08-28 01:11:39 +08:00
|
|
|
kvm_state.hdr.svm.vmcb_pa = svm->nested.vmcb12_gpa;
|
2020-05-14 01:36:32 +08:00
|
|
|
kvm_state.size += KVM_STATE_NESTED_SVM_VMCB_SIZE;
|
|
|
|
kvm_state.flags |= KVM_STATE_NESTED_GUEST_MODE;
|
|
|
|
|
|
|
|
if (svm->nested.nested_run_pending)
|
|
|
|
kvm_state.flags |= KVM_STATE_NESTED_RUN_PENDING;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (gif_set(svm))
|
|
|
|
kvm_state.flags |= KVM_STATE_NESTED_GIF_SET;
|
|
|
|
|
|
|
|
if (copy_to_user(user_kvm_nested_state, &kvm_state, sizeof(kvm_state)))
|
|
|
|
return -EFAULT;
|
|
|
|
|
|
|
|
if (!is_guest_mode(vcpu))
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Copy over the full size of the VMCB rather than just the size
|
|
|
|
* of the structs.
|
|
|
|
*/
|
|
|
|
if (clear_user(user_vmcb, KVM_STATE_NESTED_SVM_VMCB_SIZE))
|
|
|
|
return -EFAULT;
|
2021-11-03 22:05:26 +08:00
|
|
|
|
|
|
|
ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);
|
|
|
|
if (!ctl)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
nested_copy_vmcb_cache_to_control(ctl, &svm->nested.ctl);
|
|
|
|
r = copy_to_user(&user_vmcb->control, ctl,
|
|
|
|
sizeof(user_vmcb->control));
|
|
|
|
kfree(ctl);
|
|
|
|
if (r)
|
2020-05-14 01:36:32 +08:00
|
|
|
return -EFAULT;
|
2021-11-03 22:05:26 +08:00
|
|
|
|
2021-01-13 20:07:52 +08:00
|
|
|
if (copy_to_user(&user_vmcb->save, &svm->vmcb01.ptr->save,
|
2020-05-14 01:36:32 +08:00
|
|
|
sizeof(user_vmcb->save)))
|
|
|
|
return -EFAULT;
|
|
|
|
out:
|
|
|
|
return kvm_state.size;
|
|
|
|
}
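A minimal userspace-side usage sketch for the KVM_GET_NESTED_STATE path implemented above, assuming an open vCPU fd; the helper name is hypothetical and -E2BIG resizing is not handled:

#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <stdlib.h>

static struct kvm_nested_state *get_nested_state(int vcpu_fd)
{
	size_t size = sizeof(struct kvm_nested_state) +
		      KVM_STATE_NESTED_SVM_VMCB_SIZE;
	struct kvm_nested_state *state = calloc(1, size);

	if (!state)
		return NULL;

	/* The kernel reads .size first and returns -E2BIG if it is too small. */
	state->size = size;
	if (ioctl(vcpu_fd, KVM_GET_NESTED_STATE, state) < 0) {
		free(state);
		return NULL;
	}
	return state;
}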
|
|
|
|
|
|
|
|
static int svm_set_nested_state(struct kvm_vcpu *vcpu,
|
|
|
|
struct kvm_nested_state __user *user_kvm_nested_state,
|
|
|
|
struct kvm_nested_state *kvm_state)
|
|
|
|
{
|
|
|
|
struct vcpu_svm *svm = to_svm(vcpu);
|
|
|
|
struct vmcb __user *user_vmcb = (struct vmcb __user *)
|
|
|
|
&user_kvm_nested_state->data.svm[0];
|
2020-09-07 21:15:02 +08:00
|
|
|
struct vmcb_control_area *ctl;
|
|
|
|
struct vmcb_save_area *save;
|
2021-11-03 22:05:24 +08:00
|
|
|
struct vmcb_save_area_cached save_cached;
|
2021-11-03 22:05:26 +08:00
|
|
|
struct vmcb_ctrl_area_cached ctl_cached;
|
2021-06-23 01:56:59 +08:00
|
|
|
unsigned long cr0;
|
2020-09-07 21:15:02 +08:00
|
|
|
int ret;
|
2020-05-14 01:36:32 +08:00
|
|
|
|
2020-09-07 21:15:02 +08:00
|
|
|
BUILD_BUG_ON(sizeof(struct vmcb_control_area) + sizeof(struct vmcb_save_area) >
|
|
|
|
KVM_STATE_NESTED_SVM_VMCB_SIZE);
|
|
|
|
|
2020-05-14 01:36:32 +08:00
|
|
|
if (kvm_state->format != KVM_STATE_NESTED_FORMAT_SVM)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
if (kvm_state->flags & ~(KVM_STATE_NESTED_GUEST_MODE |
|
|
|
|
KVM_STATE_NESTED_RUN_PENDING |
|
|
|
|
KVM_STATE_NESTED_GIF_SET))
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If in guest mode, vcpu->arch.efer actually refers to the L2 guest's
|
|
|
|
* EFER.SVME, but EFER.SVME still has to be 1 for VMRUN to succeed.
|
|
|
|
*/
|
|
|
|
if (!(vcpu->arch.efer & EFER_SVME)) {
|
|
|
|
/* GIF=1 and no guest mode are required if SVME=0. */
|
|
|
|
if (kvm_state->flags != KVM_STATE_NESTED_GIF_SET)
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* SMM temporarily disables SVM, so we cannot be in guest mode. */
|
|
|
|
if (is_smm(vcpu) && (kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
if (!(kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE)) {
|
KVM: x86: Forcibly leave nested virt when SMM state is toggled
Forcibly leave nested virtualization operation if userspace toggles SMM
state via KVM_SET_VCPU_EVENTS or KVM_SYNC_X86_EVENTS. If userspace
forces the vCPU out of SMM while it's post-VMXON and then injects an SMI,
vmx_enter_smm() will overwrite vmx->nested.smm.vmxon and end up with both
vmxon=false and smm.vmxon=false, but all other nVMX state allocated.
Don't attempt to gracefully handle the transition as (a) most transitions
are nonsensical, e.g. forcing SMM while L2 is running, (b) there isn't
sufficient information to handle all transitions, e.g. SVM wants access
to the SMRAM save state, and (c) KVM_SET_VCPU_EVENTS must precede
KVM_SET_NESTED_STATE during state restore as the latter disallows putting
the vCPU into L2 if SMM is active, and disallows tagging the vCPU as
being post-VMXON in SMM if SMM is not active.
Abuse of KVM_SET_VCPU_EVENTS manifests as a WARN and memory leak in nVMX
due to failure to free vmcs01's shadow VMCS, but the bug goes far beyond
just a memory leak, e.g. toggling SMM on while L2 is active puts the vCPU
in an architecturally impossible state.
WARNING: CPU: 0 PID: 3606 at free_loaded_vmcs arch/x86/kvm/vmx/vmx.c:2665 [inline]
WARNING: CPU: 0 PID: 3606 at free_loaded_vmcs+0x158/0x1a0 arch/x86/kvm/vmx/vmx.c:2656
Modules linked in:
CPU: 1 PID: 3606 Comm: syz-executor725 Not tainted 5.17.0-rc1-syzkaller #0
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
RIP: 0010:free_loaded_vmcs arch/x86/kvm/vmx/vmx.c:2665 [inline]
RIP: 0010:free_loaded_vmcs+0x158/0x1a0 arch/x86/kvm/vmx/vmx.c:2656
Code: <0f> 0b eb b3 e8 8f 4d 9f 00 e9 f7 fe ff ff 48 89 df e8 92 4d 9f 00
Call Trace:
<TASK>
kvm_arch_vcpu_destroy+0x72/0x2f0 arch/x86/kvm/x86.c:11123
kvm_vcpu_destroy arch/x86/kvm/../../../virt/kvm/kvm_main.c:441 [inline]
kvm_destroy_vcpus+0x11f/0x290 arch/x86/kvm/../../../virt/kvm/kvm_main.c:460
kvm_free_vcpus arch/x86/kvm/x86.c:11564 [inline]
kvm_arch_destroy_vm+0x2e8/0x470 arch/x86/kvm/x86.c:11676
kvm_destroy_vm arch/x86/kvm/../../../virt/kvm/kvm_main.c:1217 [inline]
kvm_put_kvm+0x4fa/0xb00 arch/x86/kvm/../../../virt/kvm/kvm_main.c:1250
kvm_vm_release+0x3f/0x50 arch/x86/kvm/../../../virt/kvm/kvm_main.c:1273
__fput+0x286/0x9f0 fs/file_table.c:311
task_work_run+0xdd/0x1a0 kernel/task_work.c:164
exit_task_work include/linux/task_work.h:32 [inline]
do_exit+0xb29/0x2a30 kernel/exit.c:806
do_group_exit+0xd2/0x2f0 kernel/exit.c:935
get_signal+0x4b0/0x28c0 kernel/signal.c:2862
arch_do_signal_or_restart+0x2a9/0x1c40 arch/x86/kernel/signal.c:868
handle_signal_work kernel/entry/common.c:148 [inline]
exit_to_user_mode_loop kernel/entry/common.c:172 [inline]
exit_to_user_mode_prepare+0x17d/0x290 kernel/entry/common.c:207
__syscall_exit_to_user_mode_work kernel/entry/common.c:289 [inline]
syscall_exit_to_user_mode+0x19/0x60 kernel/entry/common.c:300
do_syscall_64+0x42/0xb0 arch/x86/entry/common.c:86
entry_SYSCALL_64_after_hwframe+0x44/0xae
</TASK>
Cc: stable@vger.kernel.org
Reported-by: syzbot+8112db3ab20e70d50c31@syzkaller.appspotmail.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20220125220358.2091737-1-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
2022-01-26 06:03:58 +08:00
|
|
|
svm_leave_nested(vcpu);
|
2020-09-14 21:37:25 +08:00
|
|
|
svm_set_gif(svm, !!(kvm_state->flags & KVM_STATE_NESTED_GIF_SET));
|
|
|
|
return 0;
|
2020-05-14 01:36:32 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
if (!page_address_valid(vcpu, kvm_state->hdr.svm.vmcb_pa))
|
|
|
|
return -EINVAL;
|
|
|
|
if (kvm_state->size < sizeof(*kvm_state) + KVM_STATE_NESTED_SVM_VMCB_SIZE)
|
|
|
|
return -EINVAL;
|
|
|
|
|
2020-09-07 21:15:02 +08:00
|
|
|
ret = -ENOMEM;
|
2021-03-31 10:30:25 +08:00
|
|
|
ctl = kzalloc(sizeof(*ctl), GFP_KERNEL_ACCOUNT);
|
|
|
|
save = kzalloc(sizeof(*save), GFP_KERNEL_ACCOUNT);
|
2020-09-07 21:15:02 +08:00
|
|
|
if (!ctl || !save)
|
|
|
|
goto out_free;
|
|
|
|
|
|
|
|
ret = -EFAULT;
|
|
|
|
if (copy_from_user(ctl, &user_vmcb->control, sizeof(*ctl)))
|
|
|
|
goto out_free;
|
|
|
|
if (copy_from_user(save, &user_vmcb->save, sizeof(*save)))
|
|
|
|
goto out_free;
|
|
|
|
|
|
|
|
ret = -EINVAL;
|
2022-02-02 17:51:00 +08:00
|
|
|
__nested_copy_vmcb_control_to_cache(vcpu, &ctl_cached, ctl);
|
2021-11-03 22:05:26 +08:00
|
|
|
if (!__nested_vmcb_check_controls(vcpu, &ctl_cached))
|
2020-09-07 21:15:02 +08:00
|
|
|
goto out_free;
|
2020-05-14 01:36:32 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Processor state contains L2 state. Check that it is
|
2021-03-31 19:35:52 +08:00
|
|
|
* valid for guest mode (see nested_vmcb_check_save).
|
2020-05-14 01:36:32 +08:00
|
|
|
*/
|
|
|
|
cr0 = kvm_read_cr0(vcpu);
|
|
|
|
if (((cr0 & X86_CR0_CD) == 0) && (cr0 & X86_CR0_NW))
|
2020-09-07 21:15:02 +08:00
|
|
|
goto out_free;
|
2020-05-14 01:36:32 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Validate host state saved from before VMRUN (see
|
|
|
|
* nested_svm_check_permissions).
|
|
|
|
*/
|
2021-11-03 22:05:24 +08:00
|
|
|
__nested_copy_vmcb_save_to_cache(&save_cached, save);
|
2020-10-07 03:06:52 +08:00
|
|
|
if (!(save->cr0 & X86_CR0_PG) ||
|
|
|
|
!(save->cr0 & X86_CR0_PE) ||
|
|
|
|
(save->rflags & X86_EFLAGS_VM) ||
|
2021-11-03 22:05:24 +08:00
|
|
|
!__nested_vmcb_check_save(vcpu, &save_cached))
|
2020-09-07 21:15:02 +08:00
|
|
|
goto out_free;
|
2020-05-14 01:36:32 +08:00
|
|
|
|
2021-06-07 17:01:59 +08:00
|
|
|
|
2020-05-14 01:36:32 +08:00
|
|
|
/*
|
2021-01-13 20:07:52 +08:00
|
|
|
* All checks done, we can enter guest mode. Userspace provides
|
|
|
|
* vmcb12.control, which will be combined with L1 and stored into
|
|
|
|
* vmcb02, and the L1 save state which we store in vmcb01.
|
|
|
|
* L2 registers if needed are moved from the current VMCB to VMCB02.
|
2020-05-14 01:36:32 +08:00
|
|
|
*/
|
2021-01-07 17:38:52 +08:00
|
|
|
|
2021-05-03 20:54:44 +08:00
|
|
|
if (is_guest_mode(vcpu))
|
2022-01-26 06:03:58 +08:00
|
|
|
svm_leave_nested(vcpu);
|
2021-05-03 20:54:44 +08:00
|
|
|
else
|
|
|
|
svm->nested.vmcb02.ptr->save = svm->vmcb01.ptr->save;
|
|
|
|
|
2021-05-04 22:39:35 +08:00
|
|
|
svm_set_gif(svm, !!(kvm_state->flags & KVM_STATE_NESTED_GIF_SET));
|
|
|
|
|
2021-01-07 17:38:52 +08:00
|
|
|
svm->nested.nested_run_pending =
|
|
|
|
!!(kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING);
|
|
|
|
|
2020-08-28 01:11:39 +08:00
|
|
|
svm->nested.vmcb12_gpa = kvm_state->hdr.svm.vmcb_pa;
|
2020-11-17 15:51:35 +08:00
|
|
|
|
2021-07-19 17:03:22 +08:00
|
|
|
svm_copy_vmrun_state(&svm->vmcb01.ptr->save, save);
|
2021-11-03 22:05:23 +08:00
|
|
|
nested_copy_vmcb_control_to_cache(svm, ctl);
|
2021-01-13 20:07:52 +08:00
|
|
|
|
|
|
|
svm_switch_vmcb(svm, &svm->nested.vmcb02);
|
2022-07-18 23:47:13 +08:00
|
|
|
nested_vmcb02_prepare_control(svm, svm->vmcb->save.rip, svm->vmcb->save.cs.base);
|
2022-02-07 23:54:19 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* While the nested guest CR3 is already checked and set by
|
|
|
|
* KVM_SET_SREGS, it was set before the nested state was loaded,
|
|
|
|
* thus MMU might not be initialized correctly.
|
|
|
|
* Set it again to fix this.
|
|
|
|
*/
|
|
|
|
|
|
|
|
ret = nested_svm_load_cr3(&svm->vcpu, vcpu->arch.cr3,
|
|
|
|
nested_npt_enabled(svm), false);
|
|
|
|
if (WARN_ON_ONCE(ret))
|
|
|
|
goto out_free;
|
|
|
|
|
2022-02-02 17:50:57 +08:00
|
|
|
svm->nested.force_msr_bitmap_recalc = true;
|
2022-02-07 23:54:19 +08:00
|
|
|
|
2020-09-22 19:43:14 +08:00
|
|
|
kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
|
2020-09-07 21:15:02 +08:00
|
|
|
ret = 0;
|
|
|
|
out_free:
|
|
|
|
kfree(save);
|
|
|
|
kfree(ctl);
|
|
|
|
|
|
|
|
return ret;
|
2020-05-14 01:36:32 +08:00
|
|
|
}
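On the restore side, the SMM-toggle commit message above notes that KVM_SET_VCPU_EVENTS must precede KVM_SET_NESTED_STATE; a minimal ordering sketch (hypothetical helper, same includes as the sketch above, error handling elided):

static int restore_nested_vcpu(int vcpu_fd,
			       struct kvm_vcpu_events *events,
			       struct kvm_nested_state *nested)
{
	/* Events (including SMM state) first, then the nested state blob. */
	if (ioctl(vcpu_fd, KVM_SET_VCPU_EVENTS, events) < 0)
		return -1;
	return ioctl(vcpu_fd, KVM_SET_NESTED_STATE, nested);
}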
|
|
|
|
|
2021-04-01 22:18:10 +08:00
|
|
|
static bool svm_get_nested_state_pages(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
|
|
|
struct vcpu_svm *svm = to_svm(vcpu);
|
|
|
|
|
|
|
|
if (WARN_ON(!is_guest_mode(vcpu)))
|
|
|
|
return true;
|
|
|
|
|
2021-06-07 17:02:03 +08:00
|
|
|
if (!vcpu->arch.pdptrs_from_userspace &&
|
|
|
|
!nested_npt_enabled(svm) && is_pae_paging(vcpu))
|
2021-06-07 17:01:59 +08:00
|
|
|
/*
|
|
|
|
* Reload the guest's PDPTRs since after a migration
|
|
|
|
* the guest CR3 might be restored prior to setting the nested
|
|
|
|
* state which can lead to a load of wrong PDPTRs.
|
|
|
|
*/
|
2021-11-24 20:20:52 +08:00
|
|
|
if (CC(!load_pdptrs(vcpu, vcpu->arch.cr3)))
|
2021-06-07 17:01:59 +08:00
|
|
|
return false;
|
2021-04-01 22:18:10 +08:00
|
|
|
|
|
|
|
if (!nested_svm_vmrun_msrpm(svm)) {
|
|
|
|
vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
|
|
|
|
vcpu->run->internal.suberror =
|
|
|
|
KVM_INTERNAL_ERROR_EMULATION;
|
|
|
|
vcpu->run->internal.ndata = 0;
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2022-11-01 22:54:06 +08:00
|
|
|
if (kvm_hv_verify_vp_assist(vcpu))
|
|
|
|
return false;
|
|
|
|
|
2021-04-01 22:18:10 +08:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2020-04-17 22:24:18 +08:00
|
|
|
struct kvm_x86_nested_ops svm_nested_ops = {
|
2022-01-26 06:03:58 +08:00
|
|
|
.leave_nested = svm_leave_nested,
|
2022-08-31 07:16:08 +08:00
|
|
|
.is_exception_vmexit = nested_svm_is_exception_vmexit,
|
2020-04-17 22:24:18 +08:00
|
|
|
.check_events = svm_check_nested_events,
|
2021-03-03 01:45:14 +08:00
|
|
|
.triple_fault = nested_svm_triple_fault,
|
2020-09-22 19:43:14 +08:00
|
|
|
.get_nested_state_pages = svm_get_nested_state_pages,
|
2020-05-14 01:36:32 +08:00
|
|
|
.get_state = svm_get_nested_state,
|
|
|
|
.set_state = svm_set_nested_state,
|
2022-11-01 22:53:59 +08:00
|
|
|
.hv_inject_synthetic_vmexit_post_tlb_flush = svm_hv_inject_synthetic_vmexit_post_tlb_flush,
|
2020-04-17 22:24:18 +08:00
|
|
|
};
|