// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 */

#define pr_fmt(fmt) "SVM: " fmt

#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/kernel.h>

#include <asm/msr-index.h>
#include <asm/debugreg.h>

#include "kvm_emulate.h"
#include "trace.h"
#include "mmu.h"
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "svm.h"

#define CC KVM_NESTED_VMENTER_CONSISTENCY_CHECK
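
/*
 * Reflect a nested page fault encountered while running L2 back to L1 as an
 * SVM_EXIT_NPF vmexit.  Installed as the shadow-NPT MMU's inject_page_fault
 * callback by nested_svm_init_mmu_context().
 */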
static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
				       struct x86_exception *fault)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (svm->vmcb->control.exit_code != SVM_EXIT_NPF) {
		/*
		 * TODO: track the cause of the nested page fault, and
		 * correctly fill in the high bits of exit_info_1.
		 */
		svm->vmcb->control.exit_code = SVM_EXIT_NPF;
		svm->vmcb->control.exit_code_hi = 0;
		svm->vmcb->control.exit_info_1 = (1ULL << 32);
		svm->vmcb->control.exit_info_2 = fault->address;
	}

	svm->vmcb->control.exit_info_1 &= ~0xffffffffULL;
	svm->vmcb->control.exit_info_1 |= fault->error_code;

	nested_svm_vmexit(svm);
}
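
/*
 * Used when shadow paging is active for L2: if L1 intercepts #PF, turn the
 * page fault into an exception vmexit to L1, otherwise inject it into L2.
 */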
static void svm_inject_page_fault_nested(struct kvm_vcpu *vcpu, struct x86_exception *fault)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	WARN_ON(!is_guest_mode(vcpu));

	if (vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_EXCEPTION_OFFSET + PF_VECTOR) &&
	    !svm->nested.nested_run_pending) {
		svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + PF_VECTOR;
		svm->vmcb->control.exit_code_hi = 0;
		svm->vmcb->control.exit_info_1 = fault->error_code;
		svm->vmcb->control.exit_info_2 = fault->address;
		nested_svm_vmexit(svm);
	} else {
		kvm_inject_page_fault(vcpu, fault);
	}
}

static u64 nested_svm_get_tdp_pdptr(struct kvm_vcpu *vcpu, int index)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 cr3 = svm->nested.ctl.nested_cr3;
	u64 pdpte;
	int ret;

	ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(cr3), &pdpte,
				       offset_in_page(cr3) + index * 8, 8);
	if (ret)
		return 0;
	return pdpte;
}

static unsigned long nested_svm_get_tdp_cr3(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	return svm->nested.ctl.nested_cr3;
}
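
/*
 * Set up the MMU for nested NPT: L2's GPAs are translated through L1's
 * nested CR3 via the shadow-NPT MMU, while the nested MMU is used to walk
 * L2's own page tables.
 */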
static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	WARN_ON(mmu_is_nested(vcpu));

	vcpu->arch.mmu = &vcpu->arch.guest_mmu;
	kvm_init_shadow_npt_mmu(vcpu, X86_CR0_PG, svm->vmcb01.ptr->save.cr4,
				svm->vmcb01.ptr->save.efer,
				svm->nested.ctl.nested_cr3);
	vcpu->arch.mmu->get_guest_pgd     = nested_svm_get_tdp_cr3;
	vcpu->arch.mmu->get_pdptr         = nested_svm_get_tdp_pdptr;
	vcpu->arch.mmu->inject_page_fault = nested_svm_inject_npf_exit;
	reset_shadow_zero_bits_mask(vcpu, vcpu->arch.mmu);
	vcpu->arch.walk_mmu              = &vcpu->arch.nested_mmu;
}

static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
{
	vcpu->arch.mmu = &vcpu->arch.root_mmu;
	vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
}
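
/*
 * Recompute the active VMCB's intercepts as the union of L1's (vmcb01) and
 * L2's (vmcb12, cached in svm->nested.ctl) intercept vectors.
 */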
void recalc_intercepts(struct vcpu_svm *svm)
{
	struct vmcb_control_area *c, *h, *g;
	unsigned int i;

	vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);

	if (!is_guest_mode(&svm->vcpu))
		return;

	c = &svm->vmcb->control;
	h = &svm->vmcb01.ptr->control;
	g = &svm->nested.ctl;

	for (i = 0; i < MAX_INTERCEPT; i++)
		c->intercepts[i] = h->intercepts[i];

	if (g->int_ctl & V_INTR_MASKING_MASK) {
		/* We only want the cr8 intercept bits of L1 */
		vmcb_clr_intercept(c, INTERCEPT_CR8_READ);
		vmcb_clr_intercept(c, INTERCEPT_CR8_WRITE);

		/*
		 * Once running L2 with HF_VINTR_MASK, EFLAGS.IF does not
		 * affect any interrupt we may want to inject; therefore,
		 * interrupt window vmexits are irrelevant to L0.
		 */
		vmcb_clr_intercept(c, INTERCEPT_VINTR);
	}

	/* We don't want to see VMMCALLs from a nested guest */
	vmcb_clr_intercept(c, INTERCEPT_VMMCALL);

	for (i = 0; i < MAX_INTERCEPT; i++)
		c->intercepts[i] |= g->intercepts[i];
}

static void copy_vmcb_control_area(struct vmcb_control_area *dst,
				   struct vmcb_control_area *from)
{
	unsigned int i;

	for (i = 0; i < MAX_INTERCEPT; i++)
		dst->intercepts[i] = from->intercepts[i];

	dst->iopm_base_pa         = from->iopm_base_pa;
	dst->msrpm_base_pa        = from->msrpm_base_pa;
	dst->tsc_offset           = from->tsc_offset;
	/* asid not copied, it is handled manually for svm->vmcb.  */
	dst->tlb_ctl              = from->tlb_ctl;
	dst->int_ctl              = from->int_ctl;
	dst->int_vector           = from->int_vector;
	dst->int_state            = from->int_state;
	dst->exit_code            = from->exit_code;
	dst->exit_code_hi         = from->exit_code_hi;
	dst->exit_info_1          = from->exit_info_1;
	dst->exit_info_2          = from->exit_info_2;
	dst->exit_int_info        = from->exit_int_info;
	dst->exit_int_info_err    = from->exit_int_info_err;
	dst->nested_ctl           = from->nested_ctl;
	dst->event_inj            = from->event_inj;
	dst->event_inj_err        = from->event_inj_err;
	dst->nested_cr3           = from->nested_cr3;
	dst->virt_ext             = from->virt_ext;
	dst->pause_filter_count   = from->pause_filter_count;
	dst->pause_filter_thresh  = from->pause_filter_thresh;
}

static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
{
	/*
	 * This function merges the msr permission bitmaps of kvm and the
	 * nested vmcb. It is optimized in that it only merges the parts where
	 * the kvm msr permission bitmap may contain zero bits
	 */
	int i;

	if (!(vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT)))
		return true;

	for (i = 0; i < MSRPM_OFFSETS; i++) {
		u32 value, p;
		u64 offset;

		if (msrpm_offsets[i] == 0xffffffff)
			break;

		p      = msrpm_offsets[i];
		offset = svm->nested.ctl.msrpm_base_pa + (p * 4);

		if (kvm_vcpu_read_guest(&svm->vcpu, offset, &value, 4))
			return false;

		svm->nested.msrpm[p] = svm->msrpm[p] | value;
	}

	svm->vmcb->control.msrpm_base_pa = __sme_set(__pa(svm->nested.msrpm));

	return true;
}
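
/* Sanity-check the vmcb12 control area before it is used to enter L2. */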
static bool nested_vmcb_check_controls(struct vmcb_control_area *control)
{
	if (CC(!vmcb_is_intercept(control, INTERCEPT_VMRUN)))
		return false;

	if (CC(control->asid == 0))
		return false;

	if (CC((control->nested_ctl & SVM_NESTED_CTL_NP_ENABLE) && !npt_enabled))
		return false;

	return true;
}

static bool nested_vmcb_check_cr3_cr4(struct kvm_vcpu *vcpu,
				      struct vmcb_save_area *save)
{
	/*
	 * These checks are also performed by KVM_SET_SREGS,
	 * except that EFER.LMA is not checked by SVM against
	 * CR0.PG && EFER.LME.
	 */
	if ((save->efer & EFER_LME) && (save->cr0 & X86_CR0_PG)) {
		if (CC(!(save->cr4 & X86_CR4_PAE)) ||
		    CC(!(save->cr0 & X86_CR0_PE)) ||
		    CC(kvm_vcpu_is_illegal_gpa(vcpu, save->cr3)))
			return false;
	}

	if (CC(!kvm_is_valid_cr4(vcpu, save->cr4)))
		return false;

	return true;
}

/* Common checks that apply to both L1 and L2 state.  */
static bool nested_vmcb_valid_sregs(struct kvm_vcpu *vcpu,
				    struct vmcb_save_area *save)
{
	/*
	 * FIXME: these should be done after copying the fields,
	 * to avoid TOC/TOU races.  For these save area checks
	 * the possible damage is limited since kvm_set_cr0 and
	 * kvm_set_cr4 handle failure; EFER_SVME is an exception
	 * so it is force-set later in nested_prepare_vmcb_save.
	 */
	if (CC(!(save->efer & EFER_SVME)))
		return false;

	if (CC((save->cr0 & X86_CR0_CD) == 0 && (save->cr0 & X86_CR0_NW)) ||
	    CC(save->cr0 & ~0xffffffffULL))
		return false;

	if (CC(!kvm_dr6_valid(save->dr6)) || CC(!kvm_dr7_valid(save->dr7)))
		return false;

	if (!nested_vmcb_check_cr3_cr4(vcpu, save))
		return false;

	if (CC(!kvm_valid_efer(vcpu, save->efer)))
		return false;

	return true;
}

static void nested_load_control_from_vmcb12(struct vcpu_svm *svm,
					    struct vmcb_control_area *control)
{
	copy_vmcb_control_area(&svm->nested.ctl, control);

	/* Copy it here because nested_svm_check_controls will check it.  */
	svm->nested.ctl.asid           = control->asid;
	svm->nested.ctl.msrpm_base_pa &= ~0x0fffULL;
	svm->nested.ctl.iopm_base_pa  &= ~0x0fffULL;
}

/*
 * Synchronize fields that are written by the processor, so that
 * they can be copied back into the vmcb12.
 */
void nested_sync_control_from_vmcb02(struct vcpu_svm *svm)
{
	u32 mask;
	svm->nested.ctl.event_inj      = svm->vmcb->control.event_inj;
	svm->nested.ctl.event_inj_err  = svm->vmcb->control.event_inj_err;

	/* Only a few fields of int_ctl are written by the processor.  */
	mask = V_IRQ_MASK | V_TPR_MASK;
	if (!(svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK) &&
	    svm_is_intercept(svm, INTERCEPT_VINTR)) {
		/*
		 * In order to request an interrupt window, L0 is usurping
		 * svm->vmcb->control.int_ctl and possibly setting V_IRQ
		 * even if it was clear in L1's VMCB. Restoring it would be
		 * wrong.  However, in this case V_IRQ will remain true until
		 * interrupt_window_interception calls svm_clear_vintr and
		 * restores int_ctl.  We can just leave it aside.
		 */
		mask &= ~V_IRQ_MASK;
	}
	svm->nested.ctl.int_ctl        &= ~mask;
	svm->nested.ctl.int_ctl        |= svm->vmcb->control.int_ctl & mask;
}

/*
 * Transfer any event that L0 or L1 wanted to inject into L2 to
 * EXIT_INT_INFO.
 */
static void nested_save_pending_event_to_vmcb12(struct vcpu_svm *svm,
						struct vmcb *vmcb12)
{
	struct kvm_vcpu *vcpu = &svm->vcpu;
	u32 exit_int_info = 0;
	unsigned int nr;

	if (vcpu->arch.exception.injected) {
		nr = vcpu->arch.exception.nr;
		exit_int_info = nr | SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_EXEPT;

		if (vcpu->arch.exception.has_error_code) {
			exit_int_info |= SVM_EVTINJ_VALID_ERR;
			vmcb12->control.exit_int_info_err =
				vcpu->arch.exception.error_code;
		}

	} else if (vcpu->arch.nmi_injected) {
		exit_int_info = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;

	} else if (vcpu->arch.interrupt.injected) {
		nr = vcpu->arch.interrupt.nr;
		exit_int_info = nr | SVM_EVTINJ_VALID;

		if (vcpu->arch.interrupt.soft)
			exit_int_info |= SVM_EVTINJ_TYPE_SOFT;
		else
			exit_int_info |= SVM_EVTINJ_TYPE_INTR;
	}

	vmcb12->control.exit_int_info = exit_int_info;
}

static inline bool nested_npt_enabled(struct vcpu_svm *svm)
{
	return svm->nested.ctl.nested_ctl & SVM_NESTED_CTL_NP_ENABLE;
}

/*
 * Load guest's/host's cr3 on nested vmentry or vmexit. @nested_npt is true
 * if we are emulating VM-Entry into a guest with NPT enabled.
 */
static int nested_svm_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3,
			       bool nested_npt)
{
	if (CC(kvm_vcpu_is_illegal_gpa(vcpu, cr3)))
		return -EINVAL;

	if (!nested_npt && is_pae_paging(vcpu) &&
	    (cr3 != kvm_read_cr3(vcpu) || pdptrs_changed(vcpu))) {
		if (CC(!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3)))
			return -EINVAL;
	}

	/*
	 * TODO: optimize unconditional TLB flush/MMU sync here and in
	 * kvm_init_shadow_npt_mmu().
	 */
	if (!nested_npt)
		kvm_mmu_new_pgd(vcpu, cr3, false, false);

	vcpu->arch.cr3 = cr3;
	kvm_register_mark_available(vcpu, VCPU_EXREG_CR3);

	kvm_init_mmu(vcpu, false);

	return 0;
}

void nested_vmcb02_compute_g_pat(struct vcpu_svm *svm)
{
	if (!svm->nested.vmcb02.ptr)
		return;

	/* FIXME: merge g_pat from vmcb01 and vmcb12.  */
	svm->nested.vmcb02.ptr->save.g_pat = svm->vmcb01.ptr->save.g_pat;
}
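
/*
 * Load L2 guest state from vmcb12 into the active vmcb02.  Fields covered by
 * vmcb12's clean bits are skipped unless a different vmcb12 is being entered.
 */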
static void nested_vmcb02_prepare_save(struct vcpu_svm *svm, struct vmcb *vmcb12)
{
	bool new_vmcb12 = false;

	nested_vmcb02_compute_g_pat(svm);

	/* Load the nested guest state */

	if (svm->nested.vmcb12_gpa != svm->nested.last_vmcb12_gpa) {
		new_vmcb12 = true;
		svm->nested.last_vmcb12_gpa = svm->nested.vmcb12_gpa;
	}

	if (unlikely(new_vmcb12 || vmcb_is_dirty(vmcb12, VMCB_SEG))) {
		svm->vmcb->save.es = vmcb12->save.es;
		svm->vmcb->save.cs = vmcb12->save.cs;
		svm->vmcb->save.ss = vmcb12->save.ss;
		svm->vmcb->save.ds = vmcb12->save.ds;
		svm->vmcb->save.cpl = vmcb12->save.cpl;
		vmcb_mark_dirty(svm->vmcb, VMCB_SEG);
	}

	if (unlikely(new_vmcb12 || vmcb_is_dirty(vmcb12, VMCB_DT))) {
		svm->vmcb->save.gdtr = vmcb12->save.gdtr;
		svm->vmcb->save.idtr = vmcb12->save.idtr;
		vmcb_mark_dirty(svm->vmcb, VMCB_DT);
	}

	kvm_set_rflags(&svm->vcpu, vmcb12->save.rflags | X86_EFLAGS_FIXED);

	/*
	 * Force-set EFER_SVME even though it is checked earlier on the
	 * VMCB12, because the guest can flip the bit between the check
	 * and now.  Clearing EFER_SVME would call svm_free_nested.
	 */
	svm_set_efer(&svm->vcpu, vmcb12->save.efer | EFER_SVME);

	svm_set_cr0(&svm->vcpu, vmcb12->save.cr0);
	svm_set_cr4(&svm->vcpu, vmcb12->save.cr4);

	svm->vcpu.arch.cr2 = vmcb12->save.cr2;

	kvm_rax_write(&svm->vcpu, vmcb12->save.rax);
	kvm_rsp_write(&svm->vcpu, vmcb12->save.rsp);
	kvm_rip_write(&svm->vcpu, vmcb12->save.rip);

	/* In case we don't even reach vcpu_run, the fields are not updated */
	svm->vmcb->save.rax = vmcb12->save.rax;
	svm->vmcb->save.rsp = vmcb12->save.rsp;
	svm->vmcb->save.rip = vmcb12->save.rip;

	/* These bits will be set properly on the first execution when new_vmcb12 is true */
	if (unlikely(new_vmcb12 || vmcb_is_dirty(vmcb12, VMCB_DR))) {
		svm->vmcb->save.dr7 = vmcb12->save.dr7 | DR7_FIXED_1;
		svm->vcpu.arch.dr6  = vmcb12->save.dr6 | DR6_ACTIVE_LOW;
		vmcb_mark_dirty(svm->vmcb, VMCB_DR);
	}
}

static void nested_vmcb02_prepare_control(struct vcpu_svm *svm)
{
	const u32 mask = V_INTR_MASKING_MASK | V_GIF_ENABLE_MASK | V_GIF_MASK;

	/*
	 * Filled at exit: exit_code, exit_code_hi, exit_info_1, exit_info_2,
	 * exit_int_info, exit_int_info_err, next_rip, insn_len, insn_bytes.
	 */

	/*
	 * Also covers avic_vapic_bar, avic_backing_page, avic_logical_id,
	 * avic_physical_id.
	 */
	WARN_ON(svm->vmcb01.ptr->control.int_ctl & AVIC_ENABLE_MASK);

	/* Copied from vmcb01.  msrpm_base can be overwritten later.  */
	svm->vmcb->control.nested_ctl = svm->vmcb01.ptr->control.nested_ctl;
	svm->vmcb->control.iopm_base_pa = svm->vmcb01.ptr->control.iopm_base_pa;
	svm->vmcb->control.msrpm_base_pa = svm->vmcb01.ptr->control.msrpm_base_pa;

	/* Done at vmrun: asid.  */

	/* Also overwritten later if necessary.  */
	svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;

	/* nested_cr3.  */
	if (nested_npt_enabled(svm))
		nested_svm_init_mmu_context(&svm->vcpu);

	svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset =
		svm->vcpu.arch.l1_tsc_offset + svm->nested.ctl.tsc_offset;

	svm->vmcb->control.int_ctl             =
		(svm->nested.ctl.int_ctl & ~mask) |
		(svm->vmcb01.ptr->control.int_ctl & mask);

	svm->vmcb->control.virt_ext            = svm->nested.ctl.virt_ext;
	svm->vmcb->control.int_vector          = svm->nested.ctl.int_vector;
	svm->vmcb->control.int_state           = svm->nested.ctl.int_state;
	svm->vmcb->control.event_inj           = svm->nested.ctl.event_inj;
	svm->vmcb->control.event_inj_err       = svm->nested.ctl.event_inj_err;

	svm->vmcb->control.pause_filter_count  = svm->nested.ctl.pause_filter_count;
	svm->vmcb->control.pause_filter_thresh = svm->nested.ctl.pause_filter_thresh;

	/* Enter Guest-Mode */
	enter_guest_mode(&svm->vcpu);

	/*
	 * Merge guest and host intercepts - must be called with vcpu in
	 * guest-mode to take effect.
	 */
	recalc_intercepts(svm);
}

static void nested_svm_copy_common_state(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
{
	/*
	 * Some VMCB state is shared between L1 and L2 and thus has to be
	 * moved at the time of nested vmrun and vmexit.
	 *
	 * VMLOAD/VMSAVE state would also belong in this category, but KVM
	 * always performs VMLOAD and VMSAVE from the VMCB01.
	 */
	to_vmcb->save.spec_ctrl = from_vmcb->save.spec_ctrl;
}
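
/*
 * Emulated VMRUN: switch to vmcb02, load L2's control and save state from
 * vmcb12, and switch to L2's CR3.
 */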
int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb12_gpa,
			 struct vmcb *vmcb12)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int ret;

	trace_kvm_nested_vmrun(svm->vmcb->save.rip, vmcb12_gpa,
			       vmcb12->save.rip,
			       vmcb12->control.int_ctl,
			       vmcb12->control.event_inj,
			       vmcb12->control.nested_ctl);

	trace_kvm_nested_intercepts(vmcb12->control.intercepts[INTERCEPT_CR] & 0xffff,
				    vmcb12->control.intercepts[INTERCEPT_CR] >> 16,
				    vmcb12->control.intercepts[INTERCEPT_EXCEPTION],
				    vmcb12->control.intercepts[INTERCEPT_WORD3],
				    vmcb12->control.intercepts[INTERCEPT_WORD4],
				    vmcb12->control.intercepts[INTERCEPT_WORD5]);

	svm->nested.vmcb12_gpa = vmcb12_gpa;

	WARN_ON(svm->vmcb == svm->nested.vmcb02.ptr);

	nested_svm_copy_common_state(svm->vmcb01.ptr, svm->nested.vmcb02.ptr);

	svm_switch_vmcb(svm, &svm->nested.vmcb02);
	nested_vmcb02_prepare_control(svm);
	nested_vmcb02_prepare_save(svm, vmcb12);

	ret = nested_svm_load_cr3(&svm->vcpu, vmcb12->save.cr3,
				  nested_npt_enabled(svm));
	if (ret)
		return ret;

	if (!npt_enabled)
		vcpu->arch.mmu->inject_page_fault = svm_inject_page_fault_nested;

	svm_set_gif(svm, true);

	return 0;
}
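
/*
 * VMRUN exit handler: map and validate vmcb12, stash L1 state in vmcb01, and
 * enter guest (L2) mode.
 */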
int nested_svm_vmrun(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int ret;
	struct vmcb *vmcb12;
	struct kvm_host_map map;
	u64 vmcb12_gpa;

	++vcpu->stat.nested_run;

	if (is_smm(vcpu)) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 1;
	}

	vmcb12_gpa = svm->vmcb->save.rax;
	ret = kvm_vcpu_map(vcpu, gpa_to_gfn(vmcb12_gpa), &map);
	if (ret == -EINVAL) {
		kvm_inject_gp(vcpu, 0);
		return 1;
	} else if (ret) {
		return kvm_skip_emulated_instruction(vcpu);
	}

	ret = kvm_skip_emulated_instruction(vcpu);

	vmcb12 = map.hva;

	if (WARN_ON_ONCE(!svm->nested.initialized))
		return -EINVAL;

	nested_load_control_from_vmcb12(svm, &vmcb12->control);

	if (!nested_vmcb_valid_sregs(vcpu, &vmcb12->save) ||
	    !nested_vmcb_check_controls(&svm->nested.ctl)) {
		vmcb12->control.exit_code    = SVM_EXIT_ERR;
		vmcb12->control.exit_code_hi = 0;
		vmcb12->control.exit_info_1  = 0;
		vmcb12->control.exit_info_2  = 0;
		goto out;
	}

	/* Clear internal status */
	kvm_clear_exception_queue(vcpu);
	kvm_clear_interrupt_queue(vcpu);

	/*
	 * Since vmcb01 is not in use, we can use it to store some of the L1
	 * state.
	 */
	svm->vmcb01.ptr->save.efer   = vcpu->arch.efer;
	svm->vmcb01.ptr->save.cr0    = kvm_read_cr0(vcpu);
	svm->vmcb01.ptr->save.cr4    = vcpu->arch.cr4;
	svm->vmcb01.ptr->save.rflags = kvm_get_rflags(vcpu);
	svm->vmcb01.ptr->save.rip    = kvm_rip_read(vcpu);

	if (!npt_enabled)
		svm->vmcb01.ptr->save.cr3 = kvm_read_cr3(vcpu);

	svm->nested.nested_run_pending = 1;

	if (enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12))
		goto out_exit_err;

	if (nested_svm_vmrun_msrpm(svm))
		goto out;

out_exit_err:
	svm->nested.nested_run_pending = 0;

	svm->vmcb->control.exit_code    = SVM_EXIT_ERR;
	svm->vmcb->control.exit_code_hi = 0;
	svm->vmcb->control.exit_info_1  = 0;
	svm->vmcb->control.exit_info_2  = 0;

	nested_svm_vmexit(svm);

out:
	kvm_vcpu_unmap(vcpu, &map, true);

	return ret;
}

void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
{
	to_vmcb->save.fs = from_vmcb->save.fs;
	to_vmcb->save.gs = from_vmcb->save.gs;
	to_vmcb->save.tr = from_vmcb->save.tr;
	to_vmcb->save.ldtr = from_vmcb->save.ldtr;
	to_vmcb->save.kernel_gs_base = from_vmcb->save.kernel_gs_base;
	to_vmcb->save.star = from_vmcb->save.star;
	to_vmcb->save.lstar = from_vmcb->save.lstar;
	to_vmcb->save.cstar = from_vmcb->save.cstar;
	to_vmcb->save.sfmask = from_vmcb->save.sfmask;
	to_vmcb->save.sysenter_cs = from_vmcb->save.sysenter_cs;
	to_vmcb->save.sysenter_esp = from_vmcb->save.sysenter_esp;
	to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip;
}
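
/*
 * Emulate #VMEXIT from L2 to L1: copy L2 state back into vmcb12, switch to
 * vmcb01 and restore the L1 state that was saved at VMRUN.
 */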
int nested_svm_vmexit(struct vcpu_svm *svm)
{
	struct kvm_vcpu *vcpu = &svm->vcpu;
	struct vmcb *vmcb12;
	struct vmcb *vmcb = svm->vmcb;
	struct kvm_host_map map;
	int rc;

	/* Triple faults in L2 should never escape. */
	WARN_ON_ONCE(kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu));

	rc = kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.vmcb12_gpa), &map);
	if (rc) {
		if (rc == -EINVAL)
			kvm_inject_gp(vcpu, 0);
		return 1;
	}

	vmcb12 = map.hva;

	/* Exit Guest-Mode */
	leave_guest_mode(vcpu);
	svm->nested.vmcb12_gpa = 0;
	WARN_ON_ONCE(svm->nested.nested_run_pending);

	kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);

	/* in case we halted in L2 */
	svm->vcpu.arch.mp_state = KVM_MP_STATE_RUNNABLE;

	/* Give the current vmcb to the guest */

	vmcb12->save.es     = vmcb->save.es;
	vmcb12->save.cs     = vmcb->save.cs;
	vmcb12->save.ss     = vmcb->save.ss;
	vmcb12->save.ds     = vmcb->save.ds;
	vmcb12->save.gdtr   = vmcb->save.gdtr;
	vmcb12->save.idtr   = vmcb->save.idtr;
	vmcb12->save.efer   = svm->vcpu.arch.efer;
	vmcb12->save.cr0    = kvm_read_cr0(vcpu);
	vmcb12->save.cr3    = kvm_read_cr3(vcpu);
	vmcb12->save.cr2    = vmcb->save.cr2;
	vmcb12->save.cr4    = svm->vcpu.arch.cr4;
	vmcb12->save.rflags = kvm_get_rflags(vcpu);
	vmcb12->save.rip    = kvm_rip_read(vcpu);
	vmcb12->save.rsp    = kvm_rsp_read(vcpu);
	vmcb12->save.rax    = kvm_rax_read(vcpu);
	vmcb12->save.dr7    = vmcb->save.dr7;
	vmcb12->save.dr6    = svm->vcpu.arch.dr6;
	vmcb12->save.cpl    = vmcb->save.cpl;

	vmcb12->control.int_state         = vmcb->control.int_state;
	vmcb12->control.exit_code         = vmcb->control.exit_code;
	vmcb12->control.exit_code_hi      = vmcb->control.exit_code_hi;
	vmcb12->control.exit_info_1       = vmcb->control.exit_info_1;
	vmcb12->control.exit_info_2       = vmcb->control.exit_info_2;

	if (vmcb12->control.exit_code != SVM_EXIT_ERR)
		nested_save_pending_event_to_vmcb12(svm, vmcb12);

	if (svm->nrips_enabled)
		vmcb12->control.next_rip  = vmcb->control.next_rip;

	vmcb12->control.int_ctl           = svm->nested.ctl.int_ctl;
	vmcb12->control.tlb_ctl           = svm->nested.ctl.tlb_ctl;
	vmcb12->control.event_inj         = svm->nested.ctl.event_inj;
	vmcb12->control.event_inj_err     = svm->nested.ctl.event_inj_err;

	vmcb12->control.pause_filter_count =
		svm->vmcb->control.pause_filter_count;
	vmcb12->control.pause_filter_thresh =
		svm->vmcb->control.pause_filter_thresh;

	nested_svm_copy_common_state(svm->nested.vmcb02.ptr, svm->vmcb01.ptr);

	svm_switch_vmcb(svm, &svm->vmcb01);
	WARN_ON_ONCE(svm->vmcb->control.exit_code != SVM_EXIT_VMRUN);

	/*
	 * On vmexit the GIF is set to false and
	 * no event can be injected in L1.
	 */
	svm_set_gif(svm, false);
	svm->vmcb->control.exit_int_info = 0;

	svm->vcpu.arch.tsc_offset = svm->vcpu.arch.l1_tsc_offset;
	if (svm->vmcb->control.tsc_offset != svm->vcpu.arch.tsc_offset) {
		svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset;
		vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
	}

	svm->nested.ctl.nested_cr3 = 0;

	/*
	 * Restore processor state that had been saved in vmcb01
	 */
	kvm_set_rflags(vcpu, svm->vmcb->save.rflags);
	svm_set_efer(vcpu, svm->vmcb->save.efer);
	svm_set_cr0(vcpu, svm->vmcb->save.cr0 | X86_CR0_PE);
	svm_set_cr4(vcpu, svm->vmcb->save.cr4);
	kvm_rax_write(vcpu, svm->vmcb->save.rax);
	kvm_rsp_write(vcpu, svm->vmcb->save.rsp);
	kvm_rip_write(vcpu, svm->vmcb->save.rip);

	svm->vcpu.arch.dr7 = DR7_FIXED_1;
	kvm_update_dr7(&svm->vcpu);

	trace_kvm_nested_vmexit_inject(vmcb12->control.exit_code,
				       vmcb12->control.exit_info_1,
				       vmcb12->control.exit_info_2,
				       vmcb12->control.exit_int_info,
				       vmcb12->control.exit_int_info_err,
				       KVM_ISA_SVM);

	kvm_vcpu_unmap(vcpu, &map, true);

	nested_svm_uninit_mmu_context(vcpu);

	rc = nested_svm_load_cr3(vcpu, svm->vmcb->save.cr3, false);
	if (rc)
		return 1;

	/*
	 * Drop what we picked up for L2 via svm_complete_interrupts() so it
	 * doesn't end up in L1.
	 */
	svm->vcpu.arch.nmi_injected = false;
	kvm_clear_exception_queue(vcpu);
	kvm_clear_interrupt_queue(vcpu);

	/*
	 * If we are here following the completion of a VMRUN that
	 * is being single-stepped, queue the pending #DB intercept
	 * right now so that it can be accounted for before we execute
	 * L1's next instruction.
	 */
	if (unlikely(svm->vmcb->save.rflags & X86_EFLAGS_TF))
		kvm_queue_exception(&(svm->vcpu), DB_VECTOR);

	return 0;
}

static void nested_svm_triple_fault(struct kvm_vcpu *vcpu)
{
	nested_svm_simple_vmexit(to_svm(vcpu), SVM_EXIT_SHUTDOWN);
}
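
/*
 * Allocate the vmcb02 and the nested MSR permission bitmap used while
 * running L2.
 */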
int svm_allocate_nested(struct vcpu_svm *svm)
{
	struct page *vmcb02_page;

	if (svm->nested.initialized)
		return 0;

	vmcb02_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
	if (!vmcb02_page)
		return -ENOMEM;
	svm->nested.vmcb02.ptr = page_address(vmcb02_page);
	svm->nested.vmcb02.pa = __sme_set(page_to_pfn(vmcb02_page) << PAGE_SHIFT);

	svm->nested.msrpm = svm_vcpu_alloc_msrpm();
	if (!svm->nested.msrpm)
		goto err_free_vmcb02;
	svm_vcpu_init_msrpm(&svm->vcpu, svm->nested.msrpm);

	svm->nested.initialized = true;
	return 0;

err_free_vmcb02:
	__free_page(vmcb02_page);
	return -ENOMEM;
}

void svm_free_nested(struct vcpu_svm *svm)
{
	if (!svm->nested.initialized)
		return;

	svm_vcpu_free_msrpm(svm->nested.msrpm);
	svm->nested.msrpm = NULL;

	__free_page(virt_to_page(svm->nested.vmcb02.ptr));
	svm->nested.vmcb02.ptr = NULL;

	svm->nested.initialized = false;
}

/*
 * Forcibly leave nested mode in order to be able to reset the VCPU later on.
 */
void svm_leave_nested(struct vcpu_svm *svm)
{
	struct kvm_vcpu *vcpu = &svm->vcpu;

	if (is_guest_mode(vcpu)) {
		svm->nested.nested_run_pending = 0;
		leave_guest_mode(vcpu);

		svm_switch_vmcb(svm, &svm->nested.vmcb02);

		nested_svm_uninit_mmu_context(vcpu);
		vmcb_mark_all_dirty(svm->vmcb);
	}

	kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
}
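
/*
 * Consult L1's MSR permission bitmap to decide whether an intercepted MSR
 * access must be reflected to L1.
 */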
static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
{
	u32 offset, msr, value;
	int write, mask;

	if (!(vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT)))
		return NESTED_EXIT_HOST;

	msr    = svm->vcpu.arch.regs[VCPU_REGS_RCX];
	offset = svm_msrpm_offset(msr);
	write  = svm->vmcb->control.exit_info_1 & 1;
	mask   = 1 << ((2 * (msr & 0xf)) + write);

	if (offset == MSR_INVALID)
		return NESTED_EXIT_DONE;

	/* Offset is in 32 bit units but need in 8 bit units */
	offset *= 4;

	if (kvm_vcpu_read_guest(&svm->vcpu, svm->nested.ctl.msrpm_base_pa + offset, &value, 4))
		return NESTED_EXIT_DONE;

	return (value & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
}

static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
{
	unsigned port, size, iopm_len;
	u16 val, mask;
	u8 start_bit;
	u64 gpa;

	if (!(vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_IOIO_PROT)))
		return NESTED_EXIT_HOST;

	port = svm->vmcb->control.exit_info_1 >> 16;
	size = (svm->vmcb->control.exit_info_1 & SVM_IOIO_SIZE_MASK) >>
		SVM_IOIO_SIZE_SHIFT;
	gpa  = svm->nested.ctl.iopm_base_pa + (port / 8);
	start_bit = port % 8;
	iopm_len = (start_bit + size > 8) ? 2 : 1;
	mask = (0xf >> (4 - size)) << start_bit;
	val = 0;

	if (kvm_vcpu_read_guest(&svm->vcpu, gpa, &val, iopm_len))
		return NESTED_EXIT_DONE;

	return (val & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
}
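
/*
 * Decide whether an intercept that fired while running L2 is handled by L0
 * (NESTED_EXIT_HOST) or must be reflected to L1 (NESTED_EXIT_DONE).
 */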
static int nested_svm_intercept(struct vcpu_svm *svm)
{
	u32 exit_code = svm->vmcb->control.exit_code;
	int vmexit = NESTED_EXIT_HOST;

	switch (exit_code) {
	case SVM_EXIT_MSR:
		vmexit = nested_svm_exit_handled_msr(svm);
		break;
	case SVM_EXIT_IOIO:
		vmexit = nested_svm_intercept_ioio(svm);
		break;
	case SVM_EXIT_READ_CR0 ... SVM_EXIT_WRITE_CR8: {
		if (vmcb_is_intercept(&svm->nested.ctl, exit_code))
			vmexit = NESTED_EXIT_DONE;
		break;
	}
	case SVM_EXIT_READ_DR0 ... SVM_EXIT_WRITE_DR7: {
		if (vmcb_is_intercept(&svm->nested.ctl, exit_code))
			vmexit = NESTED_EXIT_DONE;
		break;
	}
	case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
		/*
		 * Host-intercepted exceptions have been checked already in
		 * nested_svm_exit_special.  There is nothing to do here,
		 * the vmexit is injected by svm_check_nested_events.
		 */
		vmexit = NESTED_EXIT_DONE;
		break;
	}
	case SVM_EXIT_ERR: {
		vmexit = NESTED_EXIT_DONE;
		break;
	}
	default: {
		if (vmcb_is_intercept(&svm->nested.ctl, exit_code))
			vmexit = NESTED_EXIT_DONE;
	}
	}

	return vmexit;
}

int nested_svm_exit_handled(struct vcpu_svm *svm)
{
	int vmexit;

	vmexit = nested_svm_intercept(svm);

	if (vmexit == NESTED_EXIT_DONE)
		nested_svm_vmexit(svm);

	return vmexit;
}

int nested_svm_check_permissions(struct kvm_vcpu *vcpu)
{
	if (!(vcpu->arch.efer & EFER_SVME) || !is_paging(vcpu)) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 1;
	}

	if (to_svm(vcpu)->vmcb->save.cpl) {
		kvm_inject_gp(vcpu, 0);
		return 1;
	}

	return 0;
}
2020-05-16 20:42:28 +08:00
|
|
|
static bool nested_exit_on_exception(struct vcpu_svm *svm)
{
	unsigned int nr = svm->vcpu.arch.exception.nr;

	return (svm->nested.ctl.intercepts[INTERCEPT_EXCEPTION] & BIT(nr));
}

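/*
 * Synthesize an SVM_EXIT_EXCP_BASE + vector vmexit for an exception that
 * L1 intercepts.  EXITINFO1 carries the error code when one exists;
 * EXITINFO2 is only defined for #PF, where it holds the faulting address
 * or the async-PF token.  For #DB the pending payload is delivered and
 * DR7.GD is cleared before switching to L1 (see inject_pending_event).
 */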
static void nested_svm_inject_exception_vmexit(struct vcpu_svm *svm)
{
	unsigned int nr = svm->vcpu.arch.exception.nr;

	svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr;
	svm->vmcb->control.exit_code_hi = 0;

	if (svm->vcpu.arch.exception.has_error_code)
		svm->vmcb->control.exit_info_1 = svm->vcpu.arch.exception.error_code;

	/*
	 * EXITINFO2 is undefined for all exception intercepts other
	 * than #PF.
	 */
	if (nr == PF_VECTOR) {
		if (svm->vcpu.arch.exception.nested_apf)
			svm->vmcb->control.exit_info_2 = svm->vcpu.arch.apf.nested_apf_token;
		else if (svm->vcpu.arch.exception.has_payload)
			svm->vmcb->control.exit_info_2 = svm->vcpu.arch.exception.payload;
		else
			svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2;
	} else if (nr == DB_VECTOR) {
		/* See inject_pending_event. */
		kvm_deliver_exception_payload(&svm->vcpu);
		if (svm->vcpu.arch.dr7 & DR7_GD) {
			svm->vcpu.arch.dr7 &= ~DR7_GD;
			kvm_update_dr7(&svm->vcpu);
		}
	} else
		WARN_ON(svm->vcpu.arch.exception.has_payload);

	nested_svm_vmexit(svm);
}

static inline bool nested_exit_on_init(struct vcpu_svm *svm)
{
	return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_INIT);
}

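/*
 * Check for pending events that must cause a vmexit to L1 before L2 can
 * run again.  Events are considered in this order: INIT, exceptions, SMI,
 * NMI, external interrupts.  -EBUSY asks the caller to retry later, e.g.
 * while a nested VMRUN or an event reinjection is still pending.
 */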
static int svm_check_nested_events(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	bool block_nested_events =
		kvm_event_needs_reinjection(vcpu) || svm->nested.nested_run_pending;
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (lapic_in_kernel(vcpu) &&
	    test_bit(KVM_APIC_INIT, &apic->pending_events)) {
		if (block_nested_events)
			return -EBUSY;
		if (!nested_exit_on_init(svm))
			return 0;
		nested_svm_simple_vmexit(svm, SVM_EXIT_INIT);
		return 0;
	}

	if (vcpu->arch.exception.pending) {
		/*
		 * Only a pending nested run can block a pending exception.
		 * Otherwise an injected NMI/interrupt should either be
		 * lost or delivered to the nested hypervisor in the EXITINTINFO
		 * vmcb field, while delivering the pending exception.
		 */
		if (svm->nested.nested_run_pending)
			return -EBUSY;
		if (!nested_exit_on_exception(svm))
			return 0;
		nested_svm_inject_exception_vmexit(svm);
		return 0;
	}

	if (vcpu->arch.smi_pending && !svm_smi_blocked(vcpu)) {
		if (block_nested_events)
			return -EBUSY;
		if (!nested_exit_on_smi(svm))
			return 0;
		nested_svm_simple_vmexit(svm, SVM_EXIT_SMI);
		return 0;
	}

	if (vcpu->arch.nmi_pending && !svm_nmi_blocked(vcpu)) {
		if (block_nested_events)
			return -EBUSY;
		if (!nested_exit_on_nmi(svm))
			return 0;
		nested_svm_simple_vmexit(svm, SVM_EXIT_NMI);
		return 0;
	}

	if (kvm_cpu_has_interrupt(vcpu) && !svm_interrupt_blocked(vcpu)) {
		if (block_nested_events)
			return -EBUSY;
		if (!nested_exit_on_intr(svm))
			return 0;
		trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip);
		nested_svm_simple_vmexit(svm, SVM_EXIT_INTR);
		return 0;
	}

	return 0;
}

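/*
 * Exits that L0 must look at first, regardless of L1's intercepts:
 * physical interrupts, NMIs and NPT faults always belong to the host, and
 * exceptions that the host itself intercepts (including async page
 * faults) are handled before being considered for reflection to L1.
 */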
int nested_svm_exit_special(struct vcpu_svm *svm)
{
	u32 exit_code = svm->vmcb->control.exit_code;

	switch (exit_code) {
	case SVM_EXIT_INTR:
	case SVM_EXIT_NMI:
	case SVM_EXIT_NPF:
		return NESTED_EXIT_HOST;
	case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
		u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);

		if (svm->vmcb01.ptr->control.intercepts[INTERCEPT_EXCEPTION] &
		    excp_bits)
			return NESTED_EXIT_HOST;
		else if (exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR &&
			 svm->vcpu.arch.apf.host_apf_flags)
			/* Trap async PF even if not shadowing */
			return NESTED_EXIT_HOST;

		break;
	}
	default:
		break;
	}

	return NESTED_EXIT_CONTINUE;
}

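/*
 * Serialize nested state for KVM_GET_NESTED_STATE.  The fixed-size header
 * reports the flags (guest mode, pending VMRUN, GIF); when L2 is active it
 * is followed by a KVM_STATE_NESTED_SVM_VMCB_SIZE blob holding the cached
 * vmcb12 control area and the L1 save area from vmcb01.
 */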
static int svm_get_nested_state(struct kvm_vcpu *vcpu,
				struct kvm_nested_state __user *user_kvm_nested_state,
				u32 user_data_size)
{
	struct vcpu_svm *svm;
	struct kvm_nested_state kvm_state = {
		.flags = 0,
		.format = KVM_STATE_NESTED_FORMAT_SVM,
		.size = sizeof(kvm_state),
	};
	struct vmcb __user *user_vmcb = (struct vmcb __user *)
		&user_kvm_nested_state->data.svm[0];

	if (!vcpu)
		return kvm_state.size + KVM_STATE_NESTED_SVM_VMCB_SIZE;

	svm = to_svm(vcpu);

	if (user_data_size < kvm_state.size)
		goto out;

	/* First fill in the header and copy it out. */
	if (is_guest_mode(vcpu)) {
		kvm_state.hdr.svm.vmcb_pa = svm->nested.vmcb12_gpa;
		kvm_state.size += KVM_STATE_NESTED_SVM_VMCB_SIZE;
		kvm_state.flags |= KVM_STATE_NESTED_GUEST_MODE;

		if (svm->nested.nested_run_pending)
			kvm_state.flags |= KVM_STATE_NESTED_RUN_PENDING;
	}

	if (gif_set(svm))
		kvm_state.flags |= KVM_STATE_NESTED_GIF_SET;

	if (copy_to_user(user_kvm_nested_state, &kvm_state, sizeof(kvm_state)))
		return -EFAULT;

	if (!is_guest_mode(vcpu))
		goto out;

	/*
	 * Copy over the full size of the VMCB rather than just the size
	 * of the structs.
	 */
	if (clear_user(user_vmcb, KVM_STATE_NESTED_SVM_VMCB_SIZE))
		return -EFAULT;
	if (copy_to_user(&user_vmcb->control, &svm->nested.ctl,
			 sizeof(user_vmcb->control)))
		return -EFAULT;
	if (copy_to_user(&user_vmcb->save, &svm->vmcb01.ptr->save,
			 sizeof(user_vmcb->save)))
		return -EFAULT;

out:
	return kvm_state.size;
}

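/*
 * Restore nested state from userspace (KVM_SET_NESTED_STATE).  The
 * incoming vmcb12 control area and L1 save area are validated much as a
 * VMRUN would validate them before any vCPU state is touched; only then
 * is the L1 save state loaded into vmcb01, the control state cached, and
 * the vCPU switched to vmcb02 for running L2.
 */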
static int svm_set_nested_state(struct kvm_vcpu *vcpu,
				struct kvm_nested_state __user *user_kvm_nested_state,
				struct kvm_nested_state *kvm_state)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb __user *user_vmcb = (struct vmcb __user *)
		&user_kvm_nested_state->data.svm[0];
	struct vmcb_control_area *ctl;
	struct vmcb_save_area *save;
	int ret;
	u32 cr0;

	BUILD_BUG_ON(sizeof(struct vmcb_control_area) + sizeof(struct vmcb_save_area) >
		     KVM_STATE_NESTED_SVM_VMCB_SIZE);

	if (kvm_state->format != KVM_STATE_NESTED_FORMAT_SVM)
		return -EINVAL;

	if (kvm_state->flags & ~(KVM_STATE_NESTED_GUEST_MODE |
				 KVM_STATE_NESTED_RUN_PENDING |
				 KVM_STATE_NESTED_GIF_SET))
		return -EINVAL;

	/*
	 * If in guest mode, vcpu->arch.efer actually refers to the L2 guest's
	 * EFER.SVME, but EFER.SVME still has to be 1 for VMRUN to succeed.
	 */
	if (!(vcpu->arch.efer & EFER_SVME)) {
		/* GIF=1 and no guest mode are required if SVME=0. */
		if (kvm_state->flags != KVM_STATE_NESTED_GIF_SET)
			return -EINVAL;
	}

	/* SMM temporarily disables SVM, so we cannot be in guest mode. */
	if (is_smm(vcpu) && (kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
		return -EINVAL;

	if (!(kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE)) {
		svm_leave_nested(svm);
		svm_set_gif(svm, !!(kvm_state->flags & KVM_STATE_NESTED_GIF_SET));
		return 0;
	}

	if (!page_address_valid(vcpu, kvm_state->hdr.svm.vmcb_pa))
		return -EINVAL;
	if (kvm_state->size < sizeof(*kvm_state) + KVM_STATE_NESTED_SVM_VMCB_SIZE)
		return -EINVAL;

	ret = -ENOMEM;
	ctl = kzalloc(sizeof(*ctl), GFP_KERNEL_ACCOUNT);
	save = kzalloc(sizeof(*save), GFP_KERNEL_ACCOUNT);
	if (!ctl || !save)
		goto out_free;

	ret = -EFAULT;
	if (copy_from_user(ctl, &user_vmcb->control, sizeof(*ctl)))
		goto out_free;
	if (copy_from_user(save, &user_vmcb->save, sizeof(*save)))
		goto out_free;

	ret = -EINVAL;
	if (!nested_vmcb_check_controls(ctl))
		goto out_free;

	/*
	 * Processor state contains L2 state.  Check that it is
	 * valid for guest mode (see nested_vmcb_check_save).
	 */
	cr0 = kvm_read_cr0(vcpu);
	if (((cr0 & X86_CR0_CD) == 0) && (cr0 & X86_CR0_NW))
		goto out_free;

	/*
	 * Validate host state saved from before VMRUN (see
	 * nested_svm_check_permissions).
	 */
	if (!(save->cr0 & X86_CR0_PG) ||
	    !(save->cr0 & X86_CR0_PE) ||
	    (save->rflags & X86_EFLAGS_VM) ||
	    !nested_vmcb_valid_sregs(vcpu, save))
		goto out_free;

	/*
	 * All checks done, we can enter guest mode.  Userspace provides
	 * vmcb12.control, which will be combined with L1 and stored into
	 * vmcb02, and the L1 save state which we store in vmcb01.
	 * L2 registers if needed are moved from the current VMCB to VMCB02.
	 */

	svm->nested.nested_run_pending =
		!!(kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING);

	svm->nested.vmcb12_gpa = kvm_state->hdr.svm.vmcb_pa;
	if (svm->current_vmcb == &svm->vmcb01)
		svm->nested.vmcb02.ptr->save = svm->vmcb01.ptr->save;

	svm->vmcb01.ptr->save.es = save->es;
	svm->vmcb01.ptr->save.cs = save->cs;
	svm->vmcb01.ptr->save.ss = save->ss;
	svm->vmcb01.ptr->save.ds = save->ds;
	svm->vmcb01.ptr->save.gdtr = save->gdtr;
	svm->vmcb01.ptr->save.idtr = save->idtr;
	svm->vmcb01.ptr->save.rflags = save->rflags | X86_EFLAGS_FIXED;
	svm->vmcb01.ptr->save.efer = save->efer;
	svm->vmcb01.ptr->save.cr0 = save->cr0;
	svm->vmcb01.ptr->save.cr3 = save->cr3;
	svm->vmcb01.ptr->save.cr4 = save->cr4;
	svm->vmcb01.ptr->save.rax = save->rax;
	svm->vmcb01.ptr->save.rsp = save->rsp;
	svm->vmcb01.ptr->save.rip = save->rip;
	svm->vmcb01.ptr->save.cpl = 0;

	nested_load_control_from_vmcb12(svm, ctl);

	svm_switch_vmcb(svm, &svm->nested.vmcb02);

	nested_vmcb02_prepare_control(svm);

	kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
	ret = 0;
out_free:
	kfree(save);
	kfree(ctl);

	return ret;
}

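/*
 * Finish the parts of nested state restore that need the guest memory map,
 * requested via KVM_REQ_GET_NESTED_STATE_PAGES: load the L2 CR3 and merge
 * L1's MSR permission bitmap.  A failed MSRPM merge is surfaced to
 * userspace as a KVM_EXIT_INTERNAL_ERROR.
 */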
static bool svm_get_nested_state_pages(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (WARN_ON(!is_guest_mode(vcpu)))
		return true;

	if (nested_svm_load_cr3(&svm->vcpu, vcpu->arch.cr3,
				nested_npt_enabled(svm)))
		return false;

	if (!nested_svm_vmrun_msrpm(svm)) {
		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		vcpu->run->internal.suberror =
			KVM_INTERNAL_ERROR_EMULATION;
		vcpu->run->internal.ndata = 0;
		return false;
	}

	return true;
}

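/*
 * Nested-virtualization callbacks exported to the common x86 code via
 * struct kvm_x86_nested_ops.
 */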
struct kvm_x86_nested_ops svm_nested_ops = {
	.check_events = svm_check_nested_events,
	.triple_fault = nested_svm_triple_fault,
	.get_nested_state_pages = svm_get_nested_state_pages,
	.get_state = svm_get_nested_state,
	.set_state = svm_set_nested_state,
};