KVM: selftests: evmcs_test: Introduce L2 TLB flush test
Enable Hyper-V L2 TLB flush and check that Hyper-V TLB flush hypercalls
from L2 don't exit to L1 unless 'TlbLockCount' is set in the Partition
assist page.

Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Message-Id: <20221101145426.251680-47-vkuznets@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 75ee7505fe
commit 4b5d8b222b
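What the test relies on: with the L2 TLB flush enlightenment enabled, L0 handles HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE issued by L2 on its own and only reflects a synthetic VM-exit (HV_VMX_SYNTHETIC_EXIT_REASON_TRAP_AFTER_FLUSH) to L1 when 'TlbLockCount' in the Partition assist page is non-zero. A self-contained toy model of that contract, with invented names and no KVM internals, just to make the expectation explicit:

#include <stdint.h>
#include <stdio.h>

#define HV_VMX_SYNTHETIC_EXIT_REASON_TRAP_AFTER_FLUSH 0x10000031

/* Only the first field of the Partition assist page matters here. */
struct partition_assist_pg {
	uint32_t tlb_lock_count;
};

/* Returns the exit reason L1 observes, or 0 if L2 is resumed directly. */
static uint32_t l0_handle_l2_flush(const struct partition_assist_pg *pa)
{
	/* L0 performs the TLB flush on L2's behalf in both cases. */
	if (pa->tlb_lock_count != 0)
		return HV_VMX_SYNTHETIC_EXIT_REASON_TRAP_AFTER_FLUSH;
	return 0;
}

int main(void)
{
	struct partition_assist_pg pa = { .tlb_lock_count = 0 };

	printf("TlbLockCount=0 -> %#x (no exit to L1)\n", l0_handle_l2_flush(&pa));
	pa.tlb_lock_count = 1;
	printf("TlbLockCount=1 -> %#x (synthetic exit)\n", l0_handle_l2_flush(&pa));
	return 0;
}

The diff below wires exactly this expectation into the evmcs selftest.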
@@ -237,6 +237,8 @@ struct hv_enlightened_vmcs {
 #define HV_VMX_ENLIGHTENED_CLEAN_FIELD_ENLIGHTENMENTSCONTROL BIT(15)
 #define HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL 0xFFFF
 
+#define HV_VMX_SYNTHETIC_EXIT_REASON_TRAP_AFTER_FLUSH 0x10000031
+
 extern struct hv_enlightened_vmcs *current_evmcs;
 
 int vcpu_enable_evmcs(struct kvm_vcpu *vcpu);
@@ -16,6 +16,7 @@
 
 #include "kvm_util.h"
 
+#include "hyperv.h"
 #include "vmx.h"
 
 static int ud_count;
@@ -41,6 +42,8 @@ static inline void rdmsr_from_l2(uint32_t msr)
 /* Exit to L1 from L2 with RDMSR instruction */
 void l2_guest_code(void)
 {
+	u64 unused;
+
 	GUEST_SYNC(7);
 
 	GUEST_SYNC(8);
@@ -57,15 +60,31 @@ void l2_guest_code(void)
 	vmcall();
 	rdmsr_from_l2(MSR_GS_BASE); /* intercepted */
 
+	/* L2 TLB flush tests */
+	hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE | HV_HYPERCALL_FAST_BIT, 0x0,
+			 HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES | HV_FLUSH_ALL_PROCESSORS);
+	rdmsr_from_l2(MSR_FS_BASE);
+	/*
+	 * Note: hypercall status (RAX) is not preserved correctly by L1 after
+	 * synthetic vmexit, use unchecked version.
+	 */
+	__hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE | HV_HYPERCALL_FAST_BIT, 0x0,
+			   HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES | HV_FLUSH_ALL_PROCESSORS,
+			   &unused);
+
 	/* Done, exit to L1 and never come back. */
 	vmcall();
 }
 
-void guest_code(struct vmx_pages *vmx_pages, struct hyperv_test_pages *hv_pages)
+void guest_code(struct vmx_pages *vmx_pages, struct hyperv_test_pages *hv_pages,
+		vm_vaddr_t hv_hcall_page_gpa)
 {
 #define L2_GUEST_STACK_SIZE 64
 	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
 
+	wrmsr(HV_X64_MSR_GUEST_OS_ID, HYPERV_LINUX_OS_ID);
+	wrmsr(HV_X64_MSR_HYPERCALL, hv_hcall_page_gpa);
+
 	x2apic_enable();
 
 	GUEST_SYNC(1);
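The flushes above go through the selftests' hyperv_hypercall()/__hyperv_hypercall() helpers using the 'fast' calling convention. As a rough, hedged sketch of what such a helper amounts to, assuming the TLFS register layout (RCX = control word, RDX/R8 = the two immediate arguments when HV_HYPERCALL_FAST_BIT is set, RAX = completion status); this is not the selftests' actual implementation:

#include <stdint.h>

/*
 * Illustrative sketch only; the real helpers live in the selftests'
 * hyperv.h.
 */
static inline uint64_t hv_fast_hypercall(uint64_t control, uint64_t arg1,
					 uint64_t arg2)
{
	register uint64_t r8 asm("r8") = arg2;
	uint64_t status;

	asm volatile("vmcall"	/* VMX guest; an AMD guest would use vmmcall */
		     : "=a" (status)
		     : "c" (control), "d" (arg1), "r" (r8)
		     : "memory");
	return status;
}

The RAX status is also why the second flush uses the unchecked __hyperv_hypercall() variant: as the diff's own comment says, L1 does not preserve RAX across the synthetic TRAP_AFTER_FLUSH exit, so asserting the status there would be meaningless.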
@@ -95,7 +114,17 @@ void guest_code(struct vmx_pages *vmx_pages, struct hyperv_test_pages *hv_pages)
 	vmwrite(PIN_BASED_VM_EXEC_CONTROL, vmreadz(PIN_BASED_VM_EXEC_CONTROL) |
 		PIN_BASED_NMI_EXITING);
 
+	/* L2 TLB flush setup */
+	current_evmcs->partition_assist_page = hv_pages->partition_assist_gpa;
+	current_evmcs->hv_enlightenments_control.nested_flush_hypercall = 1;
+	current_evmcs->hv_vm_id = 1;
+	current_evmcs->hv_vp_id = 1;
+	current_vp_assist->nested_control.features.directhypercall = 1;
+	*(u32 *)(hv_pages->partition_assist) = 0;
+
 	GUEST_ASSERT(!vmlaunch());
+	GUEST_ASSERT_EQ(vmreadz(VM_EXIT_REASON), EXIT_REASON_EXCEPTION_NMI);
+	GUEST_ASSERT_EQ((vmreadz(VM_EXIT_INTR_INFO) & 0xff), NMI_VECTOR);
 	GUEST_ASSERT(vmptrstz() == hv_pages->enlightened_vmcs_gpa);
 
 	/*
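The setup block above turns the enlightenment on: the eVMCS gets the Partition assist page GPA and the nested_flush_hypercall control bit, hv_vm_id/hv_vp_id are set to non-zero values, directhypercall is enabled in the VP assist page, and TlbLockCount is cleared. The raw *(u32 *) store works because the Partition assist page begins with the 32-bit TlbLockCount; roughly (mirroring the kernel's struct hv_partition_assist_pg, shown only for orientation):

#include <stdint.h>

/* TlbLockCount is the first, and here the only relevant, field of the page. */
struct hv_partition_assist_pg {
	uint32_t tlb_lock_count;
};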
@@ -139,6 +168,18 @@ void guest_code(struct vmx_pages *vmx_pages, struct hyperv_test_pages *hv_pages)
 	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_MSR_READ);
 	current_evmcs->guest_rip += 2; /* rdmsr */
 
+	/*
+	 * L2 TLB flush test. First VMCALL should be handled directly by L0,
+	 * no VMCALL exit expected.
+	 */
+	GUEST_ASSERT(!vmresume());
+	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_MSR_READ);
+	current_evmcs->guest_rip += 2; /* rdmsr */
+	/* Enable synthetic vmexit */
+	*(u32 *)(hv_pages->partition_assist) = 1;
+	GUEST_ASSERT(!vmresume());
+	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == HV_VMX_SYNTHETIC_EXIT_REASON_TRAP_AFTER_FLUSH);
+
 	GUEST_ASSERT(!vmresume());
 	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
 	GUEST_SYNC(11);
@@ -192,6 +233,7 @@ static struct kvm_vcpu *save_restore_vm(struct kvm_vm *vm,
 int main(int argc, char *argv[])
 {
 	vm_vaddr_t vmx_pages_gva = 0, hv_pages_gva = 0;
+	vm_vaddr_t hcall_page;
 
 	struct kvm_vcpu *vcpu;
 	struct kvm_vm *vm;
@@ -205,12 +247,16 @@ int main(int argc, char *argv[])
 	TEST_REQUIRE(kvm_has_cap(KVM_CAP_NESTED_STATE));
 	TEST_REQUIRE(kvm_has_cap(KVM_CAP_HYPERV_ENLIGHTENED_VMCS));
 
+	hcall_page = vm_vaddr_alloc_pages(vm, 1);
+	memset(addr_gva2hva(vm, hcall_page), 0x0, getpagesize());
+
 	vcpu_set_hv_cpuid(vcpu);
 	vcpu_enable_evmcs(vcpu);
 
 	vcpu_alloc_vmx(vm, &vmx_pages_gva);
 	vcpu_alloc_hyperv_test_pages(vm, &hv_pages_gva);
-	vcpu_args_set(vcpu, 2, vmx_pages_gva, hv_pages_gva);
+	vcpu_args_set(vcpu, 3, vmx_pages_gva, hv_pages_gva, addr_gva2gpa(vm, hcall_page));
+	vcpu_set_msr(vcpu, HV_X64_MSR_VP_INDEX, vcpu->id);
 
 	vm_init_descriptor_tables(vm);
 	vcpu_init_descriptor_tables(vcpu);