KVM/nVMX: Use __vmx_vcpu_run in nested_vmx_check_vmentry_hw
Replace inline assembly in nested_vmx_check_vmentry_hw with a call to __vmx_vcpu_run. The function is not performance critical, so the (double) GPR save/restore performed by __vmx_vcpu_run can be tolerated. Cc: Paolo Bonzini <pbonzini@redhat.com> Cc: Sean Christopherson <seanjc@google.com> Reviewed-and-tested-by: Sean Christopherson <seanjc@google.com> Signed-off-by: Uros Bizjak <ubizjak@gmail.com> [sean: dropped versioning info from changelog] Signed-off-by: Sean Christopherson <seanjc@google.com> Message-Id: <20201231002702.2223707-5-seanjc@google.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
This commit is contained in:
parent
53666664a3
commit
150f17bfab
|
@ -12,6 +12,7 @@
|
|||
#include "nested.h"
|
||||
#include "pmu.h"
|
||||
#include "trace.h"
|
||||
#include "vmx.h"
|
||||
#include "x86.h"
|
||||
|
||||
static bool __read_mostly enable_shadow_vmcs = 1;
|
||||
|
@ -3057,35 +3058,8 @@ static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu)
|
|||
vmx->loaded_vmcs->host_state.cr4 = cr4;
|
||||
}
|
||||
|
||||
asm(
|
||||
"sub $%c[wordsize], %%" _ASM_SP "\n\t" /* temporarily adjust RSP for CALL */
|
||||
"cmp %%" _ASM_SP ", %c[host_state_rsp](%[loaded_vmcs]) \n\t"
|
||||
"je 1f \n\t"
|
||||
__ex("vmwrite %%" _ASM_SP ", %[HOST_RSP]") "\n\t"
|
||||
"mov %%" _ASM_SP ", %c[host_state_rsp](%[loaded_vmcs]) \n\t"
|
||||
"1: \n\t"
|
||||
"add $%c[wordsize], %%" _ASM_SP "\n\t" /* un-adjust RSP */
|
||||
|
||||
/* Check if vmlaunch or vmresume is needed */
|
||||
"cmpb $0, %c[launched](%[loaded_vmcs])\n\t"
|
||||
|
||||
/*
|
||||
* VMLAUNCH and VMRESUME clear RFLAGS.{CF,ZF} on VM-Exit, set
|
||||
* RFLAGS.CF on VM-Fail Invalid and set RFLAGS.ZF on VM-Fail
|
||||
* Valid. vmx_vmenter() directly "returns" RFLAGS, and so the
|
||||
* results of VM-Enter is captured via CC_{SET,OUT} to vm_fail.
|
||||
*/
|
||||
"call vmx_vmenter\n\t"
|
||||
|
||||
CC_SET(be)
|
||||
: ASM_CALL_CONSTRAINT, CC_OUT(be) (vm_fail)
|
||||
: [HOST_RSP]"r"((unsigned long)HOST_RSP),
|
||||
[loaded_vmcs]"r"(vmx->loaded_vmcs),
|
||||
[launched]"i"(offsetof(struct loaded_vmcs, launched)),
|
||||
[host_state_rsp]"i"(offsetof(struct loaded_vmcs, host_state.rsp)),
|
||||
[wordsize]"i"(sizeof(ulong))
|
||||
: "memory"
|
||||
);
|
||||
vm_fail = __vmx_vcpu_run(vmx, (unsigned long *)&vcpu->arch.regs,
|
||||
vmx->loaded_vmcs->launched);
|
||||
|
||||
if (vmx->msr_autoload.host.nr)
|
||||
vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
|
||||
|
|
|
@ -44,7 +44,7 @@
|
|||
* they VM-Fail, whereas a successful VM-Enter + VM-Exit will jump
|
||||
* to vmx_vmexit.
|
||||
*/
|
||||
SYM_FUNC_START(vmx_vmenter)
|
||||
SYM_FUNC_START_LOCAL(vmx_vmenter)
|
||||
/* EFLAGS.ZF is set if VMCS.LAUNCHED == 0 */
|
||||
je 2f
|
||||
|
||||
|
|
|
@ -6658,8 +6658,6 @@ static fastpath_t vmx_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
|
|||
}
|
||||
}
|
||||
|
||||
bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs, bool launched);
|
||||
|
||||
static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
|
||||
struct vcpu_vmx *vmx)
|
||||
{
|
||||
|
|
|
@ -388,6 +388,7 @@ void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
|
|||
struct vmx_uret_msr *vmx_find_uret_msr(struct vcpu_vmx *vmx, u32 msr);
|
||||
void pt_update_intercept_for_msr(struct kvm_vcpu *vcpu);
|
||||
void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp);
|
||||
bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs, bool launched);
|
||||
int vmx_find_loadstore_msr_slot(struct vmx_msrs *m, u32 msr);
|
||||
void vmx_ept_load_pdptrs(struct kvm_vcpu *vcpu);
|
||||
void vmx_set_intercept_for_msr(struct kvm_vcpu *vcpu,
|
||||
|
|
Loading…
Reference in New Issue