x86/virt: KVM: Move VMXOFF helpers into KVM VMX

Now that VMX is disabled in emergencies via the virt callbacks, move the
VMXOFF helpers into KVM, the only remaining user.

No functional change intended.

Reviewed-by: Kai Huang <kai.huang@intel.com>
Link: https://lore.kernel.org/r/20230721201859.2307736-11-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
Author: Sean Christopherson <seanjc@google.com>
Date:   2023-07-21 13:18:50 -07:00
Parent: b6a6af0d19
Commit: 22e420e127

2 changed files with 26 additions and 45 deletions

arch/x86/include/asm/virtext.h
@@ -19,48 +19,6 @@
 #include <asm/svm.h>
 #include <asm/tlbflush.h>
 
-/*
- * VMX functions:
- */
-
-/**
- * cpu_vmxoff() - Disable VMX on the current CPU
- *
- * Disable VMX and clear CR4.VMXE (even if VMXOFF faults)
- *
- * Note, VMXOFF causes a #UD if the CPU is !post-VMXON, but it's impossible to
- * atomically track post-VMXON state, e.g. this may be called in NMI context.
- * Eat all faults as all other faults on VMXOFF faults are mode related, i.e.
- * faults are guaranteed to be due to the !post-VMXON check unless the CPU is
- * magically in RM, VM86, compat mode, or at CPL>0.
- */
-static inline int cpu_vmxoff(void)
-{
-	asm_volatile_goto("1: vmxoff\n\t"
-			  _ASM_EXTABLE(1b, %l[fault])
-			  ::: "cc", "memory" : fault);
-
-	cr4_clear_bits(X86_CR4_VMXE);
-	return 0;
-
-fault:
-	cr4_clear_bits(X86_CR4_VMXE);
-	return -EIO;
-}
-
-static inline int cpu_vmx_enabled(void)
-{
-	return __read_cr4() & X86_CR4_VMXE;
-}
-
-/** Disable VMX if it is enabled on the current CPU
- */
-static inline void __cpu_emergency_vmxoff(void)
-{
-	if (cpu_vmx_enabled())
-		cpu_vmxoff();
-}
-
 /*
  * SVM functions:
  */
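
The comment deleted above documents the trick that makes cpu_vmxoff() (and its KVM replacement below) safe to call blind: execute VMXOFF and let the exception table swallow the #UD if the CPU was not post-VMXON. A condensed, commented sketch of that asm-goto fixup pattern, assuming kernel context of this era (the function name is illustrative, not part of the commit):

#include <asm/asm.h>		/* _ASM_EXTABLE() */
#include <asm/tlbflush.h>	/* cr4_clear_bits() */

/* Illustrative sketch; mirrors the pattern used by kvm_cpu_vmxoff(). */
static int vmxoff_fixup_sketch(void)
{
	/*
	 * "1:" labels the VMXOFF instruction so that _ASM_EXTABLE() can
	 * emit an exception-table entry for it: if VMXOFF faults, the
	 * fault handler resumes execution at the C label "fault" instead
	 * of treating the fault as fatal.
	 */
	asm_volatile_goto("1: vmxoff\n\t"
			  _ASM_EXTABLE(1b, %l[fault])
			  ::: "cc", "memory" : fault);

	cr4_clear_bits(X86_CR4_VMXE);	/* VMXOFF executed, VMX is off */
	return 0;

fault:
	cr4_clear_bits(X86_CR4_VMXE);	/* clear CR4.VMXE even on #UD */
	return -EIO;
}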

arch/x86/kvm/vmx/vmx.c
@@ -47,7 +47,6 @@
 #include <asm/mshyperv.h>
 #include <asm/mwait.h>
 #include <asm/spec-ctrl.h>
-#include <asm/virtext.h>
 #include <asm/vmx.h>
 
 #include "capabilities.h"
@@ -725,6 +724,29 @@ static int vmx_set_guest_uret_msr(struct vcpu_vmx *vmx,
 	return ret;
 }
 
+/*
+ * Disable VMX and clear CR4.VMXE (even if VMXOFF faults)
+ *
+ * Note, VMXOFF causes a #UD if the CPU is !post-VMXON, but it's impossible to
+ * atomically track post-VMXON state, e.g. this may be called in NMI context.
+ * Eat all faults as all other faults on VMXOFF faults are mode related, i.e.
+ * faults are guaranteed to be due to the !post-VMXON check unless the CPU is
+ * magically in RM, VM86, compat mode, or at CPL>0.
+ */
+static int kvm_cpu_vmxoff(void)
+{
+	asm_volatile_goto("1: vmxoff\n\t"
+			  _ASM_EXTABLE(1b, %l[fault])
+			  ::: "cc", "memory" : fault);
+
+	cr4_clear_bits(X86_CR4_VMXE);
+	return 0;
+
+fault:
+	cr4_clear_bits(X86_CR4_VMXE);
+	return -EIO;
+}
+
 static void vmx_emergency_disable(void)
 {
 	int cpu = raw_smp_processor_id();
@@ -734,7 +756,8 @@ static void vmx_emergency_disable(void)
 			    loaded_vmcss_on_cpu_link)
 		vmcs_clear(v->vmcs);
 
-	__cpu_emergency_vmxoff();
+	if (__read_cr4() & X86_CR4_VMXE)
+		kvm_cpu_vmxoff();
 }
 
 static void __loaded_vmcs_clear(void *arg)
@@ -2799,7 +2822,7 @@ static void vmx_hardware_disable(void)
 {
 	vmclear_local_loaded_vmcss();
 
-	if (cpu_vmxoff())
+	if (kvm_cpu_vmxoff())
 		kvm_spurious_fault();
 
 	hv_reset_evmcs();
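
For context on the changelog's "VMX is disabled in emergencies via the virt callbacks": earlier patches in this series route the crash/reboot path through a per-vendor emergency callback, which the VMX module registers at init, roughly as below. This is a sketch assuming the cpu_emergency_register_virt_callback() API added by that series; the registration itself is not part of this diff.

/* Sketch; not part of this commit. */
static int __init vmx_init_sketch(void)
{
	/*
	 * Hook the emergency reboot/crash path so it can force the CPU
	 * out of VMX operation (via vmx_emergency_disable() above)
	 * before, e.g., jumping into a kexec'd crash kernel.
	 */
	cpu_emergency_register_virt_callback(vmx_emergency_disable);
	return 0;
}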