KVM: VMX: Define new functions to wrap direct calls to asm code
Define vmcs_load() and kvm_cpu_vmxon() to avoid direct calls to asm code.
Also move the VMXE bit operation out of kvm_cpu_vmxoff().

Signed-off-by: Dongxiao Xu <dongxiao.xu@intel.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
parent 6859762e8a
commit 7725b89414
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -453,6 +453,19 @@ static void vmcs_clear(struct vmcs *vmcs)
                        vmcs, phys_addr);
 }
 
+static void vmcs_load(struct vmcs *vmcs)
+{
+        u64 phys_addr = __pa(vmcs);
+        u8 error;
+
+        asm volatile (__ex(ASM_VMX_VMPTRLD_RAX) "; setna %0"
+                        : "=g"(error) : "a"(&phys_addr), "m"(phys_addr)
+                        : "cc", "memory");
+        if (error)
+                printk(KERN_ERR "kvm: vmptrld %p/%llx fail\n",
+                       vmcs, phys_addr);
+}
+
 static void __vcpu_clear(void *arg)
 {
         struct vcpu_vmx *vmx = arg;
@@ -830,7 +843,6 @@ static void vmx_load_host_state(struct vcpu_vmx *vmx)
 static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
         struct vcpu_vmx *vmx = to_vmx(vcpu);
-        u64 phys_addr = __pa(vmx->vmcs);
         u64 tsc_this, delta, new_offset;
 
         if (vcpu->cpu != cpu) {
@@ -844,15 +856,8 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
         }
 
         if (per_cpu(current_vmcs, cpu) != vmx->vmcs) {
-                u8 error;
-
                 per_cpu(current_vmcs, cpu) = vmx->vmcs;
-                asm volatile (__ex(ASM_VMX_VMPTRLD_RAX) "; setna %0"
-                              : "=g"(error) : "a"(&phys_addr), "m"(phys_addr)
-                              : "cc");
-                if (error)
-                        printk(KERN_ERR "kvm: vmptrld %p/%llx fail\n",
-                               vmx->vmcs, phys_addr);
+                vmcs_load(vmx->vmcs);
         }
 
         if (vcpu->cpu != cpu) {
@@ -1288,6 +1293,13 @@ static __init int vmx_disabled_by_bios(void)
         /* locked but not enabled */
 }
 
+static void kvm_cpu_vmxon(u64 addr)
+{
+        asm volatile (ASM_VMX_VMXON_RAX
+                        : : "a"(&addr), "m"(addr)
+                        : "memory", "cc");
+}
+
 static int hardware_enable(void *garbage)
 {
         int cpu = raw_smp_processor_id();
@@ -1310,9 +1322,7 @@ static int hardware_enable(void *garbage)
                 wrmsrl(MSR_IA32_FEATURE_CONTROL, old | test_bits);
         }
         write_cr4(read_cr4() | X86_CR4_VMXE); /* FIXME: not cpu hotplug safe */
-        asm volatile (ASM_VMX_VMXON_RAX
-                      : : "a"(&phys_addr), "m"(phys_addr)
-                      : "memory", "cc");
+        kvm_cpu_vmxon(phys_addr);
 
         ept_sync_global();
 
@@ -1336,13 +1346,13 @@ static void vmclear_local_vcpus(void)
 static void kvm_cpu_vmxoff(void)
 {
         asm volatile (__ex(ASM_VMX_VMXOFF) : : : "cc");
-        write_cr4(read_cr4() & ~X86_CR4_VMXE);
 }
 
 static void hardware_disable(void *garbage)
 {
         vmclear_local_vcpus();
         kvm_cpu_vmxoff();
+        write_cr4(read_cr4() & ~X86_CR4_VMXE);
 }
 
 static __init int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt,
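The refactoring above follows a common kernel idiom: hide each raw inline-asm sequence behind a small static helper so callers read as plain C, and latch the instruction's success or failure from the flags with a set* instruction. The sketch below is a hypothetical user-space illustration of that idiom, not code from this patch: VMPTRLD and VMXON are privileged, so LOCK CMPXCHG stands in as the flag-setting instruction and setnz plays the role vmcs_load() gives setna.

#include <stdint.h>
#include <stdio.h>

/*
 * User-space analogue of the vmcs_load()/kvm_cpu_vmxon() wrappers:
 * the asm lives in one small static function and callers see a plain
 * C interface.  CMPXCHG stands in for the privileged VMX instruction;
 * "setnz %0" latches failure from the flags, as "setna %0" does after
 * VMPTRLD in the patch above.
 */
static int try_claim(uint64_t *slot, uint64_t id)
{
        uint64_t expected = 0;  /* succeed only if *slot is still 0 */
        uint8_t error;

        asm volatile ("lock cmpxchgq %3, %1; setnz %0"
                      : "=q"(error), "+m"(*slot), "+a"(expected)
                      : "r"(id)
                      : "cc", "memory");
        return error;           /* 0 on success, 1 if the slot was taken */
}

int main(void)
{
        uint64_t slot = 0;

        printf("first claim:  %s\n", try_claim(&slot, 42) ? "fail" : "ok");
        printf("second claim: %s\n", try_claim(&slot, 43) ? "fail" : "ok");
        return 0;
}

One detail worth noting: set* instructions can only write a byte register or memory, so the sketch uses the stricter "=q" constraint; the in-tree code above relies on the looser "=g", which is safe in 64-bit mode, where every general-purpose register has a byte form.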