KVM: x86: Account for failing enable_irq_window for NMI window request
With VMX, enable_irq_window can now return -EBUSY, in which case an
immediate exit shall be requested before entering the guest. Account for
this also in enable_nmi_window which uses enable_irq_window in absence
of vnmi support, e.g.

Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
This commit is contained in:

parent 5975a2e095
commit 03b28f8133
@@ -695,7 +695,7 @@ struct kvm_x86_ops {
 	int (*nmi_allowed)(struct kvm_vcpu *vcpu);
 	bool (*get_nmi_mask)(struct kvm_vcpu *vcpu);
 	void (*set_nmi_mask)(struct kvm_vcpu *vcpu, bool masked);
-	void (*enable_nmi_window)(struct kvm_vcpu *vcpu);
+	int (*enable_nmi_window)(struct kvm_vcpu *vcpu);
 	int (*enable_irq_window)(struct kvm_vcpu *vcpu);
 	void (*update_cr8_intercept)(struct kvm_vcpu *vcpu, int tpr, int irr);
 	int (*vm_has_apicv)(struct kvm *kvm);
@@ -3649,13 +3649,13 @@ static int enable_irq_window(struct kvm_vcpu *vcpu)
 	return 0;
 }
 
-static void enable_nmi_window(struct kvm_vcpu *vcpu)
+static int enable_nmi_window(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 
 	if ((svm->vcpu.arch.hflags & (HF_NMI_MASK | HF_IRET_MASK))
 	    == HF_NMI_MASK)
-		return; /* IRET will cause a vm exit */
+		return 0; /* IRET will cause a vm exit */
 
 	/*
 	 * Something prevents NMI from been injected. Single step over possible
@@ -3664,6 +3664,7 @@ static void enable_nmi_window(struct kvm_vcpu *vcpu)
 	svm->nmi_singlestep = true;
 	svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
 	update_db_bp_intercept(vcpu);
+	return 0;
 }
 
 static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr)
@@ -4417,22 +4417,20 @@ static int enable_irq_window(struct kvm_vcpu *vcpu)
 	return 0;
 }
 
-static void enable_nmi_window(struct kvm_vcpu *vcpu)
+static int enable_nmi_window(struct kvm_vcpu *vcpu)
 {
 	u32 cpu_based_vm_exec_control;
 
-	if (!cpu_has_virtual_nmis()) {
-		enable_irq_window(vcpu);
-		return;
-	}
-
-	if (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_STI) {
-		enable_irq_window(vcpu);
-		return;
-	}
+	if (!cpu_has_virtual_nmis())
+		return enable_irq_window(vcpu);
+
+	if (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_STI)
+		return enable_irq_window(vcpu);
 
 	cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
 	cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_NMI_PENDING;
 	vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
+
+	return 0;
 }
 
 static void vmx_inject_irq(struct kvm_vcpu *vcpu)
@@ -5756,7 +5756,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 
 	/* enable NMI/IRQ window open exits if needed */
 	if (vcpu->arch.nmi_pending)
-		kvm_x86_ops->enable_nmi_window(vcpu);
+		req_immediate_exit =
+			kvm_x86_ops->enable_nmi_window(vcpu) != 0;
 	else if (kvm_cpu_has_injectable_intr(vcpu) || req_int_win)
 		req_immediate_exit =
 			kvm_x86_ops->enable_irq_window(vcpu) != 0;
Loading…
Reference in New Issue