KVM: x86: return all bits from get_interrupt_shadow
For the next patch we will need to know the full state of the interrupt shadow; we will then set KVM_REQ_EVENT when one bit is cleared.

However, right now get_interrupt_shadow only returns the one corresponding to the emulated instruction, or an unconditional 0 if the emulated instruction does not have an interrupt shadow. This is confusing and does not allow us to check for cleared bits as mentioned above.

Clean the callback up, and modify toggle_interruptibility to match the comment above the call. As a small result, the call to set_interrupt_shadow will be skipped in the common case where int_shadow == 0 && mask == 0.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 98eb2f8b14
commit 37ccdcbe07
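To make the motivation concrete, here is a minimal sketch (not part of this commit) of how a caller could use the full shadow state to set KVM_REQ_EVENT once a bit is cleared, as the commit message anticipates for the next patch. The helper name example_toggle_shadow is hypothetical; kvm_make_request() and KVM_REQ_EVENT are KVM's existing request machinery.

/*
 * Illustrative sketch only, not code from this commit: once
 * get_interrupt_shadow() reports every shadow bit, the caller can
 * notice a 1 -> 0 transition and ask for another pass through the
 * event injection path.
 */
static void example_toggle_shadow(struct kvm_vcpu *vcpu, u32 mask)
{
	u32 int_shadow = kvm_x86_ops->get_interrupt_shadow(vcpu);

	if (int_shadow & mask)
		mask = 0;

	if (unlikely(int_shadow || mask)) {
		kvm_x86_ops->set_interrupt_shadow(vcpu, mask);
		if (!mask)
			/* a previously set shadow bit was just cleared */
			kvm_make_request(KVM_REQ_EVENT, vcpu);
	}
}

In the common case where int_shadow == 0 && mask == 0, the whole block is skipped and set_interrupt_shadow() is never called, which is the small optimization the commit message mentions.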
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -717,7 +717,7 @@ struct kvm_x86_ops {
 	int (*handle_exit)(struct kvm_vcpu *vcpu);
 	void (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
 	void (*set_interrupt_shadow)(struct kvm_vcpu *vcpu, int mask);
-	u32 (*get_interrupt_shadow)(struct kvm_vcpu *vcpu, int mask);
+	u32 (*get_interrupt_shadow)(struct kvm_vcpu *vcpu);
 	void (*patch_hypercall)(struct kvm_vcpu *vcpu,
 				unsigned char *hypercall_addr);
 	void (*set_irq)(struct kvm_vcpu *vcpu);
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -486,14 +486,14 @@ static int is_external_interrupt(u32 info)
 	return info == (SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR);
 }
 
-static u32 svm_get_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
+static u32 svm_get_interrupt_shadow(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 	u32 ret = 0;
 
 	if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK)
-		ret |= KVM_X86_SHADOW_INT_STI | KVM_X86_SHADOW_INT_MOV_SS;
-	return ret & mask;
+		ret = KVM_X86_SHADOW_INT_STI | KVM_X86_SHADOW_INT_MOV_SS;
+	return ret;
 }
 
 static void svm_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1943,7 +1943,7 @@ static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
 	vmcs_writel(GUEST_RFLAGS, rflags);
 }
 
-static u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
+static u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu)
 {
 	u32 interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
 	int ret = 0;
@@ -1953,7 +1953,7 @@ static u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
 	if (interruptibility & GUEST_INTR_STATE_MOV_SS)
 		ret |= KVM_X86_SHADOW_INT_MOV_SS;
 
-	return ret & mask;
+	return ret;
 }
 
 static void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2978,9 +2978,7 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
 		vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft;
 	events->interrupt.nr = vcpu->arch.interrupt.nr;
 	events->interrupt.soft = 0;
-	events->interrupt.shadow =
-		kvm_x86_ops->get_interrupt_shadow(vcpu,
-			KVM_X86_SHADOW_INT_MOV_SS | KVM_X86_SHADOW_INT_STI);
+	events->interrupt.shadow = kvm_x86_ops->get_interrupt_shadow(vcpu);
 
 	events->nmi.injected = vcpu->arch.nmi_injected;
 	events->nmi.pending = vcpu->arch.nmi_pending != 0;
@@ -4860,7 +4858,7 @@ static const struct x86_emulate_ops emulate_ops = {
 
 static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask)
 {
-	u32 int_shadow = kvm_x86_ops->get_interrupt_shadow(vcpu, mask);
+	u32 int_shadow = kvm_x86_ops->get_interrupt_shadow(vcpu);
 	/*
 	 * an sti; sti; sequence only disable interrupts for the first
 	 * instruction. So, if the last instruction, be it emulated or
@@ -4868,7 +4866,9 @@ static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask)
 	 * means that the last instruction is an sti. We should not
 	 * leave the flag on in this case. The same goes for mov ss
 	 */
-	if (!(int_shadow & mask))
-		kvm_x86_ops->set_interrupt_shadow(vcpu, mask);
+	if (int_shadow & mask)
+		mask = 0;
+	if (unlikely(int_shadow || mask))
+		kvm_x86_ops->set_interrupt_shadow(vcpu, mask);
 }
 