kvm: x86: make kvm_emulate_* consistent
Currently kvm_emulate() skips the instruction but kvm_emulate_* sometimes don't. The end result is the caller ends up doing the skip themselves. Let's make them consistent. Signed-off-by: Joel Schopp <joel.schopp@amd.com> Reviewed-by: Radim Krčmář <rkrcmar@redhat.com> Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
This commit is contained in:
parent
1170adc6dd
commit
5cb56059c9
|
@ -933,6 +933,7 @@ struct x86_emulate_ctxt;
|
||||||
int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port);
|
int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port);
|
||||||
void kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
|
void kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
|
||||||
int kvm_emulate_halt(struct kvm_vcpu *vcpu);
|
int kvm_emulate_halt(struct kvm_vcpu *vcpu);
|
||||||
|
int kvm_vcpu_halt(struct kvm_vcpu *vcpu);
|
||||||
int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu);
|
int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu);
|
||||||
|
|
||||||
void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
|
void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
|
||||||
|
|
|
@ -1929,14 +1929,12 @@ static int nop_on_interception(struct vcpu_svm *svm)
|
||||||
static int halt_interception(struct vcpu_svm *svm)
|
static int halt_interception(struct vcpu_svm *svm)
|
||||||
{
|
{
|
||||||
svm->next_rip = kvm_rip_read(&svm->vcpu) + 1;
|
svm->next_rip = kvm_rip_read(&svm->vcpu) + 1;
|
||||||
skip_emulated_instruction(&svm->vcpu);
|
|
||||||
return kvm_emulate_halt(&svm->vcpu);
|
return kvm_emulate_halt(&svm->vcpu);
|
||||||
}
|
}
|
||||||
|
|
||||||
static int vmmcall_interception(struct vcpu_svm *svm)
|
static int vmmcall_interception(struct vcpu_svm *svm)
|
||||||
{
|
{
|
||||||
svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
|
svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
|
||||||
skip_emulated_instruction(&svm->vcpu);
|
|
||||||
kvm_emulate_hypercall(&svm->vcpu);
|
kvm_emulate_hypercall(&svm->vcpu);
|
||||||
return 1;
|
return 1;
|
||||||
}
|
}
|
||||||
|
|
|
@ -5000,7 +5000,7 @@ static int handle_rmode_exception(struct kvm_vcpu *vcpu,
|
||||||
if (emulate_instruction(vcpu, 0) == EMULATE_DONE) {
|
if (emulate_instruction(vcpu, 0) == EMULATE_DONE) {
|
||||||
if (vcpu->arch.halt_request) {
|
if (vcpu->arch.halt_request) {
|
||||||
vcpu->arch.halt_request = 0;
|
vcpu->arch.halt_request = 0;
|
||||||
return kvm_emulate_halt(vcpu);
|
return kvm_vcpu_halt(vcpu);
|
||||||
}
|
}
|
||||||
return 1;
|
return 1;
|
||||||
}
|
}
|
||||||
|
@ -5527,13 +5527,11 @@ static int handle_interrupt_window(struct kvm_vcpu *vcpu)
|
||||||
|
|
||||||
static int handle_halt(struct kvm_vcpu *vcpu)
|
static int handle_halt(struct kvm_vcpu *vcpu)
|
||||||
{
|
{
|
||||||
skip_emulated_instruction(vcpu);
|
|
||||||
return kvm_emulate_halt(vcpu);
|
return kvm_emulate_halt(vcpu);
|
||||||
}
|
}
|
||||||
|
|
||||||
static int handle_vmcall(struct kvm_vcpu *vcpu)
|
static int handle_vmcall(struct kvm_vcpu *vcpu)
|
||||||
{
|
{
|
||||||
skip_emulated_instruction(vcpu);
|
|
||||||
kvm_emulate_hypercall(vcpu);
|
kvm_emulate_hypercall(vcpu);
|
||||||
return 1;
|
return 1;
|
||||||
}
|
}
|
||||||
|
@ -5564,7 +5562,6 @@ static int handle_rdpmc(struct kvm_vcpu *vcpu)
|
||||||
|
|
||||||
static int handle_wbinvd(struct kvm_vcpu *vcpu)
|
static int handle_wbinvd(struct kvm_vcpu *vcpu)
|
||||||
{
|
{
|
||||||
skip_emulated_instruction(vcpu);
|
|
||||||
kvm_emulate_wbinvd(vcpu);
|
kvm_emulate_wbinvd(vcpu);
|
||||||
return 1;
|
return 1;
|
||||||
}
|
}
|
||||||
|
@ -5903,7 +5900,7 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
|
||||||
|
|
||||||
if (vcpu->arch.halt_request) {
|
if (vcpu->arch.halt_request) {
|
||||||
vcpu->arch.halt_request = 0;
|
vcpu->arch.halt_request = 0;
|
||||||
ret = kvm_emulate_halt(vcpu);
|
ret = kvm_vcpu_halt(vcpu);
|
||||||
goto out;
|
goto out;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -9518,7 +9515,7 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
|
||||||
vmcs12->launch_state = 1;
|
vmcs12->launch_state = 1;
|
||||||
|
|
||||||
if (vmcs12->guest_activity_state == GUEST_ACTIVITY_HLT)
|
if (vmcs12->guest_activity_state == GUEST_ACTIVITY_HLT)
|
||||||
return kvm_emulate_halt(vcpu);
|
return kvm_vcpu_halt(vcpu);
|
||||||
|
|
||||||
vmx->nested.nested_run_pending = 1;
|
vmx->nested.nested_run_pending = 1;
|
||||||
|
|
||||||
|
|
|
@ -4706,7 +4706,7 @@ static void emulator_invlpg(struct x86_emulate_ctxt *ctxt, ulong address)
|
||||||
kvm_mmu_invlpg(emul_to_vcpu(ctxt), address);
|
kvm_mmu_invlpg(emul_to_vcpu(ctxt), address);
|
||||||
}
|
}
|
||||||
|
|
||||||
int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu)
|
int kvm_emulate_wbinvd_noskip(struct kvm_vcpu *vcpu)
|
||||||
{
|
{
|
||||||
if (!need_emulate_wbinvd(vcpu))
|
if (!need_emulate_wbinvd(vcpu))
|
||||||
return X86EMUL_CONTINUE;
|
return X86EMUL_CONTINUE;
|
||||||
|
@ -4723,11 +4723,19 @@ int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu)
|
||||||
wbinvd();
|
wbinvd();
|
||||||
return X86EMUL_CONTINUE;
|
return X86EMUL_CONTINUE;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu)
|
||||||
|
{
|
||||||
|
kvm_x86_ops->skip_emulated_instruction(vcpu);
|
||||||
|
return kvm_emulate_wbinvd_noskip(vcpu);
|
||||||
|
}
|
||||||
EXPORT_SYMBOL_GPL(kvm_emulate_wbinvd);
|
EXPORT_SYMBOL_GPL(kvm_emulate_wbinvd);
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
static void emulator_wbinvd(struct x86_emulate_ctxt *ctxt)
|
static void emulator_wbinvd(struct x86_emulate_ctxt *ctxt)
|
||||||
{
|
{
|
||||||
kvm_emulate_wbinvd(emul_to_vcpu(ctxt));
|
kvm_emulate_wbinvd_noskip(emul_to_vcpu(ctxt));
|
||||||
}
|
}
|
||||||
|
|
||||||
int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest)
|
int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest)
|
||||||
|
@ -5817,7 +5825,7 @@ void kvm_arch_exit(void)
|
||||||
free_percpu(shared_msrs);
|
free_percpu(shared_msrs);
|
||||||
}
|
}
|
||||||
|
|
||||||
int kvm_emulate_halt(struct kvm_vcpu *vcpu)
|
int kvm_vcpu_halt(struct kvm_vcpu *vcpu)
|
||||||
{
|
{
|
||||||
++vcpu->stat.halt_exits;
|
++vcpu->stat.halt_exits;
|
||||||
if (irqchip_in_kernel(vcpu->kvm)) {
|
if (irqchip_in_kernel(vcpu->kvm)) {
|
||||||
|
@ -5828,6 +5836,13 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu)
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
EXPORT_SYMBOL_GPL(kvm_vcpu_halt);
|
||||||
|
|
||||||
|
int kvm_emulate_halt(struct kvm_vcpu *vcpu)
|
||||||
|
{
|
||||||
|
kvm_x86_ops->skip_emulated_instruction(vcpu);
|
||||||
|
return kvm_vcpu_halt(vcpu);
|
||||||
|
}
|
||||||
EXPORT_SYMBOL_GPL(kvm_emulate_halt);
|
EXPORT_SYMBOL_GPL(kvm_emulate_halt);
|
||||||
|
|
||||||
int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
|
int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
|
||||||
|
@ -5912,6 +5927,8 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
|
||||||
unsigned long nr, a0, a1, a2, a3, ret;
|
unsigned long nr, a0, a1, a2, a3, ret;
|
||||||
int op_64_bit, r = 1;
|
int op_64_bit, r = 1;
|
||||||
|
|
||||||
|
kvm_x86_ops->skip_emulated_instruction(vcpu);
|
||||||
|
|
||||||
if (kvm_hv_hypercall_enabled(vcpu->kvm))
|
if (kvm_hv_hypercall_enabled(vcpu->kvm))
|
||||||
return kvm_hv_hypercall(vcpu);
|
return kvm_hv_hypercall(vcpu);
|
||||||
|
|
||||||
|
|
Loading…
Reference in New Issue