KVM: x86: add kvm_fast_pio() to consolidate fast PIO code
Add kvm_fast_pio() to consolidate duplicate code in VMX and SVM.
Unexport kvm_fast_pio_in() and kvm_fast_pio_out().

Suggested-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 432baf60ee
commit dca7f1284f
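For context, the helper introduced by this patch first skips the emulated IN/OUT instruction and then dispatches to the existing fast-PIO in/out paths; its return value follows the usual x86 exit-handler convention (a positive value keeps running the guest, zero means exit to userspace). Below is a minimal caller sketch, not part of the patch: example_pio_exit_handler() and decode_pio_exit() are hypothetical stand-ins for the vendor-specific decoding that the real call sites, io_interception() and handle_io() in the diff, do themselves.

/*
 * Hypothetical caller sketch (not from the patch): decode_pio_exit()
 * stands in for the vendor-specific decoding of port, size and direction.
 */
static int example_pio_exit_handler(struct kvm_vcpu *vcpu, u64 exit_info)
{
	int size, in;
	unsigned short port;

	decode_pio_exit(exit_info, &port, &size, &in);	/* assumed helper */

	/*
	 * kvm_fast_pio() skips the trapped instruction and emulates the
	 * port access; returning its result propagates "exit to userspace"
	 * (zero) back to the vcpu run loop.
	 */
	return kvm_fast_pio(vcpu, size, port, in);
}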
arch/x86/include/asm/kvm_host.h
@@ -1210,8 +1210,7 @@ int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr);
 
 struct x86_emulate_ctxt;
 
-int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port);
-int kvm_fast_pio_in(struct kvm_vcpu *vcpu, int size, unsigned short port);
+int kvm_fast_pio(struct kvm_vcpu *vcpu, int size, unsigned short port, int in);
 int kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
 int kvm_emulate_halt(struct kvm_vcpu *vcpu);
 int kvm_vcpu_halt(struct kvm_vcpu *vcpu);
arch/x86/kvm/svm.c
@@ -2640,7 +2640,7 @@ static int io_interception(struct vcpu_svm *svm)
 {
 	struct kvm_vcpu *vcpu = &svm->vcpu;
 	u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */
-	int size, in, string, ret;
+	int size, in, string;
 	unsigned port;
 
 	++svm->vcpu.stat.io_exits;
@@ -2652,16 +2652,8 @@ static int io_interception(struct vcpu_svm *svm)
 	port = io_info >> 16;
 	size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;
 	svm->next_rip = svm->vmcb->control.exit_info_2;
-	ret = kvm_skip_emulated_instruction(&svm->vcpu);
 
-	/*
-	 * TODO: we might be squashing a KVM_GUESTDBG_SINGLESTEP-triggered
-	 * KVM_EXIT_DEBUG here.
-	 */
-	if (in)
-		return kvm_fast_pio_in(vcpu, size, port) && ret;
-	else
-		return kvm_fast_pio_out(vcpu, size, port) && ret;
+	return kvm_fast_pio(&svm->vcpu, size, port, in);
 }
 
 static int nmi_interception(struct vcpu_svm *svm)
arch/x86/kvm/vmx.c
@@ -6265,7 +6265,7 @@ static int handle_triple_fault(struct kvm_vcpu *vcpu)
 static int handle_io(struct kvm_vcpu *vcpu)
 {
 	unsigned long exit_qualification;
-	int size, in, string, ret;
+	int size, in, string;
 	unsigned port;
 
 	exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
@@ -6280,16 +6280,7 @@ static int handle_io(struct kvm_vcpu *vcpu)
 	size = (exit_qualification & 7) + 1;
 	in = (exit_qualification & 8) != 0;
 
-	ret = kvm_skip_emulated_instruction(vcpu);
-
-	/*
-	 * TODO: we might be squashing a KVM_GUESTDBG_SINGLESTEP-triggered
-	 * KVM_EXIT_DEBUG here.
-	 */
-	if (in)
-		return kvm_fast_pio_in(vcpu, size, port) && ret;
-	else
-		return kvm_fast_pio_out(vcpu, size, port) && ret;
+	return kvm_fast_pio(vcpu, size, port, in);
 }
 
 static void
arch/x86/kvm/x86.c
@@ -6061,7 +6061,8 @@ restart:
 }
 EXPORT_SYMBOL_GPL(x86_emulate_instruction);
 
-int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port)
+static int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size,
+			    unsigned short port)
 {
 	unsigned long val = kvm_register_read(vcpu, VCPU_REGS_RAX);
 	int ret = emulator_pio_out_emulated(&vcpu->arch.emulate_ctxt,
@@ -6070,7 +6071,6 @@ int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port)
 	vcpu->arch.pio.count = 0;
 	return ret;
 }
-EXPORT_SYMBOL_GPL(kvm_fast_pio_out);
 
 static int complete_fast_pio_in(struct kvm_vcpu *vcpu)
 {
@@ -6094,7 +6094,8 @@ static int complete_fast_pio_in(struct kvm_vcpu *vcpu)
 	return 1;
 }
 
-int kvm_fast_pio_in(struct kvm_vcpu *vcpu, int size, unsigned short port)
+static int kvm_fast_pio_in(struct kvm_vcpu *vcpu, int size,
+			   unsigned short port)
 {
 	unsigned long val;
 	int ret;
@@ -6113,7 +6114,21 @@ int kvm_fast_pio_in(struct kvm_vcpu *vcpu, int size, unsigned short port)
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(kvm_fast_pio_in);
+
+int kvm_fast_pio(struct kvm_vcpu *vcpu, int size, unsigned short port, int in)
+{
+	int ret = kvm_skip_emulated_instruction(vcpu);
+
+	/*
+	 * TODO: we might be squashing a KVM_GUESTDBG_SINGLESTEP-triggered
+	 * KVM_EXIT_DEBUG here.
+	 */
+	if (in)
+		return kvm_fast_pio_in(vcpu, size, port) && ret;
+	else
+		return kvm_fast_pio_out(vcpu, size, port) && ret;
+}
+EXPORT_SYMBOL_GPL(kvm_fast_pio);
 
 static int kvmclock_cpu_down_prep(unsigned int cpu)
 {