KVM: ia64: Don't hold slots_lock in guest mode

Reorder locking to avoid holding the slots_lock when entering
the guest.

Signed-off-by: Jes Sorensen <jes@sgi.com>
Acked-by: Xiantao Zhang <xiantao.zhang@intel.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
This commit is contained in:
Jes Sorensen 2009-04-16 10:43:48 +02:00 committed by Avi Kivity
parent 463656c000
commit c6b60c6921
1 changed file with 33 additions and 31 deletions

View File

@@ -632,34 +632,22 @@ static int kvm_vcpu_pre_transition(struct kvm_vcpu *vcpu)
vti_set_rr6(vcpu->arch.vmm_rr); vti_set_rr6(vcpu->arch.vmm_rr);
return kvm_insert_vmm_mapping(vcpu); return kvm_insert_vmm_mapping(vcpu);
} }
static void kvm_vcpu_post_transition(struct kvm_vcpu *vcpu) static void kvm_vcpu_post_transition(struct kvm_vcpu *vcpu)
{ {
kvm_purge_vmm_mapping(vcpu); kvm_purge_vmm_mapping(vcpu);
vti_set_rr6(vcpu->arch.host_rr6); vti_set_rr6(vcpu->arch.host_rr6);
} }
static int vti_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{ {
union context *host_ctx, *guest_ctx; union context *host_ctx, *guest_ctx;
int r; int r;
/*Get host and guest context with guest address space.*/ /*
host_ctx = kvm_get_host_context(vcpu); * down_read() may sleep and return with interrupts enabled
guest_ctx = kvm_get_guest_context(vcpu); */
down_read(&vcpu->kvm->slots_lock);
r = kvm_vcpu_pre_transition(vcpu);
if (r < 0)
goto out;
kvm_vmm_info->tramp_entry(host_ctx, guest_ctx);
kvm_vcpu_post_transition(vcpu);
r = 0;
out:
return r;
}
static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
int r;
again: again:
if (signal_pending(current)) { if (signal_pending(current)) {
@@ -668,23 +656,28 @@ again:
goto out; goto out;
} }
/*
* down_read() may sleep and return with interrupts enabled
*/
down_read(&vcpu->kvm->slots_lock);
preempt_disable(); preempt_disable();
local_irq_disable(); local_irq_disable();
/*Get host and guest context with guest address space.*/
host_ctx = kvm_get_host_context(vcpu);
guest_ctx = kvm_get_guest_context(vcpu);
vcpu->guest_mode = 1; vcpu->guest_mode = 1;
r = kvm_vcpu_pre_transition(vcpu);
if (r < 0)
goto vcpu_run_fail;
up_read(&vcpu->kvm->slots_lock);
kvm_guest_enter(); kvm_guest_enter();
r = vti_vcpu_run(vcpu, kvm_run);
if (r < 0) { /*
local_irq_enable(); * Transition to the guest
preempt_enable(); */
kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY; kvm_vmm_info->tramp_entry(host_ctx, guest_ctx);
goto out;
} kvm_vcpu_post_transition(vcpu);
vcpu->arch.launched = 1; vcpu->arch.launched = 1;
vcpu->guest_mode = 0; vcpu->guest_mode = 0;
@@ -698,9 +691,10 @@ again:
*/ */
barrier(); barrier();
kvm_guest_exit(); kvm_guest_exit();
up_read(&vcpu->kvm->slots_lock);
preempt_enable(); preempt_enable();
down_read(&vcpu->kvm->slots_lock);
r = kvm_handle_exit(kvm_run, vcpu); r = kvm_handle_exit(kvm_run, vcpu);
if (r > 0) { if (r > 0) {
@@ -709,12 +703,20 @@ again:
} }
out: out:
up_read(&vcpu->kvm->slots_lock);
if (r > 0) { if (r > 0) {
kvm_resched(vcpu); kvm_resched(vcpu);
down_read(&vcpu->kvm->slots_lock);
goto again; goto again;
} }
return r; return r;
vcpu_run_fail:
local_irq_enable();
preempt_enable();
kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
goto out;
} }
static void kvm_set_mmio_data(struct kvm_vcpu *vcpu) static void kvm_set_mmio_data(struct kvm_vcpu *vcpu)