KVM: make processes waiting on vcpu mutex killable

The vcpu mutex can be held for an unlimited time, so
taking it with mutex_lock on an ioctl is wrong:
one process could be passed a vcpu fd and
call this ioctl on the vcpu used by another process;
it will then be unkillable until the owner exits.

Call mutex_lock_killable instead and return status.
Note: mutex_lock_interruptible would be even nicer,
but I am not sure all users are prepared to handle EINTR
from these ioctls. They might misinterpret it as an error.

Cleanup paths expect a vcpu that can't be used by
any userspace so this will always succeed - catch bugs
by calling BUG_ON.

Catch callers that don't check return state by adding
__must_check.

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
This commit is contained in:
Michael S. Tsirkin 2012-09-16 11:50:30 +03:00 committed by Marcelo Tosatti
parent 7454766f7b
commit 9fc77441e5
3 changed files with 17 additions and 7 deletions

View File

@@ -6016,7 +6016,9 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 	int r;
 
 	vcpu->arch.mtrr_state.have_fixed = 1;
-	vcpu_load(vcpu);
+	r = vcpu_load(vcpu);
+	if (r)
+		return r;
 	r = kvm_arch_vcpu_reset(vcpu);
 	if (r == 0)
 		r = kvm_mmu_setup(vcpu);
@@ -6027,9 +6029,11 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 {
+	int r;
+
 	vcpu->arch.apf.msr_val = 0;
 
-	vcpu_load(vcpu);
+	r = vcpu_load(vcpu);
+	BUG_ON(r);
 	kvm_mmu_unload(vcpu);
 	vcpu_put(vcpu);
@@ -6275,7 +6279,9 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
 {
-	vcpu_load(vcpu);
+	int r;
+
+	r = vcpu_load(vcpu);
+	BUG_ON(r);
 	kvm_mmu_unload(vcpu);
 	vcpu_put(vcpu);
 }

View File

@@ -408,7 +408,7 @@ static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
 int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
 void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
 
-void vcpu_load(struct kvm_vcpu *vcpu);
+int __must_check vcpu_load(struct kvm_vcpu *vcpu);
 void vcpu_put(struct kvm_vcpu *vcpu);
 
 int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,

View File

@@ -131,11 +131,12 @@ bool kvm_is_mmio_pfn(pfn_t pfn)
 /*
  * Switches to specified vcpu, until a matching vcpu_put()
  */
-void vcpu_load(struct kvm_vcpu *vcpu)
+int vcpu_load(struct kvm_vcpu *vcpu)
 {
 	int cpu;
 
-	mutex_lock(&vcpu->mutex);
+	if (mutex_lock_killable(&vcpu->mutex))
+		return -EINTR;
 	if (unlikely(vcpu->pid != current->pids[PIDTYPE_PID].pid)) {
 		/* The thread running this VCPU changed. */
 		struct pid *oldpid = vcpu->pid;
@@ -148,6 +149,7 @@ void vcpu_load(struct kvm_vcpu *vcpu)
 	preempt_notifier_register(&vcpu->preempt_notifier);
 	kvm_arch_vcpu_load(vcpu, cpu);
 	put_cpu();
+	return 0;
 }
void vcpu_put(struct kvm_vcpu *vcpu) void vcpu_put(struct kvm_vcpu *vcpu)
@@ -1891,7 +1893,9 @@ static long kvm_vcpu_ioctl(struct file *filp,
 #endif
 
-	vcpu_load(vcpu);
+	r = vcpu_load(vcpu);
+	if (r)
+		return r;
 	switch (ioctl) {
 	case KVM_RUN:
 		r = -EINVAL;