kvm: Convert kvm_lock to a mutex
It doesn't seem as if there is any particular need for kvm_lock to be a spinlock, so convert the lock to a mutex so that sleepable functions (in particular cond_resched()) can be called while holding it.

Signed-off-by: Junaid Shahid <junaids@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 1ae4de23ed
commit 0d9ce162cf
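To illustrate the motivation (a minimal sketch with hypothetical per-VM work, not code from this patch): a spin_lock() critical section runs in atomic context and must not sleep, so cond_resched() there would be a "scheduling while atomic" bug, while the same walk under a mutex may yield the CPU:

/*
 * Hypothetical walker over vm_list, for illustration only.
 * With kvm_lock as a mutex, a long walk can reschedule between
 * VMs; under the old spinlock, cond_resched() here would have
 * been a bug, since preemption is disabled while it is held.
 */
static void walk_all_vms(void)
{
	struct kvm *kvm;

	mutex_lock(&kvm_lock);		/* may sleep; preemption stays enabled */
	list_for_each_entry(kvm, &vm_list, vm_list) {
		/* ... potentially long per-VM work ... */
		cond_resched();		/* legal under a mutex, never under a spinlock */
	}
	mutex_unlock(&kvm_lock);
}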
@@ -15,8 +15,6 @@ The acquisition orders for mutexes are as follows:
 
 On x86, vcpu->mutex is taken outside kvm->arch.hyperv.hv_lock.
 
-For spinlocks, kvm_lock is taken outside kvm->mmu_lock.
-
 Everything else is a leaf: no other lock is taken inside the critical
 sections.
 
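(The two lines above are removed outright: kvm_lock no longer belongs in the spinlock ordering list once it is a mutex.)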
@@ -169,7 +167,7 @@ which time it will be set using the Dirty tracking mechanism described above.
 ------------
 
 Name:		kvm_lock
-Type:		spinlock_t
+Type:		mutex
 Arch:		any
 Protects:	- vm_list
 
@@ -2423,13 +2423,13 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 		kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags);
 		if (!kvm->arch.sca)
 			goto out_err;
-		spin_lock(&kvm_lock);
+		mutex_lock(&kvm_lock);
 		sca_offset += 16;
 		if (sca_offset + sizeof(struct bsca_block) > PAGE_SIZE)
 			sca_offset = 0;
 		kvm->arch.sca = (struct bsca_block *)
 			((char *) kvm->arch.sca + sca_offset);
-		spin_unlock(&kvm_lock);
+		mutex_unlock(&kvm_lock);
 	}
 
 	sprintf(debug_name, "kvm-%u", current->pid);
@@ -5956,7 +5956,7 @@ mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 	int nr_to_scan = sc->nr_to_scan;
 	unsigned long freed = 0;
 
-	spin_lock(&kvm_lock);
+	mutex_lock(&kvm_lock);
 
 	list_for_each_entry(kvm, &vm_list, vm_list) {
 		int idx;
@@ -5998,7 +5998,7 @@ mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 		break;
 	}
 
-	spin_unlock(&kvm_lock);
+	mutex_unlock(&kvm_lock);
 	return freed;
 }
 
@@ -6719,7 +6719,7 @@ static void kvm_hyperv_tsc_notifier(void)
 	struct kvm_vcpu *vcpu;
 	int cpu;
 
-	spin_lock(&kvm_lock);
+	mutex_lock(&kvm_lock);
 	list_for_each_entry(kvm, &vm_list, vm_list)
 		kvm_make_mclock_inprogress_request(kvm);
 
@@ -6745,7 +6745,7 @@ static void kvm_hyperv_tsc_notifier(void)
 
 		spin_unlock(&ka->pvclock_gtod_sync_lock);
 	}
-	spin_unlock(&kvm_lock);
+	mutex_unlock(&kvm_lock);
 }
 #endif
 
@@ -6796,17 +6796,17 @@ static void __kvmclock_cpufreq_notifier(struct cpufreq_freqs *freq, int cpu)
 
 	smp_call_function_single(cpu, tsc_khz_changed, freq, 1);
 
-	spin_lock(&kvm_lock);
+	mutex_lock(&kvm_lock);
 	list_for_each_entry(kvm, &vm_list, vm_list) {
 		kvm_for_each_vcpu(i, vcpu, kvm) {
 			if (vcpu->cpu != cpu)
 				continue;
 			kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
-			if (vcpu->cpu != smp_processor_id())
+			if (vcpu->cpu != raw_smp_processor_id())
 				send_ipi = 1;
 		}
 	}
-	spin_unlock(&kvm_lock);
+	mutex_unlock(&kvm_lock);
 
 	if (freq->old < freq->new && send_ipi) {
 		/*
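A side effect visible in the hunk above: a mutex critical section is preemptible, so smp_processor_id(), which asserts that the caller cannot migrate, would warn when CONFIG_DEBUG_PREEMPT is enabled. Roughly (an illustrative sketch, not patch code):

	mutex_lock(&kvm_lock);
	/*
	 * The task may migrate between CPUs at any point here, so
	 * smp_processor_id() would trigger a DEBUG_PREEMPT warning.
	 * raw_smp_processor_id() skips that check and may return a
	 * stale CPU number; the IPI decision above tolerates this
	 * (worst case is one unnecessary clock-update IPI).
	 */
	cpu = raw_smp_processor_id();
	mutex_unlock(&kvm_lock);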
@@ -6929,12 +6929,12 @@ static void pvclock_gtod_update_fn(struct work_struct *work)
 	struct kvm_vcpu *vcpu;
 	int i;
 
-	spin_lock(&kvm_lock);
+	mutex_lock(&kvm_lock);
 	list_for_each_entry(kvm, &vm_list, vm_list)
 		kvm_for_each_vcpu(i, vcpu, kvm)
 			kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
 	atomic_set(&kvm_guest_has_master_clock, 0);
-	spin_unlock(&kvm_lock);
+	mutex_unlock(&kvm_lock);
 }
 
 static DECLARE_WORK(pvclock_gtod_work, pvclock_gtod_update_fn);
@@ -162,7 +162,7 @@ static inline bool is_error_page(struct page *page)
 
 extern struct kmem_cache *kvm_vcpu_cache;
 
-extern spinlock_t kvm_lock;
+extern struct mutex kvm_lock;
 extern struct list_head vm_list;
 
 struct kvm_io_range {
@@ -98,7 +98,7 @@ EXPORT_SYMBOL_GPL(halt_poll_ns_shrink);
  *	kvm->lock --> kvm->slots_lock --> kvm->irq_lock
  */
 
-DEFINE_SPINLOCK(kvm_lock);
+DEFINE_MUTEX(kvm_lock);
 static DEFINE_RAW_SPINLOCK(kvm_count_lock);
 LIST_HEAD(vm_list);
 
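Note that kvm_count_lock, directly below the converted definition, stays a raw spinlock; presumably its critical sections still run in contexts that cannot sleep, so only kvm_lock itself is converted.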
@@ -683,9 +683,9 @@ static struct kvm *kvm_create_vm(unsigned long type)
 	if (r)
 		goto out_err;
 
-	spin_lock(&kvm_lock);
+	mutex_lock(&kvm_lock);
 	list_add(&kvm->vm_list, &vm_list);
-	spin_unlock(&kvm_lock);
+	mutex_unlock(&kvm_lock);
 
 	preempt_notifier_inc();
 
@@ -731,9 +731,9 @@ static void kvm_destroy_vm(struct kvm *kvm)
 	kvm_uevent_notify_change(KVM_EVENT_DESTROY_VM, kvm);
 	kvm_destroy_vm_debugfs(kvm);
 	kvm_arch_sync_events(kvm);
-	spin_lock(&kvm_lock);
+	mutex_lock(&kvm_lock);
 	list_del(&kvm->vm_list);
-	spin_unlock(&kvm_lock);
+	mutex_unlock(&kvm_lock);
 	kvm_free_irq_routing(kvm);
 	for (i = 0; i < KVM_NR_BUSES; i++) {
 		struct kvm_io_bus *bus = kvm_get_bus(kvm, i);
@@ -4034,13 +4034,13 @@ static int vm_stat_get(void *_offset, u64 *val)
 	u64 tmp_val;
 
 	*val = 0;
-	spin_lock(&kvm_lock);
+	mutex_lock(&kvm_lock);
 	list_for_each_entry(kvm, &vm_list, vm_list) {
 		stat_tmp.kvm = kvm;
 		vm_stat_get_per_vm((void *)&stat_tmp, &tmp_val);
 		*val += tmp_val;
 	}
-	spin_unlock(&kvm_lock);
+	mutex_unlock(&kvm_lock);
 	return 0;
 }
 
@@ -4053,12 +4053,12 @@ static int vm_stat_clear(void *_offset, u64 val)
 	if (val)
 		return -EINVAL;
 
-	spin_lock(&kvm_lock);
+	mutex_lock(&kvm_lock);
 	list_for_each_entry(kvm, &vm_list, vm_list) {
 		stat_tmp.kvm = kvm;
 		vm_stat_clear_per_vm((void *)&stat_tmp, 0);
 	}
-	spin_unlock(&kvm_lock);
+	mutex_unlock(&kvm_lock);
 
 	return 0;
 }
@@ -4073,13 +4073,13 @@ static int vcpu_stat_get(void *_offset, u64 *val)
 	u64 tmp_val;
 
 	*val = 0;
-	spin_lock(&kvm_lock);
+	mutex_lock(&kvm_lock);
 	list_for_each_entry(kvm, &vm_list, vm_list) {
 		stat_tmp.kvm = kvm;
 		vcpu_stat_get_per_vm((void *)&stat_tmp, &tmp_val);
 		*val += tmp_val;
 	}
-	spin_unlock(&kvm_lock);
+	mutex_unlock(&kvm_lock);
 	return 0;
 }
 
@@ -4092,12 +4092,12 @@ static int vcpu_stat_clear(void *_offset, u64 val)
 	if (val)
 		return -EINVAL;
 
-	spin_lock(&kvm_lock);
+	mutex_lock(&kvm_lock);
 	list_for_each_entry(kvm, &vm_list, vm_list) {
 		stat_tmp.kvm = kvm;
 		vcpu_stat_clear_per_vm((void *)&stat_tmp, 0);
 	}
-	spin_unlock(&kvm_lock);
+	mutex_unlock(&kvm_lock);
 
 	return 0;
 }
@@ -4118,7 +4118,7 @@ static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm)
 	if (!kvm_dev.this_device || !kvm)
 		return;
 
-	spin_lock(&kvm_lock);
+	mutex_lock(&kvm_lock);
 	if (type == KVM_EVENT_CREATE_VM) {
 		kvm_createvm_count++;
 		kvm_active_vms++;
@@ -4127,7 +4127,7 @@ static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm)
 	}
 	created = kvm_createvm_count;
 	active = kvm_active_vms;
-	spin_unlock(&kvm_lock);
+	mutex_unlock(&kvm_lock);
 
 	env = kzalloc(sizeof(*env), GFP_KERNEL_ACCOUNT);
 	if (!env)