KVM: PPC: Book3S: Use new mutex to synchronize access to rtas token list
Currently the Book 3S KVM code uses kvm->lock to synchronize access to the kvm->arch.rtas_tokens list. Because this list is scanned inside kvmppc_rtas_hcall(), which is called with the vcpu mutex held, taking kvm->lock causes a lock inversion problem, which could lead to a deadlock. To fix this, we add a new mutex, kvm->arch.rtas_token_lock, which nests inside the vcpu mutexes, and use that instead of kvm->lock when accessing the rtas token list. This removes the lockdep_assert_held() in kvmppc_rtas_tokens_free(). At this point we don't hold the new mutex, but that is OK because kvmppc_rtas_tokens_free() is only called when the whole VM is being destroyed, and at that point nothing can be looking up a token in the list. Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
This commit is contained in:
parent
0d4ee88d92
commit
1659e27d2b
|
@ -309,6 +309,7 @@ struct kvm_arch {
|
|||
#ifdef CONFIG_PPC_BOOK3S_64
|
||||
struct list_head spapr_tce_tables;
|
||||
struct list_head rtas_tokens;
|
||||
+	struct mutex rtas_token_lock;
|
||||
DECLARE_BITMAP(enabled_hcalls, MAX_HCALL_OPCODE/4 + 1);
|
||||
#endif
|
||||
#ifdef CONFIG_KVM_MPIC
|
||||
|
|
|
@ -902,6 +902,7 @@ int kvmppc_core_init_vm(struct kvm *kvm)
|
|||
#ifdef CONFIG_PPC64
|
||||
INIT_LIST_HEAD_RCU(&kvm->arch.spapr_tce_tables);
|
||||
INIT_LIST_HEAD(&kvm->arch.rtas_tokens);
|
||||
+	mutex_init(&kvm->arch.rtas_token_lock);
|
||||
#endif
|
||||
|
||||
return kvm->arch.kvm_ops->init_vm(kvm);
|
||||
|
|
|
@ -146,7 +146,7 @@ static int rtas_token_undefine(struct kvm *kvm, char *name)
|
|||
{
|
||||
struct rtas_token_definition *d, *tmp;
|
||||
|
||||
-	lockdep_assert_held(&kvm->lock);
+	lockdep_assert_held(&kvm->arch.rtas_token_lock);
|
||||
|
||||
list_for_each_entry_safe(d, tmp, &kvm->arch.rtas_tokens, list) {
|
||||
if (rtas_name_matches(d->handler->name, name)) {
|
||||
|
@ -167,7 +167,7 @@ static int rtas_token_define(struct kvm *kvm, char *name, u64 token)
|
|||
bool found;
|
||||
int i;
|
||||
|
||||
-	lockdep_assert_held(&kvm->lock);
+	lockdep_assert_held(&kvm->arch.rtas_token_lock);
|
||||
|
||||
list_for_each_entry(d, &kvm->arch.rtas_tokens, list) {
|
||||
if (d->token == token)
|
||||
|
@ -206,14 +206,14 @@ int kvm_vm_ioctl_rtas_define_token(struct kvm *kvm, void __user *argp)
|
|||
if (copy_from_user(&args, argp, sizeof(args)))
|
||||
return -EFAULT;
|
||||
|
||||
-	mutex_lock(&kvm->lock);
+	mutex_lock(&kvm->arch.rtas_token_lock);
|
||||
|
||||
if (args.token)
|
||||
rc = rtas_token_define(kvm, args.name, args.token);
|
||||
else
|
||||
rc = rtas_token_undefine(kvm, args.name);
|
||||
|
||||
-	mutex_unlock(&kvm->lock);
+	mutex_unlock(&kvm->arch.rtas_token_lock);
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
@ -245,7 +245,7 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
|
|||
orig_rets = args.rets;
|
||||
args.rets = &args.args[be32_to_cpu(args.nargs)];
|
||||
|
||||
-	mutex_lock(&vcpu->kvm->lock);
+	mutex_lock(&vcpu->kvm->arch.rtas_token_lock);
|
||||
|
||||
rc = -ENOENT;
|
||||
list_for_each_entry(d, &vcpu->kvm->arch.rtas_tokens, list) {
|
||||
|
@ -256,7 +256,7 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
|
|||
}
|
||||
}
|
||||
|
||||
-	mutex_unlock(&vcpu->kvm->lock);
+	mutex_unlock(&vcpu->kvm->arch.rtas_token_lock);
|
||||
|
||||
if (rc == 0) {
|
||||
args.rets = orig_rets;
|
||||
|
@ -282,8 +282,6 @@ void kvmppc_rtas_tokens_free(struct kvm *kvm)
|
|||
{
|
||||
struct rtas_token_definition *d, *tmp;
|
||||
|
||||
-	lockdep_assert_held(&kvm->lock);
|
||||
|
||||
list_for_each_entry_safe(d, tmp, &kvm->arch.rtas_tokens, list) {
|
||||
list_del(&d->list);
|
||||
kfree(d);
|
||||
|
|
Loading…
Reference in New Issue