powerpc/mm/book3s64/iommu: fix some RCU-list locks
It is safe to traverse mm->context.iommu_group_mem_list with either
mem_list_mutex or the RCU read lock held. Silence a few RCU-list false
positive warnings and fix a few missing RCU read locks.

 arch/powerpc/mm/book3s64/iommu_api.c:330 RCU-list traversed in non-reader section!!

 other info that might help us debug this:

 rcu_scheduler_active = 2, debug_locks = 1
 2 locks held by qemu-kvm/4305:
  #0: c000000bc3fe4d68 (&container->lock){+.+.}-{3:3}, at: tce_iommu_ioctl.part.9+0xc7c/0x1870 [vfio_iommu_spapr_tce]
  #1: c000000001501910 (mem_list_mutex){+.+.}-{3:3}, at: mm_iommu_get+0x50/0x190

 ====

 arch/powerpc/mm/book3s64/iommu_api.c:132 RCU-list traversed in non-reader section!!

 other info that might help us debug this:

 rcu_scheduler_active = 2, debug_locks = 1
 2 locks held by qemu-kvm/4305:
  #0: c000000bc3fe4d68 (&container->lock){+.+.}-{3:3}, at: tce_iommu_ioctl.part.9+0xc7c/0x1870 [vfio_iommu_spapr_tce]
  #1: c000000001501910 (mem_list_mutex){+.+.}-{3:3}, at: mm_iommu_do_alloc+0x120/0x5f0

 ====

 arch/powerpc/mm/book3s64/iommu_api.c:292 RCU-list traversed in non-reader section!!

 other info that might help us debug this:

 rcu_scheduler_active = 2, debug_locks = 1
 2 locks held by qemu-kvm/4312:
  #0: c000000ecafe23c8 (&vcpu->mutex){+.+.}-{3:3}, at: kvm_vcpu_ioctl+0xdc/0x950 [kvm]
  #1: c000000045e6c468 (&kvm->srcu){....}-{0:0}, at: kvmppc_h_put_tce+0x88/0x340 [kvm]

 ====

 arch/powerpc/mm/book3s64/iommu_api.c:424 RCU-list traversed in non-reader section!!

 other info that might help us debug this:

 rcu_scheduler_active = 2, debug_locks = 1
 2 locks held by qemu-kvm/4312:
  #0: c000000ecafe23c8 (&vcpu->mutex){+.+.}-{3:3}, at: kvm_vcpu_ioctl+0xdc/0x950 [kvm]
  #1: c000000045e6c468 (&kvm->srcu){....}-{0:0}, at: kvmppc_h_put_tce+0x88/0x340 [kvm]

Signed-off-by: Qian Cai <cai@lca.pw>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20200510051559.1959-1-cai@lca.pw
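For context, the two locking modes described above map onto two idioms: walk the list inside rcu_read_lock()/rcu_read_unlock(), or walk it while holding the mutex that serialises updates and pass that fact to list_for_each_entry_rcu() as a lockdep expression so CONFIG_PROVE_RCU_LIST does not warn. Below is a minimal sketch of both idioms; the "foo" list, foo_lock and the two helpers are made-up names for illustration, not anything in iommu_api.c.

/*
 * Illustrative only: a hypothetical "foo" list protected by foo_lock
 * for updates and by RCU for lockless readers.
 */
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>

struct foo {
        unsigned long ua;
        struct list_head next;
};

static LIST_HEAD(foo_list);
static DEFINE_MUTEX(foo_lock);

/* Reader side: the RCU read lock alone makes the traversal safe. */
static bool foo_present(unsigned long ua)
{
        struct foo *f;
        bool found = false;

        rcu_read_lock();
        list_for_each_entry_rcu(f, &foo_list, next) {
                if (f->ua == ua) {
                        found = true;
                        break;
                }
        }
        rcu_read_unlock();

        return found;
}

/*
 * Update side: foo_lock already serialises the walk; the optional
 * lockdep condition tells PROVE_RCU_LIST that this is intentional.
 */
static struct foo *foo_find_locked(unsigned long ua)
{
        struct foo *f, *ret = NULL;

        mutex_lock(&foo_lock);
        list_for_each_entry_rcu(f, &foo_list, next,
                        lockdep_is_held(&foo_lock)) {
                if (f->ua == ua) {
                        ret = f;
                        break;
                }
        }
        mutex_unlock(&foo_lock);

        return ret;
}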
commit b5952f8125
parent c9790fb5df
--- a/arch/powerpc/mm/book3s64/iommu_api.c
+++ b/arch/powerpc/mm/book3s64/iommu_api.c
@@ -129,7 +129,8 @@ good_exit:
 
         mutex_lock(&mem_list_mutex);
 
-        list_for_each_entry_rcu(mem2, &mm->context.iommu_group_mem_list, next) {
+        list_for_each_entry_rcu(mem2, &mm->context.iommu_group_mem_list, next,
+                        lockdep_is_held(&mem_list_mutex)) {
                 /* Overlap? */
                 if ((mem2->ua < (ua + (entries << PAGE_SHIFT))) &&
                                 (ua < (mem2->ua +
@@ -289,6 +290,7 @@ struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
 {
         struct mm_iommu_table_group_mem_t *mem, *ret = NULL;
 
+        rcu_read_lock();
         list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next) {
                 if ((mem->ua <= ua) &&
                                 (ua + size <= mem->ua +
@@ -297,6 +299,7 @@ struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
                         break;
                 }
         }
+        rcu_read_unlock();
 
         return ret;
 }
@@ -327,7 +330,8 @@ struct mm_iommu_table_group_mem_t *mm_iommu_get(struct mm_struct *mm,
 
         mutex_lock(&mem_list_mutex);
 
-        list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next) {
+        list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next,
+                        lockdep_is_held(&mem_list_mutex)) {
                 if ((mem->ua == ua) && (mem->entries == entries)) {
                         ret = mem;
                         ++mem->used;
@@ -421,6 +425,7 @@ bool mm_iommu_is_devmem(struct mm_struct *mm, unsigned long hpa,
         struct mm_iommu_table_group_mem_t *mem;
         unsigned long end;
 
+        rcu_read_lock();
         list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next) {
                 if (mem->dev_hpa == MM_IOMMU_TABLE_INVALID_HPA)
                         continue;
@@ -437,6 +442,7 @@ bool mm_iommu_is_devmem(struct mm_struct *mm, unsigned long hpa,
                         return true;
                 }
         }
+        rcu_read_unlock();
 
         return false;
 }