drm/amdgpu: Call find_vma under mmap_sem

find_vma() must be called under the mmap_sem, reorganize this code to
do the vma check after entering the lock.

Further, fix the unlocked use of struct task_struct's mm, instead use
the mm from hmm_mirror which has an active mm_grab. Also the mm_grab
must be converted to a mm_get before acquiring mmap_sem or calling
find_vma().

Fixes: 66c45500bf ("drm/amdgpu: use new HMM APIs and helpers")
Fixes: 0919195f2b ("drm/amdgpu: Enable amdgpu_ttm_tt_get_user_pages in worker threads")
Link: https://lore.kernel.org/r/20191112202231.3856-11-jgg@ziepe.ca
Acked-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Reviewed-by: Philip Yang <Philip.Yang@amd.com>
Tested-by: Philip Yang <Philip.Yang@amd.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
This commit is contained in:
Jason Gunthorpe 2019-11-12 16:22:27 -04:00
parent 20fef4ef84
commit a9ae8731e6
1 changed file with 21 additions and 16 deletions

View File

@@ -35,6 +35,7 @@
 #include <linux/hmm.h>
 #include <linux/pagemap.h>
 #include <linux/sched/task.h>
+#include <linux/sched/mm.h>
 #include <linux/seq_file.h>
 #include <linux/slab.h>
 #include <linux/swap.h>
@@ -788,7 +789,7 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
 	struct hmm_mirror *mirror = bo->mn ? &bo->mn->mirror : NULL;
 	struct ttm_tt *ttm = bo->tbo.ttm;
 	struct amdgpu_ttm_tt *gtt = (void *)ttm;
-	struct mm_struct *mm = gtt->usertask->mm;
+	struct mm_struct *mm;
 	unsigned long start = gtt->userptr;
 	struct vm_area_struct *vma;
 	struct hmm_range *range;
@@ -796,25 +797,14 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
 	uint64_t *pfns;
 	int r = 0;
 
-	if (!mm) /* Happens during process shutdown */
-		return -ESRCH;
-
 	if (unlikely(!mirror)) {
 		DRM_DEBUG_DRIVER("Failed to get hmm_mirror\n");
-		r = -EFAULT;
-		goto out;
+		return -EFAULT;
 	}
 
-	vma = find_vma(mm, start);
-	if (unlikely(!vma || start < vma->vm_start)) {
-		r = -EFAULT;
-		goto out;
-	}
-	if (unlikely((gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) &&
-		vma->vm_file)) {
-		r = -EPERM;
-		goto out;
-	}
+	mm = mirror->hmm->mmu_notifier.mm;
+	if (!mmget_not_zero(mm)) /* Happens during process shutdown */
+		return -ESRCH;
 
 	range = kzalloc(sizeof(*range), GFP_KERNEL);
 	if (unlikely(!range)) {
@@ -847,6 +837,17 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
 	hmm_range_wait_until_valid(range, HMM_RANGE_DEFAULT_TIMEOUT);
 
 	down_read(&mm->mmap_sem);
+	vma = find_vma(mm, start);
+	if (unlikely(!vma || start < vma->vm_start)) {
+		r = -EFAULT;
+		goto out_unlock;
+	}
+	if (unlikely((gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) &&
+		vma->vm_file)) {
+		r = -EPERM;
+		goto out_unlock;
+	}
 	r = hmm_range_fault(range, 0);
 	up_read(&mm->mmap_sem);
@@ -865,15 +866,19 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
 	}
 
 	gtt->range = range;
+	mmput(mm);
 
 	return 0;
 
+out_unlock:
+	up_read(&mm->mmap_sem);
 out_free_pfns:
 	hmm_range_unregister(range);
 	kvfree(pfns);
 out_free_ranges:
 	kfree(range);
 out:
+	mmput(mm);
 	return r;
 }