amdgpu: remove -EAGAIN handling for hmm_range_fault

hmm_range_fault can only return -EAGAIN if called with the
HMM_FAULT_ALLOW_RETRY flag, which amdgpu never does.  Remove the handling
for the -EAGAIN case with its non-standard locking scheme.

Link: https://lore.kernel.org/r/20190806160554.14046-2-hch@lst.de
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Jason Gunthorpe <jgg@mellanox.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
This commit is contained in:
Christoph Hellwig 2019-08-06 19:05:39 +03:00 committed by Jason Gunthorpe
parent cc374377a1
commit 9d0a16658f
1 changed file with 3 additions and 20 deletions

View File

@@ -778,7 +778,6 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
struct hmm_range *range;
unsigned long i;
uint64_t *pfns;
int retry = 0;
int r = 0;
if (!mm) /* Happens during process shutdown */
@@ -822,7 +821,6 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
hmm_range_register(range, mirror, start,
start + ttm->num_pages * PAGE_SIZE, PAGE_SHIFT);
retry:
/*
* Just wait for range to be valid, safe to ignore return value as we
* will use the return value of hmm_range_fault() below under the
@@ -831,24 +829,12 @@ retry:
hmm_range_wait_until_valid(range, HMM_RANGE_DEFAULT_TIMEOUT);
down_read(&mm->mmap_sem);
r = hmm_range_fault(range, 0);
if (unlikely(r < 0)) {
if (likely(r == -EAGAIN)) {
/*
* return -EAGAIN, mmap_sem is dropped
*/
if (retry++ < MAX_RETRY_HMM_RANGE_FAULT)
goto retry;
else
pr_err("Retry hmm fault too many times\n");
}
goto out_up_read;
}
up_read(&mm->mmap_sem);
if (unlikely(r < 0))
goto out_free_pfns;
for (i = 0; i < ttm->num_pages; i++) {
pages[i] = hmm_device_entry_to_page(range, pfns[i]);
if (unlikely(!pages[i])) {
@@ -864,9 +850,6 @@ retry:
return 0;
out_up_read:
if (likely(r != -EAGAIN))
up_read(&mm->mmap_sem);
out_free_pfns:
hmm_range_unregister(range);
kvfree(pfns);