drm/amdgpu: validate shadow as well when validating bo
Signed-off-by: Chunming Zhou <David1.Zhou@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent e7893c4bd3
commit 14fd833efa
@@ -287,34 +287,15 @@ static u64 amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev)
 	return max(bytes_moved_threshold, 1024*1024ull);
 }
 
-int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
-			    struct list_head *validated)
+static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
+				 struct amdgpu_bo *bo)
 {
-	struct amdgpu_bo_list_entry *lobj;
 	u64 initial_bytes_moved;
+	uint32_t domain;
 	int r;
 
-	list_for_each_entry(lobj, validated, tv.head) {
-		struct amdgpu_bo *bo = lobj->robj;
-		bool binding_userptr = false;
-		struct mm_struct *usermm;
-		uint32_t domain;
-
-		usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
-		if (usermm && usermm != current->mm)
-			return -EPERM;
-
-		/* Check if we have user pages and nobody bound the BO already */
-		if (lobj->user_pages && bo->tbo.ttm->state != tt_bound) {
-			size_t size = sizeof(struct page *);
-
-			size *= bo->tbo.ttm->num_pages;
-			memcpy(bo->tbo.ttm->pages, lobj->user_pages, size);
-			binding_userptr = true;
-		}
-
-		if (bo->pin_count)
-			continue;
+	if (bo->pin_count)
+		return 0;
 
-		/* Avoid moving this one if we have moved too many buffers
-		 * for this IB already.
+	/* Avoid moving this one if we have moved too many buffers
+	 * for this IB already.
@@ -329,7 +310,7 @@ int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
-		else
-			domain = bo->allowed_domains;
-
-	retry:
-		amdgpu_ttm_placement_from_domain(bo, domain);
-		initial_bytes_moved = atomic64_read(&bo->adev->num_bytes_moved);
-		r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
+	else
+		domain = bo->allowed_domains;
+
+retry:
+	amdgpu_ttm_placement_from_domain(bo, domain);
+	initial_bytes_moved = atomic64_read(&bo->adev->num_bytes_moved);
+	r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
@@ -341,6 +322,41 @@ int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
 		domain = bo->allowed_domains;
 		goto retry;
 	}
-		return r;
-	}
+
+	return r;
+}
+
+int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
+			    struct list_head *validated)
+{
+	struct amdgpu_bo_list_entry *lobj;
+	int r;
+
+	list_for_each_entry(lobj, validated, tv.head) {
+		struct amdgpu_bo *bo = lobj->robj;
+		bool binding_userptr = false;
+		struct mm_struct *usermm;
+
+		usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
+		if (usermm && usermm != current->mm)
+			return -EPERM;
+
+		/* Check if we have user pages and nobody bound the BO already */
+		if (lobj->user_pages && bo->tbo.ttm->state != tt_bound) {
+			size_t size = sizeof(struct page *);
+
+			size *= bo->tbo.ttm->num_pages;
+			memcpy(bo->tbo.ttm->pages, lobj->user_pages, size);
+			binding_userptr = true;
+		}
+
+		r = amdgpu_cs_bo_validate(p, bo);
+		if (r)
+			return r;
+		if (bo->shadow) {
+			r = amdgpu_cs_bo_validate(p, bo);
+			if (r)
+				return r;
+		}
+
 
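Taken together, the hunks (apparently from amdgpu_cs.c) move the per-BO placement logic into the new amdgpu_cs_bo_validate() helper and have amdgpu_cs_list_validate() call it for every entry, plus once more when the entry carries a shadow BO. The standalone C sketch below only models that control flow: struct bo and validate_bo() are made-up stand-ins rather than the kernel types, and the second call is given the shadow object itself, which is what the subject line describes.

#include <stdio.h>

/* Hypothetical stand-in for struct amdgpu_bo and its optional shadow copy. */
struct bo {
	const char *name;
	int pin_count;
	struct bo *shadow;	/* NULL when the BO has no shadow */
};

/* Models amdgpu_cs_bo_validate(): pinned BOs are accepted as-is,
 * anything else would be placed and validated here. Returns 0 on success. */
static int validate_bo(struct bo *bo)
{
	if (bo->pin_count)
		return 0;
	printf("validating %s\n", bo->name);
	return 0;
}

/* Models the post-patch loop body: validate the BO, then its shadow. */
static int validate_entry(struct bo *bo)
{
	int r = validate_bo(bo);

	if (r)
		return r;
	if (bo->shadow)
		r = validate_bo(bo->shadow);
	return r;
}

int main(void)
{
	struct bo shadow = { "page-table shadow", 0, NULL };
	struct bo bo = { "page-table", 0, &shadow };

	return validate_entry(&bo);
}

Keeping the placement logic in one helper means the shadow BO goes through exactly the same policy as its parent: the bytes-moved throttling and the retry with bo->allowed_domains shown in the hunks above apply to both calls.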