drm/amdgpu: Add helper to wait for BO fences using a sync object
Creates a temporary sync object to wait for the BO reservation.
This generalizes amdgpu_vm_wait_pd.

Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
commit e8e32426b1 (parent 2c11ee6ae5)
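For context, this is roughly how a caller would use the new helper on an arbitrary BO, a minimal sketch rather than code from the patch. The function name example_wait_for_bo_idle and the choice of AMDGPU_FENCE_OWNER_UNDEFINED as the owner argument are illustrative assumptions.

/*
 * Illustrative sketch: a hypothetical caller that wants every fence on
 * a BO's reservation object to signal before touching the BO with the
 * CPU. The helper hides the temporary amdgpu_sync object.
 */
static int example_wait_for_bo_idle(struct amdgpu_bo *bo)
{
	int r;

	/* Collect the fences attached to bo's reservation object into a
	 * temporary sync object and wait for them; intr = true makes the
	 * wait interruptible by signals.
	 */
	r = amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_UNDEFINED, true);
	if (unlikely(r))
		return r;

	/* All previously attached fences have signaled at this point. */
	return 0;
}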
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -1284,6 +1284,30 @@ void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
 	reservation_object_add_excl_fence(resv, fence);
 }
 
+/**
+ * amdgpu_bo_sync_wait - Wait for BO reservation fences
+ *
+ * @bo: buffer object
+ * @owner: fence owner
+ * @intr: Whether the wait is interruptible
+ *
+ * Returns:
+ * 0 on success, errno otherwise.
+ */
+int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr)
+{
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+	struct amdgpu_sync sync;
+	int r;
+
+	amdgpu_sync_create(&sync);
+	amdgpu_sync_resv(adev, &sync, bo->tbo.resv, owner, false);
+	r = amdgpu_sync_wait(&sync, intr);
+	amdgpu_sync_free(&sync);
+
+	return r;
+}
+
 /**
  * amdgpu_bo_gpu_offset - return GPU offset of bo
  * @bo: amdgpu object for which we query the offset
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -266,6 +266,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
 int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
 void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
 		     bool shared);
+int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr);
 u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo);
 int amdgpu_bo_validate(struct amdgpu_bo *bo);
 int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow,
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -1332,31 +1332,6 @@ static void amdgpu_vm_cpu_set_ptes(struct amdgpu_pte_update_params *params,
 	}
 }
 
-
-/**
- * amdgpu_vm_wait_pd - Wait for PT BOs to be free.
- *
- * @adev: amdgpu_device pointer
- * @vm: related vm
- * @owner: fence owner
- *
- * Returns:
- * 0 on success, errno otherwise.
- */
-static int amdgpu_vm_wait_pd(struct amdgpu_device *adev, struct amdgpu_vm *vm,
-			     void *owner)
-{
-	struct amdgpu_sync sync;
-	int r;
-
-	amdgpu_sync_create(&sync);
-	amdgpu_sync_resv(adev, &sync, vm->root.base.bo->tbo.resv, owner, false);
-	r = amdgpu_sync_wait(&sync, true);
-	amdgpu_sync_free(&sync);
-
-	return r;
-}
-
 /**
  * amdgpu_vm_update_func - helper to call update function
  *
@@ -1451,7 +1426,8 @@ restart:
 	params.adev = adev;
 
 	if (vm->use_cpu_for_update) {
-		r = amdgpu_vm_wait_pd(adev, vm, AMDGPU_FENCE_OWNER_VM);
+		r = amdgpu_bo_sync_wait(vm->root.base.bo,
+					AMDGPU_FENCE_OWNER_VM, true);
 		if (unlikely(r))
 			return r;
 
@@ -1784,7 +1760,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 	/* Wait for PT BOs to be idle. PTs share the same resv. object
 	 * as the root PD BO
 	 */
-	r = amdgpu_vm_wait_pd(adev, vm, owner);
+	r = amdgpu_bo_sync_wait(vm->root.base.bo, owner, true);
 	if (unlikely(r))
 		return r;
 