drm/amdgpu: rework synchronization of VM updates v4

If provided, we only sync to the BO's reservation object and no longer to the root PD.

v2: update comment, cleanup amdgpu_bo_sync_wait_resv
v3: use correct reservation object while clearing
v4: fix typo in amdgpu_bo_sync_wait_resv

Signed-off-by: Christian König <christian.koenig@amd.com>
Tested-by: Tom St Denis <tom.stdenis@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
commit 9f3cc18d19
parent 4939d973b6
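The heart of the rework is the prepare() callback contract: instead of a fence owner plus an optional exclusive fence, callers now hand the update backend the reservation object to synchronize against and an amdgpu_sync_mode, where a NULL reservation object means "do not sync at all" (the direct submission path during a page fault). The stand-alone sketch below only illustrates that contract under stated assumptions; the enum values and the mapping/unmapping decision mirror the patch, but the helper names, stand-in types and printf output are hypothetical and not part of the kernel code.

/* Minimal user-space sketch (not from the patch) of the reworked prepare()
 * contract.  Types are stand-ins for the kernel ones.
 */
#include <stdbool.h>
#include <stdio.h>

enum sync_mode { SYNC_ALWAYS, SYNC_NE_OWNER, SYNC_EQ_OWNER, SYNC_EXPLICIT };

struct resv { const char *name; };	/* stand-in for struct dma_resv */

/* Mirrors the decision amdgpu_vm_bo_update_mapping() makes after the patch:
 * unmapping implicitly syncs to command submissions in the same VM,
 * mapping only syncs to explicitly handed-in (moving) fences.
 */
static enum sync_mode pick_sync_mode(bool pte_valid)
{
	return pte_valid ? SYNC_EXPLICIT : SYNC_EQ_OWNER;
}

/* Mirrors the shape of the reworked prepare() callback. */
static int prepare(struct resv *resv, enum sync_mode mode)
{
	if (!resv)	/* direct submission during a page fault */
		return 0;
	printf("sync to %s with mode %d\n", resv->name, mode);
	return 0;
}

int main(void)
{
	struct resv bo_resv = { "bo->tbo.base.resv" };

	prepare(&bo_resv, pick_sync_mode(true));	/* mapping */
	prepare(&bo_resv, pick_sync_mode(false));	/* unmapping */
	prepare(NULL, SYNC_EXPLICIT);			/* page-fault path */
	return 0;
}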
@@ -1403,28 +1403,49 @@ void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
 }
 
 /**
- * amdgpu_sync_wait_resv - Wait for BO reservation fences
+ * amdgpu_bo_sync_wait_resv - Wait for BO reservation fences
  *
- * @bo: buffer object
+ * @adev: amdgpu device pointer
+ * @resv: reservation object to sync to
+ * @sync_mode: synchronization mode
  * @owner: fence owner
  * @intr: Whether the wait is interruptible
  *
+ * Extract the fences from the reservation object and waits for them to finish.
+ *
  * Returns:
  * 0 on success, errno otherwise.
  */
+int amdgpu_bo_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv,
+			     enum amdgpu_sync_mode sync_mode, void *owner,
+			     bool intr)
+{
+	struct amdgpu_sync sync;
+	int r;
+
+	amdgpu_sync_create(&sync);
+	amdgpu_sync_resv(adev, &sync, resv, sync_mode, owner);
+	r = amdgpu_sync_wait(&sync, intr);
+	amdgpu_sync_free(&sync);
+	return r;
+}
+
+/**
+ * amdgpu_bo_sync_wait - Wrapper for amdgpu_bo_sync_wait_resv
+ * @bo: buffer object to wait for
+ * @owner: fence owner
+ * @intr: Whether the wait is interruptible
+ *
+ * Wrapper to wait for fences in a BO.
+ * Returns:
+ * 0 on success, errno otherwise.
+ */
 int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr)
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
-	struct amdgpu_sync sync;
-	int r;
-
-	amdgpu_sync_create(&sync);
-	amdgpu_sync_resv(adev, &sync, bo->tbo.base.resv,
-			 AMDGPU_SYNC_NE_OWNER, owner);
-	r = amdgpu_sync_wait(&sync, intr);
-	amdgpu_sync_free(&sync);
-
-	return r;
+
+	return amdgpu_bo_sync_wait_resv(adev, bo->tbo.base.resv,
+					AMDGPU_SYNC_NE_OWNER, owner, intr);
 }
 
 /**
@@ -277,6 +277,9 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo);
 int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
 void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
 		     bool shared);
+int amdgpu_bo_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv,
+			     enum amdgpu_sync_mode sync_mode, void *owner,
+			     bool intr);
 int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr);
 u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo);
 int amdgpu_bo_validate(struct amdgpu_bo *bo);
@@ -249,13 +249,6 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
 		    owner != AMDGPU_FENCE_OWNER_UNDEFINED)
 			continue;
 
-		/* VM updates only sync with moves but not with user
-		 * command submissions or KFD evictions fences
-		 */
-		if (fence_owner != AMDGPU_FENCE_OWNER_UNDEFINED &&
-		    owner == AMDGPU_FENCE_OWNER_VM)
-			continue;
-
 		/* Ignore fences depending on the sync mode */
 		switch (mode) {
 		case AMDGPU_SYNC_ALWAYS:
@@ -797,7 +797,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
 	params.vm = vm;
 	params.direct = direct;
 
-	r = vm->update_funcs->prepare(&params, AMDGPU_FENCE_OWNER_KFD, NULL);
+	r = vm->update_funcs->prepare(&params, NULL, AMDGPU_SYNC_EXPLICIT);
 	if (r)
 		return r;
 
@@ -1293,7 +1293,7 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
 	params.vm = vm;
 	params.direct = direct;
 
-	r = vm->update_funcs->prepare(&params, AMDGPU_FENCE_OWNER_VM, NULL);
+	r = vm->update_funcs->prepare(&params, NULL, AMDGPU_SYNC_EXPLICIT);
 	if (r)
 		return r;
 
@@ -1554,7 +1554,7 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
  * @adev: amdgpu_device pointer
  * @vm: requested vm
  * @direct: direct submission in a page fault
- * @exclusive: fence we need to sync to
+ * @resv: fences we need to sync to
  * @start: start of mapped range
  * @last: last mapped entry
  * @flags: flags for the entries
@@ -1569,14 +1569,14 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
  */
 static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 				       struct amdgpu_vm *vm, bool direct,
-				       struct dma_fence *exclusive,
+				       struct dma_resv *resv,
 				       uint64_t start, uint64_t last,
 				       uint64_t flags, uint64_t addr,
 				       dma_addr_t *pages_addr,
 				       struct dma_fence **fence)
 {
 	struct amdgpu_vm_update_params params;
-	void *owner = AMDGPU_FENCE_OWNER_VM;
+	enum amdgpu_sync_mode sync_mode;
 	int r;
 
 	memset(&params, 0, sizeof(params));
@@ -1585,9 +1585,13 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 	params.direct = direct;
 	params.pages_addr = pages_addr;
 
-	/* sync to everything except eviction fences on unmapping */
+	/* Implicitly sync to command submissions in the same VM before
+	 * unmapping. Sync to moving fences before mapping.
+	 */
 	if (!(flags & AMDGPU_PTE_VALID))
-		owner = AMDGPU_FENCE_OWNER_KFD;
+		sync_mode = AMDGPU_SYNC_EQ_OWNER;
+	else
+		sync_mode = AMDGPU_SYNC_EXPLICIT;
 
 	amdgpu_vm_eviction_lock(vm);
 	if (vm->evicting) {
@@ -1595,7 +1599,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 		goto error_unlock;
 	}
 
-	r = vm->update_funcs->prepare(&params, owner, exclusive);
+	r = vm->update_funcs->prepare(&params, resv, sync_mode);
 	if (r)
 		goto error_unlock;
 
@@ -1614,7 +1618,7 @@ error_unlock:
  * amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks
  *
  * @adev: amdgpu_device pointer
- * @exclusive: fence we need to sync to
+ * @resv: fences we need to sync to
  * @pages_addr: DMA addresses to use for mapping
  * @vm: requested vm
  * @mapping: mapped range and flags to use for the update
@@ -1630,7 +1634,7 @@ error_unlock:
  * 0 for success, -EINVAL for failure.
  */
 static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
-				      struct dma_fence *exclusive,
+				      struct dma_resv *resv,
 				      dma_addr_t *pages_addr,
 				      struct amdgpu_vm *vm,
 				      struct amdgpu_bo_va_mapping *mapping,
@@ -1706,7 +1710,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
 		}
 
 		last = min((uint64_t)mapping->last, start + max_entries - 1);
-		r = amdgpu_vm_bo_update_mapping(adev, vm, false, exclusive,
+		r = amdgpu_vm_bo_update_mapping(adev, vm, false, resv,
 						start, last, flags, addr,
 						dma_addr, fence);
 		if (r)
@@ -1745,7 +1749,8 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
 	dma_addr_t *pages_addr = NULL;
 	struct ttm_mem_reg *mem;
 	struct drm_mm_node *nodes;
-	struct dma_fence *exclusive, **last_update;
+	struct dma_fence **last_update;
+	struct dma_resv *resv;
 	uint64_t flags;
 	struct amdgpu_device *bo_adev = adev;
 	int r;
@@ -1753,7 +1758,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
 	if (clear || !bo) {
 		mem = NULL;
 		nodes = NULL;
-		exclusive = NULL;
+		resv = vm->root.base.bo->tbo.base.resv;
 	} else {
 		struct ttm_dma_tt *ttm;
 
@@ -1763,7 +1768,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
 			ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm);
 			pages_addr = ttm->dma_address;
 		}
-		exclusive = bo->tbo.moving;
+		resv = bo->tbo.base.resv;
 	}
 
 	if (bo) {
@@ -1773,7 +1778,8 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
 		flags = 0x0;
 	}
 
-	if (clear || (bo && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv))
+	if (clear || (bo && bo->tbo.base.resv ==
+		      vm->root.base.bo->tbo.base.resv))
 		last_update = &vm->last_update;
 	else
 		last_update = &bo_va->last_pt_update;
 
@@ -1787,7 +1793,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
 	}
 
 	list_for_each_entry(mapping, &bo_va->invalids, list) {
-		r = amdgpu_vm_bo_split_mapping(adev, exclusive, pages_addr, vm,
+		r = amdgpu_vm_bo_split_mapping(adev, resv, pages_addr, vm,
 					       mapping, flags, bo_adev, nodes,
 					       last_update);
 		if (r)
@@ -1982,6 +1988,7 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
 			  struct amdgpu_vm *vm,
 			  struct dma_fence **fence)
 {
+	struct dma_resv *resv = vm->root.base.bo->tbo.base.resv;
 	struct amdgpu_bo_va_mapping *mapping;
 	uint64_t init_pte_value = 0;
 	struct dma_fence *f = NULL;
@@ -1996,7 +2003,7 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
 		    mapping->start < AMDGPU_GMC_HOLE_START)
 			init_pte_value = AMDGPU_PTE_DEFAULT_ATC;
 
-		r = amdgpu_vm_bo_update_mapping(adev, vm, false, NULL,
+		r = amdgpu_vm_bo_update_mapping(adev, vm, false, resv,
 						mapping->start, mapping->last,
 						init_pte_value, 0, NULL, &f);
 		amdgpu_vm_free_mapping(adev, vm, mapping, f);
@@ -227,8 +227,8 @@ struct amdgpu_vm_update_params {
 
 struct amdgpu_vm_update_funcs {
 	int (*map_table)(struct amdgpu_bo *bo);
-	int (*prepare)(struct amdgpu_vm_update_params *p, void * owner,
-		       struct dma_fence *exclusive);
+	int (*prepare)(struct amdgpu_vm_update_params *p, struct dma_resv *resv,
+		       enum amdgpu_sync_mode sync_mode);
 	int (*update)(struct amdgpu_vm_update_params *p,
 		      struct amdgpu_bo *bo, uint64_t pe, uint64_t addr,
 		      unsigned count, uint32_t incr, uint64_t flags);
@@ -44,26 +44,14 @@ static int amdgpu_vm_cpu_map_table(struct amdgpu_bo *table)
  * Returns:
  * Negativ errno, 0 for success.
  */
-static int amdgpu_vm_cpu_prepare(struct amdgpu_vm_update_params *p, void *owner,
-				 struct dma_fence *exclusive)
+static int amdgpu_vm_cpu_prepare(struct amdgpu_vm_update_params *p,
+				 struct dma_resv *resv,
+				 enum amdgpu_sync_mode sync_mode)
 {
-	int r;
-
-	/* Wait for any BO move to be completed */
-	if (exclusive) {
-		r = dma_fence_wait(exclusive, true);
-		if (unlikely(r))
-			return r;
-	}
-
-	/* Don't wait for submissions during page fault */
-	if (p->direct)
+	if (!resv)
 		return 0;
 
-	/* Wait for PT BOs to be idle. PTs share the same resv. object
-	 * as the root PD BO
-	 */
-	return amdgpu_bo_sync_wait(p->vm->root.base.bo, owner, true);
+	return amdgpu_bo_sync_wait_resv(p->adev, resv, sync_mode, p->vm, true);
 }
 
 /**
@@ -58,9 +58,9 @@ static int amdgpu_vm_sdma_map_table(struct amdgpu_bo *table)
  * Negativ errno, 0 for success.
  */
 static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p,
-				  void *owner, struct dma_fence *exclusive)
+				  struct dma_resv *resv,
+				  enum amdgpu_sync_mode sync_mode)
 {
-	struct amdgpu_bo *root = p->vm->root.base.bo;
 	unsigned int ndw = AMDGPU_VM_SDMA_MIN_NUM_DW;
 	int r;
 
@@ -70,17 +70,10 @@ static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p,
 
 	p->num_dw_left = ndw;
 
-	/* Wait for moves to be completed */
-	r = amdgpu_sync_fence(&p->job->sync, exclusive, false);
-	if (r)
-		return r;
-
-	/* Don't wait for any submissions during page fault handling */
-	if (p->direct)
+	if (!resv)
 		return 0;
 
-	return amdgpu_sync_resv(p->adev, &p->job->sync, root->tbo.base.resv,
-				AMDGPU_SYNC_NE_OWNER, owner);
+	return amdgpu_sync_resv(p->adev, &p->job->sync, resv, sync_mode, p->vm);
 }
 
 /**