drm/amdgpu: cache the fence to wait for a VMID
Beneficial when a lot of processes are waiting for VMIDs.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
This commit is contained in:
parent: 58592a095c
commit: 3af81440a9
@@ -205,6 +205,9 @@ static int amdgpu_vmid_grab_idle(struct amdgpu_vm *vm,
 	unsigned i;
 	int r;
 
+	if (ring->vmid_wait && !dma_fence_is_signaled(ring->vmid_wait))
+		return amdgpu_sync_fence(adev, sync, ring->vmid_wait, false);
+
 	fences = kmalloc_array(sizeof(void *), id_mgr->num_ids, GFP_KERNEL);
 	if (!fences)
 		return -ENOMEM;
@@ -239,9 +242,9 @@ static int amdgpu_vmid_grab_idle(struct amdgpu_vm *vm,
 		}
 
 		r = amdgpu_sync_fence(adev, sync, &array->base, false);
-		dma_fence_put(&array->base);
+		dma_fence_put(ring->vmid_wait);
+		ring->vmid_wait = &array->base;
 		return r;
-
 	}
 	kfree(fences);
@@ -360,6 +360,9 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring)
 
 	amdgpu_debugfs_ring_fini(ring);
 
+	dma_fence_put(ring->vmid_wait);
+	ring->vmid_wait = NULL;
+
 	ring->adev->rings[ring->idx] = NULL;
 }
@@ -196,6 +196,7 @@ struct amdgpu_ring {
 	u64			cond_exe_gpu_addr;
 	volatile u32		*cond_exe_cpu_addr;
 	unsigned		vm_inv_eng;
+	struct dma_fence	*vmid_wait;
 	bool			has_compute_vm_bug;
 
 	atomic_t		num_jobs[DRM_SCHED_PRIORITY_MAX];
Loading…
Reference in New Issue