drm/amdgpu: Rename DRM schedulers in amdgpu TTM
Rename mman.entity to mman.high_pr to make it clearer that it is a high
priority scheduler entity. Similarly, rename the recently added
mman.delayed to mman.low_pr to make it clear that it is a low priority
scheduler entity.

No functional change in this patch.

Signed-off-by: Mukul Joshi <mukul.joshi@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Parent: c168feed5d
Commit: 41ce6d6d03
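For orientation before the diff: both entities run on the same SDMA
buffer-funcs ring and differ only in their drm_sched priority. A condensed
sketch of the setup, taken from the amdgpu_ttm_set_buffer_funcs_status()
hunks below (error handling and the surrounding enable/disable logic
omitted):

	/* Sketch: both entities share the buffer-funcs ring's scheduler. */
	struct drm_gpu_scheduler *sched = &adev->mman.buffer_funcs_ring->sched;

	/* Buffer moves: kernel priority, hence the name high_pr. */
	r = drm_sched_entity_init(&adev->mman.high_pr,
				  DRM_SCHED_PRIORITY_KERNEL, &sched, 1, NULL);

	/* VRAM clearing: normal priority, hence the name low_pr. */
	r = drm_sched_entity_init(&adev->mman.low_pr,
				  DRM_SCHED_PRIORITY_NORMAL, &sched, 1, NULL);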
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -228,7 +228,7 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
 	num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
 	num_bytes = num_pages * 8 * AMDGPU_GPU_PAGES_IN_CPU_PAGE;
 
-	r = amdgpu_job_alloc_with_ib(adev, &adev->mman.entity,
+	r = amdgpu_job_alloc_with_ib(adev, &adev->mman.high_pr,
 				     AMDGPU_FENCE_OWNER_UNDEFINED,
 				     num_dw * 4 + num_bytes,
 				     AMDGPU_IB_POOL_DELAYED, &job);
@@ -1456,7 +1456,7 @@ static int amdgpu_ttm_access_memory_sdma(struct ttm_buffer_object *bo,
 		memcpy(adev->mman.sdma_access_ptr, buf, len);
 
 	num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
-	r = amdgpu_job_alloc_with_ib(adev, &adev->mman.entity,
+	r = amdgpu_job_alloc_with_ib(adev, &adev->mman.high_pr,
 				     AMDGPU_FENCE_OWNER_UNDEFINED,
 				     num_dw * 4, AMDGPU_IB_POOL_DELAYED,
 				     &job);
@@ -2032,7 +2032,7 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
 
 		ring = adev->mman.buffer_funcs_ring;
 		sched = &ring->sched;
-		r = drm_sched_entity_init(&adev->mman.entity,
+		r = drm_sched_entity_init(&adev->mman.high_pr,
					  DRM_SCHED_PRIORITY_KERNEL, &sched,
					  1, NULL);
 		if (r) {
@@ -2041,7 +2041,7 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
 			return;
 		}
 
-		r = drm_sched_entity_init(&adev->mman.delayed,
+		r = drm_sched_entity_init(&adev->mman.low_pr,
					  DRM_SCHED_PRIORITY_NORMAL, &sched,
					  1, NULL);
 		if (r) {
@@ -2050,8 +2050,8 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
 			goto error_free_entity;
 		}
 	} else {
-		drm_sched_entity_destroy(&adev->mman.entity);
-		drm_sched_entity_destroy(&adev->mman.delayed);
+		drm_sched_entity_destroy(&adev->mman.high_pr);
+		drm_sched_entity_destroy(&adev->mman.low_pr);
 		dma_fence_put(man->move);
 		man->move = NULL;
 	}
@@ -2067,7 +2067,7 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
 	return;
 
 error_free_entity:
-	drm_sched_entity_destroy(&adev->mman.entity);
+	drm_sched_entity_destroy(&adev->mman.high_pr);
 }
 
 static int amdgpu_ttm_prepare_job(struct amdgpu_device *adev,
@@ -2082,8 +2082,8 @@ static int amdgpu_ttm_prepare_job(struct amdgpu_device *adev,
 		AMDGPU_IB_POOL_DIRECT :
 		AMDGPU_IB_POOL_DELAYED;
 	int r;
-	struct drm_sched_entity *entity = delayed ? &adev->mman.delayed :
-						    &adev->mman.entity;
+	struct drm_sched_entity *entity = delayed ? &adev->mman.low_pr :
+						    &adev->mman.high_pr;
 	r = amdgpu_job_alloc_with_ib(adev, entity,
 				     AMDGPU_FENCE_OWNER_UNDEFINED,
 				     num_dw * 4, pool, job);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -59,10 +59,10 @@ struct amdgpu_mman {
 	bool					buffer_funcs_enabled;
 
 	struct mutex				gtt_window_lock;
-	/* Scheduler entity for buffer moves */
-	struct drm_sched_entity			entity;
-	/* Scheduler entity for VRAM clearing */
-	struct drm_sched_entity			delayed;
+	/* High priority scheduler entity for buffer moves */
+	struct drm_sched_entity			high_pr;
+	/* Low priority scheduler entity for VRAM clearing */
+	struct drm_sched_entity			low_pr;
 
 	struct amdgpu_vram_mgr			vram_mgr;
 	struct amdgpu_gtt_mgr			gtt_mgr;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -382,7 +382,7 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
	 * translation. Avoid this by doing the invalidation from the SDMA
	 * itself.
	 */
-	r = amdgpu_job_alloc_with_ib(ring->adev, &adev->mman.entity,
+	r = amdgpu_job_alloc_with_ib(ring->adev, &adev->mman.high_pr,
 				     AMDGPU_FENCE_OWNER_UNDEFINED,
 				     16 * 4, AMDGPU_IB_POOL_IMMEDIATE,
 				     &job);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -64,7 +64,7 @@ svm_migrate_gart_map(struct amdgpu_ring *ring, uint64_t npages,
 	num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
 	num_bytes = npages * 8;
 
-	r = amdgpu_job_alloc_with_ib(adev, &adev->mman.entity,
+	r = amdgpu_job_alloc_with_ib(adev, &adev->mman.high_pr,
 				     AMDGPU_FENCE_OWNER_UNDEFINED,
 				     num_dw * 4 + num_bytes,
 				     AMDGPU_IB_POOL_DELAYED,
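Note that every call site touched above except one (amdgpu_ttm_map_buffer(),
amdgpu_ttm_access_memory_sdma(), gmc_v10_0_flush_gpu_tlb() and
svm_migrate_gart_map()) submits to the high priority entity directly; only
amdgpu_ttm_prepare_job() picks between the two, keyed on its delayed flag.
A condensed restatement of that hunk, as a sketch:

	/* Delayed work (VRAM clearing) runs on the low priority entity;
	 * buffer moves and everything else on the high priority one.
	 */
	struct drm_sched_entity *entity = delayed ? &adev->mman.low_pr :
						    &adev->mman.high_pr;

	r = amdgpu_job_alloc_with_ib(adev, entity,
				     AMDGPU_FENCE_OWNER_UNDEFINED,
				     num_dw * 4, pool, job);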