drm/amdgpu: Rename DRM schedulers in amdgpu TTM

Rename mman.entity to mman.high_pr to make it clear that it is the
high priority scheduler entity. Similarly, rename the recently added
mman.delayed to mman.low_pr to make it clear that it is the low
priority scheduler entity.
No functional change in this patch.

Signed-off-by: Mukul Joshi <mukul.joshi@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Mukul Joshi 2023-05-23 11:55:54 -04:00 committed by Alex Deucher
parent c168feed5d
commit 41ce6d6d03
4 changed files with 15 additions and 15 deletions
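For context, the pattern the rename describes is sketched below. This is not the driver code itself; struct mman_entities, mman_entities_init() and mman_pick_entity() are made-up illustrative names. The point is that both entities are initialized against the same ring scheduler and differ only in DRM scheduler priority, and callers pick one the way amdgpu_ttm_prepare_job() does with its delayed flag.

#include <drm/gpu_scheduler.h>

/* Illustrative stand-in for the renamed fields in struct amdgpu_mman. */
struct mman_entities {
	struct drm_sched_entity high_pr;  /* kernel priority: buffer moves */
	struct drm_sched_entity low_pr;   /* normal priority: deferred VRAM clears */
};

/* Both entities are backed by the same scheduler; only the priority differs. */
static int mman_entities_init(struct mman_entities *e,
			      struct drm_gpu_scheduler *sched)
{
	int r;

	r = drm_sched_entity_init(&e->high_pr, DRM_SCHED_PRIORITY_KERNEL,
				  &sched, 1, NULL);
	if (r)
		return r;

	r = drm_sched_entity_init(&e->low_pr, DRM_SCHED_PRIORITY_NORMAL,
				  &sched, 1, NULL);
	if (r)
		drm_sched_entity_destroy(&e->high_pr);

	return r;
}

/* Callers choose an entity the same way amdgpu_ttm_prepare_job() does. */
static struct drm_sched_entity *mman_pick_entity(struct mman_entities *e,
						 bool delayed)
{
	return delayed ? &e->low_pr : &e->high_pr;
}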

drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c

@@ -228,7 +228,7 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
 	num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
 	num_bytes = num_pages * 8 * AMDGPU_GPU_PAGES_IN_CPU_PAGE;
-	r = amdgpu_job_alloc_with_ib(adev, &adev->mman.entity,
+	r = amdgpu_job_alloc_with_ib(adev, &adev->mman.high_pr,
 				     AMDGPU_FENCE_OWNER_UNDEFINED,
 				     num_dw * 4 + num_bytes,
 				     AMDGPU_IB_POOL_DELAYED, &job);
@@ -1456,7 +1456,7 @@ static int amdgpu_ttm_access_memory_sdma(struct ttm_buffer_object *bo,
 		memcpy(adev->mman.sdma_access_ptr, buf, len);
 	num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
-	r = amdgpu_job_alloc_with_ib(adev, &adev->mman.entity,
+	r = amdgpu_job_alloc_with_ib(adev, &adev->mman.high_pr,
 				     AMDGPU_FENCE_OWNER_UNDEFINED,
 				     num_dw * 4, AMDGPU_IB_POOL_DELAYED,
 				     &job);
@@ -2032,7 +2032,7 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
 		ring = adev->mman.buffer_funcs_ring;
 		sched = &ring->sched;
-		r = drm_sched_entity_init(&adev->mman.entity,
+		r = drm_sched_entity_init(&adev->mman.high_pr,
 					  DRM_SCHED_PRIORITY_KERNEL, &sched,
 					  1, NULL);
 		if (r) {
@@ -2041,7 +2041,7 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
 			return;
 		}
-		r = drm_sched_entity_init(&adev->mman.delayed,
+		r = drm_sched_entity_init(&adev->mman.low_pr,
 					  DRM_SCHED_PRIORITY_NORMAL, &sched,
 					  1, NULL);
 		if (r) {
@@ -2050,8 +2050,8 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
 			goto error_free_entity;
 		}
 	} else {
-		drm_sched_entity_destroy(&adev->mman.entity);
-		drm_sched_entity_destroy(&adev->mman.delayed);
+		drm_sched_entity_destroy(&adev->mman.high_pr);
+		drm_sched_entity_destroy(&adev->mman.low_pr);
 		dma_fence_put(man->move);
 		man->move = NULL;
 	}
@@ -2067,7 +2067,7 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
 	return;
 error_free_entity:
-	drm_sched_entity_destroy(&adev->mman.entity);
+	drm_sched_entity_destroy(&adev->mman.high_pr);
 }
 static int amdgpu_ttm_prepare_job(struct amdgpu_device *adev,
@@ -2082,8 +2082,8 @@ static int amdgpu_ttm_prepare_job(struct amdgpu_device *adev,
 		AMDGPU_IB_POOL_DIRECT :
 		AMDGPU_IB_POOL_DELAYED;
 	int r;
-	struct drm_sched_entity *entity = delayed ? &adev->mman.delayed :
-						    &adev->mman.entity;
+	struct drm_sched_entity *entity = delayed ? &adev->mman.low_pr :
+						    &adev->mman.high_pr;
 	r = amdgpu_job_alloc_with_ib(adev, entity,
 				     AMDGPU_FENCE_OWNER_UNDEFINED,
 				     num_dw * 4, pool, job);

drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h

@@ -59,10 +59,10 @@ struct amdgpu_mman {
 	bool				buffer_funcs_enabled;
 	struct mutex			gtt_window_lock;
-	/* Scheduler entity for buffer moves */
-	struct drm_sched_entity		entity;
-	/* Scheduler entity for VRAM clearing */
-	struct drm_sched_entity		delayed;
+	/* High priority scheduler entity for buffer moves */
+	struct drm_sched_entity		high_pr;
+	/* Low priority scheduler entity for VRAM clearing */
+	struct drm_sched_entity		low_pr;
 	struct amdgpu_vram_mgr		vram_mgr;
 	struct amdgpu_gtt_mgr		gtt_mgr;

drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c

@@ -382,7 +382,7 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
 	 * translation. Avoid this by doing the invalidation from the SDMA
 	 * itself.
 	 */
-	r = amdgpu_job_alloc_with_ib(ring->adev, &adev->mman.entity,
+	r = amdgpu_job_alloc_with_ib(ring->adev, &adev->mman.high_pr,
 				     AMDGPU_FENCE_OWNER_UNDEFINED,
 				     16 * 4, AMDGPU_IB_POOL_IMMEDIATE,
 				     &job);

drivers/gpu/drm/amd/amdkfd/kfd_migrate.c

@@ -64,7 +64,7 @@ svm_migrate_gart_map(struct amdgpu_ring *ring, uint64_t npages,
 	num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
 	num_bytes = npages * 8;
-	r = amdgpu_job_alloc_with_ib(adev, &adev->mman.entity,
+	r = amdgpu_job_alloc_with_ib(adev, &adev->mman.high_pr,
 				     AMDGPU_FENCE_OWNER_UNDEFINED,
 				     num_dw * 4 + num_bytes,
 				     AMDGPU_IB_POOL_DELAYED,