From 41ce6d6d03d5e51420ea7732c83facc8a7f2e5da Mon Sep 17 00:00:00 2001
From: Mukul Joshi
Date: Tue, 23 May 2023 11:55:54 -0400
Subject: [PATCH] drm/amdgpu: Rename DRM schedulers in amdgpu TTM
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Rename mman.entity to mman.high_pr to make the distinction clearer
that this is a high priority scheduler. Similarly, rename the recently
added mman.delayed to mman.low_pr to make it clear it is a low
priority scheduler.

No functional change in this patch.

Signed-off-by: Mukul Joshi
Reviewed-by: Christian König
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c  | 18 +++++++++---------
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h  |  8 ++++----
 drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c   |  2 +-
 drivers/gpu/drm/amd/amdkfd/kfd_migrate.c |  2 +-
 4 files changed, 15 insertions(+), 15 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index d2d0d27f9053..0534ab716809 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -228,7 +228,7 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
 	num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
 	num_bytes = num_pages * 8 * AMDGPU_GPU_PAGES_IN_CPU_PAGE;
 
-	r = amdgpu_job_alloc_with_ib(adev, &adev->mman.entity,
+	r = amdgpu_job_alloc_with_ib(adev, &adev->mman.high_pr,
 				     AMDGPU_FENCE_OWNER_UNDEFINED,
 				     num_dw * 4 + num_bytes,
 				     AMDGPU_IB_POOL_DELAYED, &job);
@@ -1456,7 +1456,7 @@ static int amdgpu_ttm_access_memory_sdma(struct ttm_buffer_object *bo,
 		memcpy(adev->mman.sdma_access_ptr, buf, len);
 
 	num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
-	r = amdgpu_job_alloc_with_ib(adev, &adev->mman.entity,
+	r = amdgpu_job_alloc_with_ib(adev, &adev->mman.high_pr,
 				     AMDGPU_FENCE_OWNER_UNDEFINED,
 				     num_dw * 4, AMDGPU_IB_POOL_DELAYED,
 				     &job);
@@ -2032,7 +2032,7 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
 
 		ring = adev->mman.buffer_funcs_ring;
 		sched = &ring->sched;
-		r = drm_sched_entity_init(&adev->mman.entity,
+		r = drm_sched_entity_init(&adev->mman.high_pr,
 					  DRM_SCHED_PRIORITY_KERNEL, &sched,
 					  1, NULL);
 		if (r) {
@@ -2041,7 +2041,7 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
 			return;
 		}
 
-		r = drm_sched_entity_init(&adev->mman.delayed,
+		r = drm_sched_entity_init(&adev->mman.low_pr,
 					  DRM_SCHED_PRIORITY_NORMAL, &sched,
 					  1, NULL);
 		if (r) {
@@ -2050,8 +2050,8 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
 			goto error_free_entity;
 		}
 	} else {
-		drm_sched_entity_destroy(&adev->mman.entity);
-		drm_sched_entity_destroy(&adev->mman.delayed);
+		drm_sched_entity_destroy(&adev->mman.high_pr);
+		drm_sched_entity_destroy(&adev->mman.low_pr);
 		dma_fence_put(man->move);
 		man->move = NULL;
 	}
@@ -2067,7 +2067,7 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
 	return;
 
 error_free_entity:
-	drm_sched_entity_destroy(&adev->mman.entity);
+	drm_sched_entity_destroy(&adev->mman.high_pr);
 }
 
 static int amdgpu_ttm_prepare_job(struct amdgpu_device *adev,
@@ -2082,8 +2082,8 @@ static int amdgpu_ttm_prepare_job(struct amdgpu_device *adev,
 		AMDGPU_IB_POOL_DIRECT :
 		AMDGPU_IB_POOL_DELAYED;
 	int r;
-	struct drm_sched_entity *entity = delayed ? &adev->mman.delayed :
-						    &adev->mman.entity;
+	struct drm_sched_entity *entity = delayed ? &adev->mman.low_pr :
+						    &adev->mman.high_pr;
 	r = amdgpu_job_alloc_with_ib(adev, entity,
 				     AMDGPU_FENCE_OWNER_UNDEFINED,
 				     num_dw * 4, pool, job);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index e82b1edee7a4..6d0d66e40db9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -59,10 +59,10 @@ struct amdgpu_mman {
 	bool					buffer_funcs_enabled;
 
 	struct mutex				gtt_window_lock;
-	/* Scheduler entity for buffer moves */
-	struct drm_sched_entity			entity;
-	/* Scheduler entity for VRAM clearing */
-	struct drm_sched_entity			delayed;
+	/* High priority scheduler entity for buffer moves */
+	struct drm_sched_entity			high_pr;
+	/* Low priority scheduler entity for VRAM clearing */
+	struct drm_sched_entity			low_pr;
 
 	struct amdgpu_vram_mgr			vram_mgr;
 	struct amdgpu_gtt_mgr			gtt_mgr;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index b2e42f1b0f12..0c8a47989576 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -382,7 +382,7 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
 	 * translation. Avoid this by doing the invalidation from the SDMA
 	 * itself.
 	 */
-	r = amdgpu_job_alloc_with_ib(ring->adev, &adev->mman.entity,
+	r = amdgpu_job_alloc_with_ib(ring->adev, &adev->mman.high_pr,
 				     AMDGPU_FENCE_OWNER_UNDEFINED,
 				     16 * 4, AMDGPU_IB_POOL_IMMEDIATE,
 				     &job);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
index 58d95fb99595..709ac885ca6d 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -64,7 +64,7 @@ svm_migrate_gart_map(struct amdgpu_ring *ring, uint64_t npages,
 	num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
 	num_bytes = npages * 8;
 
-	r = amdgpu_job_alloc_with_ib(adev, &adev->mman.entity,
+	r = amdgpu_job_alloc_with_ib(adev, &adev->mman.high_pr,
 				     AMDGPU_FENCE_OWNER_UNDEFINED,
 				     num_dw * 4 + num_bytes,
 				     AMDGPU_IB_POOL_DELAYED,
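
For illustration, a minimal standalone sketch of the selection pattern in the
amdgpu_ttm_prepare_job() hunk above, where delayed work goes to the low
priority entity and everything else to the high priority one. The stub types
amdgpu_mman_stub and drm_sched_entity_stub and the helper pick_entity() are
hypothetical stand-ins for this sketch only, not the real kernel structures
or API:

/* Sketch only: mirrors the renamed selection, delayed -> low_pr, else high_pr. */
#include <stdio.h>

struct drm_sched_entity_stub { const char *name; };

struct amdgpu_mman_stub {
	struct drm_sched_entity_stub high_pr; /* buffer moves */
	struct drm_sched_entity_stub low_pr;  /* VRAM clearing */
};

static struct drm_sched_entity_stub *
pick_entity(struct amdgpu_mman_stub *mman, int delayed)
{
	return delayed ? &mman->low_pr : &mman->high_pr;
}

int main(void)
{
	struct amdgpu_mman_stub mman = {
		.high_pr = { "high_pr" },
		.low_pr  = { "low_pr" },
	};

	printf("delayed=0 -> %s\n", pick_entity(&mman, 0)->name); /* high_pr */
	printf("delayed=1 -> %s\n", pick_entity(&mman, 1)->name); /* low_pr */
	return 0;
}

Running the sketch prints high_pr for non-delayed work and low_pr for delayed
work, matching the renamed fields introduced by the patch.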