drm/amdgpu: remove non gfx specific handling from sdma_v4_0_gfx_resume

Needed to start using the paging queue.
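
With the queue-independent setup (semaphore wait timeout, UTC L1 enable, unhalt) hoisted out of sdma_v4_0_gfx_resume() into the per-instance loop in sdma_v4_0_start(), the resume helper only touches the GFX ring registers, so a paging queue resume can later be added alongside it. A rough sketch of the intended loop shape; sdma_v4_0_page_resume() is a hypothetical follow-up helper, not something this patch adds:

	/* Sketch only, not part of this patch. */
	for (i = 0; i < adev->sdma.num_instances; i++) {
		/* common, queue-independent setup stays here */
		WREG32(sdma_v4_0_get_reg_offset(adev, i,
		       mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0);

		sdma_v4_0_gfx_resume(adev, i);	/* GFX ring registers only */
		sdma_v4_0_page_resume(adev, i);	/* hypothetical: paging queue registers */
	}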

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Huang Rui <ray.huang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
 1 file changed, 20 insertions(+), 16 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c

@@ -688,13 +688,10 @@ static void sdma_v4_0_gfx_resume(struct amdgpu_device *adev, unsigned int i)
 	u32 wb_offset;
 	u32 doorbell;
 	u32 doorbell_offset;
-	u32 temp;
 	u64 wptr_gpu_addr;
 
 	wb_offset = (ring->rptr_offs * 4);
 
-	WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0);
-
 	/* Set ring buffer size in dwords */
 	rb_bufsz = order_base_2(ring->ring_size / 4);
 	rb_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
@@ -754,18 +751,6 @@ static void sdma_v4_0_gfx_resume(struct amdgpu_device *adev, unsigned int i)
 	/* set minor_ptr_update to 0 after wptr programed */
 	WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 0);
 
-	/* set utc l1 enable flag always to 1 */
-	temp = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_CNTL));
-	temp = REG_SET_FIELD(temp, SDMA0_CNTL, UTC_L1_ENABLE, 1);
-	WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_CNTL), temp);
-
-	if (!amdgpu_sriov_vf(adev)) {
-		/* unhalt engine */
-		temp = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
-		temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0);
-		WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), temp);
-	}
-
 	/* setup the wptr shadow polling */
 	wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
 	WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO),
@@ -944,9 +929,28 @@ static int sdma_v4_0_start(struct amdgpu_device *adev)
 	}
 
 	/* start the gfx rings and rlc compute queues */
-	for (i = 0; i < adev->sdma.num_instances; i++)
+	for (i = 0; i < adev->sdma.num_instances; i++) {
+		uint32_t temp;
+
+		WREG32(sdma_v4_0_get_reg_offset(adev, i,
+		       mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0);
 		sdma_v4_0_gfx_resume(adev, i);
 
+		/* set utc l1 enable flag always to 1 */
+		temp = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_CNTL));
+		temp = REG_SET_FIELD(temp, SDMA0_CNTL, UTC_L1_ENABLE, 1);
+		WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_CNTL), temp);
+
+		if (!amdgpu_sriov_vf(adev)) {
+			/* unhalt engine */
+			temp = RREG32(sdma_v4_0_get_reg_offset(adev, i,
+					mmSDMA0_F32_CNTL));
+			temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0);
+			WREG32(sdma_v4_0_get_reg_offset(adev, i,
+					mmSDMA0_F32_CNTL), temp);
+		}
+	}
+
 	if (amdgpu_sriov_vf(adev)) {
 		sdma_v4_0_ctx_switch_enable(adev, true);
 		sdma_v4_0_enable(adev, true);