drm/amdgpu/sdma: set sched.ready status after ring/IB test in sdma

sched.ready has nothing to do with ring initialization; it needs to be
set to true after the ring/IB test in amdgpu_ring_test_helper to signal
that the ring is ready to accept job submissions.

Signed-off-by: Guchun Chen <guchun.chen@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
commit 61c31b8b6c
parent a569552f76
Author:    Guchun Chen <guchun.chen@amd.com>
Date:      2023-05-12 16:04:56 +08:00
Committer: Alex Deucher <alexander.deucher@amd.com>

 9 files changed, 3 insertions(+), 25 deletions(-)
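
For reference, amdgpu_ring_test_helper() already derives sched.ready from
the ring test result, which is why the open-coded assignments below can go
away. A condensed sketch, paraphrased from
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c of this era (not part of this
diff; success-path logging trimmed):

int amdgpu_ring_test_helper(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	int r;

	/* run the basic ring test; nonzero r means the ring is unusable */
	r = amdgpu_ring_test_ring(ring);
	if (r)
		DRM_DEV_ERROR(adev->dev, "ring %s test failed (%d)\n",
			      ring->name, r);

	/* readiness follows the test outcome: true on success, false on failure */
	ring->sched.ready = !r;

	return r;
}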

drivers/gpu/drm/amd/amdgpu/cik_sdma.c

@@ -489,8 +489,6 @@ static int cik_sdma_gfx_resume(struct amdgpu_device *adev)
 #endif
 		/* enable DMA IBs */
 		WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
-
-		ring->sched.ready = true;
 	}
 
 	cik_sdma_enable(adev, true);

drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c

@@ -466,8 +466,6 @@ static int sdma_v2_4_gfx_resume(struct amdgpu_device *adev)
 #endif
 		/* enable DMA IBs */
 		WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
-
-		ring->sched.ready = true;
 	}
 
 	sdma_v2_4_enable(adev, true);

drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c

@@ -734,8 +734,6 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev)
 #endif
 		/* enable DMA IBs */
 		WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
-
-		ring->sched.ready = true;
 	}
 
 	/* unhalt the MEs */

drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c

@@ -1114,8 +1114,6 @@ static void sdma_v4_0_gfx_resume(struct amdgpu_device *adev, unsigned int i)
 #endif
 	/* enable DMA IBs */
 	WREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL, ib_cntl);
-
-	ring->sched.ready = true;
 }
 
 /**
@@ -1202,8 +1200,6 @@ static void sdma_v4_0_page_resume(struct amdgpu_device *adev, unsigned int i)
 #endif
 	/* enable DMA IBs */
 	WREG32_SDMA(i, mmSDMA0_PAGE_IB_CNTL, ib_cntl);
-
-	ring->sched.ready = true;
 }
 
 static void
static void

drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c

@@ -685,8 +685,6 @@ static void sdma_v4_4_2_gfx_resume(struct amdgpu_device *adev, unsigned int i)
 #endif
 	/* enable DMA IBs */
 	WREG32_SDMA(i, regSDMA_GFX_IB_CNTL, ib_cntl);
-
-	ring->sched.ready = true;
 }
 
 /**
@@ -776,8 +774,6 @@ static void sdma_v4_4_2_page_resume(struct amdgpu_device *adev, unsigned int i)
 #endif
 	/* enable DMA IBs */
 	WREG32_SDMA(i, regSDMA_PAGE_IB_CNTL, ib_cntl);
-
-	ring->sched.ready = true;
 }
 
 static void sdma_v4_4_2_init_pg(struct amdgpu_device *adev)

drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c

@@ -819,8 +819,6 @@ static int sdma_v5_0_gfx_resume(struct amdgpu_device *adev)
 		/* enable DMA IBs */
 		WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
 
-		ring->sched.ready = true;
-
 		if (amdgpu_sriov_vf(adev)) { /* bare-metal sequence doesn't need below to lines */
 			sdma_v5_0_ctx_switch_enable(adev, true);
 			sdma_v5_0_enable(adev, true);

drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c

@@ -617,8 +617,6 @@ static int sdma_v5_2_gfx_resume(struct amdgpu_device *adev)
 		/* enable DMA IBs */
 		WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
 
-		ring->sched.ready = true;
-
 		if (amdgpu_sriov_vf(adev)) { /* bare-metal sequence doesn't need below to lines */
 			sdma_v5_2_ctx_switch_enable(adev, true);
 			sdma_v5_2_enable(adev, true);
@@ -630,6 +628,8 @@ static int sdma_v5_2_gfx_resume(struct amdgpu_device *adev)
 			return r;
 		}
 
+		ring->sched.ready = true;
+
 		if (adev->mman.buffer_funcs_ring == ring)
 			amdgpu_ttm_set_buffer_funcs_status(adev, true);
 	}
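
Note that sdma_v5_2 is the only file where the assignment moves rather than
disappears: it is re-added after the ring test's error check, so the ring is
flagged ready only once the test has passed.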

drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c

@@ -585,16 +585,12 @@ static int sdma_v6_0_gfx_resume(struct amdgpu_device *adev)
 		/* enable DMA IBs */
 		WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_IB_CNTL), ib_cntl);
 
-		ring->sched.ready = true;
-
 		if (amdgpu_sriov_vf(adev))
 			sdma_v6_0_enable(adev, true);
 
 		r = amdgpu_ring_test_helper(ring);
-		if (r) {
-			ring->sched.ready = false;
+		if (r)
 			return r;
-		}
 
 		if (adev->mman.buffer_funcs_ring == ring)
 			amdgpu_ttm_set_buffer_funcs_status(adev, true);
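
Because the helper already sets sched.ready to !r (see the sketch above), the
explicit false assignment in sdma_v6_0's error path was redundant; removing it
also lets the early return shed its braces.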

drivers/gpu/drm/amd/amdgpu/si_dma.c

@@ -174,8 +174,6 @@ static int si_dma_start(struct amdgpu_device *adev)
 		WREG32(DMA_RB_WPTR + sdma_offsets[i], ring->wptr << 2);
 		WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl | DMA_RB_ENABLE);
 
-		ring->sched.ready = true;
-
 		r = amdgpu_ring_test_helper(ring);
 		if (r)
 			return r;