drm/amdgpu: Retire amdgpu_ring.ready flag v4
Start using drm_gpu_scheduler.ready instead.

v3: Add helper function to run ring test and set sched.ready flag status accordingly, clean explicit sched.ready sets from the IP specific files.
v4: Add kerneldoc and rebase.

Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
commit c66ed765a0
parent faf6e1a87e
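Every caller in the diff below is converted with the same recipe: the hand-rolled ready bookkeeping around amdgpu_ring_test_ring() goes away, and the new helper is called instead; it runs the ring test and records the outcome in the scheduler's ready flag. A condensed sketch of that recipe, excerpted from the changes below rather than a standalone build unit:

	/* new helper added to the ring code below */
	int amdgpu_ring_test_helper(struct amdgpu_ring *ring)
	{
		int r = amdgpu_ring_test_ring(ring);

		ring->sched.ready = !r;	/* ready only if the test passed */
		return r;
	}

	/* typical conversion in an IP block's resume path:
	 *
	 * before:				after:
	 *	ring->ready = true;		r = amdgpu_ring_test_helper(ring);
	 *	r = amdgpu_ring_test_ring(ring);	if (r)
	 *	if (r) {				return r;
	 *		ring->ready = false;
	 *		return r;
	 *	}
	 */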
@@ -144,7 +144,7 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
 				  KGD_MAX_QUEUES);
 
 		/* remove the KIQ bit as well */
-		if (adev->gfx.kiq.ring.ready)
+		if (adev->gfx.kiq.ring.sched.ready)
 			clear_bit(amdgpu_gfx_queue_to_bit(adev,
 							  adev->gfx.kiq.ring.me - 1,
 							  adev->gfx.kiq.ring.pipe,
@@ -786,7 +786,7 @@ static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
 	if (adev->in_gpu_reset)
 		return -EIO;
 
-	if (ring->ready)
+	if (ring->sched.ready)
 		return invalidate_tlbs_with_kiq(adev, pasid);
 
 	for (vmid = 0; vmid < 16; vmid++) {
@@ -146,7 +146,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 		fence_ctx = 0;
 	}
 
-	if (!ring->ready) {
+	if (!ring->sched.ready) {
 		dev_err(adev->dev, "couldn't schedule ib on ring <%s>\n", ring->name);
 		return -EINVAL;
 	}
@@ -351,7 +351,7 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
 		struct amdgpu_ring *ring = adev->rings[i];
 		long tmo;
 
-		if (!ring || !ring->ready)
+		if (!ring || !ring->sched.ready)
 			continue;
 
 		/* skip IB tests for KIQ in general for the below reasons:
@@ -375,7 +375,7 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
 
 		r = amdgpu_ring_test_ib(ring, tmo);
 		if (r) {
-			ring->ready = false;
+			ring->sched.ready = false;
 
 			if (ring == &adev->gfx.gfx_ring[0]) {
 				/* oh, oh, that's really bad */
@@ -336,7 +336,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
 	case AMDGPU_HW_IP_GFX:
 		type = AMD_IP_BLOCK_TYPE_GFX;
 		for (i = 0; i < adev->gfx.num_gfx_rings; i++)
-			if (adev->gfx.gfx_ring[i].ready)
+			if (adev->gfx.gfx_ring[i].sched.ready)
 				++num_rings;
 		ib_start_alignment = 32;
 		ib_size_alignment = 32;
@@ -344,7 +344,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
 	case AMDGPU_HW_IP_COMPUTE:
 		type = AMD_IP_BLOCK_TYPE_GFX;
 		for (i = 0; i < adev->gfx.num_compute_rings; i++)
-			if (adev->gfx.compute_ring[i].ready)
+			if (adev->gfx.compute_ring[i].sched.ready)
 				++num_rings;
 		ib_start_alignment = 32;
 		ib_size_alignment = 32;
@@ -352,7 +352,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
 	case AMDGPU_HW_IP_DMA:
 		type = AMD_IP_BLOCK_TYPE_SDMA;
 		for (i = 0; i < adev->sdma.num_instances; i++)
-			if (adev->sdma.instance[i].ring.ready)
+			if (adev->sdma.instance[i].ring.sched.ready)
 				++num_rings;
 		ib_start_alignment = 256;
 		ib_size_alignment = 4;
@@ -363,7 +363,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
 			if (adev->uvd.harvest_config & (1 << i))
 				continue;
 
-			if (adev->uvd.inst[i].ring.ready)
+			if (adev->uvd.inst[i].ring.sched.ready)
 				++num_rings;
 		}
 		ib_start_alignment = 64;
@@ -372,7 +372,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
 	case AMDGPU_HW_IP_VCE:
 		type = AMD_IP_BLOCK_TYPE_VCE;
 		for (i = 0; i < adev->vce.num_rings; i++)
-			if (adev->vce.ring[i].ready)
+			if (adev->vce.ring[i].sched.ready)
 				++num_rings;
 		ib_start_alignment = 4;
 		ib_size_alignment = 1;
@@ -384,7 +384,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
 				continue;
 
 			for (j = 0; j < adev->uvd.num_enc_rings; j++)
-				if (adev->uvd.inst[i].ring_enc[j].ready)
+				if (adev->uvd.inst[i].ring_enc[j].sched.ready)
 					++num_rings;
 		}
 		ib_start_alignment = 64;
@@ -392,7 +392,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
 		break;
 	case AMDGPU_HW_IP_VCN_DEC:
 		type = AMD_IP_BLOCK_TYPE_VCN;
-		if (adev->vcn.ring_dec.ready)
+		if (adev->vcn.ring_dec.sched.ready)
 			++num_rings;
 		ib_start_alignment = 16;
 		ib_size_alignment = 16;
@@ -400,14 +400,14 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
 	case AMDGPU_HW_IP_VCN_ENC:
 		type = AMD_IP_BLOCK_TYPE_VCN;
 		for (i = 0; i < adev->vcn.num_enc_rings; i++)
-			if (adev->vcn.ring_enc[i].ready)
+			if (adev->vcn.ring_enc[i].sched.ready)
 				++num_rings;
 		ib_start_alignment = 64;
 		ib_size_alignment = 1;
 		break;
 	case AMDGPU_HW_IP_VCN_JPEG:
 		type = AMD_IP_BLOCK_TYPE_VCN;
-		if (adev->vcn.ring_jpeg.ready)
+		if (adev->vcn.ring_jpeg.sched.ready)
 			++num_rings;
 		ib_start_alignment = 16;
 		ib_size_alignment = 16;
@@ -2129,7 +2129,7 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
 
 		for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
 			struct amdgpu_ring *ring = adev->rings[i];
-			if (ring && ring->ready)
+			if (ring && ring->sched.ready)
 				amdgpu_fence_wait_empty(ring);
 		}
 
@@ -338,7 +338,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
  */
 void amdgpu_ring_fini(struct amdgpu_ring *ring)
 {
-	ring->ready = false;
+	ring->sched.ready = false;
 
 	/* Not to finish a ring which is not initialized */
 	if (!(ring->adev) || !(ring->adev->rings[ring->idx]))
@@ -500,3 +500,23 @@ static void amdgpu_debugfs_ring_fini(struct amdgpu_ring *ring)
 	debugfs_remove(ring->ent);
 #endif
 }
+
+/**
+ * amdgpu_ring_test_helper - tests ring and set sched readiness status
+ *
+ * @ring: ring to try the recovery on
+ *
+ * Tests ring and set sched readiness status
+ *
+ * Returns 0 on success, error on failure.
+ */
+int amdgpu_ring_test_helper(struct amdgpu_ring *ring)
+{
+	int r;
+
+	r = amdgpu_ring_test_ring(ring);
+
+	ring->sched.ready = !r;
+
+	return r;
+}
@@ -189,7 +189,6 @@ struct amdgpu_ring {
 	uint64_t		gpu_addr;
 	uint64_t		ptr_mask;
 	uint32_t		buf_mask;
-	bool			ready;
 	u32			idx;
 	u32			me;
 	u32			pipe;
@@ -313,4 +312,6 @@ static inline void amdgpu_ring_write_multiple(struct amdgpu_ring *ring,
 	ring->count_dw -= count_dw;
 }
 
+int amdgpu_ring_test_helper(struct amdgpu_ring *ring);
+
 #endif
@@ -1970,7 +1970,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
 	unsigned i;
 	int r;
 
-	if (direct_submit && !ring->ready) {
+	if (direct_submit && !ring->sched.ready) {
 		DRM_ERROR("Trying to move memory with ring turned off.\n");
 		return -EINVAL;
 	}
@@ -316,8 +316,8 @@ static void cik_sdma_gfx_stop(struct amdgpu_device *adev)
 		WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
 		WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], 0);
 	}
-	sdma0->ready = false;
-	sdma1->ready = false;
+	sdma0->sched.ready = false;
+	sdma1->sched.ready = false;
 }
 
 /**
@@ -494,18 +494,16 @@ static int cik_sdma_gfx_resume(struct amdgpu_device *adev)
 		/* enable DMA IBs */
 		WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
 
-		ring->ready = true;
+		ring->sched.ready = true;
 	}
 
 	cik_sdma_enable(adev, true);
 
 	for (i = 0; i < adev->sdma.num_instances; i++) {
 		ring = &adev->sdma.instance[i].ring;
-		r = amdgpu_ring_test_ring(ring);
-		if (r) {
-			ring->ready = false;
+		r = amdgpu_ring_test_helper(ring);
+		if (r)
 			return r;
-		}
 
 		if (adev->mman.buffer_funcs_ring == ring)
 			amdgpu_ttm_set_buffer_funcs_status(adev, true);
@@ -1950,9 +1950,9 @@ static void gfx_v6_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
 			      CP_ME_CNTL__CE_HALT_MASK));
 		WREG32(mmSCRATCH_UMSK, 0);
 		for (i = 0; i < adev->gfx.num_gfx_rings; i++)
-			adev->gfx.gfx_ring[i].ready = false;
+			adev->gfx.gfx_ring[i].sched.ready = false;
 		for (i = 0; i < adev->gfx.num_compute_rings; i++)
-			adev->gfx.compute_ring[i].ready = false;
+			adev->gfx.compute_ring[i].sched.ready = false;
 	}
 	udelay(50);
 }
@@ -2124,12 +2124,9 @@ static int gfx_v6_0_cp_gfx_resume(struct amdgpu_device *adev)
 
 	/* start the rings */
 	gfx_v6_0_cp_gfx_start(adev);
-	ring->ready = true;
-	r = amdgpu_ring_test_ring(ring);
-	if (r) {
-		ring->ready = false;
+	r = amdgpu_ring_test_helper(ring);
+	if (r)
 		return r;
-	}
 
 	return 0;
 }
@@ -2227,14 +2224,11 @@ static int gfx_v6_0_cp_compute_resume(struct amdgpu_device *adev)
 	WREG32(mmCP_RB2_CNTL, tmp);
 	WREG32(mmCP_RB2_BASE, ring->gpu_addr >> 8);
 
-	adev->gfx.compute_ring[0].ready = false;
-	adev->gfx.compute_ring[1].ready = false;
 
 	for (i = 0; i < 2; i++) {
-		r = amdgpu_ring_test_ring(&adev->gfx.compute_ring[i]);
+		r = amdgpu_ring_test_helper(&adev->gfx.compute_ring[i]);
 		if (r)
 			return r;
-		adev->gfx.compute_ring[i].ready = true;
 	}
 
 	return 0;
@@ -2403,7 +2403,7 @@ static void gfx_v7_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
 	} else {
 		WREG32(mmCP_ME_CNTL, (CP_ME_CNTL__ME_HALT_MASK | CP_ME_CNTL__PFP_HALT_MASK | CP_ME_CNTL__CE_HALT_MASK));
 		for (i = 0; i < adev->gfx.num_gfx_rings; i++)
-			adev->gfx.gfx_ring[i].ready = false;
+			adev->gfx.gfx_ring[i].sched.ready = false;
 	}
 	udelay(50);
 }
@@ -2613,12 +2613,9 @@ static int gfx_v7_0_cp_gfx_resume(struct amdgpu_device *adev)
 
 	/* start the ring */
 	gfx_v7_0_cp_gfx_start(adev);
-	ring->ready = true;
-	r = amdgpu_ring_test_ring(ring);
-	if (r) {
-		ring->ready = false;
+	r = amdgpu_ring_test_helper(ring);
+	if (r)
 		return r;
-	}
 
 	return 0;
 }
@@ -2675,7 +2672,7 @@ static void gfx_v7_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
 	} else {
 		WREG32(mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
 		for (i = 0; i < adev->gfx.num_compute_rings; i++)
-			adev->gfx.compute_ring[i].ready = false;
+			adev->gfx.compute_ring[i].sched.ready = false;
 	}
 	udelay(50);
 }
@@ -3106,10 +3103,7 @@ static int gfx_v7_0_cp_compute_resume(struct amdgpu_device *adev)
 
 	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
 		ring = &adev->gfx.compute_ring[i];
-		ring->ready = true;
-		r = amdgpu_ring_test_ring(ring);
-		if (r)
-			ring->ready = false;
+		amdgpu_ring_test_helper(ring);
 	}
 
 	return 0;
@@ -1629,7 +1629,7 @@ static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
 		return 0;
 
 	/* bail if the compute ring is not ready */
-	if (!ring->ready)
+	if (!ring->sched.ready)
 		return 0;
 
 	tmp = RREG32(mmGB_EDC_MODE);
@@ -4197,7 +4197,7 @@ static void gfx_v8_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
 		tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, 1);
 		tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, 1);
 		for (i = 0; i < adev->gfx.num_gfx_rings; i++)
-			adev->gfx.gfx_ring[i].ready = false;
+			adev->gfx.gfx_ring[i].sched.ready = false;
 	}
 	WREG32(mmCP_ME_CNTL, tmp);
 	udelay(50);
@@ -4379,10 +4379,8 @@ static int gfx_v8_0_cp_gfx_resume(struct amdgpu_device *adev)
 	/* start the ring */
 	amdgpu_ring_clear_ring(ring);
 	gfx_v8_0_cp_gfx_start(adev);
-	ring->ready = true;
-	r = amdgpu_ring_test_ring(ring);
-	if (r)
-		ring->ready = false;
+	ring->sched.ready = true;
+	r = amdgpu_ring_test_helper(ring);
 
 	return r;
 }
@@ -4396,8 +4394,8 @@ static void gfx_v8_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
 	} else {
 		WREG32(mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
 		for (i = 0; i < adev->gfx.num_compute_rings; i++)
-			adev->gfx.compute_ring[i].ready = false;
-		adev->gfx.kiq.ring.ready = false;
+			adev->gfx.compute_ring[i].sched.ready = false;
+		adev->gfx.kiq.ring.sched.ready = false;
 	}
 	udelay(50);
 }
@@ -4473,11 +4471,9 @@ static int gfx_v8_0_kiq_kcq_enable(struct amdgpu_device *adev)
 		amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
 	}
 
-	r = amdgpu_ring_test_ring(kiq_ring);
-	if (r) {
+	r = amdgpu_ring_test_helper(kiq_ring);
+	if (r)
 		DRM_ERROR("KCQ enable failed\n");
-		kiq_ring->ready = false;
-	}
 	return r;
 }
 
@@ -4781,7 +4777,7 @@ static int gfx_v8_0_kiq_resume(struct amdgpu_device *adev)
 	amdgpu_bo_kunmap(ring->mqd_obj);
 	ring->mqd_ptr = NULL;
 	amdgpu_bo_unreserve(ring->mqd_obj);
-	ring->ready = true;
+	ring->sched.ready = true;
 	return 0;
 }
 
@@ -4820,10 +4816,7 @@ static int gfx_v8_0_kcq_resume(struct amdgpu_device *adev)
 	 */
 	for (i = adev->gfx.num_compute_rings - 1; i >= 0; i--) {
 		ring = &adev->gfx.compute_ring[i];
-		ring->ready = true;
-		r = amdgpu_ring_test_ring(ring);
-		if (r)
-			ring->ready = false;
+		r = amdgpu_ring_test_helper(ring);
 	}
 
 done:
@@ -4899,7 +4892,7 @@ static int gfx_v8_0_kcq_disable(struct amdgpu_device *adev)
 		amdgpu_ring_write(kiq_ring, 0);
 		amdgpu_ring_write(kiq_ring, 0);
 	}
-	r = amdgpu_ring_test_ring(kiq_ring);
+	r = amdgpu_ring_test_helper(kiq_ring);
 	if (r)
 		DRM_ERROR("KCQ disable failed\n");
 
@@ -2537,7 +2537,7 @@ static void gfx_v9_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, enable ? 0 : 1);
 	if (!enable) {
 		for (i = 0; i < adev->gfx.num_gfx_rings; i++)
-			adev->gfx.gfx_ring[i].ready = false;
+			adev->gfx.gfx_ring[i].sched.ready = false;
 	}
 	WREG32_SOC15(GC, 0, mmCP_ME_CNTL, tmp);
 	udelay(50);
@@ -2727,7 +2727,7 @@ static int gfx_v9_0_cp_gfx_resume(struct amdgpu_device *adev)
 
 	/* start the ring */
 	gfx_v9_0_cp_gfx_start(adev);
-	ring->ready = true;
+	ring->sched.ready = true;
 
 	return 0;
 }
@@ -2742,8 +2742,8 @@ static void gfx_v9_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
 		WREG32_SOC15(GC, 0, mmCP_MEC_CNTL,
 			(CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
 		for (i = 0; i < adev->gfx.num_compute_rings; i++)
-			adev->gfx.compute_ring[i].ready = false;
-		adev->gfx.kiq.ring.ready = false;
+			adev->gfx.compute_ring[i].sched.ready = false;
+		adev->gfx.kiq.ring.sched.ready = false;
 	}
 	udelay(50);
 }
@@ -2866,11 +2866,9 @@ static int gfx_v9_0_kiq_kcq_enable(struct amdgpu_device *adev)
 		amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
 	}
 
-	r = amdgpu_ring_test_ring(kiq_ring);
-	if (r) {
+	r = amdgpu_ring_test_helper(kiq_ring);
+	if (r)
 		DRM_ERROR("KCQ enable failed\n");
-		kiq_ring->ready = false;
-	}
 
 	return r;
 }
@@ -3249,7 +3247,7 @@ static int gfx_v9_0_kiq_resume(struct amdgpu_device *adev)
 	amdgpu_bo_kunmap(ring->mqd_obj);
 	ring->mqd_ptr = NULL;
 	amdgpu_bo_unreserve(ring->mqd_obj);
-	ring->ready = true;
+	ring->sched.ready = true;
 	return 0;
 }
 
@@ -3314,19 +3312,13 @@ static int gfx_v9_0_cp_resume(struct amdgpu_device *adev)
 		return r;
 
 	ring = &adev->gfx.gfx_ring[0];
-	r = amdgpu_ring_test_ring(ring);
-	if (r) {
-		ring->ready = false;
+	r = amdgpu_ring_test_helper(ring);
+	if (r)
 		return r;
-	}
 
 	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
 		ring = &adev->gfx.compute_ring[i];
-
-		ring->ready = true;
-		r = amdgpu_ring_test_ring(ring);
-		if (r)
-			ring->ready = false;
+		amdgpu_ring_test_helper(ring);
 	}
 
 	gfx_v9_0_enable_gui_idle_interrupt(adev, true);
@@ -3391,7 +3383,7 @@ static int gfx_v9_0_kcq_disable(struct amdgpu_device *adev)
 		amdgpu_ring_write(kiq_ring, 0);
 		amdgpu_ring_write(kiq_ring, 0);
 	}
-	r = amdgpu_ring_test_ring(kiq_ring);
+	r = amdgpu_ring_test_helper(kiq_ring);
 	if (r)
 		DRM_ERROR("KCQ disable failed\n");
 
@@ -381,7 +381,7 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev,
 		struct amdgpu_vmhub *hub = &adev->vmhub[i];
 		u32 tmp = gmc_v9_0_get_invalidate_req(vmid, flush_type);
 
-		if (adev->gfx.kiq.ring.ready &&
+		if (adev->gfx.kiq.ring.sched.ready &&
 		    (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
 		    !adev->in_gpu_reset) {
 			r = amdgpu_kiq_reg_write_reg_wait(adev, hub->vm_inv_eng0_req + eng,
@@ -349,8 +349,8 @@ static void sdma_v2_4_gfx_stop(struct amdgpu_device *adev)
 		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
 		WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
 	}
-	sdma0->ready = false;
-	sdma1->ready = false;
+	sdma0->sched.ready = false;
+	sdma1->sched.ready = false;
 }
 
 /**
@@ -471,17 +471,15 @@ static int sdma_v2_4_gfx_resume(struct amdgpu_device *adev)
 		/* enable DMA IBs */
 		WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
 
-		ring->ready = true;
+		ring->sched.ready = true;
 	}
 
 	sdma_v2_4_enable(adev, true);
 	for (i = 0; i < adev->sdma.num_instances; i++) {
 		ring = &adev->sdma.instance[i].ring;
-		r = amdgpu_ring_test_ring(ring);
-		if (r) {
-			ring->ready = false;
+		r = amdgpu_ring_test_helper(ring);
+		if (r)
 			return r;
-		}
 
 		if (adev->mman.buffer_funcs_ring == ring)
 			amdgpu_ttm_set_buffer_funcs_status(adev, true);
@@ -523,8 +523,8 @@ static void sdma_v3_0_gfx_stop(struct amdgpu_device *adev)
 		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
 		WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
 	}
-	sdma0->ready = false;
-	sdma1->ready = false;
+	sdma0->sched.ready = false;
+	sdma1->sched.ready = false;
 }
 
 /**
@@ -739,7 +739,7 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev)
 		/* enable DMA IBs */
 		WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
 
-		ring->ready = true;
+		ring->sched.ready = true;
 	}
 
 	/* unhalt the MEs */
@@ -749,11 +749,9 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev)
 
 	for (i = 0; i < adev->sdma.num_instances; i++) {
 		ring = &adev->sdma.instance[i].ring;
-		r = amdgpu_ring_test_ring(ring);
-		if (r) {
-			ring->ready = false;
+		r = amdgpu_ring_test_helper(ring);
+		if (r)
 			return r;
-		}
 
 		if (adev->mman.buffer_funcs_ring == ring)
 			amdgpu_ttm_set_buffer_funcs_status(adev, true);
@@ -634,8 +634,8 @@ static void sdma_v4_0_gfx_stop(struct amdgpu_device *adev)
 		WREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL, ib_cntl);
 	}
 
-	sdma0->ready = false;
-	sdma1->ready = false;
+	sdma0->sched.ready = false;
+	sdma1->sched.ready = false;
 }
 
 /**
@@ -675,8 +675,8 @@ static void sdma_v4_0_page_stop(struct amdgpu_device *adev)
 		WREG32_SDMA(i, mmSDMA0_PAGE_IB_CNTL, ib_cntl);
 	}
 
-	sdma0->ready = false;
-	sdma1->ready = false;
+	sdma0->sched.ready = false;
+	sdma1->sched.ready = false;
 }
 
 /**
@@ -863,7 +863,7 @@ static void sdma_v4_0_gfx_resume(struct amdgpu_device *adev, unsigned int i)
 	/* enable DMA IBs */
 	WREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL, ib_cntl);
 
-	ring->ready = true;
+	ring->sched.ready = true;
 }
 
 /**
@@ -956,7 +956,7 @@ static void sdma_v4_0_page_resume(struct amdgpu_device *adev, unsigned int i)
 	/* enable DMA IBs */
 	WREG32_SDMA(i, mmSDMA0_PAGE_IB_CNTL, ib_cntl);
 
-	ring->ready = true;
+	ring->sched.ready = true;
 }
 
 static void
@@ -1144,21 +1144,17 @@ static int sdma_v4_0_start(struct amdgpu_device *adev)
 	for (i = 0; i < adev->sdma.num_instances; i++) {
 		ring = &adev->sdma.instance[i].ring;
 
-		r = amdgpu_ring_test_ring(ring);
-		if (r) {
-			ring->ready = false;
+		r = amdgpu_ring_test_helper(ring);
+		if (r)
 			return r;
-		}
 
 		if (adev->sdma.has_page_queue) {
 			struct amdgpu_ring *page = &adev->sdma.instance[i].page;
 
-			r = amdgpu_ring_test_ring(page);
-			if (r) {
-				page->ready = false;
+			r = amdgpu_ring_test_helper(page);
+			if (r)
 				return r;
-			}
 		}
 
 		if (adev->mman.buffer_funcs_ring == ring)
 			amdgpu_ttm_set_buffer_funcs_status(adev, true);
@@ -122,7 +122,7 @@ static void si_dma_stop(struct amdgpu_device *adev)
 
 		if (adev->mman.buffer_funcs_ring == ring)
 			amdgpu_ttm_set_buffer_funcs_status(adev, false);
-		ring->ready = false;
+		ring->sched.ready = false;
 	}
 }
 
@@ -175,13 +175,11 @@ static int si_dma_start(struct amdgpu_device *adev)
 		WREG32(DMA_RB_WPTR + sdma_offsets[i], lower_32_bits(ring->wptr) << 2);
 		WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl | DMA_RB_ENABLE);
 
-		ring->ready = true;
+		ring->sched.ready = true;
 
-		r = amdgpu_ring_test_ring(ring);
-		if (r) {
-			ring->ready = false;
+		r = amdgpu_ring_test_helper(ring);
+		if (r)
 			return r;
-		}
 
 		if (adev->mman.buffer_funcs_ring == ring)
 			amdgpu_ttm_set_buffer_funcs_status(adev, true);
@@ -162,12 +162,9 @@ static int uvd_v4_2_hw_init(void *handle)
 	uvd_v4_2_enable_mgcg(adev, true);
 	amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
 
-	ring->ready = true;
-	r = amdgpu_ring_test_ring(ring);
-	if (r) {
-		ring->ready = false;
+	r = amdgpu_ring_test_helper(ring);
+	if (r)
 		goto done;
-	}
 
 	r = amdgpu_ring_alloc(ring, 10);
 	if (r) {
@@ -218,7 +215,7 @@ static int uvd_v4_2_hw_fini(void *handle)
 	if (RREG32(mmUVD_STATUS) != 0)
 		uvd_v4_2_stop(adev);
 
-	ring->ready = false;
+	ring->sched.ready = false;
 
 	return 0;
 }
@@ -158,12 +158,9 @@ static int uvd_v5_0_hw_init(void *handle)
 	uvd_v5_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
 	uvd_v5_0_enable_mgcg(adev, true);
 
-	ring->ready = true;
-	r = amdgpu_ring_test_ring(ring);
-	if (r) {
-		ring->ready = false;
+	r = amdgpu_ring_test_helper(ring);
+	if (r)
 		goto done;
-	}
 
 	r = amdgpu_ring_alloc(ring, 10);
 	if (r) {
@@ -215,7 +212,7 @@ static int uvd_v5_0_hw_fini(void *handle)
 	if (RREG32(mmUVD_STATUS) != 0)
 		uvd_v5_0_stop(adev);
 
-	ring->ready = false;
+	ring->sched.ready = false;
 
 	return 0;
 }
@@ -476,12 +476,9 @@ static int uvd_v6_0_hw_init(void *handle)
 	uvd_v6_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
 	uvd_v6_0_enable_mgcg(adev, true);
 
-	ring->ready = true;
-	r = amdgpu_ring_test_ring(ring);
-	if (r) {
-		ring->ready = false;
+	r = amdgpu_ring_test_helper(ring);
+	if (r)
 		goto done;
-	}
 
 	r = amdgpu_ring_alloc(ring, 10);
 	if (r) {
@@ -513,14 +510,11 @@ static int uvd_v6_0_hw_init(void *handle)
 	if (uvd_v6_0_enc_support(adev)) {
 		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
 			ring = &adev->uvd.inst->ring_enc[i];
-			ring->ready = true;
-			r = amdgpu_ring_test_ring(ring);
-			if (r) {
-				ring->ready = false;
+			r = amdgpu_ring_test_helper(ring);
+			if (r)
 				goto done;
-			}
 		}
 	}
 
 done:
 	if (!r) {
@@ -548,7 +542,7 @@ static int uvd_v6_0_hw_fini(void *handle)
 	if (RREG32(mmUVD_STATUS) != 0)
 		uvd_v6_0_stop(adev);
 
-	ring->ready = false;
+	ring->sched.ready = false;
 
 	return 0;
 }
@@ -540,12 +540,9 @@ static int uvd_v7_0_hw_init(void *handle)
 		ring = &adev->uvd.inst[j].ring;
 
 		if (!amdgpu_sriov_vf(adev)) {
-			ring->ready = true;
-			r = amdgpu_ring_test_ring(ring);
-			if (r) {
-				ring->ready = false;
+			r = amdgpu_ring_test_helper(ring);
+			if (r)
 				goto done;
-			}
 
 			r = amdgpu_ring_alloc(ring, 10);
 			if (r) {
@@ -582,14 +579,11 @@ static int uvd_v7_0_hw_init(void *handle)
 
 		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
 			ring = &adev->uvd.inst[j].ring_enc[i];
-			ring->ready = true;
-			r = amdgpu_ring_test_ring(ring);
-			if (r) {
-				ring->ready = false;
+			r = amdgpu_ring_test_helper(ring);
+			if (r)
 				goto done;
-			}
 		}
 	}
 done:
 	if (!r)
 		DRM_INFO("UVD and UVD ENC initialized successfully.\n");
@@ -619,7 +613,7 @@ static int uvd_v7_0_hw_fini(void *handle)
 	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
 		if (adev->uvd.harvest_config & (1 << i))
 			continue;
-		adev->uvd.inst[i].ring.ready = false;
+		adev->uvd.inst[i].ring.sched.ready = false;
 	}
 
 	return 0;
@@ -463,15 +463,11 @@ static int vce_v2_0_hw_init(void *handle)
 
 	amdgpu_asic_set_vce_clocks(adev, 10000, 10000);
 	vce_v2_0_enable_mgcg(adev, true, false);
-	for (i = 0; i < adev->vce.num_rings; i++)
-		adev->vce.ring[i].ready = false;
 
 	for (i = 0; i < adev->vce.num_rings; i++) {
-		r = amdgpu_ring_test_ring(&adev->vce.ring[i]);
+		r = amdgpu_ring_test_helper(&adev->vce.ring[i]);
 		if (r)
 			return r;
-		else
-			adev->vce.ring[i].ready = true;
 	}
 
 	DRM_INFO("VCE initialized successfully.\n");
@@ -474,15 +474,10 @@ static int vce_v3_0_hw_init(void *handle)
 
 	amdgpu_asic_set_vce_clocks(adev, 10000, 10000);
 
-	for (i = 0; i < adev->vce.num_rings; i++)
-		adev->vce.ring[i].ready = false;
-
 	for (i = 0; i < adev->vce.num_rings; i++) {
-		r = amdgpu_ring_test_ring(&adev->vce.ring[i]);
+		r = amdgpu_ring_test_helper(&adev->vce.ring[i]);
 		if (r)
 			return r;
-		else
-			adev->vce.ring[i].ready = true;
 	}
 
 	DRM_INFO("VCE initialized successfully.\n");
@@ -519,15 +519,10 @@ static int vce_v4_0_hw_init(void *handle)
 	if (r)
 		return r;
 
-	for (i = 0; i < adev->vce.num_rings; i++)
-		adev->vce.ring[i].ready = false;
-
 	for (i = 0; i < adev->vce.num_rings; i++) {
-		r = amdgpu_ring_test_ring(&adev->vce.ring[i]);
+		r = amdgpu_ring_test_helper(&adev->vce.ring[i]);
 		if (r)
 			return r;
-		else
-			adev->vce.ring[i].ready = true;
 	}
 
 	DRM_INFO("VCE initialized successfully.\n");
@@ -549,7 +544,7 @@ static int vce_v4_0_hw_fini(void *handle)
 	}
 
 	for (i = 0; i < adev->vce.num_rings; i++)
-		adev->vce.ring[i].ready = false;
+		adev->vce.ring[i].sched.ready = false;
 
 	return 0;
 }
@@ -176,30 +176,22 @@ static int vcn_v1_0_hw_init(void *handle)
 	struct amdgpu_ring *ring = &adev->vcn.ring_dec;
 	int i, r;
 
-	ring->ready = true;
-	r = amdgpu_ring_test_ring(ring);
-	if (r) {
-		ring->ready = false;
+	r = amdgpu_ring_test_helper(ring);
+	if (r)
 		goto done;
-	}
 
 	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
 		ring = &adev->vcn.ring_enc[i];
-		ring->ready = true;
-		r = amdgpu_ring_test_ring(ring);
-		if (r) {
-			ring->ready = false;
+		ring->sched.ready = true;
+		r = amdgpu_ring_test_helper(ring);
+		if (r)
 			goto done;
-		}
 	}
 
 	ring = &adev->vcn.ring_jpeg;
-	ring->ready = true;
-	r = amdgpu_ring_test_ring(ring);
-	if (r) {
-		ring->ready = false;
+	r = amdgpu_ring_test_helper(ring);
+	if (r)
 		goto done;
-	}
 
 done:
 	if (!r)
@@ -224,7 +216,7 @@ static int vcn_v1_0_hw_fini(void *handle)
 	if (RREG32_SOC15(VCN, 0, mmUVD_STATUS))
 		vcn_v1_0_stop(adev);
 
-	ring->ready = false;
+	ring->sched.ready = false;
 
 	return 0;
 }