drm/amdgpu/vcn: fix race condition issue for dpg unpause mode switch
We can't rely only on the enc fence to decide when to switch to dpg unpause mode, since an enc thread may not schedule a fence in time when multiple threads are running. v3: 1. Rename enc_submission_cnt to dpg_enc_submission_cnt 2. Add dpg_enc_submission_cnt check in idle_work_handler v4: Remove extra counter check, and decrement the counter before the idle work is scheduled Signed-off-by: James Zhu <James.Zhu@amd.com> Reviewed-by: Leo Liu <leo.liu@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
This commit is contained in:
parent
bd718638b8
commit
e3b41d82da
|
@ -65,6 +65,8 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
|
|||
INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler);
|
||||
mutex_init(&adev->vcn.vcn_pg_lock);
|
||||
atomic_set(&adev->vcn.total_submission_cnt, 0);
|
||||
for (i = 0; i < adev->vcn.num_vcn_inst; i++)
|
||||
atomic_set(&adev->vcn.inst[i].dpg_enc_submission_cnt, 0);
|
||||
|
||||
switch (adev->asic_type) {
|
||||
case CHIP_RAVEN:
|
||||
|
@ -298,7 +300,8 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
|
|||
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
|
||||
struct dpg_pause_state new_state;
|
||||
|
||||
if (fence[j])
|
||||
if (fence[j] ||
|
||||
unlikely(atomic_read(&adev->vcn.inst[j].dpg_enc_submission_cnt)))
|
||||
new_state.fw_based = VCN_DPG_STATE__PAUSE;
|
||||
else
|
||||
new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
|
||||
|
@ -333,19 +336,22 @@ void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
|
|||
|
||||
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
|
||||
struct dpg_pause_state new_state;
|
||||
unsigned int fences = 0;
|
||||
unsigned int i;
|
||||
|
||||
for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
|
||||
fences += amdgpu_fence_count_emitted(&adev->vcn.inst[ring->me].ring_enc[i]);
|
||||
if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC) {
|
||||
atomic_inc(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt);
|
||||
new_state.fw_based = VCN_DPG_STATE__PAUSE;
|
||||
} else {
|
||||
unsigned int fences = 0;
|
||||
unsigned int i;
|
||||
|
||||
for (i = 0; i < adev->vcn.num_enc_rings; ++i)
|
||||
fences += amdgpu_fence_count_emitted(&adev->vcn.inst[ring->me].ring_enc[i]);
|
||||
|
||||
if (fences || atomic_read(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt))
|
||||
new_state.fw_based = VCN_DPG_STATE__PAUSE;
|
||||
else
|
||||
new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
|
||||
}
|
||||
if (fences)
|
||||
new_state.fw_based = VCN_DPG_STATE__PAUSE;
|
||||
else
|
||||
new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
|
||||
|
||||
if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
|
||||
new_state.fw_based = VCN_DPG_STATE__PAUSE;
|
||||
|
||||
adev->vcn.pause_dpg_mode(adev, ring->me, &new_state);
|
||||
}
|
||||
|
@ -354,6 +360,10 @@ void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
|
|||
|
||||
void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
|
||||
{
|
||||
if (ring->adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
|
||||
ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
|
||||
atomic_dec(&ring->adev->vcn.inst[ring->me].dpg_enc_submission_cnt);
|
||||
|
||||
atomic_dec(&ring->adev->vcn.total_submission_cnt);
|
||||
|
||||
schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
|
||||
|
|
|
@ -183,6 +183,7 @@ struct amdgpu_vcn_inst {
|
|||
void *dpg_sram_cpu_addr;
|
||||
uint64_t dpg_sram_gpu_addr;
|
||||
uint32_t *dpg_sram_curr_addr;
|
||||
atomic_t dpg_enc_submission_cnt;
|
||||
};
|
||||
|
||||
struct amdgpu_vcn {
|
||||
|
|
Loading…
Reference in New Issue