drm/amd/sched: fix job tear down order v2

Move the trace before we signal the scheduler fence and drop the
scheduler fence reference directly before we free the job.

v2: keep extra s_fence reference

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Liu, Monk <Monk.Liu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Christian König, 2017-10-13 10:58:15 +02:00, committed by Alex Deucher
parent e1d515052f
commit 7fd5e36cae
1 changed file with 2 additions and 3 deletions
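The hunks below only move two reference operations, so here is a minimal userspace sketch of the ordering the patch establishes. All names in it (toy_fence, toy_job, process_job, job_finish) are invented for this illustration and are not part of the driver; they merely mirror the shape of the scheduler code: the job keeps its s_fence, the hardware fence callback takes a temporary extra reference around signalling and tracing, and the finish work drops the job's reference only directly before the job itself is freed.

/* Toy userspace model of the reference order this patch enforces.
 * All names here are made up for illustration only. */
#include <stdio.h>
#include <stdlib.h>

struct toy_fence {
	int refcount;
	int signaled;
};

struct toy_job {
	struct toy_fence *s_fence;	/* the job keeps its fence until it is freed */
};

static struct toy_fence *fence_get(struct toy_fence *f)
{
	f->refcount++;
	return f;
}

static void fence_put(struct toy_fence *f)
{
	if (--f->refcount == 0) {
		printf("fence freed\n");
		free(f);
	}
}

/* Mirrors amd_sched_process_job(): take an extra reference so the fence
 * stays valid while we signal and trace it, even if the finish work runs
 * and drops the job's reference in the meantime. */
static void process_job(struct toy_job *job)
{
	struct toy_fence *f = fence_get(job->s_fence);

	f->signaled = 1;			/* stands in for amd_sched_fence_finished() */
	printf("trace: fence signaled=%d\n", f->signaled);
	fence_put(f);				/* drop only the extra reference */
}

/* Mirrors amd_sched_job_finish(): the job's own reference is dropped
 * directly before the job is freed. */
static void job_finish(struct toy_job *job)
{
	fence_put(job->s_fence);
	free(job);				/* stands in for sched->ops->free_job() */
}

int main(void)
{
	struct toy_job *job = malloc(sizeof(*job));
	struct toy_fence *f = malloc(sizeof(*f));

	f->refcount = 1;			/* the job's reference */
	f->signaled = 0;
	job->s_fence = f;

	process_job(job);			/* hardware fence callback */
	job_finish(job);			/* delayed finish work */
	return 0;
}

The pairing is the point: the get in the callback covers the signal/trace path, while the job's own reference now lives exactly as long as the job does, so the fence is only released right before free_job.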

@@ -406,6 +406,7 @@ static void amd_sched_job_finish(struct work_struct *work)
 		schedule_delayed_work(&next->work_tdr, sched->timeout);
 	}
 	spin_unlock(&sched->job_list_lock);
+	dma_fence_put(&s_job->s_fence->finished);
 	sched->ops->free_job(s_job);
 }
@@ -586,6 +587,7 @@ static void amd_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
 		container_of(cb, struct amd_sched_fence, cb);
 	struct amd_gpu_scheduler *sched = s_fence->sched;
 
+	dma_fence_get(&s_fence->finished);
 	atomic_dec(&sched->hw_rq_count);
 	amd_sched_fence_finished(s_fence);
@@ -638,9 +640,6 @@ static int amd_sched_main(void *param)
 		fence = sched->ops->run_job(sched_job);
 		amd_sched_fence_scheduled(s_fence);
 
-		/* amd_sched_process_job drops the job's reference of the fence. */
-		sched_job->s_fence = NULL;
-
 		if (fence) {
 			s_fence->parent = dma_fence_get(fence);
 			r = dma_fence_add_callback(fence, &s_fence->cb,