drm/scheduler: Remove obsolete spinlock.
This spinlock is superfluous; any call to drm_sched_entity_push_job should already be under a lock, together with a matching drm_sched_job_init, to match the order of insertion into the queue with the job's fence sequence number. v2: Improve patch description. Add function documentation describing the locking considerations. Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com> Acked-by: Chunming Zhou <david1.zhou@amd.com> Reviewed-by: Christian König <christian.koenig@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
This commit is contained in:
parent
b9245b9498
commit
563e1e664d
|
@ -139,7 +139,6 @@ int drm_sched_entity_init(struct drm_gpu_scheduler *sched,
|
||||||
entity->last_scheduled = NULL;
|
entity->last_scheduled = NULL;
|
||||||
|
|
||||||
spin_lock_init(&entity->rq_lock);
|
spin_lock_init(&entity->rq_lock);
|
||||||
spin_lock_init(&entity->queue_lock);
|
|
||||||
spsc_queue_init(&entity->job_queue);
|
spsc_queue_init(&entity->job_queue);
|
||||||
|
|
||||||
atomic_set(&entity->fence_seq, 0);
|
atomic_set(&entity->fence_seq, 0);
|
||||||
|
@ -413,6 +412,10 @@ drm_sched_entity_pop_job(struct drm_sched_entity *entity)
|
||||||
*
|
*
|
||||||
* @sched_job The pointer to job required to submit
|
* @sched_job The pointer to job required to submit
|
||||||
*
|
*
|
||||||
|
* Note: To guarantee that the order of insertion to queue matches
|
||||||
|
* the job's fence sequence number this function should be
|
||||||
|
* called with drm_sched_job_init under common lock.
|
||||||
|
*
|
||||||
* Returns 0 for success, negative error code otherwise.
|
* Returns 0 for success, negative error code otherwise.
|
||||||
*/
|
*/
|
||||||
void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
|
void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
|
||||||
|
@ -423,11 +426,8 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
|
||||||
|
|
||||||
trace_drm_sched_job(sched_job, entity);
|
trace_drm_sched_job(sched_job, entity);
|
||||||
|
|
||||||
spin_lock(&entity->queue_lock);
|
|
||||||
first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);
|
first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);
|
||||||
|
|
||||||
spin_unlock(&entity->queue_lock);
|
|
||||||
|
|
||||||
/* first job wakes up scheduler */
|
/* first job wakes up scheduler */
|
||||||
if (first) {
|
if (first) {
|
||||||
/* Add the entity to the run queue */
|
/* Add the entity to the run queue */
|
||||||
|
@ -593,7 +593,12 @@ void drm_sched_job_recovery(struct drm_gpu_scheduler *sched)
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL(drm_sched_job_recovery);
|
EXPORT_SYMBOL(drm_sched_job_recovery);
|
||||||
|
|
||||||
/* init a sched_job with basic field */
|
/**
|
||||||
|
* Init a sched_job with basic field
|
||||||
|
*
|
||||||
|
* Note: Refer to drm_sched_entity_push_job documentation
|
||||||
|
* for locking considerations.
|
||||||
|
*/
|
||||||
int drm_sched_job_init(struct drm_sched_job *job,
|
int drm_sched_job_init(struct drm_sched_job *job,
|
||||||
struct drm_gpu_scheduler *sched,
|
struct drm_gpu_scheduler *sched,
|
||||||
struct drm_sched_entity *entity,
|
struct drm_sched_entity *entity,
|
||||||
|
|
|
@ -56,7 +56,6 @@ struct drm_sched_entity {
|
||||||
spinlock_t rq_lock;
|
spinlock_t rq_lock;
|
||||||
struct drm_gpu_scheduler *sched;
|
struct drm_gpu_scheduler *sched;
|
||||||
|
|
||||||
spinlock_t queue_lock;
|
|
||||||
struct spsc_queue job_queue;
|
struct spsc_queue job_queue;
|
||||||
|
|
||||||
atomic_t fence_seq;
|
atomic_t fence_seq;
|
||||||
|
|
Loading…
Reference in New Issue