drm/i915/guc: Drop pin count check trick between sched_disable and re-pin
Drop pin count check trick between a sched_disable and re-pin, now rely on the lock and counter of the number of committed requests to determine if scheduling should be disabled on the context. Reviewed-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com> Signed-off-by: Matthew Brost <matthew.brost@intel.com> Signed-off-by: John Harrison <John.C.Harrison@Intel.com> Link: https://patchwork.freedesktop.org/patch/msgid/20210909164744.31249-20-matthew.brost@intel.com
This commit is contained in:
parent
1424ba81a2
commit
5b116c17e6
|
@ -169,6 +169,8 @@ struct intel_context {
|
|||
struct list_head fences;
|
||||
/* GuC context blocked fence */
|
||||
struct i915_sw_fence blocked;
|
||||
/* GuC committed requests */
|
||||
int number_committed_requests;
|
||||
} guc_state;
|
||||
|
||||
struct {
|
||||
|
|
|
@ -249,6 +249,25 @@ static inline void decr_context_blocked(struct intel_context *ce)
|
|||
ce->guc_state.sched_state -= SCHED_STATE_BLOCKED;
|
||||
}
|
||||
|
||||
static inline bool context_has_committed_requests(struct intel_context *ce)
|
||||
{
|
||||
return !!ce->guc_state.number_committed_requests;
|
||||
}
|
||||
|
||||
static inline void incr_context_committed_requests(struct intel_context *ce)
|
||||
{
|
||||
lockdep_assert_held(&ce->guc_state.lock);
|
||||
++ce->guc_state.number_committed_requests;
|
||||
GEM_BUG_ON(ce->guc_state.number_committed_requests < 0);
|
||||
}
|
||||
|
||||
static inline void decr_context_committed_requests(struct intel_context *ce)
|
||||
{
|
||||
lockdep_assert_held(&ce->guc_state.lock);
|
||||
--ce->guc_state.number_committed_requests;
|
||||
GEM_BUG_ON(ce->guc_state.number_committed_requests < 0);
|
||||
}
|
||||
|
||||
static inline bool context_guc_id_invalid(struct intel_context *ce)
|
||||
{
|
||||
return ce->guc_id == GUC_INVALID_LRC_ID;
|
||||
|
@ -1766,24 +1785,18 @@ static void guc_context_sched_disable(struct intel_context *ce)
|
|||
spin_lock_irqsave(&ce->guc_state.lock, flags);
|
||||
|
||||
/*
|
||||
* We have to check if the context has been disabled by another thread.
|
||||
* We also have to check if the context has been pinned again as another
|
||||
* pin operation is allowed to pass this function. Checking the pin
|
||||
* count, within ce->guc_state.lock, synchronizes this function with
|
||||
* guc_request_alloc ensuring a request doesn't slip through the
|
||||
* 'context_pending_disable' fence. Checking within the spin lock (can't
|
||||
* sleep) ensures another process doesn't pin this context and generate
|
||||
* a request before we set the 'context_pending_disable' flag here.
|
||||
* We have to check if the context has been disabled by another thread,
|
||||
* check if submission has been disabled to seal a race with reset and
|
||||
* finally check if any more requests have been committed to the
|
||||
* context ensuring that a request doesn't slip through the
|
||||
* 'context_pending_disable' fence.
|
||||
*/
|
||||
if (unlikely(!context_enabled(ce) || submission_disabled(guc))) {
|
||||
if (unlikely(!context_enabled(ce) || submission_disabled(guc) ||
|
||||
context_has_committed_requests(ce))) {
|
||||
clr_context_enabled(ce);
|
||||
spin_unlock_irqrestore(&ce->guc_state.lock, flags);
|
||||
goto unpin;
|
||||
}
|
||||
if (unlikely(atomic_add_unless(&ce->pin_count, -2, 2))) {
|
||||
spin_unlock_irqrestore(&ce->guc_state.lock, flags);
|
||||
return;
|
||||
}
|
||||
guc_id = prep_context_pending_disable(ce);
|
||||
|
||||
spin_unlock_irqrestore(&ce->guc_state.lock, flags);
|
||||
|
@ -1813,6 +1826,7 @@ static void __guc_context_destroy(struct intel_context *ce)
|
|||
ce->guc_prio_count[GUC_CLIENT_PRIORITY_HIGH] ||
|
||||
ce->guc_prio_count[GUC_CLIENT_PRIORITY_KMD_NORMAL] ||
|
||||
ce->guc_prio_count[GUC_CLIENT_PRIORITY_NORMAL]);
|
||||
GEM_BUG_ON(ce->guc_state.number_committed_requests);
|
||||
|
||||
lrc_fini(ce);
|
||||
intel_context_fini(ce);
|
||||
|
@ -2043,6 +2057,10 @@ static void remove_from_context(struct i915_request *rq)
|
|||
|
||||
spin_unlock_irq(&ce->guc_active.lock);
|
||||
|
||||
spin_lock_irq(&ce->guc_state.lock);
|
||||
decr_context_committed_requests(ce);
|
||||
spin_unlock_irq(&ce->guc_state.lock);
|
||||
|
||||
atomic_dec(&ce->guc_id_ref);
|
||||
i915_request_notify_execute_cb_imm(rq);
|
||||
}
|
||||
|
@ -2193,15 +2211,7 @@ out:
|
|||
* schedule enable or context registration if either G2H is pending
|
||||
* respectively. Once a G2H returns, the fence is released that is
|
||||
* blocking these requests (see guc_signal_context_fence).
|
||||
*
|
||||
* We can safely check the below fields outside of the lock as it isn't
|
||||
* possible for these fields to transition from being clear to set but
|
||||
* converse is possible, hence the need for the check within the lock.
|
||||
*/
|
||||
if (likely(!context_wait_for_deregister_to_register(ce) &&
|
||||
!context_pending_disable(ce)))
|
||||
return 0;
|
||||
|
||||
spin_lock_irqsave(&ce->guc_state.lock, flags);
|
||||
if (context_wait_for_deregister_to_register(ce) ||
|
||||
context_pending_disable(ce)) {
|
||||
|
@ -2210,6 +2220,7 @@ out:
|
|||
|
||||
list_add_tail(&rq->guc_fence_link, &ce->guc_state.fences);
|
||||
}
|
||||
incr_context_committed_requests(ce);
|
||||
spin_unlock_irqrestore(&ce->guc_state.lock, flags);
|
||||
|
||||
return 0;
|
||||
|
|
Loading…
Reference in New Issue