drm/i915/selftests: Flush all active callbacks

Flushing the outer i915_active is not enough, as we need the barrier to
be applied across all the active dma_fence callbacks. So we must
serialise with each outstanding fence.

Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=112096
References: f79520bb33 ("drm/i915/selftests: Synchronize checking active status with retirement")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Acked-by: Andi Shyti <andi.shyti@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191101181022.25633-1-chris@chris-wilson.co.uk
This commit is contained in:
Chris Wilson 2019-11-01 18:10:22 +00:00
parent 9278bbb6e4
commit 38813767c7
3 changed files with 35 additions and 3 deletions

View File

@ -53,9 +53,7 @@ static struct pulse *pulse_create(void)
static void pulse_unlock_wait(struct pulse *p)
{
/*
 * Barrier against a concurrent retirement: taking and dropping
 * active.mutex serialises with any retire callback currently
 * running under that lock.
 */
mutex_lock(&p->active.mutex);
mutex_unlock(&p->active.mutex);
/* ... and with a retirement deferred to the active worker */
flush_work(&p->active.work);
/*
 * NOTE(review): i915_active_unlock_wait() (defined below in
 * i915_active.c) itself performs the mutex lock/unlock and
 * flush_work barriers above, in addition to waiting on every
 * outstanding fence callback. The preceding three statements look
 * like diff residue (the removed pre-patch body) flattened into
 * this view — confirm against the real tree before relying on it.
 */
i915_active_unlock_wait(&p->active);
}
static int __live_idle_pulse(struct intel_engine_cs *engine,

View File

@ -215,5 +215,6 @@ void i915_active_acquire_barrier(struct i915_active *ref);
/* Presumably transfers pending engine barriers onto @rq — confirm in i915_active.c */
void i915_request_add_active_barriers(struct i915_request *rq);
/* Debug helper: print the state of @ref to @m (name-based; verify in i915_active.c) */
void i915_active_print(struct i915_active *ref, struct drm_printer *m);
/*
 * Selftest helper: wait until every outstanding fence callback and the
 * retire callback of @ref have completed (see definition in i915_active.c).
 */
void i915_active_unlock_wait(struct i915_active *ref);
#endif /* _I915_ACTIVE_H_ */

View File

@ -250,3 +250,36 @@ void i915_active_print(struct i915_active *ref, struct drm_printer *m)
i915_active_release(ref);
}
}
/*
 * Wait for the current holder of @lock (if any) to release it.
 *
 * Acquiring and immediately dropping the lock acts as a barrier: any
 * critical section that was in progress when we arrived must have
 * completed before spin_lock_irq() returns. The lock is a dma_fence
 * callback lock here, so the irq-disabling variants are used.
 */
static void spin_unlock_wait(spinlock_t *lock)
{
spin_lock_irq(lock);
spin_unlock_irq(lock);
}
/*
 * i915_active_unlock_wait - flush all active callbacks for @ref.
 *
 * Flushing the outer i915_active is not enough: the barrier must be
 * applied across every active dma_fence callback, so serialise with
 * each outstanding fence by spinning on its callback lock, and then
 * with the retire callback itself (which runs either under ref->mutex
 * or on the deferred worker).
 */
void i915_active_unlock_wait(struct i915_active *ref)
{
/* Only walk the tree if the active is still busy; also pins @ref */
if (i915_active_acquire_if_busy(ref)) {
struct active_node *it, *n;
/* RCU protects the fence pointers while we traverse the tree */
rcu_read_lock();
rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
struct dma_fence *f;
/* Wait for all active callbacks */
f = rcu_dereference(it->base.fence);
if (f)
spin_unlock_wait(f->lock);
}
rcu_read_unlock();
/* Drop the acquire-if-busy reference taken above */
i915_active_release(ref);
}
/* And wait for the retire callback */
mutex_lock(&ref->mutex);
mutex_unlock(&ref->mutex);
/* ... which may have been on a thread instead */
flush_work(&ref->work);
}