From 36193acd54bdf1b790bfebfb132e37ece4af4982 Mon Sep 17 00:00:00 2001
From: Mika Kuoppala
Date: Tue, 17 Jan 2017 17:59:02 +0200
Subject: [PATCH] drm/i915: Introduce engine_skip_context

Add a new function for skipping all pending requests for a context
in order to make engine reset flow more readable.

Cc: Chris Wilson
Cc: Tvrtko Ursulin
Signed-off-by: Mika Kuoppala
Link: http://patchwork.freedesktop.org/patch/msgid/1484668747-9120-2-git-send-email-mika.kuoppala@intel.com
Reviewed-by: Chris Wilson
Signed-off-by: Chris Wilson
---
 drivers/gpu/drm/i915/i915_gem.c | 45 ++++++++++++++++++++-------------
 1 file changed, 27 insertions(+), 18 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 249a6673b433..a83a4cc52fc9 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2604,6 +2604,7 @@ i915_gem_find_active_request(struct intel_engine_cs *engine)
 		if (__i915_gem_request_completed(request))
 			continue;
 
+		GEM_BUG_ON(request->engine != engine);
 		return request;
 	}
 
@@ -2622,7 +2623,7 @@ void i915_gem_reset_prepare(struct drm_i915_private *dev_priv)
 	i915_gem_revoke_fences(dev_priv);
 }
 
-static void reset_request(struct drm_i915_gem_request *request)
+static void skip_request(struct drm_i915_gem_request *request)
 {
 	void *vaddr = request->ring->vaddr;
 	u32 head;
@@ -2641,12 +2642,33 @@ static void reset_request(struct drm_i915_gem_request *request)
 	dma_fence_set_error(&request->fence, -EIO);
 }
 
+static void engine_skip_context(struct drm_i915_gem_request *request)
+{
+	struct intel_engine_cs *engine = request->engine;
+	struct i915_gem_context *hung_ctx = request->ctx;
+	struct intel_timeline *timeline;
+	unsigned long flags;
+
+	timeline = i915_gem_context_lookup_timeline(hung_ctx, engine);
+
+	spin_lock_irqsave(&engine->timeline->lock, flags);
+	spin_lock(&timeline->lock);
+
+	list_for_each_entry_continue(request, &engine->timeline->requests, link)
+		if (request->ctx == hung_ctx)
+			skip_request(request);
+
+	list_for_each_entry(request, &timeline->requests, link)
+		skip_request(request);
+
+	spin_unlock(&timeline->lock);
+	spin_unlock_irqrestore(&engine->timeline->lock, flags);
+}
+
 static void i915_gem_reset_engine(struct intel_engine_cs *engine)
 {
 	struct drm_i915_gem_request *request;
 	struct i915_gem_context *hung_ctx;
-	struct intel_timeline *timeline;
-	unsigned long flags;
 	bool ring_hung;
 
 	if (engine->irq_seqno_barrier)
@@ -2668,7 +2690,7 @@ static void i915_gem_reset_engine(struct intel_engine_cs *engine)
 
 	if (ring_hung) {
 		i915_gem_context_mark_guilty(hung_ctx);
-		reset_request(request);
+		skip_request(request);
 	} else {
 		i915_gem_context_mark_innocent(hung_ctx);
 		dma_fence_set_error(&request->fence, -EAGAIN);
@@ -2696,20 +2718,7 @@ static void i915_gem_reset_engine(struct intel_engine_cs *engine)
 	if (i915_gem_context_is_default(hung_ctx))
 		return;
 
-	timeline = i915_gem_context_lookup_timeline(hung_ctx, engine);
-
-	spin_lock_irqsave(&engine->timeline->lock, flags);
-	spin_lock(&timeline->lock);
-
-	list_for_each_entry_continue(request, &engine->timeline->requests, link)
-		if (request->ctx == hung_ctx)
-			reset_request(request);
-
-	list_for_each_entry(request, &timeline->requests, link)
-		reset_request(request);
-
-	spin_unlock(&timeline->lock);
-	spin_unlock_irqrestore(&engine->timeline->lock, flags);
+	engine_skip_context(request);
 }
 
 void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
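
For readers unfamiliar with the reset path, here is a minimal userspace
sketch (not kernel code, and not part of the patch) of the policy that
engine_skip_context() applies to the engine timeline: starting from the
hung request, every later request owned by the same context is skipped,
while requests from other contexts are left untouched. All names and
types below (struct toy_request, the array-based timeline, and so on)
are made-up stand-ins; the real helper additionally cancels everything
queued on the context's own per-engine timeline under both timeline
locks, which this sketch omits.

#include <stdio.h>

struct toy_request {
	int ctx;      /* owning context id */
	int skipped;  /* set once the payload has been cancelled */
};

/* Stand-in for skip_request(): drop the payload, fail the fence with -EIO. */
static void skip_request(struct toy_request *rq)
{
	rq->skipped = 1;
}

/*
 * Stand-in for engine_skip_context(): walk the engine timeline from the
 * hung request onwards and skip every request owned by the hung context.
 */
static void engine_skip_context(struct toy_request *timeline, int count,
				int hung_idx)
{
	int hung_ctx = timeline[hung_idx].ctx;

	for (int i = hung_idx; i < count; i++)
		if (timeline[i].ctx == hung_ctx)
			skip_request(&timeline[i]);
}

int main(void)
{
	/* Engine timeline with requests from contexts 1 and 2; ctx 1 hangs at [1]. */
	struct toy_request timeline[] = {
		{ .ctx = 1 }, { .ctx = 1 }, { .ctx = 2 }, { .ctx = 1 }, { .ctx = 2 },
	};

	engine_skip_context(timeline, 5, 1);

	for (int i = 0; i < 5; i++)
		printf("request %d (ctx %d): %s\n", i, timeline[i].ctx,
		       timeline[i].skipped ? "skipped (-EIO)" : "kept");
	return 0;
}

Compiled and run, the sketch reports requests 1 and 3 (context 1) as
skipped and leaves the context 2 requests intact, which mirrors what a
guilty context observes after an engine reset: its remaining payloads
are cancelled with -EIO while innocent contexts continue to run.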