drm/i915: Mark all incomplete requests as -EIO when wedged

Similarly to a normal reset, after we mark the GPU as wedged (completely
fubar and no more requests can be executed), set the error status on all
the in flight requests.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20170110172246.27297-4-chris@chris-wilson.co.uk
This commit is contained in:
Chris Wilson 2017-01-10 17:22:45 +00:00
parent 3c1b284759
commit 3cd9442f66
1 changed file with 10 additions and 0 deletions

View File

@ -2730,12 +2730,16 @@ void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
static void nop_submit_request(struct drm_i915_gem_request *request) static void nop_submit_request(struct drm_i915_gem_request *request)
{ {
dma_fence_set_error(&request->fence, -EIO);
i915_gem_request_submit(request); i915_gem_request_submit(request);
intel_engine_init_global_seqno(request->engine, request->global_seqno); intel_engine_init_global_seqno(request->engine, request->global_seqno);
} }
static void i915_gem_cleanup_engine(struct intel_engine_cs *engine) static void i915_gem_cleanup_engine(struct intel_engine_cs *engine)
{ {
struct drm_i915_gem_request *request;
unsigned long flags;
/* We need to be sure that no thread is running the old callback as /* We need to be sure that no thread is running the old callback as
* we install the nop handler (otherwise we would submit a request * we install the nop handler (otherwise we would submit a request
* to hardware that will never complete). In order to prevent this * to hardware that will never complete). In order to prevent this
@ -2744,6 +2748,12 @@ static void i915_gem_cleanup_engine(struct intel_engine_cs *engine)
*/ */
engine->submit_request = nop_submit_request; engine->submit_request = nop_submit_request;
/* Mark all executing requests as skipped */
spin_lock_irqsave(&engine->timeline->lock, flags);
list_for_each_entry(request, &engine->timeline->requests, link)
dma_fence_set_error(&request->fence, -EIO);
spin_unlock_irqrestore(&engine->timeline->lock, flags);
/* Mark all pending requests as complete so that any concurrent /* Mark all pending requests as complete so that any concurrent
* (lockless) lookup doesn't try and wait upon the request as we * (lockless) lookup doesn't try and wait upon the request as we
* reset it. * reset it.