drm/i915: Move engine reset prepare/finish to backends
In preparation for more careful handling of incomplete preemption during reset by execlists, move the existing code wholesale to the backends under a couple of new reset vfuncs.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Michał Winiarski <michal.winiarski@intel.com>
Cc: Michel Thierry <michel.thierry@intel.com>
Cc: Jeff McGee <jeff.mcgee@intel.com>
Reviewed-by: Jeff McGee <jeff.mcgee@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180516183355.10553-4-chris@chris-wilson.co.uk
commit 5adfb772f8
parent ef2fb72046
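For orientation before the hunks below: the patch replaces the single engine->reset_hw() hook with a three-stage engine->reset vfunc table (prepare, reset, finish) that each submission backend fills in, and the core reset code in i915_gem.c now only drives those hooks. The following standalone C sketch models just that call sequence with stand-in types and a toy backend; toy_reset_prepare() and friends, core_engine_reset() and the "rcs0" name are illustrative inventions, not kernel code, and the real implementations are in the diff itself.

#include <stdio.h>
#include <stddef.h>

/* Stand-in request type; the real one lives in the i915 driver. */
struct i915_request { unsigned int global_seqno; };

struct intel_engine_cs {
        const char *name;
        /* Per-backend reset hooks, mirroring the struct added to intel_ringbuffer.h. */
        struct {
                struct i915_request *(*prepare)(struct intel_engine_cs *engine);
                void (*reset)(struct intel_engine_cs *engine, struct i915_request *rq);
                void (*finish)(struct intel_engine_cs *engine);
        } reset;
};

/* Toy backend standing in for execlists_reset_prepare()/execlists_reset()/execlists_reset_finish(). */
static struct i915_request *toy_reset_prepare(struct intel_engine_cs *engine)
{
        printf("%s: prepare (stop submission, find the active request)\n", engine->name);
        return NULL; /* no hung request in this sketch */
}

static void toy_reset(struct intel_engine_cs *engine, struct i915_request *rq)
{
        printf("%s: reset to seqno %x\n", engine->name, rq ? rq->global_seqno : 0);
}

static void toy_reset_finish(struct intel_engine_cs *engine)
{
        printf("%s: finish (re-enable submission)\n", engine->name);
}

/* The core reset path now sees only the vfuncs, not the backend details. */
static void core_engine_reset(struct intel_engine_cs *engine)
{
        struct i915_request *rq;

        rq = engine->reset.prepare(engine);   /* formerly open-coded in i915_gem_reset_prepare_engine() */
        engine->reset.reset(engine, rq);      /* formerly engine->reset_hw() */
        engine->reset.finish(engine);         /* formerly open-coded in i915_gem_reset_finish_engine() */
}

int main(void)
{
        struct intel_engine_cs engine = {
                .name = "rcs0",
                .reset = {
                        .prepare = toy_reset_prepare,
                        .reset = toy_reset,
                        .finish = toy_reset_finish,
                },
        };

        core_engine_reset(&engine);
        return 0;
}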
drivers/gpu/drm/i915/i915_gem.c
@@ -3004,7 +3004,7 @@ i915_gem_find_active_request(struct intel_engine_cs *engine)
 struct i915_request *
 i915_gem_reset_prepare_engine(struct intel_engine_cs *engine)
 {
-        struct i915_request *request = NULL;
+        struct i915_request *request;

         /*
          * During the reset sequence, we must prevent the engine from
@@ -3027,31 +3027,7 @@ i915_gem_reset_prepare_engine(struct intel_engine_cs *engine)
          */
         kthread_park(engine->breadcrumbs.signaler);

-        /*
-         * Prevent request submission to the hardware until we have
-         * completed the reset in i915_gem_reset_finish(). If a request
-         * is completed by one engine, it may then queue a request
-         * to a second via its execlists->tasklet *just* as we are
-         * calling engine->init_hw() and also writing the ELSP.
-         * Turning off the execlists->tasklet until the reset is over
-         * prevents the race.
-         */
-        __tasklet_disable_sync_once(&engine->execlists.tasklet);
-
-        /*
-         * We're using worker to queue preemption requests from the tasklet in
-         * GuC submission mode.
-         * Even though tasklet was disabled, we may still have a worker queued.
-         * Let's make sure that all workers scheduled before disabling the
-         * tasklet are completed before continuing with the reset.
-         */
-        if (engine->i915->guc.preempt_wq)
-                flush_workqueue(engine->i915->guc.preempt_wq);
-
-        if (engine->irq_seqno_barrier)
-                engine->irq_seqno_barrier(engine);
-
-        request = i915_gem_find_active_request(engine);
+        request = engine->reset.prepare(engine);
         if (request && request->fence.error == -EIO)
                 request = ERR_PTR(-EIO); /* Previous reset failed! */

@@ -3202,13 +3178,8 @@ void i915_gem_reset_engine(struct intel_engine_cs *engine,
         if (request)
                 request = i915_gem_reset_request(engine, request, stalled);

-        if (request) {
-                DRM_DEBUG_DRIVER("resetting %s to restart from tail of request 0x%x\n",
-                                 engine->name, request->global_seqno);
-        }
-
         /* Setup the CS to resume from the breadcrumb of the hung request */
-        engine->reset_hw(engine, request);
+        engine->reset.reset(engine, request);
 }

 void i915_gem_reset(struct drm_i915_private *dev_priv,
@@ -3256,7 +3227,8 @@ void i915_gem_reset(struct drm_i915_private *dev_priv,

 void i915_gem_reset_finish_engine(struct intel_engine_cs *engine)
 {
-        tasklet_enable(&engine->execlists.tasklet);
+        engine->reset.finish(engine);
+
         kthread_unpark(engine->breadcrumbs.signaler);

         intel_uncore_forcewake_put(engine->i915, FORCEWAKE_ALL);
drivers/gpu/drm/i915/intel_lrc.c
@@ -1827,8 +1827,39 @@ static int gen9_init_render_ring(struct intel_engine_cs *engine)
         return 0;
 }

-static void reset_common_ring(struct intel_engine_cs *engine,
-                              struct i915_request *request)
+static struct i915_request *
+execlists_reset_prepare(struct intel_engine_cs *engine)
+{
+        struct intel_engine_execlists * const execlists = &engine->execlists;
+
+        GEM_TRACE("%s\n", engine->name);
+
+        /*
+         * Prevent request submission to the hardware until we have
+         * completed the reset in i915_gem_reset_finish(). If a request
+         * is completed by one engine, it may then queue a request
+         * to a second via its execlists->tasklet *just* as we are
+         * calling engine->init_hw() and also writing the ELSP.
+         * Turning off the execlists->tasklet until the reset is over
+         * prevents the race.
+         */
+        __tasklet_disable_sync_once(&execlists->tasklet);
+
+        /*
+         * We're using worker to queue preemption requests from the tasklet in
+         * GuC submission mode.
+         * Even though tasklet was disabled, we may still have a worker queued.
+         * Let's make sure that all workers scheduled before disabling the
+         * tasklet are completed before continuing with the reset.
+         */
+        if (engine->i915->guc.preempt_wq)
+                flush_workqueue(engine->i915->guc.preempt_wq);
+
+        return i915_gem_find_active_request(engine);
+}
+
+static void execlists_reset(struct intel_engine_cs *engine,
+                            struct i915_request *request)
 {
         struct intel_engine_execlists * const execlists = &engine->execlists;
         unsigned long flags;
@@ -1908,6 +1939,13 @@ static void reset_common_ring(struct intel_engine_cs *engine,
         unwind_wa_tail(request);
 }

+static void execlists_reset_finish(struct intel_engine_cs *engine)
+{
+        tasklet_enable(&engine->execlists.tasklet);
+
+        GEM_TRACE("%s\n", engine->name);
+}
+
 static int intel_logical_ring_emit_pdps(struct i915_request *rq)
 {
         struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt;
@@ -2237,7 +2275,10 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
 {
         /* Default vfuncs which can be overriden by each engine. */
         engine->init_hw = gen8_init_common_ring;
-        engine->reset_hw = reset_common_ring;
+
+        engine->reset.prepare = execlists_reset_prepare;
+        engine->reset.reset = execlists_reset;
+        engine->reset.finish = execlists_reset_finish;

         engine->context_pin = execlists_context_pin;
         engine->context_unpin = execlists_context_unpin;
drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -531,9 +531,20 @@ out:
         return ret;
 }

-static void reset_ring_common(struct intel_engine_cs *engine,
-                              struct i915_request *request)
+static struct i915_request *reset_prepare(struct intel_engine_cs *engine)
+{
+        if (engine->irq_seqno_barrier)
+                engine->irq_seqno_barrier(engine);
+
+        return i915_gem_find_active_request(engine);
+}
+
+static void reset_ring(struct intel_engine_cs *engine,
+                       struct i915_request *request)
 {
+        GEM_TRACE("%s seqno=%x\n",
+                  engine->name, request ? request->global_seqno : 0);
+
         /*
          * RC6 must be prevented until the reset is complete and the engine
          * reinitialised. If it occurs in the middle of this sequence, the
@@ -597,6 +608,10 @@ static void reset_ring_common(struct intel_engine_cs *engine,
         }
 }

+static void reset_finish(struct intel_engine_cs *engine)
+{
+}
+
 static int intel_rcs_ctx_init(struct i915_request *rq)
 {
         int ret;
@@ -2006,7 +2021,9 @@ static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
         intel_ring_init_semaphores(dev_priv, engine);

         engine->init_hw = init_ring_common;
-        engine->reset_hw = reset_ring_common;
+        engine->reset.prepare = reset_prepare;
+        engine->reset.reset = reset_ring;
+        engine->reset.finish = reset_finish;

         engine->context_pin = intel_ring_context_pin;
         engine->context_unpin = intel_ring_context_unpin;
drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -423,8 +423,13 @@ struct intel_engine_cs {
         void (*irq_disable)(struct intel_engine_cs *engine);

         int (*init_hw)(struct intel_engine_cs *engine);
-        void (*reset_hw)(struct intel_engine_cs *engine,
-                         struct i915_request *rq);
+
+        struct {
+                struct i915_request *(*prepare)(struct intel_engine_cs *engine);
+                void (*reset)(struct intel_engine_cs *engine,
+                              struct i915_request *rq);
+                void (*finish)(struct intel_engine_cs *engine);
+        } reset;

         void (*park)(struct intel_engine_cs *engine);
         void (*unpark)(struct intel_engine_cs *engine);