drm/i915: Split execlists/guc reset preparations

In the next patch, we will make the execlists reset prepare callback
take into account preemption by flushing the context-switch handler.
This is not applicable to the GuC submission backend, so split the two
into their own backend callbacks.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Michał Winiarski <michal.winiarski@intel.com>
Cc: Michel Thierry <michel.thierry@intel.com>
Cc: Jeff McGee <jeff.mcgee@intel.com>
Reviewed-by: Jeff McGee <jeff.mcgee@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180516183355.10553-5-chris@chris-wilson.co.uk
This commit is contained in:
Chris Wilson 2018-05-16 19:33:52 +01:00
parent 5adfb772f8
commit 1329115c6c
2 changed files with 36 additions and 10 deletions

View File

@@ -815,6 +815,37 @@ static void guc_submission_tasklet(unsigned long data)
guc_dequeue(engine); guc_dequeue(engine);
} }
/*
 * guc_reset_prepare - quiesce the GuC submission backend before an engine reset
 * @engine: the engine about to be reset
 *
 * Disables the engine's submission tasklet and drains the GuC preemption
 * workqueue so that no new work can be submitted to the hardware while the
 * reset is in progress.
 *
 * Returns: the result of i915_gem_find_active_request() for @engine —
 * presumably the request to resubmit/replay after the reset; confirm against
 * the engine->reset.prepare contract.
 */
static struct i915_request *
guc_reset_prepare(struct intel_engine_cs *engine)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
GEM_TRACE("%s\n", engine->name);
/*
 * Prevent request submission to the hardware until we have
 * completed the reset in i915_gem_reset_finish(). If a request
 * is completed by one engine, it may then queue a request
 * to a second via its execlists->tasklet *just* as we are
 * calling engine->init_hw() and also writing the ELSP.
 * Turning off the execlists->tasklet until the reset is over
 * prevents the race.
 */
__tasklet_disable_sync_once(&execlists->tasklet);
/*
 * We're using worker to queue preemption requests from the tasklet in
 * GuC submission mode.
 * Even though tasklet was disabled, we may still have a worker queued.
 * Let's make sure that all workers scheduled before disabling the
 * tasklet are completed before continuing with the reset.
 */
if (engine->i915->guc.preempt_wq)
flush_workqueue(engine->i915->guc.preempt_wq);
return i915_gem_find_active_request(engine);
}
/* /*
* Everything below here is concerned with setup & teardown, and is * Everything below here is concerned with setup & teardown, and is
* therefore not part of the somewhat time-critical batch-submission * therefore not part of the somewhat time-critical batch-submission
@@ -1275,6 +1306,9 @@ int intel_guc_submission_enable(struct intel_guc *guc)
&engine->execlists; &engine->execlists;
execlists->tasklet.func = guc_submission_tasklet; execlists->tasklet.func = guc_submission_tasklet;
engine->reset.prepare = guc_reset_prepare;
engine->park = guc_submission_park; engine->park = guc_submission_park;
engine->unpark = guc_submission_unpark; engine->unpark = guc_submission_unpark;

View File

@@ -1845,16 +1845,6 @@ execlists_reset_prepare(struct intel_engine_cs *engine)
*/ */
__tasklet_disable_sync_once(&execlists->tasklet); __tasklet_disable_sync_once(&execlists->tasklet);
/*
* We're using worker to queue preemption requests from the tasklet in
* GuC submission mode.
* Even though tasklet was disabled, we may still have a worker queued.
* Let's make sure that all workers scheduled before disabling the
* tasklet are completed before continuing with the reset.
*/
if (engine->i915->guc.preempt_wq)
flush_workqueue(engine->i915->guc.preempt_wq);
return i915_gem_find_active_request(engine); return i915_gem_find_active_request(engine);
} }
@@ -2256,6 +2246,8 @@ static void execlists_set_default_submission(struct intel_engine_cs *engine)
engine->schedule = execlists_schedule; engine->schedule = execlists_schedule;
engine->execlists.tasklet.func = execlists_submission_tasklet; engine->execlists.tasklet.func = execlists_submission_tasklet;
engine->reset.prepare = execlists_reset_prepare;
engine->park = NULL; engine->park = NULL;
engine->unpark = NULL; engine->unpark = NULL;