drm/i915/execlists: Make submission tasklet hardirq safe

Prepare to allow the execlists submission to be run from underneath a
hardirq timer context (and not just the current softirq context) as is
required for fast preemption resets and context switches.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180508210318.10274-1-chris@chris-wilson.co.uk
Author: Chris Wilson <chris@chris-wilson.co.uk>
Date:   2018-05-08 22:03:17 +01:00
parent b9777c6f86
commit 4413c474b1

1 file changed, 29 insertions(+), 13 deletions(-)
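A note on the locking change: spin_lock_irq()/spin_unlock_irq() unconditionally disable and then re-enable local interrupts, which is only correct when the caller knows interrupts were enabled on entry. Once this code can be entered from a hardirq (timer) context, the unlock would spuriously re-enable interrupts inside the handler; spin_lock_irqsave()/spin_unlock_irqrestore() save and restore the caller's interrupt state instead. A minimal sketch of the difference (illustrative only; example_lock and both functions are hypothetical, not part of the patch):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);	/* hypothetical lock, for illustration */

static void softirq_only_path(void)
{
	/* Assumes irqs are enabled on entry ... */
	spin_lock_irq(&example_lock);
	/* ... critical section ... */
	spin_unlock_irq(&example_lock);	/* unconditionally re-enables irqs */
}

static void hardirq_safe_path(void)
{
	unsigned long flags;

	/* Works whether or not irqs were already disabled. */
	spin_lock_irqsave(&example_lock, flags);
	/* ... critical section ... */
	spin_unlock_irqrestore(&example_lock, flags);	/* restores prior state */
}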

--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -356,10 +356,13 @@ execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists)
 {
 	struct intel_engine_cs *engine =
 		container_of(execlists, typeof(*engine), execlists);
+	unsigned long flags;
+
+	spin_lock_irqsave(&engine->timeline.lock, flags);
 
-	spin_lock_irq(&engine->timeline.lock);
 	__unwind_incomplete_requests(engine);
-	spin_unlock_irq(&engine->timeline.lock);
+
+	spin_unlock_irqrestore(&engine->timeline.lock, flags);
 }
 
 static inline void
@@ -553,7 +556,7 @@ static void inject_preempt_context(struct intel_engine_cs *engine)
 	execlists_set_active(&engine->execlists, EXECLISTS_ACTIVE_PREEMPT);
 }
 
-static void execlists_dequeue(struct intel_engine_cs *engine)
+static bool __execlists_dequeue(struct intel_engine_cs *engine)
 {
 	struct intel_engine_execlists * const execlists = &engine->execlists;
 	struct execlist_port *port = execlists->port;
@@ -563,6 +566,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 	struct rb_node *rb;
 	bool submit = false;
 
+	lockdep_assert_held(&engine->timeline.lock);
+
 	/* Hardware submission is through 2 ports. Conceptually each port
 	 * has a (RING_START, RING_HEAD, RING_TAIL) tuple. RING_START is
 	 * static for a context, and unique to each, so we only execute
@@ -584,7 +589,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 	 * and context switches) submission.
 	 */
 
-	spin_lock_irq(&engine->timeline.lock);
 	rb = execlists->first;
 	GEM_BUG_ON(rb_first(&execlists->queue) != rb);
 
@@ -599,7 +603,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 						EXECLISTS_ACTIVE_USER));
 		GEM_BUG_ON(!port_count(&port[0]));
 		if (port_count(&port[0]) > 1)
-			goto unlock;
+			return false;
 
 		/*
 		 * If we write to ELSP a second time before the HW has had
@@ -609,11 +613,11 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 		 * the HW to indicate that it has had a chance to respond.
 		 */
 		if (!execlists_is_active(execlists, EXECLISTS_ACTIVE_HWACK))
-			goto unlock;
+			return false;
 
 		if (need_preempt(engine, last, execlists->queue_priority)) {
 			inject_preempt_context(engine);
-			goto unlock;
+			return false;
 		}
 
 		/*
@@ -638,7 +642,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 		 * priorities of the ports haven't been switch.
 		 */
 		if (port_count(&port[1]))
-			goto unlock;
+			return false;
 
 		/*
 		 * WaIdleLiteRestore:bdw,skl
@@ -743,13 +747,25 @@ done:
 	/* We must always keep the beast fed if we have work piled up */
 	GEM_BUG_ON(execlists->first && !port_isset(execlists->port));
 
-unlock:
-	spin_unlock_irq(&engine->timeline.lock);
-
-	if (submit) {
+	/* Re-evaluate the executing context setup after each preemptive kick */
+	if (last)
 		execlists_user_begin(execlists, execlists->port);
+
+	return submit;
+}
+
+static void execlists_dequeue(struct intel_engine_cs *engine)
+{
+	struct intel_engine_execlists * const execlists = &engine->execlists;
+	unsigned long flags;
+	bool submit;
+
+	spin_lock_irqsave(&engine->timeline.lock, flags);
+	submit = __execlists_dequeue(engine);
+	spin_unlock_irqrestore(&engine->timeline.lock, flags);
+
+	if (submit)
 		execlists_submit_ports(engine);
-	}
+
 	GEM_BUG_ON(port_isset(execlists->port) &&
 		   !execlists_is_active(execlists, EXECLISTS_ACTIVE_USER));
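
The split performed in the last hunk — an inner helper that documents its locking requirement with lockdep_assert_held() and returns whether follow-up work is needed, plus an outer wrapper that owns the irqsave/irqrestore and performs the heavier hardware submission outside the lock — is a common kernel pattern. A hedged sketch with hypothetical names (struct engine, __do_work(), do_work() and submit_to_hw() are invented for illustration, not taken from the patch):

#include <linux/spinlock.h>
#include <linux/lockdep.h>

struct engine {
	spinlock_t lock;	/* hypothetical per-engine lock */
};

static void submit_to_hw(struct engine *e);	/* hypothetical HW write */

static bool __do_work(struct engine *e)
{
	lockdep_assert_held(&e->lock);	/* lockdep catches unlocked callers */

	/* ... examine the queue, possibly return false early ... */

	return true;	/* tell the wrapper there is something to submit */
}

static void do_work(struct engine *e)
{
	unsigned long flags;
	bool submit;

	/* Safe from process, softirq and hardirq context alike. */
	spin_lock_irqsave(&e->lock, flags);
	submit = __do_work(e);
	spin_unlock_irqrestore(&e->lock, flags);

	if (submit)
		submit_to_hw(e);	/* heavier HW access kept outside the lock */
}

Keeping the helper lock-agnostic also lets callers that already hold the lock (for example, a reset path) invoke it directly without re-entering the spinlock.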