drm/i915/gt: Stage the transfer of the virtual breadcrumb

We move the virtual breadcrumb from one physical engine to the next, if
the next virtual request is scheduled on a new physical engine. Since
the virtual context can only be in one signal queue, we need it to track
the current physical engine for the new breadcrumbs. However, to move
the list we need both breadcrumb locks -- and since we cannot take both
at the same time (unless we are careful to always ensure a consistent
ordering), we stage the movement of the signaler via the current virtual
request.

Closes: https://gitlab.freedesktop.org/drm/intel/issues/1510
Fixes: 6d06779e86 ("drm/i915: Load balancing across a virtual engine")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200325130059.30600-1-chris@chris-wilson.co.uk
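
Not part of the patch: below is a minimal userspace sketch of the staging
pattern the message describes, using pthread mutexes in place of the kernel's
breadcrumb spinlocks. Rather than taking both locks to move the entry
directly, the entry is detached under the old lock and a pending flag on the
request defers the attach until a context where taking the new lock is safe.
All names here (struct queue, struct request, signal_pending, stage_transfer,
complete_transfer) are illustrative, not taken from the i915 code.

/*
 * Illustrative only -- not kernel code.  The pattern: when lock ordering
 * prevents taking both locks at once, detach under the lock we already
 * hold, record the pending work on the request, and complete the attach
 * later from a context where taking the other lock is safe.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct queue {
	pthread_mutex_t lock;
	int nelem;			/* stand-in for a signaler list */
};

struct request {
	bool signal_pending;		/* staged: attach to new queue later */
};

/* Takes only the old lock; must not touch new->lock from here. */
static void stage_transfer(struct queue *old, struct request *rq)
{
	pthread_mutex_lock(&old->lock);
	if (old->nelem > 0) {
		old->nelem--;			/* detach from the old queue */
		rq->signal_pending = true;	/* defer the re-attach */
	}
	pthread_mutex_unlock(&old->lock);
}

/* Runs later, with the old lock dropped, so taking new->lock is safe. */
static void complete_transfer(struct queue *new, struct request *rq)
{
	if (!rq->signal_pending)
		return;

	pthread_mutex_lock(&new->lock);
	new->nelem++;				/* attach to the new queue */
	rq->signal_pending = false;
	pthread_mutex_unlock(&new->lock);
}

int main(void)
{
	struct queue old = { PTHREAD_MUTEX_INITIALIZER, 1 };
	struct queue new = { PTHREAD_MUTEX_INITIALIZER, 0 };
	struct request rq = { false };

	stage_transfer(&old, &rq);	/* under the old lock only */
	complete_transfer(&new, &rq);	/* under the new lock only */

	printf("old=%d new=%d pending=%d\n", old.nelem, new.nelem, rq.signal_pending);
	return 0;
}

In the patch itself, the deferred step is enabling the breadcrumb on
submission via DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, with the queued irq_work
running once engine->active.lock is dropped after dequeue.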

@@ -1663,7 +1663,7 @@ static bool virtual_matches(const struct virtual_engine *ve,
 }
 
 static void virtual_xfer_breadcrumbs(struct virtual_engine *ve,
-				     struct intel_engine_cs *engine)
+				     struct i915_request *rq)
 {
 	struct intel_engine_cs *old = ve->siblings[0];
 
@@ -1671,9 +1671,19 @@ static void virtual_xfer_breadcrumbs(struct virtual_engine *ve,
 
 	spin_lock(&old->breadcrumbs.irq_lock);
 	if (!list_empty(&ve->context.signal_link)) {
-		list_move_tail(&ve->context.signal_link,
-			       &engine->breadcrumbs.signalers);
-		intel_engine_signal_breadcrumbs(engine);
+		list_del_init(&ve->context.signal_link);
+
+		/*
+		 * We cannot acquire the new engine->breadcrumbs.irq_lock
+		 * (as we are holding a breadcrumbs.irq_lock already),
+		 * so attach this request to the signaler on submission.
+		 * The queued irq_work will occur when we finally drop
+		 * the engine->active.lock after dequeue.
+		 */
+		set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags);
+
+		/* Also transfer the pending irq_work for the old breadcrumb. */
+		intel_engine_signal_breadcrumbs(rq->engine);
 	}
 	spin_unlock(&old->breadcrumbs.irq_lock);
 }
@@ -2045,7 +2055,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 							       engine);
 
 				if (!list_empty(&ve->context.signals))
-					virtual_xfer_breadcrumbs(ve, engine);
+					virtual_xfer_breadcrumbs(ve, rq);
 
 				/*
 				 * Move the bound engine to the top of the list