drm/i915/selftests: Check preemption rollback of different ring queue depths
Like live_unlite_ring, but instead of simply looking at the impact of
intel_ring_direction(), check that preemption more generally works with
different depths of queued requests in the ring.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200616233733.18050-1-chris@chris-wilson.co.uk
@@ -2758,6 +2758,168 @@ err_ce:
	return err;
}

static int __live_preempt_ring(struct intel_engine_cs *engine,
			       struct igt_spinner *spin,
			       int queue_sz, int ring_sz)
{
	struct intel_context *ce[2] = {};
	struct i915_request *rq;
	struct igt_live_test t;
	int err = 0;
	int n;

	if (igt_live_test_begin(&t, engine->i915, __func__, engine->name))
		return -EIO;

	for (n = 0; n < ARRAY_SIZE(ce); n++) {
		struct intel_context *tmp;

		tmp = intel_context_create(engine);
		if (IS_ERR(tmp)) {
			err = PTR_ERR(tmp);
			goto err_ce;
		}

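		/* Use a ring of the size under test for this context */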
		tmp->ring = __intel_context_ring_size(ring_sz);

		err = intel_context_pin(tmp);
		if (err) {
			intel_context_put(tmp);
			goto err_ce;
		}

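		/*
		 * Poison the whole ring: anything the GPU executes that was
		 * not explicitly emitted (e.g. after a bad rollback) hangs.
		 */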
		memset32(tmp->ring->vaddr,
			 0xdeadbeef, /* trigger a hang if executed */
			 tmp->ring->vma->size / sizeof(u32));

		ce[n] = tmp;
	}

	rq = igt_spinner_create_request(spin, ce[0], MI_ARB_CHECK);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_ce;
	}

	i915_request_get(rq);
	rq->sched.attr.priority = I915_PRIORITY_BARRIER;
	i915_request_add(rq);

	if (!igt_wait_for_spinner(spin, rq)) {
		intel_gt_set_wedged(engine->gt);
		i915_request_put(rq);
		err = -ETIME;
		goto err_ce;
	}

	/* Fill the ring until we cause a wrap */
	n = 0;
	while (ce[0]->ring->tail - rq->wa_tail <= queue_sz) {
		struct i915_request *tmp;

		tmp = intel_context_create_request(ce[0]);
		if (IS_ERR(tmp)) {
			err = PTR_ERR(tmp);
			i915_request_put(rq);
			goto err_ce;
		}

		i915_request_add(tmp);
		intel_engine_flush_submission(engine);
		n++;
	}
	intel_engine_flush_submission(engine);
	pr_debug("%s: Filled %d with %d nop tails {size:%x, tail:%x, emit:%x, rq.tail:%x}\n",
		 engine->name, queue_sz, n,
		 ce[0]->ring->size,
		 ce[0]->ring->tail,
		 ce[0]->ring->emit,
		 rq->tail);
	i915_request_put(rq);

	/* Create a second request to preempt the first ring */
	rq = intel_context_create_request(ce[1]);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_ce;
	}

	rq->sched.attr.priority = I915_PRIORITY_BARRIER;
	i915_request_get(rq);
	i915_request_add(rq);

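	/* Expect the barrier request to preempt ce[0] and reach HW promptly */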
	err = wait_for_submit(engine, rq, HZ / 2);
	i915_request_put(rq);
	if (err) {
		pr_err("%s: preemption request was not submitted\n",
		       engine->name);
		err = -ETIME;
	}

	pr_debug("%s: ring[0]:{ tail:%x, emit:%x }, ring[1]:{ tail:%x, emit:%x }\n",
		 engine->name,
		 ce[0]->ring->tail, ce[0]->ring->emit,
		 ce[1]->ring->tail, ce[1]->ring->emit);

err_ce:
	intel_engine_flush_submission(engine);
	igt_spinner_end(spin);
	for (n = 0; n < ARRAY_SIZE(ce); n++) {
		if (IS_ERR_OR_NULL(ce[n]))
			break;

		intel_context_unpin(ce[n]);
		intel_context_put(ce[n]);
	}
	if (igt_live_test_end(&t))
		err = -EIO;
	return err;
}

static int live_preempt_ring(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	struct igt_spinner spin;
	enum intel_engine_id id;
	int err = 0;

	/*
	 * Check that we roll back large chunks of a ring in order to do a
	 * preemption event. Similar to live_unlite_ring, but looking at
	 * ring size rather than the impact of intel_ring_direction().
	 */

	if (igt_spinner_init(&spin, gt))
		return -ENOMEM;

	for_each_engine(engine, gt, id) {
		int n;

		if (!intel_engine_has_preemption(engine))
			continue;

		if (!intel_engine_can_store_dword(engine))
			continue;

		engine_heartbeat_disable(engine);

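		/* Sweep queue depths of 0, 1K, 2K and 3K bytes within a 4K ring */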
		for (n = 0; n <= 3; n++) {
			err = __live_preempt_ring(engine, &spin,
						  n * SZ_4K / 4, SZ_4K);
			if (err)
				break;
		}

		engine_heartbeat_enable(engine);
		if (err)
			break;
	}

	igt_spinner_fini(&spin);
	return err;
}

static int live_preempt_gang(void *arg)
{
	struct intel_gt *gt = arg;
@@ -4540,6 +4702,7 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915)
		SUBTEST(live_preempt_cancel),
		SUBTEST(live_suppress_self_preempt),
		SUBTEST(live_chain_preempt),
		SUBTEST(live_preempt_ring),
		SUBTEST(live_preempt_gang),
		SUBTEST(live_preempt_timeout),
		SUBTEST(live_preempt_user),