drm/i915/selftests: Extract spinner code

Pull the spinner code out into a standalone file so that it can shortly be
used by other and new test cases. Plain code movement - no functional
changes.

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20181130080254.15383-1-tvrtko.ursulin@linux.intel.com

commit 8d2f6e2f27 (parent f545425a01)
drivers/gpu/drm/i915/Makefile
@@ -165,7 +165,8 @@ i915-$(CONFIG_DRM_I915_CAPTURE_ERROR) += i915_gpu_error.o
 i915-$(CONFIG_DRM_I915_SELFTEST) += \
	selftests/i915_random.o \
	selftests/i915_selftest.o \
-	selftests/igt_flush_test.o
+	selftests/igt_flush_test.o \
+	selftests/igt_spinner.o
 
 # virtual gpu code
 i915-y += i915_vgpu.o
drivers/gpu/drm/i915/selftests/igt_spinner.c (new file)
@@ -0,0 +1,199 @@
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include "igt_spinner.h"

int igt_spinner_init(struct igt_spinner *spin, struct drm_i915_private *i915)
{
	unsigned int mode;
	void *vaddr;
	int err;

	GEM_BUG_ON(INTEL_GEN(i915) < 8);

	memset(spin, 0, sizeof(*spin));
	spin->i915 = i915;

	spin->hws = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(spin->hws)) {
		err = PTR_ERR(spin->hws);
		goto err;
	}

	spin->obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(spin->obj)) {
		err = PTR_ERR(spin->obj);
		goto err_hws;
	}

	i915_gem_object_set_cache_level(spin->hws, I915_CACHE_LLC);
	vaddr = i915_gem_object_pin_map(spin->hws, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto err_obj;
	}
	spin->seqno = memset(vaddr, 0xff, PAGE_SIZE);

	mode = i915_coherent_map_type(i915);
	vaddr = i915_gem_object_pin_map(spin->obj, mode);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto err_unpin_hws;
	}
	spin->batch = vaddr;

	return 0;

err_unpin_hws:
	i915_gem_object_unpin_map(spin->hws);
err_obj:
	i915_gem_object_put(spin->obj);
err_hws:
	i915_gem_object_put(spin->hws);
err:
	return err;
}

static unsigned int seqno_offset(u64 fence)
{
	return offset_in_page(sizeof(u32) * fence);
}

static u64 hws_address(const struct i915_vma *hws,
		       const struct i915_request *rq)
{
	return hws->node.start + seqno_offset(rq->fence.context);
}

static int emit_recurse_batch(struct igt_spinner *spin,
			      struct i915_request *rq,
			      u32 arbitration_command)
{
	struct i915_address_space *vm = &rq->gem_context->ppgtt->vm;
	struct i915_vma *hws, *vma;
	u32 *batch;
	int err;

	vma = i915_vma_instance(spin->obj, vm, NULL);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	hws = i915_vma_instance(spin->hws, vm, NULL);
	if (IS_ERR(hws))
		return PTR_ERR(hws);

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		return err;

	err = i915_vma_pin(hws, 0, 0, PIN_USER);
	if (err)
		goto unpin_vma;

	err = i915_vma_move_to_active(vma, rq, 0);
	if (err)
		goto unpin_hws;

	if (!i915_gem_object_has_active_reference(vma->obj)) {
		i915_gem_object_get(vma->obj);
		i915_gem_object_set_active_reference(vma->obj);
	}

	err = i915_vma_move_to_active(hws, rq, 0);
	if (err)
		goto unpin_hws;

	if (!i915_gem_object_has_active_reference(hws->obj)) {
		i915_gem_object_get(hws->obj);
		i915_gem_object_set_active_reference(hws->obj);
	}

	batch = spin->batch;

	*batch++ = MI_STORE_DWORD_IMM_GEN4;
	*batch++ = lower_32_bits(hws_address(hws, rq));
	*batch++ = upper_32_bits(hws_address(hws, rq));
	*batch++ = rq->fence.seqno;

	*batch++ = arbitration_command;

	*batch++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
	*batch++ = lower_32_bits(vma->node.start);
	*batch++ = upper_32_bits(vma->node.start);
	*batch++ = MI_BATCH_BUFFER_END; /* not reached */

	i915_gem_chipset_flush(spin->i915);

	err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, 0);

unpin_hws:
	i915_vma_unpin(hws);
unpin_vma:
	i915_vma_unpin(vma);
	return err;
}

struct i915_request *
igt_spinner_create_request(struct igt_spinner *spin,
			   struct i915_gem_context *ctx,
			   struct intel_engine_cs *engine,
			   u32 arbitration_command)
{
	struct i915_request *rq;
	int err;

	rq = i915_request_alloc(engine, ctx);
	if (IS_ERR(rq))
		return rq;

	err = emit_recurse_batch(spin, rq, arbitration_command);
	if (err) {
		i915_request_add(rq);
		return ERR_PTR(err);
	}

	return rq;
}

static u32
hws_seqno(const struct igt_spinner *spin, const struct i915_request *rq)
{
	u32 *seqno = spin->seqno + seqno_offset(rq->fence.context);

	return READ_ONCE(*seqno);
}

void igt_spinner_end(struct igt_spinner *spin)
{
	*spin->batch = MI_BATCH_BUFFER_END;
	i915_gem_chipset_flush(spin->i915);
}

void igt_spinner_fini(struct igt_spinner *spin)
{
	igt_spinner_end(spin);

	i915_gem_object_unpin_map(spin->obj);
	i915_gem_object_put(spin->obj);

	i915_gem_object_unpin_map(spin->hws);
	i915_gem_object_put(spin->hws);
}

bool igt_wait_for_spinner(struct igt_spinner *spin, struct i915_request *rq)
{
	if (!wait_event_timeout(rq->execute,
				READ_ONCE(rq->global_seqno),
				msecs_to_jiffies(10)))
		return false;

	return !(wait_for_us(i915_seqno_passed(hws_seqno(spin, rq),
					       rq->fence.seqno),
			     10) &&
		 wait_for(i915_seqno_passed(hws_seqno(spin, rq),
					    rq->fence.seqno),
			  1000));
}
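A note on the mechanism, as read from the code above: the batch emitted by
emit_recurse_batch() stores the request's seqno into a per-fence-context slot
of the CPU-visible HWS page (MI_STORE_DWORD_IMM_GEN4), executes the caller's
arbitration command (MI_NOOP or MI_ARB_CHECK), and then issues an
MI_BATCH_BUFFER_START that branches back to the start of the batch, so the GPU
loops indefinitely. igt_wait_for_spinner() confirms the loop is live by
polling for that seqno write, and igt_spinner_end() terminates it by
overwriting the first dword of the batch with MI_BATCH_BUFFER_END.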
drivers/gpu/drm/i915/selftests/igt_spinner.h (new file)
@@ -0,0 +1,37 @@
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#ifndef __I915_SELFTESTS_IGT_SPINNER_H__
#define __I915_SELFTESTS_IGT_SPINNER_H__

#include "../i915_selftest.h"

#include "../i915_drv.h"
#include "../i915_request.h"
#include "../intel_ringbuffer.h"
#include "../i915_gem_context.h"

struct igt_spinner {
	struct drm_i915_private *i915;
	struct drm_i915_gem_object *hws;
	struct drm_i915_gem_object *obj;
	u32 *batch;
	void *seqno;
};

int igt_spinner_init(struct igt_spinner *spin, struct drm_i915_private *i915);
void igt_spinner_fini(struct igt_spinner *spin);

struct i915_request *
igt_spinner_create_request(struct igt_spinner *spin,
			   struct i915_gem_context *ctx,
			   struct intel_engine_cs *engine,
			   u32 arbitration_command);
void igt_spinner_end(struct igt_spinner *spin);

bool igt_wait_for_spinner(struct igt_spinner *spin, struct i915_request *rq);

#endif
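For orientation, here is a minimal sketch of how a test case drives these
helpers, mirroring the live_sanitycheck() conversion below; the surrounding
selftest plumbing (i915, ctx, engine, the error labels, locking and runtime
pm) is assumed from the caller and error paths are trimmed:

	struct igt_spinner spin;
	struct i915_request *rq;

	if (igt_spinner_init(&spin, i915))	/* allocate batch + HWS objects */
		goto err_unlock;

	/* Submit a batch which spins until explicitly ended. */
	rq = igt_spinner_create_request(&spin, ctx, engine, MI_NOOP);
	if (IS_ERR(rq))
		goto err_spin;

	i915_request_add(rq);
	if (!igt_wait_for_spinner(&spin, rq))	/* is the spinner executing? */
		goto err_wedged;

	igt_spinner_end(&spin);	/* replace the loop with MI_BATCH_BUFFER_END */
	igt_spinner_fini(&spin);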
drivers/gpu/drm/i915/selftests/intel_lrc.c
@@ -6,216 +6,18 @@
 
 #include "../i915_selftest.h"
 #include "igt_flush_test.h"
+#include "igt_spinner.h"
 #include "i915_random.h"
 
 #include "mock_context.h"
 
-struct spinner {
-	struct drm_i915_private *i915;
-	struct drm_i915_gem_object *hws;
-	struct drm_i915_gem_object *obj;
-	u32 *batch;
-	void *seqno;
-};
-
-static int spinner_init(struct spinner *spin, struct drm_i915_private *i915)
-{
-	unsigned int mode;
-	void *vaddr;
-	int err;
-
-	GEM_BUG_ON(INTEL_GEN(i915) < 8);
-
-	memset(spin, 0, sizeof(*spin));
-	spin->i915 = i915;
-
-	spin->hws = i915_gem_object_create_internal(i915, PAGE_SIZE);
-	if (IS_ERR(spin->hws)) {
-		err = PTR_ERR(spin->hws);
-		goto err;
-	}
-
-	spin->obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
-	if (IS_ERR(spin->obj)) {
-		err = PTR_ERR(spin->obj);
-		goto err_hws;
-	}
-
-	i915_gem_object_set_cache_level(spin->hws, I915_CACHE_LLC);
-	vaddr = i915_gem_object_pin_map(spin->hws, I915_MAP_WB);
-	if (IS_ERR(vaddr)) {
-		err = PTR_ERR(vaddr);
-		goto err_obj;
-	}
-	spin->seqno = memset(vaddr, 0xff, PAGE_SIZE);
-
-	mode = i915_coherent_map_type(i915);
-	vaddr = i915_gem_object_pin_map(spin->obj, mode);
-	if (IS_ERR(vaddr)) {
-		err = PTR_ERR(vaddr);
-		goto err_unpin_hws;
-	}
-	spin->batch = vaddr;
-
-	return 0;
-
-err_unpin_hws:
-	i915_gem_object_unpin_map(spin->hws);
-err_obj:
-	i915_gem_object_put(spin->obj);
-err_hws:
-	i915_gem_object_put(spin->hws);
-err:
-	return err;
-}
-
-static unsigned int seqno_offset(u64 fence)
-{
-	return offset_in_page(sizeof(u32) * fence);
-}
-
-static u64 hws_address(const struct i915_vma *hws,
-		       const struct i915_request *rq)
-{
-	return hws->node.start + seqno_offset(rq->fence.context);
-}
-
-static int emit_recurse_batch(struct spinner *spin,
-			      struct i915_request *rq,
-			      u32 arbitration_command)
-{
-	struct i915_address_space *vm = &rq->gem_context->ppgtt->vm;
-	struct i915_vma *hws, *vma;
-	u32 *batch;
-	int err;
-
-	vma = i915_vma_instance(spin->obj, vm, NULL);
-	if (IS_ERR(vma))
-		return PTR_ERR(vma);
-
-	hws = i915_vma_instance(spin->hws, vm, NULL);
-	if (IS_ERR(hws))
-		return PTR_ERR(hws);
-
-	err = i915_vma_pin(vma, 0, 0, PIN_USER);
-	if (err)
-		return err;
-
-	err = i915_vma_pin(hws, 0, 0, PIN_USER);
-	if (err)
-		goto unpin_vma;
-
-	err = i915_vma_move_to_active(vma, rq, 0);
-	if (err)
-		goto unpin_hws;
-
-	if (!i915_gem_object_has_active_reference(vma->obj)) {
-		i915_gem_object_get(vma->obj);
-		i915_gem_object_set_active_reference(vma->obj);
-	}
-
-	err = i915_vma_move_to_active(hws, rq, 0);
-	if (err)
-		goto unpin_hws;
-
-	if (!i915_gem_object_has_active_reference(hws->obj)) {
-		i915_gem_object_get(hws->obj);
-		i915_gem_object_set_active_reference(hws->obj);
-	}
-
-	batch = spin->batch;
-
-	*batch++ = MI_STORE_DWORD_IMM_GEN4;
-	*batch++ = lower_32_bits(hws_address(hws, rq));
-	*batch++ = upper_32_bits(hws_address(hws, rq));
-	*batch++ = rq->fence.seqno;
-
-	*batch++ = arbitration_command;
-
-	*batch++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
-	*batch++ = lower_32_bits(vma->node.start);
-	*batch++ = upper_32_bits(vma->node.start);
-	*batch++ = MI_BATCH_BUFFER_END; /* not reached */
-
-	i915_gem_chipset_flush(spin->i915);
-
-	err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, 0);
-
-unpin_hws:
-	i915_vma_unpin(hws);
-unpin_vma:
-	i915_vma_unpin(vma);
-	return err;
-}
-
-static struct i915_request *
-spinner_create_request(struct spinner *spin,
-		       struct i915_gem_context *ctx,
-		       struct intel_engine_cs *engine,
-		       u32 arbitration_command)
-{
-	struct i915_request *rq;
-	int err;
-
-	rq = i915_request_alloc(engine, ctx);
-	if (IS_ERR(rq))
-		return rq;
-
-	err = emit_recurse_batch(spin, rq, arbitration_command);
-	if (err) {
-		i915_request_add(rq);
-		return ERR_PTR(err);
-	}
-
-	return rq;
-}
-
-static u32 hws_seqno(const struct spinner *spin, const struct i915_request *rq)
-{
-	u32 *seqno = spin->seqno + seqno_offset(rq->fence.context);
-
-	return READ_ONCE(*seqno);
-}
-
-static void spinner_end(struct spinner *spin)
-{
-	*spin->batch = MI_BATCH_BUFFER_END;
-	i915_gem_chipset_flush(spin->i915);
-}
-
-static void spinner_fini(struct spinner *spin)
-{
-	spinner_end(spin);
-
-	i915_gem_object_unpin_map(spin->obj);
-	i915_gem_object_put(spin->obj);
-
-	i915_gem_object_unpin_map(spin->hws);
-	i915_gem_object_put(spin->hws);
-}
-
-static bool wait_for_spinner(struct spinner *spin, struct i915_request *rq)
-{
-	if (!wait_event_timeout(rq->execute,
-				READ_ONCE(rq->global_seqno),
-				msecs_to_jiffies(10)))
-		return false;
-
-	return !(wait_for_us(i915_seqno_passed(hws_seqno(spin, rq),
-					       rq->fence.seqno),
-			     10) &&
-		 wait_for(i915_seqno_passed(hws_seqno(spin, rq),
-					    rq->fence.seqno),
-			  1000));
-}
-
 static int live_sanitycheck(void *arg)
 {
 	struct drm_i915_private *i915 = arg;
 	struct intel_engine_cs *engine;
 	struct i915_gem_context *ctx;
 	enum intel_engine_id id;
-	struct spinner spin;
+	struct igt_spinner spin;
 	int err = -ENOMEM;
 
 	if (!HAS_LOGICAL_RING_CONTEXTS(i915))
@@ -224,7 +26,7 @@ static int live_sanitycheck(void *arg)
 	mutex_lock(&i915->drm.struct_mutex);
 	intel_runtime_pm_get(i915);
 
-	if (spinner_init(&spin, i915))
+	if (igt_spinner_init(&spin, i915))
 		goto err_unlock;
 
 	ctx = kernel_context(i915);
@@ -234,14 +36,14 @@ static int live_sanitycheck(void *arg)
 	for_each_engine(engine, i915, id) {
 		struct i915_request *rq;
 
-		rq = spinner_create_request(&spin, ctx, engine, MI_NOOP);
+		rq = igt_spinner_create_request(&spin, ctx, engine, MI_NOOP);
 		if (IS_ERR(rq)) {
 			err = PTR_ERR(rq);
 			goto err_ctx;
 		}
 
 		i915_request_add(rq);
-		if (!wait_for_spinner(&spin, rq)) {
+		if (!igt_wait_for_spinner(&spin, rq)) {
 			GEM_TRACE("spinner failed to start\n");
 			GEM_TRACE_DUMP();
 			i915_gem_set_wedged(i915);
@@ -249,7 +51,7 @@ static int live_sanitycheck(void *arg)
 			goto err_ctx;
 		}
 
-		spinner_end(&spin);
+		igt_spinner_end(&spin);
 		if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
 			err = -EIO;
 			goto err_ctx;
@@ -260,7 +62,7 @@ static int live_sanitycheck(void *arg)
 err_ctx:
 	kernel_context_close(ctx);
 err_spin:
-	spinner_fini(&spin);
+	igt_spinner_fini(&spin);
 err_unlock:
 	igt_flush_test(i915, I915_WAIT_LOCKED);
 	intel_runtime_pm_put(i915);
@@ -272,7 +74,7 @@ static int live_preempt(void *arg)
 {
 	struct drm_i915_private *i915 = arg;
 	struct i915_gem_context *ctx_hi, *ctx_lo;
-	struct spinner spin_hi, spin_lo;
+	struct igt_spinner spin_hi, spin_lo;
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
 	int err = -ENOMEM;
@@ -283,10 +85,10 @@ static int live_preempt(void *arg)
 	mutex_lock(&i915->drm.struct_mutex);
 	intel_runtime_pm_get(i915);
 
-	if (spinner_init(&spin_hi, i915))
+	if (igt_spinner_init(&spin_hi, i915))
 		goto err_unlock;
 
-	if (spinner_init(&spin_lo, i915))
+	if (igt_spinner_init(&spin_lo, i915))
 		goto err_spin_hi;
 
 	ctx_hi = kernel_context(i915);
@@ -304,15 +106,15 @@ static int live_preempt(void *arg)
 	for_each_engine(engine, i915, id) {
 		struct i915_request *rq;
 
-		rq = spinner_create_request(&spin_lo, ctx_lo, engine,
-					    MI_ARB_CHECK);
+		rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
+						MI_ARB_CHECK);
 		if (IS_ERR(rq)) {
 			err = PTR_ERR(rq);
 			goto err_ctx_lo;
 		}
 
 		i915_request_add(rq);
-		if (!wait_for_spinner(&spin_lo, rq)) {
+		if (!igt_wait_for_spinner(&spin_lo, rq)) {
 			GEM_TRACE("lo spinner failed to start\n");
 			GEM_TRACE_DUMP();
 			i915_gem_set_wedged(i915);
@@ -320,16 +122,16 @@ static int live_preempt(void *arg)
 			goto err_ctx_lo;
 		}
 
-		rq = spinner_create_request(&spin_hi, ctx_hi, engine,
-					    MI_ARB_CHECK);
+		rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine,
+						MI_ARB_CHECK);
 		if (IS_ERR(rq)) {
-			spinner_end(&spin_lo);
+			igt_spinner_end(&spin_lo);
 			err = PTR_ERR(rq);
 			goto err_ctx_lo;
 		}
 
 		i915_request_add(rq);
-		if (!wait_for_spinner(&spin_hi, rq)) {
+		if (!igt_wait_for_spinner(&spin_hi, rq)) {
 			GEM_TRACE("hi spinner failed to start\n");
 			GEM_TRACE_DUMP();
 			i915_gem_set_wedged(i915);
@@ -337,8 +139,8 @@ static int live_preempt(void *arg)
 			goto err_ctx_lo;
 		}
 
-		spinner_end(&spin_hi);
-		spinner_end(&spin_lo);
+		igt_spinner_end(&spin_hi);
+		igt_spinner_end(&spin_lo);
 		if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
 			err = -EIO;
 			goto err_ctx_lo;
@@ -351,9 +153,9 @@ err_ctx_lo:
 err_ctx_hi:
 	kernel_context_close(ctx_hi);
 err_spin_lo:
-	spinner_fini(&spin_lo);
+	igt_spinner_fini(&spin_lo);
 err_spin_hi:
-	spinner_fini(&spin_hi);
+	igt_spinner_fini(&spin_hi);
 err_unlock:
 	igt_flush_test(i915, I915_WAIT_LOCKED);
 	intel_runtime_pm_put(i915);
@@ -365,7 +167,7 @@ static int live_late_preempt(void *arg)
 {
 	struct drm_i915_private *i915 = arg;
 	struct i915_gem_context *ctx_hi, *ctx_lo;
-	struct spinner spin_hi, spin_lo;
+	struct igt_spinner spin_hi, spin_lo;
 	struct intel_engine_cs *engine;
 	struct i915_sched_attr attr = {};
 	enum intel_engine_id id;
@@ -377,10 +179,10 @@ static int live_late_preempt(void *arg)
 	mutex_lock(&i915->drm.struct_mutex);
 	intel_runtime_pm_get(i915);
 
-	if (spinner_init(&spin_hi, i915))
+	if (igt_spinner_init(&spin_hi, i915))
 		goto err_unlock;
 
-	if (spinner_init(&spin_lo, i915))
+	if (igt_spinner_init(&spin_lo, i915))
 		goto err_spin_hi;
 
 	ctx_hi = kernel_context(i915);
@@ -394,28 +196,29 @@ static int live_late_preempt(void *arg)
 	for_each_engine(engine, i915, id) {
 		struct i915_request *rq;
 
-		rq = spinner_create_request(&spin_lo, ctx_lo, engine,
-					    MI_ARB_CHECK);
+		rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
+						MI_ARB_CHECK);
 		if (IS_ERR(rq)) {
 			err = PTR_ERR(rq);
 			goto err_ctx_lo;
 		}
 
 		i915_request_add(rq);
-		if (!wait_for_spinner(&spin_lo, rq)) {
+		if (!igt_wait_for_spinner(&spin_lo, rq)) {
 			pr_err("First context failed to start\n");
 			goto err_wedged;
 		}
 
-		rq = spinner_create_request(&spin_hi, ctx_hi, engine, MI_NOOP);
+		rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine,
+						MI_NOOP);
 		if (IS_ERR(rq)) {
-			spinner_end(&spin_lo);
+			igt_spinner_end(&spin_lo);
 			err = PTR_ERR(rq);
 			goto err_ctx_lo;
 		}
 
 		i915_request_add(rq);
-		if (wait_for_spinner(&spin_hi, rq)) {
+		if (igt_wait_for_spinner(&spin_hi, rq)) {
 			pr_err("Second context overtook first?\n");
 			goto err_wedged;
 		}
@@ -423,14 +226,14 @@ static int live_late_preempt(void *arg)
 		attr.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX);
 		engine->schedule(rq, &attr);
 
-		if (!wait_for_spinner(&spin_hi, rq)) {
+		if (!igt_wait_for_spinner(&spin_hi, rq)) {
 			pr_err("High priority context failed to preempt the low priority context\n");
 			GEM_TRACE_DUMP();
 			goto err_wedged;
 		}
 
-		spinner_end(&spin_hi);
-		spinner_end(&spin_lo);
+		igt_spinner_end(&spin_hi);
+		igt_spinner_end(&spin_lo);
 		if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
 			err = -EIO;
 			goto err_ctx_lo;
@@ -443,9 +246,9 @@ err_ctx_lo:
 err_ctx_hi:
 	kernel_context_close(ctx_hi);
 err_spin_lo:
-	spinner_fini(&spin_lo);
+	igt_spinner_fini(&spin_lo);
 err_spin_hi:
-	spinner_fini(&spin_hi);
+	igt_spinner_fini(&spin_hi);
 err_unlock:
 	igt_flush_test(i915, I915_WAIT_LOCKED);
 	intel_runtime_pm_put(i915);
@@ -453,8 +256,8 @@ err_unlock:
 	return err;
 
 err_wedged:
-	spinner_end(&spin_hi);
-	spinner_end(&spin_lo);
+	igt_spinner_end(&spin_hi);
+	igt_spinner_end(&spin_lo);
 	i915_gem_set_wedged(i915);
 	err = -EIO;
 	goto err_ctx_lo;
@@ -464,7 +267,7 @@ static int live_preempt_hang(void *arg)
 {
 	struct drm_i915_private *i915 = arg;
 	struct i915_gem_context *ctx_hi, *ctx_lo;
-	struct spinner spin_hi, spin_lo;
+	struct igt_spinner spin_hi, spin_lo;
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
 	int err = -ENOMEM;
@@ -478,10 +281,10 @@ static int live_preempt_hang(void *arg)
 	mutex_lock(&i915->drm.struct_mutex);
 	intel_runtime_pm_get(i915);
 
-	if (spinner_init(&spin_hi, i915))
+	if (igt_spinner_init(&spin_hi, i915))
 		goto err_unlock;
 
-	if (spinner_init(&spin_lo, i915))
+	if (igt_spinner_init(&spin_lo, i915))
 		goto err_spin_hi;
 
 	ctx_hi = kernel_context(i915);
@@ -500,15 +303,15 @@ static int live_preempt_hang(void *arg)
 		if (!intel_engine_has_preemption(engine))
 			continue;
 
-		rq = spinner_create_request(&spin_lo, ctx_lo, engine,
-					    MI_ARB_CHECK);
+		rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
+						MI_ARB_CHECK);
 		if (IS_ERR(rq)) {
 			err = PTR_ERR(rq);
 			goto err_ctx_lo;
 		}
 
 		i915_request_add(rq);
-		if (!wait_for_spinner(&spin_lo, rq)) {
+		if (!igt_wait_for_spinner(&spin_lo, rq)) {
 			GEM_TRACE("lo spinner failed to start\n");
 			GEM_TRACE_DUMP();
 			i915_gem_set_wedged(i915);
@@ -516,10 +319,10 @@ static int live_preempt_hang(void *arg)
 			goto err_ctx_lo;
 		}
 
-		rq = spinner_create_request(&spin_hi, ctx_hi, engine,
-					    MI_ARB_CHECK);
+		rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine,
+						MI_ARB_CHECK);
 		if (IS_ERR(rq)) {
-			spinner_end(&spin_lo);
+			igt_spinner_end(&spin_lo);
 			err = PTR_ERR(rq);
 			goto err_ctx_lo;
 		}
@@ -544,7 +347,7 @@ static int live_preempt_hang(void *arg)
 
 		engine->execlists.preempt_hang.inject_hang = false;
 
-		if (!wait_for_spinner(&spin_hi, rq)) {
+		if (!igt_wait_for_spinner(&spin_hi, rq)) {
 			GEM_TRACE("hi spinner failed to start\n");
 			GEM_TRACE_DUMP();
 			i915_gem_set_wedged(i915);
@@ -552,8 +355,8 @@ static int live_preempt_hang(void *arg)
 			goto err_ctx_lo;
 		}
 
-		spinner_end(&spin_hi);
-		spinner_end(&spin_lo);
+		igt_spinner_end(&spin_hi);
+		igt_spinner_end(&spin_lo);
 		if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
 			err = -EIO;
 			goto err_ctx_lo;
@@ -566,9 +369,9 @@ err_ctx_lo:
 err_ctx_hi:
 	kernel_context_close(ctx_hi);
 err_spin_lo:
-	spinner_fini(&spin_lo);
+	igt_spinner_fini(&spin_lo);
 err_spin_hi:
-	spinner_fini(&spin_hi);
+	igt_spinner_fini(&spin_hi);
 err_unlock:
 	igt_flush_test(i915, I915_WAIT_LOCKED);
 	intel_runtime_pm_put(i915);