drm/i915: Track i915_active using debugobjects
Provide runtime asserts and tracking of i915_active via debugobjects. For
example, this should allow us to check that the i915_active is only active
when we expect it to be and is never freed too early.

One consequence is that, for simplicity, we no longer allow i915_active to
be on-stack, which only affected the selftests.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190621183801.23252-2-chris@chris-wilson.co.uk
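For readers unfamiliar with the debugobjects core, the following is a minimal
sketch of the pattern this patch wires into i915_active. The struct foo and
its helpers are illustrative only and not part of the patch; the
debug_object_*() calls and struct debug_obj_descr (with its debug_hint
callback) are the <linux/debugobjects.h> API used in the diff below, and they
only perform checks when CONFIG_DEBUG_OBJECTS is enabled.

/* Illustrative sketch of the debugobjects pattern (not from the patch). */
#include <linux/debugobjects.h>

struct foo {
	void (*retire)(struct foo *obj);	/* hypothetical callback */
	unsigned int count;
};

/* Give debugobjects something recognisable to print in its warnings. */
static void *foo_debug_hint(void *addr)
{
	struct foo *obj = addr;

	return (void *)obj->retire ?: (void *)obj;
}

static struct debug_obj_descr foo_debug_desc = {
	.name = "foo",
	.debug_hint = foo_debug_hint,
};

static void foo_init(struct foo *obj)
{
	debug_object_init(obj, &foo_debug_desc);	/* tracked, but idle */
	obj->count = 0;
}

static bool foo_acquire(struct foo *obj)
{
	if (obj->count++)
		return false;

	debug_object_activate(obj, &foo_debug_desc);	/* 0 -> 1 transition */
	return true;
}

static void foo_release(struct foo *obj)
{
	if (--obj->count)
		return;

	debug_object_deactivate(obj, &foo_debug_desc);	/* last user gone */
}

static void foo_fini(struct foo *obj)
{
	debug_object_free(obj, &foo_debug_desc);	/* warns if still active */
}

With the hooks in place, debugobjects can flag a foo that is freed while still
active, or used before it was ever initialised, which is exactly the class of
i915_active bug the patch is after.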
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -4,6 +4,8 @@
  * Copyright © 2019 Intel Corporation
  */
 
+#include <linux/debugobjects.h>
+
 #include "gt/intel_engine_pm.h"
 
 #include "i915_drv.h"
@@ -31,6 +33,55 @@ struct active_node {
 	u64 timeline;
 };
 
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) && IS_ENABLED(CONFIG_DEBUG_OBJECTS)
+
+static void *active_debug_hint(void *addr)
+{
+	struct i915_active *ref = addr;
+
+	return (void *)ref->retire ?: (void *)ref;
+}
+
+static struct debug_obj_descr active_debug_desc = {
+	.name = "i915_active",
+	.debug_hint = active_debug_hint,
+};
+
+static void debug_active_init(struct i915_active *ref)
+{
+	debug_object_init(ref, &active_debug_desc);
+}
+
+static void debug_active_activate(struct i915_active *ref)
+{
+	debug_object_activate(ref, &active_debug_desc);
+}
+
+static void debug_active_deactivate(struct i915_active *ref)
+{
+	debug_object_deactivate(ref, &active_debug_desc);
+}
+
+static void debug_active_fini(struct i915_active *ref)
+{
+	debug_object_free(ref, &active_debug_desc);
+}
+
+static void debug_active_assert(struct i915_active *ref)
+{
+	debug_object_assert_init(ref, &active_debug_desc);
+}
+
+#else
+
+static inline void debug_active_init(struct i915_active *ref) { }
+static inline void debug_active_activate(struct i915_active *ref) { }
+static inline void debug_active_deactivate(struct i915_active *ref) { }
+static inline void debug_active_fini(struct i915_active *ref) { }
+static inline void debug_active_assert(struct i915_active *ref) { }
+
+#endif
+
 static void
 __active_park(struct i915_active *ref)
 {
@@ -50,6 +101,8 @@ __active_retire(ref)
 	if (--ref->count)
 		return;
 
+	debug_active_deactivate(ref);
+
 	/* return the unused nodes to our slabcache */
 	__active_park(ref);
 
@@ -155,6 +208,8 @@ void i915_active_init(struct drm_i915_private *i915,
 		      struct i915_active *ref,
 		      void (*retire)(struct i915_active *ref))
 {
+	debug_active_init(ref);
+
 	ref->i915 = i915;
 	ref->retire = retire;
 	ref->tree = RB_ROOT;
@@ -191,13 +246,21 @@ out:
 
 bool i915_active_acquire(struct i915_active *ref)
 {
+	debug_active_assert(ref);
 	lockdep_assert_held(BKL(ref));
-	return !ref->count++;
+
+	if (ref->count++)
+		return false;
+
+	debug_active_activate(ref);
+	return true;
 }
 
 void i915_active_release(struct i915_active *ref)
 {
+	debug_active_assert(ref);
 	lockdep_assert_held(BKL(ref));
+
 	__active_retire(ref);
 }
 
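As a usage illustration (hypothetical caller, not part of the patch): the
first i915_active_acquire() on an idle tracker returns true and is what now
triggers debug_active_activate(), while the final i915_active_release() drops
the count back to zero in __active_retire() and deactivates the object again.
The caller is assumed to already hold the lock that BKL(ref) asserts
(struct_mutex at this point in the series).

/* Hypothetical caller, for illustration only. */
#include "i915_active.h"

static void example_track(struct i915_active *ref)
{
	/* caller is assumed to hold the lock checked by BKL(ref) */
	if (i915_active_acquire(ref))
		pr_debug("tracker transitioned idle -> active\n");

	/* ... register requests against ref while it is acquired ... */

	i915_active_release(ref); /* final release retires and deactivates */
}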
@@ -260,6 +323,7 @@ out:
 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
 void i915_active_fini(struct i915_active *ref)
 {
+	debug_active_fini(ref);
 	GEM_BUG_ON(i915_active_request_isset(&ref->last));
 	GEM_BUG_ON(!RB_EMPTY_ROOT(&ref->tree));
 	GEM_BUG_ON(ref->count);
--- a/drivers/gpu/drm/i915/selftests/i915_active.c
+++ b/drivers/gpu/drm/i915/selftests/i915_active.c
@@ -16,28 +16,51 @@ struct live_active {
 	bool retired;
 };
 
-static void __live_active_retire(struct i915_active *base)
+static void __live_free(struct live_active *active)
+{
+	i915_active_fini(&active->base);
+	kfree(active);
+}
+
+static void __live_retire(struct i915_active *base)
 {
 	struct live_active *active = container_of(base, typeof(*active), base);
 
 	active->retired = true;
 }
 
-static int __live_active_setup(struct drm_i915_private *i915,
-			       struct live_active *active)
+static struct live_active *__live_alloc(struct drm_i915_private *i915)
+{
+	struct live_active *active;
+
+	active = kzalloc(sizeof(*active), GFP_KERNEL);
+	if (!active)
+		return NULL;
+
+	i915_active_init(i915, &active->base, __live_retire);
+
+	return active;
+}
+
+static struct live_active *
+__live_active_setup(struct drm_i915_private *i915)
 {
 	struct intel_engine_cs *engine;
 	struct i915_sw_fence *submit;
+	struct live_active *active;
 	enum intel_engine_id id;
 	unsigned int count = 0;
 	int err = 0;
 
-	submit = heap_fence_create(GFP_KERNEL);
-	if (!submit)
-		return -ENOMEM;
+	active = __live_alloc(i915);
+	if (!active)
+		return ERR_PTR(-ENOMEM);
 
-	i915_active_init(i915, &active->base, __live_active_retire);
-	active->retired = false;
+	submit = heap_fence_create(GFP_KERNEL);
+	if (!submit) {
+		kfree(active);
+		return ERR_PTR(-ENOMEM);
+	}
 
 	if (!i915_active_acquire(&active->base)) {
 		pr_err("First i915_active_acquire should report being idle\n");
@@ -84,64 +107,79 @@ out:
 	i915_sw_fence_commit(submit);
 	heap_fence_put(submit);
 
-	return err;
+	/* XXX leaks live_active on error */
+	return err ? ERR_PTR(err) : active;
 }
 
 static int live_active_wait(void *arg)
 {
 	struct drm_i915_private *i915 = arg;
-	struct live_active active;
+	struct live_active *active;
 	intel_wakeref_t wakeref;
-	int err;
+	int err = 0;
 
 	/* Check that we get a callback when requests retire upon waiting */
 
 	mutex_lock(&i915->drm.struct_mutex);
 	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
 
-	err = __live_active_setup(i915, &active);
+	active = __live_active_setup(i915);
+	if (IS_ERR(active)) {
+		err = PTR_ERR(active);
+		goto err;
+	}
 
-	i915_active_wait(&active.base);
-	if (!active.retired) {
+	i915_active_wait(&active->base);
+	if (!active->retired) {
 		pr_err("i915_active not retired after waiting!\n");
 		err = -EINVAL;
 	}
 
-	i915_active_fini(&active.base);
+	__live_free(active);
+
 	if (igt_flush_test(i915, I915_WAIT_LOCKED))
 		err = -EIO;
 
+err:
 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 	mutex_unlock(&i915->drm.struct_mutex);
 
 	return err;
 }
 
static int live_active_retire(void *arg)
 {
 	struct drm_i915_private *i915 = arg;
-	struct live_active active;
+	struct live_active *active;
 	intel_wakeref_t wakeref;
-	int err;
+	int err = 0;
 
 	/* Check that we get a callback when requests are indirectly retired */
 
 	mutex_lock(&i915->drm.struct_mutex);
 	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
 
-	err = __live_active_setup(i915, &active);
+	active = __live_active_setup(i915);
+	if (IS_ERR(active)) {
+		err = PTR_ERR(active);
+		goto err;
+	}
 
 	/* waits for & retires all requests */
 	if (igt_flush_test(i915, I915_WAIT_LOCKED))
 		err = -EIO;
 
-	if (!active.retired) {
+	if (!active->retired) {
 		pr_err("i915_active not retired after flushing!\n");
 		err = -EINVAL;
 	}
 
-	i915_active_fini(&active.base);
+	__live_free(active);
 
+err:
 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 	mutex_unlock(&i915->drm.struct_mutex);
 
 	return err;
 }
 