drm/i915/debug: Convert i915_verify_active() to scan all lists

... and check more regularly.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>

commit 23bc598253 (parent 891b48cfc8)
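In short: the WATCH_INACTIVE / i915_verify_inactive() debug hook becomes WATCH_LISTS / i915_verify_lists(), which returns an error count so each call site can simply be wrapped in WARN_ON(). Below is a minimal, self-contained userspace sketch of that gating pattern; the WARN_ON stand-in and main() are hypothetical scaffolding, only the macro/function shape mirrors the patch.

#include <stdio.h>

#define WATCH_LISTS 0			/* compile-time switch, as in i915_drv.h */

#if WATCH_LISTS
static int verify_lists(void)		/* stands in for i915_verify_lists(dev) */
{
	/* walk the bookkeeping lists and return the number of violations */
	return 0;
}
#else
#define verify_lists() 0		/* checks compile away when debugging is off */
#endif

/* illustrative stand-in for the kernel's WARN_ON() */
#define WARN_ON(cond) \
	((cond) ? (fprintf(stderr, "WARN_ON(%s)\n", #cond), 1) : 0)

int main(void)
{
	WARN_ON(verify_lists());	/* the call-site pattern added in i915_gem.c */
	return 0;
}

With WATCH_LISTS set to 0 the whole check reduces to WARN_ON(0), so the extra call sites cost nothing in normal builds.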
drivers/gpu/drm/i915/i915_drv.h
@@ -77,7 +77,7 @@ enum plane {
 #define WATCH_COHERENCY	0
 #define WATCH_EXEC	0
 #define WATCH_RELOC	0
-#define WATCH_INACTIVE	0
+#define WATCH_LISTS	0
 #define WATCH_PWRITE	0
 
 #define I915_GEM_PHYS_CURSOR_0 1
@@ -1079,10 +1079,10 @@ bool i915_gem_object_fence_offset_ok(struct drm_gem_object *obj,
 /* i915_gem_debug.c */
 void i915_gem_dump_object(struct drm_gem_object *obj, int len,
 			  const char *where, uint32_t mark);
-#if WATCH_INACTIVE
-void i915_verify_inactive(struct drm_device *dev, char *file, int line);
+#if WATCH_LISTS
+int i915_verify_lists(struct drm_device *dev);
 #else
-#define i915_verify_inactive(dev, file, line)
+#define i915_verify_lists(dev) 0
 #endif
 void i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle);
 void i915_gem_dump_object(struct drm_gem_object *obj, int len,
drivers/gpu/drm/i915/i915_gem.c
@@ -109,6 +109,7 @@ static int i915_mutex_lock_interruptible(struct drm_device *dev)
 		return -EAGAIN;
 	}
 
+	WARN_ON(i915_verify_lists(dev));
 	return 0;
 }
 
@@ -1612,7 +1613,6 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 
-	i915_verify_inactive(dev, __FILE__, __LINE__);
 	if (obj_priv->pin_count != 0)
 		list_move_tail(&obj_priv->list, &dev_priv->mm.pinned_list);
 	else
@@ -1626,7 +1626,7 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
 		obj_priv->active = 0;
 		drm_gem_object_unreference(obj);
 	}
-	i915_verify_inactive(dev, __FILE__, __LINE__);
+	WARN_ON(i915_verify_lists(dev));
 }
 
 static void
@@ -1821,6 +1821,8 @@ i915_gem_retire_requests_ring(struct drm_device *dev,
 	    list_empty(&ring->request_list))
 		return;
 
+	WARN_ON(i915_verify_lists(dev));
+
 	seqno = ring->get_seqno(dev, ring);
 	while (!list_empty(&ring->request_list)) {
 		struct drm_i915_gem_request *request;
@@ -1865,6 +1867,8 @@ i915_gem_retire_requests_ring(struct drm_device *dev,
 		ring->user_irq_put(dev, ring);
 		dev_priv->trace_irq_seqno = 0;
 	}
+
+	WARN_ON(i915_verify_lists(dev));
 }
 
 void
@@ -3690,8 +3694,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	if (ret)
 		goto pre_mutex_err;
 
-	i915_verify_inactive(dev, __FILE__, __LINE__);
-
 	if (dev_priv->mm.suspended) {
 		mutex_unlock(&dev->struct_mutex);
 		ret = -EBUSY;
@@ -3811,8 +3813,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		goto err;
 	}
 
-	i915_verify_inactive(dev, __FILE__, __LINE__);
-
 	/* Zero the global flush/invalidate flags. These
 	 * will be modified as new domains are computed
 	 * for each object
@@ -3828,8 +3828,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		i915_gem_object_set_to_gpu_domain(obj);
 	}
 
-	i915_verify_inactive(dev, __FILE__, __LINE__);
-
 	if (dev->invalidate_domains | dev->flush_domains) {
 #if WATCH_EXEC
 		DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
@@ -3860,8 +3858,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 					    old_write_domain);
 	}
 
-	i915_verify_inactive(dev, __FILE__, __LINE__);
-
 #if WATCH_COHERENCY
 	for (i = 0; i < args->buffer_count; i++) {
 		i915_gem_object_check_coherency(object_list[i],
@@ -3890,8 +3886,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	 */
 	i915_retire_commands(dev, ring);
 
-	i915_verify_inactive(dev, __FILE__, __LINE__);
-
 	for (i = 0; i < args->buffer_count; i++) {
 		struct drm_gem_object *obj = object_list[i];
 		obj_priv = to_intel_bo(obj);
@@ -3902,8 +3896,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	i915_add_request(dev, file_priv, request, ring);
 	request = NULL;
 
-	i915_verify_inactive(dev, __FILE__, __LINE__);
-
 err:
 	for (i = 0; i < pinned; i++)
 		i915_gem_object_unpin(object_list[i]);
@@ -4094,8 +4086,7 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
 	int ret;
 
 	BUG_ON(obj_priv->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT);
-
-	i915_verify_inactive(dev, __FILE__, __LINE__);
+	WARN_ON(i915_verify_lists(dev));
 
 	if (obj_priv->gtt_space != NULL) {
 		if (alignment == 0)
@@ -4129,8 +4120,8 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
 		list_move_tail(&obj_priv->list,
 			       &dev_priv->mm.pinned_list);
 	}
-	i915_verify_inactive(dev, __FILE__, __LINE__);
 
+	WARN_ON(i915_verify_lists(dev));
 	return 0;
 }
 
@@ -4141,7 +4132,7 @@ i915_gem_object_unpin(struct drm_gem_object *obj)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 
-	i915_verify_inactive(dev, __FILE__, __LINE__);
+	WARN_ON(i915_verify_lists(dev));
 	obj_priv->pin_count--;
 	BUG_ON(obj_priv->pin_count < 0);
 	BUG_ON(obj_priv->gtt_space == NULL);
@@ -4157,7 +4148,7 @@ i915_gem_object_unpin(struct drm_gem_object *obj)
 		atomic_dec(&dev->pin_count);
 		atomic_sub(obj->size, &dev->pin_memory);
 	}
-	i915_verify_inactive(dev, __FILE__, __LINE__);
+	WARN_ON(i915_verify_lists(dev));
 }
 
 int
drivers/gpu/drm/i915/i915_gem_debug.c
@@ -30,24 +30,107 @@
 #include "i915_drm.h"
 #include "i915_drv.h"
 
-#if WATCH_INACTIVE
-void
-i915_verify_inactive(struct drm_device *dev, char *file, int line)
+#if WATCH_LISTS
+int
+i915_verify_lists(struct drm_device *dev)
 {
+	static int warned;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_gem_object *obj;
-	struct drm_i915_gem_object *obj_priv;
+	struct drm_i915_gem_object *obj;
+	int err = 0;
 
-	list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
-		obj = &obj_priv->base;
-		if (obj_priv->pin_count || obj_priv->active ||
-		    (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
-					   I915_GEM_DOMAIN_GTT)))
-			DRM_ERROR("inactive %p (p %d a %d w %x) %s:%d\n",
-				  obj,
-				  obj_priv->pin_count, obj_priv->active,
-				  obj->write_domain, file, line);
-	}
+	if (warned)
+		return 0;
+
+	list_for_each_entry(obj, &dev_priv->render_ring.active_list, list) {
+		if (obj->base.dev != dev ||
+		    !atomic_read(&obj->base.refcount.refcount)) {
+			DRM_ERROR("freed render active %p\n", obj);
+			err++;
+			break;
+		} else if (!obj->active ||
+			   (obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0) {
+			DRM_ERROR("invalid render active %p (a %d r %x)\n",
+				  obj,
+				  obj->active,
+				  obj->base.read_domains);
+			err++;
+		} else if (obj->base.write_domain && list_empty(&obj->gpu_write_list)) {
+			DRM_ERROR("invalid render active %p (w %x, gwl %d)\n",
+				  obj,
+				  obj->base.write_domain,
+				  !list_empty(&obj->gpu_write_list));
+			err++;
+		}
+	}
+
+	list_for_each_entry(obj, &dev_priv->mm.flushing_list, list) {
+		if (obj->base.dev != dev ||
+		    !atomic_read(&obj->base.refcount.refcount)) {
+			DRM_ERROR("freed flushing %p\n", obj);
+			err++;
+			break;
+		} else if (!obj->active ||
+			   (obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0 ||
+			   list_empty(&obj->gpu_write_list)) {
+			DRM_ERROR("invalid flushing %p (a %d w %x gwl %d)\n",
+				  obj,
+				  obj->active,
+				  obj->base.write_domain,
+				  !list_empty(&obj->gpu_write_list));
+			err++;
+		}
+	}
+
+	list_for_each_entry(obj, &dev_priv->mm.gpu_write_list, gpu_write_list) {
+		if (obj->base.dev != dev ||
+		    !atomic_read(&obj->base.refcount.refcount)) {
+			DRM_ERROR("freed gpu write %p\n", obj);
+			err++;
+			break;
+		} else if (!obj->active ||
+			   (obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0) {
+			DRM_ERROR("invalid gpu write %p (a %d w %x)\n",
+				  obj,
+				  obj->active,
+				  obj->base.write_domain);
+			err++;
+		}
+	}
+
+	list_for_each_entry(obj, &dev_priv->mm.inactive_list, list) {
+		if (obj->base.dev != dev ||
+		    !atomic_read(&obj->base.refcount.refcount)) {
+			DRM_ERROR("freed inactive %p\n", obj);
+			err++;
+			break;
+		} else if (obj->pin_count || obj->active ||
+			   (obj->base.write_domain & I915_GEM_GPU_DOMAINS)) {
+			DRM_ERROR("invalid inactive %p (p %d a %d w %x)\n",
+				  obj,
+				  obj->pin_count, obj->active,
+				  obj->base.write_domain);
+			err++;
+		}
+	}
+
+	list_for_each_entry(obj, &dev_priv->mm.pinned_list, list) {
+		if (obj->base.dev != dev ||
+		    !atomic_read(&obj->base.refcount.refcount)) {
+			DRM_ERROR("freed pinned %p\n", obj);
+			err++;
+			break;
+		} else if (!obj->pin_count || obj->active ||
+			   (obj->base.write_domain & I915_GEM_GPU_DOMAINS)) {
+			DRM_ERROR("invalid pinned %p (p %d a %d w %x)\n",
+				  obj,
+				  obj->pin_count, obj->active,
+				  obj->base.write_domain);
+			err++;
+		}
+	}
+
+	return warned = err;
 }
-#endif /* WATCH_INACTIVE */
+#endif /* WATCH_LISTS */
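Each list walk in the new i915_verify_lists() follows the same pattern: validate per-object invariants, log the offenders, accumulate an error count, and latch it in the static `warned` so a broken state is reported only once instead of on every subsequent check. A self-contained userspace sketch of that scan-and-latch pattern, assuming a made-up struct obj and a single inactive list rather than the driver's real types:

#include <stdio.h>

struct obj {
	int active;
	int pin_count;
	struct obj *next;
};

static int verify_inactive(struct obj *head)
{
	static int warned;		/* mirrors "static int warned" in the patch */
	int err = 0;

	if (warned)
		return 0;		/* already reported once; stay quiet */

	for (struct obj *o = head; o; o = o->next) {
		/* inactive objects must be idle and unpinned */
		if (o->active || o->pin_count) {
			fprintf(stderr, "invalid inactive %p (p %d a %d)\n",
				(void *)o, o->pin_count, o->active);
			err++;
		}
	}

	return warned = err;		/* non-zero makes the caller's WARN_ON fire once */
}

int main(void)
{
	struct obj good = { 0, 0, NULL };
	struct obj bad  = { 1, 0, &good };	/* an active object on the inactive list */

	printf("first pass: %d\n", verify_inactive(&bad));	/* reports, returns 1 */
	printf("second pass: %d\n", verify_inactive(&bad));	/* latched, returns 0 */
	return 0;
}

The patch applies the same walk, with per-list invariants, to the render ring's active list, the flushing list, the gpu_write list, the inactive list and the pinned list.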
drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -579,6 +579,8 @@ int intel_init_ring_buffer(struct drm_device *dev,
 	int ret;
 
 	ring->dev = dev;
+	INIT_LIST_HEAD(&ring->active_list);
+	INIT_LIST_HEAD(&ring->request_list);
 
 	if (I915_NEED_GFX_HWS(dev)) {
 		ret = init_status_page(dev, ring);
@@ -627,8 +629,6 @@ int intel_init_ring_buffer(struct drm_device *dev,
 		if (ring->space < 0)
 			ring->space += ring->size;
 	}
-	INIT_LIST_HEAD(&ring->active_list);
-	INIT_LIST_HEAD(&ring->request_list);
 	return ret;
 
 err_unmap: