drm/i915: Refactor testing obj->mm.pages
Since we occasionally stuff an error pointer into obj->mm.pages for a semi-permanent or even permanent failure, we have to be more careful and not just test against NULL when deciding if the object has a complete set of its concurrent pages.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20171013202621.7276-1-chris@chris-wilson.co.uk
parent e4ffc83d3f
commit f1fa4f442c
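The rationale above hinges on the kernel's ERR_PTR() convention: an error pointer is non-NULL, so a bare NULL test reports "pages present" even when page acquisition failed. Below is a minimal, self-contained userspace sketch of that pitfall, not kernel code: ERR_PTR(), IS_ERR_OR_NULL() and MAX_ERRNO are re-declared to mirror their <linux/err.h> counterparts so the example builds standalone, and struct fake_obj / has_pages_naive() are hypothetical stand-ins for the real GEM object and the old NULL check.

/*
 * Userspace illustration only; the real helpers live in <linux/err.h>.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define MAX_ERRNO	4095	/* mirrors the kernel's definition */

static inline void *ERR_PTR(long error)
{
	return (void *)error;	/* error codes occupy the top page of the address space */
}

static inline bool IS_ERR_OR_NULL(const void *ptr)
{
	return !ptr || (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

struct sg_table;			/* opaque here; only the pointer value matters */

struct fake_obj {			/* hypothetical stand-in for drm_i915_gem_object */
	struct sg_table *pages;		/* NULL, a valid pointer, or ERR_PTR(-errno) */
};

static bool has_pages_naive(const struct fake_obj *obj)
{
	return obj->pages != NULL;	/* old style of test: fooled by an error pointer */
}

static bool has_pages(const struct fake_obj *obj)
{
	return !IS_ERR_OR_NULL(obj->pages);	/* the pattern i915_gem_object_has_pages() uses */
}

int main(void)
{
	/* A failed get_pages() that stuffed an error pointer into ->pages. */
	struct fake_obj obj = { .pages = ERR_PTR(-EFAULT) };

	printf("naive: %d, robust: %d\n", has_pages_naive(&obj), has_pages(&obj));
	return 0;
}

Built with any C compiler, the naive check reports 1 (pages present) while the IS_ERR_OR_NULL-based check reports 0, which is exactly the distinction the new helper encodes.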
@@ -3566,10 +3566,16 @@ i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
 	return __i915_gem_object_get_pages(obj);
 }
 
+static inline bool
+i915_gem_object_has_pages(struct drm_i915_gem_object *obj)
+{
+	return !IS_ERR_OR_NULL(READ_ONCE(obj->mm.pages));
+}
+
 static inline void
 __i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
 {
-	GEM_BUG_ON(!obj->mm.pages);
+	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
 
 	atomic_inc(&obj->mm.pages_pin_count);
 }
@@ -3583,8 +3589,8 @@ i915_gem_object_has_pinned_pages(struct drm_i915_gem_object *obj)
 static inline void
 __i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
 {
+	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
 	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
-	GEM_BUG_ON(!obj->mm.pages);
 
 	atomic_dec(&obj->mm.pages_pin_count);
 }
@@ -2196,7 +2196,7 @@ void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
 	struct address_space *mapping;
 
 	lockdep_assert_held(&obj->mm.lock);
-	GEM_BUG_ON(obj->mm.pages);
+	GEM_BUG_ON(i915_gem_object_has_pages(obj));
 
 	switch (obj->mm.madv) {
 	case I915_MADV_DONTNEED:
@@ -2259,7 +2259,7 @@ void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
 		return;
 
 	GEM_BUG_ON(obj->bind_count);
-	if (!READ_ONCE(obj->mm.pages))
+	if (!i915_gem_object_has_pages(obj))
 		return;
 
 	/* May be called by shrinker from within get_pages() (on another bo) */
@@ -2563,7 +2563,7 @@ int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
 	if (err)
 		return err;
 
-	if (unlikely(IS_ERR_OR_NULL(obj->mm.pages))) {
+	if (unlikely(!i915_gem_object_has_pages(obj))) {
 		GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
 
 		err = ____i915_gem_object_get_pages(obj);
@@ -2648,7 +2648,7 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
 	type &= ~I915_MAP_OVERRIDE;
 
 	if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
-		if (unlikely(IS_ERR_OR_NULL(obj->mm.pages))) {
+		if (unlikely(!i915_gem_object_has_pages(obj))) {
 			GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
 
 			ret = ____i915_gem_object_get_pages(obj);
@@ -2660,7 +2660,7 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
 		atomic_inc(&obj->mm.pages_pin_count);
 		pinned = false;
 	}
-	GEM_BUG_ON(!obj->mm.pages);
+	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
 
 	ptr = page_unpack_bits(obj->mm.mapping, &has_type);
 	if (ptr && has_type != type) {
@@ -2715,7 +2715,7 @@ i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
 	 * allows it to avoid the cost of retrieving a page (either swapin
 	 * or clearing-before-use) before it is overwritten.
 	 */
-	if (READ_ONCE(obj->mm.pages))
+	if (i915_gem_object_has_pages(obj))
 		return -ENODEV;
 
 	/* Before the pages are instantiated the object is treated as being
@@ -4278,7 +4278,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
 	if (err)
 		goto out;
 
-	if (obj->mm.pages &&
+	if (i915_gem_object_has_pages(obj) &&
 	    i915_gem_object_is_tiled(obj) &&
 	    dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
 		if (obj->mm.madv == I915_MADV_WILLNEED) {
@@ -4297,7 +4297,8 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
 		obj->mm.madv = args->madv;
 
 	/* if the object is no longer attached, discard its backing storage */
-	if (obj->mm.madv == I915_MADV_DONTNEED && !obj->mm.pages)
+	if (obj->mm.madv == I915_MADV_DONTNEED &&
+	    !i915_gem_object_has_pages(obj))
 		i915_gem_object_truncate(obj);
 
 	args->retained = obj->mm.madv != __I915_MADV_PURGED;
@@ -4514,7 +4515,7 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
 		if (WARN_ON(i915_gem_object_has_pinned_pages(obj)))
 			atomic_set(&obj->mm.pages_pin_count, 0);
 		__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
-		GEM_BUG_ON(obj->mm.pages);
+		GEM_BUG_ON(i915_gem_object_has_pages(obj));
 
 		if (obj->base.import_attach)
 			drm_prime_gem_destroy(&obj->base, NULL);
@@ -70,6 +70,7 @@ static const struct dma_fence_ops i915_clflush_ops = {
 
 static void __i915_do_clflush(struct drm_i915_gem_object *obj)
 {
+	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
 	drm_clflush_sg(obj->mm.pages);
 	intel_fb_obj_flush(obj, ORIGIN_CPU);
 }
@@ -229,7 +229,7 @@ int i915_gem_render_state_emit(struct drm_i915_gem_request *req)
 		return 0;
 
 	/* Recreate the page after shrinking */
-	if (!so->vma->obj->mm.pages)
+	if (!i915_gem_object_has_pages(so->vma->obj))
 		so->batch_offset = -1;
 
 	ret = i915_vma_pin(so->vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
@@ -97,7 +97,7 @@ static bool swap_available(void)
 
 static bool can_release_pages(struct drm_i915_gem_object *obj)
 {
-	if (!obj->mm.pages)
+	if (!i915_gem_object_has_pages(obj))
 		return false;
 
 	/* Consider only shrinkable ojects. */
@@ -129,7 +129,7 @@ static bool unsafe_drop_pages(struct drm_i915_gem_object *obj)
 {
 	if (i915_gem_object_unbind(obj) == 0)
 		__i915_gem_object_put_pages(obj, I915_MM_SHRINKER);
-	return !READ_ONCE(obj->mm.pages);
+	return !i915_gem_object_has_pages(obj);
 }
 
 /**
@@ -247,7 +247,7 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
 			/* May arrive from get_pages on another bo */
 			mutex_lock_nested(&obj->mm.lock,
 					  I915_MM_SHRINKER);
-			if (!obj->mm.pages) {
+			if (!i915_gem_object_has_pages(obj)) {
 				__i915_gem_object_invalidate(obj);
 				list_del_init(&obj->global_link);
 				count += obj->base.size >> PAGE_SHIFT;
@@ -413,7 +413,7 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
 	 */
 	unbound = bound = unevictable = 0;
 	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link) {
-		if (!obj->mm.pages)
+		if (!i915_gem_object_has_pages(obj))
 			continue;
 
 		if (!can_release_pages(obj))
@@ -422,7 +422,7 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
 			unbound += obj->base.size >> PAGE_SHIFT;
 	}
 	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
-		if (!obj->mm.pages)
+		if (!i915_gem_object_has_pages(obj))
 			continue;
 
 		if (!can_release_pages(obj))
@@ -269,7 +269,7 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
 	 * due to the change in swizzling.
 	 */
 	mutex_lock(&obj->mm.lock);
-	if (obj->mm.pages &&
+	if (i915_gem_object_has_pages(obj) &&
 	    obj->mm.madv == I915_MADV_WILLNEED &&
 	    i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
 		if (tiling == I915_TILING_NONE) {
@@ -82,7 +82,7 @@ static void cancel_userptr(struct work_struct *work)
 	/* We are inside a kthread context and can't be interrupted */
 	if (i915_gem_object_unbind(obj) == 0)
 		__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
-	WARN_ONCE(obj->mm.pages,
+	WARN_ONCE(i915_gem_object_has_pages(obj),
 		  "Failed to release pages: bind_count=%d, pages_pin_count=%d, pin_display=%d\n",
 		  obj->bind_count,
 		  atomic_read(&obj->mm.pages_pin_count),