drm/i915: Skip shrinking already freed pages

Previously, we wanted to shrink the pages of freed objects before they
were finally RCU collected. However, by removing the struct_mutex
serialisation around the active reference, we need to acquire an extra
reference around the wait. Unfortunately, this means that we have to skip
objects that are awaiting RCU collection.

Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=110937
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190618074153.16055-2-chris@chris-wilson.co.uk
commit 0bd6cb6b58
parent 767662bc62
Author: Chris Wilson <chris@chris-wilson.co.uk>
Date:   2019-06-18 08:41:29 +01:00

2 changed files with 6 additions and 46 deletions
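The primitive doing the work in this patch is kref_get_unless_zero(): it takes a reference only while the refcount is still non-zero, so a lookaside walker can tell a live object apart from one whose last reference is gone and which is merely waiting out an RCU grace period before being collected. A minimal sketch of that contract, using hypothetical names (struct node, walker_try_ref) rather than anything from i915:

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

/* Hypothetical refcounted object; not an i915 type. */
struct node {
        struct kref refcount;
};

static void node_release(struct kref *ref)
{
        /* Stand-in for the object's real (possibly RCU-deferred) free. */
        kfree(container_of(ref, struct node, refcount));
}

/*
 * A walker that spots @node on a shared list cannot know whether the
 * final reference has already been dropped elsewhere. A plain
 * kref_get() would resurrect such a dying object and race with its
 * free; kref_get_unless_zero() fails cleanly instead, telling the
 * walker to skip the object.
 */
static bool walker_try_ref(struct node *node)
{
        return kref_get_unless_zero(&node->refcount);
}

static void walker_put(struct node *node)
{
        kref_put(&node->refcount, node_release);
}

This is the trade-off the commit message describes: the shrinker gains safety against use-after-free at the cost of no longer being able to shrink objects that are already on their way out.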

drivers/gpu/drm/i915/gem/i915_gem_object.c

@@ -146,33 +146,6 @@ void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
 	}
 }

-static bool discard_backing_storage(struct drm_i915_gem_object *obj)
-{
-	/*
-	 * If we are the last user of the backing storage (be it shmemfs
-	 * pages or stolen etc), we know that the pages are going to be
-	 * immediately released. In this case, we can then skip copying
-	 * back the contents from the GPU.
-	 */
-	if (!i915_gem_object_is_shrinkable(obj))
-		return false;
-
-	if (obj->mm.madv != I915_MADV_WILLNEED)
-		return false;
-
-	if (!obj->base.filp)
-		return true;
-
-	/* At first glance, this looks racy, but then again so would be
-	 * userspace racing mmap against close. However, the first external
-	 * reference to the filp can only be obtained through the
-	 * i915_gem_mmap_ioctl() which safeguards us against the user
-	 * acquiring such a reference whilst we are in the middle of
-	 * freeing the object.
-	 */
-	return file_count(obj->base.filp) == 1;
-}
-
 static void __i915_gem_free_objects(struct drm_i915_private *i915,
 				    struct llist_node *freed)
 {
@@ -222,8 +195,7 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
 		if (obj->ops->release)
 			obj->ops->release(obj);

-		if (WARN_ON(i915_gem_object_has_pinned_pages(obj)))
-			atomic_set(&obj->mm.pages_pin_count, 0);
+		atomic_set(&obj->mm.pages_pin_count, 0);
 		__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
 		GEM_BUG_ON(i915_gem_object_has_pages(obj));
@@ -320,23 +292,6 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
 {
 	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);

-	if (obj->mm.quirked)
-		__i915_gem_object_unpin_pages(obj);
-
-	if (discard_backing_storage(obj)) {
-		struct drm_i915_private *i915 = to_i915(obj->base.dev);
-
-		obj->mm.madv = I915_MADV_DONTNEED;
-
-		if (i915_gem_object_has_pages(obj)) {
-			unsigned long flags;
-
-			spin_lock_irqsave(&i915->mm.obj_lock, flags);
-			list_move_tail(&obj->mm.link, &i915->mm.purge_list);
-			spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
-		}
-	}
-
 	/*
 	 * Before we free the object, make sure any pure RCU-only
 	 * read-side critical sections are complete, e.g.

drivers/gpu/drm/i915/gem/i915_gem_shrinker.c

@@ -241,6 +241,9 @@ i915_gem_shrink(struct drm_i915_private *i915,
 			if (!can_release_pages(obj))
 				continue;

+			if (!kref_get_unless_zero(&obj->base.refcount))
+				continue;
+
 			spin_unlock_irqrestore(&i915->mm.obj_lock, flags);

 			if (unsafe_drop_pages(obj)) {
@@ -253,7 +256,9 @@ i915_gem_shrink(struct drm_i915_private *i915,
 				}
 				mutex_unlock(&obj->mm.lock);
 			}
+
 			scanned += obj->base.size >> PAGE_SHIFT;
+			i915_gem_object_put(obj);

 			spin_lock_irqsave(&i915->mm.obj_lock, flags);
 		}
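Read together, the two shrinker hunks follow a standard shape: park each entry on a private list so the walk survives dropping the lock, try-acquire a reference, do the expensive work unlocked, then put the reference and retake the lock. A simplified, self-contained sketch of that loop under assumed stand-in types (the real walk is i915_gem_shrink() over struct drm_i915_gem_object):

#include <linux/kref.h>
#include <linux/list.h>
#include <linux/spinlock.h>

/* Illustrative stand-in for struct drm_i915_gem_object. */
struct shrinkable {
        struct kref refcount;
        struct list_head link;
        unsigned long nr_pages;
};

static void shrinkable_release(struct kref *ref)
{
        /* Placeholder for the real (RCU-deferred) free path. */
}

static bool drop_pages(struct shrinkable *s)
{
        /* Placeholder for unsafe_drop_pages(); true if pages were freed. */
        return true;
}

static unsigned long shrink_walk(spinlock_t *lock, struct list_head *head)
{
        LIST_HEAD(still_in_list);
        struct shrinkable *s;
        unsigned long scanned = 0;
        unsigned long flags;

        spin_lock_irqsave(lock, flags);
        while ((s = list_first_entry_or_null(head, typeof(*s), link))) {
                /* Park the entry so the walk survives dropping the lock. */
                list_move_tail(&s->link, &still_in_list);

                /* Already freed, merely awaiting RCU collection: skip. */
                if (!kref_get_unless_zero(&s->refcount))
                        continue;

                /* The reference keeps @s alive without the list lock. */
                spin_unlock_irqrestore(lock, flags);

                if (drop_pages(s))
                        scanned += s->nr_pages;
                kref_put(&s->refcount, shrinkable_release);

                spin_lock_irqsave(lock, flags);
        }
        list_splice_tail(&still_in_list, head);
        spin_unlock_irqrestore(lock, flags);

        return scanned;
}

Because kref_get_unless_zero() refuses objects whose refcount has already hit zero, freed-but-not-yet-RCU-collected objects are simply skipped, which is exactly the behaviour named in the subject line.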