drm/i915: Revert shrinker changes from "Track unbound pages"

This partially reverts

commit 6c085a728c
Author: Chris Wilson <chris@chris-wilson.co.uk>
Date:   Mon Aug 20 11:40:46 2012 +0200

    drm/i915: Track unbound pages

Closer inspection of that patch revealed a bunch of unrelated changes
in the shrinker:
- The shrinker count is now in pages instead of objects.
- For counting the shrinkable objects the old code only looked at the
  inactive list, the new code looks at all bound objects (including
  pinned ones), in addition to the new unbound list.
- The shrinker count is no longer scaled with
  sysctl_vfs_cache_pressure. Note though that with the default tuning
  value of vfs_cache_pressure = 100 this doesn't affect the shrinker
  behaviour. The old counting logic is sketched right after this list.
- When actually shrinking objects, the old code first dropped
  purgeable objects, then normal (inactive) objects. Only then did it,
  in a last-ditch effort, idle the gpu and evict everything. The new
  code omits the intermediate step of evicting normal inactive
  objects.
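
For reference, the counting side of the old (3.6) shrinker looked
roughly like this (a from-memory sketch rather than the literal code;
the list and field names follow the i915 code of that era):

	/* old: count objects (not pages), inactive list only, scaled
	 * the same way as the dentry/inode caches */
	cnt = 0;
	list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list)
		cnt++;

	return cnt / 100 * sysctl_vfs_cache_pressure;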

Save for the first change, which seems benign, and the shrinker count
scaling, which is a bit of a different story, the end result of all
these changes is that the shrinker is _much_ more likely to fall back
to the last-ditch resort of idling the gpu and evicting everything.
The old code could only do that if something else evicted lots of
objects meanwhile, since the core VM only asks a shrinker to scan
small batches per call (SHRINK_BATCH, 128 by default), so without any
other changes the nr_to_scan will be smaller than the object count.

Reverting the vfs_cache_pressure behaviour itself is a bit bogus: only
dentry/inode object caches should scale their shrinker counts with
vfs_cache_pressure. Originally I had that change reverted, too, but
Chris Wilson insisted that it's too bogus and shouldn't see the light
of day again.

Hence revert all these other changes and restore the old shrinker
behaviour, with the minor adjustment that we now first scan the
unbound list, then the inactive list for each object category
(purgeable or normal).
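
In call-order terms, the restored shrinker path boils down to the
following (a condensed sketch of the code in the diff below):

	/* 1. drop purgeable objects, unbound list then inactive list */
	nr_to_scan -= i915_gem_purge(dev_priv, nr_to_scan);
	/* 2. same two-list walk again for normal objects */
	if (nr_to_scan > 0)
		nr_to_scan -= __i915_gem_shrink(dev_priv, nr_to_scan,
						false);
	/* 3. last-ditch: idle the gpu and evict everything */
	if (nr_to_scan > 0)
		i915_gem_shrink_all(dev_priv);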

A similar patch has been tested by a few people affected by the gen4/5
hangs which started to appear in 3.7, which some people bisected to
the "drm/i915: Track unbound pages" commit. But just disabling the
unbound logic alone didn't change things at all.

Note that this patch doesn't fix the referenced bugs, it only hides
the underlying bug(s) well enough to restore pre-3.7 behaviour. The
key to achieving that is to massively reduce the likelihood of going
into a full gpu stall and evicting everything.

v2: Reword commit message a bit, taking Chris Wilson's comment into
account.

v3: At Chris Wilson's insistence, do not reinstate the rather bogus
vfs_cache_pressure change.

Tested-by: Greg KH <gregkh@linuxfoundation.org>
Tested-by: Dave Kleikamp <dave.kleikamp@oracle.com>
References: https://bugs.freedesktop.org/show_bug.cgi?id=55984
References: https://bugs.freedesktop.org/show_bug.cgi?id=57122
References: https://bugs.freedesktop.org/show_bug.cgi?id=56916
References: https://bugs.freedesktop.org/show_bug.cgi?id=57136
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: stable@vger.kernel.org
Acked-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
commit 93927ca52a
parent ca320ac456
Author: Daniel Vetter
Date:   2013-01-10 18:03:00 +01:00

1 file changed, 14 insertions(+), 4 deletions(-)

@@ -1717,7 +1717,8 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
 }
 
 static long
-i915_gem_purge(struct drm_i915_private *dev_priv, long target)
+__i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
+		  bool purgeable_only)
 {
 	struct drm_i915_gem_object *obj, *next;
 	long count = 0;
@@ -1725,7 +1726,7 @@ i915_gem_purge(struct drm_i915_private *dev_priv, long target)
 	list_for_each_entry_safe(obj, next,
 				 &dev_priv->mm.unbound_list,
 				 gtt_list) {
-		if (i915_gem_object_is_purgeable(obj) &&
+		if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
 		    i915_gem_object_put_pages(obj) == 0) {
 			count += obj->base.size >> PAGE_SHIFT;
 			if (count >= target)
@@ -1736,7 +1737,7 @@ i915_gem_purge(struct drm_i915_private *dev_priv, long target)
 	list_for_each_entry_safe(obj, next,
 				 &dev_priv->mm.inactive_list,
 				 mm_list) {
-		if (i915_gem_object_is_purgeable(obj) &&
+		if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
 		    i915_gem_object_unbind(obj) == 0 &&
 		    i915_gem_object_put_pages(obj) == 0) {
 			count += obj->base.size >> PAGE_SHIFT;
@@ -1748,6 +1749,12 @@ i915_gem_purge(struct drm_i915_private *dev_priv, long target)
 	return count;
 }
 
+static long
+i915_gem_purge(struct drm_i915_private *dev_priv, long target)
+{
+	return __i915_gem_shrink(dev_priv, target, true);
+}
+
 static void
 i915_gem_shrink_all(struct drm_i915_private *dev_priv)
 {
@@ -4395,6 +4402,9 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
 
 	if (nr_to_scan) {
 		nr_to_scan -= i915_gem_purge(dev_priv, nr_to_scan);
+		if (nr_to_scan > 0)
+			nr_to_scan -= __i915_gem_shrink(dev_priv, nr_to_scan,
+							false);
 		if (nr_to_scan > 0)
 			i915_gem_shrink_all(dev_priv);
 	}
@@ -4403,7 +4413,7 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
 	list_for_each_entry(obj, &dev_priv->mm.unbound_list, gtt_list)
 		if (obj->pages_pin_count == 0)
 			cnt += obj->base.size >> PAGE_SHIFT;
-	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
+	list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list)
 		if (obj->pin_count == 0 && obj->pages_pin_count == 0)
 			cnt += obj->base.size >> PAGE_SHIFT;