drm/i915: Include bound and active pages in the count of shrinkable objects
When the machine is under a lot of memory pressure and being stressed by
multiple GPU threads, we quite often report fewer than shrinker->batch
(i.e. SHRINK_BATCH) pages to be freed. This causes the shrink_control to
skip calling into i915.ko to release pages, despite the GPU holding onto
most of the physical pages in its active lists.

References: https://bugs.freedesktop.org/show_bug.cgi?id=72742
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Robert Beckett <robert.beckett@intel.com>
Reviewed-by: Rafael Barbalho <rafael.barbalho@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
commit ceabbba524
parent 0820baf39b
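For context (not part of the commit): below is a minimal sketch of how the core VM's slab-shrinker path decides whether to call back into a driver, simplified from the 3.13-era do_shrink_slab() behaviour in which ->scan_objects() only runs for whole batches of SHRINK_BATCH (128) objects. The helper name shrink_one_slab() and the omission of pressure scaling and SHRINK_STOP handling are illustrative assumptions, not the real mm/vmscan.c code. It shows why an undersized ->count_objects() return value starves the driver of scan callbacks.

/*
 * Illustrative sketch only -- simplified from the core shrinker logic,
 * not the actual mm/vmscan.c implementation.  shrink_one_slab() is a
 * hypothetical name; pressure scaling and SHRINK_STOP handling omitted.
 */
#include <linux/shrinker.h>

static unsigned long shrink_one_slab(struct shrinker *shrinker,
				     struct shrink_control *sc)
{
	unsigned long batch = shrinker->batch ? shrinker->batch : SHRINK_BATCH;
	unsigned long freeable = shrinker->count_objects(shrinker, sc);
	unsigned long total_scan = freeable;	/* pressure scaling omitted */
	unsigned long freed = 0;

	if (freeable == 0)
		return 0;

	/*
	 * ->scan_objects() only runs while a full batch remains.  If
	 * ->count_objects() reports fewer than 'batch' pages -- which the
	 * old i915 callback did whenever most objects sat on the bound or
	 * active lists -- this loop body never executes and the GPU keeps
	 * its pages despite system-wide memory pressure.
	 */
	while (total_scan >= batch) {
		sc->nr_to_scan = batch;
		freed += shrinker->scan_objects(shrinker, sc);
		total_scan -= batch;
	}

	return freed;
}

The commit addresses this by having the i915 ->count_objects() callback (i915_gem_shrinker_count() in the diff below) also report bound objects whose pages are pinned only by their VMA bindings (pages_pin_count == num_vma_bound(obj)), so the reported count can cross the batch threshold and ->scan_objects() gets a chance to run.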
drivers/gpu/drm/i915/i915_dma.c
@@ -1741,8 +1741,8 @@ out_power_well:
 	intel_power_domains_remove(dev_priv);
 	drm_vblank_cleanup(dev);
 out_gem_unload:
-	if (dev_priv->mm.inactive_shrinker.scan_objects)
-		unregister_shrinker(&dev_priv->mm.inactive_shrinker);
+	if (dev_priv->mm.shrinker.scan_objects)
+		unregister_shrinker(&dev_priv->mm.shrinker);
 
 	if (dev->pdev->msi_enabled)
 		pci_disable_msi(dev->pdev);
@@ -1793,8 +1793,8 @@ int i915_driver_unload(struct drm_device *dev)
 
 	i915_teardown_sysfs(dev);
 
-	if (dev_priv->mm.inactive_shrinker.scan_objects)
-		unregister_shrinker(&dev_priv->mm.inactive_shrinker);
+	if (dev_priv->mm.shrinker.scan_objects)
+		unregister_shrinker(&dev_priv->mm.shrinker);
 
 	io_mapping_free(dev_priv->gtt.mappable);
 	arch_phys_wc_del(dev_priv->gtt.mtrr);
drivers/gpu/drm/i915/i915_drv.h
@@ -1057,7 +1057,7 @@ struct i915_gem_mm {
 	/** PPGTT used for aliasing the PPGTT with the GTT */
 	struct i915_hw_ppgtt *aliasing_ppgtt;
 
-	struct shrinker inactive_shrinker;
+	struct shrinker shrinker;
 	bool shrinker_no_lock_stealing;
 
 	/** LRU list of objects with fence regs on them. */
drivers/gpu/drm/i915/i915_gem.c
@@ -57,9 +57,9 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
 					 struct drm_i915_fence_reg *fence,
 					 bool enable);
 
-static unsigned long i915_gem_inactive_count(struct shrinker *shrinker,
+static unsigned long i915_gem_shrinker_count(struct shrinker *shrinker,
 					     struct shrink_control *sc);
-static unsigned long i915_gem_inactive_scan(struct shrinker *shrinker,
+static unsigned long i915_gem_shrinker_scan(struct shrinker *shrinker,
 					    struct shrink_control *sc);
 static unsigned long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
 static unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
@@ -4739,10 +4739,10 @@ i915_gem_load(struct drm_device *dev)
 
 	dev_priv->mm.interruptible = true;
 
-	dev_priv->mm.inactive_shrinker.scan_objects = i915_gem_inactive_scan;
-	dev_priv->mm.inactive_shrinker.count_objects = i915_gem_inactive_count;
-	dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
-	register_shrinker(&dev_priv->mm.inactive_shrinker);
+	dev_priv->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
+	dev_priv->mm.shrinker.count_objects = i915_gem_shrinker_count;
+	dev_priv->mm.shrinker.seeks = DEFAULT_SEEKS;
+	register_shrinker(&dev_priv->mm.shrinker);
 }
 
 /*
@@ -5001,13 +5001,23 @@ static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
 #endif
 }
 
+static int num_vma_bound(struct drm_i915_gem_object *obj)
+{
+	struct i915_vma *vma;
+	int count = 0;
+
+	list_for_each_entry(vma, &obj->vma_list, vma_link)
+		if (drm_mm_node_allocated(&vma->node))
+			count++;
+
+	return count;
+}
+
 static unsigned long
-i915_gem_inactive_count(struct shrinker *shrinker, struct shrink_control *sc)
+i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
 {
 	struct drm_i915_private *dev_priv =
-		container_of(shrinker,
-			     struct drm_i915_private,
-			     mm.inactive_shrinker);
+		container_of(shrinker, struct drm_i915_private, mm.shrinker);
 	struct drm_device *dev = dev_priv->dev;
 	struct drm_i915_gem_object *obj;
 	bool unlock = true;
@@ -5029,10 +5039,8 @@ i915_gem_inactive_count(struct shrinker *shrinker, struct shrink_control *sc)
 		count += obj->base.size >> PAGE_SHIFT;
 
 	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
-		if (obj->active)
-			continue;
-
-		if (!i915_gem_obj_is_pinned(obj) && obj->pages_pin_count == 0)
+		if (!i915_gem_obj_is_pinned(obj) &&
+		    obj->pages_pin_count == num_vma_bound(obj))
 			count += obj->base.size >> PAGE_SHIFT;
 	}
 
@@ -5105,12 +5113,10 @@ unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
 }
 
 static unsigned long
-i915_gem_inactive_scan(struct shrinker *shrinker, struct shrink_control *sc)
+i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
 {
 	struct drm_i915_private *dev_priv =
-		container_of(shrinker,
-			     struct drm_i915_private,
-			     mm.inactive_shrinker);
+		container_of(shrinker, struct drm_i915_private, mm.shrinker);
 	struct drm_device *dev = dev_priv->dev;
 	unsigned long freed;
 	bool unlock = true;