mm: add SHRINK_EMPTY shrinker methods return value

We need to distinguish the situation when a shrinker has a very small
number of objects (see vfs_pressure_ratio() called from
super_cache_count()) from the situation when it has no objects at all.
Currently, shrinker::count_objects() returns 0 in both of these cases.
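
As an illustration of the problem (a standalone sketch, not kernel code:
pressure_ratio() below is an invented stand-in for vfs_pressure_ratio(),
which scales the count by the vfs_cache_pressure percentage with integer
truncation), a cache holding one object can already report the same 0 as
an empty cache when vfs_cache_pressure is tuned below its default of 100:

#include <stdio.h>

/* Invented stand-in for vfs_pressure_ratio(): val scaled by the */
/* vfs_cache_pressure percentage, truncated by integer division. */
static unsigned long pressure_ratio(unsigned long val, unsigned long pressure)
{
	return val * pressure / 100;
}

int main(void)
{
	/* One freeable object with vfs_cache_pressure = 50 scales to 0 ... */
	printf("%lu\n", pressure_ratio(1, 50));	/* prints 0 */
	/* ... which is exactly what a completely empty cache reports too. */
	printf("%lu\n", pressure_ratio(0, 50));	/* prints 0 */
	return 0;
}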

This patch introduces a new SHRINK_EMPTY return value, which will be used
for the "no objects at all" case.  It is mostly a refactoring: every caller
of do_shrink_slab() in this patch simply maps SHRINK_EMPTY back to 0, and
the real magic will happen in a further patch.
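
As a rough sketch of the convention (sentinel values taken from the hunks
below; fake_do_shrink_slab() is an invented stand-in for do_shrink_slab(),
used only to show the caller-side folding that keeps behaviour unchanged
for now):

#include <stdio.h>

/* Sentinel values as defined by this patch in include/linux/shrinker.h. */
#define SHRINK_STOP	(~0UL)
#define SHRINK_EMPTY	(~0UL - 1)

/* Invented stand-in for do_shrink_slab(): pretend the cache is empty. */
static unsigned long fake_do_shrink_slab(void)
{
	return SHRINK_EMPTY;
}

int main(void)
{
	unsigned long freed = 0;
	unsigned long ret = fake_do_shrink_slab();

	/* Every caller in this patch folds SHRINK_EMPTY back into 0 ... */
	if (ret == SHRINK_EMPTY)
		ret = 0;
	freed += ret;

	/* ... so the aggregate result is the same as before the patch. */
	printf("freed = %lu\n", freed);	/* prints "freed = 0" */
	return 0;
}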

Link: http://lkml.kernel.org/r/153063069574.1818.11037751256699341813.stgit@localhost.localdomain
Signed-off-by: Kirill Tkhai <ktkhai@virtuozzo.com>
Acked-by: Vladimir Davydov <vdavydov.dev@gmail.com>
Tested-by: Shakeel Butt <shakeelb@google.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Guenter Roeck <linux@roeck-us.net>
Cc: "Huang, Ying" <ying.huang@intel.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Josef Bacik <jbacik@fb.com>
Cc: Li RongQing <lirongqing@baidu.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Matthias Kaehlcke <mka@chromium.org>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Philippe Ombredanne <pombredanne@nexb.com>
Cc: Roman Gushchin <guro@fb.com>
Cc: Sahitya Tummala <stummala@codeaurora.org>
Cc: Stephen Rothwell <sfr@canb.auug.org.au>
Cc: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Waiman Long <longman@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Author: Kirill Tkhai, 2018-08-17 15:48:21 -07:00 (committed by Linus Torvalds)
commit 9b996468cf
parent aeed1d325d
4 changed files with 20 additions and 5 deletions

fs/super.c

@@ -144,6 +144,9 @@ static unsigned long super_cache_count(struct shrinker *shrink,
 	total_objects += list_lru_shrink_count(&sb->s_dentry_lru, sc);
 	total_objects += list_lru_shrink_count(&sb->s_inode_lru, sc);
 
+	if (!total_objects)
+		return SHRINK_EMPTY;
+
 	total_objects = vfs_pressure_ratio(total_objects);
 	return total_objects;
 }

include/linux/shrinker.h

@@ -34,12 +34,15 @@ struct shrink_control {
 };
 
 #define SHRINK_STOP (~0UL)
+#define SHRINK_EMPTY (~0UL - 1)
 /*
  * A callback you can register to apply pressure to ageable caches.
  *
  * @count_objects should return the number of freeable items in the cache. If
- * there are no objects to free or the number of freeable items cannot be
- * determined, it should return 0. No deadlock checks should be done during the
+ * there are no objects to free, it should return SHRINK_EMPTY, while 0 is
+ * returned in cases of the number of freeable items cannot be determined
+ * or shrinker should skip this cache for this time (e.g., their number
+ * is below shrinkable limit). No deadlock checks should be done during the
  * count callback - the shrinker relies on aggregating scan counts that couldn't
  * be executed due to potential deadlocks to be run at a later call when the
  * deadlock condition is no longer pending.
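
(Hypothetical example, not part of this patch: a count_objects callback
following the contract documented above; my_cache_count, my_cache_nr_freeable()
and MY_CACHE_MIN are invented names.)

/* Sketch only; assumes <linux/shrinker.h> and the new SHRINK_EMPTY value. */
static unsigned long my_cache_count(struct shrinker *shrink,
				    struct shrink_control *sc)
{
	unsigned long freeable = my_cache_nr_freeable(sc->nid);

	if (!freeable)			/* no objects at all */
		return SHRINK_EMPTY;

	if (freeable < MY_CACHE_MIN)	/* skip this cache for now */
		return 0;

	return freeable;
}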

mm/vmscan.c

@@ -456,8 +456,8 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
 	long scanned = 0, next_deferred;
 
 	freeable = shrinker->count_objects(shrinker, shrinkctl);
-	if (freeable == 0)
-		return 0;
+	if (freeable == 0 || freeable == SHRINK_EMPTY)
+		return freeable;
 
 	/*
 	 * copy the current shrinker scan count into a local variable
@@ -596,6 +596,8 @@ static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
 			continue;
 
 		ret = do_shrink_slab(&sc, shrinker, priority);
+		if (ret == SHRINK_EMPTY)
+			ret = 0;
 		freed += ret;
 
 		if (rwsem_is_contended(&shrinker_rwsem)) {
@@ -641,6 +643,7 @@ static unsigned long shrink_slab(gfp_t gfp_mask, int nid,
 {
 	struct shrinker *shrinker;
 	unsigned long freed = 0;
+	int ret;
 
 	if (!mem_cgroup_is_root(memcg))
 		return shrink_slab_memcg(gfp_mask, nid, memcg, priority);
@@ -658,7 +661,10 @@ static unsigned long shrink_slab(gfp_t gfp_mask, int nid,
 		if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
 			sc.nid = 0;
 
-		freed += do_shrink_slab(&sc, shrinker, priority);
+		ret = do_shrink_slab(&sc, shrinker, priority);
+		if (ret == SHRINK_EMPTY)
+			ret = 0;
+		freed += ret;
 		/*
 		 * Bail out if someone want to register a new shrinker to
 		 * prevent the regsitration from being stalled for long periods

mm/workingset.c

@@ -399,6 +399,9 @@ static unsigned long count_shadow_nodes(struct shrinker *shrinker,
 	}
 	max_nodes = cache >> (RADIX_TREE_MAP_SHIFT - 3);
 
+	if (!nodes)
+		return SHRINK_EMPTY;
+
 	if (nodes <= max_nodes)
 		return 0;
 	return nodes - max_nodes;