hugepage: convert huge zero page shrinker to new shrinker API
It consists of:

* returning long instead of int
* separating count from scan
* returning the number of freed entities in scan

Signed-off-by: Glauber Costa <glommer@openvz.org>
Reviewed-by: Greg Thelen <gthelen@google.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Dave Chinner <dchinner@redhat.com>
Cc: "Theodore Ts'o" <tytso@mit.edu>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
Cc: Arve Hjønnevåg <arve@android.com>
Cc: Carlos Maiolino <cmaiolino@redhat.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Chuck Lever <chuck.lever@oracle.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: David Rientjes <rientjes@google.com>
Cc: Gleb Natapov <gleb@redhat.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: J. Bruce Fields <bfields@redhat.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: John Stultz <john.stultz@linaro.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Kent Overstreet <koverstreet@google.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Steven Whitehouse <swhiteho@redhat.com>
Cc: Thomas Hellstrom <thellstrom@vmware.com>
Cc: Trond Myklebust <Trond.Myklebust@netapp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
parent 81e49f8114
commit 488964666f
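For reference, a minimal sketch of the split count/scan interface that the conversion described above targets. The example_* names and the toy object counter are hypothetical; only struct shrinker, struct shrink_control, the count_objects/scan_objects callbacks, SHRINK_STOP and DEFAULT_SEEKS come from the kernel API this series introduces.

/*
 * Illustrative only: a shrinker written against the new API.
 */
#include <linux/shrinker.h>
#include <linux/atomic.h>

static atomic_long_t example_nr_cached;	/* hypothetical count of freeable objects */

/* ->count_objects: report how many objects could be freed right now. */
static unsigned long example_count(struct shrinker *shrink,
				   struct shrink_control *sc)
{
	return atomic_long_read(&example_nr_cached);
}

/* ->scan_objects: free up to sc->nr_to_scan objects, return the number freed. */
static unsigned long example_scan(struct shrinker *shrink,
				  struct shrink_control *sc)
{
	unsigned long freed = 0;

	/* Decrement the toy counter once per "freed" object, never below zero. */
	while (freed < sc->nr_to_scan &&
	       atomic_long_add_unless(&example_nr_cached, -1, 0))
		freed++;

	/* If no progress is possible, tell reclaim to back off. */
	return freed ? freed : SHRINK_STOP;
}

static struct shrinker example_shrinker = {
	.count_objects	= example_count,
	.scan_objects	= example_scan,
	.seeks		= DEFAULT_SEEKS,
};

With this split, reclaim sizes its batch from ->count_objects and charges the work actually done via the value returned from ->scan_objects, which is exactly the shape the patch below gives the huge zero page shrinker.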
@@ -211,24 +211,29 @@ static void put_huge_zero_page(void)
 	BUG_ON(atomic_dec_and_test(&huge_zero_refcount));
 }
 
-static int shrink_huge_zero_page(struct shrinker *shrink,
-		struct shrink_control *sc)
+static unsigned long shrink_huge_zero_page_count(struct shrinker *shrink,
+					struct shrink_control *sc)
 {
-	if (!sc->nr_to_scan)
-		/* we can free zero page only if last reference remains */
-		return atomic_read(&huge_zero_refcount) == 1 ? HPAGE_PMD_NR : 0;
+	/* we can free zero page only if last reference remains */
+	return atomic_read(&huge_zero_refcount) == 1 ? HPAGE_PMD_NR : 0;
+}
 
+static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
+				       struct shrink_control *sc)
+{
 	if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
 		struct page *zero_page = xchg(&huge_zero_page, NULL);
 		BUG_ON(zero_page == NULL);
 		__free_page(zero_page);
+		return HPAGE_PMD_NR;
 	}
 
 	return 0;
 }
 
 static struct shrinker huge_zero_page_shrinker = {
-	.shrink = shrink_huge_zero_page,
+	.count_objects = shrink_huge_zero_page_count,
+	.scan_objects = shrink_huge_zero_page_scan,
 	.seeks = DEFAULT_SEEKS,
 };
 
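The patch does not change how the shrinker is wired up; for context, a sketch of the registration step, assuming the existing register_shrinker() entry point and the huge_zero_page_shrinker definition above. The wrapper function name is illustrative and not part of this patch.

/*
 * Sketch only: registering the converted shrinker from an init path.
 */
static int __init example_hugepage_shrinker_init(void)
{
	/*
	 * After registration, reclaim calls ->count_objects to see whether
	 * the huge zero page is freeable and ->scan_objects to drop it.
	 */
	register_shrinker(&huge_zero_page_shrinker);
	return 0;
}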