mm: remove free_page_mlock
We should not be seeing non-0 unevictable_pgs_mlockfreed any longer. So remove
free_page_mlock() from the page freeing paths: __PG_MLOCKED is already in
PAGE_FLAGS_CHECK_AT_FREE, so free_pages_check() will now be checking it,
reporting "BUG: Bad page state" if it's ever found set. Comment
UNEVICTABLE_MLOCKFREED and unevictable_pgs_mlockfreed as always 0.

Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Cc: Rik van Riel <riel@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michel Lespinasse <walken@google.com>
Cc: Ying Han <yinghan@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit a0c5e813f0
parent e6c509f854
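The message above leans on free_pages_check() already catching a stray mlocked bit through PAGE_FLAGS_CHECK_AT_FREE. As a rough standalone illustration only (toy flag bits, toy struct and helper names, not the kernel's actual definitions), a free-time sanity check of that style reduces to testing the page flags against a mask of bits that must be clear:

/* Illustrative sketch only: invented flag bits and check mask. */
#include <stdio.h>

#define PG_LOCKED   (1UL << 0)
#define PG_MLOCKED  (1UL << 1)   /* stands in for __PG_MLOCKED */
#define PG_RESERVED (1UL << 2)

/* Bits that must already be clear when a page reaches the free path. */
#define FLAGS_CHECK_AT_FREE (PG_LOCKED | PG_MLOCKED | PG_RESERVED)

struct toy_page {
        unsigned long flags;
};

/* Modeled on the idea in free_pages_check(): any offending bit is a bug. */
static int toy_free_page_check(const struct toy_page *page)
{
        if (page->flags & FLAGS_CHECK_AT_FREE) {
                printf("BUG: Bad page state, flags=%#lx\n", page->flags);
                return 1;       /* refuse to free this page */
        }
        return 0;
}

int main(void)
{
        struct toy_page ok  = { .flags = 0 };
        struct toy_page bad = { .flags = PG_MLOCKED };

        printf("ok page:  %s\n", toy_free_page_check(&ok)  ? "rejected" : "freed");
        printf("bad page: %s\n", toy_free_page_check(&bad) ? "rejected" : "freed");
        return 0;
}

With the mlocked bit covered by such a mask, a separate accounting hook on the free path has nothing left to do, which is exactly why the diff below can delete it.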
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -52,7 +52,7 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
 		UNEVICTABLE_PGMUNLOCKED,
 		UNEVICTABLE_PGCLEARED,	/* on COW, page truncate */
 		UNEVICTABLE_PGSTRANDED,	/* unable to isolate on unlock */
-		UNEVICTABLE_MLOCKFREED,
+		UNEVICTABLE_MLOCKFREED,	/* no longer useful: always zero */
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 		THP_FAULT_ALLOC,
 		THP_FAULT_FALLBACK,
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -598,17 +598,6 @@ out:
 	zone->free_area[order].nr_free++;
 }
 
-/*
- * free_page_mlock() -- clean up attempts to free and mlocked() page.
- * Page should not be on lru, so no need to fix that up.
- * free_pages_check() will verify...
- */
-static inline void free_page_mlock(struct page *page)
-{
-	__dec_zone_page_state(page, NR_MLOCK);
-	__count_vm_event(UNEVICTABLE_MLOCKFREED);
-}
-
 static inline int free_pages_check(struct page *page)
 {
 	if (unlikely(page_mapcount(page) |
@@ -728,15 +717,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
 static void __free_pages_ok(struct page *page, unsigned int order)
 {
 	unsigned long flags;
-	int wasMlocked = __TestClearPageMlocked(page);
 	int migratetype;
 
 	if (!free_pages_prepare(page, order))
 		return;
 
 	local_irq_save(flags);
-	if (unlikely(wasMlocked))
-		free_page_mlock(page);
 	__count_vm_events(PGFREE, 1 << order);
 	migratetype = get_pageblock_migratetype(page);
 	set_freepage_migratetype(page, migratetype);
@@ -1310,7 +1296,6 @@ void free_hot_cold_page(struct page *page, int cold)
 	struct per_cpu_pages *pcp;
 	unsigned long flags;
 	int migratetype;
-	int wasMlocked = __TestClearPageMlocked(page);
 
 	if (!free_pages_prepare(page, 0))
 		return;
@@ -1318,8 +1303,6 @@ void free_hot_cold_page(struct page *page, int cold)
 	migratetype = get_pageblock_migratetype(page);
 	set_freepage_migratetype(page, migratetype);
 	local_irq_save(flags);
-	if (unlikely(wasMlocked))
-		free_page_mlock(page);
 	__count_vm_event(PGFREE);
 
 	/*
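The hunks above drop the same pattern from both freeing paths, __free_pages_ok() and free_hot_cold_page(): clear the mlocked bit with a non-atomic test-and-clear and, if it was set, undo the NR_MLOCK accounting and bump the event counter. A minimal standalone sketch of that now-removed pattern (toy types, helpers and counters, not the kernel's) looks like this:

/* Sketch of the removed wasMlocked bookkeeping, with invented toy helpers. */
#include <stdbool.h>
#include <stdio.h>

#define PG_MLOCKED (1UL << 1)

struct toy_page { unsigned long flags; };

static long nr_mlock_pages;      /* stands in for the NR_MLOCK zone counter */
static long mlockfreed_events;   /* stands in for UNEVICTABLE_MLOCKFREED */

/* Non-atomic test-and-clear, in the spirit of __TestClearPageMlocked(). */
static bool toy_test_clear_mlocked(struct toy_page *page)
{
        bool was_set = page->flags & PG_MLOCKED;
        page->flags &= ~PG_MLOCKED;
        return was_set;
}

/* The bookkeeping free_page_mlock() used to do when the bit was found set. */
static void toy_free_page_mlock(void)
{
        nr_mlock_pages--;
        mlockfreed_events++;
}

static void toy_free_page(struct toy_page *page)
{
        if (toy_test_clear_mlocked(page))       /* the removed special case */
                toy_free_page_mlock();
        /* ...normal freeing would continue here... */
}

int main(void)
{
        struct toy_page page = { .flags = PG_MLOCKED };
        nr_mlock_pages = 1;
        toy_free_page(&page);
        printf("NR_MLOCK=%ld mlockfreed=%ld\n", nr_mlock_pages, mlockfreed_events);
        return 0;
}

Since mlocked pages are no longer expected to reach the free path at all, this special case becomes dead weight, and any page that does arrive with the bit set is better reported as a bad page state.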
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -782,7 +782,7 @@ const char * const vmstat_text[] = {
 	"unevictable_pgs_munlocked",
 	"unevictable_pgs_cleared",
 	"unevictable_pgs_stranded",
-	"unevictable_pgs_mlockfreed",
+	"unevictable_pgs_mlockfreed",	/* no longer useful: always zero */
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 	"thp_fault_alloc",
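The vmstat_text entry has to stay in lockstep with the vm_event_item enum, because /proc/vmstat pairs counter values with names purely by position; that is presumably why this commit annotates the entry as always zero rather than deleting it outright. A toy illustration of that index pairing (invented names, not the kernel's tables):

/* Toy model of index-paired counter/name tables, as in /proc/vmstat. */
#include <stdio.h>

enum toy_event {
        TOY_PGFREE,
        TOY_UNEVICTABLE_MLOCKFREED,     /* no longer useful: always zero */
        TOY_NR_EVENTS
};

static const char *const toy_event_text[TOY_NR_EVENTS] = {
        "pgfree",
        "unevictable_pgs_mlockfreed",   /* must stay in the same slot as the enum */
};

static unsigned long toy_event_counts[TOY_NR_EVENTS];

int main(void)
{
        toy_event_counts[TOY_PGFREE] = 12345;   /* MLOCKFREED stays 0 */

        /* Names and values are matched purely by index, like /proc/vmstat. */
        for (int i = 0; i < TOY_NR_EVENTS; i++)
                printf("%s %lu\n", toy_event_text[i], toy_event_counts[i]);
        return 0;
}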