mm: remove check_move_unevictable_pages()
All callers have now been converted to call check_move_unevictable_folios().

Link: https://lkml.kernel.org/r/20230621164557.3510324-7-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 3291e09a46
commit e0b72c14d8
@@ -439,7 +439,6 @@ static inline bool node_reclaim_enabled(void)
 }
 
 void check_move_unevictable_folios(struct folio_batch *fbatch);
-void check_move_unevictable_pages(struct pagevec *pvec);
 
 extern void __meminit kswapd_run(int nid);
 extern void __meminit kswapd_stop(int nid);
 mm/vmscan.c | 17
@@ -8075,23 +8075,6 @@ int node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order)
 }
 #endif
 
-void check_move_unevictable_pages(struct pagevec *pvec)
-{
-	struct folio_batch fbatch;
-	unsigned i;
-
-	folio_batch_init(&fbatch);
-	for (i = 0; i < pvec->nr; i++) {
-		struct page *page = pvec->pages[i];
-
-		if (PageTransTail(page))
-			continue;
-		folio_batch_add(&fbatch, page_folio(page));
-	}
-	check_move_unevictable_folios(&fbatch);
-}
-EXPORT_SYMBOL_GPL(check_move_unevictable_pages);
-
 /**
  * check_move_unevictable_folios - Move evictable folios to appropriate zone
  * lru list
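
Usage note (not part of the commit): the conversion the message refers to means callers now build a struct folio_batch themselves and pass it straight to check_move_unevictable_folios(), instead of filling a pagevec and relying on the wrapper removed above to translate it. Below is a minimal sketch of such a caller, roughly modeled on how a converted user like the shmem unlock path batches folios; the function name example_unlock_mapping() is hypothetical and only illustrates the pattern.

/*
 * Illustrative sketch only -- not taken from this commit.  A caller that
 * used to batch pages in a pagevec and call check_move_unevictable_pages()
 * now batches folios directly and calls check_move_unevictable_folios().
 */
#include <linux/pagemap.h>	/* filemap_get_folios(), struct address_space */
#include <linux/pagevec.h>	/* struct folio_batch, folio_batch_*() */
#include <linux/swap.h>		/* check_move_unevictable_folios() */
#include <linux/sched.h>	/* cond_resched() */

static void example_unlock_mapping(struct address_space *mapping)
{
	struct folio_batch fbatch;
	pgoff_t index = 0;

	folio_batch_init(&fbatch);
	/* Gather a batch of folios at a time and let vmscan re-check them. */
	while (filemap_get_folios(mapping, &index, ~0UL, &fbatch)) {
		check_move_unevictable_folios(&fbatch);
		folio_batch_release(&fbatch);	/* drop refs, reset the batch */
		cond_resched();
	}
}

Because the folio-based lookup returns one entry per folio, there is no caller-side equivalent of the PageTransTail() filtering the removed wrapper performed, which is why the wrapper, its declaration, and its EXPORT_SYMBOL_GPL can all go.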