mm/swap: make __pagevec_lru_add static
__pagevec_lru_add has no callers outside swap.c, so make it static, and move it to a more logical position in the file.

Link: https://lkml.kernel.org/r/20220617175020.717127-4-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit 7d80dd096f (parent c2bc16817a)
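For context, the callers that remain are all inside mm/swap.c itself: folios are batched on a per-CPU pagevec and the batch is drained through __pagevec_lru_add() once it fills up. The sketch below is modelled on the 5.19-era folio_add_lru(); the lru_pvecs structure and the exact statements are recalled from that era's code and should be read as illustrative, not as text quoted from this commit.

/*
 * Illustrative sketch of an in-file caller (not part of this diff):
 * the folio is added to the per-CPU "lru_add" pagevec, and the batch
 * is drained through the now-static __pagevec_lru_add() when full.
 */
void folio_add_lru(struct folio *folio)
{
	struct pagevec *pvec;

	folio_get(folio);			/* the batch holds a reference */
	local_lock(&lru_pvecs.lock);		/* protect the per-CPU pagevec */
	pvec = this_cpu_ptr(&lru_pvecs.lru_add);
	if (pagevec_add_and_need_flush(pvec, &folio->page))
		__pagevec_lru_add(pvec);	/* drain the full batch */
	local_unlock(&lru_pvecs.lock);
}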
--- a/include/linux/pagevec.h
+++ b/include/linux/pagevec.h
@@ -26,7 +26,6 @@ struct pagevec {
 };
 
 void __pagevec_release(struct pagevec *pvec);
-void __pagevec_lru_add(struct pagevec *pvec);
 unsigned pagevec_lookup_range(struct pagevec *pvec,
 		struct address_space *mapping,
 		pgoff_t *start, pgoff_t end);
 mm/swap.c | 126 ++++++++++++++++++++++++++++++----------------------------

--- a/mm/swap.c
+++ b/mm/swap.c
@@ -228,6 +228,69 @@ static bool pagevec_add_and_need_flush(struct pagevec *pvec, struct page *page)
 
 typedef void (*move_fn_t)(struct lruvec *lruvec, struct folio *folio);
 
+static void __pagevec_lru_add_fn(struct folio *folio, struct lruvec *lruvec)
+{
+	int was_unevictable = folio_test_clear_unevictable(folio);
+	long nr_pages = folio_nr_pages(folio);
+
+	VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
+
+	folio_set_lru(folio);
+	/*
+	 * Is an smp_mb__after_atomic() still required here, before
+	 * folio_evictable() tests PageMlocked, to rule out the possibility
+	 * of stranding an evictable folio on an unevictable LRU? I think
+	 * not, because __munlock_page() only clears PageMlocked while the LRU
+	 * lock is held.
+	 *
+	 * (That is not true of __page_cache_release(), and not necessarily
+	 * true of release_pages(): but those only clear PageMlocked after
+	 * put_page_testzero() has excluded any other users of the page.)
+	 */
+	if (folio_evictable(folio)) {
+		if (was_unevictable)
+			__count_vm_events(UNEVICTABLE_PGRESCUED, nr_pages);
+	} else {
+		folio_clear_active(folio);
+		folio_set_unevictable(folio);
+		/*
+		 * folio->mlock_count = !!folio_test_mlocked(folio)?
+		 * But that leaves __mlock_page() in doubt whether another
+		 * actor has already counted the mlock or not. Err on the
+		 * safe side, underestimate, let page reclaim fix it, rather
+		 * than leaving a page on the unevictable LRU indefinitely.
+		 */
+		folio->mlock_count = 0;
+		if (!was_unevictable)
+			__count_vm_events(UNEVICTABLE_PGCULLED, nr_pages);
+	}
+
+	lruvec_add_folio(lruvec, folio);
+	trace_mm_lru_insertion(folio);
+}
+
+/*
+ * Add the passed pages to the LRU, then drop the caller's refcount
+ * on them. Reinitialises the caller's pagevec.
+ */
+static void __pagevec_lru_add(struct pagevec *pvec)
+{
+	int i;
+	struct lruvec *lruvec = NULL;
+	unsigned long flags = 0;
+
+	for (i = 0; i < pagevec_count(pvec); i++) {
+		struct folio *folio = page_folio(pvec->pages[i]);
+
+		lruvec = folio_lruvec_relock_irqsave(folio, lruvec, &flags);
+		__pagevec_lru_add_fn(folio, lruvec);
+	}
+	if (lruvec)
+		unlock_page_lruvec_irqrestore(lruvec, flags);
+	release_pages(pvec->pages, pvec->nr);
+	pagevec_reinit(pvec);
+}
+
 static void folio_batch_move_lru(struct folio_batch *fbatch, move_fn_t move_fn)
 {
 	int i;
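The "more logical position" puts __pagevec_lru_add() directly above folio_batch_move_lru(), added by the parent commit c2bc16817a, which walks a batch under the same lruvec relock pattern and applies a move_fn_t callback per folio. A hedged sketch of how that neighbouring machinery is used, modelled on the era's lru_move_tail_fn() rotate path; the details are recalled from memory and illustrative only:

/*
 * Sketch (assumption: modelled on the 5.19-era rotate path, not part
 * of this diff): a move_fn_t callback that folio_batch_move_lru()
 * invokes for each folio with the matching lruvec lock already held.
 */
static void lru_move_tail_fn(struct lruvec *lruvec, struct folio *folio)
{
	if (!folio_test_unevictable(folio)) {
		lruvec_del_folio(lruvec, folio);
		folio_clear_active(folio);
		lruvec_add_folio_tail(lruvec, folio);
		__count_vm_events(PGROTATED, folio_nr_pages(folio));
	}
}

/* ...and a drain site hands the callback to the batch walker: */
	folio_batch_move_lru(fbatch, lru_move_tail_fn);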
@@ -1036,69 +1099,6 @@ void __pagevec_release(struct pagevec *pvec)
 }
 EXPORT_SYMBOL(__pagevec_release);
 
-static void __pagevec_lru_add_fn(struct folio *folio, struct lruvec *lruvec)
-{
-	int was_unevictable = folio_test_clear_unevictable(folio);
-	long nr_pages = folio_nr_pages(folio);
-
-	VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
-
-	folio_set_lru(folio);
-	/*
-	 * Is an smp_mb__after_atomic() still required here, before
-	 * folio_evictable() tests PageMlocked, to rule out the possibility
-	 * of stranding an evictable folio on an unevictable LRU? I think
-	 * not, because __munlock_page() only clears PageMlocked while the LRU
-	 * lock is held.
-	 *
-	 * (That is not true of __page_cache_release(), and not necessarily
-	 * true of release_pages(): but those only clear PageMlocked after
-	 * put_page_testzero() has excluded any other users of the page.)
-	 */
-	if (folio_evictable(folio)) {
-		if (was_unevictable)
-			__count_vm_events(UNEVICTABLE_PGRESCUED, nr_pages);
-	} else {
-		folio_clear_active(folio);
-		folio_set_unevictable(folio);
-		/*
-		 * folio->mlock_count = !!folio_test_mlocked(folio)?
-		 * But that leaves __mlock_page() in doubt whether another
-		 * actor has already counted the mlock or not. Err on the
-		 * safe side, underestimate, let page reclaim fix it, rather
-		 * than leaving a page on the unevictable LRU indefinitely.
-		 */
-		folio->mlock_count = 0;
-		if (!was_unevictable)
-			__count_vm_events(UNEVICTABLE_PGCULLED, nr_pages);
-	}
-
-	lruvec_add_folio(lruvec, folio);
-	trace_mm_lru_insertion(folio);
-}
-
-/*
- * Add the passed pages to the LRU, then drop the caller's refcount
- * on them. Reinitialises the caller's pagevec.
- */
-void __pagevec_lru_add(struct pagevec *pvec)
-{
-	int i;
-	struct lruvec *lruvec = NULL;
-	unsigned long flags = 0;
-
-	for (i = 0; i < pagevec_count(pvec); i++) {
-		struct folio *folio = page_folio(pvec->pages[i]);
-
-		lruvec = folio_lruvec_relock_irqsave(folio, lruvec, &flags);
-		__pagevec_lru_add_fn(folio, lruvec);
-	}
-	if (lruvec)
-		unlock_page_lruvec_irqrestore(lruvec, flags);
-	release_pages(pvec->pages, pvec->nr);
-	pagevec_reinit(pvec);
-}
-
 /**
  * folio_batch_remove_exceptionals() - Prune non-folios from a batch.
  * @fbatch: The batch to prune
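With the declaration removed from pagevec.h, nothing outside mm/swap.c can call __pagevec_lru_add() directly any more; other code reaches the LRU through the exported wrappers. A hypothetical out-of-file caller for illustration (example_publish_folio() is an assumption; only folio_add_lru() is a real interface, which I believe is declared in linux/swap.h in this era):

#include <linux/swap.h>	/* assumed home of the folio_add_lru() declaration */

/*
 * Hypothetical example: code elsewhere in the kernel inserts a new
 * folio via the public interface; the batching and the eventual drain
 * through the static __pagevec_lru_add() stay private to mm/swap.c.
 */
static void example_publish_folio(struct folio *folio)
{
	folio_add_lru(folio);
}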