mm/memcg: Add folio_lruvec_relock_irq() and folio_lruvec_relock_irqsave()
These are the folio equivalents of relock_page_lruvec_irq() and relock_page_lruvec_irqsave(). Also convert page_matches_lruvec() to folio_matches_lruvec().

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: David Howells <dhowells@redhat.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
commit 0de340cbed
parent e809c3fede
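For context, the relock helpers exist so that code walking a batch of folios only drops and re-takes the lruvec lock when it crosses into a different lruvec. A minimal caller sketch, mirroring the pagevec loops converted below (frob_folio_batch() is a hypothetical name, not part of this patch):

	static void frob_folio_batch(struct folio **folios, unsigned int nr)
	{
		struct lruvec *lruvec = NULL;
		unsigned long flags = 0;
		unsigned int i;

		for (i = 0; i < nr; i++) {
			struct folio *folio = folios[i];

			/*
			 * If the held lruvec still matches this folio it is
			 * returned unchanged; otherwise it is unlocked and
			 * the folio's own lruvec is locked instead.
			 */
			lruvec = folio_lruvec_relock_irqsave(folio, lruvec, &flags);

			/* ... operate on folio under the lruvec lock ... */
		}
		if (lruvec)
			unlock_page_lruvec_irqrestore(lruvec, flags);
	}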
@@ -1568,19 +1568,19 @@ static inline void unlock_page_lruvec_irqrestore(struct lruvec *lruvec,
 }
 
 /* Test requires a stable page->memcg binding, see page_memcg() */
-static inline bool page_matches_lruvec(struct page *page, struct lruvec *lruvec)
+static inline bool folio_matches_lruvec(struct folio *folio,
+		struct lruvec *lruvec)
 {
-	return lruvec_pgdat(lruvec) == page_pgdat(page) &&
-	       lruvec_memcg(lruvec) == page_memcg(page);
+	return lruvec_pgdat(lruvec) == folio_pgdat(folio) &&
+	       lruvec_memcg(lruvec) == folio_memcg(folio);
 }
 
 /* Don't lock again iff page's lruvec locked */
-static inline struct lruvec *relock_page_lruvec_irq(struct page *page,
+static inline struct lruvec *folio_lruvec_relock_irq(struct folio *folio,
 		struct lruvec *locked_lruvec)
 {
-	struct folio *folio = page_folio(page);
 	if (locked_lruvec) {
-		if (page_matches_lruvec(page, locked_lruvec))
+		if (folio_matches_lruvec(folio, locked_lruvec))
 			return locked_lruvec;
 
 		unlock_page_lruvec_irq(locked_lruvec);
@@ -1590,12 +1590,11 @@ static inline struct lruvec *relock_page_lruvec_irq(struct page *page,
 }
 
 /* Don't lock again iff page's lruvec locked */
-static inline struct lruvec *relock_page_lruvec_irqsave(struct page *page,
+static inline struct lruvec *folio_lruvec_relock_irqsave(struct folio *folio,
 		struct lruvec *locked_lruvec, unsigned long *flags)
 {
-	struct folio *folio = page_folio(page);
 	if (locked_lruvec) {
-		if (page_matches_lruvec(page, locked_lruvec))
+		if (folio_matches_lruvec(folio, locked_lruvec))
 			return locked_lruvec;
 
 		unlock_page_lruvec_irqrestore(locked_lruvec, *flags);
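Put together, the two helpers read as follows after this patch. This is just the hunks above reassembled for readability; the trailing return statements fall outside the quoted context and are assumed to call folio_lruvec_lock_irq()/folio_lruvec_lock_irqsave() from the parent commit.

	static inline struct lruvec *folio_lruvec_relock_irq(struct folio *folio,
			struct lruvec *locked_lruvec)
	{
		if (locked_lruvec) {
			if (folio_matches_lruvec(folio, locked_lruvec))
				return locked_lruvec;

			unlock_page_lruvec_irq(locked_lruvec);
		}

		/* Assumed: provided by the parent commit, not shown in the hunk. */
		return folio_lruvec_lock_irq(folio);
	}

	static inline struct lruvec *folio_lruvec_relock_irqsave(struct folio *folio,
			struct lruvec *locked_lruvec, unsigned long *flags)
	{
		if (locked_lruvec) {
			if (folio_matches_lruvec(folio, locked_lruvec))
				return locked_lruvec;

			unlock_page_lruvec_irqrestore(locked_lruvec, *flags);
		}

		/* Assumed: provided by the parent commit, not shown in the hunk. */
		return folio_lruvec_lock_irqsave(folio, flags);
	}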
@@ -271,6 +271,7 @@ static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
 	/* Phase 1: page isolation */
 	for (i = 0; i < nr; i++) {
 		struct page *page = pvec->pages[i];
+		struct folio *folio = page_folio(page);
 
 		if (TestClearPageMlocked(page)) {
 			/*
@@ -278,7 +279,7 @@ static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
 			 * so we can spare the get_page() here.
 			 */
 			if (TestClearPageLRU(page)) {
-				lruvec = relock_page_lruvec_irq(page, lruvec);
+				lruvec = folio_lruvec_relock_irq(folio, lruvec);
 				del_page_from_lru_list(page, lruvec);
 				continue;
 			} else
mm/swap.c
@@ -189,12 +189,13 @@ static void pagevec_lru_move_fn(struct pagevec *pvec,
 
 	for (i = 0; i < pagevec_count(pvec); i++) {
 		struct page *page = pvec->pages[i];
+		struct folio *folio = page_folio(page);
 
 		/* block memcg migration during page moving between lru */
 		if (!TestClearPageLRU(page))
 			continue;
 
-		lruvec = relock_page_lruvec_irqsave(page, lruvec, &flags);
+		lruvec = folio_lruvec_relock_irqsave(folio, lruvec, &flags);
 		(*move_fn)(page, lruvec);
 
 		SetPageLRU(page);
@@ -893,11 +894,12 @@ void release_pages(struct page **pages, int nr)
 	int i;
 	LIST_HEAD(pages_to_free);
 	struct lruvec *lruvec = NULL;
-	unsigned long flags;
+	unsigned long flags = 0;
 	unsigned int lock_batch;
 
 	for (i = 0; i < nr; i++) {
 		struct page *page = pages[i];
+		struct folio *folio = page_folio(page);
 
 		/*
 		 * Make sure the IRQ-safe lock-holding time does not get
@@ -909,7 +911,7 @@ void release_pages(struct page **pages, int nr)
 			lruvec = NULL;
 		}
 
-		page = compound_head(page);
+		page = &folio->page;
 		if (is_huge_zero_page(page))
 			continue;
 
@@ -948,7 +950,7 @@ void release_pages(struct page **pages, int nr)
 		if (PageLRU(page)) {
 			struct lruvec *prev_lruvec = lruvec;
 
-			lruvec = relock_page_lruvec_irqsave(page, lruvec,
+			lruvec = folio_lruvec_relock_irqsave(folio, lruvec,
 							&flags);
 			if (prev_lruvec != lruvec)
 				lock_batch = 0;
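One detail from the release_pages() hunks worth spelling out: page_folio() already resolves to the compound head page, so page = &folio->page gives the same head page the old page = compound_head(page) did, without a second lookup. A purely illustrative assertion of that equivalence (not part of the patch):

	/* Illustrative only: both expressions name the same head page. */
	VM_BUG_ON_PAGE(compound_head(page) != &page_folio(page)->page, page);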
@@ -1052,8 +1054,9 @@ void __pagevec_lru_add(struct pagevec *pvec)
 
 	for (i = 0; i < pagevec_count(pvec); i++) {
 		struct page *page = pvec->pages[i];
+		struct folio *folio = page_folio(page);
 
-		lruvec = relock_page_lruvec_irqsave(page, lruvec, &flags);
+		lruvec = folio_lruvec_relock_irqsave(folio, lruvec, &flags);
 		__pagevec_lru_add_fn(page, lruvec);
 	}
 	if (lruvec)
@@ -2200,7 +2200,7 @@ static unsigned int move_pages_to_lru(struct lruvec *lruvec,
 		 * All pages were isolated from the same lruvec (and isolation
 		 * inhibits memcg migration).
 		 */
-		VM_BUG_ON_PAGE(!page_matches_lruvec(page, lruvec), page);
+		VM_BUG_ON_PAGE(!folio_matches_lruvec(page_folio(page), lruvec), page);
 		add_page_to_lru_list(page, lruvec);
 		nr_pages = thp_nr_pages(page);
 		nr_moved += nr_pages;
@@ -4666,6 +4666,7 @@ void check_move_unevictable_pages(struct pagevec *pvec)
 
 	for (i = 0; i < pvec->nr; i++) {
 		struct page *page = pvec->pages[i];
+		struct folio *folio = page_folio(page);
 		int nr_pages;
 
 		if (PageTransTail(page))
@@ -4678,7 +4679,7 @@ void check_move_unevictable_pages(struct pagevec *pvec)
 		if (!TestClearPageLRU(page))
 			continue;
 
-		lruvec = relock_page_lruvec_irq(page, lruvec);
+		lruvec = folio_lruvec_relock_irq(folio, lruvec);
 		if (page_evictable(page) && PageUnevictable(page)) {
 			del_page_from_lru_list(page, lruvec);
 			ClearPageUnevictable(page);