mm/workingset: Convert workingset_refault() to take a folio
This nets us 178 bytes of savings from removing calls to compound_head. The three callers all grow a little, but each of them will be converted to use folios soon, so that's fine.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: David Howells <dhowells@redhat.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
This commit is contained in:
parent 9bf70167e3
commit 0995d7e568
@@ -329,7 +329,7 @@ static inline swp_entry_t folio_swap_entry(struct folio *folio)

 /* linux/mm/workingset.c */
 void workingset_age_nonresident(struct lruvec *lruvec, unsigned long nr_pages);
 void *workingset_eviction(struct page *page, struct mem_cgroup *target_memcg);
-void workingset_refault(struct page *page, void *shadow);
+void workingset_refault(struct folio *folio, void *shadow);
 void workingset_activation(struct folio *folio);

 /* Only track the nodes of mappings with shadow entries */
@@ -350,7 +350,7 @@ extern unsigned long nr_free_buffer_pages(void);

 /* linux/mm/swap.c */
 extern void lru_note_cost(struct lruvec *lruvec, bool file,
			  unsigned int nr_pages);
-extern void lru_note_cost_page(struct page *);
+extern void lru_note_cost_folio(struct folio *);
 extern void lru_cache_add(struct page *);
 void mark_page_accessed(struct page *);
 void folio_mark_accessed(struct folio *);
@@ -597,12 +597,6 @@ static inline void mod_lruvec_page_state(struct page *page,

 #endif /* CONFIG_MEMCG */

-static inline void inc_lruvec_state(struct lruvec *lruvec,
-				    enum node_stat_item idx)
-{
-	mod_lruvec_state(lruvec, idx, 1);
-}
-
 static inline void __inc_lruvec_page_state(struct page *page,
					   enum node_stat_item idx)
 {
@@ -998,7 +998,7 @@ int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
		 */
		WARN_ON_ONCE(PageActive(page));
		if (!(gfp_mask & __GFP_WRITE) && shadow)
-			workingset_refault(page, shadow);
+			workingset_refault(page_folio(page), shadow);
		lru_cache_add(page);
	}
	return ret;
@@ -3539,7 +3539,8 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)

		shadow = get_shadow_from_swap_cache(entry);
		if (shadow)
-			workingset_refault(page, shadow);
+			workingset_refault(page_folio(page),
+						shadow);

		lru_cache_add(page);

@@ -293,11 +293,10 @@ void lru_note_cost(struct lruvec *lruvec, bool file, unsigned int nr_pages)
	} while ((lruvec = parent_lruvec(lruvec)));
 }

-void lru_note_cost_page(struct page *page)
+void lru_note_cost_folio(struct folio *folio)
 {
-	struct folio *folio = page_folio(page);
-	lru_note_cost(folio_lruvec(folio),
-		      page_is_file_lru(page), thp_nr_pages(page));
+	lru_note_cost(folio_lruvec(folio), folio_is_file_lru(folio),
+		      folio_nr_pages(folio));
 }

 static void __folio_activate(struct folio *folio, struct lruvec *lruvec)
@@ -498,7 +498,7 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
	mem_cgroup_swapin_uncharge_swap(entry);

	if (shadow)
-		workingset_refault(page, shadow);
+		workingset_refault(page_folio(page), shadow);

	/* Caller will initiate read into locked page */
	lru_cache_add(page);
@@ -273,17 +273,17 @@ void *workingset_eviction(struct page *page, struct mem_cgroup *target_memcg)
 }

 /**
- * workingset_refault - evaluate the refault of a previously evicted page
- * @page: the freshly allocated replacement page
- * @shadow: shadow entry of the evicted page
+ * workingset_refault - Evaluate the refault of a previously evicted folio.
+ * @folio: The freshly allocated replacement folio.
+ * @shadow: Shadow entry of the evicted folio.
  *
  * Calculates and evaluates the refault distance of the previously
- * evicted page in the context of the node and the memcg whose memory
+ * evicted folio in the context of the node and the memcg whose memory
  * pressure caused the eviction.
  */
-void workingset_refault(struct page *page, void *shadow)
+void workingset_refault(struct folio *folio, void *shadow)
 {
-	bool file = page_is_file_lru(page);
+	bool file = folio_is_file_lru(folio);
	struct mem_cgroup *eviction_memcg;
	struct lruvec *eviction_lruvec;
	unsigned long refault_distance;
@@ -295,16 +295,17 @@ void workingset_refault(struct page *page, void *shadow)
	unsigned long refault;
	bool workingset;
	int memcgid;
+	long nr;

	unpack_shadow(shadow, &memcgid, &pgdat, &eviction, &workingset);

	rcu_read_lock();
	/*
	 * Look up the memcg associated with the stored ID. It might
-	 * have been deleted since the page's eviction.
+	 * have been deleted since the folio's eviction.
	 *
	 * Note that in rare events the ID could have been recycled
-	 * for a new cgroup that refaults a shared page. This is
+	 * for a new cgroup that refaults a shared folio. This is
	 * impossible to tell from the available data. However, this
	 * should be a rare and limited disturbance, and activations
	 * are always speculative anyway. Ultimately, it's the aging
@@ -340,17 +341,18 @@ void workingset_refault(struct page *page, void *shadow)
	refault_distance = (refault - eviction) & EVICTION_MASK;

	/*
-	 * The activation decision for this page is made at the level
+	 * The activation decision for this folio is made at the level
	 * where the eviction occurred, as that is where the LRU order
-	 * during page reclaim is being determined.
+	 * during folio reclaim is being determined.
	 *
-	 * However, the cgroup that will own the page is the one that
+	 * However, the cgroup that will own the folio is the one that
	 * is actually experiencing the refault event.
	 */
-	memcg = page_memcg(page);
+	nr = folio_nr_pages(folio);
+	memcg = folio_memcg(folio);
	lruvec = mem_cgroup_lruvec(memcg, pgdat);

-	inc_lruvec_state(lruvec, WORKINGSET_REFAULT_BASE + file);
+	mod_lruvec_state(lruvec, WORKINGSET_REFAULT_BASE + file, nr);

	mem_cgroup_flush_stats();
	/*
@@ -376,16 +378,16 @@ void workingset_refault(struct page *page, void *shadow)
	if (refault_distance > workingset_size)
		goto out;

-	SetPageActive(page);
-	workingset_age_nonresident(lruvec, thp_nr_pages(page));
-	inc_lruvec_state(lruvec, WORKINGSET_ACTIVATE_BASE + file);
+	folio_set_active(folio);
+	workingset_age_nonresident(lruvec, nr);
+	mod_lruvec_state(lruvec, WORKINGSET_ACTIVATE_BASE + file, nr);

-	/* Page was active prior to eviction */
+	/* Folio was active prior to eviction */
	if (workingset) {
-		SetPageWorkingset(page);
+		folio_set_workingset(folio);
		/* XXX: Move to lru_cache_add() when it supports new vs putback */
-		lru_note_cost_page(page);
-		inc_lruvec_state(lruvec, WORKINGSET_RESTORE_BASE + file);
+		lru_note_cost_folio(folio);
+		mod_lruvec_state(lruvec, WORKINGSET_RESTORE_BASE + file, nr);
	}
out:
	rcu_read_unlock();
Loading…
Reference in New Issue