mm: multi-gen LRU: support page table walks
To further exploit spatial locality, the aging prefers to walk page tables
to search for young PTEs and promote hot pages.  A kill switch will be
added in the next patch to disable this behavior.  When disabled, the
aging relies on the rmap only.

NB: this behavior has nothing in common with the page table scanning in
the 2.4 kernel [1], which searches page tables for old PTEs, adds cold
pages to swapcache and unmaps them.

To avoid confusion, the term "iteration" specifically means the traversal
of an entire mm_struct list; the term "walk" will be applied to page
tables and the rmap, as usual.

An mm_struct list is maintained for each memcg, and an mm_struct follows
its owner task to the new memcg when this task is migrated.  Given an
lruvec, the aging iterates lruvec_memcg()->mm_list and calls
walk_page_range() with each mm_struct on this list to promote hot pages
before it increments max_seq.

When multiple page table walkers iterate the same list, each of them gets
a unique mm_struct; therefore they can run concurrently.  Page table
walkers ignore any misplaced pages, e.g., if an mm_struct was migrated,
pages it left in the previous memcg will not be promoted when its current
memcg is under reclaim.  Similarly, page table walkers will not promote
pages from nodes other than the one under reclaim.

This patch uses the following optimizations when walking page tables:
1. It tracks the usage of mm_structs between context switches so that
   page table walkers can skip processes that have been sleeping since
   the last iteration.
2. It uses generational Bloom filters to record populated branches so
   that page table walkers can reduce their search space based on the
   query results, e.g., to skip page tables containing mostly holes or
   misplaced pages.
3. It takes advantage of the accessed bit in non-leaf PMD entries when
   CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG=y.
4. It does not zigzag between a PGD table and the same PMD table spanning
   multiple VMAs.  IOW, it finishes all the VMAs within the range of the
   same PMD table before it returns to a PGD table.  This improves the
   cache performance for workloads that have large numbers of tiny
   VMAs [2], especially when CONFIG_PGTABLE_LEVELS=5.

Server benchmark results:
  Single workload:
    fio (buffered I/O): no change

  Single workload:
    memcached (anon): +[8, 10]%
                Ops/sec      KB/sec
      patch1-7: 1147696.57   44640.29
      patch1-8: 1245274.91   48435.66

  Configurations:
    no change

Client benchmark results:
  kswapd profiles:
    patch1-7
      48.16%  lzo1x_1_do_compress (real work)
       8.20%  page_vma_mapped_walk (overhead)
       7.06%  _raw_spin_unlock_irq
       2.92%  ptep_clear_flush
       2.53%  __zram_bvec_write
       2.11%  do_raw_spin_lock
       2.02%  memmove
       1.93%  lru_gen_look_around
       1.56%  free_unref_page_list
       1.40%  memset

    patch1-8
      49.44%  lzo1x_1_do_compress (real work)
       6.19%  page_vma_mapped_walk (overhead)
       5.97%  _raw_spin_unlock_irq
       3.13%  get_pfn_folio
       2.85%  ptep_clear_flush
       2.42%  __zram_bvec_write
       2.08%  do_raw_spin_lock
       1.92%  memmove
       1.44%  alloc_zspage
       1.36%  memset

  Configurations:
    no change

Thanks to the following developers for their efforts [3].
  kernel test robot <lkp@intel.com>

[1] https://lwn.net/Articles/23732/
[2] https://llvm.org/docs/ScudoHardenedAllocator.html
[3] https://lore.kernel.org/r/202204160827.ekEARWQo-lkp@intel.com/

Link: https://lkml.kernel.org/r/20220918080010.2920238-9-yuzhao@google.com
Signed-off-by: Yu Zhao <yuzhao@google.com>
Acked-by: Brian Geffon <bgeffon@google.com>
Acked-by: Jan Alexander Steffens (heftig) <heftig@archlinux.org>
Acked-by: Oleksandr Natalenko <oleksandr@natalenko.name>
Acked-by: Steven Barrett <steven@liquorix.net>
Acked-by: Suleiman Souhlal <suleiman@google.com>
Tested-by: Daniel Byrne <djbyrne@mtu.edu>
Tested-by: Donald Carr <d@chaos-reins.com>
Tested-by: Holger Hoffstätte <holger@applied-asynchrony.com>
Tested-by: Konstantin Kharlamov <Hi-Angel@yandex.ru>
Tested-by: Shuang Zhai <szhai2@cs.rochester.edu>
Tested-by: Sofia Trinh <sofia.trinh@edi.works>
Tested-by: Vaibhav Jain <vaibhav@linux.ibm.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Cc: Barry Song <baohua@kernel.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Hillf Danton <hdanton@sina.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Michael Larabel <Michael@MichaelLarabel.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Mike Rapoport <rppt@linux.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Qi Zheng <zhengqi.arch@bytedance.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit bd74fdaea1
parent 018ee47f14
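The mm/vmscan.c changes that implement the Bloom filters described in
optimization 2 above are suppressed in the diff below, so here is a minimal,
self-contained sketch of the "generational" (double-buffered) Bloom filter
idea.  The filter size, the hash function and every name in it (bloom_set,
record_populated, flip_filters, ...) are illustrative assumptions, not the
kernel's implementation.

/*
 * Illustrative sketch only: double-buffered ("generational") Bloom filters.
 * Sizes, hash and API names are assumptions made for demonstration.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NR_BLOOM_FILTERS	2
#define BLOOM_BITS		(1 << 15)	/* 32K bits per filter */

static uint64_t bloom[NR_BLOOM_FILTERS][BLOOM_BITS / 64];
static unsigned long filter_gen;		/* parity picks the current filter */

/* two probe positions derived from one 64-bit mix (splitmix64-style) */
static void bloom_hash(uint64_t key, uint32_t *h1, uint32_t *h2)
{
	key += 0x9e3779b97f4a7c15ULL;
	key = (key ^ (key >> 30)) * 0xbf58476d1ce4e5b9ULL;
	key = (key ^ (key >> 27)) * 0x94d049bb133111ebULL;
	key ^= key >> 31;
	*h1 = key % BLOOM_BITS;
	*h2 = (key >> 32) % BLOOM_BITS;
}

static void bloom_set(uint64_t *f, uint64_t key)
{
	uint32_t h1, h2;

	bloom_hash(key, &h1, &h2);
	f[h1 / 64] |= 1ULL << (h1 % 64);
	f[h2 / 64] |= 1ULL << (h2 % 64);
}

static bool bloom_test(const uint64_t *f, uint64_t key)
{
	uint32_t h1, h2;

	bloom_hash(key, &h1, &h2);
	return (f[h1 / 64] >> (h1 % 64) & 1) && (f[h2 / 64] >> (h2 % 64) & 1);
}

/* a walker records a populated PMD range into the filter being built ... */
static void record_populated(uint64_t pmd_addr)
{
	bloom_set(bloom[(filter_gen + 1) % NR_BLOOM_FILTERS], pmd_addr);
}

/* ... and consults the filter built by the previous iteration */
static bool worth_rescanning(uint64_t pmd_addr)
{
	return bloom_test(bloom[filter_gen % NR_BLOOM_FILTERS], pmd_addr);
}

/* end of an iteration: flip to the filter just built, reset the other one */
static void flip_filters(void)
{
	filter_gen++;
	memset(bloom[(filter_gen + 1) % NR_BLOOM_FILTERS], 0, sizeof(bloom[0]));
}

int main(void)
{
	record_populated(0x7f0000200000ULL);
	flip_filters();
	printf("rescan 0x7f0000200000: %d\n", worth_rescanning(0x7f0000200000ULL));
	printf("rescan 0x7f0000400000: %d\n", worth_rescanning(0x7f0000400000ULL));
	return 0;
}

Double-buffering is what makes the filters generational: walkers query the
filter populated during the previous iteration while recording into the other
one, and the flip at the end of each iteration lets stale branches age out.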
@@ -1014,6 +1014,7 @@ static int exec_mmap(struct mm_struct *mm)
 	active_mm = tsk->active_mm;
 	tsk->active_mm = mm;
 	tsk->mm = mm;
+	lru_gen_add_mm(mm);
 	/*
 	 * This prevents preemption while active_mm is being loaded and
 	 * it and mm are being updated, which could cause problems for
@@ -1029,6 +1030,7 @@ static int exec_mmap(struct mm_struct *mm)
 	tsk->mm->vmacache_seqnum = 0;
 	vmacache_flush(tsk);
 	task_unlock(tsk);
+	lru_gen_use_mm(mm);
 
 	if (vfork)
 		timens_on_fork(tsk->nsproxy, tsk);
@@ -350,6 +350,11 @@ struct mem_cgroup {
 	struct deferred_split deferred_split_queue;
 #endif
 
+#ifdef CONFIG_LRU_GEN
+	/* per-memcg mm_struct list */
+	struct lru_gen_mm_list mm_list;
+#endif
+
 	struct mem_cgroup_per_node *nodeinfo[];
 };
 
@@ -672,6 +672,22 @@ struct mm_struct {
 		 */
 		unsigned long ksm_merging_pages;
 #endif
+#ifdef CONFIG_LRU_GEN
+		struct {
+			/* this mm_struct is on lru_gen_mm_list */
+			struct list_head list;
+			/*
+			 * Set when switching to this mm_struct, as a hint of
+			 * whether it has been used since the last time per-node
+			 * page table walkers cleared the corresponding bits.
+			 */
+			unsigned long bitmap;
+#ifdef CONFIG_MEMCG
+			/* points to the memcg of "owner" above */
+			struct mem_cgroup *memcg;
+#endif
+		} lru_gen;
+#endif /* CONFIG_LRU_GEN */
 	} __randomize_layout;
 
 	/*
@@ -698,6 +714,66 @@ static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
 	return (struct cpumask *)&mm->cpu_bitmap;
 }
 
+#ifdef CONFIG_LRU_GEN
+
+struct lru_gen_mm_list {
+	/* mm_struct list for page table walkers */
+	struct list_head fifo;
+	/* protects the list above */
+	spinlock_t lock;
+};
+
+void lru_gen_add_mm(struct mm_struct *mm);
+void lru_gen_del_mm(struct mm_struct *mm);
+#ifdef CONFIG_MEMCG
+void lru_gen_migrate_mm(struct mm_struct *mm);
+#endif
+
+static inline void lru_gen_init_mm(struct mm_struct *mm)
+{
+	INIT_LIST_HEAD(&mm->lru_gen.list);
+	mm->lru_gen.bitmap = 0;
+#ifdef CONFIG_MEMCG
+	mm->lru_gen.memcg = NULL;
+#endif
+}
+
+static inline void lru_gen_use_mm(struct mm_struct *mm)
+{
+	/*
+	 * When the bitmap is set, page reclaim knows this mm_struct has been
+	 * used since the last time it cleared the bitmap. So it might be worth
+	 * walking the page tables of this mm_struct to clear the accessed bit.
+	 */
+	WRITE_ONCE(mm->lru_gen.bitmap, -1);
+}
+
+#else /* !CONFIG_LRU_GEN */
+
+static inline void lru_gen_add_mm(struct mm_struct *mm)
+{
+}
+
+static inline void lru_gen_del_mm(struct mm_struct *mm)
+{
+}
+
+#ifdef CONFIG_MEMCG
+static inline void lru_gen_migrate_mm(struct mm_struct *mm)
+{
+}
+#endif
+
+static inline void lru_gen_init_mm(struct mm_struct *mm)
+{
+}
+
+static inline void lru_gen_use_mm(struct mm_struct *mm)
+{
+}
+
+#endif /* CONFIG_LRU_GEN */
+
 struct mmu_gather;
 extern void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm);
 extern void tlb_gather_mmu_fullmm(struct mmu_gather *tlb, struct mm_struct *mm);
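The lru_gen_use_mm() hint added above is what optimization 1 in the commit
message relies on.  Below is a minimal userspace sketch of that set-on-switch-in
/ clear-on-walk protocol; the names (struct mm_mock, mock_use_mm,
mock_worth_walking) are stand-ins, and the kernel's consumer of the bitmap
lives in mm/vmscan.c (diff suppressed below).

/* Userspace model of the hint protocol, not kernel code. */
#include <stdatomic.h>
#include <stdbool.h>

struct mm_mock {
	atomic_ulong bitmap;	/* stands in for mm->lru_gen.bitmap */
};

/* context-switch path: cheap unconditional store, cf. lru_gen_use_mm() */
void mock_use_mm(struct mm_mock *mm)
{
	atomic_store_explicit(&mm->bitmap, ~0UL, memory_order_relaxed);
}

/* walker path: consume the hint; false means the task slept the whole time */
bool mock_worth_walking(struct mm_mock *mm)
{
	return atomic_exchange_explicit(&mm->bitmap, 0, memory_order_relaxed);
}

The design point this models: the scheduler path pays only an unconditional
store per switch-in, while the walker pays for the check only once per
iteration, so sleeping processes cost the aging almost nothing.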
@@ -408,7 +408,7 @@ enum {
  * min_seq behind.
  *
  * The number of pages in each generation is eventually consistent and therefore
- * can be transiently negative.
+ * can be transiently negative when reset_batch_size() is pending.
  */
 struct lru_gen_struct {
 	/* the aging increments the youngest generation number */
@@ -430,6 +430,53 @@ struct lru_gen_struct {
 	atomic_long_t refaulted[NR_HIST_GENS][ANON_AND_FILE][MAX_NR_TIERS];
 };
 
+enum {
+	MM_LEAF_TOTAL,		/* total leaf entries */
+	MM_LEAF_OLD,		/* old leaf entries */
+	MM_LEAF_YOUNG,		/* young leaf entries */
+	MM_NONLEAF_TOTAL,	/* total non-leaf entries */
+	MM_NONLEAF_FOUND,	/* non-leaf entries found in Bloom filters */
+	MM_NONLEAF_ADDED,	/* non-leaf entries added to Bloom filters */
+	NR_MM_STATS
+};
+
+/* double-buffering Bloom filters */
+#define NR_BLOOM_FILTERS	2
+
+struct lru_gen_mm_state {
+	/* set to max_seq after each iteration */
+	unsigned long seq;
+	/* where the current iteration continues (inclusive) */
+	struct list_head *head;
+	/* where the last iteration ended (exclusive) */
+	struct list_head *tail;
+	/* to wait for the last page table walker to finish */
+	struct wait_queue_head wait;
+	/* Bloom filters flip after each iteration */
+	unsigned long *filters[NR_BLOOM_FILTERS];
+	/* the mm stats for debugging */
+	unsigned long stats[NR_HIST_GENS][NR_MM_STATS];
+	/* the number of concurrent page table walkers */
+	int nr_walkers;
+};
+
+struct lru_gen_mm_walk {
+	/* the lruvec under reclaim */
+	struct lruvec *lruvec;
+	/* unstable max_seq from lru_gen_struct */
+	unsigned long max_seq;
+	/* the next address within an mm to scan */
+	unsigned long next_addr;
+	/* to batch promoted pages */
+	int nr_pages[MAX_NR_GENS][ANON_AND_FILE][MAX_NR_ZONES];
+	/* to batch the mm stats */
+	int mm_stats[NR_MM_STATS];
+	/* total batched items */
+	int batched;
+	bool can_swap;
+	bool force_scan;
+};
+
 void lru_gen_init_lruvec(struct lruvec *lruvec);
 void lru_gen_look_around(struct page_vma_mapped_walk *pvmw);
 
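struct lru_gen_mm_state above carries the bookkeeping that lets concurrent
walkers each receive a unique mm_struct, as the commit message describes.  The
sketch below is a simplified userspace model of that hand-out, with a plain
mutex and a singly linked list as stand-ins; all names are assumptions, and
the real iteration logic is in mm/vmscan.c (diff suppressed below).

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

/* stand-in for an mm_struct queued on a per-memcg list */
struct mm_mock {
	struct mm_mock *next;
};

/* stand-in for lru_gen_mm_list + lru_gen_mm_state; init lock with
 * PTHREAD_MUTEX_INITIALIZER before use */
struct mm_iter_mock {
	struct mm_mock *fifo;		/* all mm_mocks of this memcg */
	struct mm_mock *head;		/* where the current iteration continues */
	int nr_walkers;			/* walkers still busy in this iteration */
	unsigned long seq;		/* completed iterations; cf. max_seq */
	pthread_mutex_t lock;
};

/* each call hands a different mm to a different walker, or NULL when drained */
struct mm_mock *iteration_next(struct mm_iter_mock *it)
{
	struct mm_mock *mm;

	pthread_mutex_lock(&it->lock);
	mm = it->head;
	if (mm) {
		it->head = mm->next;
		it->nr_walkers++;
	}
	pthread_mutex_unlock(&it->lock);
	return mm;
}

/* a walker reports one finished mm; the last one to finish ends the iteration */
bool iteration_finish_one(struct mm_iter_mock *it)
{
	bool last;

	pthread_mutex_lock(&it->lock);
	last = --it->nr_walkers == 0 && it->head == NULL;
	if (last) {
		it->seq++;		/* iteration complete */
		it->head = it->fifo;	/* rewind for the next iteration */
	}
	pthread_mutex_unlock(&it->lock);
	return last;
}

The kernel's version is richer (head/tail bound the current iteration and the
wait queue lets the aging wait for stragglers), but the invariant modeled here
is the same one the commit message states: under the lock, no two walkers ever
receive the same mm_struct, so they can run concurrently.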
@@ -480,6 +527,8 @@ struct lruvec {
 #ifdef CONFIG_LRU_GEN
 	/* evictable pages divided into generations */
 	struct lru_gen_struct lrugen;
+	/* to concurrently iterate lru_gen_mm_list */
+	struct lru_gen_mm_state mm_state;
 #endif
 #ifdef CONFIG_MEMCG
 	struct pglist_data *pgdat;
@@ -1176,6 +1225,11 @@ typedef struct pglist_data {
 
 	unsigned long		flags;
 
+#ifdef CONFIG_LRU_GEN
+	/* kswap mm walk data */
+	struct lru_gen_mm_walk	mm_walk;
+#endif
+
 	ZONE_PADDING(_pad2_)
 
 	/* Per-node vmstats */
@@ -162,6 +162,10 @@ union swap_header {
  */
 struct reclaim_state {
 	unsigned long reclaimed_slab;
+#ifdef CONFIG_LRU_GEN
+	/* per-thread mm walk data */
+	struct lru_gen_mm_walk *mm_walk;
+#endif
 };
 
 #ifdef __KERNEL__
@@ -466,6 +466,7 @@ assign_new_owner:
 		goto retry;
 	}
 	WRITE_ONCE(mm->owner, c);
+	lru_gen_migrate_mm(mm);
 	task_unlock(c);
 	put_task_struct(c);
 }
@@ -1152,6 +1152,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
 		goto fail_nocontext;
 
 	mm->user_ns = get_user_ns(user_ns);
+	lru_gen_init_mm(mm);
 	return mm;
 
 fail_nocontext:
@@ -1194,6 +1195,7 @@ static inline void __mmput(struct mm_struct *mm)
 	}
 	if (mm->binfmt)
 		module_put(mm->binfmt->module);
+	lru_gen_del_mm(mm);
 	mmdrop(mm);
 }
 
@@ -2694,6 +2696,13 @@ pid_t kernel_clone(struct kernel_clone_args *args)
 		get_task_struct(p);
 	}
 
+	if (IS_ENABLED(CONFIG_LRU_GEN) && !(clone_flags & CLONE_VM)) {
+		/* lock the task to synchronize with memcg migration */
+		task_lock(p);
+		lru_gen_add_mm(p->mm);
+		task_unlock(p);
+	}
+
 	wake_up_new_task(p);
 
 	/* forking complete and child started to run, tell ptracer */
@@ -5180,6 +5180,7 @@ context_switch(struct rq *rq, struct task_struct *prev,
 		 * finish_task_switch()'s mmdrop().
 		 */
 		switch_mm_irqs_off(prev->active_mm, next->mm, next);
+		lru_gen_use_mm(next->mm);
 
 		if (!prev->mm) {                        // from kernel
 			/* will mmdrop() in finish_task_switch(). */
@@ -6204,6 +6204,30 @@ static void mem_cgroup_move_task(void)
 }
 #endif
 
+#ifdef CONFIG_LRU_GEN
+static void mem_cgroup_attach(struct cgroup_taskset *tset)
+{
+	struct task_struct *task;
+	struct cgroup_subsys_state *css;
+
+	/* find the first leader if there is any */
+	cgroup_taskset_for_each_leader(task, css, tset)
+		break;
+
+	if (!task)
+		return;
+
+	task_lock(task);
+	if (task->mm && READ_ONCE(task->mm->owner) == task)
+		lru_gen_migrate_mm(task->mm);
+	task_unlock(task);
+}
+#else
+static void mem_cgroup_attach(struct cgroup_taskset *tset)
+{
+}
+#endif /* CONFIG_LRU_GEN */
+
 static int seq_puts_memcg_tunable(struct seq_file *m, unsigned long value)
 {
 	if (value == PAGE_COUNTER_MAX)
@@ -6609,6 +6633,7 @@ struct cgroup_subsys memory_cgrp_subsys = {
 	.css_reset = mem_cgroup_css_reset,
 	.css_rstat_flush = mem_cgroup_css_rstat_flush,
 	.can_attach = mem_cgroup_can_attach,
+	.attach = mem_cgroup_attach,
 	.cancel_attach = mem_cgroup_cancel_attach,
 	.post_attach = mem_cgroup_move_task,
 	.dfl_cftypes = memory_files,
mm/vmscan.c: 1010 lines changed; file diff suppressed because it is too large.