mm: hugetlb_vmemmap: cleanup CONFIG_HUGETLB_PAGE_FREE_VMEMMAP*
The word "free" is not expressive enough to describe the feature of optimizing vmemmap pages associated with each HugeTLB page, so rename this keyword to "optimize". This patch cleans up the configs to make the code more expressive.

Link: https://lkml.kernel.org/r/20220404074652.68024-4-songmuchun@bytedance.com
Signed-off-by: Muchun Song <songmuchun@bytedance.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: David Hildenbrand <david@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit 47010c040d
parent f10f1442c3
@@ -1660,7 +1660,7 @@
 			Format: size[KMG]

 	hugetlb_free_vmemmap=
-			[KNL] Requires CONFIG_HUGETLB_PAGE_FREE_VMEMMAP
+			[KNL] Requires CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
 			enabled.
 			Allows heavy hugetlb users to free up some more
 			memory (7 * PAGE_SIZE for each 2MB hugetlb page).
@@ -1669,7 +1669,7 @@
 			on: enable the feature
 			off: disable the feature

-			Built with CONFIG_HUGETLB_PAGE_FREE_VMEMMAP_DEFAULT_ON=y,
+			Built with CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON=y,
 			the default is on.

 			This is not compatible with memory_hotplug.memmap_on_memory.
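The saving quoted above ("7 * PAGE_SIZE for each 2MB hugetlb page") can be reproduced with a small back-of-the-envelope calculation. The sketch below assumes a 4 KiB base page and a 64-byte struct page; neither value is stated in this patch, so treat them as illustrative:

#include <stdio.h>

/* Back-of-the-envelope sketch of the documented saving.
 * Assumes a 4 KiB base page and a 64-byte struct page (illustrative values).
 */
int main(void)
{
	unsigned long base_page = 4096;        /* assumed base page size */
	unsigned long hugepage  = 2UL << 20;   /* 2 MiB HugeTLB page */
	unsigned long struct_pg = 64;          /* assumed sizeof(struct page) */

	unsigned long nr_struct_pages = hugepage / base_page;                    /* 512 */
	unsigned long vmemmap_pages   = nr_struct_pages * struct_pg / base_page; /* 8 */

	/* One vmemmap page is kept (see RESERVE_VMEMMAP_NR further down); the
	 * remaining tail pages are remapped read-only to it and freed.
	 */
	printf("vmemmap pages per 2MB hugetlb page: %lu, freeable: %lu\n",
	       vmemmap_pages, vmemmap_pages - 1);   /* prints 8 and 7 */
	return 0;
}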
@@ -164,7 +164,7 @@ default_hugepagesz
 	will all result in 256 2M huge pages being allocated. Valid default
 	huge page size is architecture dependent.
 hugetlb_free_vmemmap
-	When CONFIG_HUGETLB_PAGE_FREE_VMEMMAP is set, this enables freeing
+	When CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP is set, this enables optimizing
 	unused vmemmap pages associated with each HugeTLB page.

 	When multiple huge page sizes are supported, ``/proc/sys/vm/nr_hugepages``
@@ -97,7 +97,7 @@ config ARM64
 	select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
 	select ARCH_WANT_FRAME_POINTERS
 	select ARCH_WANT_HUGE_PMD_SHARE if ARM64_4K_PAGES || (ARM64_16K_PAGES && !ARM64_VA_BITS_36)
-	select ARCH_WANT_HUGETLB_PAGE_FREE_VMEMMAP
+	select ARCH_WANT_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
 	select ARCH_WANT_LD_ORPHAN_WARN
 	select ARCH_WANTS_NO_INSTR
 	select ARCH_HAS_UBSAN_SANITIZE_ALL
@@ -78,7 +78,7 @@ void flush_dcache_page(struct page *page)
 	/*
 	 * Only the head page's flags of HugeTLB can be cleared since the tail
 	 * vmemmap pages associated with each HugeTLB page are mapped with
-	 * read-only when CONFIG_HUGETLB_PAGE_FREE_VMEMMAP is enabled (more
+	 * read-only when CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP is enabled (more
 	 * details can refer to vmemmap_remap_pte()). Although
 	 * __sync_icache_dcache() only set PG_dcache_clean flag on the head
 	 * page struct, there is more than one page struct with PG_dcache_clean
@@ -121,7 +121,7 @@ config X86
 	select ARCH_WANTS_NO_INSTR
 	select ARCH_WANT_GENERAL_HUGETLB
 	select ARCH_WANT_HUGE_PMD_SHARE
-	select ARCH_WANT_HUGETLB_PAGE_FREE_VMEMMAP if X86_64
+	select ARCH_WANT_HUGETLB_PAGE_OPTIMIZE_VMEMMAP if X86_64
 	select ARCH_WANT_LD_ORPHAN_WARN
 	select ARCH_WANTS_THP_SWAP if X86_64
 	select ARCH_HAS_PARANOID_L1D_FLUSH
@@ -1269,7 +1269,7 @@ static struct kcore_list kcore_vsyscall;

 static void __init register_page_bootmem_info(void)
 {
-#if defined(CONFIG_NUMA) || defined(CONFIG_HUGETLB_PAGE_FREE_VMEMMAP)
+#if defined(CONFIG_NUMA) || defined(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP)
 	int i;

 	for_each_online_node(i)
fs/Kconfig (16 lines changed)
@@ -250,22 +250,22 @@ config HUGETLB_PAGE
 # to enable the feature of minimizing overhead of struct page associated with
 # each HugeTLB page.
 #
-config ARCH_WANT_HUGETLB_PAGE_FREE_VMEMMAP
+config ARCH_WANT_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
 	bool

-config HUGETLB_PAGE_FREE_VMEMMAP
+config HUGETLB_PAGE_OPTIMIZE_VMEMMAP
 	def_bool HUGETLB_PAGE
-	depends on ARCH_WANT_HUGETLB_PAGE_FREE_VMEMMAP
+	depends on ARCH_WANT_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
 	depends on SPARSEMEM_VMEMMAP

-config HUGETLB_PAGE_FREE_VMEMMAP_DEFAULT_ON
-	bool "Default freeing vmemmap pages of HugeTLB to on"
+config HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON
+	bool "Default optimizing vmemmap pages of HugeTLB to on"
 	default n
-	depends on HUGETLB_PAGE_FREE_VMEMMAP
+	depends on HUGETLB_PAGE_OPTIMIZE_VMEMMAP
 	help
-	  When using HUGETLB_PAGE_FREE_VMEMMAP, the freeing unused vmemmap
+	  When using HUGETLB_PAGE_OPTIMIZE_VMEMMAP, the optimizing unused vmemmap
 	  pages associated with each HugeTLB page is default off. Say Y here
-	  to enable freeing vmemmap pages of HugeTLB by default. It can then
+	  to enable optimizing vmemmap pages of HugeTLB by default. It can then
 	  be disabled on the command line via hugetlb_free_vmemmap=off.

 config MEMFD_CREATE
@@ -623,7 +623,7 @@ struct hstate {
 	unsigned int nr_huge_pages_node[MAX_NUMNODES];
 	unsigned int free_huge_pages_node[MAX_NUMNODES];
 	unsigned int surplus_huge_pages_node[MAX_NUMNODES];
-#ifdef CONFIG_HUGETLB_PAGE_FREE_VMEMMAP
+#ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
 	unsigned int optimize_vmemmap_pages;
 #endif
 #ifdef CONFIG_CGROUP_HUGETLB
@@ -3145,7 +3145,7 @@ static inline void print_vma_addr(char *prefix, unsigned long rip)
 }
 #endif

-#ifdef CONFIG_HUGETLB_PAGE_FREE_VMEMMAP
+#ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
 int vmemmap_remap_free(unsigned long start, unsigned long end,
 		       unsigned long reuse);
 int vmemmap_remap_alloc(unsigned long start, unsigned long end,
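For orientation, the prototypes above are the low-level remap primitives; on the HugeTLB side they are driven roughly as in the simplified sketch below. It follows the general shape of hugetlb_vmemmap_free() but omits error handling and page-flag bookkeeping, so read it as an illustration rather than the exact implementation:

/*
 * Simplified, illustrative sketch of how a HugeTLB page's vmemmap is
 * optimized using the primitives declared above.
 */
static void sketch_optimize_hugetlb_vmemmap(struct hstate *h, struct page *head)
{
	unsigned long start = (unsigned long)head;   /* vmemmap of this page */
	unsigned long end, reuse;
	unsigned long nr = hugetlb_optimize_vmemmap_pages(h);

	if (!nr)
		return;   /* feature off, or nothing worth freeing */

	/* Keep the first RESERVE_VMEMMAP_SIZE worth of struct pages intact... */
	start += RESERVE_VMEMMAP_SIZE;
	end    = start + (nr << PAGE_SHIFT);
	/* ...and reuse that retained page for the whole tail range. */
	reuse  = start - PAGE_SIZE;

	/* Remaps [start, end) read-only to "reuse" and frees the old pages. */
	vmemmap_remap_free(start, end, reuse);
}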
@@ -190,13 +190,13 @@ enum pageflags {

 #ifndef __GENERATING_BOUNDS_H

-#ifdef CONFIG_HUGETLB_PAGE_FREE_VMEMMAP
-DECLARE_STATIC_KEY_MAYBE(CONFIG_HUGETLB_PAGE_FREE_VMEMMAP_DEFAULT_ON,
+#ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
+DECLARE_STATIC_KEY_MAYBE(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON,
 			 hugetlb_optimize_vmemmap_key);

 static __always_inline bool hugetlb_optimize_vmemmap_enabled(void)
 {
-	return static_branch_maybe(CONFIG_HUGETLB_PAGE_FREE_VMEMMAP_DEFAULT_ON,
+	return static_branch_maybe(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON,
 				   &hugetlb_optimize_vmemmap_key);
 }

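The accessor above is a static key rather than a plain bool so that callers can guard optimize-vmemmap-specific handling at essentially zero cost when the feature is off. The helper below is hypothetical, shown only to illustrate the intended usage pattern:

/*
 * Hypothetical caller of hugetlb_optimize_vmemmap_enabled(); not part of
 * this patch, shown only to illustrate the static-key usage pattern.
 */
static __always_inline bool tail_struct_pages_are_readonly(void)
{
	/*
	 * static_branch_maybe() compiles to a patched jump, so when the
	 * feature is disabled (the default unless *_DEFAULT_ON=y or
	 * hugetlb_free_vmemmap=on is given) this check is effectively free.
	 */
	if (!hugetlb_optimize_vmemmap_enabled())
		return false;

	/*
	 * When the optimization is active, the tail struct pages of a
	 * HugeTLB page are backed by a single read-only vmemmap page, so
	 * flag updates must target the head struct page instead (see the
	 * arm64 flush_dcache_page() comment earlier in this diff).
	 */
	return true;
}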
@@ -77,7 +77,7 @@ obj-$(CONFIG_FRONTSWAP) += frontswap.o
 obj-$(CONFIG_ZSWAP) += zswap.o
 obj-$(CONFIG_HAS_DMA) += dmapool.o
 obj-$(CONFIG_HUGETLBFS) += hugetlb.o
-obj-$(CONFIG_HUGETLB_PAGE_FREE_VMEMMAP) += hugetlb_vmemmap.o
+obj-$(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP) += hugetlb_vmemmap.o
 obj-$(CONFIG_NUMA) += mempolicy.o
 obj-$(CONFIG_SPARSEMEM) += sparse.o
 obj-$(CONFIG_SPARSEMEM_VMEMMAP) += sparse-vmemmap.o
@@ -188,7 +188,7 @@
 #define RESERVE_VMEMMAP_NR		1U
 #define RESERVE_VMEMMAP_SIZE		(RESERVE_VMEMMAP_NR << PAGE_SHIFT)

-DEFINE_STATIC_KEY_MAYBE(CONFIG_HUGETLB_PAGE_FREE_VMEMMAP_DEFAULT_ON,
+DEFINE_STATIC_KEY_MAYBE(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON,
 			hugetlb_optimize_vmemmap_key);
 EXPORT_SYMBOL(hugetlb_optimize_vmemmap_key);

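The hugetlb_free_vmemmap= boot parameter documented at the top of this diff toggles the key defined above. A condensed sketch of that wiring follows; the handler name here is made up, and the real early_param handler in the hugetlb_vmemmap code performs additional sanity checks:

/*
 * Condensed sketch of the boot-parameter wiring for the static key above.
 * The handler name is illustrative only.
 */
static int __init sketch_hugetlb_vmemmap_param(char *buf)
{
	if (!buf)
		return -EINVAL;

	if (!strcmp(buf, "on"))
		static_branch_enable(&hugetlb_optimize_vmemmap_key);
	else if (!strcmp(buf, "off"))
		static_branch_disable(&hugetlb_optimize_vmemmap_key);
	else
		return -EINVAL;

	return 0;
}
early_param("hugetlb_free_vmemmap", sketch_hugetlb_vmemmap_param);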
@@ -276,7 +276,7 @@ void __init hugetlb_vmemmap_init(struct hstate *h)

 	/*
 	 * There are only (RESERVE_VMEMMAP_SIZE / sizeof(struct page)) struct
-	 * page structs that can be used when CONFIG_HUGETLB_PAGE_FREE_VMEMMAP,
+	 * page structs that can be used when CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP,
 	 * so add a BUILD_BUG_ON to catch invalid usage of the tail struct page.
 	 */
 	BUILD_BUG_ON(__NR_USED_SUBPAGE >=
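The constraint checked by that BUILD_BUG_ON can be made concrete with the RESERVE_VMEMMAP values from the previous hunk. The numbers below assume 4 KiB pages and a 64-byte struct page, which this patch does not itself guarantee:

/* Worked example of the BUILD_BUG_ON constraint, using illustrative values:
 * RESERVE_VMEMMAP_NR = 1 page of 4096 bytes, sizeof(struct page) = 64 bytes.
 */
#define EX_PAGE_SIZE		4096UL
#define EX_STRUCT_PAGE_SIZE	64UL
#define EX_RESERVE_VMEMMAP_SIZE	(1UL * EX_PAGE_SIZE)	/* RESERVE_VMEMMAP_NR = 1 */

/* Struct pages that stay directly usable per HugeTLB page: 4096 / 64 = 64. */
#define EX_USABLE_STRUCT_PAGES	(EX_RESERVE_VMEMMAP_SIZE / EX_STRUCT_PAGE_SIZE)

/* __NR_USED_SUBPAGE must therefore stay below this number of struct pages. */
_Static_assert(EX_USABLE_STRUCT_PAGES == 64,
	       "64 usable struct pages with these example values");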
@@ -10,7 +10,7 @@
 #define _LINUX_HUGETLB_VMEMMAP_H
 #include <linux/hugetlb.h>

-#ifdef CONFIG_HUGETLB_PAGE_FREE_VMEMMAP
+#ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
 int hugetlb_vmemmap_alloc(struct hstate *h, struct page *head);
 void hugetlb_vmemmap_free(struct hstate *h, struct page *head);
 void hugetlb_vmemmap_init(struct hstate *h);
@@ -41,5 +41,5 @@ static inline unsigned int hugetlb_optimize_vmemmap_pages(struct hstate *h)
 {
 	return 0;
 }
-#endif /* CONFIG_HUGETLB_PAGE_FREE_VMEMMAP */
+#endif /* CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP */
 #endif /* _LINUX_HUGETLB_VMEMMAP_H */
@@ -34,7 +34,7 @@
 #include <asm/pgalloc.h>
 #include <asm/tlbflush.h>

-#ifdef CONFIG_HUGETLB_PAGE_FREE_VMEMMAP
+#ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
 /**
  * struct vmemmap_remap_walk - walk vmemmap page table
  *
@@ -420,7 +420,7 @@ int vmemmap_remap_alloc(unsigned long start, unsigned long end,

 	return 0;
 }
-#endif /* CONFIG_HUGETLB_PAGE_FREE_VMEMMAP */
+#endif /* CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP */

 /*
  * Allocate a block of memory to be used to back the virtual memory map