x86/mm: split vmalloc_sync_all()
Commit 3f8fd02b1b
("mm/vmalloc: Sync unmappings in __purge_vmap_area_lazy()") introduced a call to vmalloc_sync_all() in the vunmap() code-path. While this change was necessary to maintain correctness on x86-32-pae kernels, it also adds additional cycles for architectures that don't need it. Specifically on x86-64 with CONFIG_VMAP_STACK=y some people reported severe performance regressions in micro-benchmarks because it now also calls the x86-64 implementation of vmalloc_sync_all() on vunmap(). But the vmalloc_sync_all() implementation on x86-64 is only needed for newly created mappings. To avoid the unnecessary work on x86-64 and to gain the performance back, split up vmalloc_sync_all() into two functions: * vmalloc_sync_mappings(), and * vmalloc_sync_unmappings() Most call-sites to vmalloc_sync_all() only care about new mappings being synchronized. The only exception is the new call-site added in the above mentioned commit. Shile Zhang directed us to a report of an 80% regression in reaim throughput. Fixes: 3f8fd02b1b
("mm/vmalloc: Sync unmappings in __purge_vmap_area_lazy()") Reported-by: kernel test robot <oliver.sang@intel.com> Reported-by: Shile Zhang <shile.zhang@linux.alibaba.com> Signed-off-by: Joerg Roedel <jroedel@suse.de> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Tested-by: Borislav Petkov <bp@suse.de> Acked-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com> [GHES] Cc: Dave Hansen <dave.hansen@linux.intel.com> Cc: Andy Lutomirski <luto@kernel.org> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Ingo Molnar <mingo@redhat.com> Cc: <stable@vger.kernel.org> Link: http://lkml.kernel.org/r/20191009124418.8286-1-joro@8bytes.org Link: https://lists.01.org/hyperkitty/list/lkp@lists.01.org/thread/4D3JPPHBNOSPFK2KEPC6KGKS6J25AIDB/ Link: http://lkml.kernel.org/r/20191113095530.228959-1-shile.zhang@linux.alibaba.com Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
0715e6c516
commit
763802b53a
|
@ -190,7 +190,7 @@ static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
|
||||||
return pmd_k;
|
return pmd_k;
|
||||||
}
|
}
|
||||||
|
|
||||||
void vmalloc_sync_all(void)
|
static void vmalloc_sync(void)
|
||||||
{
|
{
|
||||||
unsigned long address;
|
unsigned long address;
|
||||||
|
|
||||||
|
@ -217,6 +217,16 @@ void vmalloc_sync_all(void)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void vmalloc_sync_mappings(void)
|
||||||
|
{
|
||||||
|
vmalloc_sync();
|
||||||
|
}
|
||||||
|
|
||||||
|
void vmalloc_sync_unmappings(void)
|
||||||
|
{
|
||||||
|
vmalloc_sync();
|
||||||
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* 32-bit:
|
* 32-bit:
|
||||||
*
|
*
|
||||||
|
@ -319,11 +329,23 @@ out:
|
||||||
|
|
||||||
#else /* CONFIG_X86_64: */
|
#else /* CONFIG_X86_64: */
|
||||||
|
|
||||||
void vmalloc_sync_all(void)
|
void vmalloc_sync_mappings(void)
|
||||||
{
|
{
|
||||||
|
/*
|
||||||
|
* 64-bit mappings might allocate new p4d/pud pages
|
||||||
|
* that need to be propagated to all tasks' PGDs.
|
||||||
|
*/
|
||||||
sync_global_pgds(VMALLOC_START & PGDIR_MASK, VMALLOC_END);
|
sync_global_pgds(VMALLOC_START & PGDIR_MASK, VMALLOC_END);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void vmalloc_sync_unmappings(void)
|
||||||
|
{
|
||||||
|
/*
|
||||||
|
* Unmappings never allocate or free p4d/pud pages.
|
||||||
|
* No work is required here.
|
||||||
|
*/
|
||||||
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* 64-bit:
|
* 64-bit:
|
||||||
*
|
*
|
||||||
|
|
|
@ -171,7 +171,7 @@ int ghes_estatus_pool_init(int num_ghes)
|
||||||
* New allocation must be visible in all pgd before it can be found by
|
* New allocation must be visible in all pgd before it can be found by
|
||||||
* an NMI allocating from the pool.
|
* an NMI allocating from the pool.
|
||||||
*/
|
*/
|
||||||
vmalloc_sync_all();
|
vmalloc_sync_mappings();
|
||||||
|
|
||||||
rc = gen_pool_add(ghes_estatus_pool, addr, PAGE_ALIGN(len), -1);
|
rc = gen_pool_add(ghes_estatus_pool, addr, PAGE_ALIGN(len), -1);
|
||||||
if (rc)
|
if (rc)
|
||||||
|
|
|
@ -141,7 +141,8 @@ extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
|
||||||
|
|
||||||
extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
|
extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
|
||||||
unsigned long pgoff);
|
unsigned long pgoff);
|
||||||
void vmalloc_sync_all(void);
|
void vmalloc_sync_mappings(void);
|
||||||
|
void vmalloc_sync_unmappings(void);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Lowlevel-APIs (not for driver use!)
|
* Lowlevel-APIs (not for driver use!)
|
||||||
|
|
|
@ -519,7 +519,7 @@ NOKPROBE_SYMBOL(notify_die);
|
||||||
|
|
||||||
int register_die_notifier(struct notifier_block *nb)
|
int register_die_notifier(struct notifier_block *nb)
|
||||||
{
|
{
|
||||||
vmalloc_sync_all();
|
vmalloc_sync_mappings();
|
||||||
return atomic_notifier_chain_register(&die_chain, nb);
|
return atomic_notifier_chain_register(&die_chain, nb);
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL_GPL(register_die_notifier);
|
EXPORT_SYMBOL_GPL(register_die_notifier);
|
||||||
|
|
10
mm/nommu.c
10
mm/nommu.c
|
@ -370,10 +370,14 @@ void vm_unmap_aliases(void)
|
||||||
EXPORT_SYMBOL_GPL(vm_unmap_aliases);
|
EXPORT_SYMBOL_GPL(vm_unmap_aliases);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Implement a stub for vmalloc_sync_all() if the architecture chose not to
|
* Implement a stub for vmalloc_sync_[un]mapping() if the architecture
|
||||||
* have one.
|
* chose not to have one.
|
||||||
*/
|
*/
|
||||||
void __weak vmalloc_sync_all(void)
|
void __weak vmalloc_sync_mappings(void)
|
||||||
|
{
|
||||||
|
}
|
||||||
|
|
||||||
|
void __weak vmalloc_sync_unmappings(void)
|
||||||
{
|
{
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
11
mm/vmalloc.c
11
mm/vmalloc.c
|
@ -1295,7 +1295,7 @@ static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
|
||||||
* First make sure the mappings are removed from all page-tables
|
* First make sure the mappings are removed from all page-tables
|
||||||
* before they are freed.
|
* before they are freed.
|
||||||
*/
|
*/
|
||||||
vmalloc_sync_all();
|
vmalloc_sync_unmappings();
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* TODO: to calculate a flush range without looping.
|
* TODO: to calculate a flush range without looping.
|
||||||
|
@ -3128,16 +3128,19 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
|
||||||
EXPORT_SYMBOL(remap_vmalloc_range);
|
EXPORT_SYMBOL(remap_vmalloc_range);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Implement a stub for vmalloc_sync_all() if the architecture chose not to
|
* Implement stubs for vmalloc_sync_[un]mappings () if the architecture chose
|
||||||
* have one.
|
* not to have one.
|
||||||
*
|
*
|
||||||
* The purpose of this function is to make sure the vmalloc area
|
* The purpose of this function is to make sure the vmalloc area
|
||||||
* mappings are identical in all page-tables in the system.
|
* mappings are identical in all page-tables in the system.
|
||||||
*/
|
*/
|
||||||
void __weak vmalloc_sync_all(void)
|
void __weak vmalloc_sync_mappings(void)
|
||||||
{
|
{
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void __weak vmalloc_sync_unmappings(void)
|
||||||
|
{
|
||||||
|
}
|
||||||
|
|
||||||
static int f(pte_t *pte, unsigned long addr, void *data)
|
static int f(pte_t *pte, unsigned long addr, void *data)
|
||||||
{
|
{
|
||||||
|
|
Loading…
Reference in New Issue