asm-generic/tlb, arch: Invert CONFIG_HAVE_RCU_TABLE_INVALIDATE
Make issuing a TLB invalidate for page-table pages the normal case.

The reason is twofold:

 - too many invalidates is safer than too few,
 - most architectures use the linux page-tables natively
   and would thus require this.

Make it an opt-out, instead of an opt-in.

No change in behavior intended.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Will Deacon <will.deacon@arm.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@surriel.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
This commit is contained in:
parent
8b6dd0c478
commit
96bc9567cb
|
@@ -383,7 +383,7 @@ config HAVE_ARCH_JUMP_LABEL_RELATIVE
|
|||
config HAVE_RCU_TABLE_FREE
|
||||
bool
|
||||
|
||||
config HAVE_RCU_TABLE_INVALIDATE
|
||||
config HAVE_RCU_TABLE_NO_INVALIDATE
|
||||
bool
|
||||
|
||||
config HAVE_MMU_GATHER_PAGE_SIZE
|
||||
|
|
|
@@ -149,7 +149,6 @@ config ARM64
|
|||
select HAVE_PERF_USER_STACK_DUMP
|
||||
select HAVE_REGS_AND_STACK_ACCESS_API
|
||||
select HAVE_RCU_TABLE_FREE
|
||||
select HAVE_RCU_TABLE_INVALIDATE
|
||||
select HAVE_RSEQ
|
||||
select HAVE_STACKPROTECTOR
|
||||
select HAVE_SYSCALL_TRACEPOINTS
|
||||
|
|
|
@@ -218,6 +218,7 @@ config PPC
|
|||
select HAVE_PERF_REGS
|
||||
select HAVE_PERF_USER_STACK_DUMP
|
||||
select HAVE_RCU_TABLE_FREE if SMP
|
||||
select HAVE_RCU_TABLE_NO_INVALIDATE if HAVE_RCU_TABLE_FREE
|
||||
select HAVE_MMU_GATHER_PAGE_SIZE
|
||||
select HAVE_REGS_AND_STACK_ACCESS_API
|
||||
select HAVE_RELIABLE_STACKTRACE if PPC_BOOK3S_64 && CPU_LITTLE_ENDIAN
|
||||
|
|
|
@@ -63,6 +63,7 @@ config SPARC64
|
|||
select HAVE_KRETPROBES
|
||||
select HAVE_KPROBES
|
||||
select HAVE_RCU_TABLE_FREE if SMP
|
||||
select HAVE_RCU_TABLE_NO_INVALIDATE if HAVE_RCU_TABLE_FREE
|
||||
select HAVE_MEMBLOCK_NODE_MAP
|
||||
select HAVE_ARCH_TRANSPARENT_HUGEPAGE
|
||||
select HAVE_DYNAMIC_FTRACE
|
||||
|
|
|
@@ -183,7 +183,6 @@ config X86
|
|||
select HAVE_PERF_REGS
|
||||
select HAVE_PERF_USER_STACK_DUMP
|
||||
select HAVE_RCU_TABLE_FREE if PARAVIRT
|
||||
select HAVE_RCU_TABLE_INVALIDATE if HAVE_RCU_TABLE_FREE
|
||||
select HAVE_REGS_AND_STACK_ACCESS_API
|
||||
select HAVE_RELIABLE_STACKTRACE if X86_64 && (UNWINDER_FRAME_POINTER || UNWINDER_ORC) && STACK_VALIDATION
|
||||
select HAVE_FUNCTION_ARG_ACCESS_API
|
||||
|
|
|
@@ -135,11 +135,12 @@
|
|||
* When used, an architecture is expected to provide __tlb_remove_table()
|
||||
* which does the actual freeing of these pages.
|
||||
*
|
||||
* HAVE_RCU_TABLE_INVALIDATE
|
||||
* HAVE_RCU_TABLE_NO_INVALIDATE
|
||||
*
|
||||
* This makes HAVE_RCU_TABLE_FREE call tlb_flush_mmu_tlbonly() before freeing
|
||||
* the page-table pages. Required if you use HAVE_RCU_TABLE_FREE and your
|
||||
* architecture uses the Linux page-tables natively.
|
||||
* This makes HAVE_RCU_TABLE_FREE avoid calling tlb_flush_mmu_tlbonly() before
|
||||
* freeing the page-table pages. This can be avoided if you use
|
||||
* HAVE_RCU_TABLE_FREE and your architecture does _NOT_ use the Linux
|
||||
* page-tables natively.
|
||||
*
|
||||
* MMU_GATHER_NO_RANGE
|
||||
*
|
||||
|
|
|
@@ -157,7 +157,7 @@ bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_
|
|||
*/
|
||||
static inline void tlb_table_invalidate(struct mmu_gather *tlb)
|
||||
{
|
||||
#ifdef CONFIG_HAVE_RCU_TABLE_INVALIDATE
|
||||
#ifndef CONFIG_HAVE_RCU_TABLE_NO_INVALIDATE
|
||||
/*
|
||||
* Invalidate page-table caches used by hardware walkers. Then we still
|
||||
* need to RCU-sched wait while freeing the pages because software
|
||||
|
|
Loading…
Reference in New Issue