powerpc/64s/radix: do not flush TLB on spurious fault
In the case of a spurious fault (which can happen due to a race with another thread that changes the page table), the default Linux mm code calls flush_tlb_page for that address. This is not required because the pte will be re-fetched. Hash does not wire this up to a hardware TLB flush for this reason. This patch avoids the flush for radix.

From Power ISA v3.0B, p.1090:

    Setting a Reference or Change Bit or Upgrading Access Authority
    (PTE Subject to Atomic Hardware Updates)

    If the only change being made to a valid PTE that is subject to
    atomic hardware updates is to set the Reference or Change bit to 1
    or to add access authorities, a simpler sequence suffices because
    the translation hardware will refetch the PTE if an access is
    attempted for which the only problems were reference and/or change
    bits needing to be set or insufficient access authority.

The nest MMU on POWER9 does not re-fetch the PTE after such an access attempt before faulting, so address spaces with a coprocessor attached will continue to flush in these cases.

This reduces tlbies for a kernel compile workload from 0.95M to 0.90M.

fork --fork --exec benchmark improved 0.5% (12300->12400).

Reviewed-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
commit 6d8278c414
parent e5f7cb58c2
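For context, the sketch below paraphrases where this hook fires in the generic fault path (handle_pte_fault() in mm/memory.c in kernels of this era). The wrapper name is made up for illustration and the logic is simplified; ptep_set_access_flags(), update_mmu_cache() and flush_tlb_fix_spurious_fault() are the real interfaces involved. Treat it as a rough sketch, not the exact upstream code:

#include <linux/mm.h>
#include <asm/pgtable.h>

/* Illustrative helper, not an upstream function name. */
static void spurious_fault_fixup_sketch(struct vm_fault *vmf, pte_t entry)
{
	if (ptep_set_access_flags(vmf->vma, vmf->address, vmf->pte, entry,
				  vmf->flags & FAULT_FLAG_WRITE)) {
		/* The PTE really changed: propagate the update to the MMU. */
		update_mmu_cache(vmf->vma, vmf->address, vmf->pte);
	} else if (vmf->flags & FAULT_FLAG_WRITE) {
		/*
		 * Nothing changed: another thread already fixed up the PTE,
		 * so this fault is spurious. The architecture hook decides
		 * whether a TLB flush is still needed; with this patch,
		 * radix skips it unless a coprocessor (nest MMU) is attached
		 * to the address space.
		 */
		flush_tlb_fix_spurious_fault(vmf->vma, vmf->address);
	}
}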
--- a/arch/powerpc/include/asm/book3s/64/tlbflush.h
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush.h
@@ -4,7 +4,7 @@
 
 #define MMU_NO_CONTEXT ~0UL
 
-
+#include <linux/mm_types.h>
 #include <asm/book3s/64/tlbflush-hash.h>
 #include <asm/book3s/64/tlbflush-radix.h>
 
@@ -137,6 +137,16 @@ static inline void flush_all_mm(struct mm_struct *mm)
 #define flush_tlb_page(vma, addr) local_flush_tlb_page(vma, addr)
 #define flush_all_mm(mm) local_flush_all_mm(mm)
 #endif /* CONFIG_SMP */
+
+#define flush_tlb_fix_spurious_fault flush_tlb_fix_spurious_fault
+static inline void flush_tlb_fix_spurious_fault(struct vm_area_struct *vma,
+						unsigned long address)
+{
+	/* See ptep_set_access_flags comment */
+	if (atomic_read(&vma->vm_mm->context.copros) > 0)
+		flush_tlb_page(vma, address);
+}
+
 /*
  * flush the page walk cache for the address
  */
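The define-the-macro-to-its-own-name line above is what keeps the generic fallback from kicking in. As a reminder of what that fallback looks like (it lived in include/asm-generic/pgtable.h around this time; treat the exact location as approximate):

#ifndef flush_tlb_fix_spurious_fault
/*
 * Generic fallback: architectures that do not provide their own
 * flush_tlb_fix_spurious_fault() get a full flush_tlb_page() on a
 * spurious fault. The radix definition above overrides this and only
 * flushes when a coprocessor is attached to the mm.
 */
#define flush_tlb_fix_spurious_fault(vma, address) flush_tlb_page(vma, address)
#endif

With the override in place, a thread that loses the PTE-update race simply returns from the fault and lets the translation hardware refetch the PTE, instead of issuing a tlbie.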