x86/mm: Page size aware flush_tlb_mm_range()
Use the new tlb_get_unmap_shift() to determine the stride of the
INVLPG loop.

Cc: Nick Piggin <npiggin@gmail.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
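For reference, tlb_get_unmap_shift() lives on the generic mmu_gather side; a minimal sketch of the idea, assuming the cleared_ptes/pmds/puds/p4ds tracking bits introduced alongside it, is:

    /* Sketch only: derive the flush stride from which page-table
     * levels the unmap actually cleared. Finest granularity wins. */
    static inline unsigned long tlb_get_unmap_shift(struct mmu_gather *tlb)
    {
            if (tlb->cleared_ptes)
                    return PAGE_SHIFT;      /* 4K entries were zapped */
            if (tlb->cleared_pmds)
                    return PMD_SHIFT;       /* 2M entries */
            if (tlb->cleared_puds)
                    return PUD_SHIFT;       /* 1G entries */
            if (tlb->cleared_p4ds)
                    return P4D_SHIFT;

            return PAGE_SHIFT;              /* conservative default */
    }

With that shift in hand, the x86 tlb_flush() below can pass a stride that matches the mapping size instead of always assuming 4K pages.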
parent a5b966ae42
commit a31acd3ee8
--- a/arch/x86/include/asm/tlb.h
+++ b/arch/x86/include/asm/tlb.h
@@ -6,16 +6,23 @@
 #define tlb_end_vma(tlb, vma) do { } while (0)
 #define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
 
-#define tlb_flush(tlb)							\
-{									\
-	if (!tlb->fullmm && !tlb->need_flush_all)			\
-		flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end, 0UL);	\
-	else								\
-		flush_tlb_mm_range(tlb->mm, 0UL, TLB_FLUSH_ALL, 0UL);	\
-}
+static inline void tlb_flush(struct mmu_gather *tlb);
 
 #include <asm-generic/tlb.h>
 
+static inline void tlb_flush(struct mmu_gather *tlb)
+{
+	unsigned long start = 0UL, end = TLB_FLUSH_ALL;
+	unsigned int stride_shift = tlb_get_unmap_shift(tlb);
+
+	if (!tlb->fullmm && !tlb->need_flush_all) {
+		start = tlb->start;
+		end = tlb->end;
+	}
+
+	flush_tlb_mm_range(tlb->mm, start, end, stride_shift);
+}
+
 /*
  * While x86 architecture in general requires an IPI to perform TLB
  * shootdown, enablement code for several hypervisors overrides
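Note the declare-then-define split for tlb_flush(): struct mmu_gather and tlb_get_unmap_shift() only become visible once asm-generic/tlb.h is included, while that generic header in turn expects tlb_flush() to exist, so the x86 implementation is declared above the #include and defined below it.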
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -547,23 +547,27 @@ struct flush_tlb_info {
 	unsigned long		start;
 	unsigned long		end;
 	u64			new_tlb_gen;
+	unsigned int		stride_shift;
 };
 
 #define local_flush_tlb() __flush_tlb()
 
 #define flush_tlb_mm(mm)	flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL)
 
 #define flush_tlb_range(vma, start, end)			\
-		flush_tlb_mm_range(vma->vm_mm, start, end, vma->vm_flags)
+		flush_tlb_mm_range((vma)->vm_mm, start, end,	\
+				((vma)->vm_flags & VM_HUGETLB)	\
+					? huge_page_shift(hstate_vma(vma)) \
+					: PAGE_SHIFT)
 
 extern void flush_tlb_all(void);
 extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
-				unsigned long end, unsigned long vmflag);
+				unsigned long end, unsigned int stride_shift);
 extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
 
 static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long a)
 {
-	flush_tlb_mm_range(vma->vm_mm, a, a + PAGE_SIZE, VM_NONE);
+	flush_tlb_mm_range(vma->vm_mm, a, a + PAGE_SIZE, PAGE_SHIFT);
 }
 
 void native_flush_tlb_others(const struct cpumask *cpumask,
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -273,7 +273,7 @@ map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
 	map_ldt_struct_to_user(mm);
 
 	va = (unsigned long)ldt_slot_va(slot);
-	flush_tlb_mm_range(mm, va, va + LDT_SLOT_STRIDE, 0);
+	flush_tlb_mm_range(mm, va, va + LDT_SLOT_STRIDE, PAGE_SHIFT);
 
 	ldt->slot = slot;
 	return 0;
--- a/arch/x86/kernel/vm86_32.c
+++ b/arch/x86/kernel/vm86_32.c
@@ -199,7 +199,7 @@ static void mark_screen_rdonly(struct mm_struct *mm)
 	pte_unmap_unlock(pte, ptl);
 out:
 	up_write(&mm->mmap_sem);
-	flush_tlb_mm_range(mm, 0xA0000, 0xA0000 + 32*PAGE_SIZE, 0UL);
+	flush_tlb_mm_range(mm, 0xA0000, 0xA0000 + 32*PAGE_SIZE, PAGE_SHIFT);
 }
 
 
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -528,17 +528,16 @@ static void flush_tlb_func_common(const struct flush_tlb_info *f,
 	    f->new_tlb_gen == local_tlb_gen + 1 &&
 	    f->new_tlb_gen == mm_tlb_gen) {
 		/* Partial flush */
-		unsigned long addr;
-		unsigned long nr_pages = (f->end - f->start) >> PAGE_SHIFT;
+		unsigned long nr_invalidate = (f->end - f->start) >> f->stride_shift;
+		unsigned long addr = f->start;
 
-		addr = f->start;
 		while (addr < f->end) {
 			__flush_tlb_one_user(addr);
-			addr += PAGE_SIZE;
+			addr += 1UL << f->stride_shift;
 		}
 		if (local)
-			count_vm_tlb_events(NR_TLB_LOCAL_FLUSH_ONE, nr_pages);
-		trace_tlb_flush(reason, nr_pages);
+			count_vm_tlb_events(NR_TLB_LOCAL_FLUSH_ONE, nr_invalidate);
+		trace_tlb_flush(reason, nr_invalidate);
 	} else {
 		/* Full flush. */
 		local_flush_tlb();
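The payoff is in the loop above: tearing down a single 2 MiB mapping used to cost 512 INVLPG iterations and now costs one. A standalone illustration of the arithmetic (hypothetical demo code; constants mirror x86):

    #include <stdio.h>

    #define PAGE_SHIFT 12   /* 4K base pages */
    #define PMD_SHIFT  21   /* 2M huge pages */

    int main(void)
    {
            unsigned long start = 0, end = 1UL << PMD_SHIFT; /* one 2M mapping */

            /* prints 512 invalidations vs 1 for the same range */
            printf("page stride: %lu invalidations\n", (end - start) >> PAGE_SHIFT);
            printf("pmd stride:  %lu invalidations\n", (end - start) >> PMD_SHIFT);
            return 0;
    }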
@@ -623,12 +622,13 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
 static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;
 
 void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
-				unsigned long end, unsigned long vmflag)
+				unsigned long end, unsigned int stride_shift)
 {
 	int cpu;
 
 	struct flush_tlb_info info __aligned(SMP_CACHE_BYTES) = {
 		.mm = mm,
+		.stride_shift = stride_shift,
 	};
 
 	cpu = get_cpu();
@@ -638,8 +638,7 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
 
 	/* Should we flush just the requested range? */
 	if ((end != TLB_FLUSH_ALL) &&
-	    !(vmflag & VM_HUGETLB) &&
-	    ((end - start) >> PAGE_SHIFT) <= tlb_single_page_flush_ceiling) {
+	    ((end - start) >> stride_shift) <= tlb_single_page_flush_ceiling) {
 		info.start = start;
 		info.end = end;
 	} else {
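This also changes the ranged-vs-full heuristic: the ceiling of 33 now counts stride-sized entries rather than 4K pages, which is why the blanket !(vmflag & VM_HUGETLB) exclusion can be dropped. For example (hypothetical demo code, x86-like constants):

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PMD_SHIFT  21

    int main(void)
    {
            unsigned long len = 64UL << 20; /* a 64 MiB hugetlb unmap */
            unsigned long ceiling = 33;     /* tlb_single_page_flush_ceiling */

            /* Old accounting: 16384 pages > 33, always a full flush. */
            printf("4K units: %lu (> %lu)\n", len >> PAGE_SHIFT, ceiling);
            /* New accounting: 32 PMD entries <= 33, a ranged flush. */
            printf("2M units: %lu (<= %lu)\n", len >> PMD_SHIFT, ceiling);
            return 0;
    }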
--- a/mm/pgtable-generic.c
+++ b/mm/pgtable-generic.c
@@ -8,6 +8,7 @@
  */
 
 #include <linux/pagemap.h>
+#include <linux/hugetlb.h>
 #include <asm/tlb.h>
 #include <asm-generic/pgtable.h>
 