Merge branch 'tlb/asm-generic' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux into x86/mm
Pull in the generic mmu_gather changes from the ARM64 tree so that we can put x86-specific things on top as well.
commit a5b966ae42

MAINTAINERS | 13

@@ -9698,6 +9698,19 @@ S:	Maintained
 F:	arch/arm/boot/dts/mmp*
 F:	arch/arm/mach-mmp/
 
+MMU GATHER AND TLB INVALIDATION
+M:	Will Deacon <will.deacon@arm.com>
+M:	"Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
+M:	Andrew Morton <akpm@linux-foundation.org>
+M:	Nick Piggin <npiggin@gmail.com>
+M:	Peter Zijlstra <peterz@infradead.org>
+L:	linux-arch@vger.kernel.org
+L:	linux-mm@kvack.org
+S:	Maintained
+F:	arch/*/include/asm/tlb.h
+F:	include/asm-generic/tlb.h
+F:	mm/mmu_gather.c
+
 MN88472 MEDIA DRIVER
 M:	Antti Palosaari <crope@iki.fi>
 L:	linux-media@vger.kernel.org
include/asm-generic/tlb.h

@@ -20,6 +20,8 @@
 #include <asm/pgalloc.h>
 #include <asm/tlbflush.h>
 
+#ifdef CONFIG_MMU
+
 #ifdef CONFIG_HAVE_RCU_TABLE_FREE
 /*
  * Semi RCU freeing of the page directories.
@@ -97,12 +99,30 @@ struct mmu_gather {
 #endif
 	unsigned long		start;
 	unsigned long		end;
-	/* we are in the middle of an operation to clear
-	 * a full mm and can make some optimizations */
-	unsigned int		fullmm : 1,
-	/* we have performed an operation which
-	 * requires a complete flush of the tlb */
-				need_flush_all : 1;
+	/*
+	 * we are in the middle of an operation to clear
+	 * a full mm and can make some optimizations
+	 */
+	unsigned int		fullmm : 1;
+
+	/*
+	 * we have performed an operation which
+	 * requires a complete flush of the tlb
+	 */
+	unsigned int		need_flush_all : 1;
+
+	/*
+	 * we have removed page directories
+	 */
+	unsigned int		freed_tables : 1;
+
+	/*
+	 * at which levels have we cleared entries?
+	 */
+	unsigned int		cleared_ptes : 1;
+	unsigned int		cleared_pmds : 1;
+	unsigned int		cleared_puds : 1;
+	unsigned int		cleared_p4ds : 1;
 
 	struct mmu_gather_batch	*active;
 	struct mmu_gather_batch	local;
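For orientation (not part of this diff): the start/end fields above are maintained by the existing __tlb_adjust_range() helper elsewhere in this header, which the macros further down call just before setting the new cleared_*/freed_tables bits. Reproduced from memory, so treat it as a sketch rather than the authoritative text:

static inline void __tlb_adjust_range(struct mmu_gather *tlb,
				      unsigned long address,
				      unsigned int range_size)
{
	/* Grow the tracked virtual range to cover [address, address + range_size). */
	tlb->start = min(tlb->start, address);
	tlb->end = max(tlb->end, address + range_size);
}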
@@ -118,6 +138,7 @@ void arch_tlb_gather_mmu(struct mmu_gather *tlb,
 void tlb_flush_mmu(struct mmu_gather *tlb);
 void arch_tlb_finish_mmu(struct mmu_gather *tlb,
 			 unsigned long start, unsigned long end, bool force);
+void tlb_flush_mmu_free(struct mmu_gather *tlb);
 extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
 				   int page_size);
 
@@ -137,6 +158,11 @@ static inline void __tlb_reset_range(struct mmu_gather *tlb)
 		tlb->start = TASK_SIZE;
 		tlb->end = 0;
 	}
+	tlb->freed_tables = 0;
+	tlb->cleared_ptes = 0;
+	tlb->cleared_pmds = 0;
+	tlb->cleared_puds = 0;
+	tlb->cleared_p4ds = 0;
 }
 
 static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
@@ -186,6 +212,25 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
 }
 #endif
 
+static inline unsigned long tlb_get_unmap_shift(struct mmu_gather *tlb)
+{
+	if (tlb->cleared_ptes)
+		return PAGE_SHIFT;
+	if (tlb->cleared_pmds)
+		return PMD_SHIFT;
+	if (tlb->cleared_puds)
+		return PUD_SHIFT;
+	if (tlb->cleared_p4ds)
+		return P4D_SHIFT;
+
+	return PAGE_SHIFT;
+}
+
+static inline unsigned long tlb_get_unmap_size(struct mmu_gather *tlb)
+{
+	return 1UL << tlb_get_unmap_shift(tlb);
+}
+
 /*
  * In the case of tlb vma handling, we can optimise these away in the
  * case where we're doing a full MM flush. When we're doing a munmap,
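The point of the cleared_* and freed_tables bits is to let an architecture's tlb_flush() choose both the invalidation granule and whether page-table walk-cache entries must also be dropped. A minimal sketch of such a consumer, modelled loosely on the arm64 code this branch carries; TLB_FLUSH_VMA() and __flush_tlb_range() are arm64 names and are assumptions here, not part of this diff:

static inline void tlb_flush(struct mmu_gather *tlb)
{
	struct vm_area_struct vma = TLB_FLUSH_VMA(tlb->mm, 0);
	unsigned long stride = tlb_get_unmap_size(tlb);	/* smallest level cleared */
	bool last_level = !tlb->freed_tables;		/* walk-cache can be kept */

	if (tlb->fullmm) {
		/* Whole mm is going away; only freed tables still matter. */
		if (!last_level)
			flush_tlb_mm(tlb->mm);
		return;
	}

	__flush_tlb_range(&vma, tlb->start, tlb->end, stride, last_level);
}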
@@ -219,13 +264,19 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
 #define tlb_remove_tlb_entry(tlb, ptep, address)		\
 	do {							\
 		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
+		tlb->cleared_ptes = 1;				\
 		__tlb_remove_tlb_entry(tlb, ptep, address);	\
 	} while (0)
 
-#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)	\
-	do {							\
-		__tlb_adjust_range(tlb, address, huge_page_size(h)); \
-		__tlb_remove_tlb_entry(tlb, ptep, address);	\
+#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)	\
+	do {							\
+		unsigned long _sz = huge_page_size(h);		\
+		__tlb_adjust_range(tlb, address, _sz);		\
+		if (_sz == PMD_SIZE)				\
+			tlb->cleared_pmds = 1;			\
+		else if (_sz == PUD_SIZE)			\
+			tlb->cleared_puds = 1;			\
+		__tlb_remove_tlb_entry(tlb, ptep, address);	\
 	} while (0)
 
 /**
@@ -239,6 +290,7 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
 #define tlb_remove_pmd_tlb_entry(tlb, pmdp, address)		\
 	do {							\
 		__tlb_adjust_range(tlb, address, HPAGE_PMD_SIZE);	\
+		tlb->cleared_pmds = 1;				\
 		__tlb_remove_pmd_tlb_entry(tlb, pmdp, address);	\
 	} while (0)
 
@@ -253,6 +305,7 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
 #define tlb_remove_pud_tlb_entry(tlb, pudp, address)		\
 	do {							\
 		__tlb_adjust_range(tlb, address, HPAGE_PUD_SIZE);	\
+		tlb->cleared_puds = 1;				\
 		__tlb_remove_pud_tlb_entry(tlb, pudp, address);	\
 	} while (0)
 
@@ -278,6 +331,8 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
 #define pte_free_tlb(tlb, ptep, address)			\
 	do {							\
 		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
+		tlb->freed_tables = 1;				\
+		tlb->cleared_pmds = 1;				\
 		__pte_free_tlb(tlb, ptep, address);		\
 	} while (0)
 #endif
@@ -285,7 +340,9 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
 #ifndef pmd_free_tlb
 #define pmd_free_tlb(tlb, pmdp, address)			\
 	do {							\
 		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
+		tlb->freed_tables = 1;				\
+		tlb->cleared_puds = 1;				\
 		__pmd_free_tlb(tlb, pmdp, address);		\
 	} while (0)
 #endif
@@ -295,6 +352,8 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
 #define pud_free_tlb(tlb, pudp, address)			\
 	do {							\
 		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
+		tlb->freed_tables = 1;				\
+		tlb->cleared_p4ds = 1;				\
 		__pud_free_tlb(tlb, pudp, address);		\
 	} while (0)
 #endif
@@ -304,12 +363,15 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
 #ifndef p4d_free_tlb
 #define p4d_free_tlb(tlb, pudp, address)			\
 	do {							\
 		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
+		tlb->freed_tables = 1;				\
 		__p4d_free_tlb(tlb, pudp, address);		\
 	} while (0)
 #endif
 #endif
 
+#endif /* CONFIG_MMU */
+
 #define tlb_migrate_finish(mm) do {} while (0)
 
 #endif /* _ASM_GENERIC__TLB_H */
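Note the off-by-one-level pattern above: pte_free_tlb() sets cleared_pmds, pmd_free_tlb() sets cleared_puds, and so on, because freeing a table at one level works by wiping the entry that points to it at the level above, and it is that level (plus the walk cache, hence freed_tables) that needs invalidating. The existing caller in mm/memory.c shows the shape; reproduced from memory as context, not part of this diff:

static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
			   unsigned long addr)
{
	pgtable_t token = pmd_pgtable(*pmd);

	pmd_clear(pmd);			/* clears a PMD entry...	     */
	pte_free_tlb(tlb, token, addr);	/* ...so this records freed_tables   */
					/* and cleared_pmds before queueing  */
					/* the table page for deferred free  */
	mm_dec_nr_ptes(tlb->mm);
}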
mm/Makefile

@@ -23,9 +23,9 @@ KCOV_INSTRUMENT_vmstat.o := n
 
 mmu-y			:= nommu.o
 mmu-$(CONFIG_MMU)	:= gup.o highmem.o memory.o mincore.o \
-			   mlock.o mmap.o mprotect.o mremap.o msync.o \
-			   page_vma_mapped.o pagewalk.o pgtable-generic.o \
-			   rmap.o vmalloc.o
+			   mlock.o mmap.o mmu_gather.o mprotect.o mremap.o \
+			   msync.o page_vma_mapped.o pagewalk.o \
+			   pgtable-generic.o rmap.o vmalloc.o
 
 
 ifdef CONFIG_CROSS_MEMORY_ATTACH
mm/memory.c | 247

@@ -186,253 +186,6 @@ static void check_sync_rss_stat(struct task_struct *task)
 
 #endif /* SPLIT_RSS_COUNTING */
 
-#ifdef HAVE_GENERIC_MMU_GATHER
-
-static bool tlb_next_batch(struct mmu_gather *tlb)
-{
-	struct mmu_gather_batch *batch;
-
-	batch = tlb->active;
-	if (batch->next) {
-		tlb->active = batch->next;
-		return true;
-	}
-
-	if (tlb->batch_count == MAX_GATHER_BATCH_COUNT)
-		return false;
-
-	batch = (void *)__get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
-	if (!batch)
-		return false;
-
-	tlb->batch_count++;
-	batch->next = NULL;
-	batch->nr   = 0;
-	batch->max  = MAX_GATHER_BATCH;
-
-	tlb->active->next = batch;
-	tlb->active = batch;
-
-	return true;
-}
-
-void arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
-			 unsigned long start, unsigned long end)
-{
-	tlb->mm = mm;
-
-	/* Is it from 0 to ~0? */
-	tlb->fullmm     = !(start | (end+1));
-	tlb->need_flush_all = 0;
-	tlb->local.next = NULL;
-	tlb->local.nr   = 0;
-	tlb->local.max  = ARRAY_SIZE(tlb->__pages);
-	tlb->active     = &tlb->local;
-	tlb->batch_count = 0;
-
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
-	tlb->batch = NULL;
-#endif
-	tlb->page_size = 0;
-
-	__tlb_reset_range(tlb);
-}
-
-static void tlb_flush_mmu_free(struct mmu_gather *tlb)
-{
-	struct mmu_gather_batch *batch;
-
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
-	tlb_table_flush(tlb);
-#endif
-	for (batch = &tlb->local; batch && batch->nr; batch = batch->next) {
-		free_pages_and_swap_cache(batch->pages, batch->nr);
-		batch->nr = 0;
-	}
-	tlb->active = &tlb->local;
-}
-
-void tlb_flush_mmu(struct mmu_gather *tlb)
-{
-	tlb_flush_mmu_tlbonly(tlb);
-	tlb_flush_mmu_free(tlb);
-}
-
-/* tlb_finish_mmu
- *	Called at the end of the shootdown operation to free up any resources
- *	that were required.
- */
-void arch_tlb_finish_mmu(struct mmu_gather *tlb,
-		unsigned long start, unsigned long end, bool force)
-{
-	struct mmu_gather_batch *batch, *next;
-
-	if (force)
-		__tlb_adjust_range(tlb, start, end - start);
-
-	tlb_flush_mmu(tlb);
-
-	/* keep the page table cache within bounds */
-	check_pgt_cache();
-
-	for (batch = tlb->local.next; batch; batch = next) {
-		next = batch->next;
-		free_pages((unsigned long)batch, 0);
-	}
-	tlb->local.next = NULL;
-}
-
-/* __tlb_remove_page
- *	Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)), while
- *	handling the additional races in SMP caused by other CPUs caching valid
- *	mappings in their TLBs. Returns the number of free page slots left.
- *	When out of page slots we must call tlb_flush_mmu().
- *returns true if the caller should flush.
- */
-bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_size)
-{
-	struct mmu_gather_batch *batch;
-
-	VM_BUG_ON(!tlb->end);
-	VM_WARN_ON(tlb->page_size != page_size);
-
-	batch = tlb->active;
-	/*
-	 * Add the page and check if we are full. If so
-	 * force a flush.
-	 */
-	batch->pages[batch->nr++] = page;
-	if (batch->nr == batch->max) {
-		if (!tlb_next_batch(tlb))
-			return true;
-		batch = tlb->active;
-	}
-	VM_BUG_ON_PAGE(batch->nr > batch->max, page);
-
-	return false;
-}
-
-#endif /* HAVE_GENERIC_MMU_GATHER */
-
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
-
-/*
- * See the comment near struct mmu_table_batch.
- */
-
-/*
- * If we want tlb_remove_table() to imply TLB invalidates.
- */
-static inline void tlb_table_invalidate(struct mmu_gather *tlb)
-{
-#ifdef CONFIG_HAVE_RCU_TABLE_INVALIDATE
-	/*
-	 * Invalidate page-table caches used by hardware walkers. Then we still
-	 * need to RCU-sched wait while freeing the pages because software
-	 * walkers can still be in-flight.
-	 */
-	tlb_flush_mmu_tlbonly(tlb);
-#endif
-}
-
-static void tlb_remove_table_smp_sync(void *arg)
-{
-	/* Simply deliver the interrupt */
-}
-
-static void tlb_remove_table_one(void *table)
-{
-	/*
-	 * This isn't an RCU grace period and hence the page-tables cannot be
-	 * assumed to be actually RCU-freed.
-	 *
-	 * It is however sufficient for software page-table walkers that rely on
-	 * IRQ disabling. See the comment near struct mmu_table_batch.
-	 */
-	smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
-	__tlb_remove_table(table);
-}
-
-static void tlb_remove_table_rcu(struct rcu_head *head)
-{
-	struct mmu_table_batch *batch;
-	int i;
-
-	batch = container_of(head, struct mmu_table_batch, rcu);
-
-	for (i = 0; i < batch->nr; i++)
-		__tlb_remove_table(batch->tables[i]);
-
-	free_page((unsigned long)batch);
-}
-
-void tlb_table_flush(struct mmu_gather *tlb)
-{
-	struct mmu_table_batch **batch = &tlb->batch;
-
-	if (*batch) {
-		tlb_table_invalidate(tlb);
-		call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
-		*batch = NULL;
-	}
-}
-
-void tlb_remove_table(struct mmu_gather *tlb, void *table)
-{
-	struct mmu_table_batch **batch = &tlb->batch;
-
-	if (*batch == NULL) {
-		*batch = (struct mmu_table_batch *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
-		if (*batch == NULL) {
-			tlb_table_invalidate(tlb);
-			tlb_remove_table_one(table);
-			return;
-		}
-		(*batch)->nr = 0;
-	}
-
-	(*batch)->tables[(*batch)->nr++] = table;
-	if ((*batch)->nr == MAX_TABLE_BATCH)
-		tlb_table_flush(tlb);
-}
-
-#endif /* CONFIG_HAVE_RCU_TABLE_FREE */
-
-/**
- * tlb_gather_mmu - initialize an mmu_gather structure for page-table tear-down
- * @tlb: the mmu_gather structure to initialize
- * @mm: the mm_struct of the target address space
- * @start: start of the region that will be removed from the page-table
- * @end: end of the region that will be removed from the page-table
- *
- * Called to initialize an (on-stack) mmu_gather structure for page-table
- * tear-down from @mm. The @start and @end are set to 0 and -1
- * respectively when @mm is without users and we're going to destroy
- * the full address space (exit/execve).
- */
-void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
-			unsigned long start, unsigned long end)
-{
-	arch_tlb_gather_mmu(tlb, mm, start, end);
-	inc_tlb_flush_pending(tlb->mm);
-}
-
-void tlb_finish_mmu(struct mmu_gather *tlb,
-		unsigned long start, unsigned long end)
-{
-	/*
-	 * If there are parallel threads are doing PTE changes on same range
-	 * under non-exclusive lock(e.g., mmap_sem read-side) but defer TLB
-	 * flush by batching, a thread has stable TLB entry can fail to flush
-	 * the TLB by observing pte_none|!pte_dirty, for example so flush TLB
-	 * forcefully if we detect parallel PTE batching threads.
-	 */
-	bool force = mm_tlb_flush_nested(tlb->mm);
-
-	arch_tlb_finish_mmu(tlb, start, end, force);
-	dec_tlb_flush_pending(tlb->mm);
-}
-
 /*
  * Note: this doesn't free the actual pages themselves. That
  * has been handled earlier when unmapping all the memory regions.
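The block above lands in the new mm/mmu_gather.c below almost verbatim; the visible differences are that tlb_flush_mmu_free() loses its static (it is now declared in tlb.h) and that the force path in arch_tlb_finish_mmu() also resets the range before adjusting it. For context, the API being moved is driven by callers shaped like mm/mmap.c's unmap_region(); the following is reproduced from memory as an illustration, not part of this diff:

static void unmap_region(struct mm_struct *mm, struct vm_area_struct *vma,
			 struct vm_area_struct *prev,
			 unsigned long start, unsigned long end)
{
	struct vm_area_struct *next = prev ? prev->vm_next : mm->mmap;
	struct mmu_gather tlb;

	lru_add_drain();
	tlb_gather_mmu(&tlb, mm, start, end);	/* init the on-stack gather   */
	update_hiwater_rss(mm);
	unmap_vmas(&tlb, vma, start, end);	/* batch pages and the range  */
	free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
		      next ? next->vm_start : USER_PGTABLES_CEILING);
	tlb_finish_mmu(&tlb, start, end);	/* flush TLB, free the pages  */
}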
mm/mmu_gather.c | 261 (new file)

@@ -0,0 +1,261 @@
+#include <linux/gfp.h>
+#include <linux/highmem.h>
+#include <linux/kernel.h>
+#include <linux/mmdebug.h>
+#include <linux/mm_types.h>
+#include <linux/pagemap.h>
+#include <linux/rcupdate.h>
+#include <linux/smp.h>
+#include <linux/swap.h>
+
+#include <asm/pgalloc.h>
+#include <asm/tlb.h>
+
+#ifdef HAVE_GENERIC_MMU_GATHER
+
+static bool tlb_next_batch(struct mmu_gather *tlb)
+{
+	struct mmu_gather_batch *batch;
+
+	batch = tlb->active;
+	if (batch->next) {
+		tlb->active = batch->next;
+		return true;
+	}
+
+	if (tlb->batch_count == MAX_GATHER_BATCH_COUNT)
+		return false;
+
+	batch = (void *)__get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
+	if (!batch)
+		return false;
+
+	tlb->batch_count++;
+	batch->next = NULL;
+	batch->nr   = 0;
+	batch->max  = MAX_GATHER_BATCH;
+
+	tlb->active->next = batch;
+	tlb->active = batch;
+
+	return true;
+}
+
+void arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
+			 unsigned long start, unsigned long end)
+{
+	tlb->mm = mm;
+
+	/* Is it from 0 to ~0? */
+	tlb->fullmm     = !(start | (end+1));
+	tlb->need_flush_all = 0;
+	tlb->local.next = NULL;
+	tlb->local.nr   = 0;
+	tlb->local.max  = ARRAY_SIZE(tlb->__pages);
+	tlb->active     = &tlb->local;
+	tlb->batch_count = 0;
+
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+	tlb->batch = NULL;
+#endif
+	tlb->page_size = 0;
+
+	__tlb_reset_range(tlb);
+}
+
+void tlb_flush_mmu_free(struct mmu_gather *tlb)
+{
+	struct mmu_gather_batch *batch;
+
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+	tlb_table_flush(tlb);
+#endif
+	for (batch = &tlb->local; batch && batch->nr; batch = batch->next) {
+		free_pages_and_swap_cache(batch->pages, batch->nr);
+		batch->nr = 0;
+	}
+	tlb->active = &tlb->local;
+}
+
+void tlb_flush_mmu(struct mmu_gather *tlb)
+{
+	tlb_flush_mmu_tlbonly(tlb);
+	tlb_flush_mmu_free(tlb);
+}
+
+/* tlb_finish_mmu
+ *	Called at the end of the shootdown operation to free up any resources
+ *	that were required.
+ */
+void arch_tlb_finish_mmu(struct mmu_gather *tlb,
+		unsigned long start, unsigned long end, bool force)
+{
+	struct mmu_gather_batch *batch, *next;
+
+	if (force) {
+		__tlb_reset_range(tlb);
+		__tlb_adjust_range(tlb, start, end - start);
+	}
+
+	tlb_flush_mmu(tlb);
+
+	/* keep the page table cache within bounds */
+	check_pgt_cache();
+
+	for (batch = tlb->local.next; batch; batch = next) {
+		next = batch->next;
+		free_pages((unsigned long)batch, 0);
+	}
+	tlb->local.next = NULL;
+}
+
+/* __tlb_remove_page
+ *	Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)), while
+ *	handling the additional races in SMP caused by other CPUs caching valid
+ *	mappings in their TLBs. Returns the number of free page slots left.
+ *	When out of page slots we must call tlb_flush_mmu().
+ *returns true if the caller should flush.
+ */
+bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_size)
+{
+	struct mmu_gather_batch *batch;
+
+	VM_BUG_ON(!tlb->end);
+	VM_WARN_ON(tlb->page_size != page_size);
+
+	batch = tlb->active;
+	/*
+	 * Add the page and check if we are full. If so
+	 * force a flush.
+	 */
+	batch->pages[batch->nr++] = page;
+	if (batch->nr == batch->max) {
+		if (!tlb_next_batch(tlb))
+			return true;
+		batch = tlb->active;
+	}
+	VM_BUG_ON_PAGE(batch->nr > batch->max, page);
+
+	return false;
+}
+
+#endif /* HAVE_GENERIC_MMU_GATHER */
+
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+
+/*
+ * See the comment near struct mmu_table_batch.
+ */
+
+/*
+ * If we want tlb_remove_table() to imply TLB invalidates.
+ */
+static inline void tlb_table_invalidate(struct mmu_gather *tlb)
+{
+#ifdef CONFIG_HAVE_RCU_TABLE_INVALIDATE
+	/*
+	 * Invalidate page-table caches used by hardware walkers. Then we still
+	 * need to RCU-sched wait while freeing the pages because software
+	 * walkers can still be in-flight.
+	 */
+	tlb_flush_mmu_tlbonly(tlb);
+#endif
+}
+
+static void tlb_remove_table_smp_sync(void *arg)
+{
+	/* Simply deliver the interrupt */
+}
+
+static void tlb_remove_table_one(void *table)
+{
+	/*
+	 * This isn't an RCU grace period and hence the page-tables cannot be
+	 * assumed to be actually RCU-freed.
+	 *
+	 * It is however sufficient for software page-table walkers that rely on
+	 * IRQ disabling. See the comment near struct mmu_table_batch.
+	 */
+	smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
+	__tlb_remove_table(table);
+}
+
+static void tlb_remove_table_rcu(struct rcu_head *head)
+{
+	struct mmu_table_batch *batch;
+	int i;
+
+	batch = container_of(head, struct mmu_table_batch, rcu);
+
+	for (i = 0; i < batch->nr; i++)
+		__tlb_remove_table(batch->tables[i]);
+
+	free_page((unsigned long)batch);
+}
+
+void tlb_table_flush(struct mmu_gather *tlb)
+{
+	struct mmu_table_batch **batch = &tlb->batch;
+
+	if (*batch) {
+		tlb_table_invalidate(tlb);
+		call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
+		*batch = NULL;
+	}
+}
+
+void tlb_remove_table(struct mmu_gather *tlb, void *table)
+{
+	struct mmu_table_batch **batch = &tlb->batch;
+
+	if (*batch == NULL) {
+		*batch = (struct mmu_table_batch *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
+		if (*batch == NULL) {
+			tlb_table_invalidate(tlb);
+			tlb_remove_table_one(table);
+			return;
+		}
+		(*batch)->nr = 0;
+	}
+
+	(*batch)->tables[(*batch)->nr++] = table;
+	if ((*batch)->nr == MAX_TABLE_BATCH)
+		tlb_table_flush(tlb);
+}
+
+#endif /* CONFIG_HAVE_RCU_TABLE_FREE */
+
+/**
+ * tlb_gather_mmu - initialize an mmu_gather structure for page-table tear-down
+ * @tlb: the mmu_gather structure to initialize
+ * @mm: the mm_struct of the target address space
+ * @start: start of the region that will be removed from the page-table
+ * @end: end of the region that will be removed from the page-table
+ *
+ * Called to initialize an (on-stack) mmu_gather structure for page-table
+ * tear-down from @mm. The @start and @end are set to 0 and -1
+ * respectively when @mm is without users and we're going to destroy
+ * the full address space (exit/execve).
+ */
+void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
+			unsigned long start, unsigned long end)
+{
+	arch_tlb_gather_mmu(tlb, mm, start, end);
+	inc_tlb_flush_pending(tlb->mm);
+}
+
+void tlb_finish_mmu(struct mmu_gather *tlb,
+		unsigned long start, unsigned long end)
+{
+	/*
+	 * If there are parallel threads are doing PTE changes on same range
+	 * under non-exclusive lock(e.g., mmap_sem read-side) but defer TLB
+	 * flush by batching, a thread has stable TLB entry can fail to flush
+	 * the TLB by observing pte_none|!pte_dirty, for example so flush TLB
+	 * forcefully if we detect parallel PTE batching threads.
+	 */
+	bool force = mm_tlb_flush_nested(tlb->mm);
+
+	arch_tlb_finish_mmu(tlb, start, end, force);
+	dec_tlb_flush_pending(tlb->mm);
+}
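The CONFIG_HAVE_RCU_TABLE_FREE half of the new file exists for lockless page-table walkers. A minimal sketch, entirely my illustration and not kernel code, of the IRQs-off pattern it protects: disabling interrupts blocks both the call_rcu_sched() grace period and the IPI broadcast in tlb_remove_table_one(), so any table page such a walker can still reach has not yet been freed and reused:

static bool addr_is_mapped(struct mm_struct *mm, unsigned long addr)
{
	unsigned long flags;
	bool mapped = false;
	pgd_t *pgd;

	local_irq_save(flags);		/* holds off both free paths	*/
	pgd = pgd_offset(mm, addr);
	if (!pgd_none(*pgd)) {
		/*
		 * Descend p4d/pud/pmd/pte here: entries may go stale
		 * under us, but the table pages they point at cannot
		 * be freed while interrupts stay disabled on this CPU.
		 */
		mapped = true;
	}
	local_irq_restore(flags);

	return mapped;
}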