/* include/asm-generic/tlb.h
 *
 *	Generic TLB shootdown code
 *
 * Copyright 2001 Red Hat, Inc.
 * Based on code from mm/memory.c Copyright Linus Torvalds and others.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifndef _ASM_GENERIC__TLB_H
#define _ASM_GENERIC__TLB_H

#include <linux/swap.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

/*
 * For UP we don't need to worry about TLB flush
 * and page free order so much..
 */
#ifdef CONFIG_SMP
  #ifdef ARCH_FREE_PTE_NR
    #define FREE_PTE_NR	ARCH_FREE_PTE_NR
  #else
    #define FREE_PTE_NR	506
  #endif
  #define tlb_fast_mode(tlb) ((tlb)->nr == ~0U)
#else
  #define FREE_PTE_NR	1
  #define tlb_fast_mode(tlb) 1
#endif
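
/*
 * Illustrative override (hypothetical value): an architecture that wants a
 * different SMP batch size can define ARCH_FREE_PTE_NR before this header
 * is pulled in, along the lines of
 *
 *	#define ARCH_FREE_PTE_NR	56
 *
 * and the pages[] array below will be sized accordingly.
 */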

/* struct mmu_gather is an opaque type used by the mm code for passing around
 * any data needed by arch specific code for tlb_remove_page.
 */
struct mmu_gather {
	struct mm_struct	*mm;
	unsigned int		nr;		/* set to ~0U means fast mode */
	unsigned int		need_flush;	/* Really unmapped some ptes? */
	unsigned int		fullmm;		/* non-zero means full mm flush */
	struct page		*pages[FREE_PTE_NR];
};

/* Users of the generic TLB shootdown code must declare this storage space. */
DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
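
/*
 * A minimal sketch of what that declaration implies: each architecture that
 * uses this header provides the matching definition in its mm setup code,
 * along the lines of
 *
 *	DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 */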

/* tlb_gather_mmu
 * Return a pointer to an initialized struct mmu_gather.
 */
static inline struct mmu_gather *
tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
{
	struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);

	tlb->mm = mm;

	/* Use fast mode if only one CPU is online */
	tlb->nr = num_online_cpus() > 1 ? 0U : ~0U;

	tlb->fullmm = full_mm_flush;

	return tlb;
}
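
/*
 * Typical shootdown lifecycle as driven by the mm core (illustrative sketch;
 * the real callers live in mm/memory.c and mm/mmap.c):
 *
 *	struct mmu_gather *tlb;
 *
 *	tlb = tlb_gather_mmu(mm, 0);
 *	for each pte being torn down {
 *		tlb_remove_tlb_entry(tlb, ptep, address);
 *		tlb_remove_page(tlb, page);
 *	}
 *	tlb_finish_mmu(tlb, start, end);
 *
 * Note that get_cpu_var() in tlb_gather_mmu() disables preemption, which
 * tlb_finish_mmu() re-enables via put_cpu_var().
 */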

static inline void
tlb_flush_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	if (!tlb->need_flush)
		return;
	tlb->need_flush = 0;
	tlb_flush(tlb);
	/* In fast mode pages were freed as they were gathered, so only
	 * batched (SMP) mode has anything left to release here.  The
	 * start/end arguments are unused by the generic implementation. */
	if (!tlb_fast_mode(tlb)) {
		free_pages_and_swap_cache(tlb->pages, tlb->nr);
		tlb->nr = 0;
	}
}

/* tlb_finish_mmu
 * Called at the end of the shootdown operation to free up any resources
 * that were required.
 */
static inline void
tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	tlb_flush_mmu(tlb, start, end);

	/* keep the page table cache within bounds */
	check_pgt_cache();

	put_cpu_var(mmu_gathers);
}

/* tlb_remove_page
 * Must perform the equivalent of __free_pte(pte_get_and_clear(ptep)), while
 * handling the additional races in SMP caused by other CPUs caching valid
 * mappings in their TLBs.
 */
static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	tlb->need_flush = 1;
	if (tlb_fast_mode(tlb)) {
		free_page_and_swap_cache(page);
		return;
	}
	tlb->pages[tlb->nr++] = page;
	if (tlb->nr >= FREE_PTE_NR)
		tlb_flush_mmu(tlb, 0, 0);
}

/**
 * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
 *
 * Record the fact that ptes were really unmapped in ->need_flush, so we can
 * later optimise away the tlb invalidate.  This helps when userspace is
 * unmapping already-unmapped pages, which happens quite a lot.
 */
#define tlb_remove_tlb_entry(tlb, ptep, address)		\
	do {							\
		tlb->need_flush = 1;				\
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)
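
/*
 * Architectures that don't need to track the unmapped range can stub out
 * the hook; a minimal sketch of such a definition (provided by the arch,
 * not by this header) would be
 *
 *	#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
 */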

#define pte_free_tlb(tlb, ptep, address)		\
	do {						\
		tlb->need_flush = 1;			\
		__pte_free_tlb(tlb, ptep, address);	\
	} while (0)
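
/*
 * Sketch of a matching arch hook (assumes the architecture frees page table
 * pages through the same batching machinery and keeps no extra per-page
 * state; real implementations may also run a destructor first):
 *
 *	#define __pte_free_tlb(tlb, ptep, address)	\
 *		tlb_remove_page((tlb), (ptep))
 */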

#ifndef __ARCH_HAS_4LEVEL_HACK
#define pud_free_tlb(tlb, pudp, address)		\
	do {						\
		tlb->need_flush = 1;			\
		__pud_free_tlb(tlb, pudp, address);	\
	} while (0)
#endif

#define pmd_free_tlb(tlb, pmdp, address)		\
	do {						\
		tlb->need_flush = 1;			\
		__pmd_free_tlb(tlb, pmdp, address);	\
	} while (0)

#define tlb_migrate_finish(mm) do {} while (0)

#endif /* _ASM_GENERIC__TLB_H */