mm: allow arch code to control the user page table ceiling

On architectures where a pgd entry may be shared between user and kernel
(e.g.  ARM+LPAE), freeing page tables needs a ceiling other than 0.
This patch introduces a generic USER_PGTABLES_CEILING that arch code can
override.  It is the responsibility of the arch code setting the ceiling
to ensure the complete freeing of the page tables (usually in
pgd_free()).

[catalin.marinas@arm.com: commit log; shift_arg_pages(), asm-generic/pgtables.h changes]
Signed-off-by: Hugh Dickins <hughd@google.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Cc: Russell King <linux@arm.linux.org.uk>
Cc: <stable@vger.kernel.org>	[3.3+]
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 6ee8630e02
parent acb6d558f4
Author:    Hugh Dickins
Date:      2013-04-29 15:07:44 -07:00
Committer: Linus Torvalds
3 changed files with 14 additions and 4 deletions
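
To make the arch-side contract concrete, here is a minimal sketch (not part of this patch) of how an architecture whose pgd entries are shared between user and kernel, such as ARM with LPAE, might override the ceiling in its own pgtable header. The use of TASK_SIZE and the header location are assumptions drawn from the commit log, not from this diff:

/*
 * Hypothetical arch override, e.g. somewhere in the arch's
 * asm/pgtable.h (illustration only, not part of this patch).
 * With the ceiling raised to TASK_SIZE, free_pgtables() and
 * free_pgd_range() stop short of pgd entries shared with the
 * kernel; the arch's pgd_free() then remains responsible for
 * freeing whatever user page tables are left, as the commit
 * log requires.
 */
#define USER_PGTABLES_CEILING	TASK_SIZE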

diff --git a/fs/exec.c b/fs/exec.c

@@ -613,7 +613,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
 		 * when the old and new regions overlap clear from new_end.
 		 */
 		free_pgd_range(&tlb, new_end, old_end, new_end,
-			vma->vm_next ? vma->vm_next->vm_start : 0);
+			vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING);
 	} else {
 		/*
 		 * otherwise, clean from old_start; this is done to not touch
@@ -622,7 +622,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
 		 * for the others its just a little faster.
 		 */
 		free_pgd_range(&tlb, old_start, old_end, new_end,
-			vma->vm_next ? vma->vm_next->vm_start : 0);
+			vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING);
 	}
 	tlb_finish_mmu(&tlb, new_end, old_end);

diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h

@@ -7,6 +7,16 @@
 #include <linux/mm_types.h>
 #include <linux/bug.h>
 
+/*
+ * On almost all architectures and configurations, 0 can be used as the
+ * upper ceiling to free_pgtables(): on many architectures it has the same
+ * effect as using TASK_SIZE.  However, there is one configuration which
+ * must impose a more careful limit, to avoid freeing kernel pgtables.
+ */
+#ifndef USER_PGTABLES_CEILING
+#define USER_PGTABLES_CEILING	0UL
+#endif
+
 #ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
 extern int ptep_set_access_flags(struct vm_area_struct *vma,
 				 unsigned long address, pte_t *ptep,
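
For readers new to free_pgtables()/free_pgd_range(), a simplified sketch of what the ceiling argument means. This is an illustration of the semantics only; the helper name pgtable_free_end() is made up for this sketch, and the real code in mm/memory.c additionally aligns the bound to page-table boundaries:

/*
 * Illustration only: a zero ceiling imposes no upper bound on
 * page-table freeing, while a non-zero ceiling caps it, which is
 * how pgd entries shared with the kernel are protected from being
 * freed.  pgtable_free_end() is a hypothetical name, not kernel code.
 */
static inline unsigned long pgtable_free_end(unsigned long end,
					     unsigned long ceiling)
{
	if (ceiling && end > ceiling)
		end = ceiling;
	return end;
}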

diff --git a/mm/mmap.c b/mm/mmap.c

@@ -2302,7 +2302,7 @@ static void unmap_region(struct mm_struct *mm,
 	update_hiwater_rss(mm);
 	unmap_vmas(&tlb, vma, start, end);
 	free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
-				 next ? next->vm_start : 0);
+				 next ? next->vm_start : USER_PGTABLES_CEILING);
 	tlb_finish_mmu(&tlb, start, end);
 }
 
@@ -2682,7 +2682,7 @@ void exit_mmap(struct mm_struct *mm)
 	/* Use -1 here to ensure all VMAs in the mm are unmapped */
 	unmap_vmas(&tlb, vma, 0, -1);
 
-	free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, 0);
+	free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, USER_PGTABLES_CEILING);
 	tlb_finish_mmu(&tlb, 0, -1);
 
 	/*