mm: remove double indirection on tlb parameter to free_pgd_range() & Co
The double indirection here is not needed anywhere and hence (at least) confusing.

Signed-off-by: Jan Beulich <jbeulich@novell.com>
Cc: Hugh Dickins <hugh@veritas.com>
Cc: Nick Piggin <npiggin@suse.de>
Cc: Christoph Lameter <cl@linux-foundation.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: "Luck, Tony" <tony.luck@intel.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: "David S. Miller" <davem@davemloft.net>
Acked-by: Jeremy Fitzhardinge <jeremy@goop.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
a352894d07
commit
42b7772812
|
@@ -112,7 +112,7 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address, pmd_t *pmd, int wri
|
||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
void hugetlb_free_pgd_range(struct mmu_gather **tlb,
|
void hugetlb_free_pgd_range(struct mmu_gather *tlb,
|
||||||
unsigned long addr, unsigned long end,
|
unsigned long addr, unsigned long end,
|
||||||
unsigned long floor, unsigned long ceiling)
|
unsigned long floor, unsigned long ceiling)
|
||||||
{
|
{
|
||||||
|
|
|
@@ -255,7 +255,7 @@ static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
|
||||||
*
|
*
|
||||||
* Must be called with pagetable lock held.
|
* Must be called with pagetable lock held.
|
||||||
*/
|
*/
|
||||||
void hugetlb_free_pgd_range(struct mmu_gather **tlb,
|
void hugetlb_free_pgd_range(struct mmu_gather *tlb,
|
||||||
unsigned long addr, unsigned long end,
|
unsigned long addr, unsigned long end,
|
||||||
unsigned long floor, unsigned long ceiling)
|
unsigned long floor, unsigned long ceiling)
|
||||||
{
|
{
|
||||||
|
@@ -315,13 +315,13 @@ void hugetlb_free_pgd_range(struct mmu_gather **tlb,
|
||||||
return;
|
return;
|
||||||
|
|
||||||
start = addr;
|
start = addr;
|
||||||
pgd = pgd_offset((*tlb)->mm, addr);
|
pgd = pgd_offset(tlb->mm, addr);
|
||||||
do {
|
do {
|
||||||
BUG_ON(get_slice_psize((*tlb)->mm, addr) != mmu_huge_psize);
|
BUG_ON(get_slice_psize(tlb->mm, addr) != mmu_huge_psize);
|
||||||
next = pgd_addr_end(addr, end);
|
next = pgd_addr_end(addr, end);
|
||||||
if (pgd_none_or_clear_bad(pgd))
|
if (pgd_none_or_clear_bad(pgd))
|
||||||
continue;
|
continue;
|
||||||
hugetlb_free_pud_range(*tlb, pgd, addr, next, floor, ceiling);
|
hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling);
|
||||||
} while (pgd++, addr = next, addr != end);
|
} while (pgd++, addr = next, addr != end);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@@ -541,7 +541,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
|
||||||
/*
|
/*
|
||||||
* when the old and new regions overlap clear from new_end.
|
* when the old and new regions overlap clear from new_end.
|
||||||
*/
|
*/
|
||||||
free_pgd_range(&tlb, new_end, old_end, new_end,
|
free_pgd_range(tlb, new_end, old_end, new_end,
|
||||||
vma->vm_next ? vma->vm_next->vm_start : 0);
|
vma->vm_next ? vma->vm_next->vm_start : 0);
|
||||||
} else {
|
} else {
|
||||||
/*
|
/*
|
||||||
|
@@ -550,7 +550,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
|
||||||
* have constraints on va-space that make this illegal (IA64) -
|
* have constraints on va-space that make this illegal (IA64) -
|
||||||
* for the others its just a little faster.
|
* for the others its just a little faster.
|
||||||
*/
|
*/
|
||||||
free_pgd_range(&tlb, old_start, old_end, new_end,
|
free_pgd_range(tlb, old_start, old_end, new_end,
|
||||||
vma->vm_next ? vma->vm_next->vm_start : 0);
|
vma->vm_next ? vma->vm_next->vm_start : 0);
|
||||||
}
|
}
|
||||||
tlb_finish_mmu(tlb, new_end, old_end);
|
tlb_finish_mmu(tlb, new_end, old_end);
|
||||||
|
|
|
@@ -4,7 +4,7 @@
|
||||||
#include <asm/page.h>
|
#include <asm/page.h>
|
||||||
|
|
||||||
|
|
||||||
void hugetlb_free_pgd_range(struct mmu_gather **tlb, unsigned long addr,
|
void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
|
||||||
unsigned long end, unsigned long floor,
|
unsigned long end, unsigned long floor,
|
||||||
unsigned long ceiling);
|
unsigned long ceiling);
|
||||||
|
|
||||||
|
|
|
@@ -7,7 +7,7 @@
|
||||||
int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
|
int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
|
||||||
unsigned long len);
|
unsigned long len);
|
||||||
|
|
||||||
void hugetlb_free_pgd_range(struct mmu_gather **tlb, unsigned long addr,
|
void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
|
||||||
unsigned long end, unsigned long floor,
|
unsigned long end, unsigned long floor,
|
||||||
unsigned long ceiling);
|
unsigned long ceiling);
|
||||||
|
|
||||||
|
|
|
@@ -26,7 +26,7 @@ static inline int prepare_hugepage_range(unsigned long addr, unsigned long len)
|
||||||
static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm) {
|
static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm) {
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void hugetlb_free_pgd_range(struct mmu_gather **tlb,
|
static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
|
||||||
unsigned long addr, unsigned long end,
|
unsigned long addr, unsigned long end,
|
||||||
unsigned long floor,
|
unsigned long floor,
|
||||||
unsigned long ceiling)
|
unsigned long ceiling)
|
||||||
|
|
|
@@ -31,7 +31,7 @@ static inline int prepare_hugepage_range(unsigned long addr, unsigned long len)
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void hugetlb_free_pgd_range(struct mmu_gather **tlb,
|
static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
|
||||||
unsigned long addr, unsigned long end,
|
unsigned long addr, unsigned long end,
|
||||||
unsigned long floor,
|
unsigned long floor,
|
||||||
unsigned long ceiling)
|
unsigned long ceiling)
|
||||||
|
|
|
@@ -26,7 +26,7 @@ static inline int prepare_hugepage_range(unsigned long addr, unsigned long len)
|
||||||
static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm) {
|
static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm) {
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void hugetlb_free_pgd_range(struct mmu_gather **tlb,
|
static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
|
||||||
unsigned long addr, unsigned long end,
|
unsigned long addr, unsigned long end,
|
||||||
unsigned long floor,
|
unsigned long floor,
|
||||||
unsigned long ceiling)
|
unsigned long ceiling)
|
||||||
|
|
|
@@ -769,10 +769,8 @@ struct mm_walk {
|
||||||
|
|
||||||
int walk_page_range(unsigned long addr, unsigned long end,
|
int walk_page_range(unsigned long addr, unsigned long end,
|
||||||
struct mm_walk *walk);
|
struct mm_walk *walk);
|
||||||
void free_pgd_range(struct mmu_gather **tlb, unsigned long addr,
|
void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
|
||||||
unsigned long end, unsigned long floor, unsigned long ceiling);
|
unsigned long end, unsigned long floor, unsigned long ceiling);
|
||||||
void free_pgtables(struct mmu_gather **tlb, struct vm_area_struct *start_vma,
|
|
||||||
unsigned long floor, unsigned long ceiling);
|
|
||||||
int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
|
int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
|
||||||
struct vm_area_struct *vma);
|
struct vm_area_struct *vma);
|
||||||
void unmap_mapping_range(struct address_space *mapping,
|
void unmap_mapping_range(struct address_space *mapping,
|
||||||
|
|
|
@@ -13,6 +13,9 @@
|
||||||
|
|
||||||
#include <linux/mm.h>
|
#include <linux/mm.h>
|
||||||
|
|
||||||
|
void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
|
||||||
|
unsigned long floor, unsigned long ceiling);
|
||||||
|
|
||||||
static inline void set_page_count(struct page *page, int v)
|
static inline void set_page_count(struct page *page, int v)
|
||||||
{
|
{
|
||||||
atomic_set(&page->_count, v);
|
atomic_set(&page->_count, v);
|
||||||
|
|
10
mm/memory.c
10
mm/memory.c
|
@@ -61,6 +61,8 @@
|
||||||
#include <linux/swapops.h>
|
#include <linux/swapops.h>
|
||||||
#include <linux/elf.h>
|
#include <linux/elf.h>
|
||||||
|
|
||||||
|
#include "internal.h"
|
||||||
|
|
||||||
#ifndef CONFIG_NEED_MULTIPLE_NODES
|
#ifndef CONFIG_NEED_MULTIPLE_NODES
|
||||||
/* use the per-pgdat data instead for discontigmem - mbligh */
|
/* use the per-pgdat data instead for discontigmem - mbligh */
|
||||||
unsigned long max_mapnr;
|
unsigned long max_mapnr;
|
||||||
|
@@ -211,7 +213,7 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
|
||||||
*
|
*
|
||||||
* Must be called with pagetable lock held.
|
* Must be called with pagetable lock held.
|
||||||
*/
|
*/
|
||||||
void free_pgd_range(struct mmu_gather **tlb,
|
void free_pgd_range(struct mmu_gather *tlb,
|
||||||
unsigned long addr, unsigned long end,
|
unsigned long addr, unsigned long end,
|
||||||
unsigned long floor, unsigned long ceiling)
|
unsigned long floor, unsigned long ceiling)
|
||||||
{
|
{
|
||||||
|
@@ -262,16 +264,16 @@ void free_pgd_range(struct mmu_gather **tlb,
|
||||||
return;
|
return;
|
||||||
|
|
||||||
start = addr;
|
start = addr;
|
||||||
pgd = pgd_offset((*tlb)->mm, addr);
|
pgd = pgd_offset(tlb->mm, addr);
|
||||||
do {
|
do {
|
||||||
next = pgd_addr_end(addr, end);
|
next = pgd_addr_end(addr, end);
|
||||||
if (pgd_none_or_clear_bad(pgd))
|
if (pgd_none_or_clear_bad(pgd))
|
||||||
continue;
|
continue;
|
||||||
free_pud_range(*tlb, pgd, addr, next, floor, ceiling);
|
free_pud_range(tlb, pgd, addr, next, floor, ceiling);
|
||||||
} while (pgd++, addr = next, addr != end);
|
} while (pgd++, addr = next, addr != end);
|
||||||
}
|
}
|
||||||
|
|
||||||
void free_pgtables(struct mmu_gather **tlb, struct vm_area_struct *vma,
|
void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
|
||||||
unsigned long floor, unsigned long ceiling)
|
unsigned long floor, unsigned long ceiling)
|
||||||
{
|
{
|
||||||
while (vma) {
|
while (vma) {
|
||||||
|
|
|
@@ -32,6 +32,8 @@
|
||||||
#include <asm/tlb.h>
|
#include <asm/tlb.h>
|
||||||
#include <asm/mmu_context.h>
|
#include <asm/mmu_context.h>
|
||||||
|
|
||||||
|
#include "internal.h"
|
||||||
|
|
||||||
#ifndef arch_mmap_check
|
#ifndef arch_mmap_check
|
||||||
#define arch_mmap_check(addr, len, flags) (0)
|
#define arch_mmap_check(addr, len, flags) (0)
|
||||||
#endif
|
#endif
|
||||||
|
@@ -1763,7 +1765,7 @@ static void unmap_region(struct mm_struct *mm,
|
||||||
update_hiwater_rss(mm);
|
update_hiwater_rss(mm);
|
||||||
unmap_vmas(&tlb, vma, start, end, &nr_accounted, NULL);
|
unmap_vmas(&tlb, vma, start, end, &nr_accounted, NULL);
|
||||||
vm_unacct_memory(nr_accounted);
|
vm_unacct_memory(nr_accounted);
|
||||||
free_pgtables(&tlb, vma, prev? prev->vm_end: FIRST_USER_ADDRESS,
|
free_pgtables(tlb, vma, prev? prev->vm_end: FIRST_USER_ADDRESS,
|
||||||
next? next->vm_start: 0);
|
next? next->vm_start: 0);
|
||||||
tlb_finish_mmu(tlb, start, end);
|
tlb_finish_mmu(tlb, start, end);
|
||||||
}
|
}
|
||||||
|
@@ -2063,7 +2065,7 @@ void exit_mmap(struct mm_struct *mm)
|
||||||
/* Use -1 here to ensure all VMAs in the mm are unmapped */
|
/* Use -1 here to ensure all VMAs in the mm are unmapped */
|
||||||
end = unmap_vmas(&tlb, vma, 0, -1, &nr_accounted, NULL);
|
end = unmap_vmas(&tlb, vma, 0, -1, &nr_accounted, NULL);
|
||||||
vm_unacct_memory(nr_accounted);
|
vm_unacct_memory(nr_accounted);
|
||||||
free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, 0);
|
free_pgtables(tlb, vma, FIRST_USER_ADDRESS, 0);
|
||||||
tlb_finish_mmu(tlb, 0, end);
|
tlb_finish_mmu(tlb, 0, end);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
|
Loading…
Reference in New Issue