mm: consolidate pgtable_cache_init() and pgd_cache_init()
Both pgtable_cache_init() and pgd_cache_init() are used to initialize kmem
cache for page table allocations on several architectures that do not use
PAGE_SIZE tables for one or more levels of the page table hierarchy.

Most architectures do not implement these functions and use the __weak
default NOP implementation of pgd_cache_init().  Since there is no such
default for pgtable_cache_init(), its empty stub is duplicated among most
architectures.

Rename the definitions of pgd_cache_init() to pgtable_cache_init() and
drop the empty stubs of pgtable_cache_init().

Link: http://lkml.kernel.org/r/1566457046-22637-1-git-send-email-rppt@linux.ibm.com
Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
Acked-by: Will Deacon <will@kernel.org>		[arm64]
Acked-by: Thomas Gleixner <tglx@linutronix.de>	[x86]
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Matthew Wilcox <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 782de70c42
parent 1b9a9d8564
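For readers who want to see the mechanism the message relies on: a __weak symbol acts as a link-time default that any architecture can override with an ordinary (strong) definition of the same name. Below is a minimal standalone sketch of that pattern in plain C, using the GCC/Clang weak attribute that the kernel's __weak macro expands to. It is illustrative only, not kernel code, and the companion file name strong.c is hypothetical.

	/* weak_default.c - standalone sketch of the __weak override pattern.
	 * Build alone:         gcc weak_default.c            -> prints the default
	 * Build with strong.c: gcc weak_default.c strong.c   -> prints the override
	 */
	#include <stdio.h>

	/* Link-time default, analogous to the __weak NOP in init/main.c. */
	__attribute__((weak)) void pgtable_cache_init(void)
	{
		puts("weak default: no page table caches to initialise");
	}

	int main(void)
	{
		pgtable_cache_init();
		return 0;
	}

	/* strong.c (hypothetical): a plain definition the linker prefers
	 * over the weak one:
	 *
	 * #include <stdio.h>
	 * void pgtable_cache_init(void)
	 * {
	 *	puts("arch override: set up kmem caches here");
	 * }
	 */

Because the linker silently picks a strong definition over a weak one, init/main.c can ship a single NOP while arm64 and x86 supply real implementations, as the hunks below show.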
@@ -359,11 +359,6 @@ extern void paging_init(void);
 
 #include <asm-generic/pgtable.h>
 
-/*
- * No page table caches to initialise
- */
-#define pgtable_cache_init() do { } while (0)
-
 /* We have our own get_unmapped_area to cope with ADDR_LIMIT_32BIT. */
 #define HAVE_ARCH_UNMAPPED_AREA
 
@@ -395,11 +395,6 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
 /* to cope with aliasing VIPT cache */
 #define HAVE_ARCH_UNMAPPED_AREA
 
-/*
- * No page table caches to initialise
- */
-#define pgtable_cache_init() do { } while (0)
-
 #endif /* __ASSEMBLY__ */
 
 #endif
@@ -70,11 +70,6 @@ typedef pte_t *pte_addr_t;
  */
 extern unsigned int kobjsize(const void *objp);
 
-/*
- * No page table caches to initialise.
- */
-#define pgtable_cache_init() do { } while (0)
-
 /*
  * All 32bit addresses are effectively valid for vmalloc...
  * Sort of meaningless for non-VM targets.
@@ -368,8 +368,6 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 #define HAVE_ARCH_UNMAPPED_AREA
 #define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
 
-#define pgtable_cache_init() do { } while (0)
-
 #endif /* !__ASSEMBLY__ */
 
 #endif /* CONFIG_MMU */
@@ -861,8 +861,6 @@ extern int kern_addr_valid(unsigned long addr);
 
 #include <asm-generic/pgtable.h>
 
-static inline void pgtable_cache_init(void) { }
-
 /*
  * On AArch64, the cache coherency is handled via the set_pte_at() function.
  */
@@ -35,7 +35,7 @@ void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 		kmem_cache_free(pgd_cache, pgd);
 }
 
-void __init pgd_cache_init(void)
+void __init pgtable_cache_init(void)
 {
 	if (PGD_SIZE == PAGE_SIZE)
 		return;
@@ -59,11 +59,6 @@ extern unsigned long empty_zero_page;
 
 #define swapper_pg_dir ((pgd_t *) 0)
 
-/*
- * No page table caches to initialise
- */
-#define pgtable_cache_init() do { } while (0)
-
 /*
  * c6x is !MMU, so define the simpliest implementation
  */
@@ -296,11 +296,6 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
 /* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
 #define kern_addr_valid(addr) (1)
 
-/*
- * No page table caches to initialise
- */
-#define pgtable_cache_init() do {} while (0)
-
 #define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
 	remap_pfn_range(vma, vaddr, pfn, size, prot)
 
@@ -4,7 +4,6 @@
 #define __ARCH_USE_5LEVEL_HACK
 #include <asm-generic/pgtable-nopud.h>
 #include <asm-generic/pgtable.h>
-#define pgtable_cache_init() do { } while (0)
 extern void paging_init(void);
 #define PAGE_NONE __pgprot(0) /* these mean nothing to NO_MM */
 #define PAGE_SHARED __pgprot(0) /* these mean nothing to NO_MM */
@@ -34,11 +33,6 @@ static inline int pte_file(pte_t pte) { return 0; }
 extern unsigned int kobjsize(const void *objp);
 extern int is_in_rom(unsigned long);
 
-/*
- * No page table caches to initialise
- */
-#define pgtable_cache_init() do { } while (0)
-
 /*
  * All 32bit addresses are effectively valid for vmalloc...
  * Sort of meaningless for non-VM targets.
@@ -431,9 +431,6 @@ static inline int pte_exec(pte_t pte)
 
 #define __pte_offset(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
 
-/* I think this is in case we have page table caches; needed by init/main.c */
-#define pgtable_cache_init() do { } while (0)
-
 /*
  * Swap/file PTE definitions. If _PAGE_PRESENT is zero, the rest of the PTE is
  * interpreted as swap information. The remaining free bits are interpreted as
@@ -3,5 +3,5 @@
 # Makefile for Hexagon memory management subsystem
 #
 
-obj-y := init.o pgalloc.o ioremap.o uaccess.o vm_fault.o cache.o
+obj-y := init.o ioremap.o uaccess.o vm_fault.o cache.o
 obj-y += copy_to_user.o copy_from_user.o strnlen_user.o vm_tlb.o
@@ -1,10 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
- */
-
-#include <linux/init.h>
-
-void __init pgtable_cache_init(void)
-{
-}
@@ -566,11 +566,6 @@ extern struct page *zero_page_memmap_ptr;
 #define KERNEL_TR_PAGE_SHIFT _PAGE_SIZE_64M
 #define KERNEL_TR_PAGE_SIZE (1 << KERNEL_TR_PAGE_SHIFT)
 
-/*
- * No page table caches to initialise
- */
-#define pgtable_cache_init() do { } while (0)
-
 /* These tell get_user_pages() that the first gate page is accessible from user-level. */
 #define FIXADDR_USER_START GATE_ADDR
 #ifdef HAVE_BUGGY_SEGREL
@@ -176,9 +176,4 @@ pgprot_t pgprot_dmacoherent(pgprot_t prot);
 #include <asm-generic/pgtable.h>
 #endif /* !__ASSEMBLY__ */
 
-/*
- * No page table caches to initialise
- */
-#define pgtable_cache_init() do { } while (0)
-
 #endif /* _M68K_PGTABLE_H */
@@ -44,11 +44,6 @@ extern void paging_init(void);
  */
 #define ZERO_PAGE(vaddr) (virt_to_page(0))
 
-/*
- * No page table caches to initialise.
- */
-#define pgtable_cache_init() do { } while (0)
-
 /*
  * All 32bit addresses are effectively valid for vmalloc...
  * Sort of meaningless for non-VM targets.
@@ -46,8 +46,6 @@ extern int mem_init_done;
 
 #define swapper_pg_dir ((pgd_t *) NULL)
 
-#define pgtable_cache_init() do {} while (0)
-
 #define arch_enter_lazy_cpu_mode() do {} while (0)
 
 #define pgprot_noncached_wc(prot) prot
@@ -526,11 +524,6 @@ extern unsigned long iopa(unsigned long addr);
 /* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
 #define kern_addr_valid(addr) (1)
 
-/*
- * No page table caches to initialise
- */
-#define pgtable_cache_init() do { } while (0)
-
 void do_page_fault(struct pt_regs *regs, unsigned long address,
 		   unsigned long error_code);
 
@@ -661,9 +661,4 @@ pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 #define HAVE_ARCH_UNMAPPED_AREA
 #define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
 
-/*
- * No page table caches to initialise
- */
-#define pgtable_cache_init() do { } while (0)
-
 #endif /* _ASM_PGTABLE_H */
@@ -403,8 +403,6 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
  * into virtual address `from'
  */
 
-#define pgtable_cache_init() do { } while (0)
-
 #endif /* !__ASSEMBLY__ */
 
 #endif /* _ASMNDS32_PGTABLE_H */
@@ -291,8 +291,6 @@ static inline void pte_clear(struct mm_struct *mm,
 
 #include <asm-generic/pgtable.h>
 
-#define pgtable_cache_init() do { } while (0)
-
 extern void __init paging_init(void);
 extern void __init mmu_init(void);
 
@@ -443,11 +443,6 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
 
 #include <asm-generic/pgtable.h>
 
-/*
- * No page table caches to initialise
- */
-#define pgtable_cache_init() do { } while (0)
-
 typedef pte_t *pte_addr_t;
 
 #endif /* __ASSEMBLY__ */
@@ -132,8 +132,6 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
 #define PTRS_PER_PTE    (1UL << BITS_PER_PTE)
 
 /* Definitions for 2nd level */
-#define pgtable_cache_init() do { } while (0)
-
 #define PMD_SHIFT       (PLD_SHIFT + BITS_PER_PTE)
 #define PMD_SIZE        (1UL << PMD_SHIFT)
 #define PMD_MASK        (~(PMD_SIZE-1))
@@ -87,7 +87,6 @@ extern unsigned long ioremap_bot;
 unsigned long vmalloc_to_phys(void *vmalloc_addr);
 
 void pgtable_cache_add(unsigned int shift);
-void pgtable_cache_init(void);
 
 #if defined(CONFIG_STRICT_KERNEL_RWX) || defined(CONFIG_PPC32)
 void mark_initmem_nx(void);
@@ -424,11 +424,6 @@ extern void *dtb_early_va;
 extern void setup_bootmem(void);
 extern void paging_init(void);
 
-static inline void pgtable_cache_init(void)
-{
-	/* No page table caches to initialize */
-}
-
 #define VMALLOC_SIZE     (KERN_VIRT_SIZE >> 1)
 #define VMALLOC_END      (PAGE_OFFSET - 1)
 #define VMALLOC_START    (PAGE_OFFSET - VMALLOC_SIZE)
@@ -1682,11 +1682,6 @@ extern void s390_reset_cmma(struct mm_struct *mm);
 #define HAVE_ARCH_UNMAPPED_AREA
 #define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
 
-/*
- * No page table caches to initialise
- */
-static inline void pgtable_cache_init(void) { }
-
 #include <asm-generic/pgtable.h>
 
 #endif /* _S390_PAGE_H */
@@ -123,11 +123,6 @@ typedef pte_t *pte_addr_t;
 
 #define pte_pfn(x) ((unsigned long)(((x).pte_low >> PAGE_SHIFT)))
 
-/*
- * Initialise the page table caches
- */
-extern void pgtable_cache_init(void);
-
 struct vm_area_struct;
 struct mm_struct;
 
@@ -97,7 +97,3 @@ void __init page_table_range_init(unsigned long start, unsigned long end,
 void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
 {
 }
-
-void pgtable_cache_init(void)
-{
-}
@@ -445,9 +445,4 @@ static inline int io_remap_pfn_range(struct vm_area_struct *vma,
 /* We provide our own get_unmapped_area to cope with VA holes for userland */
 #define HAVE_ARCH_UNMAPPED_AREA
 
-/*
- * No page table caches to initialise
- */
-#define pgtable_cache_init() do { } while (0)
-
 #endif /* !(_SPARC_PGTABLE_H) */
@@ -1135,7 +1135,6 @@ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long,
 				   unsigned long);
 #define HAVE_ARCH_FB_UNMAPPED_AREA
 
-void pgtable_cache_init(void);
 void sun4v_register_fault_status(void);
 void sun4v_ktsb_register(void);
 void __init cheetah_ecache_flush_init(void);
@@ -32,8 +32,6 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
 /* zero page used for uninitialized stuff */
 extern unsigned long *empty_zero_page;
 
-#define pgtable_cache_init() do ; while (0)
-
 /* Just any arbitrary offset to the start of the vmalloc VM area: the
  * current 8MB value just means that there will be a 8MB "hole" after the
  * physical memory until the kernel virtual memory starts. That means that
@@ -285,8 +285,6 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
 
 #include <asm-generic/pgtable.h>
 
-#define pgtable_cache_init() do { } while (0)
-
 #endif /* !__ASSEMBLY__ */
 
 #endif /* __UNICORE_PGTABLE_H__ */
@@ -29,7 +29,6 @@ extern pgd_t swapper_pg_dir[1024];
 extern pgd_t initial_page_table[1024];
 extern pmd_t initial_pg_pmd[];
 
-static inline void pgtable_cache_init(void) { }
 void paging_init(void);
 void sync_initial_page_table(void);
 
@@ -241,8 +241,6 @@ extern void cleanup_highmap(void);
 #define HAVE_ARCH_UNMAPPED_AREA
 #define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
 
-#define pgtable_cache_init() do { } while (0)
-
 #define PAGE_AGP PAGE_KERNEL_NOCACHE
 #define HAVE_PAGE_AGP 1
 
@@ -357,7 +357,7 @@ static void pgd_prepopulate_user_pmd(struct mm_struct *mm,
 
 static struct kmem_cache *pgd_cache;
 
-void __init pgd_cache_init(void)
+void __init pgtable_cache_init(void)
 {
 	/*
 	 * When PAE kernel is running as a Xen domain, it does not use
@@ -402,10 +402,6 @@ static inline void _pgd_free(pgd_t *pgd)
 }
 #else
 
-void __init pgd_cache_init(void)
-{
-}
-
 static inline pgd_t *_pgd_alloc(void)
 {
 	return (pgd_t *)__get_free_pages(GFP_PGTABLE_USER,
@@ -238,7 +238,6 @@ extern void paging_init(void);
 # define swapper_pg_dir NULL
 static inline void paging_init(void) { }
 #endif
-static inline void pgtable_cache_init(void) { }
 
 /*
  * The pmd contains the kernel virtual address of the pte page.
|
||||||
static inline void init_espfix_bsp(void) { }
|
static inline void init_espfix_bsp(void) { }
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
extern void __init pgd_cache_init(void);
|
extern void __init pgtable_cache_init(void);
|
||||||
|
|
||||||
#ifndef __HAVE_ARCH_PFN_MODIFY_ALLOWED
|
#ifndef __HAVE_ARCH_PFN_MODIFY_ALLOWED
|
||||||
static inline bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot)
|
static inline bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot)
|
||||||
|
|
|
@ -507,7 +507,7 @@ void __init __weak mem_encrypt_init(void) { }
|
||||||
|
|
||||||
void __init __weak poking_init(void) { }
|
void __init __weak poking_init(void) { }
|
||||||
|
|
||||||
void __init __weak pgd_cache_init(void) { }
|
void __init __weak pgtable_cache_init(void) { }
|
||||||
|
|
||||||
bool initcall_debug;
|
bool initcall_debug;
|
||||||
core_param(initcall_debug, initcall_debug, bool, 0644);
|
core_param(initcall_debug, initcall_debug, bool, 0644);
|
||||||
|
@@ -565,7 +565,6 @@ static void __init mm_init(void)
 	init_espfix_bsp();
 	/* Should be run after espfix64 is set up. */
 	pti_init();
-	pgd_cache_init();
 }
 
 void __init __weak arch_call_rest_init(void)