[POWERPC] Don't use SLAB/SLUB for PTE pages
The SLUB allocator relies on struct page fields first_page and slab,
overwritten by ptl when SPLIT_PTLOCK: so the SLUB allocator cannot then
be used for the lowest level of pagetable pages.  This was obstructing
SLUB on PowerPC, which uses kmem_caches for its pagetables.  So convert
its pte level to use normal gfp pages (whereas pmd, pud and 64k-page pgd
want partpages, so continue to use kmem_caches for pmd, pud and pgd).

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
commit 517e22638c
parent f1fa74f4af
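For context, the clash the first paragraph describes lives in the struct page
union; a simplified sketch of the era's include/linux/mm_types.h layout
(abbreviated; neighbouring fields and exact ordering elided):

	struct page {
		/* ... flags, _count, _mapcount ... */
		union {
			struct {		/* most pages */
				unsigned long private;
				struct address_space *mapping;
			};
	#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
			spinlock_t ptl;		/* split page-table lock */
	#endif
			struct {		/* SLUB bookkeeping */
				struct page *first_page;
				struct kmem_cache *slab;
			};
		};
		/* ... */
	};

With SPLIT_PTLOCK in effect, pte_lock_init() initializes ptl in the struct
page of each pte page; if that same page belonged to a SLUB slab, the spinlock
would clobber first_page and slab.  Taking pte pages straight from the page
allocator sidesteps the aliasing entirely.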
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -120,19 +120,6 @@ config GENERIC_BUG
 config SYS_SUPPORTS_APM_EMULATION
 	bool
 
-#
-# Powerpc uses the slab allocator to manage its ptes and the
-# page structs of ptes are used for splitting the page table
-# lock for configurations supporting more than SPLIT_PTLOCK_CPUS.
-#
-# In that special configuration the page structs of slabs are modified.
-# This setting disables the selection of SLUB as a slab allocator.
-#
-config ARCH_USES_SLAB_PAGE_STRUCT
-	bool
-	default y
-	depends on SPLIT_PTLOCK_CPUS <= NR_CPUS
-
 config DEFAULT_UIMAGE
 	bool
 	help
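Deleting the symbol is sufficient on its own: SLUB's entry in init/Kconfig, as
it then stood, gated itself on !ARCH_USES_SLAB_PAGE_STRUCT, so with no
architecture left defining the symbol, SLUB becomes selectable on PowerPC
without touching the allocator-choice Kconfig at all.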
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -146,21 +146,16 @@ static void zero_ctor(void *addr, struct kmem_cache *cache, unsigned long flags)
 	memset(addr, 0, kmem_cache_size(cache));
 }
 
-#ifdef CONFIG_PPC_64K_PAGES
-static const unsigned int pgtable_cache_size[3] = {
-	PTE_TABLE_SIZE, PMD_TABLE_SIZE, PGD_TABLE_SIZE
-};
-static const char *pgtable_cache_name[ARRAY_SIZE(pgtable_cache_size)] = {
-	"pte_pmd_cache", "pmd_cache", "pgd_cache",
-};
-#else
 static const unsigned int pgtable_cache_size[2] = {
-	PTE_TABLE_SIZE, PMD_TABLE_SIZE
+	PGD_TABLE_SIZE, PMD_TABLE_SIZE
 };
 static const char *pgtable_cache_name[ARRAY_SIZE(pgtable_cache_size)] = {
-	"pgd_pte_cache", "pud_pmd_cache",
-};
+#ifdef CONFIG_PPC_64K_PAGES
+	"pgd_cache", "pmd_cache",
+#else
+	"pgd_cache", "pud_pmd_cache",
 #endif /* CONFIG_PPC_64K_PAGES */
+};
 
 #ifdef CONFIG_HUGETLB_PAGE
 /* Hugepages need one extra cache, initialized in hugetlbpage.c.  We
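These two arrays drive cache creation at boot.  A minimal sketch of how
init_64.c's pgtable_cache_init() consumes them (the real function's flags and
error handling may differ slightly):

	void pgtable_cache_init(void)
	{
		int i;

		for (i = 0; i < ARRAY_SIZE(pgtable_cache_size); i++) {
			int size = pgtable_cache_size[i];
			const char *name = pgtable_cache_name[i];

			/* zero_ctor() above pre-zeroes every object, so
			 * tables come back cleared, just as __GFP_ZERO
			 * clears them on the new gfp path for ptes. */
			pgtable_cache[i] = kmem_cache_create(name, size, size,
							     SLAB_HWCACHE_ALIGN,
							     zero_ctor, NULL);
			if (!pgtable_cache[i])
				panic("Couldn't create pgtable cache %s", name);
		}
	}

Note the index change: with the pte cache gone, slot 0 now holds the pgd cache
on both page-size configurations, matching PGD_CACHE_NUM in the header below.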
--- a/include/asm-powerpc/pgalloc-64.h
+++ b/include/asm-powerpc/pgalloc-64.h
@@ -14,18 +14,11 @@
 
 extern struct kmem_cache *pgtable_cache[];
 
-#ifdef CONFIG_PPC_64K_PAGES
-#define PTE_CACHE_NUM	0
-#define PMD_CACHE_NUM	1
-#define PGD_CACHE_NUM	2
-#define HUGEPTE_CACHE_NUM 3
-#else
-#define PTE_CACHE_NUM	0
-#define PMD_CACHE_NUM	1
-#define PUD_CACHE_NUM	1
-#define PGD_CACHE_NUM	0
-#define HUGEPTE_CACHE_NUM 2
-#endif
+#define PGD_CACHE_NUM		0
+#define PUD_CACHE_NUM		1
+#define PMD_CACHE_NUM		1
+#define HUGEPTE_CACHE_NUM	2
+#define PTE_NONCACHE_NUM	3  /* from GFP rather than kmem_cache */
 
 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
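The numbering matters because the cache index travels in the low bits of the
table's address, under PGF_CACHENUM_MASK.  Roughly as the same header defines
it (a sketch; exact layout may differ):

	typedef struct pgtable_free {
		unsigned long val;
	} pgtable_free_t;

	#define PGF_CACHENUM_MASK	0x3

	static inline pgtable_free_t pgtable_free_cache(void *p, int cachenum,
							unsigned long mask)
	{
		/* Tables are at least 4 bytes and table-size aligned, so
		 * the bottom two pointer bits are free to carry the index. */
		BUG_ON(cachenum > PGF_CACHENUM_MASK);
		return (pgtable_free_t){.val = ((unsigned long)p & ~mask)
						| cachenum};
	}

Only values 0..3 fit under the 2-bit mask, which is why the non-cache marker
PTE_NONCACHE_NUM takes the one slot left over, 3.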
@@ -91,8 +84,7 @@ static inline void pmd_free(pmd_t *pmd)
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 					  unsigned long address)
 {
-	return kmem_cache_alloc(pgtable_cache[PTE_CACHE_NUM],
-				GFP_KERNEL|__GFP_REPEAT);
+	return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
 }
 
 static inline struct page *pte_alloc_one(struct mm_struct *mm,
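The kmem_cache path got zeroed tables from the cache's zero_ctor; on the gfp
path, __GFP_ZERO takes over that job, and __GFP_REPEAT is kept so small-order
pagetable allocations keep retrying under pressure.  The user-pagetable
variant just below in the header wraps the same helper, roughly:

	static inline struct page *pte_alloc_one(struct mm_struct *mm,
						 unsigned long address)
	{
		/* Same page-allocator path; hand back the struct page so
		 * the caller can take the split ptl that lives in it. */
		return virt_to_page(pte_alloc_one_kernel(mm, address));
	}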
@@ -103,12 +95,12 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm,
 
 static inline void pte_free_kernel(pte_t *pte)
 {
-	kmem_cache_free(pgtable_cache[PTE_CACHE_NUM], pte);
+	free_page((unsigned long)pte);
 }
 
 static inline void pte_free(struct page *ptepage)
 {
-	pte_free_kernel(page_address(ptepage));
+	__free_page(ptepage);
 }
 
 #define PGF_CACHENUM_MASK	0x3
@@ -130,14 +122,17 @@ static inline void pgtable_free(pgtable_free_t pgf)
 	void *p = (void *)(pgf.val & ~PGF_CACHENUM_MASK);
 	int cachenum = pgf.val & PGF_CACHENUM_MASK;
 
-	kmem_cache_free(pgtable_cache[cachenum], p);
+	if (cachenum == PTE_NONCACHE_NUM)
+		free_page((unsigned long)p);
+	else
+		kmem_cache_free(pgtable_cache[cachenum], p);
 }
 
 extern void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf);
 
 #define __pte_free_tlb(tlb, ptepage)	\
 	pgtable_free_tlb(tlb, pgtable_free_cache(page_address(ptepage), \
-		PTE_CACHE_NUM, PTE_TABLE_SIZE-1))
+		PTE_NONCACHE_NUM, PTE_TABLE_SIZE-1))
 #define __pmd_free_tlb(tlb, pmd)	\
 	pgtable_free_tlb(tlb, pgtable_free_cache(pmd, \
 		PMD_CACHE_NUM, PMD_TABLE_SIZE-1))
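The asymmetry that remains is deliberate: pmd, pud and the 64k-page pgd tables
are fractions of a page (the "partpages" of the commit message), so a
kmem_cache still earns its keep for them, and their struct pages never double
as a split pte lock.  Only the last-level pte tables, whose struct page
carries ptl under SPLIT_PTLOCK, must come straight from the page allocator;
__pte_free_tlb accordingly tags them PTE_NONCACHE_NUM so that the deferred
pgtable_free() knows to free_page() them rather than return them to a cache.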