mm: hugetlb: add arch hook for clearing page flags before entering pool
The core page allocator ensures that page flags are zeroed when freeing pages via free_pages_check.

A number of architectures (ARM, PPC, MIPS) rely on this property to treat new pages as dirty with respect to the data cache and perform the appropriate flushing before mapping the pages into userspace.

This can lead to cache synchronisation problems when using hugepages, since the allocator keeps its own pool of pages above the usual page allocator and does not reset the page flags when freeing a page into the pool.

This patch adds a new architecture hook, arch_clear_hugepage_flags, so that architectures which rely on the page flags being in a particular state for fresh allocations can adjust the flags accordingly when a page is freed into the pool.

Signed-off-by: Will Deacon <will.deacon@arm.com>
Cc: Michal Hocko <mhocko@suse.cz>
Reviewed-by: Michal Hocko <mhocko@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
01dc52ebdf
commit
5d3a551c28
|
@ -77,4 +77,8 @@ static inline void arch_release_hugepage(struct page *page)
|
||||||
{
|
{
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static inline void arch_clear_hugepage_flags(struct page *page)
|
||||||
|
{
|
||||||
|
}
|
||||||
|
|
||||||
#endif /* _ASM_IA64_HUGETLB_H */
|
#endif /* _ASM_IA64_HUGETLB_H */
|
||||||
|
|
|
@ -112,4 +112,8 @@ static inline void arch_release_hugepage(struct page *page)
|
||||||
{
|
{
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static inline void arch_clear_hugepage_flags(struct page *page)
|
||||||
|
{
|
||||||
|
}
|
||||||
|
|
||||||
#endif /* __ASM_HUGETLB_H */
|
#endif /* __ASM_HUGETLB_H */
|
||||||
|
|
|
@ -151,6 +151,10 @@ static inline void arch_release_hugepage(struct page *page)
|
||||||
{
|
{
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static inline void arch_clear_hugepage_flags(struct page *page)
|
||||||
|
{
|
||||||
|
}
|
||||||
|
|
||||||
#else /* ! CONFIG_HUGETLB_PAGE */
|
#else /* ! CONFIG_HUGETLB_PAGE */
|
||||||
static inline void flush_hugetlb_page(struct vm_area_struct *vma,
|
static inline void flush_hugetlb_page(struct vm_area_struct *vma,
|
||||||
unsigned long vmaddr)
|
unsigned long vmaddr)
|
||||||
|
|
|
@ -33,6 +33,7 @@ static inline int prepare_hugepage_range(struct file *file,
|
||||||
}
|
}
|
||||||
|
|
||||||
#define hugetlb_prefault_arch_hook(mm) do { } while (0)
|
#define hugetlb_prefault_arch_hook(mm) do { } while (0)
|
||||||
|
#define arch_clear_hugepage_flags(page) do { } while (0)
|
||||||
|
|
||||||
int arch_prepare_hugepage(struct page *page);
|
int arch_prepare_hugepage(struct page *page);
|
||||||
void arch_release_hugepage(struct page *page);
|
void arch_release_hugepage(struct page *page);
|
||||||
|
|
|
@ -1,6 +1,7 @@
|
||||||
#ifndef _ASM_SH_HUGETLB_H
|
#ifndef _ASM_SH_HUGETLB_H
|
||||||
#define _ASM_SH_HUGETLB_H
|
#define _ASM_SH_HUGETLB_H
|
||||||
|
|
||||||
|
#include <asm/cacheflush.h>
|
||||||
#include <asm/page.h>
|
#include <asm/page.h>
|
||||||
|
|
||||||
|
|
||||||
|
@ -89,4 +90,9 @@ static inline void arch_release_hugepage(struct page *page)
|
||||||
{
|
{
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static inline void arch_clear_hugepage_flags(struct page *page)
|
||||||
|
{
|
||||||
|
clear_bit(PG_dcache_clean, &page->flags);
|
||||||
|
}
|
||||||
|
|
||||||
#endif /* _ASM_SH_HUGETLB_H */
|
#endif /* _ASM_SH_HUGETLB_H */
|
||||||
|
|
|
@ -82,4 +82,8 @@ static inline void arch_release_hugepage(struct page *page)
|
||||||
{
|
{
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static inline void arch_clear_hugepage_flags(struct page *page)
|
||||||
|
{
|
||||||
|
}
|
||||||
|
|
||||||
#endif /* _ASM_SPARC64_HUGETLB_H */
|
#endif /* _ASM_SPARC64_HUGETLB_H */
|
||||||
|
|
|
@ -106,6 +106,10 @@ static inline void arch_release_hugepage(struct page *page)
|
||||||
{
|
{
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static inline void arch_clear_hugepage_flags(struct page *page)
|
||||||
|
{
|
||||||
|
}
|
||||||
|
|
||||||
#ifdef CONFIG_HUGETLB_SUPER_PAGES
|
#ifdef CONFIG_HUGETLB_SUPER_PAGES
|
||||||
static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
|
static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
|
||||||
struct page *page, int writable)
|
struct page *page, int writable)
|
||||||
|
|
|
@ -90,4 +90,8 @@ static inline void arch_release_hugepage(struct page *page)
|
||||||
{
|
{
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static inline void arch_clear_hugepage_flags(struct page *page)
|
||||||
|
{
|
||||||
|
}
|
||||||
|
|
||||||
#endif /* _ASM_X86_HUGETLB_H */
|
#endif /* _ASM_X86_HUGETLB_H */
|
||||||
|
|
|
@ -637,6 +637,7 @@ static void free_huge_page(struct page *page)
|
||||||
h->surplus_huge_pages--;
|
h->surplus_huge_pages--;
|
||||||
h->surplus_huge_pages_node[nid]--;
|
h->surplus_huge_pages_node[nid]--;
|
||||||
} else {
|
} else {
|
||||||
|
arch_clear_hugepage_flags(page);
|
||||||
enqueue_huge_page(h, page);
|
enqueue_huge_page(h, page);
|
||||||
}
|
}
|
||||||
spin_unlock(&hugetlb_lock);
|
spin_unlock(&hugetlb_lock);
|
||||||
|
|
Loading…
Reference in New Issue