hugetlb: introduce generic version of prepare_hugepage_range
arm, arm64, powerpc, sparc, x86 architectures use the same version of
prepare_hugepage_range, so move this generic implementation into
asm-generic/hugetlb.h.

Link: http://lkml.kernel.org/r/20180920060358.16606-9-alex@ghiti.fr
Signed-off-by: Alexandre Ghiti <alex@ghiti.fr>
Reviewed-by: Luiz Capitulino <lcapitulino@redhat.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Tested-by: Helge Deller <deller@gmx.de> [parisc]
Acked-by: Catalin Marinas <catalin.marinas@arm.com> [arm64]
Acked-by: Paul Burton <paul.burton@mips.com> [MIPS]
Acked-by: Ingo Molnar <mingo@kernel.org> [x86]
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: David S. Miller <davem@davemloft.net>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: James E.J. Bottomley <jejb@parisc-linux.org>
Cc: James Hogan <jhogan@kernel.org>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Rich Felker <dalias@libc.org>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
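To illustrate the mechanism the patch relies on, here is a minimal user-space sketch (not kernel code) of the __HAVE_ARCH_PREPARE_HUGEPAGE_RANGE override pattern: an architecture header that wants to keep its own check defines the macro before the generic header is processed, otherwise the generic fallback is compiled in. The fixed 2 MiB mask and the simplified prototype are stand-ins for the kernel's hstate/huge_page_mask() and struct file argument.

/*
 * Sketch of the override pattern used by asm-generic/hugetlb.h: the
 * generic prepare_hugepage_range() is compiled only when no
 * architecture defined __HAVE_ARCH_PREPARE_HUGEPAGE_RANGE first.
 * HPAGE_MASK below assumes 2 MiB huge pages for illustration only.
 */
#include <errno.h>
#include <stdio.h>

#define HPAGE_MASK	(~((1UL << 21) - 1))	/* assumed 2 MiB huge pages */

/* An arch header that keeps its own version would add this line: */
/* #define __HAVE_ARCH_PREPARE_HUGEPAGE_RANGE */

#ifndef __HAVE_ARCH_PREPARE_HUGEPAGE_RANGE
/* Generic fallback: both addr and len must be huge-page aligned. */
static inline int prepare_hugepage_range(unsigned long addr, unsigned long len)
{
	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (addr & ~HPAGE_MASK)
		return -EINVAL;
	return 0;
}
#endif

int main(void)
{
	printf("aligned range:   %d\n", prepare_hugepage_range(0x200000, 0x400000));
	printf("unaligned range: %d\n", prepare_hugepage_range(0x200000, 0x123456));
	return 0;
}

In the diff below, the hunks that add #define __HAVE_ARCH_PREPARE_HUGEPAGE_RANGE are the architectures that keep their own implementation, while the hunks that delete the function belong to the architectures named in the message, which now fall through to the asm-generic copy.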
parent c4916a0086
commit 78d6e4e8ea
@@ -33,17 +33,6 @@ static inline int is_hugepage_only_range(struct mm_struct *mm,
 	return 0;
 }
 
-static inline int prepare_hugepage_range(struct file *file,
-					 unsigned long addr, unsigned long len)
-{
-	struct hstate *h = hstate_file(file);
-	if (len & ~huge_page_mask(h))
-		return -EINVAL;
-	if (addr & ~huge_page_mask(h))
-		return -EINVAL;
-	return 0;
-}
-
 static inline void arch_clear_hugepage_flags(struct page *page)
 {
 	clear_bit(PG_dcache_clean, &page->flags);
@@ -31,17 +31,6 @@ static inline int is_hugepage_only_range(struct mm_struct *mm,
 	return 0;
 }
 
-static inline int prepare_hugepage_range(struct file *file,
-					 unsigned long addr, unsigned long len)
-{
-	struct hstate *h = hstate_file(file);
-	if (len & ~huge_page_mask(h))
-		return -EINVAL;
-	if (addr & ~huge_page_mask(h))
-		return -EINVAL;
-	return 0;
-}
-
 static inline void arch_clear_hugepage_flags(struct page *page)
 {
 	clear_bit(PG_dcache_clean, &page->flags);
@@ -9,6 +9,7 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
 			    unsigned long end, unsigned long floor,
 			    unsigned long ceiling);
 
+#define __HAVE_ARCH_PREPARE_HUGEPAGE_RANGE
 int prepare_hugepage_range(struct file *file,
 			unsigned long addr, unsigned long len);
 
@@ -18,6 +18,7 @@ static inline int is_hugepage_only_range(struct mm_struct *mm,
 	return 0;
 }
 
+#define __HAVE_ARCH_PREPARE_HUGEPAGE_RANGE
 static inline int prepare_hugepage_range(struct file *file,
 					 unsigned long addr,
 					 unsigned long len)
@@ -22,6 +22,7 @@ static inline int is_hugepage_only_range(struct mm_struct *mm,
  * If the arch doesn't supply something else, assume that hugepage
  * size aligned regions are ok without further preparation.
  */
+#define __HAVE_ARCH_PREPARE_HUGEPAGE_RANGE
 static inline int prepare_hugepage_range(struct file *file,
 					 unsigned long addr, unsigned long len)
 {
@@ -114,21 +114,6 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
 			    unsigned long end, unsigned long floor,
 			    unsigned long ceiling);
 
-/*
- * If the arch doesn't supply something else, assume that hugepage
- * size aligned regions are ok without further preparation.
- */
-static inline int prepare_hugepage_range(struct file *file,
-					 unsigned long addr, unsigned long len)
-{
-	struct hstate *h = hstate_file(file);
-	if (len & ~huge_page_mask(h))
-		return -EINVAL;
-	if (addr & ~huge_page_mask(h))
-		return -EINVAL;
-	return 0;
-}
-
 #define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
 static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
 					    unsigned long addr, pte_t *ptep)
@@ -15,6 +15,7 @@ static inline int is_hugepage_only_range(struct mm_struct *mm,
  * If the arch doesn't supply something else, assume that hugepage
  * size aligned regions are ok without further preparation.
  */
+#define __HAVE_ARCH_PREPARE_HUGEPAGE_RANGE
 static inline int prepare_hugepage_range(struct file *file,
 					 unsigned long addr, unsigned long len)
 {
@@ -26,22 +26,6 @@ static inline int is_hugepage_only_range(struct mm_struct *mm,
 	return 0;
 }
 
-/*
- * If the arch doesn't supply something else, assume that hugepage
- * size aligned regions are ok without further preparation.
- */
-static inline int prepare_hugepage_range(struct file *file,
-					 unsigned long addr, unsigned long len)
-{
-	struct hstate *h = hstate_file(file);
-
-	if (len & ~huge_page_mask(h))
-		return -EINVAL;
-	if (addr & ~huge_page_mask(h))
-		return -EINVAL;
-	return 0;
-}
-
 #define __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH
 static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
 					 unsigned long addr, pte_t *ptep)
@@ -13,21 +13,6 @@ static inline int is_hugepage_only_range(struct mm_struct *mm,
 	return 0;
 }
 
-/*
- * If the arch doesn't supply something else, assume that hugepage
- * size aligned regions are ok without further preparation.
- */
-static inline int prepare_hugepage_range(struct file *file,
-					 unsigned long addr, unsigned long len)
-{
-	struct hstate *h = hstate_file(file);
-	if (len & ~huge_page_mask(h))
-		return -EINVAL;
-	if (addr & ~huge_page_mask(h))
-		return -EINVAL;
-	return 0;
-}
-
 static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
 					   unsigned long addr, pte_t *ptep)
 {
@@ -87,4 +87,19 @@ static inline pte_t huge_pte_wrprotect(pte_t pte)
 }
 #endif
 
+#ifndef __HAVE_ARCH_PREPARE_HUGEPAGE_RANGE
+static inline int prepare_hugepage_range(struct file *file,
+		unsigned long addr, unsigned long len)
+{
+	struct hstate *h = hstate_file(file);
+
+	if (len & ~huge_page_mask(h))
+		return -EINVAL;
+	if (addr & ~huge_page_mask(h))
+		return -EINVAL;
+
+	return 0;
+}
+#endif
+
 #endif /* _ASM_GENERIC_HUGETLB_H */