mm: khugepaged: make hugepage_vma_check() non-static
The hugepage_vma_check() could be reused by khugepaged_enter() and khugepaged_enter_vma_merge(), but it is static in khugepaged.c. Make it non-static and declare it in khugepaged.h.

Link: https://lkml.kernel.org/r/20220510203222.24246-7-shy828301@gmail.com
Signed-off-by: Yang Shi <shy828301@gmail.com>
Suggested-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Rik van Riel <riel@surriel.com>
Cc: Song Liu <song@kernel.org>
Cc: Song Liu <songliubraving@fb.com>
Cc: Theodore Ts'o <tytso@mit.edu>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit 2647d11b9e
parent d2081b2bf8
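Exporting the declaration lets the header-side khugepaged entry points reuse the same eligibility check. As a minimal sketch of the intended reuse (the caller below, example_register_vma(), is a hypothetical illustration, not code from this patch):

#include <linux/khugepaged.h>

/* Hypothetical caller for illustration only; not part of this patch. */
static void example_register_vma(struct vm_area_struct *vma,
				 unsigned long vm_flags)
{
	/*
	 * With hugepage_vma_check() declared in khugepaged.h, code outside
	 * mm/khugepaged.c can test whether a VMA is THP-eligible before
	 * registering its mm with khugepaged.
	 */
	if (hugepage_vma_check(vma, vm_flags))
		__khugepaged_enter(vma->vm_mm);
}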
--- a/include/linux/khugepaged.h
+++ b/include/linux/khugepaged.h
@@ -3,8 +3,6 @@
 #define _LINUX_KHUGEPAGED_H
 
 #include <linux/sched/coredump.h> /* MMF_VM_HUGEPAGE */
-#include <linux/shmem_fs.h>
-
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 extern struct attribute_group khugepaged_attr_group;
@@ -12,6 +10,8 @@ extern struct attribute_group khugepaged_attr_group;
 extern int khugepaged_init(void);
 extern void khugepaged_destroy(void);
 extern int start_stop_khugepaged(void);
+extern bool hugepage_vma_check(struct vm_area_struct *vma,
+			       unsigned long vm_flags);
 extern void __khugepaged_enter(struct mm_struct *mm);
 extern void __khugepaged_exit(struct mm_struct *mm);
 extern void khugepaged_enter_vma_merge(struct vm_area_struct *vma,
@@ -55,13 +55,11 @@ static inline void khugepaged_exit(struct mm_struct *mm)
 static inline void khugepaged_enter(struct vm_area_struct *vma,
 				    unsigned long vm_flags)
 {
-	if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags))
-		if ((khugepaged_always() ||
-		     (shmem_file(vma->vm_file) && shmem_huge_enabled(vma)) ||
-		     (khugepaged_req_madv() && (vm_flags & VM_HUGEPAGE))) &&
-		    !(vm_flags & VM_NOHUGEPAGE) &&
-		    !test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
+	if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) &&
+	    khugepaged_enabled()) {
+		if (hugepage_vma_check(vma, vm_flags))
 			__khugepaged_enter(vma->vm_mm);
+	}
 }
 #else /* CONFIG_TRANSPARENT_HUGEPAGE */
 static inline void khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
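Read together, the header hunks leave khugepaged_enter() delegating the per-VMA policy checks to the newly exported helper; assembling the context and added lines above gives roughly:

static inline void khugepaged_enter(struct vm_area_struct *vma,
				    unsigned long vm_flags)
{
	if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) &&
	    khugepaged_enabled()) {
		if (hugepage_vma_check(vma, vm_flags))
			__khugepaged_enter(vma->vm_mm);
	}
}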
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -437,8 +437,8 @@ static inline int khugepaged_test_exit(struct mm_struct *mm)
 	return atomic_read(&mm->mm_users) == 0;
 }
 
-static bool hugepage_vma_check(struct vm_area_struct *vma,
-			       unsigned long vm_flags)
+bool hugepage_vma_check(struct vm_area_struct *vma,
+			unsigned long vm_flags)
 {
 	if (!transhuge_vma_enabled(vma, vm_flags))
 		return false;
@@ -508,20 +508,13 @@ void __khugepaged_enter(struct mm_struct *mm)
 void khugepaged_enter_vma_merge(struct vm_area_struct *vma,
 			       unsigned long vm_flags)
 {
-	unsigned long hstart, hend;
-
-	/*
-	 * khugepaged only supports read-only files for non-shmem files.
-	 * khugepaged does not yet work on special mappings. And
-	 * file-private shmem THP is not supported.
-	 */
-	if (!hugepage_vma_check(vma, vm_flags))
-		return;
-
-	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
-	hend = vma->vm_end & HPAGE_PMD_MASK;
-	if (hstart < hend)
-		khugepaged_enter(vma, vm_flags);
+	if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) &&
+	    khugepaged_enabled() &&
+	    (((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) <
+	     (vma->vm_end & HPAGE_PMD_MASK))) {
+		if (hugepage_vma_check(vma, vm_flags))
+			__khugepaged_enter(vma->vm_mm);
+	}
 }
 
 void __khugepaged_exit(struct mm_struct *mm)
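Likewise, stitching the unchanged and added lines of the last hunk together, khugepaged_enter_vma_merge() after this patch checks registration state, khugepaged enablement and PMD alignment inline, and defers the remaining per-VMA policy to hugepage_vma_check():

void khugepaged_enter_vma_merge(struct vm_area_struct *vma,
			       unsigned long vm_flags)
{
	if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) &&
	    khugepaged_enabled() &&
	    (((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) <
	     (vma->vm_end & HPAGE_PMD_MASK))) {
		if (hugepage_vma_check(vma, vm_flags))
			__khugepaged_enter(vma->vm_mm);
	}
}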