thp: split_huge_page_mm/vma
split_huge_page_pmd compat code. Each one of those would need to be expanded to hundreds of lines of complex code without a fully reliable split_huge_page_pmd design. Signed-off-by: Andrea Arcangeli <aarcange@redhat.com> Acked-by: Rik van Riel <riel@redhat.com> Acked-by: Mel Gorman <mel@csn.ul.ie> Signed-off-by: Johannes Weiner <hannes@cmpxchg.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
e7a00c45f2
commit
bae9c19bf1
|
@ -179,6 +179,7 @@ static void mark_screen_rdonly(struct mm_struct *mm)
|
||||||
if (pud_none_or_clear_bad(pud))
|
if (pud_none_or_clear_bad(pud))
|
||||||
goto out;
|
goto out;
|
||||||
pmd = pmd_offset(pud, 0xA0000);
|
pmd = pmd_offset(pud, 0xA0000);
|
||||||
|
split_huge_page_pmd(mm, pmd);
|
||||||
if (pmd_none_or_clear_bad(pmd))
|
if (pmd_none_or_clear_bad(pmd))
|
||||||
goto out;
|
goto out;
|
||||||
pte = pte_offset_map_lock(mm, pmd, 0xA0000, &ptl);
|
pte = pte_offset_map_lock(mm, pmd, 0xA0000, &ptl);
|
||||||
|
|
|
@ -514,6 +514,7 @@ static inline int check_pmd_range(struct vm_area_struct *vma, pud_t *pud,
|
||||||
pmd = pmd_offset(pud, addr);
|
pmd = pmd_offset(pud, addr);
|
||||||
do {
|
do {
|
||||||
next = pmd_addr_end(addr, end);
|
next = pmd_addr_end(addr, end);
|
||||||
|
split_huge_page_pmd(vma->vm_mm, pmd);
|
||||||
if (pmd_none_or_clear_bad(pmd))
|
if (pmd_none_or_clear_bad(pmd))
|
||||||
continue;
|
continue;
|
||||||
if (check_pte_range(vma, pmd, addr, next, nodes,
|
if (check_pte_range(vma, pmd, addr, next, nodes,
|
||||||
|
|
|
@ -154,6 +154,7 @@ static void mincore_pmd_range(struct vm_area_struct *vma, pud_t *pud,
|
||||||
pmd = pmd_offset(pud, addr);
|
pmd = pmd_offset(pud, addr);
|
||||||
do {
|
do {
|
||||||
next = pmd_addr_end(addr, end);
|
next = pmd_addr_end(addr, end);
|
||||||
|
split_huge_page_pmd(vma->vm_mm, pmd);
|
||||||
if (pmd_none_or_clear_bad(pmd))
|
if (pmd_none_or_clear_bad(pmd))
|
||||||
mincore_unmapped_range(vma, addr, next, vec);
|
mincore_unmapped_range(vma, addr, next, vec);
|
||||||
else
|
else
|
||||||
|
|
|
@ -88,6 +88,7 @@ static inline void change_pmd_range(struct mm_struct *mm, pud_t *pud,
|
||||||
pmd = pmd_offset(pud, addr);
|
pmd = pmd_offset(pud, addr);
|
||||||
do {
|
do {
|
||||||
next = pmd_addr_end(addr, end);
|
next = pmd_addr_end(addr, end);
|
||||||
|
split_huge_page_pmd(mm, pmd);
|
||||||
if (pmd_none_or_clear_bad(pmd))
|
if (pmd_none_or_clear_bad(pmd))
|
||||||
continue;
|
continue;
|
||||||
change_pte_range(mm, pmd, addr, next, newprot, dirty_accountable);
|
change_pte_range(mm, pmd, addr, next, newprot, dirty_accountable);
|
||||||
|
|
|
@ -41,6 +41,7 @@ static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr)
|
||||||
return NULL;
|
return NULL;
|
||||||
|
|
||||||
pmd = pmd_offset(pud, addr);
|
pmd = pmd_offset(pud, addr);
|
||||||
|
split_huge_page_pmd(mm, pmd);
|
||||||
if (pmd_none_or_clear_bad(pmd))
|
if (pmd_none_or_clear_bad(pmd))
|
||||||
return NULL;
|
return NULL;
|
||||||
|
|
||||||
|
|
|
@ -34,6 +34,7 @@ static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
|
||||||
pmd = pmd_offset(pud, addr);
|
pmd = pmd_offset(pud, addr);
|
||||||
do {
|
do {
|
||||||
next = pmd_addr_end(addr, end);
|
next = pmd_addr_end(addr, end);
|
||||||
|
split_huge_page_pmd(walk->mm, pmd);
|
||||||
if (pmd_none_or_clear_bad(pmd)) {
|
if (pmd_none_or_clear_bad(pmd)) {
|
||||||
if (walk->pte_hole)
|
if (walk->pte_hole)
|
||||||
err = walk->pte_hole(addr, next, walk);
|
err = walk->pte_hole(addr, next, walk);
|
||||||
|
|
Loading…
Reference in New Issue