mm: pagewalk: fix walk for hugepage tables
Pagewalk ignores hugepd entries and walks down the tables as if they were traditional entries, leading to crazy results. Add walk_hugepd_range() and use it to walk hugepage tables. Link: https://lkml.kernel.org/r/38d04410700c8d02f28ba37e020b62c55d6f3d2c.1624597695.git.christophe.leroy@csgroup.eu Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu> Reviewed-by: Steven Price <steven.price@arm.com> Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org> Cc: Paul Mackerras <paulus@samba.org> Cc: Michael Ellerman <mpe@ellerman.id.au> Cc: Daniel Axtens <dja@axtens.net> Cc: "Oliver O'Halloran" <oohall@gmail.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
a458b76a41
commit
e17eae2b83
|
@ -58,6 +58,45 @@ static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
|
|||
return err;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_ARCH_HAS_HUGEPD
/*
 * Walk the huge PTEs behind a hugepd directory entry.
 *
 * A hugepd entry may appear at any page-table level (pgd/p4d/pud/pmd);
 * it points to a directory of huge PTEs whose page size is derived from
 * hugepd_shift().  Call ops->pte_entry for each huge page covering
 * [addr, end).
 *
 * @phpd:    the hugepd entry to descend into
 * @addr:    start of the range to walk (must be hugepage-aligned to act)
 * @end:     end of the range to walk
 * @walk:    walk state; walk->ops->pte_entry is invoked per huge PTE
 * @pdshift: shift of the level the hugepd entry was found at
 *
 * Returns the first non-zero value returned by ops->pte_entry, or 0.
 */
static int walk_hugepd_range(hugepd_t *phpd, unsigned long addr,
			     unsigned long end, struct mm_walk *walk, int pdshift)
{
	int err = 0;
	const struct mm_walk_ops *ops = walk->ops;
	int shift = hugepd_shift(*phpd);
	/*
	 * Hugepage shifts can reach or exceed 31 on some configurations
	 * (e.g. 16GB pages on powerpc, shift 34): shifting a signed int
	 * by that much is undefined behaviour and would truncate the
	 * size, so compute it as unsigned long.
	 */
	unsigned long page_size = 1UL << shift;

	if (!ops->pte_entry)
		return 0;

	/* Only act on the first call for any given hugepage. */
	if (addr & (page_size - 1))
		return 0;

	for (;;) {
		pte_t *pte;

		/*
		 * Huge page-table entries are protected by the mm-wide
		 * page_table_lock; hold it across the callback so the
		 * entry cannot be freed under us.
		 */
		spin_lock(&walk->mm->page_table_lock);
		pte = hugepte_offset(*phpd, addr, pdshift);
		err = ops->pte_entry(pte, addr, addr + page_size, walk);
		spin_unlock(&walk->mm->page_table_lock);

		if (err)
			break;
		/* Stop before addr + page_size would pass end. */
		if (addr >= end - page_size)
			break;
		addr += page_size;
	}
	return err;
}
|
||||
#else
|
||||
static int walk_hugepd_range(hugepd_t *phpd, unsigned long addr,
|
||||
unsigned long end, struct mm_walk *walk, int pdshift)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
|
||||
struct mm_walk *walk)
|
||||
{
|
||||
|
@ -108,7 +147,10 @@ again:
|
|||
goto again;
|
||||
}
|
||||
|
||||
err = walk_pte_range(pmd, addr, next, walk);
|
||||
if (is_hugepd(__hugepd(pmd_val(*pmd))))
|
||||
err = walk_hugepd_range((hugepd_t *)pmd, addr, next, walk, PMD_SHIFT);
|
||||
else
|
||||
err = walk_pte_range(pmd, addr, next, walk);
|
||||
if (err)
|
||||
break;
|
||||
} while (pmd++, addr = next, addr != end);
|
||||
|
@ -157,7 +199,10 @@ static int walk_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
|
|||
if (pud_none(*pud))
|
||||
goto again;
|
||||
|
||||
err = walk_pmd_range(pud, addr, next, walk);
|
||||
if (is_hugepd(__hugepd(pud_val(*pud))))
|
||||
err = walk_hugepd_range((hugepd_t *)pud, addr, next, walk, PUD_SHIFT);
|
||||
else
|
||||
err = walk_pmd_range(pud, addr, next, walk);
|
||||
if (err)
|
||||
break;
|
||||
} while (pud++, addr = next, addr != end);
|
||||
|
@ -189,7 +234,9 @@ static int walk_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
|
|||
if (err)
|
||||
break;
|
||||
}
|
||||
if (ops->pud_entry || ops->pmd_entry || ops->pte_entry)
|
||||
if (is_hugepd(__hugepd(p4d_val(*p4d))))
|
||||
err = walk_hugepd_range((hugepd_t *)p4d, addr, next, walk, P4D_SHIFT);
|
||||
else if (ops->pud_entry || ops->pmd_entry || ops->pte_entry)
|
||||
err = walk_pud_range(p4d, addr, next, walk);
|
||||
if (err)
|
||||
break;
|
||||
|
@ -224,8 +271,9 @@ static int walk_pgd_range(unsigned long addr, unsigned long end,
|
|||
if (err)
|
||||
break;
|
||||
}
|
||||
if (ops->p4d_entry || ops->pud_entry || ops->pmd_entry ||
|
||||
ops->pte_entry)
|
||||
if (is_hugepd(__hugepd(pgd_val(*pgd))))
|
||||
err = walk_hugepd_range((hugepd_t *)pgd, addr, next, walk, PGDIR_SHIFT);
|
||||
else if (ops->p4d_entry || ops->pud_entry || ops->pmd_entry || ops->pte_entry)
|
||||
err = walk_p4d_range(pgd, addr, next, walk);
|
||||
if (err)
|
||||
break;
|
||||
|
|
Loading…
Reference in New Issue