x86/mm/ptdump: Optimize check for W+X mappings for CONFIG_KASAN=y
Enabling both the DEBUG_WX=y and KASAN=y options significantly increases
boot time (dozens of seconds at least).

KASAN fills kernel page tables with repeated values to map several TBs
of virtual memory to the single kasan_zero_page:

    kasan_zero_pud -> kasan_zero_pmd -> kasan_zero_pte -> kasan_zero_page

So the page table walker used to find W+X mappings checks the same
kasan_zero_p?d page table entries far more than once.

With this patch, the pud walker skips a pud if it has the same value as
the previous one. The skipping is done only when searching for W+X
mappings, so this optimization won't affect the page table dump via
debugfs.

This drops the time spent in the W+X check from ~30 sec to a reasonable
0.1 sec:

Before:
[    4.579991] Freeing unused kernel memory: 1000K
[   35.257523] x86/mm: Checked W+X mappings: passed, no W+X pages found.

After:
[    5.138756] Freeing unused kernel memory: 1000K
[    5.266496] x86/mm: Checked W+X mappings: passed, no W+X pages found.

Signed-off-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
Reviewed-by: Alexander Potapenko <glider@google.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: kasan-dev@googlegroups.com
Cc: Tobias Regnery <tobias.regnery@gmail.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Link: http://lkml.kernel.org/r/20170214100839.17186-1-aryabinin@virtuozzo.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
commit 243b72aae2
parent 5b1ad68f9b
@@ -327,18 +327,31 @@ static void walk_pmd_level(struct seq_file *m, struct pg_state *st, pud_t addr,
 
 #if PTRS_PER_PUD > 1
 
+/*
+ * This is an optimization for CONFIG_DEBUG_WX=y + CONFIG_KASAN=y
+ * KASAN fills page tables with the same values. Since there is no
+ * point in checking page table more than once we just skip repeated
+ * entries. This saves us dozens of seconds during boot.
+ */
+static bool pud_already_checked(pud_t *prev_pud, pud_t *pud, bool checkwx)
+{
+	return checkwx && prev_pud && (pud_val(*prev_pud) == pud_val(*pud));
+}
+
 static void walk_pud_level(struct seq_file *m, struct pg_state *st, pgd_t addr,
 							unsigned long P)
 {
 	int i;
 	pud_t *start;
 	pgprotval_t prot;
+	pud_t *prev_pud = NULL;
 
 	start = (pud_t *) pgd_page_vaddr(addr);
 
 	for (i = 0; i < PTRS_PER_PUD; i++) {
 		st->current_address = normalize_addr(P + i * PUD_LEVEL_MULT);
-		if (!pud_none(*start)) {
+		if (!pud_none(*start) &&
+		    !pud_already_checked(prev_pud, start, st->check_wx)) {
 			if (pud_large(*start) || !pud_present(*start)) {
 				prot = pud_flags(*start);
 				note_page(m, st, __pgprot(prot), 2);
@@ -349,6 +362,7 @@ static void walk_pud_level(struct seq_file *m, struct pg_state *st, pgd_t addr,
 		} else
 			note_page(m, st, __pgprot(0), 2);
 
+		prev_pud = start;
 		start++;
 	}
 }
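For intuition, here is a minimal stand-alone user-space sketch of the skip
logic; it is not kernel code, and the table contents, NENTRIES and the
walk_entry()/already_checked() helpers are invented for illustration. With
every slot holding the same value, as in KASAN's zero-page layout, only the
first entry is actually walked:

/* toy_skip_demo.c — build with: cc -Wall -o toy_skip_demo toy_skip_demo.c */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NENTRIES 512	/* same fan-out as PTRS_PER_PUD on x86-64 */

static int walks;	/* how many entries we actually descended into */

/* Stand-in for the expensive recursive walk below one entry. */
static void walk_entry(uint64_t entry)
{
	(void)entry;
	walks++;
}

/* Same shape as pud_already_checked(): skip only back-to-back repeats. */
static bool already_checked(const uint64_t *prev, const uint64_t *cur,
			    bool checkwx)
{
	return checkwx && prev && (*prev == *cur);
}

int main(void)
{
	uint64_t table[NENTRIES];
	const uint64_t *prev = NULL;
	int i;

	/* KASAN-like layout: every slot points at one shared lower table. */
	for (i = 0; i < NENTRIES; i++)
		table[i] = 0x1234000ULL;

	for (i = 0; i < NENTRIES; i++) {
		/* "true" plays the role of st->check_wx in the patch. */
		if (!already_checked(prev, &table[i], true))
			walk_entry(table[i]);
		prev = &table[i];
	}

	printf("walked %d of %d entries\n", walks, NENTRIES);	/* 1 of 512 */
	return 0;
}

Note the same design choice as the patch: the comparison is on raw entry
values, so identical entries necessarily map identical lower-level tables,
and the skip is gated on the check_wx flag so a plain debugfs dump still
visits every entry.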