powerpc/32s: Implement dedicated kasan_init_region()

Implement a kasan_init_region() dedicated to book3s/32 that allocates
KASAN regions using BATs.

Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/709e821602b48a1d7c211a9b156da26db98c3e9d.1589866984.git.christophe.leroy@csgroup.eu
commit 7974c47326
parent 2b279c0348
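Note (not part of the patch): on book3s/32, BATs (Block Address Translation registers) map naturally aligned, power-of-two sized blocks of at least 128K. The new kasan_init_region() below therefore tries to cover the shadow region with up to two such blocks, sized from the lowest set bits of the shadow-region size, and leaves whatever remains to ordinary page tables. The following minimal user-space sketch only reproduces that sizing arithmetic; the SZ_128K constant and the 24M example size are illustrative, not taken from the patch.

/*
 * Standalone illustration of the block sizing used by the new
 * kasan_init_region(): each candidate BAT block is the lowest set bit
 * of the remaining size, hence a power of two that divides it.
 * Blocks smaller than 128K are not BAT-mappable and fall back to pages.
 */
#include <stdio.h>
#include <strings.h>	/* ffs() */

#define SZ_128K	0x20000

int main(void)
{
	/* Example shadow-region size: 24M (e.g. 192M of memory, shadow = size/8) */
	int k_size = 24 << 20;

	/* 1 << (ffs(x) - 1) isolates the lowest set bit of x */
	int k_size_base = 1 << (ffs(k_size) - 1);
	int rem = k_size - k_size_base;
	int k_size_more = rem ? 1 << (ffs(rem) - 1) : 0;

	printf("shadow size      : 0x%x\n", k_size);
	printf("first block      : 0x%x (%s)\n", k_size_base,
	       k_size_base >= SZ_128K ? "BAT mapped" : "page mapped");
	printf("second block     : 0x%x (%s)\n", k_size_more,
	       k_size_more >= SZ_128K ? "BAT mapped" : "page mapped");
	printf("left for PTEs    : 0x%x\n", k_size - k_size_base - k_size_more);

	return 0;
}

For a 24M shadow region this yields an 8M and a 16M block, both BAT-mappable, so nothing is left over; shadow sizes that cannot be covered by two such blocks are finished off by the PTE loop at the end of kasan_init_region().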
diff --git a/arch/powerpc/include/asm/kasan.h b/arch/powerpc/include/asm/kasan.h
--- a/arch/powerpc/include/asm/kasan.h
+++ b/arch/powerpc/include/asm/kasan.h
@@ -34,6 +34,7 @@ static inline void kasan_init(void) { }
 static inline void kasan_late_init(void) { }
 #endif
 
+void kasan_update_early_region(unsigned long k_start, unsigned long k_end, pte_t pte);
 int kasan_init_shadow_page_tables(unsigned long k_start, unsigned long k_end);
 int kasan_init_region(void *start, size_t size);
 
diff --git a/arch/powerpc/mm/kasan/Makefile b/arch/powerpc/mm/kasan/Makefile
--- a/arch/powerpc/mm/kasan/Makefile
+++ b/arch/powerpc/mm/kasan/Makefile
@@ -4,3 +4,4 @@ KASAN_SANITIZE := n
 
 obj-$(CONFIG_PPC32)           += kasan_init_32.o
 obj-$(CONFIG_PPC_8xx)         += 8xx.o
+obj-$(CONFIG_PPC_BOOK3S_32)   += book3s_32.o
diff --git a/arch/powerpc/mm/kasan/book3s_32.c b/arch/powerpc/mm/kasan/book3s_32.c
new file mode 100644
--- /dev/null
+++ b/arch/powerpc/mm/kasan/book3s_32.c
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#define DISABLE_BRANCH_PROFILING
+
+#include <linux/kasan.h>
+#include <linux/memblock.h>
+#include <asm/pgalloc.h>
+#include <mm/mmu_decl.h>
+
+int __init kasan_init_region(void *start, size_t size)
+{
+	unsigned long k_start = (unsigned long)kasan_mem_to_shadow(start);
+	unsigned long k_end = (unsigned long)kasan_mem_to_shadow(start + size);
+	unsigned long k_cur = k_start;
+	int k_size = k_end - k_start;
+	int k_size_base = 1 << (ffs(k_size) - 1);
+	int ret;
+	void *block;
+
+	block = memblock_alloc(k_size, k_size_base);
+
+	if (block && k_size_base >= SZ_128K && k_start == ALIGN(k_start, k_size_base)) {
+		int k_size_more = 1 << (ffs(k_size - k_size_base) - 1);
+
+		setbat(-1, k_start, __pa(block), k_size_base, PAGE_KERNEL);
+		if (k_size_more >= SZ_128K)
+			setbat(-1, k_start + k_size_base, __pa(block) + k_size_base,
+			       k_size_more, PAGE_KERNEL);
+		if (v_block_mapped(k_start))
+			k_cur = k_start + k_size_base;
+		if (v_block_mapped(k_start + k_size_base))
+			k_cur = k_start + k_size_base + k_size_more;
+
+		update_bats();
+	}
+
+	if (!block)
+		block = memblock_alloc(k_size, PAGE_SIZE);
+	if (!block)
+		return -ENOMEM;
+
+	ret = kasan_init_shadow_page_tables(k_start, k_end);
+	if (ret)
+		return ret;
+
+	kasan_update_early_region(k_start, k_cur, __pte(0));
+
+	for (; k_cur < k_end; k_cur += PAGE_SIZE) {
+		pmd_t *pmd = pmd_ptr_k(k_cur);
+		void *va = block + k_cur - k_start;
+		pte_t pte = pfn_pte(PHYS_PFN(__pa(va)), PAGE_KERNEL);
+
+		__set_pte_at(&init_mm, k_cur, pte_offset_kernel(pmd, k_cur), pte, 0);
+	}
+	flush_tlb_kernel_range(k_start, k_end);
+	return 0;
+}
diff --git a/arch/powerpc/mm/kasan/kasan_init_32.c b/arch/powerpc/mm/kasan/kasan_init_32.c
--- a/arch/powerpc/mm/kasan/kasan_init_32.c
+++ b/arch/powerpc/mm/kasan/kasan_init_32.c
@@ -79,7 +79,7 @@ int __init __weak kasan_init_region(void *start, size_t size)
 	return 0;
 }
 
-static void __init
+void __init
 kasan_update_early_region(unsigned long k_start, unsigned long k_end, pte_t pte)
 {
 	unsigned long k_cur;