commit d45056ad73

Merge remote-tracking branch 'arm64/for-next/scs' into for-next/core

* arm64/for-next/scs:
  arm64: sdei: Push IS_ENABLED() checks down to callee functions
  arm64: scs: use vmapped IRQ and SDEI shadow stacks
  scs: switch to vmapped shadow stacks
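
The first commit in the list moves configuration checks out of callers and into
the helpers themselves, so call sites no longer need to be wrapped in
IS_ENABLED() blocks. A minimal sketch of the pattern, using a hypothetical
helper name (the real change applies it to the SDEI stack and shadow-stack
init/free helpers further down):

    /* Hypothetical helper standing in for init_sdei_stacks()/init_sdei_scs(). */
    static int init_stacks(void)
    {
            /*
             * The check now lives in the callee; previously each caller had to
             * wrap the call in "if (IS_ENABLED(CONFIG_VMAP_STACK)) { ... }".
             */
            if (!IS_ENABLED(CONFIG_VMAP_STACK))
                    return 0;

            /* ... allocate per-CPU stacks here ... */
            return 0;
    }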

arch/arm64/kernel/Makefile

@@ -58,7 +58,6 @@ obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
 obj-$(CONFIG_CRASH_CORE) += crash_core.o
 obj-$(CONFIG_ARM_SDE_INTERFACE) += sdei.o
 obj-$(CONFIG_ARM64_PTR_AUTH) += pointer_auth.o
-obj-$(CONFIG_SHADOW_CALL_STACK) += scs.o
 obj-$(CONFIG_ARM64_MTE) += mte.o

 obj-y += vdso/ probes/

arch/arm64/kernel/entry.S

@@ -429,7 +429,7 @@ SYM_CODE_END(__swpan_exit_el0)

 #ifdef CONFIG_SHADOW_CALL_STACK
	/* also switch to the irq shadow stack */
-	adr_this_cpu	scs_sp, irq_shadow_call_stack, x26
+	ldr_this_cpu	scs_sp, irq_shadow_call_stack_ptr, x26
 #endif

 9998:
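
The functional change in this hunk: scs_sp used to be pointed at a statically
sized per-CPU array whose address is known at link time (adr_this_cpu), and is
now loaded from a per-CPU pointer that init_irq_scs() fills with a vmapped
stack at boot (ldr_this_cpu). In C terms the backing storage changes roughly as
below; the array form is what the removed DEFINE_SCS() macro expanded to (a
sketch, see the include/linux/scs.h hunk for the macro itself):

    #include <linux/percpu.h>

    /* Old: a fixed per-CPU array, so its address can be formed directly. */
    DEFINE_PER_CPU(unsigned long [SCS_SIZE / sizeof(long)], irq_shadow_call_stack);

    /* New: only a pointer is static; the stack itself comes from scs_alloc(). */
    DEFINE_PER_CPU(unsigned long *, irq_shadow_call_stack_ptr);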

@@ -1086,9 +1086,9 @@ SYM_CODE_START(__sdei_asm_handler)
 #ifdef CONFIG_SHADOW_CALL_STACK
	/* Use a separate shadow call stack for normal and critical events */
	cbnz	w4, 3f
-	adr_this_cpu	dst=scs_sp, sym=sdei_shadow_call_stack_normal, tmp=x6
+	ldr_this_cpu	dst=scs_sp, sym=sdei_shadow_call_stack_normal_ptr, tmp=x6
	b	4f
-3:	adr_this_cpu	dst=scs_sp, sym=sdei_shadow_call_stack_critical, tmp=x6
+3:	ldr_this_cpu	dst=scs_sp, sym=sdei_shadow_call_stack_critical_ptr, tmp=x6
 4:
 #endif
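
For the SDEI handler the same substitution is applied twice, once per event
class. A rough C equivalent of the selection logic above (illustrative only;
event_is_critical stands in for the "w4 is non-zero" test that cbnz performs):

    /* Pick the per-CPU shadow stack matching the SDEI event class. */
    static unsigned long *sdei_pick_scs(bool event_is_critical)
    {
            if (event_is_critical)
                    return this_cpu_read(sdei_shadow_call_stack_critical_ptr);

            return this_cpu_read(sdei_shadow_call_stack_normal_ptr);
    }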

arch/arm64/kernel/irq.c

@@ -17,6 +17,7 @@
 #include <linux/init.h>
 #include <linux/irqchip.h>
 #include <linux/kprobes.h>
+#include <linux/scs.h>
 #include <linux/seq_file.h>
 #include <linux/vmalloc.h>
 #include <asm/daifflags.h>

@@ -27,6 +28,25 @@ DEFINE_PER_CPU(struct nmi_ctx, nmi_contexts);

 DEFINE_PER_CPU(unsigned long *, irq_stack_ptr);

+DECLARE_PER_CPU(unsigned long *, irq_shadow_call_stack_ptr);
+
+#ifdef CONFIG_SHADOW_CALL_STACK
+DEFINE_PER_CPU(unsigned long *, irq_shadow_call_stack_ptr);
+#endif
+
+static void init_irq_scs(void)
+{
+	int cpu;
+
+	if (!IS_ENABLED(CONFIG_SHADOW_CALL_STACK))
+		return;
+
+	for_each_possible_cpu(cpu)
+		per_cpu(irq_shadow_call_stack_ptr, cpu) =
+			scs_alloc(cpu_to_node(cpu));
+}
+
 #ifdef CONFIG_VMAP_STACK
 static void init_irq_stacks(void)
 {
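
The declare/define split above is deliberate: the unconditional DECLARE_PER_CPU
keeps references inside IS_ENABLED()-guarded code compiling even when
CONFIG_SHADOW_CALL_STACK=n, while the storage (and the symbol the entry code
loads from) is only emitted when the option is enabled. A generic sketch of the
idiom, with a made-up variable name:

    #include <linux/percpu.h>

    /* Always visible to C code, so IS_ENABLED()-guarded users still compile. */
    DECLARE_PER_CPU(unsigned long *, example_scs_ptr);

    #ifdef CONFIG_SHADOW_CALL_STACK
    /* Storage is only emitted when shadow call stacks are configured in. */
    DEFINE_PER_CPU(unsigned long *, example_scs_ptr);
    #endif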

@@ -54,6 +74,7 @@ static void init_irq_stacks(void)
 void __init init_IRQ(void)
 {
	init_irq_stacks();
+	init_irq_scs();
	irqchip_init();
	if (!handle_arch_irq)
		panic("No interrupt controller found.");

arch/arm64/kernel/scs.c (deleted)

@@ -1,16 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Shadow Call Stack support.
- *
- * Copyright (C) 2019 Google LLC
- */
-
-#include <linux/percpu.h>
-#include <linux/scs.h>
-
-DEFINE_SCS(irq_shadow_call_stack);
-
-#ifdef CONFIG_ARM_SDE_INTERFACE
-DEFINE_SCS(sdei_shadow_call_stack_normal);
-DEFINE_SCS(sdei_shadow_call_stack_critical);
-#endif
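
The deleted file only held static per-CPU stacks. For reference, DEFINE_SCS()
(removed from include/linux/scs.h later in this diff) expanded to a fixed-size
per-CPU array, so the SDEI definitions above amounted to roughly the following;
the runtime scs_alloc() calls added to irq.c and sdei.c are what replace them:

    /* Approximate expansion of the deleted SDEI definitions (illustrative). */
    #ifdef CONFIG_ARM_SDE_INTERFACE
    DEFINE_PER_CPU(unsigned long [SCS_SIZE / sizeof(long)],
                   sdei_shadow_call_stack_normal);
    DEFINE_PER_CPU(unsigned long [SCS_SIZE / sizeof(long)],
                   sdei_shadow_call_stack_critical);
    #endif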

arch/arm64/kernel/sdei.c

@@ -7,6 +7,7 @@
 #include <linux/hardirq.h>
 #include <linux/irqflags.h>
 #include <linux/sched/task_stack.h>
+#include <linux/scs.h>
 #include <linux/uaccess.h>

 #include <asm/alternative.h>

@@ -37,6 +38,14 @@ DEFINE_PER_CPU(unsigned long *, sdei_stack_normal_ptr);
 DEFINE_PER_CPU(unsigned long *, sdei_stack_critical_ptr);
 #endif

+DECLARE_PER_CPU(unsigned long *, sdei_shadow_call_stack_normal_ptr);
+DECLARE_PER_CPU(unsigned long *, sdei_shadow_call_stack_critical_ptr);
+
+#ifdef CONFIG_SHADOW_CALL_STACK
+DEFINE_PER_CPU(unsigned long *, sdei_shadow_call_stack_normal_ptr);
+DEFINE_PER_CPU(unsigned long *, sdei_shadow_call_stack_critical_ptr);
+#endif
+
 static void _free_sdei_stack(unsigned long * __percpu *ptr, int cpu)
 {
	unsigned long *p;

@@ -52,6 +61,9 @@ static void free_sdei_stacks(void)
 {
	int cpu;

+	if (!IS_ENABLED(CONFIG_VMAP_STACK))
+		return;
+
	for_each_possible_cpu(cpu) {
		_free_sdei_stack(&sdei_stack_normal_ptr, cpu);
		_free_sdei_stack(&sdei_stack_critical_ptr, cpu);

@@ -75,6 +87,9 @@ static int init_sdei_stacks(void)
	int cpu;
	int err = 0;

+	if (!IS_ENABLED(CONFIG_VMAP_STACK))
+		return 0;
+
	for_each_possible_cpu(cpu) {
		err = _init_sdei_stack(&sdei_stack_normal_ptr, cpu);
		if (err)

@@ -90,6 +105,62 @@ static int init_sdei_stacks(void)
	return err;
 }

+static void _free_sdei_scs(unsigned long * __percpu *ptr, int cpu)
+{
+	void *s;
+
+	s = per_cpu(*ptr, cpu);
+	if (s) {
+		per_cpu(*ptr, cpu) = NULL;
+		scs_free(s);
+	}
+}
+
+static void free_sdei_scs(void)
+{
+	int cpu;
+
+	for_each_possible_cpu(cpu) {
+		_free_sdei_scs(&sdei_shadow_call_stack_normal_ptr, cpu);
+		_free_sdei_scs(&sdei_shadow_call_stack_critical_ptr, cpu);
+	}
+}
+
+static int _init_sdei_scs(unsigned long * __percpu *ptr, int cpu)
+{
+	void *s;
+
+	s = scs_alloc(cpu_to_node(cpu));
+	if (!s)
+		return -ENOMEM;
+	per_cpu(*ptr, cpu) = s;
+
+	return 0;
+}
+
+static int init_sdei_scs(void)
+{
+	int cpu;
+	int err = 0;
+
+	if (!IS_ENABLED(CONFIG_SHADOW_CALL_STACK))
+		return 0;
+
+	for_each_possible_cpu(cpu) {
+		err = _init_sdei_scs(&sdei_shadow_call_stack_normal_ptr, cpu);
+		if (err)
+			break;
+		err = _init_sdei_scs(&sdei_shadow_call_stack_critical_ptr, cpu);
+		if (err)
+			break;
+	}
+
+	if (err)
+		free_sdei_scs();
+
+	return err;
+}
+
 static bool on_sdei_normal_stack(unsigned long sp, struct stack_info *info)
 {
	unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_normal_ptr);

@@ -130,13 +201,14 @@ unsigned long sdei_arch_get_entry_point(int conduit)
	 */
	if (is_hyp_mode_available() && !is_kernel_in_hyp_mode()) {
		pr_err("Not supported on this hardware/boot configuration\n");
-		return 0;
+		goto out_err;
	}

-	if (IS_ENABLED(CONFIG_VMAP_STACK)) {
-		if (init_sdei_stacks())
-			return 0;
-	}
+	if (init_sdei_stacks())
+		goto out_err;
+
+	if (init_sdei_scs())
+		goto out_err_free_stacks;

	sdei_exit_mode = (conduit == SMCCC_CONDUIT_HVC) ? SDEI_EXIT_HVC : SDEI_EXIT_SMC;

@@ -151,6 +223,10 @@ unsigned long sdei_arch_get_entry_point(int conduit)
 #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
	return (unsigned long)__sdei_asm_handler;

+out_err_free_stacks:
+	free_sdei_stacks();
+out_err:
+	return 0;
 }

 /*
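
Stitching the two sdei.c hunks above together, the error handling in
sdei_arch_get_entry_point() now has the shape sketched below (approximation
only; the pr_err() and the CONFIG_UNMAP_KERNEL_AT_EL0 section are omitted):

    unsigned long sdei_arch_get_entry_point_shape(int conduit)
    {
            if (is_hyp_mode_available() && !is_kernel_in_hyp_mode())
                    goto out_err;

            if (init_sdei_stacks())
                    goto out_err;

            if (init_sdei_scs())
                    goto out_err_free_stacks;

            sdei_exit_mode = (conduit == SMCCC_CONDUIT_HVC) ? SDEI_EXIT_HVC :
                                                              SDEI_EXIT_SMC;

            return (unsigned long)__sdei_asm_handler;

    out_err_free_stacks:
            free_sdei_stacks();
    out_err:
            return 0;
    }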

include/linux/scs.h

@@ -15,24 +15,18 @@

 #ifdef CONFIG_SHADOW_CALL_STACK

-/*
- * In testing, 1 KiB shadow stack size (i.e. 128 stack frames on a 64-bit
- * architecture) provided ~40% safety margin on stack usage while keeping
- * memory allocation overhead reasonable.
- */
-#define SCS_SIZE	SZ_1K
+#define SCS_ORDER	0
+#define SCS_SIZE	(PAGE_SIZE << SCS_ORDER)
 #define GFP_SCS		(GFP_KERNEL | __GFP_ZERO)

 /* An illegal pointer value to mark the end of the shadow stack. */
 #define SCS_END_MAGIC	(0x5f6UL + POISON_POINTER_DELTA)

-/* Allocate a static per-CPU shadow stack */
-#define DEFINE_SCS(name)						\
-	DEFINE_PER_CPU(unsigned long [SCS_SIZE/sizeof(long)], name)	\
-
 #define task_scs(tsk)		(task_thread_info(tsk)->scs_base)
 #define task_scs_sp(tsk)	(task_thread_info(tsk)->scs_sp)

+void *scs_alloc(int node);
+void scs_free(void *s);
 void scs_init(void);
 int scs_prepare(struct task_struct *tsk, int node);
 void scs_release(struct task_struct *tsk);

@@ -61,6 +55,8 @@ static inline bool task_scs_end_corrupted(struct task_struct *tsk)

 #else /* CONFIG_SHADOW_CALL_STACK */

+static inline void *scs_alloc(int node) { return NULL; }
+static inline void scs_free(void *s) {}
 static inline void scs_init(void) {}
 static inline void scs_task_reset(struct task_struct *tsk) {}
 static inline int scs_prepare(struct task_struct *tsk, int node) { return 0; }
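
scs_alloc() and scs_free() are now part of the public <linux/scs.h> interface
(with NULL/no-op stubs when CONFIG_SHADOW_CALL_STACK=n), so architecture code
can manage its own shadow stacks. A minimal usage sketch mirroring the arm64
callers above; example_scs_ptr and the two functions are made-up names:

    #include <linux/errno.h>
    #include <linux/percpu.h>
    #include <linux/scs.h>
    #include <linux/topology.h>

    static DEFINE_PER_CPU(unsigned long *, example_scs_ptr);

    static int example_scs_setup(int cpu)
    {
            void *s;

            if (!IS_ENABLED(CONFIG_SHADOW_CALL_STACK))
                    return 0;

            s = scs_alloc(cpu_to_node(cpu));        /* vmapped, zeroed stack */
            if (!s)
                    return -ENOMEM;
            per_cpu(example_scs_ptr, cpu) = s;
            return 0;
    }

    static void example_scs_teardown(int cpu)
    {
            void *s = per_cpu(example_scs_ptr, cpu);

            if (s) {        /* as in _free_sdei_scs(): only free what was allocated */
                    per_cpu(example_scs_ptr, cpu) = NULL;
                    scs_free(s);
            }
    }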

kernel/scs.c

@@ -5,26 +5,49 @@
  * Copyright (C) 2019 Google LLC
  */

+#include <linux/cpuhotplug.h>
 #include <linux/kasan.h>
 #include <linux/mm.h>
 #include <linux/scs.h>
-#include <linux/slab.h>
+#include <linux/vmalloc.h>
 #include <linux/vmstat.h>

-static struct kmem_cache *scs_cache;
-
 static void __scs_account(void *s, int account)
 {
-	struct page *scs_page = virt_to_page(s);
+	struct page *scs_page = vmalloc_to_page(s);

	mod_node_page_state(page_pgdat(scs_page), NR_KERNEL_SCS_KB,
			    account * (SCS_SIZE / SZ_1K));
 }

-static void *scs_alloc(int node)
+/* Matches NR_CACHED_STACKS for VMAP_STACK */
+#define NR_CACHED_SCS 2
+static DEFINE_PER_CPU(void *, scs_cache[NR_CACHED_SCS]);
+
+static void *__scs_alloc(int node)
 {
-	void *s = kmem_cache_alloc_node(scs_cache, GFP_SCS, node);
+	int i;
+	void *s;
+
+	for (i = 0; i < NR_CACHED_SCS; i++) {
+		s = this_cpu_xchg(scs_cache[i], NULL);
+		if (s) {
+			kasan_unpoison_vmalloc(s, SCS_SIZE);
+			memset(s, 0, SCS_SIZE);
+			return s;
+		}
+	}
+
+	return __vmalloc_node_range(SCS_SIZE, 1, VMALLOC_START, VMALLOC_END,
+				    GFP_SCS, PAGE_KERNEL, 0, node,
+				    __builtin_return_address(0));
+}
+
+void *scs_alloc(int node)
+{
+	void *s;
+
+	s = __scs_alloc(node);
	if (!s)
		return NULL;

@@ -34,21 +57,47 @@ static void *scs_alloc(int node)
	 * Poison the allocation to catch unintentional accesses to
	 * the shadow stack when KASAN is enabled.
	 */
-	kasan_poison_object_data(scs_cache, s);
+	kasan_poison_vmalloc(s, SCS_SIZE);
	__scs_account(s, 1);
	return s;
 }

-static void scs_free(void *s)
+void scs_free(void *s)
 {
+	int i;
+
	__scs_account(s, -1);
-	kasan_unpoison_object_data(scs_cache, s);
-	kmem_cache_free(scs_cache, s);
+
+	/*
+	 * We cannot sleep as this can be called in interrupt context,
+	 * so use this_cpu_cmpxchg to update the cache, and vfree_atomic
+	 * to free the stack.
+	 */
+
+	for (i = 0; i < NR_CACHED_SCS; i++)
+		if (this_cpu_cmpxchg(scs_cache[i], 0, s) == NULL)
+			return;
+
+	vfree_atomic(s);
+}
+
+static int scs_cleanup(unsigned int cpu)
+{
+	int i;
+	void **cache = per_cpu_ptr(scs_cache, cpu);
+
+	for (i = 0; i < NR_CACHED_SCS; i++) {
+		vfree(cache[i]);
+		cache[i] = NULL;
+	}
+
+	return 0;
 }

 void __init scs_init(void)
 {
-	scs_cache = kmem_cache_create("scs_cache", SCS_SIZE, 0, 0, NULL);
+	cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, "scs:scs_cache", NULL,
+			  scs_cleanup);
 }

 int scs_prepare(struct task_struct *tsk, int node)
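
For completeness: the task-side consumer that scs_alloc() serves is
scs_prepare(), whose body lies outside this hunk. It boils down to roughly the
sketch below, using the task_scs()/task_scs_sp() accessors from <linux/scs.h>;
treat it as an approximation rather than the exact upstream function:

    #include <linux/sched.h>
    #include <linux/scs.h>

    /* Approximate shape of scs_prepare() (not taken verbatim from this diff). */
    static int example_scs_prepare(struct task_struct *tsk, int node)
    {
            void *s = scs_alloc(node);

            if (!s)
                    return -ENOMEM;

            /* Base and current top both start at the freshly zeroed stack. */
            task_scs(tsk) = task_scs_sp(tsk) = s;
            return 0;
    }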