powerpc/kasan: Don't instrument non-maskable or raw interrupts
Disable address sanitization for raw and non-maskable interrupt
handlers, because they can run in real mode, where we cannot access
the shadow memory.  (Note that kasan_arch_is_ready() doesn't test for
real mode, since it is a static branch for speed, and in any case not
all the entry points to the generic KASAN code are protected by
kasan_arch_is_ready guards.)

The changes to interrupt_nmi_enter/exit_prepare() look larger than
they actually are.  The changes are equivalent to adding
!IS_ENABLED(CONFIG_KASAN) to the conditions for calling nmi_enter()
or nmi_exit() in real mode.  That is, the code is equivalent to using
the following condition for calling nmi_enter/exit:

	if (((!IS_ENABLED(CONFIG_PPC_BOOK3S_64) ||
	      !firmware_has_feature(FW_FEATURE_LPAR) ||
	      radix_enabled()) &&
	     !IS_ENABLED(CONFIG_KASAN)) ||
	    (mfmsr() & MSR_DR))

That unwieldy condition has been split into several statements with
comments, for easier reading.

The nmi_ipi_lock functions that call atomic functions (i.e.,
nmi_ipi_lock_start(), nmi_ipi_lock() and nmi_ipi_unlock()), besides
being marked noinstr, now call arch_atomic_* functions instead of
atomic_* functions because with KASAN enabled, the atomic_* functions
are wrappers which explicitly do address sanitization on their
arguments.  Since we are trying to avoid address sanitization, we
have to use the lower-level arch_atomic_* versions.

In hv_nmi_check_nonrecoverable(), the regs_set_unrecoverable() call
has been open-coded so as to avoid having to either trust the
inlining or mark regs_set_unrecoverable() as noinstr.

[paulus@ozlabs.org: combined a few work-in-progress commits of
 Daniel's and wrote the commit message.]

Signed-off-by: Daniel Axtens <dja@axtens.net>
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/YoTFGaKM8Pd46PIK@cleo
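For context, with KASAN enabled the plain atomic_* API is generated
as instrumented wrappers that explicitly check the target address
before delegating to the corresponding arch_atomic_* primitive.  A
simplified sketch of such a wrapper (paraphrased from
include/linux/atomic/atomic-instrumented.h, not the exact generated
source):

	static __always_inline int
	atomic_cmpxchg(atomic_t *v, int old, int new)
	{
		/* KASAN/KCSAN hook -- touches shadow memory, unsafe in real mode */
		instrument_atomic_read_write(v, sizeof(*v));
		/* the uninstrumented operation the NMI path now calls directly */
		return arch_atomic_cmpxchg(v, old, new);
	}

Calling arch_atomic_cmpxchg() directly skips the instrument_*() hook,
which is what the real-mode NMI path needs.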
parent f08aed5241
commit 5352090a99
--- a/arch/powerpc/include/asm/interrupt.h
+++ b/arch/powerpc/include/asm/interrupt.h
@@ -324,22 +324,46 @@ static inline void interrupt_nmi_enter_prepare(struct pt_regs *regs, struct inte
 	}
 #endif
 
+	/* If data relocations are enabled, it's safe to use nmi_enter() */
+	if (mfmsr() & MSR_DR) {
+		nmi_enter();
+		return;
+	}
+
 	/*
-	 * Do not use nmi_enter() for pseries hash guest taking a real-mode
+	 * But do not use nmi_enter() for pseries hash guest taking a real-mode
 	 * NMI because not everything it touches is within the RMA limit.
 	 */
-	if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64) ||
-			!firmware_has_feature(FW_FEATURE_LPAR) ||
-			radix_enabled() || (mfmsr() & MSR_DR))
-		nmi_enter();
+	if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) &&
+	    firmware_has_feature(FW_FEATURE_LPAR) &&
+	    !radix_enabled())
+		return;
+
+	/*
+	 * Likewise, don't use it if we have some form of instrumentation (like
+	 * KASAN shadow) that is not safe to access in real mode (even on radix)
+	 */
+	if (IS_ENABLED(CONFIG_KASAN))
+		return;
+
+	/* Otherwise, it should be safe to call it */
+	nmi_enter();
 }
 
 static inline void interrupt_nmi_exit_prepare(struct pt_regs *regs, struct interrupt_nmi_state *state)
 {
-	if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64) ||
-			!firmware_has_feature(FW_FEATURE_LPAR) ||
-			radix_enabled() || (mfmsr() & MSR_DR))
-		nmi_exit();
+	if (mfmsr() & MSR_DR) {
+		// nmi_exit if relocations are on
+		nmi_exit();
+	} else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) &&
+		   firmware_has_feature(FW_FEATURE_LPAR) &&
+		   !radix_enabled()) {
+		// no nmi_exit for a pseries hash guest taking a real mode exception
+	} else if (IS_ENABLED(CONFIG_KASAN)) {
+		// no nmi_exit for KASAN in real mode
+	} else {
+		nmi_exit();
+	}
 
 	/*
 	 * nmi does not call nap_adjust_return because nmi should not create
@@ -407,7 +431,8 @@ static inline void interrupt_nmi_exit_prepare(struct pt_regs *regs, struct inter
  * Specific handlers may have additional restrictions.
  */
 #define DEFINE_INTERRUPT_HANDLER_RAW(func) \
-static __always_inline long ____##func(struct pt_regs *regs); \
+static __always_inline __no_sanitize_address __no_kcsan long \
+____##func(struct pt_regs *regs); \
 \
 interrupt_handler long func(struct pt_regs *regs) \
 { \
@@ -421,7 +446,8 @@ interrupt_handler long func(struct pt_regs *regs) \
 } \
 NOKPROBE_SYMBOL(func); \
 \
-static __always_inline long ____##func(struct pt_regs *regs)
+static __always_inline __no_sanitize_address __no_kcsan long \
+____##func(struct pt_regs *regs)
 
 /**
  * DECLARE_INTERRUPT_HANDLER - Declare synchronous interrupt handler function
@@ -541,7 +567,8 @@ static __always_inline void ____##func(struct pt_regs *regs)
  * body with a pair of curly brackets.
  */
 #define DEFINE_INTERRUPT_HANDLER_NMI(func) \
-static __always_inline long ____##func(struct pt_regs *regs); \
+static __always_inline __no_sanitize_address __no_kcsan long \
+____##func(struct pt_regs *regs); \
 \
 interrupt_handler long func(struct pt_regs *regs) \
 { \
@@ -558,7 +585,8 @@ interrupt_handler long func(struct pt_regs *regs) \
 } \
 NOKPROBE_SYMBOL(func); \
 \
-static __always_inline long ____##func(struct pt_regs *regs)
+static __always_inline __no_sanitize_address __no_kcsan long \
+____##func(struct pt_regs *regs)
 
 
 /* Interrupt handlers */
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -411,32 +411,32 @@ static struct cpumask nmi_ipi_pending_mask;
 static bool nmi_ipi_busy = false;
 static void (*nmi_ipi_function)(struct pt_regs *) = NULL;
 
-static void nmi_ipi_lock_start(unsigned long *flags)
+noinstr static void nmi_ipi_lock_start(unsigned long *flags)
 {
 	raw_local_irq_save(*flags);
 	hard_irq_disable();
-	while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1) {
+	while (arch_atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1) {
 		raw_local_irq_restore(*flags);
-		spin_until_cond(atomic_read(&__nmi_ipi_lock) == 0);
+		spin_until_cond(arch_atomic_read(&__nmi_ipi_lock) == 0);
 		raw_local_irq_save(*flags);
 		hard_irq_disable();
 	}
 }
 
-static void nmi_ipi_lock(void)
+noinstr static void nmi_ipi_lock(void)
 {
-	while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1)
-		spin_until_cond(atomic_read(&__nmi_ipi_lock) == 0);
+	while (arch_atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1)
+		spin_until_cond(arch_atomic_read(&__nmi_ipi_lock) == 0);
 }
 
-static void nmi_ipi_unlock(void)
+noinstr static void nmi_ipi_unlock(void)
 {
 	smp_mb();
-	WARN_ON(atomic_read(&__nmi_ipi_lock) != 1);
-	atomic_set(&__nmi_ipi_lock, 0);
+	WARN_ON(arch_atomic_read(&__nmi_ipi_lock) != 1);
+	arch_atomic_set(&__nmi_ipi_lock, 0);
 }
 
-static void nmi_ipi_unlock_end(unsigned long *flags)
+noinstr static void nmi_ipi_unlock_end(unsigned long *flags)
 {
 	nmi_ipi_unlock();
 	raw_local_irq_restore(*flags);
@@ -445,7 +445,7 @@ static void nmi_ipi_unlock_end(unsigned long *flags)
 /*
  * Platform NMI handler calls this to ack
  */
-int smp_handle_nmi_ipi(struct pt_regs *regs)
+noinstr int smp_handle_nmi_ipi(struct pt_regs *regs)
 {
 	void (*fn)(struct pt_regs *) = NULL;
 	unsigned long flags;
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -393,7 +393,7 @@ void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
  * Builds that do not support KVM could take this second option to increase
  * the recoverability of NMIs.
  */
-void hv_nmi_check_nonrecoverable(struct pt_regs *regs)
+noinstr void hv_nmi_check_nonrecoverable(struct pt_regs *regs)
 {
 #ifdef CONFIG_PPC_POWERNV
 	unsigned long kbase = (unsigned long)_stext;
@@ -433,7 +433,9 @@ void hv_nmi_check_nonrecoverable(struct pt_regs *regs)
 	return;
 
 nonrecoverable:
-	regs_set_unrecoverable(regs);
+	regs->msr &= ~MSR_RI;
+	local_paca->hsrr_valid = 0;
+	local_paca->srr_valid = 0;
 #endif
 }
 DEFINE_INTERRUPT_HANDLER_NMI(system_reset_exception)
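For reference, the three open-coded statements in the hunk above are
intended to match what the regs_set_unrecoverable() helper does.  A
simplified sketch of that helper (paraphrased from
arch/powerpc/include/asm/ptrace.h, not the exact source):

	static inline void regs_set_unrecoverable(struct pt_regs *regs)
	{
		/* clear MSR_RI: the interrupt state is no longer recoverable */
		regs->msr &= ~MSR_RI;
	#ifdef CONFIG_PPC_BOOK3S_64
		/* force SRR/HSRR to be rewritten on interrupt return */
		local_paca->hsrr_valid = 0;
		local_paca->srr_valid = 0;
	#endif
	}

Open-coding this in hv_nmi_check_nonrecoverable() avoids relying on
the helper being inlined or having to mark it noinstr.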
--- a/arch/powerpc/lib/Makefile
+++ b/arch/powerpc/lib/Makefile
@@ -13,6 +13,9 @@ CFLAGS_REMOVE_feature-fixups.o = $(CC_FLAGS_FTRACE)
 
 KASAN_SANITIZE_code-patching.o := n
 KASAN_SANITIZE_feature-fixups.o := n
+# restart_table.o contains functions called in the NMI interrupt path
+# which can be in real mode. Disable KASAN.
+KASAN_SANITIZE_restart_table.o := n
 
 ifdef CONFIG_KASAN
 CFLAGS_code-patching.o += -DDISABLE_BRANCH_PROFILING
--- a/arch/powerpc/platforms/powernv/smp.c
+++ b/arch/powerpc/platforms/powernv/smp.c
@@ -345,7 +345,7 @@ static void __init pnv_smp_probe(void)
 	}
 }
 
-static int pnv_system_reset_exception(struct pt_regs *regs)
+noinstr static int pnv_system_reset_exception(struct pt_regs *regs)
 {
 	if (smp_handle_nmi_ipi(regs))
 		return 1;