arm64: idle: don't instrument idle code with KCOV
The low-level idle code in arch_cpu_idle() and its callees runs at a time when portions of the kernel environment aren't available. For example, RCU may not be watching, and lockdep state may be out-of-sync with the hardware. Due to this, it is not sound to instrument this code. We generally avoid instrumentation by marking the entry functions as `noinstr`, but currently this doesn't inhibit KCOV instrumentation. Prevent this by factoring these functions into a new idle.c so that we can disable KCOV for the entire compilation unit, as is done for the core idle code in kernel/sched/idle.c. We'd like to keep instrumentation of the rest of process.c, and for the existing code in cpuidle.c, so a new compilation unit is preferable. The arch_cpu_idle_dead() function in process.c is a cpu hotplug function that is safe to instrument, so it is left as-is in process.c. Signed-off-by: Mark Rutland <mark.rutland@arm.com> Acked-by: Catalin Marinas <catalin.marinas@arm.com> Acked-by: Marc Zyngier <maz@kernel.org> Cc: James Morse <james.morse@arm.com> Cc: Will Deacon <will@kernel.org> Link: https://lore.kernel.org/r/20210607094624.34689-21-mark.rutland@arm.com Signed-off-by: Will Deacon <will@kernel.org>
This commit is contained in:
parent
bf6fa2c0dd
commit
b5df5b8307
|
@ -18,6 +18,7 @@ CFLAGS_syscall.o += -fno-stack-protector
|
|||
# available or are out-of-sync with HW state. Since `noinstr` doesn't always
|
||||
# inhibit KCOV instrumentation, disable it for the entire compilation unit.
|
||||
KCOV_INSTRUMENT_entry.o := n
|
||||
KCOV_INSTRUMENT_idle.o := n
|
||||
|
||||
# Object file lists.
|
||||
obj-y := debug-monitors.o entry.o irq.o fpsimd.o \
|
||||
|
@ -27,7 +28,7 @@ obj-y := debug-monitors.o entry.o irq.o fpsimd.o \
|
|||
return_address.o cpuinfo.o cpu_errata.o \
|
||||
cpufeature.o alternative.o cacheinfo.o \
|
||||
smp.o smp_spin_table.o topology.o smccc-call.o \
|
||||
syscall.o proton-pack.o idreg-override.o
|
||||
syscall.o proton-pack.o idreg-override.o idle.o
|
||||
|
||||
targets += efi-entry.o
|
||||
|
||||
|
|
|
@ -0,0 +1,69 @@
|
|||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/*
|
||||
* Low-level idle sequences
|
||||
*/
|
||||
|
||||
#include <linux/cpu.h>
|
||||
#include <linux/irqflags.h>
|
||||
|
||||
#include <asm/arch_gicv3.h>
|
||||
#include <asm/barrier.h>
|
||||
#include <asm/cpufeature.h>
|
||||
#include <asm/sysreg.h>
|
||||
|
||||
/*
 * Wait for an interrupt (or other WFI wake event) in a low-power state.
 * The DSB orders all prior memory accesses before the WFI so the core
 * does not enter the wait state with accesses still pending.
 * noinstr: this runs in the idle path where the normal kernel environment
 * (e.g. RCU watching) may not be available, so it must not be instrumented.
 */
static void noinstr __cpu_do_idle(void)
{
	dsb(sy);
	wfi();
}
|
||||
|
||||
/*
 * Idle variant for systems using GIC priority masking (PMR-based irqflags).
 *
 * WFI wake-up is gated by the interrupt controller, so the PMR must be
 * unmasked for the core to wake. To keep handlers from actually running,
 * IRQ/FIQ are first masked at the PSTATE (DAIF) level; both PMR and DAIF
 * are restored to their saved values after idling. The ordering of these
 * steps is load-bearing — do not reorder.
 */
static void noinstr __cpu_do_idle_irqprio(void)
{
	unsigned long pmr;
	unsigned long daif_bits;

	/* Mask IRQ/FIQ at the CPU (DAIF) level, remembering the old state. */
	daif_bits = read_sysreg(daif);
	write_sysreg(daif_bits | PSR_I_BIT | PSR_F_BIT, daif);

	/*
	 * Unmask PMR before going idle to make sure interrupts can
	 * be raised.
	 */
	pmr = gic_read_pmr();
	gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);

	__cpu_do_idle();

	/* Restore the saved PMR first, then the saved DAIF bits. */
	gic_write_pmr(pmr);
	write_sysreg(daif_bits, daif);
}
|
||||
|
||||
/*
|
||||
* cpu_do_idle()
|
||||
*
|
||||
* Idle the processor (wait for interrupt).
|
||||
*
|
||||
* If the CPU supports priority masking we must do additional work to
|
||||
* ensure that interrupts are not masked at the PMR (because the core will
|
||||
* not wake up if we block the wake up signal in the interrupt controller).
|
||||
*/
|
||||
void noinstr cpu_do_idle(void)
|
||||
{
|
||||
if (system_uses_irq_prio_masking())
|
||||
__cpu_do_idle_irqprio();
|
||||
else
|
||||
__cpu_do_idle();
|
||||
}
|
||||
|
||||
/*
 * This is our default idle handler.
 * noinstr: entered from the core idle loop at a point where portions of
 * the kernel environment (e.g. RCU, lockdep state) may be unavailable.
 */
void noinstr arch_cpu_idle(void)
{
	/*
	 * This should do all the clock switching and wait for interrupt
	 * tricks
	 */
	cpu_do_idle();
	/* Unconditionally re-enable interrupts on the way back out. */
	raw_local_irq_enable();
}
|
|
@ -73,63 +73,6 @@ EXPORT_SYMBOL_GPL(pm_power_off);
|
|||
|
||||
void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
|
||||
|
||||
/*
 * Wait for an interrupt (or other WFI wake event) in a low-power state.
 * The DSB orders all prior memory accesses before the WFI so the core
 * does not enter the wait state with accesses still pending.
 * noinstr: runs in the idle path where the normal kernel environment
 * may not be available, so it must not be instrumented.
 */
static void noinstr __cpu_do_idle(void)
{
	dsb(sy);
	wfi();
}
|
||||
|
||||
/*
 * Idle variant for systems using GIC priority masking (PMR-based irqflags).
 *
 * WFI wake-up is gated by the interrupt controller, so the PMR must be
 * unmasked for the core to wake. To keep handlers from actually running,
 * IRQ/FIQ are first masked at the PSTATE (DAIF) level; both PMR and DAIF
 * are restored to their saved values after idling. The ordering of these
 * steps is load-bearing — do not reorder.
 */
static void noinstr __cpu_do_idle_irqprio(void)
{
	unsigned long pmr;
	unsigned long daif_bits;

	/* Mask IRQ/FIQ at the CPU (DAIF) level, remembering the old state. */
	daif_bits = read_sysreg(daif);
	write_sysreg(daif_bits | PSR_I_BIT | PSR_F_BIT, daif);

	/*
	 * Unmask PMR before going idle to make sure interrupts can
	 * be raised.
	 */
	pmr = gic_read_pmr();
	gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);

	__cpu_do_idle();

	/* Restore the saved PMR first, then the saved DAIF bits. */
	gic_write_pmr(pmr);
	write_sysreg(daif_bits, daif);
}
|
||||
|
||||
/*
 * cpu_do_idle()
 *
 * Idle the processor (wait for interrupt).
 *
 * If the CPU supports priority masking we must do additional work to
 * ensure that interrupts are not masked at the PMR (because the core will
 * not wake up if we block the wake up signal in the interrupt controller).
 */
void noinstr cpu_do_idle(void)
{
	/* Dispatch to the PMR-aware sequence when priority masking is in use. */
	if (system_uses_irq_prio_masking())
		__cpu_do_idle_irqprio();
	else
		__cpu_do_idle();
}
|
||||
|
||||
/*
 * This is our default idle handler.
 * noinstr: entered from the core idle loop at a point where portions of
 * the kernel environment (e.g. RCU, lockdep state) may be unavailable.
 */
void noinstr arch_cpu_idle(void)
{
	/*
	 * This should do all the clock switching and wait for interrupt
	 * tricks
	 */
	cpu_do_idle();
	/* Unconditionally re-enable interrupts on the way back out. */
	raw_local_irq_enable();
}
|
||||
|
||||
#ifdef CONFIG_HOTPLUG_CPU
|
||||
void arch_cpu_idle_dead(void)
|
||||
{
|
||||
|
|
Loading…
Reference in New Issue