LoongArch: Fix idle VS timer enqueue
[ Upstream commit edb1942542bc538707cea221e9c7923a6270465f ]
LoongArch re-enables interrupts in its idle routine and performs a
TIF_NEED_RESCHED check afterwards before putting the CPU to sleep.

The IRQs firing between the check and the idle instruction may set the
TIF_NEED_RESCHED flag. In order to deal with such a race, IRQs
interrupting __arch_cpu_idle() roll back their return address to the
beginning of __arch_cpu_idle() so that TIF_NEED_RESCHED is checked
again before going back to sleep.

However, idle IRQs can also queue timers that may require a tick
reprogramming through a new generic idle loop iteration, but those timers
would go unnoticed here because __arch_cpu_idle() only checks
TIF_NEED_RESCHED; it doesn't check for pending timers.

Fix this by fast-forwarding the idle IRQs' return address to the end of the
idle routine instead of the beginning, so that the generic idle loop can
handle both TIF_NEED_RESCHED and pending timers.
Fixes: 0603839b18 ("LoongArch: Add exception/interrupt handling")
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
Signed-off-by: Marco Crivellari <marco.crivellari@suse.com>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
Signed-off-by: Sasha Levin <sashal@kernel.org>
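
To illustrate the race, here is a minimal userspace model (a sketch, not
kernel code: need_resched, timer_queued and idle_irq() are invented
stand-ins for TIF_NEED_RESCHED, a timer enqueued by an idle IRQ, and an
interrupt landing in the race window):

#include <stdbool.h>
#include <stdio.h>

static bool need_resched;      /* stands in for TIF_NEED_RESCHED */
static bool timer_queued;      /* stands in for a timer queued by an idle IRQ */

/* Models an IRQ landing between the flag check and the idle instruction. */
static void idle_irq(void)
{
        timer_queued = true;   /* needs a tick reprogram, not a reschedule */
}

int main(void)
{
        /* Old behaviour: the rollback only repeats the TIF_NEED_RESCHED check. */
        if (!need_resched) {
                idle_irq();             /* fires in the race window */
                if (!need_resched)      /* rollback re-checks the same flag */
                        puts("old: would go to sleep with a timer still queued");
        }

        /*
         * New behaviour: the IRQ return address is fast-forwarded past the
         * idle instruction, so control returns to the generic idle loop,
         * which handles both need_resched and queued timers.
         */
        if (timer_queued)
                puts("new: the generic idle loop reprograms the tick first");

        return 0;
}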
--- a/arch/loongarch/kernel/genex.S
+++ b/arch/loongarch/kernel/genex.S
@@ -18,27 +18,29 @@
 
 	.align	5
 SYM_FUNC_START(__arch_cpu_idle)
-	/* start of rollback region */
-	LONG_L	t0, tp, TI_FLAGS
-	nop
-	andi	t0, t0, _TIF_NEED_RESCHED
-	bnez	t0, 1f
-	nop
-	nop
-	nop
+	/* start of idle interrupt region */
+	ori	t0, zero, CSR_CRMD_IE
+	/* idle instruction needs irq enabled */
+	csrxchg	t0, t0, LOONGARCH_CSR_CRMD
+	/*
+	 * If an interrupt lands here; between enabling interrupts above and
+	 * going idle on the next instruction, we must *NOT* go idle since the
+	 * interrupt could have set TIF_NEED_RESCHED or caused an timer to need
+	 * reprogramming. Fall through -- see handle_vint() below -- and have
+	 * the idle loop take care of things.
+	 */
 	idle	0
-	/* end of rollback region */
+	/* end of idle interrupt region */
 1:	jr	ra
 SYM_FUNC_END(__arch_cpu_idle)
 
 SYM_CODE_START(handle_vint)
 	BACKUP_T0T1
 	SAVE_ALL
-	la_abs	t1, __arch_cpu_idle
+	la_abs	t1, 1b
 	LONG_L	t0, sp, PT_ERA
-	/* 32 byte rollback region */
-	ori	t0, t0, 0x1f
-	xori	t0, t0, 0x1f
+	/* 3 instructions idle interrupt region */
+	ori	t0, t0, 0b1100
 	bne	t0, t1, 1f
 	LONG_S	t0, sp, PT_ERA
 1:	move	a0, sp
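
A note on the fast-forward arithmetic in handle_vint() above:
__arch_cpu_idle() is 32-byte aligned (.align 5) and the idle interrupt
region consists of its first three 4-byte instructions (ori, csrxchg,
idle), so an exception return address pointing into that region has 0x0,
0x4 or 0x8 in its low bits. ORing it with 0b1100 therefore yields the
address of label 1 (the "jr ra") at offset 12, which "la_abs t1, 1b"
loads for the comparison. A small sketch of that arithmetic (the base
address below is hypothetical, chosen only to be 32-byte aligned):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Hypothetical 32-byte-aligned address of __arch_cpu_idle. */
        uint64_t func = 0x90000000001234e0ULL;
        uint64_t label1 = func + 12;            /* "1: jr ra", end of the idle routine */

        for (int insn = 0; insn < 3; insn++) {
                uint64_t era = func + 4 * insn; /* IRQ hit ori/csrxchg/idle */
                uint64_t fwd = era | 0xc;       /* what "ori t0, t0, 0b1100" computes */

                printf("era 0x%llx -> 0x%llx (label 1 at 0x%llx)\n",
                       (unsigned long long)era,
                       (unsigned long long)fwd,
                       (unsigned long long)label1);
        }
        return 0;
}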
--- a/arch/loongarch/kernel/idle.c
+++ b/arch/loongarch/kernel/idle.c
@@ -11,7 +11,6 @@
 
 void __cpuidle arch_cpu_idle(void)
 {
-	raw_local_irq_enable();
-	__arch_cpu_idle(); /* idle instruction needs irq enabled */
+	__arch_cpu_idle();
 	raw_local_irq_disable();
 }
--- a/arch/loongarch/kernel/reboot.c
+++ b/arch/loongarch/kernel/reboot.c
@@ -33,7 +33,7 @@ void machine_halt(void)
 	console_flush_on_panic(CONSOLE_FLUSH_PENDING);
 
 	while (true) {
-		__arch_cpu_idle();
+		__asm__ __volatile__("idle 0" : : : "memory");
 	}
 }
 
@@ -53,7 +53,7 @@ void machine_power_off(void)
 #endif
 
 	while (true) {
-		__arch_cpu_idle();
+		__asm__ __volatile__("idle 0" : : : "memory");
 	}
 }
 
@@ -74,6 +74,6 @@ void machine_restart(char *command)
 	acpi_reboot();
 
 	while (true) {
-		__arch_cpu_idle();
+		__asm__ __volatile__("idle 0" : : : "memory");
 	}
 }