ARM: v7m: enable support for IRQ stacks

Enable support for IRQ stacks on !MMU, and add the code to the IRQ entry
path to switch to the IRQ stack if not running from it already.

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Tested-by: Marc Zyngier <maz@kernel.org>
Tested-by: Vladimir Murzin <vladimir.murzin@arm.com> # ARMv7M
This commit is contained in:
Ard Biesheuvel 2021-12-02 09:36:59 +01:00
parent 9c46929e79
commit cafc0eab16
2 changed files with 15 additions and 3 deletions

View File

@ -1158,7 +1158,6 @@ config CURRENT_POINTER_IN_TPIDRURO
 config IRQSTACKS
 	def_bool y
-	depends on MMU
 	select HAVE_IRQ_EXIT_ON_IRQ_STACK
 	select HAVE_SOFTIRQ_ON_OWN_STACK

View File

@ -40,11 +40,24 @@ __irq_entry:
 	@ Invoke the IRQ handler
 	@
 	mov	r0, sp
-	stmdb	sp!, {lr}
+	ldr_this_cpu sp, irq_stack_ptr, r1, r2
+
+	@
+	@ If we took the interrupt while running in the kernel, we may already
+	@ be using the IRQ stack, so revert to the original value in that case.
+	@
+	subs	r2, sp, r0		@ SP above bottom of IRQ stack?
+	rsbscs	r2, r2, #THREAD_SIZE	@ ... and below the top?
+	movcs	sp, r0
+
+	push	{r0, lr}		@ preserve LR and original SP
+
 	@ routine called with r0 = struct pt_regs *
 	bl	generic_handle_arch_irq
-	pop	{lr}
+	pop	{r0, lr}
+	mov	sp, r0
 	@
 	@ Check for any pending work if returning to user
 	@