arm64: factor out entry stack manipulation

In subsequent patches, we will detect stack overflow in our exception
entry code, by verifying the SP after it has been decremented to make
space for the exception regs.

This verification code is small, and we can minimize its impact by
placing it directly in the vectors. To avoid redundant modification of
the SP, we also need to move the initial decrement of the SP into the
vectors.

As a preparatory step, this patch introduces kernel_ventry, which
performs this decrement, and updates the entry code accordingly.
Subsequent patches will fold SP verification into kernel_ventry.

There should be no functional change as a result of this patch.

Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
[Mark: turn into prep patch, expand commit msg]
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Will Deacon <will.deacon@arm.com>
Tested-by: Laura Abbott <labbott@redhat.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: James Morse <james.morse@arm.com>
commit b11e5759bf
parent 170976bcab
Author: Mark Rutland <mark.rutland@arm.com>
Date:   2017-07-19 17:24:49 +01:00
1 changed file with 26 additions and 21 deletions

@@ -69,8 +69,13 @@
 #define BAD_FIQ		2
 #define BAD_ERROR	3
 
-	.macro	kernel_entry, el, regsize = 64
+	.macro	kernel_ventry	label
+	.align	7
 	sub	sp, sp, #S_FRAME_SIZE
+	b	\label
+	.endm
+
+	.macro	kernel_entry, el, regsize = 64
 	.if	\regsize == 32
 	mov	w0, w0				// zero upper 32 bits of x0
 	.endif
@@ -319,31 +324,31 @@ tsk	.req	x28		// current thread_info
 
 	.align	11
 ENTRY(vectors)
-	ventry	el1_sync_invalid		// Synchronous EL1t
-	ventry	el1_irq_invalid			// IRQ EL1t
-	ventry	el1_fiq_invalid			// FIQ EL1t
-	ventry	el1_error_invalid		// Error EL1t
+	kernel_ventry	el1_sync_invalid	// Synchronous EL1t
+	kernel_ventry	el1_irq_invalid		// IRQ EL1t
+	kernel_ventry	el1_fiq_invalid		// FIQ EL1t
+	kernel_ventry	el1_error_invalid	// Error EL1t
 
-	ventry	el1_sync			// Synchronous EL1h
-	ventry	el1_irq				// IRQ EL1h
-	ventry	el1_fiq_invalid			// FIQ EL1h
-	ventry	el1_error_invalid		// Error EL1h
+	kernel_ventry	el1_sync		// Synchronous EL1h
+	kernel_ventry	el1_irq			// IRQ EL1h
+	kernel_ventry	el1_fiq_invalid		// FIQ EL1h
+	kernel_ventry	el1_error_invalid	// Error EL1h
 
-	ventry	el0_sync			// Synchronous 64-bit EL0
-	ventry	el0_irq				// IRQ 64-bit EL0
-	ventry	el0_fiq_invalid			// FIQ 64-bit EL0
-	ventry	el0_error_invalid		// Error 64-bit EL0
+	kernel_ventry	el0_sync		// Synchronous 64-bit EL0
+	kernel_ventry	el0_irq			// IRQ 64-bit EL0
+	kernel_ventry	el0_fiq_invalid		// FIQ 64-bit EL0
+	kernel_ventry	el0_error_invalid	// Error 64-bit EL0
 
 #ifdef CONFIG_COMPAT
-	ventry	el0_sync_compat			// Synchronous 32-bit EL0
-	ventry	el0_irq_compat			// IRQ 32-bit EL0
-	ventry	el0_fiq_invalid_compat		// FIQ 32-bit EL0
-	ventry	el0_error_invalid_compat	// Error 32-bit EL0
+	kernel_ventry	el0_sync_compat		// Synchronous 32-bit EL0
+	kernel_ventry	el0_irq_compat		// IRQ 32-bit EL0
+	kernel_ventry	el0_fiq_invalid_compat	// FIQ 32-bit EL0
+	kernel_ventry	el0_error_invalid_compat	// Error 32-bit EL0
 #else
-	ventry	el0_sync_invalid		// Synchronous 32-bit EL0
-	ventry	el0_irq_invalid			// IRQ 32-bit EL0
-	ventry	el0_fiq_invalid			// FIQ 32-bit EL0
-	ventry	el0_error_invalid		// Error 32-bit EL0
+	kernel_ventry	el0_sync_invalid	// Synchronous 32-bit EL0
+	kernel_ventry	el0_irq_invalid		// IRQ 32-bit EL0
+	kernel_ventry	el0_fiq_invalid		// FIQ 32-bit EL0
+	kernel_ventry	el0_error_invalid	// Error 32-bit EL0
 #endif
 END(vectors)
 