arm64: entry: Annotate vector table and handlers as code

In an effort to clarify and simplify the annotation of assembly
functions new macros have been introduced. These replace ENTRY and
ENDPROC with two different annotations for normal functions and those
with unusual calling conventions. The vector table and handlers aren't
normal C style code so should be annotated as CODE.

Signed-off-by: Mark Brown <broonie@kernel.org>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
This commit is contained in:
Mark Brown 2020-02-18 19:58:27 +00:00 committed by Catalin Marinas
parent b8e505484e
commit 0ccbd98a92
1 changed file with 38 additions and 38 deletions

View File

@@ -465,7 +465,7 @@ alternative_endif
.pushsection ".entry.text", "ax"
.align 11
ENTRY(vectors)
SYM_CODE_START(vectors)
kernel_ventry 1, sync_invalid // Synchronous EL1t
kernel_ventry 1, irq_invalid // IRQ EL1t
kernel_ventry 1, fiq_invalid // FIQ EL1t
@@ -492,7 +492,7 @@ ENTRY(vectors)
kernel_ventry 0, fiq_invalid, 32 // FIQ 32-bit EL0
kernel_ventry 0, error_invalid, 32 // Error 32-bit EL0
#endif
END(vectors)
SYM_CODE_END(vectors)
#ifdef CONFIG_VMAP_STACK
/*
@@ -534,57 +534,57 @@ __bad_stack:
ASM_BUG()
.endm
el0_sync_invalid:
SYM_CODE_START_LOCAL(el0_sync_invalid)
inv_entry 0, BAD_SYNC
ENDPROC(el0_sync_invalid)
SYM_CODE_END(el0_sync_invalid)
el0_irq_invalid:
SYM_CODE_START_LOCAL(el0_irq_invalid)
inv_entry 0, BAD_IRQ
ENDPROC(el0_irq_invalid)
SYM_CODE_END(el0_irq_invalid)
el0_fiq_invalid:
SYM_CODE_START_LOCAL(el0_fiq_invalid)
inv_entry 0, BAD_FIQ
ENDPROC(el0_fiq_invalid)
SYM_CODE_END(el0_fiq_invalid)
el0_error_invalid:
SYM_CODE_START_LOCAL(el0_error_invalid)
inv_entry 0, BAD_ERROR
ENDPROC(el0_error_invalid)
SYM_CODE_END(el0_error_invalid)
#ifdef CONFIG_COMPAT
el0_fiq_invalid_compat:
SYM_CODE_START_LOCAL(el0_fiq_invalid_compat)
inv_entry 0, BAD_FIQ, 32
ENDPROC(el0_fiq_invalid_compat)
SYM_CODE_END(el0_fiq_invalid_compat)
#endif
el1_sync_invalid:
SYM_CODE_START_LOCAL(el1_sync_invalid)
inv_entry 1, BAD_SYNC
ENDPROC(el1_sync_invalid)
SYM_CODE_END(el1_sync_invalid)
el1_irq_invalid:
SYM_CODE_START_LOCAL(el1_irq_invalid)
inv_entry 1, BAD_IRQ
ENDPROC(el1_irq_invalid)
SYM_CODE_END(el1_irq_invalid)
el1_fiq_invalid:
SYM_CODE_START_LOCAL(el1_fiq_invalid)
inv_entry 1, BAD_FIQ
ENDPROC(el1_fiq_invalid)
SYM_CODE_END(el1_fiq_invalid)
el1_error_invalid:
SYM_CODE_START_LOCAL(el1_error_invalid)
inv_entry 1, BAD_ERROR
ENDPROC(el1_error_invalid)
SYM_CODE_END(el1_error_invalid)
/*
* EL1 mode handlers.
*/
.align 6
el1_sync:
SYM_CODE_START_LOCAL_NOALIGN(el1_sync)
kernel_entry 1
mov x0, sp
bl el1_sync_handler
kernel_exit 1
ENDPROC(el1_sync)
SYM_CODE_END(el1_sync)
.align 6
el1_irq:
SYM_CODE_START_LOCAL_NOALIGN(el1_irq)
kernel_entry 1
gic_prio_irq_setup pmr=x20, tmp=x1
enable_da_f
@@ -639,42 +639,42 @@ alternative_else_nop_endif
#endif
kernel_exit 1
ENDPROC(el1_irq)
SYM_CODE_END(el1_irq)
/*
* EL0 mode handlers.
*/
.align 6
el0_sync:
SYM_CODE_START_LOCAL_NOALIGN(el0_sync)
kernel_entry 0
mov x0, sp
bl el0_sync_handler
b ret_to_user
ENDPROC(el0_sync)
SYM_CODE_END(el0_sync)
#ifdef CONFIG_COMPAT
.align 6
el0_sync_compat:
SYM_CODE_START_LOCAL_NOALIGN(el0_sync_compat)
kernel_entry 0, 32
mov x0, sp
bl el0_sync_compat_handler
b ret_to_user
ENDPROC(el0_sync_compat)
SYM_CODE_END(el0_sync_compat)
.align 6
el0_irq_compat:
SYM_CODE_START_LOCAL_NOALIGN(el0_irq_compat)
kernel_entry 0, 32
b el0_irq_naked
ENDPROC(el0_irq_compat)
SYM_CODE_END(el0_irq_compat)
el0_error_compat:
SYM_CODE_START_LOCAL_NOALIGN(el0_error_compat)
kernel_entry 0, 32
b el0_error_naked
ENDPROC(el0_error_compat)
SYM_CODE_END(el0_error_compat)
#endif
.align 6
el0_irq:
SYM_CODE_START_LOCAL_NOALIGN(el0_irq)
kernel_entry 0
el0_irq_naked:
gic_prio_irq_setup pmr=x20, tmp=x0
@@ -696,9 +696,9 @@ el0_irq_naked:
bl trace_hardirqs_on
#endif
b ret_to_user
ENDPROC(el0_irq)
SYM_CODE_END(el0_irq)
el1_error:
SYM_CODE_START_LOCAL(el1_error)
kernel_entry 1
mrs x1, esr_el1
gic_prio_kentry_setup tmp=x2
@@ -706,9 +706,9 @@ el1_error:
mov x0, sp
bl do_serror
kernel_exit 1
ENDPROC(el1_error)
SYM_CODE_END(el1_error)
el0_error:
SYM_CODE_START_LOCAL(el0_error)
kernel_entry 0
el0_error_naked:
mrs x25, esr_el1
@@ -720,7 +720,7 @@ el0_error_naked:
bl do_serror
enable_da_f
b ret_to_user
ENDPROC(el0_error)
SYM_CODE_END(el0_error)
/*
* Ok, we need to do extra processing, enter the slow path.