arm64: use ENDPIPROC() to annotate position independent assembler routines
For more control over which functions are called with the MMU off or
with the UEFI 1:1 mapping active, annotate some assembler routines as
position independent. This is done by introducing ENDPIPROC(), which
replaces the ENDPROC() declaration of those routines.

Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
commit 207918461e
parent d4dddfdbbc
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -193,4 +193,15 @@ lr	.req	x30		// link register
 	str	\src, [\tmp, :lo12:\sym]
 	.endm
 
+/*
+ * Annotate a function as position independent, i.e., safe to be called before
+ * the kernel virtual mapping is activated.
+ */
+#define ENDPIPROC(x)			\
+	.globl	__pi_##x;		\
+	.type	__pi_##x, %function;	\
+	.set	__pi_##x, x;		\
+	.size	__pi_##x, . - x;	\
+	ENDPROC(x)
+
 #endif	/* __ASM_ASSEMBLER_H */
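As an illustration (not part of the commit), expanding ENDPIPROC(memcpy) by hand gives roughly the following; the .set directive creates a __pi_memcpy alias with the same address as memcpy, and the __pi_ prefix marks call sites that are expected to work before the kernel virtual mapping is up:

	.globl	__pi_memcpy
	.type	__pi_memcpy, %function
	.set	__pi_memcpy, memcpy		// alias: same value as memcpy
	.size	__pi_memcpy, . - memcpy		// same size as memcpy
	ENDPROC(memcpy)				// usual end-of-function marker

Position independent callers can then be linked against __pi_memcpy, while ordinary kernel code keeps calling memcpy.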
--- a/arch/arm64/lib/memchr.S
+++ b/arch/arm64/lib/memchr.S
@@ -41,4 +41,4 @@ ENTRY(memchr)
 	ret
 2:	mov	x0, #0
 	ret
-ENDPROC(memchr)
+ENDPIPROC(memchr)
--- a/arch/arm64/lib/memcmp.S
+++ b/arch/arm64/lib/memcmp.S
@@ -255,4 +255,4 @@ CPU_LE( rev	data2, data2 )
 .Lret0:
 	mov	result, #0
 	ret
-ENDPROC(memcmp)
+ENDPIPROC(memcmp)
--- a/arch/arm64/lib/memcpy.S
+++ b/arch/arm64/lib/memcpy.S
@@ -71,4 +71,4 @@
 ENTRY(memcpy)
 #include "copy_template.S"
 	ret
-ENDPROC(memcpy)
+ENDPIPROC(memcpy)
--- a/arch/arm64/lib/memmove.S
+++ b/arch/arm64/lib/memmove.S
@@ -194,4 +194,4 @@ ENTRY(memmove)
 	tst	count, #0x3f
 	b.ne	.Ltail63
 	ret
-ENDPROC(memmove)
+ENDPIPROC(memmove)
--- a/arch/arm64/lib/memset.S
+++ b/arch/arm64/lib/memset.S
@@ -213,4 +213,4 @@ ENTRY(memset)
 	ands	count, count, zva_bits_x
 	b.ne	.Ltail_maybe_long
 	ret
-ENDPROC(memset)
+ENDPIPROC(memset)
--- a/arch/arm64/lib/strcmp.S
+++ b/arch/arm64/lib/strcmp.S
@@ -231,4 +231,4 @@ CPU_BE( orr	syndrome, diff, has_nul )
 	lsr	data1, data1, #56
 	sub	result, data1, data2, lsr #56
 	ret
-ENDPROC(strcmp)
+ENDPIPROC(strcmp)
--- a/arch/arm64/lib/strlen.S
+++ b/arch/arm64/lib/strlen.S
@@ -123,4 +123,4 @@ CPU_LE( lsr	tmp2, tmp2, tmp1 )	/* Shift (tmp1 & 63).  */
 	csinv	data1, data1, xzr, le
 	csel	data2, data2, data2a, le
 	b	.Lrealigned
-ENDPROC(strlen)
+ENDPIPROC(strlen)
--- a/arch/arm64/lib/strncmp.S
+++ b/arch/arm64/lib/strncmp.S
@@ -307,4 +307,4 @@ CPU_BE( orr	syndrome, diff, has_nul )
 .Lret0:
 	mov	result, #0
 	ret
-ENDPROC(strncmp)
+ENDPIPROC(strncmp)
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -98,7 +98,7 @@ ENTRY(__flush_dcache_area)
 	b.lo	1b
 	dsb	sy
 	ret
-ENDPROC(__flush_dcache_area)
+ENDPIPROC(__flush_dcache_area)
 
 /*
  *	__inval_cache_range(start, end)
@@ -131,7 +131,7 @@ __dma_inv_range:
 	b.lo	2b
 	dsb	sy
 	ret
-ENDPROC(__inval_cache_range)
+ENDPIPROC(__inval_cache_range)
 ENDPROC(__dma_inv_range)
 
 /*
@@ -171,7 +171,7 @@ ENTRY(__dma_flush_range)
 	b.lo	1b
 	dsb	sy
 	ret
-ENDPROC(__dma_flush_range)
+ENDPIPROC(__dma_flush_range)
 
 /*
  *	__dma_map_area(start, size, dir)
@@ -184,7 +184,7 @@ ENTRY(__dma_map_area)
 	cmp	w2, #DMA_FROM_DEVICE
 	b.eq	__dma_inv_range
 	b	__dma_clean_range
-ENDPROC(__dma_map_area)
+ENDPIPROC(__dma_map_area)
 
 /*
  *	__dma_unmap_area(start, size, dir)
@@ -197,4 +197,4 @@ ENTRY(__dma_unmap_area)
 	cmp	w2, #DMA_TO_DEVICE
 	b.ne	__dma_inv_range
 	ret
-ENDPROC(__dma_unmap_area)
+ENDPIPROC(__dma_unmap_area)
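For reference, a minimal standalone sketch of the aliasing mechanism ENDPIPROC relies on (hypothetical routine name, not kernel code); assembling it produces two global function symbols at the same address:

	.text
	.globl	myfunc
	.type	myfunc, %function
myfunc:						// hypothetical routine
	mov	x0, #0
	ret
	.size	myfunc, . - myfunc

	.globl	__pi_myfunc
	.type	__pi_myfunc, %function
	.set	__pi_myfunc, myfunc		// second name for the same code
	.size	__pi_myfunc, . - myfunc

Note that in the cache.S hunks above, only __inval_cache_range gains a __pi_ alias; ENDPROC(__dma_inv_range), which closes a second entry point to the same code, is left unchanged.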