builtins: support building ARM builtins for a HF target

The `-target` determines the calling convention (CC) used for the builtins.
HF targets (with either floating-point ABI) always use AAPCS-VFP for the
builtins, except for the AEABI builtins, which use AAPCS.  Non-HF targets
(with either floating-point ABI) always use AAPCS for both the builtins and
the AEABI builtins.  This change introduces the thunks necessary to switch
the CC for the floating-point operations.  The thunks are not currently
enabled; enabling them should eventually be driven by the target used to
build compiler-rt.  As a stop-gap, however, the define
(COMPILER_RT_ARMHF_TARGET) can be passed via ASFLAGS to get the thunks.

llvm-svn: 291677
Saleem Abdulrasool 2017-01-11 16:19:25 +00:00
parent 4bf308317d
commit 9c287bca23
35 changed files with 179 additions and 7 deletions
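
As a rough C-level sketch (illustrative only, not part of this change): on a
hard-float target the compiler expects a builtin such as __unordsf2 to take
its float arguments in VFP registers (AAPCS-VFP), while the AEABI helper
__aeabi_fcmpun is always invoked with the base AAPCS (arguments in r0/r1).
With the standard ARM `pcs` function attribute the contrast would look
something like:

    /* Illustrative declarations, assuming an ARM hard-float target; neither
       line appears in this commit.  The attribute values are the standard
       "aapcs" / "aapcs-vfp" spellings. */
    int __unordsf2(float a, float b) __attribute__((pcs("aapcs-vfp")));  /* args in s0/s1 */
    int __aeabi_fcmpun(float a, float b) __attribute__((pcs("aapcs")));  /* args in r0/r1 */

The stop-gap mentioned above amounts to passing the define through the
assembler flags when building compiler-rt for a hard-float target, for
example ASFLAGS=-DCOMPILER_RT_ARMHF_TARGET.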


@@ -18,10 +18,14 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__adddf3vfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vadd.f64 d0, d0, d1
#else
vmov d6, r0, r1 // move first param from r0/r1 pair into d6
vmov d7, r2, r3 // move second param from r2/r3 pair into d7
vadd.f64 d6, d6, d7
vmov r0, r1, d6 // move result back to r0/r1 pair
#endif
bx lr
END_COMPILERRT_FUNCTION(__adddf3vfp)


@@ -18,10 +18,14 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__addsf3vfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vadd.f32 s0, s0, s1
#else
vmov s14, r0 // move first param from r0 into float register
vmov s15, r1 // move second param from r1 into float register
vadd.f32 s14, s14, s15
vmov r0, s14 // move result back to r0
#endif
bx lr
END_COMPILERRT_FUNCTION(__addsf3vfp)


@@ -43,8 +43,14 @@
.thumb
#endif
@ int __eqsf2(float a, float b)
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__eqsf2)
#if defined(COMPILER_RT_ARMHF_TARGET)
vmov r0, s0
vmov r1, s1
#endif
// Make copies of a and b with the sign bit shifted off the top. These will
// be used to detect zeros and NaNs.
#if __ARM_ARCH_ISA_THUMB == 1
@@ -166,16 +172,23 @@ LOCAL_LABEL(CHECK_NAN):
JMP(lr)
#endif
END_COMPILERRT_FUNCTION(__eqsf2)
DEFINE_COMPILERRT_FUNCTION_ALIAS(__lesf2, __eqsf2)
DEFINE_COMPILERRT_FUNCTION_ALIAS(__ltsf2, __eqsf2)
DEFINE_COMPILERRT_FUNCTION_ALIAS(__nesf2, __eqsf2)
@ int __gtsf2(float a, float b)
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__gtsf2)
// Identical to the preceding except that we return -1 for NaN values.
// Given that the two paths share so much code, one might be tempted to
// unify them; however, the extra code needed to do so makes the
// code-size-to-performance tradeoff very hard to justify for such small
// functions.
#if defined(COMPILER_RT_ARMHF_TARGET)
vmov r0, s0
vmov r1, s1
#endif
#if __ARM_ARCH_ISA_THUMB == 1
push {r6, lr}
lsls r2, r0, #1
@@ -235,10 +248,17 @@ LOCAL_LABEL(CHECK_NAN_2):
JMP(lr)
#endif
END_COMPILERRT_FUNCTION(__gtsf2)
DEFINE_COMPILERRT_FUNCTION_ALIAS(__gesf2, __gtsf2)
@ int __unordsf2(float a, float b)
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__unordsf2)
#if defined(COMPILER_RT_ARMHF_TARGET)
vmov r0, s0
vmov r1, s1
#endif
// Return 1 for NaN values, 0 otherwise.
lsls r2, r0, #1
lsls r3, r1, #1
@@ -262,7 +282,15 @@ DEFINE_COMPILERRT_FUNCTION(__unordsf2)
JMP(lr)
END_COMPILERRT_FUNCTION(__unordsf2)
#if defined(COMPILER_RT_ARMHF_TARGET)
DEFINE_COMPILERRT_FUNCTION(__aeabi_fcmpun)
vmov s0, r0
vmov s1, r1
b SYMBOL_NAME(__unordsf2)
END_COMPILERRT_FUNCTION(__aeabi_fcmpun)
#else
DEFINE_AEABI_FUNCTION_ALIAS(__aeabi_fcmpun, __unordsf2)
#endif
NO_EXEC_STACK_DIRECTIVE


@@ -18,10 +18,14 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__divdf3vfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vdiv.f64 d0, d0, d1
#else
vmov d6, r0, r1 // move first param from r0/r1 pair into d6
vmov d7, r2, r3 // move second param from r2/r3 pair into d7
vdiv.f64 d5, d6, d7
vmov r0, r1, d5 // move result back to r0/r1 pair
#endif
bx lr
END_COMPILERRT_FUNCTION(__divdf3vfp)


@@ -18,10 +18,14 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__divsf3vfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vdiv.f32 s0, s0, s1
#else
vmov s14, r0 // move first param from r0 into float register
vmov s15, r1 // move second param from r1 into float register
vdiv.f32 s13, s14, s15
vmov r0, s13 // move result back to r0
#endif
bx lr
END_COMPILERRT_FUNCTION(__divsf3vfp)


@@ -19,9 +19,13 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__eqdf2vfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vcmp.f64 d0, d1
#else
vmov d6, r0, r1 // load r0/r1 pair in double register
vmov d7, r2, r3 // load r2/r3 pair in double register
vcmp.f64 d6, d7
#endif
vmrs apsr_nzcv, fpscr
moveq r0, #1 // set result register to 1 if equal
movne r0, #0


@@ -19,9 +19,13 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__eqsf2vfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vcmp.f32 s0, s1
#else
vmov s14, r0 // move from GPR 0 to float register
vmov s15, r1 // move from GPR 1 to float register
vcmp.f32 s14, s15
#endif
vmrs apsr_nzcv, fpscr
moveq r0, #1 // set result register to 1 if equal
movne r0, #0


@@ -19,9 +19,13 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__extendsfdf2vfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vcvt.f64.f32 d0, s0
#else
vmov s15, r0 // load float register from R0
vcvt.f64.f32 d7, s15 // convert single to double
vmov r0, r1, d7 // return result in r0/r1 pair
#endif
bx lr
END_COMPILERRT_FUNCTION(__extendsfdf2vfp)


@@ -19,9 +19,14 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__fixdfsivfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vcvt.s32.f64 s0, d0
vmov r0, s0
#else
vmov d7, r0, r1 // load double register from R0/R1
vcvt.s32.f64 s15, d7 // convert double to 32-bit int into s15
vmov r0, s15 // move s15 to result register
#endif
bx lr
END_COMPILERRT_FUNCTION(__fixdfsivfp)


@@ -19,9 +19,14 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__fixsfsivfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vcvt.s32.f32 s0, s0
vmov r0, s0
#else
vmov s15, r0 // load float register from R0
vcvt.s32.f32 s15, s15 // convert single to 32-bit int into s15
vmov r0, s15 // move s15 to result register
#endif
bx lr
END_COMPILERRT_FUNCTION(__fixsfsivfp)


@@ -20,9 +20,14 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__fixunsdfsivfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vcvt.u32.f64 s0, d0
vmov r0, s0
#else
vmov d7, r0, r1 // load double register from R0/R1
vcvt.u32.f64 s15, d7 // convert double to 32-bit int into s15
vmov r0, s15 // move s15 to result register
#endif
bx lr
END_COMPILERRT_FUNCTION(__fixunsdfsivfp)


@@ -20,9 +20,14 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__fixunssfsivfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vcvt.u32.f32 s0, s0
vmov r0, s0
#else
vmov s15, r0 // load float register from R0
vcvt.u32.f32 s15, s15 // convert single to 32-bit unsigned into s15
vmov r0, s15 // move s15 to result register
#endif
bx lr
END_COMPILERRT_FUNCTION(__fixunssfsivfp)


@@ -19,9 +19,14 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__floatsidfvfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vmov s0, r0
vcvt.f64.s32 d0, s0
#else
vmov s15, r0 // move int to float register s15
vcvt.f64.s32 d7, s15 // convert 32-bit int in s15 to double in d7
vmov r0, r1, d7 // move d7 to result register pair r0/r1
#endif
bx lr
END_COMPILERRT_FUNCTION(__floatsidfvfp)


@@ -19,9 +19,14 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__floatsisfvfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vmov s0, r0
vcvt.f32.s32 s0, s0
#else
vmov s15, r0 // move int to float register s15
vcvt.f32.s32 s15, s15 // convert 32-bit int in s15 to float in s15
vmov r0, s15 // move s15 to result register
#endif
bx lr
END_COMPILERRT_FUNCTION(__floatsisfvfp)


@@ -19,9 +19,14 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__floatunssidfvfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vmov s0, r0
vcvt.f64.u32 d0, s0
#else
vmov s15, r0 // move int to float register s15
vcvt.f64.u32 d7, s15 // convert 32-bit int in s15 to double in d7
vmov r0, r1, d7 // move d7 to result register pair r0/r1
#endif
bx lr
END_COMPILERRT_FUNCTION(__floatunssidfvfp)


@@ -19,9 +19,14 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__floatunssisfvfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vmov s0, r0
vcvt.f32.u32 s0, s0
#else
vmov s15, r0 // move int to float register s15
vcvt.f32.u32 s15, s15 // convert 32-bit int in s15 to float in s15
vmov r0, s15 // move s15 to result register
#endif
bx lr
END_COMPILERRT_FUNCTION(__floatunssisfvfp)


@@ -19,9 +19,13 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__gedf2vfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vcmp.f64 d0, d1
#else
vmov d6, r0, r1 // load r0/r1 pair in double register
vmov d7, r2, r3 // load r2/r3 pair in double register
vcmp.f64 d6, d7
#endif
vmrs apsr_nzcv, fpscr
movge r0, #1 // set result register to 1 if greater than or equal
movlt r0, #0


@@ -19,9 +19,13 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__gesf2vfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vcmp.f32 s0, s1
#else
vmov s14, r0 // move from GPR 0 to float register
vmov s15, r1 // move from GPR 1 to float register
vcmp.f32 s14, s15
#endif
vmrs apsr_nzcv, fpscr
movge r0, #1 // set result register to 1 if greater than or equal
movlt r0, #0


@@ -19,9 +19,13 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__gtdf2vfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vcmp.f64 d0, d1
#else
vmov d6, r0, r1 // load r0/r1 pair in double register
vmov d7, r2, r3 // load r2/r3 pair in double register
vcmp.f64 d6, d7
#endif
vmrs apsr_nzcv, fpscr
movgt r0, #1 // set result register to 1 if greater than
movle r0, #0


@@ -19,9 +19,13 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__gtsf2vfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vcmp.f32 s0, s1
#else
vmov s14, r0 // move from GPR 0 to float register
vmov s15, r1 // move from GPR 1 to float register
vcmp.f32 s14, s15
#endif
vmrs apsr_nzcv, fpscr
movgt r0, #1 // set result register to 1 if greater than
movle r0, #0


@@ -19,9 +19,13 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__ledf2vfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vcmp.f64 d0, d1
#else
vmov d6, r0, r1 // load r0/r1 pair in double register
vmov d7, r2, r3 // load r2/r3 pair in double register
vcmp.f64 d6, d7
#endif
vmrs apsr_nzcv, fpscr
movls r0, #1 // set result register to 1 if less than or equal
movhi r0, #0


@@ -19,9 +19,13 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__lesf2vfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vcmp.f32 s0, s1
#else
vmov s14, r0 // move from GPR 0 to float register
vmov s15, r1 // move from GPR 1 to float register
vcmp.f32 s14, s15
#endif
vmrs apsr_nzcv, fpscr
movls r0, #1 // set result register to 1 if less than or equal
movhi r0, #0


@@ -19,9 +19,13 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__ltdf2vfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vcmp.f64 d0, d1
#else
vmov d6, r0, r1 // load r0/r1 pair in double register
vmov d7, r2, r3 // load r2/r3 pair in double register
vcmp.f64 d6, d7
#endif
vmrs apsr_nzcv, fpscr
movmi r0, #1 // set result register to 1 if less than
movpl r0, #0


@@ -19,9 +19,13 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__ltsf2vfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vcmp.f32 s0, s1
#else
vmov s14, r0 // move from GPR 0 to float register
vmov s15, r1 // move from GPR 1 to float register
vcmp.f32 s14, s15
#endif
vmrs apsr_nzcv, fpscr
movmi r0, #1 // set result register to 1 if less than
movpl r0, #0


@@ -18,10 +18,14 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__muldf3vfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vmul.f64 d0, d0, d1
#else
vmov d6, r0, r1 // move first param from r0/r1 pair into d6
vmov d7, r2, r3 // move second param from r2/r3 pair into d7
vmul.f64 d6, d6, d7
vmov r0, r1, d6 // move result back to r0/r1 pair
#endif
bx lr
END_COMPILERRT_FUNCTION(__muldf3vfp)


@@ -18,9 +18,13 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__mulsf3vfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vmul.f32 s0, s0, s1
#else
vmov s14, r0 // move first param from r0 into float register
vmov s15, r1 // move second param from r1 into float register
vmul.f32 s13, s14, s15
#endif
vmov r0, s13 // move result back to r0
bx lr
END_COMPILERRT_FUNCTION(__mulsf3vfp)


@@ -19,9 +19,13 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__nedf2vfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vcmp.f64 d0, d1
#else
vmov d6, r0, r1 // load r0/r1 pair in double register
vmov d7, r2, r3 // load r2/r3 pair in double register
vcmp.f64 d6, d7
#endif
vmrs apsr_nzcv, fpscr
movne r0, #1 // set result register to 1 if unequal
moveq r0, #0


@@ -18,7 +18,11 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__negdf2vfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vneg.f64 d0, d0
#else
eor r1, r1, #-2147483648 // flip sign bit on double in r0/r1 pair
#endif
bx lr
END_COMPILERRT_FUNCTION(__negdf2vfp)


@@ -18,7 +18,11 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__negsf2vfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vneg.f32 s0, s0
#else
eor r0, r0, #-2147483648 // flip sign bit on float in r0
#endif
bx lr
END_COMPILERRT_FUNCTION(__negsf2vfp)


@@ -19,9 +19,13 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__nesf2vfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vcmp.f32 s0, s1
#else
vmov s14, r0 // move from GPR 0 to float register
vmov s15, r1 // move from GPR 1 to float register
vcmp.f32 s14, s15
#endif
vmrs apsr_nzcv, fpscr
movne r0, #1 // set result register to 1 if unequal
moveq r0, #0


@@ -18,10 +18,14 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__subdf3vfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vsub.f64 d0, d0, d1
#else
vmov d6, r0, r1 // move first param from r0/r1 pair into d6
vmov d7, r2, r3 // move second param from r2/r3 pair into d7
vsub.f64 d6, d6, d7
vmov r0, r1, d6 // move result back to r0/r1 pair
#endif
bx lr
END_COMPILERRT_FUNCTION(__subdf3vfp)


@@ -19,10 +19,14 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__subsf3vfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vsub.f32 s0, s0, s1
#else
vmov s14, r0 // move first param from r0 into float register
vmov s15, r1 // move second param from r1 into float register
vsub.f32 s14, s14, s15
vmov r0, s14 // move result back to r0
#endif
bx lr
END_COMPILERRT_FUNCTION(__subsf3vfp)


@@ -19,9 +19,13 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__truncdfsf2vfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vcvt.f32.f64 s0, d0
#else
vmov d7, r0, r1 // load double from r0/r1 pair
vcvt.f32.f64 s15, d7 // convert double to single (truncate precision)
vmov r0, s15 // return result in r0
#endif
bx lr
END_COMPILERRT_FUNCTION(__truncdfsf2vfp)


@@ -19,9 +19,13 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__unorddf2vfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vcmp.f64 d0, d1
#else
vmov d6, r0, r1 // load r0/r1 pair in double register
vmov d7, r2, r3 // load r2/r3 pair in double register
vcmp.f64 d6, d7
#endif
vmrs apsr_nzcv, fpscr
movvs r0, #1 // set result register to 1 if "overflow" (any NaNs)
movvc r0, #0


@@ -19,9 +19,13 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__unordsf2vfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vcmp.f32 s0, s1
#else
vmov s14, r0 // move from GPR 0 to float register
vmov s15, r1 // move from GPR 1 to float register
vcmp.f32 s14, s15
#endif
vmrs apsr_nzcv, fpscr
movvs r0, #1 // set result register to 1 if "overflow" (any NaNs)
movvc r0, #0