[builtins] Reformat builtins with clang-format

Update formatting to use the LLVM style.

This is part of the cleanup proposed in "[RFC] compiler-rt builtins
cleanup and refactoring".

Differential Revision: https://reviews.llvm.org/D60351

llvm-svn: 359410
Petr Hosek 2019-04-28 21:53:32 +00:00
parent 65f12f66f6
commit 082b89b25f
193 changed files with 5270 additions and 5802 deletions
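Because the hunks below interleave the pre- and post-format lines, here is a reading aid showing the kind of transformation this commit applies, taken from the first file (__absvdi2). The clang-format invocation and the .clang-format note in the comment are assumptions, not recorded in the commit message; the snippet itself is a fragment of absvdi2.c and relies on int_lib.h for the di_int and COMPILER_RT_ABI definitions, so it is not a standalone translation unit.

/* Presumed workflow (an assumption, not stated in the commit message):
 *   clang-format -i -style=LLVM <builtins sources>
 * or, equivalently, a .clang-format file containing "BasedOnStyle: LLVM".
 *
 * Before: return type on its own line, opening brace on the next line,
 * and binary operators such as N-1 written without spaces.
 */
COMPILER_RT_ABI di_int
__absvdi2(di_int a)
{
    const int N = (int)(sizeof(di_int) * CHAR_BIT);
    if (a == ((di_int)1 << (N-1)))
        compilerrt_abort();
    const di_int t = a >> (N - 1);
    return (a ^ t) - t;
}

/* After: LLVM style keeps the signature on one line, attaches the brace,
 * uses two-space indentation, and spaces binary operators consistently.
 */
COMPILER_RT_ABI di_int __absvdi2(di_int a) {
  const int N = (int)(sizeof(di_int) * CHAR_BIT);
  if (a == ((di_int)1 << (N - 1)))
    compilerrt_abort();
  const di_int t = a >> (N - 1);
  return (a ^ t) - t;
}

The same pattern (one-line signatures, attached braces, spaced operators, sorted includes, and reflowed macro continuations) repeats throughout the file diffs that follow.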

View File

@ -17,11 +17,9 @@
/* Effects: aborts if abs(x) < 0 */
COMPILER_RT_ABI di_int
__absvdi2(di_int a)
{
COMPILER_RT_ABI di_int __absvdi2(di_int a) {
const int N = (int)(sizeof(di_int) * CHAR_BIT);
if (a == ((di_int)1 << (N-1)))
if (a == ((di_int)1 << (N - 1)))
compilerrt_abort();
const di_int t = a >> (N - 1);
return (a ^ t) - t;

View File

@ -17,11 +17,9 @@
/* Effects: aborts if abs(x) < 0 */
COMPILER_RT_ABI si_int
__absvsi2(si_int a)
{
COMPILER_RT_ABI si_int __absvsi2(si_int a) {
const int N = (int)(sizeof(si_int) * CHAR_BIT);
if (a == (1 << (N-1)))
if (a == (1 << (N - 1)))
compilerrt_abort();
const si_int t = a >> (N - 1);
return (a ^ t) - t;

View File

@ -19,15 +19,12 @@
/* Effects: aborts if abs(x) < 0 */
COMPILER_RT_ABI ti_int
__absvti2(ti_int a)
{
COMPILER_RT_ABI ti_int __absvti2(ti_int a) {
const int N = (int)(sizeof(ti_int) * CHAR_BIT);
if (a == ((ti_int)1 << (N-1)))
if (a == ((ti_int)1 << (N - 1)))
compilerrt_abort();
const ti_int s = a >> (N - 1);
return (a ^ s) - s;
}
#endif /* CRT_HAS_128BIT */

View File

@ -14,15 +14,11 @@
#define DOUBLE_PRECISION
#include "fp_add_impl.inc"
COMPILER_RT_ABI double __adddf3(double a, double b){
return __addXf3__(a, b);
}
COMPILER_RT_ABI double __adddf3(double a, double b) { return __addXf3__(a, b); }
#if defined(__ARM_EABI__)
#if defined(COMPILER_RT_ARMHF_TARGET)
AEABI_RTABI double __aeabi_dadd(double a, double b) {
return __adddf3(a, b);
}
AEABI_RTABI double __aeabi_dadd(double a, double b) { return __adddf3(a, b); }
#else
AEABI_RTABI double __aeabi_dadd(double a, double b) COMPILER_RT_ALIAS(__adddf3);
#endif

View File

@ -14,15 +14,11 @@
#define SINGLE_PRECISION
#include "fp_add_impl.inc"
COMPILER_RT_ABI float __addsf3(float a, float b) {
return __addXf3__(a, b);
}
COMPILER_RT_ABI float __addsf3(float a, float b) { return __addXf3__(a, b); }
#if defined(__ARM_EABI__)
#if defined(COMPILER_RT_ARMHF_TARGET)
AEABI_RTABI float __aeabi_fadd(float a, float b) {
return __addsf3(a, b);
}
AEABI_RTABI float __aeabi_fadd(float a, float b) { return __addsf3(a, b); }
#else
AEABI_RTABI float __aeabi_fadd(float a, float b) COMPILER_RT_ALIAS(__addsf3);
#endif

View File

@ -17,7 +17,7 @@
#if defined(CRT_HAS_128BIT) && defined(CRT_LDBL_128BIT)
#include "fp_add_impl.inc"
COMPILER_RT_ABI long double __addtf3(long double a, long double b){
COMPILER_RT_ABI long double __addtf3(long double a, long double b) {
return __addXf3__(a, b);
}

View File

@ -17,17 +17,12 @@
/* Effects: aborts if a + b overflows */
COMPILER_RT_ABI di_int
__addvdi3(di_int a, di_int b)
{
di_int s = (du_int) a + (du_int) b;
if (b >= 0)
{
COMPILER_RT_ABI di_int __addvdi3(di_int a, di_int b) {
di_int s = (du_int)a + (du_int)b;
if (b >= 0) {
if (s < a)
compilerrt_abort();
}
else
{
} else {
if (s >= a)
compilerrt_abort();
}

View File

@ -17,17 +17,12 @@
/* Effects: aborts if a + b overflows */
COMPILER_RT_ABI si_int
__addvsi3(si_int a, si_int b)
{
si_int s = (su_int) a + (su_int) b;
if (b >= 0)
{
COMPILER_RT_ABI si_int __addvsi3(si_int a, si_int b) {
si_int s = (su_int)a + (su_int)b;
if (b >= 0) {
if (s < a)
compilerrt_abort();
}
else
{
} else {
if (s >= a)
compilerrt_abort();
}

View File

@ -19,17 +19,12 @@
/* Effects: aborts if a + b overflows */
COMPILER_RT_ABI ti_int
__addvti3(ti_int a, ti_int b)
{
ti_int s = (tu_int) a + (tu_int) b;
if (b >= 0)
{
COMPILER_RT_ABI ti_int __addvti3(ti_int a, ti_int b) {
ti_int s = (tu_int)a + (tu_int)b;
if (b >= 0) {
if (s < a)
compilerrt_abort();
}
else
{
} else {
if (s >= a)
compilerrt_abort();
}

View File

@ -7,36 +7,34 @@
* ===----------------------------------------------------------------------===
*/
#if __APPLE__
#include <Availability.h>
#include <Availability.h>
#if __IPHONE_OS_VERSION_MIN_REQUIRED
#define NOT_HERE_BEFORE_10_6(sym)
#define NOT_HERE_IN_10_8_AND_EARLIER(sym) \
extern const char sym##_tmp61 __asm("$ld$hide$os6.1$_" #sym ); \
#if __IPHONE_OS_VERSION_MIN_REQUIRED
#define NOT_HERE_BEFORE_10_6(sym)
#define NOT_HERE_IN_10_8_AND_EARLIER(sym) \
extern const char sym##_tmp61 __asm("$ld$hide$os6.1$_" #sym); \
__attribute__((visibility("default"))) const char sym##_tmp61 = 0; \
extern const char sym##_tmp60 __asm("$ld$hide$os6.0$_" #sym ); \
extern const char sym##_tmp60 __asm("$ld$hide$os6.0$_" #sym); \
__attribute__((visibility("default"))) const char sym##_tmp60 = 0; \
extern const char sym##_tmp51 __asm("$ld$hide$os5.1$_" #sym ); \
extern const char sym##_tmp51 __asm("$ld$hide$os5.1$_" #sym); \
__attribute__((visibility("default"))) const char sym##_tmp51 = 0; \
extern const char sym##_tmp50 __asm("$ld$hide$os5.0$_" #sym ); \
extern const char sym##_tmp50 __asm("$ld$hide$os5.0$_" #sym); \
__attribute__((visibility("default"))) const char sym##_tmp50 = 0;
#else
#define NOT_HERE_BEFORE_10_6(sym) \
extern const char sym##_tmp4 __asm("$ld$hide$os10.4$_" #sym ); \
#else
#define NOT_HERE_BEFORE_10_6(sym) \
extern const char sym##_tmp4 __asm("$ld$hide$os10.4$_" #sym); \
__attribute__((visibility("default"))) const char sym##_tmp4 = 0; \
extern const char sym##_tmp5 __asm("$ld$hide$os10.5$_" #sym ); \
extern const char sym##_tmp5 __asm("$ld$hide$os10.5$_" #sym); \
__attribute__((visibility("default"))) const char sym##_tmp5 = 0;
#define NOT_HERE_IN_10_8_AND_EARLIER(sym) \
extern const char sym##_tmp8 __asm("$ld$hide$os10.8$_" #sym ); \
#define NOT_HERE_IN_10_8_AND_EARLIER(sym) \
extern const char sym##_tmp8 __asm("$ld$hide$os10.8$_" #sym); \
__attribute__((visibility("default"))) const char sym##_tmp8 = 0; \
extern const char sym##_tmp7 __asm("$ld$hide$os10.7$_" #sym ); \
extern const char sym##_tmp7 __asm("$ld$hide$os10.7$_" #sym); \
__attribute__((visibility("default"))) const char sym##_tmp7 = 0; \
extern const char sym##_tmp6 __asm("$ld$hide$os10.6$_" #sym ); \
extern const char sym##_tmp6 __asm("$ld$hide$os10.6$_" #sym); \
__attribute__((visibility("default"))) const char sym##_tmp6 = 0;
#endif
#endif
/* Symbols in libSystem.dylib in 10.6 and later,
* but are in libgcc_s.dylib in earlier versions
@ -142,7 +140,6 @@ NOT_HERE_BEFORE_10_6(__udivti3)
NOT_HERE_BEFORE_10_6(__umoddi3)
NOT_HERE_BEFORE_10_6(__umodti3)
#if __ppc__
NOT_HERE_BEFORE_10_6(__gcc_qadd)
NOT_HERE_BEFORE_10_6(__gcc_qdiv)
@ -200,22 +197,21 @@ NOT_HERE_IN_10_8_AND_EARLIER(__atomic_store_2)
NOT_HERE_IN_10_8_AND_EARLIER(__atomic_store_4)
NOT_HERE_IN_10_8_AND_EARLIER(__atomic_store_8)
#if __arm__ && __DYNAMIC__
#define NOT_HERE_UNTIL_AFTER_4_3(sym) \
extern const char sym##_tmp1 __asm("$ld$hide$os3.0$_" #sym ); \
#define NOT_HERE_UNTIL_AFTER_4_3(sym) \
extern const char sym##_tmp1 __asm("$ld$hide$os3.0$_" #sym); \
__attribute__((visibility("default"))) const char sym##_tmp1 = 0; \
extern const char sym##_tmp2 __asm("$ld$hide$os3.1$_" #sym ); \
extern const char sym##_tmp2 __asm("$ld$hide$os3.1$_" #sym); \
__attribute__((visibility("default"))) const char sym##_tmp2 = 0; \
extern const char sym##_tmp3 __asm("$ld$hide$os3.2$_" #sym ); \
extern const char sym##_tmp3 __asm("$ld$hide$os3.2$_" #sym); \
__attribute__((visibility("default"))) const char sym##_tmp3 = 0; \
extern const char sym##_tmp4 __asm("$ld$hide$os4.0$_" #sym ); \
extern const char sym##_tmp4 __asm("$ld$hide$os4.0$_" #sym); \
__attribute__((visibility("default"))) const char sym##_tmp4 = 0; \
extern const char sym##_tmp5 __asm("$ld$hide$os4.1$_" #sym ); \
extern const char sym##_tmp5 __asm("$ld$hide$os4.1$_" #sym); \
__attribute__((visibility("default"))) const char sym##_tmp5 = 0; \
extern const char sym##_tmp6 __asm("$ld$hide$os4.2$_" #sym ); \
extern const char sym##_tmp6 __asm("$ld$hide$os4.2$_" #sym); \
__attribute__((visibility("default"))) const char sym##_tmp6 = 0; \
extern const char sym##_tmp7 __asm("$ld$hide$os4.3$_" #sym ); \
extern const char sym##_tmp7 __asm("$ld$hide$os4.3$_" #sym); \
__attribute__((visibility("default"))) const char sym##_tmp7 = 0;
NOT_HERE_UNTIL_AFTER_4_3(__absvdi2)
@ -338,10 +334,6 @@ NOT_HERE_UNTIL_AFTER_4_3(__divmodsi4)
NOT_HERE_UNTIL_AFTER_4_3(__udivmodsi4)
#endif // __arm__ && __DYNAMIC__
#else /* !__APPLE__ */
extern int avoid_empty_file;

View File

@ -6,10 +6,10 @@
//
//===----------------------------------------------------------------------===//
#include <stdint.h>
#include "../int_lib.h"
#include <stdint.h>
AEABI_RTABI __attribute__((visibility("hidden")))
int __aeabi_cdcmpeq_check_nan(double a, double b) {
AEABI_RTABI __attribute__((visibility("hidden"))) int
__aeabi_cdcmpeq_check_nan(double a, double b) {
return __builtin_isnan(a) || __builtin_isnan(b);
}

View File

@ -6,10 +6,10 @@
//
//===----------------------------------------------------------------------===//
#include <stdint.h>
#include "../int_lib.h"
#include <stdint.h>
AEABI_RTABI __attribute__((visibility("hidden")))
int __aeabi_cfcmpeq_check_nan(float a, float b) {
AEABI_RTABI __attribute__((visibility("hidden"))) int
__aeabi_cfcmpeq_check_nan(float a, float b) {
return __builtin_isnan(a) || __builtin_isnan(b);
}

View File

@ -36,9 +36,8 @@ __aeabi_idiv0(int return_value) {
return return_value;
}
AEABI_RTABI long long __attribute__((weak)) __attribute__((visibility("hidden")))
__aeabi_ldiv0(long long return_value) {
AEABI_RTABI long long __attribute__((weak))
__attribute__((visibility("hidden"))) __aeabi_ldiv0(long long return_value) {
return return_value;
}
#endif

View File

@ -9,10 +9,6 @@
#define DOUBLE_PRECISION
#include "../fp_lib.h"
AEABI_RTABI fp_t
__aeabi_dsub(fp_t, fp_t);
AEABI_RTABI fp_t __aeabi_dsub(fp_t, fp_t);
AEABI_RTABI fp_t
__aeabi_drsub(fp_t a, fp_t b) {
return __aeabi_dsub(b, a);
}
AEABI_RTABI fp_t __aeabi_drsub(fp_t a, fp_t b) { return __aeabi_dsub(b, a); }

View File

@ -9,10 +9,6 @@
#define SINGLE_PRECISION
#include "../fp_lib.h"
AEABI_RTABI fp_t
__aeabi_fsub(fp_t, fp_t);
AEABI_RTABI fp_t __aeabi_fsub(fp_t, fp_t);
AEABI_RTABI fp_t
__aeabi_frsub(fp_t a, fp_t b) {
return __aeabi_fsub(b, a);
}
AEABI_RTABI fp_t __aeabi_frsub(fp_t a, fp_t b) { return __aeabi_fsub(b, a); }

View File

@ -15,49 +15,47 @@
#include "../assembly.h"
#define SYNC_OP_4(op) \
.p2align 2 ; \
.thumb ; \
.syntax unified ; \
DEFINE_COMPILERRT_THUMB_FUNCTION(__sync_fetch_and_ ## op) \
dmb ; \
mov r12, r0 ; \
LOCAL_LABEL(tryatomic_ ## op): \
ldrex r0, [r12] ; \
op(r2, r0, r1) ; \
strex r3, r2, [r12] ; \
cmp r3, #0 ; \
bne LOCAL_LABEL(tryatomic_ ## op) ; \
dmb ; \
.p2align 2; \
.thumb; \
.syntax unified; \
DEFINE_COMPILERRT_THUMB_FUNCTION(__sync_fetch_and_##op) \
dmb; \
mov r12, r0; \
LOCAL_LABEL(tryatomic_##op) : ldrex r0, [r12]; \
op(r2, r0, r1); \
strex r3, r2, [r12]; \
cmp r3, #0; \
bne LOCAL_LABEL(tryatomic_##op); \
dmb; \
bx lr
#define SYNC_OP_8(op) \
.p2align 2 ; \
.thumb ; \
.syntax unified ; \
DEFINE_COMPILERRT_THUMB_FUNCTION(__sync_fetch_and_ ## op) \
push {r4, r5, r6, lr} ; \
dmb ; \
mov r12, r0 ; \
LOCAL_LABEL(tryatomic_ ## op): \
ldrexd r0, r1, [r12] ; \
op(r4, r5, r0, r1, r2, r3) ; \
strexd r6, r4, r5, [r12] ; \
cmp r6, #0 ; \
bne LOCAL_LABEL(tryatomic_ ## op) ; \
dmb ; \
pop {r4, r5, r6, pc}
.p2align 2; \
.thumb; \
.syntax unified; \
DEFINE_COMPILERRT_THUMB_FUNCTION(__sync_fetch_and_##op) \
push{r4, r5, r6, lr}; \
dmb; \
mov r12, r0; \
LOCAL_LABEL(tryatomic_##op) : ldrexd r0, r1, [r12]; \
op(r4, r5, r0, r1, r2, r3); \
strexd r6, r4, r5, [r12]; \
cmp r6, #0; \
bne LOCAL_LABEL(tryatomic_##op); \
dmb; \
pop { r4, r5, r6, pc }
#define MINMAX_4(rD, rN, rM, cmp_kind) \
cmp rN, rM ; \
mov rD, rM ; \
it cmp_kind ; \
cmp rN, rM; \
mov rD, rM; \
it cmp_kind; \
mov##cmp_kind rD, rN
#define MINMAX_8(rD_LO, rD_HI, rN_LO, rN_HI, rM_LO, rM_HI, cmp_kind) \
cmp rN_LO, rM_LO ; \
sbcs rN_HI, rM_HI ; \
mov rD_LO, rM_LO ; \
mov rD_HI, rM_HI ; \
itt cmp_kind ; \
mov##cmp_kind rD_LO, rN_LO ; \
cmp rN_LO, rM_LO; \
sbcs rN_HI, rM_HI; \
mov rD_LO, rM_LO; \
mov rD_HI, rM_HI; \
itt cmp_kind; \
mov##cmp_kind rD_LO, rN_LO; \
mov##cmp_kind rD_HI, rN_HI

View File

@ -17,9 +17,7 @@
/* Precondition: 0 <= b < bits_in_dword */
COMPILER_RT_ABI di_int
__ashldi3(di_int a, si_int b)
{
COMPILER_RT_ABI di_int __ashldi3(di_int a, si_int b) {
const int bits_in_word = (int)(sizeof(si_int) * CHAR_BIT);
dwords input;
dwords result;
@ -28,8 +26,7 @@ __ashldi3(di_int a, si_int b)
{
result.s.low = 0;
result.s.high = input.s.low << (b - bits_in_word);
}
else /* 0 <= b < bits_in_word */
} else /* 0 <= b < bits_in_word */
{
if (b == 0)
return a;
@ -40,5 +37,6 @@ __ashldi3(di_int a, si_int b)
}
#if defined(__ARM_EABI__)
AEABI_RTABI di_int __aeabi_llsl(di_int a, si_int b) COMPILER_RT_ALIAS(__ashldi3);
AEABI_RTABI di_int __aeabi_llsl(di_int a, si_int b)
COMPILER_RT_ALIAS(__ashldi3);
#endif

View File

@ -19,9 +19,7 @@
/* Precondition: 0 <= b < bits_in_tword */
COMPILER_RT_ABI ti_int
__ashlti3(ti_int a, si_int b)
{
COMPILER_RT_ABI ti_int __ashlti3(ti_int a, si_int b) {
const int bits_in_dword = (int)(sizeof(di_int) * CHAR_BIT);
twords input;
twords result;
@ -30,8 +28,7 @@ __ashlti3(ti_int a, si_int b)
{
result.s.low = 0;
result.s.high = input.s.low << (b - bits_in_dword);
}
else /* 0 <= b < bits_in_dword */
} else /* 0 <= b < bits_in_dword */
{
if (b == 0)
return a;

View File

@ -17,9 +17,7 @@
/* Precondition: 0 <= b < bits_in_dword */
COMPILER_RT_ABI di_int
__ashrdi3(di_int a, si_int b)
{
COMPILER_RT_ABI di_int __ashrdi3(di_int a, si_int b) {
const int bits_in_word = (int)(sizeof(si_int) * CHAR_BIT);
dwords input;
dwords result;
@ -29,8 +27,7 @@ __ashrdi3(di_int a, si_int b)
/* result.s.high = input.s.high < 0 ? -1 : 0 */
result.s.high = input.s.high >> (bits_in_word - 1);
result.s.low = input.s.high >> (b - bits_in_word);
}
else /* 0 <= b < bits_in_word */
} else /* 0 <= b < bits_in_word */
{
if (b == 0)
return a;
@ -41,5 +38,6 @@ __ashrdi3(di_int a, si_int b)
}
#if defined(__ARM_EABI__)
AEABI_RTABI di_int __aeabi_lasr(di_int a, si_int b) COMPILER_RT_ALIAS(__ashrdi3);
AEABI_RTABI di_int __aeabi_lasr(di_int a, si_int b)
COMPILER_RT_ALIAS(__ashrdi3);
#endif

View File

@ -19,9 +19,7 @@
/* Precondition: 0 <= b < bits_in_tword */
COMPILER_RT_ABI ti_int
__ashrti3(ti_int a, si_int b)
{
COMPILER_RT_ABI ti_int __ashrti3(ti_int a, si_int b) {
const int bits_in_dword = (int)(sizeof(di_int) * CHAR_BIT);
twords input;
twords result;
@ -31,8 +29,7 @@ __ashrti3(ti_int a, si_int b)
/* result.s.high = input.s.high < 0 ? -1 : 0 */
result.s.high = input.s.high >> (bits_in_dword - 1);
result.s.low = input.s.high >> (b - bits_in_dword);
}
else /* 0 <= b < bits_in_dword */
} else /* 0 <= b < bits_in_dword */
{
if (b == 0)
return a;

View File

@ -34,13 +34,14 @@
#pragma redefine_extname __atomic_load_c SYMBOL_NAME(__atomic_load)
#pragma redefine_extname __atomic_store_c SYMBOL_NAME(__atomic_store)
#pragma redefine_extname __atomic_exchange_c SYMBOL_NAME(__atomic_exchange)
#pragma redefine_extname __atomic_compare_exchange_c SYMBOL_NAME(__atomic_compare_exchange)
#pragma redefine_extname __atomic_compare_exchange_c SYMBOL_NAME( \
__atomic_compare_exchange)
/// Number of locks. This allocates one page on 32-bit platforms, two on
/// 64-bit. This can be specified externally if a different trade between
/// memory usage and contention probability is required for a given platform.
#ifndef SPINLOCK_COUNT
#define SPINLOCK_COUNT (1<<10)
#define SPINLOCK_COUNT (1 << 10)
#endif
static const long SPINLOCK_MASK = SPINLOCK_COUNT - 1;
@ -51,38 +52,35 @@ static const long SPINLOCK_MASK = SPINLOCK_COUNT - 1;
////////////////////////////////////////////////////////////////////////////////
#ifdef __FreeBSD__
#include <errno.h>
#include <sys/types.h>
#include <machine/atomic.h>
#include <sys/types.h>
#include <sys/umtx.h>
typedef struct _usem Lock;
__inline static void unlock(Lock *l) {
__c11_atomic_store((_Atomic(uint32_t)*)&l->_count, 1, __ATOMIC_RELEASE);
__c11_atomic_store((_Atomic(uint32_t) *)&l->_count, 1, __ATOMIC_RELEASE);
__c11_atomic_thread_fence(__ATOMIC_SEQ_CST);
if (l->_has_waiters)
_umtx_op(l, UMTX_OP_SEM_WAKE, 1, 0, 0);
}
__inline static void lock(Lock *l) {
uint32_t old = 1;
while (!__c11_atomic_compare_exchange_weak((_Atomic(uint32_t)*)&l->_count, &old,
0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
while (!__c11_atomic_compare_exchange_weak((_Atomic(uint32_t) *)&l->_count,
&old, 0, __ATOMIC_ACQUIRE,
__ATOMIC_RELAXED)) {
_umtx_op(l, UMTX_OP_SEM_WAIT, 0, 0, 0);
old = 1;
}
}
/// locks for atomic operations
static Lock locks[SPINLOCK_COUNT] = { [0 ... SPINLOCK_COUNT-1] = {0,1,0} };
static Lock locks[SPINLOCK_COUNT] = {[0 ... SPINLOCK_COUNT - 1] = {0, 1, 0}};
#elif defined(__APPLE__)
#include <libkern/OSAtomic.h>
typedef OSSpinLock Lock;
__inline static void unlock(Lock *l) {
OSSpinLockUnlock(l);
}
__inline static void unlock(Lock *l) { OSSpinLockUnlock(l); }
/// Locks a lock. In the current implementation, this is potentially
/// unbounded in the contended case.
__inline static void lock(Lock *l) {
OSSpinLockLock(l);
}
__inline static void lock(Lock *l) { OSSpinLockLock(l); }
static Lock locks[SPINLOCK_COUNT]; // initialized to OS_SPINLOCK_INIT which is 0
#else
@ -103,7 +101,6 @@ __inline static void lock(Lock *l) {
static Lock locks[SPINLOCK_COUNT];
#endif
/// Returns a lock to use for a given pointer.
static __inline Lock *lock_for_pointer(void *ptr) {
intptr_t hash = (intptr_t)ptr;
@ -133,43 +130,42 @@ static __inline Lock *lock_for_pointer(void *ptr) {
/// Macro that calls the compiler-generated lock-free versions of functions
/// when they exist.
#define LOCK_FREE_CASES() \
do {\
switch (size) {\
case 1:\
if (IS_LOCK_FREE_1) {\
LOCK_FREE_ACTION(uint8_t);\
}\
do { \
switch (size) { \
case 1: \
if (IS_LOCK_FREE_1) { \
LOCK_FREE_ACTION(uint8_t); \
} \
break; \
case 2:\
if (IS_LOCK_FREE_2) {\
LOCK_FREE_ACTION(uint16_t);\
}\
case 2: \
if (IS_LOCK_FREE_2) { \
LOCK_FREE_ACTION(uint16_t); \
} \
break; \
case 4:\
if (IS_LOCK_FREE_4) {\
LOCK_FREE_ACTION(uint32_t);\
}\
case 4: \
if (IS_LOCK_FREE_4) { \
LOCK_FREE_ACTION(uint32_t); \
} \
break; \
case 8:\
if (IS_LOCK_FREE_8) {\
LOCK_FREE_ACTION(uint64_t);\
}\
case 8: \
if (IS_LOCK_FREE_8) { \
LOCK_FREE_ACTION(uint64_t); \
} \
break; \
case 16:\
if (IS_LOCK_FREE_16) {\
/* FIXME: __uint128_t isn't available on 32 bit platforms.
LOCK_FREE_ACTION(__uint128_t);*/\
}\
case 16: \
if (IS_LOCK_FREE_16) { \
/* FIXME: __uint128_t isn't available on 32 bit platforms. \
LOCK_FREE_ACTION(__uint128_t);*/ \
} \
break; \
}\
} \
} while (0)
/// An atomic load operation. This is atomic with respect to the source
/// pointer only.
void __atomic_load_c(int size, void *src, void *dest, int model) {
#define LOCK_FREE_ACTION(type) \
*((type*)dest) = __c11_atomic_load((_Atomic(type)*)src, model);\
*((type *)dest) = __c11_atomic_load((_Atomic(type) *)src, model); \
return;
LOCK_FREE_CASES();
#undef LOCK_FREE_ACTION
@ -183,7 +179,7 @@ void __atomic_load_c(int size, void *src, void *dest, int model) {
/// pointer only.
void __atomic_store_c(int size, void *dest, void *src, int model) {
#define LOCK_FREE_ACTION(type) \
__c11_atomic_store((_Atomic(type)*)dest, *(type*)src, model);\
__c11_atomic_store((_Atomic(type) *)dest, *(type *)src, model); \
return;
LOCK_FREE_CASES();
#undef LOCK_FREE_ACTION
@ -201,8 +197,9 @@ void __atomic_store_c(int size, void *dest, void *src, int model) {
int __atomic_compare_exchange_c(int size, void *ptr, void *expected,
void *desired, int success, int failure) {
#define LOCK_FREE_ACTION(type) \
return __c11_atomic_compare_exchange_strong((_Atomic(type)*)ptr, (type*)expected,\
*(type*)desired, success, failure)
return __c11_atomic_compare_exchange_strong( \
(_Atomic(type) *)ptr, (type *)expected, *(type *)desired, success, \
failure)
LOCK_FREE_CASES();
#undef LOCK_FREE_ACTION
Lock *l = lock_for_pointer(ptr);
@ -221,8 +218,8 @@ int __atomic_compare_exchange_c(int size, void *ptr, void *expected,
/// with respect to the target address.
void __atomic_exchange_c(int size, void *ptr, void *val, void *old, int model) {
#define LOCK_FREE_ACTION(type) \
*(type*)old = __c11_atomic_exchange((_Atomic(type)*)ptr, *(type*)val,\
model);\
*(type *)old = \
__c11_atomic_exchange((_Atomic(type) *)ptr, *(type *)val, model); \
return;
LOCK_FREE_CASES();
#undef LOCK_FREE_ACTION
@ -238,79 +235,79 @@ void __atomic_exchange_c(int size, void *ptr, void *val, void *old, int model) {
// specialised versions of the above functions.
////////////////////////////////////////////////////////////////////////////////
#ifdef __SIZEOF_INT128__
#define OPTIMISED_CASES\
OPTIMISED_CASE(1, IS_LOCK_FREE_1, uint8_t)\
OPTIMISED_CASE(2, IS_LOCK_FREE_2, uint16_t)\
OPTIMISED_CASE(4, IS_LOCK_FREE_4, uint32_t)\
OPTIMISED_CASE(8, IS_LOCK_FREE_8, uint64_t)\
#define OPTIMISED_CASES \
OPTIMISED_CASE(1, IS_LOCK_FREE_1, uint8_t) \
OPTIMISED_CASE(2, IS_LOCK_FREE_2, uint16_t) \
OPTIMISED_CASE(4, IS_LOCK_FREE_4, uint32_t) \
OPTIMISED_CASE(8, IS_LOCK_FREE_8, uint64_t) \
OPTIMISED_CASE(16, IS_LOCK_FREE_16, __uint128_t)
#else
#define OPTIMISED_CASES\
OPTIMISED_CASE(1, IS_LOCK_FREE_1, uint8_t)\
OPTIMISED_CASE(2, IS_LOCK_FREE_2, uint16_t)\
OPTIMISED_CASE(4, IS_LOCK_FREE_4, uint32_t)\
#define OPTIMISED_CASES \
OPTIMISED_CASE(1, IS_LOCK_FREE_1, uint8_t) \
OPTIMISED_CASE(2, IS_LOCK_FREE_2, uint16_t) \
OPTIMISED_CASE(4, IS_LOCK_FREE_4, uint32_t) \
OPTIMISED_CASE(8, IS_LOCK_FREE_8, uint64_t)
#endif
#define OPTIMISED_CASE(n, lockfree, type)\
type __atomic_load_##n(type *src, int model) {\
if (lockfree)\
return __c11_atomic_load((_Atomic(type)*)src, model);\
Lock *l = lock_for_pointer(src);\
lock(l);\
type val = *src;\
unlock(l);\
return val;\
}
#define OPTIMISED_CASE(n, lockfree, type) \
type __atomic_load_##n(type *src, int model) { \
if (lockfree) \
return __c11_atomic_load((_Atomic(type) *)src, model); \
Lock *l = lock_for_pointer(src); \
lock(l); \
type val = *src; \
unlock(l); \
return val; \
}
OPTIMISED_CASES
#undef OPTIMISED_CASE
#define OPTIMISED_CASE(n, lockfree, type)\
void __atomic_store_##n(type *dest, type val, int model) {\
if (lockfree) {\
__c11_atomic_store((_Atomic(type)*)dest, val, model);\
return;\
}\
Lock *l = lock_for_pointer(dest);\
lock(l);\
*dest = val;\
unlock(l);\
return;\
}
#define OPTIMISED_CASE(n, lockfree, type) \
void __atomic_store_##n(type *dest, type val, int model) { \
if (lockfree) { \
__c11_atomic_store((_Atomic(type) *)dest, val, model); \
return; \
} \
Lock *l = lock_for_pointer(dest); \
lock(l); \
*dest = val; \
unlock(l); \
return; \
}
OPTIMISED_CASES
#undef OPTIMISED_CASE
#define OPTIMISED_CASE(n, lockfree, type)\
type __atomic_exchange_##n(type *dest, type val, int model) {\
if (lockfree)\
return __c11_atomic_exchange((_Atomic(type)*)dest, val, model);\
Lock *l = lock_for_pointer(dest);\
lock(l);\
type tmp = *dest;\
*dest = val;\
unlock(l);\
return tmp;\
}
#define OPTIMISED_CASE(n, lockfree, type) \
type __atomic_exchange_##n(type *dest, type val, int model) { \
if (lockfree) \
return __c11_atomic_exchange((_Atomic(type) *)dest, val, model); \
Lock *l = lock_for_pointer(dest); \
lock(l); \
type tmp = *dest; \
*dest = val; \
unlock(l); \
return tmp; \
}
OPTIMISED_CASES
#undef OPTIMISED_CASE
#define OPTIMISED_CASE(n, lockfree, type)\
int __atomic_compare_exchange_##n(type *ptr, type *expected, type desired,\
int success, int failure) {\
if (lockfree)\
return __c11_atomic_compare_exchange_strong((_Atomic(type)*)ptr, expected, desired,\
success, failure);\
Lock *l = lock_for_pointer(ptr);\
lock(l);\
if (*ptr == *expected) {\
*ptr = desired;\
unlock(l);\
return 1;\
}\
*expected = *ptr;\
unlock(l);\
return 0;\
}
#define OPTIMISED_CASE(n, lockfree, type) \
int __atomic_compare_exchange_##n(type *ptr, type *expected, type desired, \
int success, int failure) { \
if (lockfree) \
return __c11_atomic_compare_exchange_strong( \
(_Atomic(type) *)ptr, expected, desired, success, failure); \
Lock *l = lock_for_pointer(ptr); \
lock(l); \
if (*ptr == *expected) { \
*ptr = desired; \
unlock(l); \
return 1; \
} \
*expected = *ptr; \
unlock(l); \
return 0; \
}
OPTIMISED_CASES
#undef OPTIMISED_CASE
@ -318,16 +315,16 @@ OPTIMISED_CASES
// Atomic read-modify-write operations for integers of various sizes.
////////////////////////////////////////////////////////////////////////////////
#define ATOMIC_RMW(n, lockfree, type, opname, op) \
type __atomic_fetch_##opname##_##n(type *ptr, type val, int model) {\
type __atomic_fetch_##opname##_##n(type *ptr, type val, int model) { \
if (lockfree) \
return __c11_atomic_fetch_##opname((_Atomic(type)*)ptr, val, model);\
Lock *l = lock_for_pointer(ptr);\
lock(l);\
type tmp = *ptr;\
*ptr = tmp op val;\
unlock(l);\
return tmp;\
}
return __c11_atomic_fetch_##opname((_Atomic(type) *)ptr, val, model); \
Lock *l = lock_for_pointer(ptr); \
lock(l); \
type tmp = *ptr; \
*ptr = tmp op val; \
unlock(l); \
return tmp; \
}
#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, add, +)
OPTIMISED_CASES

View File

@ -14,8 +14,7 @@
#include "int_lib.h"
COMPILER_RT_ABI uint32_t __bswapsi2(uint32_t u) {
return (
(((u)&0xff000000) >> 24) |
return ((((u)&0xff000000) >> 24) |
(((u)&0x00ff0000) >> 8) |
(((u)&0x0000ff00) << 8) |
(((u)&0x000000ff) << 24));

View File

@ -12,7 +12,7 @@
#include <stddef.h>
#if __APPLE__
#include <libkern/OSCacheControl.h>
#include <libkern/OSCacheControl.h>
#endif
#if defined(_WIN32)
@ -24,29 +24,29 @@ uintptr_t GetCurrentProcess(void);
#endif
#if defined(__FreeBSD__) && defined(__arm__)
#include <sys/types.h>
#include <machine/sysarch.h>
#include <machine/sysarch.h>
#include <sys/types.h>
#endif
#if defined(__NetBSD__) && defined(__arm__)
#include <machine/sysarch.h>
#include <machine/sysarch.h>
#endif
#if defined(__OpenBSD__) && defined(__mips__)
#include <sys/types.h>
#include <machine/sysarch.h>
#include <machine/sysarch.h>
#include <sys/types.h>
#endif
#if defined(__linux__) && defined(__mips__)
#include <sys/cachectl.h>
#include <sys/syscall.h>
#include <unistd.h>
#if defined(__ANDROID__) && defined(__LP64__)
/*
#include <sys/cachectl.h>
#include <sys/syscall.h>
#include <unistd.h>
#if defined(__ANDROID__) && defined(__LP64__)
/*
* clear_mips_cache - Invalidates instruction cache for Mips.
*/
static void clear_mips_cache(const void* Addr, size_t Size) {
__asm__ volatile (
static void clear_mips_cache(const void *Addr, size_t Size) {
__asm__ volatile(
".set push\n"
".set noreorder\n"
".set noat\n"
@ -80,11 +80,9 @@ uintptr_t GetCurrentProcess(void);
hazards. */
"nop\n"
".set pop\n"
: [Addr] "+r"(Addr), [Size] "+r"(Size)
:: "at", "ra", "v0", "memory"
);
}
#endif
: [ Addr ] "+r"(Addr), [ Size ] "+r"(Size)::"at", "ra", "v0", "memory");
}
#endif
#endif
/*
@ -103,15 +101,15 @@ void __clear_cache(void *start, void *end) {
#elif defined(_WIN32) && (defined(__arm__) || defined(__aarch64__))
FlushInstructionCache(GetCurrentProcess(), start, end - start);
#elif defined(__arm__) && !defined(__APPLE__)
#if defined(__FreeBSD__) || defined(__NetBSD__)
#if defined(__FreeBSD__) || defined(__NetBSD__)
struct arm_sync_icache_args arg;
arg.addr = (uintptr_t)start;
arg.len = (uintptr_t)end - (uintptr_t)start;
sysarch(ARM_SYNC_ICACHE, &arg);
#elif defined(__linux__)
/*
#elif defined(__linux__)
/*
* We used to include asm/unistd.h for the __ARM_NR_cacheflush define, but
* it also brought many other unused defines, as well as a dependency on
* kernel headers to be installed.
@ -119,23 +117,22 @@ void __clear_cache(void *start, void *end) {
* This value is stable at least since Linux 3.13 and should remain so for
* compatibility reasons, warranting it's re-definition here.
*/
#define __ARM_NR_cacheflush 0x0f0002
register int start_reg __asm("r0") = (int) (intptr_t) start;
const register int end_reg __asm("r1") = (int) (intptr_t) end;
#define __ARM_NR_cacheflush 0x0f0002
register int start_reg __asm("r0") = (int)(intptr_t)start;
const register int end_reg __asm("r1") = (int)(intptr_t)end;
const register int flags __asm("r2") = 0;
const register int syscall_nr __asm("r7") = __ARM_NR_cacheflush;
__asm __volatile("svc 0x0"
: "=r"(start_reg)
: "r"(syscall_nr), "r"(start_reg), "r"(end_reg),
"r"(flags));
: "r"(syscall_nr), "r"(start_reg), "r"(end_reg), "r"(flags));
assert(start_reg == 0 && "Cache flush syscall failed.");
#else
#else
compilerrt_abort();
#endif
#endif
#elif defined(__linux__) && defined(__mips__)
const uintptr_t start_int = (uintptr_t) start;
const uintptr_t end_int = (uintptr_t) end;
#if defined(__ANDROID__) && defined(__LP64__)
const uintptr_t start_int = (uintptr_t)start;
const uintptr_t end_int = (uintptr_t)end;
#if defined(__ANDROID__) && defined(__LP64__)
// Call synci implementation for short address range.
const uintptr_t address_range_limit = 256;
if ((end_int - start_int) <= address_range_limit) {
@ -143,14 +140,14 @@ void __clear_cache(void *start, void *end) {
} else {
syscall(__NR_cacheflush, start, (end_int - start_int), BCACHE);
}
#else
#else
syscall(__NR_cacheflush, start, (end_int - start_int), BCACHE);
#endif
#endif
#elif defined(__mips__) && defined(__OpenBSD__)
cacheflush(start, (uintptr_t)end - (uintptr_t)start, BCACHE);
#elif defined(__aarch64__) && !defined(__APPLE__)
uint64_t xstart = (uint64_t)(uintptr_t) start;
uint64_t xend = (uint64_t)(uintptr_t) end;
uint64_t xstart = (uint64_t)(uintptr_t)start;
uint64_t xend = (uint64_t)(uintptr_t)end;
uint64_t addr;
// Get Cache Type Info
@ -164,15 +161,15 @@ void __clear_cache(void *start, void *end) {
const size_t dcache_line_size = 4 << ((ctr_el0 >> 16) & 15);
for (addr = xstart & ~(dcache_line_size - 1); addr < xend;
addr += dcache_line_size)
__asm __volatile("dc cvau, %0" :: "r"(addr));
__asm __volatile("dc cvau, %0" ::"r"(addr));
__asm __volatile("dsb ish");
const size_t icache_line_size = 4 << ((ctr_el0 >> 0) & 15);
for (addr = xstart & ~(icache_line_size - 1); addr < xend;
addr += icache_line_size)
__asm __volatile("ic ivau, %0" :: "r"(addr));
__asm __volatile("ic ivau, %0" ::"r"(addr));
__asm __volatile("isb sy");
#elif defined (__powerpc64__)
#elif defined(__powerpc64__)
const size_t line_size = 32;
const size_t len = (uintptr_t)end - (uintptr_t)start;
@ -188,11 +185,11 @@ void __clear_cache(void *start, void *end) {
__asm__ volatile("icbi 0, %0" : : "r"(line));
__asm__ volatile("isync");
#else
#if __APPLE__
#if __APPLE__
/* On Darwin, sys_icache_invalidate() provides this functionality */
sys_icache_invalidate(start, end-start);
#else
sys_icache_invalidate(start, end - start);
#else
compilerrt_abort();
#endif
#endif
#endif
}

View File

@ -16,8 +16,7 @@
/* Returns: the number of leading 0-bits */
#if !defined(__clang__) && \
((defined(__sparc__) && defined(__arch64__)) || \
defined(__mips64) || \
((defined(__sparc__) && defined(__arch64__)) || defined(__mips64) || \
(defined(__riscv) && __SIZEOF_POINTER__ >= 8))
/* On 64-bit architectures with neither a native clz instruction nor a native
* ctz instruction, gcc resolves __builtin_clz to __clzdi2 rather than
@ -28,9 +27,7 @@ extern si_int __clzsi2(si_int);
/* Precondition: a != 0 */
COMPILER_RT_ABI si_int
__clzdi2(di_int a)
{
COMPILER_RT_ABI si_int __clzdi2(di_int a) {
dwords x;
x.all = a;
const si_int f = -(x.s.high == 0);

View File

@ -17,9 +17,7 @@
/* Precondition: a != 0 */
COMPILER_RT_ABI si_int
__clzsi2(si_int a)
{
COMPILER_RT_ABI si_int __clzsi2(si_int a) {
su_int x = (su_int)a;
si_int t = ((x & 0xFFFF0000) == 0) << 4; /* if (x is small) t = 16 else 0 */
x >>= 16 - t; /* x = [0 - 0xFFFF] */
@ -37,7 +35,7 @@ __clzsi2(si_int a)
x >>= 2 - t; /* x = [0 - 3] */
r += t; /* r = [0 - 30] and is even */
/* return r + clz(x) */
/* switch (x)
/* switch (x)
* {
* case 0:
* return r + 2;

View File

@ -19,9 +19,7 @@
/* Precondition: a != 0 */
COMPILER_RT_ABI si_int
__clzti2(ti_int a)
{
COMPILER_RT_ABI si_int __clzti2(ti_int a) {
twords x;
x.all = a;
const di_int f = -(x.s.high == 0);

View File

@ -14,13 +14,11 @@
#include "int_lib.h"
/* Returns: if (a < b) returns 0
* if (a == b) returns 1
* if (a > b) returns 2
*/
* if (a == b) returns 1
* if (a > b) returns 2
*/
COMPILER_RT_ABI si_int
__cmpdi2(di_int a, di_int b)
{
COMPILER_RT_ABI si_int __cmpdi2(di_int a, di_int b) {
dwords x;
x.all = a;
dwords y;
@ -38,13 +36,10 @@ __cmpdi2(di_int a, di_int b)
#ifdef __ARM_EABI__
/* Returns: if (a < b) returns -1
* if (a == b) returns 0
* if (a > b) returns 1
*/
COMPILER_RT_ABI si_int
__aeabi_lcmp(di_int a, di_int b)
{
* if (a == b) returns 0
* if (a > b) returns 1
*/
COMPILER_RT_ABI si_int __aeabi_lcmp(di_int a, di_int b) {
return __cmpdi2(a, b) - 1;
}
#endif

View File

@ -20,9 +20,7 @@
* if (a > b) returns 2
*/
COMPILER_RT_ABI si_int
__cmpti2(ti_int a, ti_int b)
{
COMPILER_RT_ABI si_int __cmpti2(ti_int a, ti_int b) {
twords x;
x.all = a;
twords y;

View File

@ -39,15 +39,9 @@
#define DOUBLE_PRECISION
#include "fp_lib.h"
enum LE_RESULT {
LE_LESS = -1,
LE_EQUAL = 0,
LE_GREATER = 1,
LE_UNORDERED = 1
};
enum LE_RESULT { LE_LESS = -1, LE_EQUAL = 0, LE_GREATER = 1, LE_UNORDERED = 1 };
COMPILER_RT_ABI enum LE_RESULT
__ledf2(fp_t a, fp_t b) {
COMPILER_RT_ABI enum LE_RESULT __ledf2(fp_t a, fp_t b) {
const srep_t aInt = toRep(a);
const srep_t bInt = toRep(b);
@ -55,17 +49,22 @@ __ledf2(fp_t a, fp_t b) {
const rep_t bAbs = bInt & absMask;
// If either a or b is NaN, they are unordered.
if (aAbs > infRep || bAbs > infRep) return LE_UNORDERED;
if (aAbs > infRep || bAbs > infRep)
return LE_UNORDERED;
// If a and b are both zeros, they are equal.
if ((aAbs | bAbs) == 0) return LE_EQUAL;
if ((aAbs | bAbs) == 0)
return LE_EQUAL;
// If at least one of a and b is positive, we get the same result comparing
// a and b as signed integers as we would with a floating-point compare.
if ((aInt & bInt) >= 0) {
if (aInt < bInt) return LE_LESS;
else if (aInt == bInt) return LE_EQUAL;
else return LE_GREATER;
if (aInt < bInt)
return LE_LESS;
else if (aInt == bInt)
return LE_EQUAL;
else
return LE_GREATER;
}
// Otherwise, both are negative, so we need to flip the sense of the
@ -73,9 +72,12 @@ __ledf2(fp_t a, fp_t b) {
// complement integer representation; if integers are represented in a
// sign-magnitude representation, then this flip is incorrect).
else {
if (aInt > bInt) return LE_LESS;
else if (aInt == bInt) return LE_EQUAL;
else return LE_GREATER;
if (aInt > bInt)
return LE_LESS;
else if (aInt == bInt)
return LE_EQUAL;
else
return LE_GREATER;
}
}
@ -91,29 +93,35 @@ enum GE_RESULT {
GE_UNORDERED = -1 // Note: different from LE_UNORDERED
};
COMPILER_RT_ABI enum GE_RESULT
__gedf2(fp_t a, fp_t b) {
COMPILER_RT_ABI enum GE_RESULT __gedf2(fp_t a, fp_t b) {
const srep_t aInt = toRep(a);
const srep_t bInt = toRep(b);
const rep_t aAbs = aInt & absMask;
const rep_t bAbs = bInt & absMask;
if (aAbs > infRep || bAbs > infRep) return GE_UNORDERED;
if ((aAbs | bAbs) == 0) return GE_EQUAL;
if (aAbs > infRep || bAbs > infRep)
return GE_UNORDERED;
if ((aAbs | bAbs) == 0)
return GE_EQUAL;
if ((aInt & bInt) >= 0) {
if (aInt < bInt) return GE_LESS;
else if (aInt == bInt) return GE_EQUAL;
else return GE_GREATER;
if (aInt < bInt)
return GE_LESS;
else if (aInt == bInt)
return GE_EQUAL;
else
return GE_GREATER;
} else {
if (aInt > bInt) return GE_LESS;
else if (aInt == bInt) return GE_EQUAL;
else return GE_GREATER;
if (aInt > bInt)
return GE_LESS;
else if (aInt == bInt)
return GE_EQUAL;
else
return GE_GREATER;
}
}
COMPILER_RT_ABI int
__unorddf2(fp_t a, fp_t b) {
COMPILER_RT_ABI int __unorddf2(fp_t a, fp_t b) {
const rep_t aAbs = toRep(a) & absMask;
const rep_t bAbs = toRep(b) & absMask;
return aAbs > infRep || bAbs > infRep;
@ -121,31 +129,17 @@ __unorddf2(fp_t a, fp_t b) {
// The following are alternative names for the preceding routines.
COMPILER_RT_ABI enum LE_RESULT
__eqdf2(fp_t a, fp_t b) {
return __ledf2(a, b);
}
COMPILER_RT_ABI enum LE_RESULT __eqdf2(fp_t a, fp_t b) { return __ledf2(a, b); }
COMPILER_RT_ABI enum LE_RESULT
__ltdf2(fp_t a, fp_t b) {
return __ledf2(a, b);
}
COMPILER_RT_ABI enum LE_RESULT __ltdf2(fp_t a, fp_t b) { return __ledf2(a, b); }
COMPILER_RT_ABI enum LE_RESULT
__nedf2(fp_t a, fp_t b) {
return __ledf2(a, b);
}
COMPILER_RT_ABI enum LE_RESULT __nedf2(fp_t a, fp_t b) { return __ledf2(a, b); }
COMPILER_RT_ABI enum GE_RESULT
__gtdf2(fp_t a, fp_t b) {
return __gedf2(a, b);
}
COMPILER_RT_ABI enum GE_RESULT __gtdf2(fp_t a, fp_t b) { return __gedf2(a, b); }
#if defined(__ARM_EABI__)
#if defined(COMPILER_RT_ARMHF_TARGET)
AEABI_RTABI int __aeabi_dcmpun(fp_t a, fp_t b) {
return __unorddf2(a, b);
}
AEABI_RTABI int __aeabi_dcmpun(fp_t a, fp_t b) { return __unorddf2(a, b); }
#else
AEABI_RTABI int __aeabi_dcmpun(fp_t a, fp_t b) COMPILER_RT_ALIAS(__unorddf2);
#endif

View File

@ -39,15 +39,9 @@
#define SINGLE_PRECISION
#include "fp_lib.h"
enum LE_RESULT {
LE_LESS = -1,
LE_EQUAL = 0,
LE_GREATER = 1,
LE_UNORDERED = 1
};
enum LE_RESULT { LE_LESS = -1, LE_EQUAL = 0, LE_GREATER = 1, LE_UNORDERED = 1 };
COMPILER_RT_ABI enum LE_RESULT
__lesf2(fp_t a, fp_t b) {
COMPILER_RT_ABI enum LE_RESULT __lesf2(fp_t a, fp_t b) {
const srep_t aInt = toRep(a);
const srep_t bInt = toRep(b);
@ -55,17 +49,22 @@ __lesf2(fp_t a, fp_t b) {
const rep_t bAbs = bInt & absMask;
// If either a or b is NaN, they are unordered.
if (aAbs > infRep || bAbs > infRep) return LE_UNORDERED;
if (aAbs > infRep || bAbs > infRep)
return LE_UNORDERED;
// If a and b are both zeros, they are equal.
if ((aAbs | bAbs) == 0) return LE_EQUAL;
if ((aAbs | bAbs) == 0)
return LE_EQUAL;
// If at least one of a and b is positive, we get the same result comparing
// a and b as signed integers as we would with a fp_ting-point compare.
if ((aInt & bInt) >= 0) {
if (aInt < bInt) return LE_LESS;
else if (aInt == bInt) return LE_EQUAL;
else return LE_GREATER;
if (aInt < bInt)
return LE_LESS;
else if (aInt == bInt)
return LE_EQUAL;
else
return LE_GREATER;
}
// Otherwise, both are negative, so we need to flip the sense of the
@ -73,9 +72,12 @@ __lesf2(fp_t a, fp_t b) {
// complement integer representation; if integers are represented in a
// sign-magnitude representation, then this flip is incorrect).
else {
if (aInt > bInt) return LE_LESS;
else if (aInt == bInt) return LE_EQUAL;
else return LE_GREATER;
if (aInt > bInt)
return LE_LESS;
else if (aInt == bInt)
return LE_EQUAL;
else
return LE_GREATER;
}
}
@ -91,29 +93,35 @@ enum GE_RESULT {
GE_UNORDERED = -1 // Note: different from LE_UNORDERED
};
COMPILER_RT_ABI enum GE_RESULT
__gesf2(fp_t a, fp_t b) {
COMPILER_RT_ABI enum GE_RESULT __gesf2(fp_t a, fp_t b) {
const srep_t aInt = toRep(a);
const srep_t bInt = toRep(b);
const rep_t aAbs = aInt & absMask;
const rep_t bAbs = bInt & absMask;
if (aAbs > infRep || bAbs > infRep) return GE_UNORDERED;
if ((aAbs | bAbs) == 0) return GE_EQUAL;
if (aAbs > infRep || bAbs > infRep)
return GE_UNORDERED;
if ((aAbs | bAbs) == 0)
return GE_EQUAL;
if ((aInt & bInt) >= 0) {
if (aInt < bInt) return GE_LESS;
else if (aInt == bInt) return GE_EQUAL;
else return GE_GREATER;
if (aInt < bInt)
return GE_LESS;
else if (aInt == bInt)
return GE_EQUAL;
else
return GE_GREATER;
} else {
if (aInt > bInt) return GE_LESS;
else if (aInt == bInt) return GE_EQUAL;
else return GE_GREATER;
if (aInt > bInt)
return GE_LESS;
else if (aInt == bInt)
return GE_EQUAL;
else
return GE_GREATER;
}
}
COMPILER_RT_ABI int
__unordsf2(fp_t a, fp_t b) {
COMPILER_RT_ABI int __unordsf2(fp_t a, fp_t b) {
const rep_t aAbs = toRep(a) & absMask;
const rep_t bAbs = toRep(b) & absMask;
return aAbs > infRep || bAbs > infRep;
@ -121,31 +129,17 @@ __unordsf2(fp_t a, fp_t b) {
// The following are alternative names for the preceding routines.
COMPILER_RT_ABI enum LE_RESULT
__eqsf2(fp_t a, fp_t b) {
return __lesf2(a, b);
}
COMPILER_RT_ABI enum LE_RESULT __eqsf2(fp_t a, fp_t b) { return __lesf2(a, b); }
COMPILER_RT_ABI enum LE_RESULT
__ltsf2(fp_t a, fp_t b) {
return __lesf2(a, b);
}
COMPILER_RT_ABI enum LE_RESULT __ltsf2(fp_t a, fp_t b) { return __lesf2(a, b); }
COMPILER_RT_ABI enum LE_RESULT
__nesf2(fp_t a, fp_t b) {
return __lesf2(a, b);
}
COMPILER_RT_ABI enum LE_RESULT __nesf2(fp_t a, fp_t b) { return __lesf2(a, b); }
COMPILER_RT_ABI enum GE_RESULT
__gtsf2(fp_t a, fp_t b) {
return __gesf2(a, b);
}
COMPILER_RT_ABI enum GE_RESULT __gtsf2(fp_t a, fp_t b) { return __gesf2(a, b); }
#if defined(__ARM_EABI__)
#if defined(COMPILER_RT_ARMHF_TARGET)
AEABI_RTABI int __aeabi_fcmpun(fp_t a, fp_t b) {
return __unordsf2(a, b);
}
AEABI_RTABI int __aeabi_fcmpun(fp_t a, fp_t b) { return __unordsf2(a, b); }
#else
AEABI_RTABI int __aeabi_fcmpun(fp_t a, fp_t b) COMPILER_RT_ALIAS(__unordsf2);
#endif

View File

@ -40,12 +40,7 @@
#include "fp_lib.h"
#if defined(CRT_HAS_128BIT) && defined(CRT_LDBL_128BIT)
enum LE_RESULT {
LE_LESS = -1,
LE_EQUAL = 0,
LE_GREATER = 1,
LE_UNORDERED = 1
};
enum LE_RESULT { LE_LESS = -1, LE_EQUAL = 0, LE_GREATER = 1, LE_UNORDERED = 1 };
COMPILER_RT_ABI enum LE_RESULT __letf2(fp_t a, fp_t b) {
@ -55,26 +50,33 @@ COMPILER_RT_ABI enum LE_RESULT __letf2(fp_t a, fp_t b) {
const rep_t bAbs = bInt & absMask;
// If either a or b is NaN, they are unordered.
if (aAbs > infRep || bAbs > infRep) return LE_UNORDERED;
if (aAbs > infRep || bAbs > infRep)
return LE_UNORDERED;
// If a and b are both zeros, they are equal.
if ((aAbs | bAbs) == 0) return LE_EQUAL;
if ((aAbs | bAbs) == 0)
return LE_EQUAL;
// If at least one of a and b is positive, we get the same result comparing
// a and b as signed integers as we would with a floating-point compare.
if ((aInt & bInt) >= 0) {
if (aInt < bInt) return LE_LESS;
else if (aInt == bInt) return LE_EQUAL;
else return LE_GREATER;
}
else {
if (aInt < bInt)
return LE_LESS;
else if (aInt == bInt)
return LE_EQUAL;
else
return LE_GREATER;
} else {
// Otherwise, both are negative, so we need to flip the sense of the
// comparison to get the correct result. (This assumes a twos- or ones-
// complement integer representation; if integers are represented in a
// sign-magnitude representation, then this flip is incorrect).
if (aInt > bInt) return LE_LESS;
else if (aInt == bInt) return LE_EQUAL;
else return LE_GREATER;
if (aInt > bInt)
return LE_LESS;
else if (aInt == bInt)
return LE_EQUAL;
else
return LE_GREATER;
}
}
@ -97,16 +99,24 @@ COMPILER_RT_ABI enum GE_RESULT __getf2(fp_t a, fp_t b) {
const rep_t aAbs = aInt & absMask;
const rep_t bAbs = bInt & absMask;
if (aAbs > infRep || bAbs > infRep) return GE_UNORDERED;
if ((aAbs | bAbs) == 0) return GE_EQUAL;
if (aAbs > infRep || bAbs > infRep)
return GE_UNORDERED;
if ((aAbs | bAbs) == 0)
return GE_EQUAL;
if ((aInt & bInt) >= 0) {
if (aInt < bInt) return GE_LESS;
else if (aInt == bInt) return GE_EQUAL;
else return GE_GREATER;
if (aInt < bInt)
return GE_LESS;
else if (aInt == bInt)
return GE_EQUAL;
else
return GE_GREATER;
} else {
if (aInt > bInt) return GE_LESS;
else if (aInt == bInt) return GE_EQUAL;
else return GE_GREATER;
if (aInt > bInt)
return GE_LESS;
else if (aInt == bInt)
return GE_EQUAL;
else
return GE_GREATER;
}
}
@ -118,20 +128,12 @@ COMPILER_RT_ABI int __unordtf2(fp_t a, fp_t b) {
// The following are alternative names for the preceding routines.
COMPILER_RT_ABI enum LE_RESULT __eqtf2(fp_t a, fp_t b) {
return __letf2(a, b);
}
COMPILER_RT_ABI enum LE_RESULT __eqtf2(fp_t a, fp_t b) { return __letf2(a, b); }
COMPILER_RT_ABI enum LE_RESULT __lttf2(fp_t a, fp_t b) {
return __letf2(a, b);
}
COMPILER_RT_ABI enum LE_RESULT __lttf2(fp_t a, fp_t b) { return __letf2(a, b); }
COMPILER_RT_ABI enum LE_RESULT __netf2(fp_t a, fp_t b) {
return __letf2(a, b);
}
COMPILER_RT_ABI enum LE_RESULT __netf2(fp_t a, fp_t b) { return __letf2(a, b); }
COMPILER_RT_ABI enum GE_RESULT __gttf2(fp_t a, fp_t b) {
return __getf2(a, b);
}
COMPILER_RT_ABI enum GE_RESULT __gttf2(fp_t a, fp_t b) { return __getf2(a, b); }
#endif

View File

@ -12,8 +12,8 @@
//
//===----------------------------------------------------------------------===//
#if (defined(__i386__) || defined(_M_IX86) || \
defined(__x86_64__) || defined(_M_X64)) && \
#if (defined(__i386__) || defined(_M_IX86) || defined(__x86_64__) || \
defined(_M_X64)) && \
(defined(__GNUC__) || defined(__clang__) || defined(_MSC_VER))
#include <assert.h>
@ -267,9 +267,9 @@ static void detectX86FamilyModel(unsigned EAX, unsigned *Family,
}
}
static void
getIntelProcessorTypeAndSubtype(unsigned Family, unsigned Model,
unsigned Brand_id, unsigned Features,
static void getIntelProcessorTypeAndSubtype(unsigned Family, unsigned Model,
unsigned Brand_id,
unsigned Features,
unsigned Features2, unsigned *Type,
unsigned *Subtype) {
if (Brand_id != 0)
@ -618,8 +618,7 @@ unsigned int __cpu_features2;
the priority set. However, it still runs after ifunc initializers and
needs to be called explicitly there. */
int CONSTRUCTOR_ATTRIBUTE
__cpu_indicator_init(void) {
int CONSTRUCTOR_ATTRIBUTE __cpu_indicator_init(void) {
unsigned EAX, EBX, ECX, EDX;
unsigned MaxLeaf = 5;
unsigned Vendor;
@ -651,8 +650,7 @@ __cpu_indicator_init(void) {
if (Vendor == SIG_INTEL) {
/* Get CPU type. */
getIntelProcessorTypeAndSubtype(Family, Model, Brand_id, Features,
Features2,
&(__cpu_model.__cpu_type),
Features2, &(__cpu_model.__cpu_type),
&(__cpu_model.__cpu_subtype));
__cpu_model.__cpu_vendor = VENDOR_INTEL;
} else if (Vendor == SIG_AMD) {

View File

@ -16,8 +16,7 @@
/* Returns: the number of trailing 0-bits */
#if !defined(__clang__) && \
((defined(__sparc__) && defined(__arch64__)) || \
defined(__mips64) || \
((defined(__sparc__) && defined(__arch64__)) || defined(__mips64) || \
(defined(__riscv) && __SIZEOF_POINTER__ >= 8))
/* On 64-bit architectures with neither a native clz instruction nor a native
* ctz instruction, gcc resolves __builtin_ctz to __ctzdi2 rather than
@ -28,9 +27,7 @@ extern si_int __ctzsi2(si_int);
/* Precondition: a != 0 */
COMPILER_RT_ABI si_int
__ctzdi2(di_int a)
{
COMPILER_RT_ABI si_int __ctzdi2(di_int a) {
dwords x;
x.all = a;
const si_int f = -(x.s.low == 0);

View File

@ -17,11 +17,10 @@
/* Precondition: a != 0 */
COMPILER_RT_ABI si_int
__ctzsi2(si_int a)
{
COMPILER_RT_ABI si_int __ctzsi2(si_int a) {
su_int x = (su_int)a;
si_int t = ((x & 0x0000FFFF) == 0) << 4; /* if (x has no small bits) t = 16 else 0 */
si_int t = ((x & 0x0000FFFF) == 0)
<< 4; /* if (x has no small bits) t = 16 else 0 */
x >>= t; /* x = [0 - 0xFFFF] + higher garbage bits */
su_int r = t; /* r = [0, 16] */
/* return r + ctz(x) */
@ -39,7 +38,7 @@ __ctzsi2(si_int a)
r += t; /* r = [0 - 30] and is even */
/* return r + ctz(x) */
/* The branch-less return statement below is equivalent
/* The branch-less return statement below is equivalent
* to the following switch statement:
* switch (x)
* {

View File

@ -19,9 +19,7 @@
/* Precondition: a != 0 */
COMPILER_RT_ABI si_int
__ctzti2(ti_int a)
{
COMPILER_RT_ABI si_int __ctzti2(ti_int a) {
twords x;
x.all = a;
const di_int f = -(x.s.low == 0);

View File

@ -18,13 +18,11 @@
/* Returns: the quotient of (a + ib) / (c + id) */
COMPILER_RT_ABI Dcomplex
__divdc3(double __a, double __b, double __c, double __d)
{
COMPILER_RT_ABI Dcomplex __divdc3(double __a, double __b, double __c,
double __d) {
int __ilogbw = 0;
double __logbw = __compiler_rt_logb(crt_fmax(crt_fabs(__c), crt_fabs(__d)));
if (crt_isfinite(__logbw))
{
if (crt_isfinite(__logbw)) {
__ilogbw = (int)__logbw;
__c = crt_scalbn(__c, -__ilogbw);
__d = crt_scalbn(__d, -__ilogbw);
@ -32,25 +30,20 @@ __divdc3(double __a, double __b, double __c, double __d)
double __denom = __c * __c + __d * __d;
Dcomplex z;
COMPLEX_REAL(z) = crt_scalbn((__a * __c + __b * __d) / __denom, -__ilogbw);
COMPLEX_IMAGINARY(z) = crt_scalbn((__b * __c - __a * __d) / __denom, -__ilogbw);
if (crt_isnan(COMPLEX_REAL(z)) && crt_isnan(COMPLEX_IMAGINARY(z)))
{
if ((__denom == 0.0) && (!crt_isnan(__a) || !crt_isnan(__b)))
{
COMPLEX_IMAGINARY(z) =
crt_scalbn((__b * __c - __a * __d) / __denom, -__ilogbw);
if (crt_isnan(COMPLEX_REAL(z)) && crt_isnan(COMPLEX_IMAGINARY(z))) {
if ((__denom == 0.0) && (!crt_isnan(__a) || !crt_isnan(__b))) {
COMPLEX_REAL(z) = crt_copysign(CRT_INFINITY, __c) * __a;
COMPLEX_IMAGINARY(z) = crt_copysign(CRT_INFINITY, __c) * __b;
}
else if ((crt_isinf(__a) || crt_isinf(__b)) &&
crt_isfinite(__c) && crt_isfinite(__d))
{
} else if ((crt_isinf(__a) || crt_isinf(__b)) && crt_isfinite(__c) &&
crt_isfinite(__d)) {
__a = crt_copysign(crt_isinf(__a) ? 1.0 : 0.0, __a);
__b = crt_copysign(crt_isinf(__b) ? 1.0 : 0.0, __b);
COMPLEX_REAL(z) = CRT_INFINITY * (__a * __c + __b * __d);
COMPLEX_IMAGINARY(z) = CRT_INFINITY * (__b * __c - __a * __d);
}
else if (crt_isinf(__logbw) && __logbw > 0.0 &&
crt_isfinite(__a) && crt_isfinite(__b))
{
} else if (crt_isinf(__logbw) && __logbw > 0.0 && crt_isfinite(__a) &&
crt_isfinite(__b)) {
__c = crt_copysign(crt_isinf(__c) ? 1.0 : 0.0, __c);
__d = crt_copysign(crt_isinf(__d) ? 1.0 : 0.0, __d);
COMPLEX_REAL(z) = 0.0 * (__a * __c + __b * __d);

View File

@ -18,8 +18,7 @@
#define DOUBLE_PRECISION
#include "fp_lib.h"
COMPILER_RT_ABI fp_t
__divdf3(fp_t a, fp_t b) {
COMPILER_RT_ABI fp_t __divdf3(fp_t a, fp_t b) {
const unsigned int aExponent = toRep(a) >> significandBits & maxExponent;
const unsigned int bExponent = toRep(b) >> significandBits & maxExponent;
@ -30,40 +29,51 @@ __divdf3(fp_t a, fp_t b) {
int scale = 0;
// Detect if a or b is zero, denormal, infinity, or NaN.
if (aExponent-1U >= maxExponent-1U || bExponent-1U >= maxExponent-1U) {
if (aExponent - 1U >= maxExponent - 1U ||
bExponent - 1U >= maxExponent - 1U) {
const rep_t aAbs = toRep(a) & absMask;
const rep_t bAbs = toRep(b) & absMask;
// NaN / anything = qNaN
if (aAbs > infRep) return fromRep(toRep(a) | quietBit);
if (aAbs > infRep)
return fromRep(toRep(a) | quietBit);
// anything / NaN = qNaN
if (bAbs > infRep) return fromRep(toRep(b) | quietBit);
if (bAbs > infRep)
return fromRep(toRep(b) | quietBit);
if (aAbs == infRep) {
// infinity / infinity = NaN
if (bAbs == infRep) return fromRep(qnanRep);
if (bAbs == infRep)
return fromRep(qnanRep);
// infinity / anything else = +/- infinity
else return fromRep(aAbs | quotientSign);
else
return fromRep(aAbs | quotientSign);
}
// anything else / infinity = +/- 0
if (bAbs == infRep) return fromRep(quotientSign);
if (bAbs == infRep)
return fromRep(quotientSign);
if (!aAbs) {
// zero / zero = NaN
if (!bAbs) return fromRep(qnanRep);
if (!bAbs)
return fromRep(qnanRep);
// zero / anything else = +/- zero
else return fromRep(quotientSign);
else
return fromRep(quotientSign);
}
// anything else / zero = +/- infinity
if (!bAbs) return fromRep(infRep | quotientSign);
if (!bAbs)
return fromRep(infRep | quotientSign);
// one or both of a or b is denormal, the other (if applicable) is a
// normal number. Renormalize one or both of a and b, and set scale to
// include the necessary exponent adjustment.
if (aAbs < implicitBit) scale += normalize(&aSignificand);
if (bAbs < implicitBit) scale -= normalize(&bSignificand);
if (aAbs < implicitBit)
scale += normalize(&aSignificand);
if (bAbs < implicitBit)
scale -= normalize(&bSignificand);
}
// Or in the implicit significand bit. (If we fell through from the
@ -105,10 +115,10 @@ __divdf3(fp_t a, fp_t b) {
// The last iteration needs to happen with extra precision.
const uint32_t q63blo = bSignificand << 11;
uint64_t correction, reciprocal;
correction = -((uint64_t)recip32*q31b + ((uint64_t)recip32*q63blo >> 32));
correction = -((uint64_t)recip32 * q31b + ((uint64_t)recip32 * q63blo >> 32));
uint32_t cHi = correction >> 32;
uint32_t cLo = correction;
reciprocal = (uint64_t)recip32*cHi + ((uint64_t)recip32*cLo >> 32);
reciprocal = (uint64_t)recip32 * cHi + ((uint64_t)recip32 * cLo >> 32);
// We already adjusted the 32-bit estimate, now we need to adjust the final
// 64-bit reciprocal estimate downward to ensure that it is strictly smaller
@ -195,9 +205,7 @@ __divdf3(fp_t a, fp_t b) {
#if defined(__ARM_EABI__)
#if defined(COMPILER_RT_ARMHF_TARGET)
AEABI_RTABI fp_t __aeabi_ddiv(fp_t a, fp_t b) {
return __divdf3(a, b);
}
AEABI_RTABI fp_t __aeabi_ddiv(fp_t a, fp_t b) { return __divdf3(a, b); }
#else
AEABI_RTABI fp_t __aeabi_ddiv(fp_t a, fp_t b) COMPILER_RT_ALIAS(__divdf3);
#endif

View File

@ -15,14 +15,13 @@
/* Returns: a / b */
COMPILER_RT_ABI di_int
__divdi3(di_int a, di_int b)
{
COMPILER_RT_ABI di_int __divdi3(di_int a, di_int b) {
const int bits_in_dword_m1 = (int)(sizeof(di_int) * CHAR_BIT) - 1;
di_int s_a = a >> bits_in_dword_m1; /* s_a = a < 0 ? -1 : 0 */
di_int s_b = b >> bits_in_dword_m1; /* s_b = b < 0 ? -1 : 0 */
a = (a ^ s_a) - s_a; /* negate if s_a == -1 */
b = (b ^ s_b) - s_b; /* negate if s_b == -1 */
s_a ^= s_b; /*sign of quotient */
return (__udivmoddi4(a, b, (du_int*)0) ^ s_a) - s_a; /* negate if s_a == -1 */
return (__udivmoddi4(a, b, (du_int *)0) ^ s_a) -
s_a; /* negate if s_a == -1 */
}

View File

@ -15,10 +15,8 @@
/* Returns: a / b, *rem = a % b */
COMPILER_RT_ABI di_int
__divmoddi4(di_int a, di_int b, di_int* rem)
{
di_int d = __divdi3(a,b);
*rem = a - (d*b);
COMPILER_RT_ABI di_int __divmoddi4(di_int a, di_int b, di_int *rem) {
di_int d = __divdi3(a, b);
*rem = a - (d * b);
return d;
}

View File

@ -15,12 +15,8 @@
/* Returns: a / b, *rem = a % b */
COMPILER_RT_ABI si_int
__divmodsi4(si_int a, si_int b, si_int* rem)
{
si_int d = __divsi3(a,b);
*rem = a - (d*b);
COMPILER_RT_ABI si_int __divmodsi4(si_int a, si_int b, si_int *rem) {
si_int d = __divsi3(a, b);
*rem = a - (d * b);
return d;
}

View File

@ -18,14 +18,11 @@
/* Returns: the quotient of (a + ib) / (c + id) */
COMPILER_RT_ABI Fcomplex
__divsc3(float __a, float __b, float __c, float __d)
{
COMPILER_RT_ABI Fcomplex __divsc3(float __a, float __b, float __c, float __d) {
int __ilogbw = 0;
float __logbw =
__compiler_rt_logbf(crt_fmaxf(crt_fabsf(__c), crt_fabsf(__d)));
if (crt_isfinite(__logbw))
{
if (crt_isfinite(__logbw)) {
__ilogbw = (int)__logbw;
__c = crt_scalbnf(__c, -__ilogbw);
__d = crt_scalbnf(__d, -__ilogbw);
@ -33,25 +30,20 @@ __divsc3(float __a, float __b, float __c, float __d)
float __denom = __c * __c + __d * __d;
Fcomplex z;
COMPLEX_REAL(z) = crt_scalbnf((__a * __c + __b * __d) / __denom, -__ilogbw);
COMPLEX_IMAGINARY(z) = crt_scalbnf((__b * __c - __a * __d) / __denom, -__ilogbw);
if (crt_isnan(COMPLEX_REAL(z)) && crt_isnan(COMPLEX_IMAGINARY(z)))
{
if ((__denom == 0) && (!crt_isnan(__a) || !crt_isnan(__b)))
{
COMPLEX_IMAGINARY(z) =
crt_scalbnf((__b * __c - __a * __d) / __denom, -__ilogbw);
if (crt_isnan(COMPLEX_REAL(z)) && crt_isnan(COMPLEX_IMAGINARY(z))) {
if ((__denom == 0) && (!crt_isnan(__a) || !crt_isnan(__b))) {
COMPLEX_REAL(z) = crt_copysignf(CRT_INFINITY, __c) * __a;
COMPLEX_IMAGINARY(z) = crt_copysignf(CRT_INFINITY, __c) * __b;
}
else if ((crt_isinf(__a) || crt_isinf(__b)) &&
crt_isfinite(__c) && crt_isfinite(__d))
{
} else if ((crt_isinf(__a) || crt_isinf(__b)) && crt_isfinite(__c) &&
crt_isfinite(__d)) {
__a = crt_copysignf(crt_isinf(__a) ? 1 : 0, __a);
__b = crt_copysignf(crt_isinf(__b) ? 1 : 0, __b);
COMPLEX_REAL(z) = CRT_INFINITY * (__a * __c + __b * __d);
COMPLEX_IMAGINARY(z) = CRT_INFINITY * (__b * __c - __a * __d);
}
else if (crt_isinf(__logbw) && __logbw > 0 &&
crt_isfinite(__a) && crt_isfinite(__b))
{
} else if (crt_isinf(__logbw) && __logbw > 0 && crt_isfinite(__a) &&
crt_isfinite(__b)) {
__c = crt_copysignf(crt_isinf(__c) ? 1 : 0, __c);
__d = crt_copysignf(crt_isinf(__d) ? 1 : 0, __d);
COMPLEX_REAL(z) = 0 * (__a * __c + __b * __d);

View File

@ -18,8 +18,7 @@
#define SINGLE_PRECISION
#include "fp_lib.h"
COMPILER_RT_ABI fp_t
__divsf3(fp_t a, fp_t b) {
COMPILER_RT_ABI fp_t __divsf3(fp_t a, fp_t b) {
const unsigned int aExponent = toRep(a) >> significandBits & maxExponent;
const unsigned int bExponent = toRep(b) >> significandBits & maxExponent;
@ -30,40 +29,51 @@ __divsf3(fp_t a, fp_t b) {
int scale = 0;
// Detect if a or b is zero, denormal, infinity, or NaN.
if (aExponent-1U >= maxExponent-1U || bExponent-1U >= maxExponent-1U) {
if (aExponent - 1U >= maxExponent - 1U ||
bExponent - 1U >= maxExponent - 1U) {
const rep_t aAbs = toRep(a) & absMask;
const rep_t bAbs = toRep(b) & absMask;
// NaN / anything = qNaN
if (aAbs > infRep) return fromRep(toRep(a) | quietBit);
if (aAbs > infRep)
return fromRep(toRep(a) | quietBit);
// anything / NaN = qNaN
if (bAbs > infRep) return fromRep(toRep(b) | quietBit);
if (bAbs > infRep)
return fromRep(toRep(b) | quietBit);
if (aAbs == infRep) {
// infinity / infinity = NaN
if (bAbs == infRep) return fromRep(qnanRep);
if (bAbs == infRep)
return fromRep(qnanRep);
// infinity / anything else = +/- infinity
else return fromRep(aAbs | quotientSign);
else
return fromRep(aAbs | quotientSign);
}
// anything else / infinity = +/- 0
if (bAbs == infRep) return fromRep(quotientSign);
if (bAbs == infRep)
return fromRep(quotientSign);
if (!aAbs) {
// zero / zero = NaN
if (!bAbs) return fromRep(qnanRep);
if (!bAbs)
return fromRep(qnanRep);
// zero / anything else = +/- zero
else return fromRep(quotientSign);
else
return fromRep(quotientSign);
}
// anything else / zero = +/- infinity
if (!bAbs) return fromRep(infRep | quotientSign);
if (!bAbs)
return fromRep(infRep | quotientSign);
// one or both of a or b is denormal, the other (if applicable) is a
// normal number. Renormalize one or both of a and b, and set scale to
// include the necessary exponent adjustment.
if (aAbs < implicitBit) scale += normalize(&aSignificand);
if (bAbs < implicitBit) scale -= normalize(&bSignificand);
if (aAbs < implicitBit)
scale += normalize(&aSignificand);
if (bAbs < implicitBit)
scale -= normalize(&bSignificand);
}
// Or in the implicit significand bit. (If we fell through from the
@ -115,7 +125,7 @@ __divsf3(fp_t a, fp_t b) {
// is the error in the reciprocal of b scaled by the maximum
// possible value of a. As a consequence of this error bound,
// either q or nextafter(q) is the correctly rounded
rep_t quotient = (uint64_t)reciprocal*(aSignificand << 1) >> 32;
rep_t quotient = (uint64_t)reciprocal * (aSignificand << 1) >> 32;
// Two cases: quotient is in [0.5, 1.0) or quotient is in [1.0, 2.0).
// In either case, we are going to compute a residual of the form
@ -179,9 +189,7 @@ __divsf3(fp_t a, fp_t b) {
#if defined(__ARM_EABI__)
#if defined(COMPILER_RT_ARMHF_TARGET)
AEABI_RTABI fp_t __aeabi_fdiv(fp_t a, fp_t b) {
return __divsf3(a, b);
}
AEABI_RTABI fp_t __aeabi_fdiv(fp_t a, fp_t b) { return __divsf3(a, b); }
#else
AEABI_RTABI fp_t __aeabi_fdiv(fp_t a, fp_t b) COMPILER_RT_ALIAS(__divsf3);
#endif
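
The hunks above only show the special-case handling and the quotient step of __divsf3; the 32-bit fixed-point reciprocal they multiply by is refined earlier in the file with Newton-Raphson iterations, which is also what the "either q or nextafter(q)" error-bound comment refers to. A hedged floating-point sketch of that refinement (recip_newton is a made-up name, and the real builtin does this in integer arithmetic, not double):

#include <stdio.h>

/* Approximate 1/b for a normalized significand b in [1, 2): start from the
   classic linear seed 48/17 - (32/17)*b (max error about 1/17), then apply
   Newton-Raphson steps x' = x*(2 - b*x), each of which roughly doubles the
   number of correct bits. */
static double recip_newton(double b) {
  double x = 48.0 / 17.0 - (32.0 / 17.0) * b;
  for (int i = 0; i < 4; ++i)
    x = x * (2.0 - b * x);
  return x;
}

int main(void) {
  double b = 1.734375;
  printf("approx 1/b = %.17g\n", recip_newton(b));
  printf("exact  1/b = %.17g\n", 1.0 / b);
  return 0;
}

Once the reciprocal is good to within an ulp or so, q = a * (1/b) is off by at most one unit in the last place, and the residual a - q*b computed later in the source decides whether q needs to be bumped.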

View File

@ -15,9 +15,7 @@
/* Returns: a / b */
COMPILER_RT_ABI si_int
__divsi3(si_int a, si_int b)
{
COMPILER_RT_ABI si_int __divsi3(si_int a, si_int b) {
const int bits_in_word_m1 = (int)(sizeof(si_int) * CHAR_BIT) - 1;
si_int s_a = a >> bits_in_word_m1; /* s_a = a < 0 ? -1 : 0 */
si_int s_b = b >> bits_in_word_m1; /* s_b = b < 0 ? -1 : 0 */
@ -30,7 +28,7 @@ __divsi3(si_int a, si_int b)
* On CPUs with unsigned hardware division support,
* this uses the unsigned division instruction.
*/
return ((su_int)a/(su_int)b ^ s_a) - s_a; /* negate if s_a == -1 */
return ((su_int)a / (su_int)b ^ s_a) - s_a; /* negate if s_a == -1 */
}
#if defined(__ARM_EABI__)
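
The sign handling in __divsi3 above is branch-free: s = a >> 31 is 0 for non-negative a and -1 (all bits set) for negative a, so (a ^ s) - s yields |a|, and the same trick re-applies the quotient's sign at the end. A small self-contained check (toy code; note that >> on a negative int is an arithmetic shift on the compilers compiler-rt targets, though strictly implementation-defined in ISO C):

#include <stdio.h>

int main(void) {
  int a = -40, b = 6;
  int s_a = a >> 31;                         /* -1: a is negative */
  int s_b = b >> 31;                         /*  0: b is non-negative */
  unsigned ua = (unsigned)((a ^ s_a) - s_a); /* |a| = 40 */
  unsigned ub = (unsigned)((b ^ s_b) - s_b); /* |b| = 6  */
  int q = (int)(ua / ub);                    /* unsigned divide: 6 */
  s_a ^= s_b;                                /* sign of the quotient: -1 */
  printf("%d\n", (q ^ s_a) - s_a);           /* -6, same as -40 / 6 */
  return 0;
}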

View File

@ -18,14 +18,12 @@
/* Returns: the quotient of (a + ib) / (c + id) */
COMPILER_RT_ABI Lcomplex
__divtc3(long double __a, long double __b, long double __c, long double __d)
{
COMPILER_RT_ABI Lcomplex __divtc3(long double __a, long double __b,
long double __c, long double __d) {
int __ilogbw = 0;
long double __logbw =
__compiler_rt_logbl(crt_fmaxl(crt_fabsl(__c), crt_fabsl(__d)));
if (crt_isfinite(__logbw))
{
if (crt_isfinite(__logbw)) {
__ilogbw = (int)__logbw;
__c = crt_scalbnl(__c, -__ilogbw);
__d = crt_scalbnl(__d, -__ilogbw);
@ -33,25 +31,20 @@ __divtc3(long double __a, long double __b, long double __c, long double __d)
long double __denom = __c * __c + __d * __d;
Lcomplex z;
COMPLEX_REAL(z) = crt_scalbnl((__a * __c + __b * __d) / __denom, -__ilogbw);
COMPLEX_IMAGINARY(z) = crt_scalbnl((__b * __c - __a * __d) / __denom, -__ilogbw);
if (crt_isnan(COMPLEX_REAL(z)) && crt_isnan(COMPLEX_IMAGINARY(z)))
{
if ((__denom == 0.0) && (!crt_isnan(__a) || !crt_isnan(__b)))
{
COMPLEX_IMAGINARY(z) =
crt_scalbnl((__b * __c - __a * __d) / __denom, -__ilogbw);
if (crt_isnan(COMPLEX_REAL(z)) && crt_isnan(COMPLEX_IMAGINARY(z))) {
if ((__denom == 0.0) && (!crt_isnan(__a) || !crt_isnan(__b))) {
COMPLEX_REAL(z) = crt_copysignl(CRT_INFINITY, __c) * __a;
COMPLEX_IMAGINARY(z) = crt_copysignl(CRT_INFINITY, __c) * __b;
}
else if ((crt_isinf(__a) || crt_isinf(__b)) &&
crt_isfinite(__c) && crt_isfinite(__d))
{
} else if ((crt_isinf(__a) || crt_isinf(__b)) && crt_isfinite(__c) &&
crt_isfinite(__d)) {
__a = crt_copysignl(crt_isinf(__a) ? 1.0 : 0.0, __a);
__b = crt_copysignl(crt_isinf(__b) ? 1.0 : 0.0, __b);
COMPLEX_REAL(z) = CRT_INFINITY * (__a * __c + __b * __d);
COMPLEX_IMAGINARY(z) = CRT_INFINITY * (__b * __c - __a * __d);
}
else if (crt_isinf(__logbw) && __logbw > 0.0 &&
crt_isfinite(__a) && crt_isfinite(__b))
{
} else if (crt_isinf(__logbw) && __logbw > 0.0 && crt_isfinite(__a) &&
crt_isfinite(__b)) {
__c = crt_copysignl(crt_isinf(__c) ? 1.0 : 0.0, __c);
__d = crt_copysignl(crt_isinf(__d) ? 1.0 : 0.0, __d);
COMPLEX_REAL(z) = 0.0 * (__a * __c + __b * __d);

View File

@ -30,40 +30,51 @@ COMPILER_RT_ABI fp_t __divtf3(fp_t a, fp_t b) {
int scale = 0;
// Detect if a or b is zero, denormal, infinity, or NaN.
if (aExponent-1U >= maxExponent-1U || bExponent-1U >= maxExponent-1U) {
if (aExponent - 1U >= maxExponent - 1U ||
bExponent - 1U >= maxExponent - 1U) {
const rep_t aAbs = toRep(a) & absMask;
const rep_t bAbs = toRep(b) & absMask;
// NaN / anything = qNaN
if (aAbs > infRep) return fromRep(toRep(a) | quietBit);
if (aAbs > infRep)
return fromRep(toRep(a) | quietBit);
// anything / NaN = qNaN
if (bAbs > infRep) return fromRep(toRep(b) | quietBit);
if (bAbs > infRep)
return fromRep(toRep(b) | quietBit);
if (aAbs == infRep) {
// infinity / infinity = NaN
if (bAbs == infRep) return fromRep(qnanRep);
if (bAbs == infRep)
return fromRep(qnanRep);
// infinity / anything else = +/- infinity
else return fromRep(aAbs | quotientSign);
else
return fromRep(aAbs | quotientSign);
}
// anything else / infinity = +/- 0
if (bAbs == infRep) return fromRep(quotientSign);
if (bAbs == infRep)
return fromRep(quotientSign);
if (!aAbs) {
// zero / zero = NaN
if (!bAbs) return fromRep(qnanRep);
if (!bAbs)
return fromRep(qnanRep);
// zero / anything else = +/- zero
else return fromRep(quotientSign);
else
return fromRep(quotientSign);
}
// anything else / zero = +/- infinity
if (!bAbs) return fromRep(infRep | quotientSign);
if (!bAbs)
return fromRep(infRep | quotientSign);
// one or both of a or b is denormal, the other (if applicable) is a
// normal number. Renormalize one or both of a and b, and set scale to
// include the necessary exponent adjustment.
if (aAbs < implicitBit) scale += normalize(&aSignificand);
if (bAbs < implicitBit) scale -= normalize(&bSignificand);
if (aAbs < implicitBit)
scale += normalize(&aSignificand);
if (bAbs < implicitBit)
scale -= normalize(&bSignificand);
}
// Or in the implicit significand bit. (If we fell through from the
@ -179,8 +190,7 @@ COMPILER_RT_ABI fp_t __divtf3(fp_t a, fp_t b) {
if (writtenExponent >= maxExponent) {
// If we have overflowed the exponent, return infinity.
return fromRep(infRep | quotientSign);
}
else if (writtenExponent < 1) {
} else if (writtenExponent < 1) {
if (writtenExponent == 0) {
// Check whether the rounded result is normal.
const bool round = (residual << 1) > bSignificand;
@ -196,8 +206,7 @@ COMPILER_RT_ABI fp_t __divtf3(fp_t a, fp_t b) {
// Flush denormals to zero. In the future, it would be nice to add
// code to round them correctly.
return fromRep(quotientSign);
}
else {
} else {
const bool round = (residual << 1) >= bSignificand;
// Clear the implicit bit
rep_t absResult = quotient & significandMask;

View File

@ -17,16 +17,15 @@
/* Returns: a / b */
COMPILER_RT_ABI ti_int
__divti3(ti_int a, ti_int b)
{
COMPILER_RT_ABI ti_int __divti3(ti_int a, ti_int b) {
const int bits_in_tword_m1 = (int)(sizeof(ti_int) * CHAR_BIT) - 1;
ti_int s_a = a >> bits_in_tword_m1; /* s_a = a < 0 ? -1 : 0 */
ti_int s_b = b >> bits_in_tword_m1; /* s_b = b < 0 ? -1 : 0 */
a = (a ^ s_a) - s_a; /* negate if s_a == -1 */
b = (b ^ s_b) - s_b; /* negate if s_b == -1 */
s_a ^= s_b; /* sign of quotient */
return (__udivmodti4(a, b, (tu_int*)0) ^ s_a) - s_a; /* negate if s_a == -1 */
return (__udivmodti4(a, b, (tu_int *)0) ^ s_a) -
s_a; /* negate if s_a == -1 */
}
#endif /* CRT_HAS_128BIT */

View File

@ -17,13 +17,11 @@
/* Returns: the quotient of (a + ib) / (c + id) */
COMPILER_RT_ABI Lcomplex
__divxc3(long double __a, long double __b, long double __c, long double __d)
{
COMPILER_RT_ABI Lcomplex __divxc3(long double __a, long double __b,
long double __c, long double __d) {
int __ilogbw = 0;
long double __logbw = crt_logbl(crt_fmaxl(crt_fabsl(__c), crt_fabsl(__d)));
if (crt_isfinite(__logbw))
{
if (crt_isfinite(__logbw)) {
__ilogbw = (int)__logbw;
__c = crt_scalbnl(__c, -__ilogbw);
__d = crt_scalbnl(__d, -__ilogbw);
@ -31,25 +29,20 @@ __divxc3(long double __a, long double __b, long double __c, long double __d)
long double __denom = __c * __c + __d * __d;
Lcomplex z;
COMPLEX_REAL(z) = crt_scalbnl((__a * __c + __b * __d) / __denom, -__ilogbw);
COMPLEX_IMAGINARY(z) = crt_scalbnl((__b * __c - __a * __d) / __denom, -__ilogbw);
if (crt_isnan(COMPLEX_REAL(z)) && crt_isnan(COMPLEX_IMAGINARY(z)))
{
if ((__denom == 0) && (!crt_isnan(__a) || !crt_isnan(__b)))
{
COMPLEX_IMAGINARY(z) =
crt_scalbnl((__b * __c - __a * __d) / __denom, -__ilogbw);
if (crt_isnan(COMPLEX_REAL(z)) && crt_isnan(COMPLEX_IMAGINARY(z))) {
if ((__denom == 0) && (!crt_isnan(__a) || !crt_isnan(__b))) {
COMPLEX_REAL(z) = crt_copysignl(CRT_INFINITY, __c) * __a;
COMPLEX_IMAGINARY(z) = crt_copysignl(CRT_INFINITY, __c) * __b;
}
else if ((crt_isinf(__a) || crt_isinf(__b)) &&
crt_isfinite(__c) && crt_isfinite(__d))
{
} else if ((crt_isinf(__a) || crt_isinf(__b)) && crt_isfinite(__c) &&
crt_isfinite(__d)) {
__a = crt_copysignl(crt_isinf(__a) ? 1 : 0, __a);
__b = crt_copysignl(crt_isinf(__b) ? 1 : 0, __b);
COMPLEX_REAL(z) = CRT_INFINITY * (__a * __c + __b * __d);
COMPLEX_IMAGINARY(z) = CRT_INFINITY * (__b * __c - __a * __d);
}
else if (crt_isinf(__logbw) && __logbw > 0 &&
crt_isfinite(__a) && crt_isfinite(__b))
{
} else if (crt_isinf(__logbw) && __logbw > 0 && crt_isfinite(__a) &&
crt_isfinite(__b)) {
__c = crt_copysignl(crt_isinf(__c) ? 1 : 0, __c);
__d = crt_copysignl(crt_isinf(__d) ? 1 : 0, __d);
COMPLEX_REAL(z) = 0 * (__a * __c + __b * __d);

View File

@ -11,7 +11,6 @@
#include <string.h>
#include "int_lib.h"
#include "int_util.h"
#ifdef __BIONIC__
/* There are 4 pthread key cleanup rounds on Bionic. Delay emutls deallocation
@ -30,7 +29,7 @@
typedef struct emutls_address_array {
uintptr_t skip_destructor_rounds;
uintptr_t size; /* number of elements in the 'data' array */
void* data[];
void *data[];
} emutls_address_array;
static void emutls_shutdown(emutls_address_array *array);
@ -59,14 +58,14 @@ static __inline void *emutls_memalign_alloc(size_t align, size_t size) {
if (posix_memalign(&base, align, size) != 0)
abort();
#else
#define EXTRA_ALIGN_PTR_BYTES (align - 1 + sizeof(void*))
char* object;
if ((object = (char*)malloc(EXTRA_ALIGN_PTR_BYTES + size)) == NULL)
#define EXTRA_ALIGN_PTR_BYTES (align - 1 + sizeof(void *))
char *object;
if ((object = (char *)malloc(EXTRA_ALIGN_PTR_BYTES + size)) == NULL)
abort();
base = (void*)(((uintptr_t)(object + EXTRA_ALIGN_PTR_BYTES))
& ~(uintptr_t)(align - 1));
base = (void *)(((uintptr_t)(object + EXTRA_ALIGN_PTR_BYTES)) &
~(uintptr_t)(align - 1));
((void**)base)[-1] = object;
((void **)base)[-1] = object;
#endif
return base;
}
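
For platforms without posix_memalign, the fallback above over-allocates by align - 1 + sizeof(void *), rounds the pointer up to the requested alignment, and stashes malloc's original pointer one slot below the aligned block so emutls_memalign_free can recover it. A standalone sketch of the same trick (function names are ours, not compiler-rt's):

#include <stdint.h>
#include <stdlib.h>

/* align must be a power of two. */
static void *aligned_alloc_fallback(size_t align, size_t size) {
  char *raw = (char *)malloc(size + align - 1 + sizeof(void *));
  if (raw == NULL)
    return NULL;
  uintptr_t p = ((uintptr_t)(raw + sizeof(void *) + align - 1)) &
                ~(uintptr_t)(align - 1);
  ((void **)p)[-1] = raw;   /* remember where malloc's block really starts */
  return (void *)p;
}

static void aligned_free_fallback(void *p) {
  if (p != NULL)
    free(((void **)p)[-1]); /* free via the stashed original pointer */
}

int main(void) {
  void *p = aligned_alloc_fallback(64, 100);
  aligned_free_fallback(p);
  return 0;
}
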
@ -76,20 +75,20 @@ static __inline void emutls_memalign_free(void *base) {
free(base);
#else
/* The mallocated address is in ((void**)base)[-1] */
free(((void**)base)[-1]);
free(((void **)base)[-1]);
#endif
}
static __inline void emutls_setspecific(emutls_address_array *value) {
pthread_setspecific(emutls_pthread_key, (void*) value);
pthread_setspecific(emutls_pthread_key, (void *)value);
}
static __inline emutls_address_array* emutls_getspecific() {
return (emutls_address_array*) pthread_getspecific(emutls_pthread_key);
static __inline emutls_address_array *emutls_getspecific() {
return (emutls_address_array *)pthread_getspecific(emutls_pthread_key);
}
static void emutls_key_destructor(void* ptr) {
emutls_address_array *array = (emutls_address_array*)ptr;
static void emutls_key_destructor(void *ptr) {
emutls_address_array *array = (emutls_address_array *)ptr;
if (array->skip_destructor_rounds > 0) {
/* emutls is deallocated using a pthread key destructor. These
* destructors are called in several rounds to accommodate destructor
@ -117,26 +116,22 @@ static __inline void emutls_init_once(void) {
pthread_once(&once, emutls_init);
}
static __inline void emutls_lock() {
pthread_mutex_lock(&emutls_mutex);
}
static __inline void emutls_lock() { pthread_mutex_lock(&emutls_mutex); }
static __inline void emutls_unlock() {
pthread_mutex_unlock(&emutls_mutex);
}
static __inline void emutls_unlock() { pthread_mutex_unlock(&emutls_mutex); }
#else /* _WIN32 */
#include <windows.h>
#include <assert.h>
#include <malloc.h>
#include <stdio.h>
#include <assert.h>
#include <windows.h>
static LPCRITICAL_SECTION emutls_mutex;
static DWORD emutls_tls_index = TLS_OUT_OF_INDEXES;
typedef uintptr_t gcc_word;
typedef void * gcc_pointer;
typedef void *gcc_pointer;
static void win_error(DWORD last_err, const char *hint) {
char *buffer = NULL;
@ -163,9 +158,7 @@ static __inline void *emutls_memalign_alloc(size_t align, size_t size) {
return base;
}
static __inline void emutls_memalign_free(void *base) {
_aligned_free(base);
}
static __inline void emutls_memalign_free(void *base) { _aligned_free(base); }
static void emutls_exit(void) {
if (emutls_mutex) {
@ -174,16 +167,17 @@ static void emutls_exit(void) {
emutls_mutex = NULL;
}
if (emutls_tls_index != TLS_OUT_OF_INDEXES) {
emutls_shutdown((emutls_address_array*)TlsGetValue(emutls_tls_index));
emutls_shutdown((emutls_address_array *)TlsGetValue(emutls_tls_index));
TlsFree(emutls_tls_index);
emutls_tls_index = TLS_OUT_OF_INDEXES;
}
}
#pragma warning (push)
#pragma warning (disable : 4100)
#pragma warning(push)
#pragma warning(disable : 4100)
static BOOL CALLBACK emutls_init(PINIT_ONCE p0, PVOID p1, PVOID *p2) {
emutls_mutex = (LPCRITICAL_SECTION)_aligned_malloc(sizeof(CRITICAL_SECTION), 16);
emutls_mutex =
(LPCRITICAL_SECTION)_aligned_malloc(sizeof(CRITICAL_SECTION), 16);
if (!emutls_mutex) {
win_error(GetLastError(), "_aligned_malloc");
return FALSE;
@ -205,27 +199,23 @@ static __inline void emutls_init_once(void) {
InitOnceExecuteOnce(&once, emutls_init, NULL, NULL);
}
static __inline void emutls_lock() {
EnterCriticalSection(emutls_mutex);
}
static __inline void emutls_lock() { EnterCriticalSection(emutls_mutex); }
static __inline void emutls_unlock() {
LeaveCriticalSection(emutls_mutex);
}
static __inline void emutls_unlock() { LeaveCriticalSection(emutls_mutex); }
static __inline void emutls_setspecific(emutls_address_array *value) {
if (TlsSetValue(emutls_tls_index, (LPVOID) value) == 0)
if (TlsSetValue(emutls_tls_index, (LPVOID)value) == 0)
win_abort(GetLastError(), "TlsSetValue");
}
static __inline emutls_address_array* emutls_getspecific() {
static __inline emutls_address_array *emutls_getspecific() {
LPVOID value = TlsGetValue(emutls_tls_index);
if (value == NULL) {
const DWORD err = GetLastError();
if (err != ERROR_SUCCESS)
win_abort(err, "TlsGetValue");
}
return (emutls_address_array*) value;
return (emutls_address_array *)value;
}
/* Provide atomic load/store functions for emutls_get_index if built with MSVC.
@ -253,7 +243,7 @@ static __inline void __atomic_store_n(void *ptr, uintptr_t val, unsigned type) {
#endif /* __ATOMIC_RELEASE */
#pragma warning (pop)
#pragma warning(pop)
#endif /* _WIN32 */
@ -285,22 +275,22 @@ typedef struct __emutls_control {
gcc_word align; /* alignment of the object in bytes */
union {
uintptr_t index; /* data[index-1] is the object address */
void* address; /* object address, when in single thread env */
void *address; /* object address, when in single thread env */
} object;
void* value; /* null or non-zero initial value for the object */
void *value; /* null or non-zero initial value for the object */
} __emutls_control;
/* Emulated TLS objects are always allocated at run-time. */
static __inline void *emutls_allocate_object(__emutls_control *control) {
/* Use standard C types, check with gcc's emutls.o. */
COMPILE_TIME_ASSERT(sizeof(uintptr_t) == sizeof(gcc_pointer));
COMPILE_TIME_ASSERT(sizeof(uintptr_t) == sizeof(void*));
COMPILE_TIME_ASSERT(sizeof(uintptr_t) == sizeof(void *));
size_t size = control->size;
size_t align = control->align;
void* base;
if (align < sizeof(void*))
align = sizeof(void*);
void *base;
if (align < sizeof(void *))
align = sizeof(void *);
/* Make sure that align is power of 2. */
if ((align & (align - 1)) != 0)
abort();
@ -313,7 +303,6 @@ static __inline void *emutls_allocate_object(__emutls_control *control) {
return base;
}
/* Returns control->object.index; set index if not allocated yet. */
static __inline uintptr_t emutls_get_index(__emutls_control *control) {
uintptr_t index = __atomic_load_n(&control->object.index, __ATOMIC_ACQUIRE);
@ -363,30 +352,30 @@ static __inline uintptr_t emutls_asize(uintptr_t N) {
*/
static __inline emutls_address_array *
emutls_get_address_array(uintptr_t index) {
emutls_address_array* array = emutls_getspecific();
emutls_address_array *array = emutls_getspecific();
if (array == NULL) {
uintptr_t new_size = emutls_new_data_array_size(index);
array = (emutls_address_array*) malloc(emutls_asize(new_size));
array = (emutls_address_array *)malloc(emutls_asize(new_size));
if (array) {
memset(array->data, 0, new_size * sizeof(void*));
memset(array->data, 0, new_size * sizeof(void *));
array->skip_destructor_rounds = EMUTLS_SKIP_DESTRUCTOR_ROUNDS;
}
emutls_check_array_set_size(array, new_size);
} else if (index > array->size) {
uintptr_t orig_size = array->size;
uintptr_t new_size = emutls_new_data_array_size(index);
array = (emutls_address_array*) realloc(array, emutls_asize(new_size));
array = (emutls_address_array *)realloc(array, emutls_asize(new_size));
if (array)
memset(array->data + orig_size, 0,
(new_size - orig_size) * sizeof(void*));
(new_size - orig_size) * sizeof(void *));
emutls_check_array_set_size(array, new_size);
}
return array;
}
void* __emutls_get_address(__emutls_control* control) {
void *__emutls_get_address(__emutls_control *control) {
uintptr_t index = emutls_get_index(control);
emutls_address_array* array = emutls_get_address_array(index--);
emutls_address_array *array = emutls_get_address_array(index--);
if (array->data[index] == NULL)
array->data[index] = emutls_allocate_object(control);
return array->data[index];
@ -394,8 +383,7 @@ void* __emutls_get_address(__emutls_control* control) {
#ifdef __BIONIC__
/* Called by Bionic on dlclose to delete the emutls pthread key. */
__attribute__((visibility("hidden")))
void __emutls_unregister_key(void) {
__attribute__((visibility("hidden"))) void __emutls_unregister_key(void) {
if (emutls_key_created) {
pthread_key_delete(emutls_pthread_key);
emutls_key_created = false;
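
Taken together, the file above gives every __emutls_control a global index and every thread a lazily grown array of object pointers hanging off a pthread key (or a TLS slot on Windows); __emutls_get_address allocates the object itself on first touch. A much-reduced sketch of that lookup scheme (illustrative only: it skips alignment, initial values, error handling, Windows support, and the Bionic destructor rounds the real emutls.c deals with):

#include <pthread.h>
#include <stdlib.h>
#include <string.h>

typedef struct {
  size_t size;   /* number of slots in data */
  void **data;   /* data[i] is this thread's copy of object i+1 */
} tls_array;

static pthread_key_t key;
static pthread_once_t once = PTHREAD_ONCE_INIT;

static void free_array(void *p) {
  tls_array *a = (tls_array *)p;
  for (size_t i = 0; i < a->size; ++i)
    free(a->data[i]);
  free(a->data);
  free(a);
}

static void make_key(void) { pthread_key_create(&key, free_array); }

/* Return this thread's storage for object number `index` (1-based),
   `size` bytes, zero-initialized on first use. */
void *toy_emutls_get(size_t index, size_t size) {
  pthread_once(&once, make_key);
  tls_array *a = (tls_array *)pthread_getspecific(key);
  if (a == NULL) {
    a = (tls_array *)calloc(1, sizeof *a);
    pthread_setspecific(key, a);
  }
  if (index > a->size) {
    a->data = (void **)realloc(a->data, index * sizeof(void *));
    memset(a->data + a->size, 0, (index - a->size) * sizeof(void *));
    a->size = index;
  }
  if (a->data[index - 1] == NULL)
    a->data[index - 1] = calloc(1, size);
  return a->data[index - 1];
}

int main(void) {
  int *counter = (int *)toy_emutls_get(1, sizeof(int)); /* "TLS object" #1 */
  return ++*counter - 1;                                /* 0: freshly zeroed */
}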

View File

@ -29,9 +29,9 @@
#endif /* _WIN32 */
#if __LP64__
#define TRAMPOLINE_SIZE 48
#define TRAMPOLINE_SIZE 48
#else
#define TRAMPOLINE_SIZE 40
#define TRAMPOLINE_SIZE 40
#endif
/*
@ -42,15 +42,15 @@
* that means changing the protection on those page(s) to rwx.
*/
COMPILER_RT_ABI void
__enable_execute_stack(void* addr)
{
COMPILER_RT_ABI void __enable_execute_stack(void *addr) {
#if _WIN32
MEMORY_BASIC_INFORMATION mbi;
if (!VirtualQuery (addr, &mbi, sizeof(mbi)))
return; /* We should probably assert here because there is no return value */
VirtualProtect (mbi.BaseAddress, mbi.RegionSize, PAGE_EXECUTE_READWRITE, &mbi.Protect);
if (!VirtualQuery(addr, &mbi, sizeof(mbi)))
return; /* We should probably assert here because there is no return value
*/
VirtualProtect(mbi.BaseAddress, mbi.RegionSize, PAGE_EXECUTE_READWRITE,
&mbi.Protect);
#else
#if __APPLE__
/* On Darwin, pagesize is always 4096 bytes */
@ -61,11 +61,12 @@ __enable_execute_stack(void* addr)
const uintptr_t pageSize = sysconf(_SC_PAGESIZE);
#endif /* __APPLE__ */
const uintptr_t pageAlignMask = ~(pageSize-1);
const uintptr_t pageAlignMask = ~(pageSize - 1);
uintptr_t p = (uintptr_t)addr;
unsigned char* startPage = (unsigned char*)(p & pageAlignMask);
unsigned char* endPage = (unsigned char*)((p+TRAMPOLINE_SIZE+pageSize) & pageAlignMask);
unsigned char *startPage = (unsigned char *)(p & pageAlignMask);
unsigned char *endPage =
(unsigned char *)((p + TRAMPOLINE_SIZE + pageSize) & pageAlignMask);
size_t length = endPage - startPage;
(void) mprotect((void *)startPage, length, PROT_READ | PROT_WRITE | PROT_EXEC);
(void)mprotect((void *)startPage, length, PROT_READ | PROT_WRITE | PROT_EXEC);
#endif
}
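
The arithmetic above rounds addr down to the start of its page and rounds addr + TRAMPOLINE_SIZE up past the following page boundary, so the mprotect covers the whole trampoline even when it straddles two pages. A quick numeric illustration (plain C; a fixed 4 KiB page and the 48-byte __LP64__ trampoline size are assumed):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
  const uintptr_t pageSize = 4096;
  const uintptr_t pageAlignMask = ~(pageSize - 1);
  const uintptr_t trampolineSize = 48;
  uintptr_t addr = 0x12345ff0;      /* 16 bytes below a page boundary */
  uintptr_t startPage = addr & pageAlignMask;
  uintptr_t endPage = (addr + trampolineSize + pageSize) & pageAlignMask;
  printf("protect 0x%" PRIxPTR "..0x%" PRIxPTR " (%" PRIuPTR " bytes)\n",
         startPage, endPage, endPage - startPage);   /* two pages: 8192 */
  return 0;
}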

View File

@ -7,12 +7,9 @@
* ===----------------------------------------------------------------------===
*/
#include "int_lib.h"
#include <stdio.h>
/*
* __eprintf() was used in an old version of <assert.h>.
* It can eventually go away, but it is needed when linking
@ -25,9 +22,8 @@
__attribute__((visibility("hidden")))
#endif
COMPILER_RT_ABI void
__eprintf(const char* format, const char* assertion_expression,
const char* line, const char* file)
{
__eprintf(const char *format, const char *assertion_expression,
const char *line, const char *file) {
fprintf(stderr, format, assertion_expression, line, file);
fflush(stderr);
compilerrt_abort();

View File

@ -17,15 +17,11 @@ COMPILER_RT_ABI NOINLINE float __extendhfsf2(uint16_t a) {
return __extendXfYf2__(a);
}
COMPILER_RT_ABI float __gnu_h2f_ieee(uint16_t a) {
return __extendhfsf2(a);
}
COMPILER_RT_ABI float __gnu_h2f_ieee(uint16_t a) { return __extendhfsf2(a); }
#if defined(__ARM_EABI__)
#if defined(COMPILER_RT_ARMHF_TARGET)
AEABI_RTABI float __aeabi_h2f(uint16_t a) {
return __extendhfsf2(a);
}
AEABI_RTABI float __aeabi_h2f(uint16_t a) { return __extendhfsf2(a); }
#else
AEABI_RTABI float __aeabi_h2f(uint16_t a) COMPILER_RT_ALIAS(__extendhfsf2);
#endif

View File

@ -11,15 +11,11 @@
#define DST_DOUBLE
#include "fp_extend_impl.inc"
COMPILER_RT_ABI double __extendsfdf2(float a) {
return __extendXfYf2__(a);
}
COMPILER_RT_ABI double __extendsfdf2(float a) { return __extendXfYf2__(a); }
#if defined(__ARM_EABI__)
#if defined(COMPILER_RT_ARMHF_TARGET)
AEABI_RTABI double __aeabi_f2d(float a) {
return __extendsfdf2(a);
}
AEABI_RTABI double __aeabi_f2d(float a) { return __extendsfdf2(a); }
#else
AEABI_RTABI double __aeabi_f2d(float a) COMPILER_RT_ALIAS(__extendsfdf2);
#endif

View File

@ -17,13 +17,10 @@
* the value zero if a is zero. The least significant bit is index one.
*/
COMPILER_RT_ABI si_int
__ffsdi2(di_int a)
{
COMPILER_RT_ABI si_int __ffsdi2(di_int a) {
dwords x;
x.all = a;
if (x.s.low == 0)
{
if (x.s.low == 0) {
if (x.s.high == 0)
return 0;
return __builtin_ctz(x.s.high) + (1 + sizeof(si_int) * CHAR_BIT);
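
__ffsdi2 above assembles a 64-bit find-first-set from two 32-bit halves: when the low word is zero, the answer is the high word's count-trailing-zeros plus one plus the 32 positions already skipped. A standalone equivalent built on the same __builtin_ctz:

#include <stdint.h>
#include <stdio.h>

static int ffs64(uint64_t a) {
  uint32_t low = (uint32_t)a, high = (uint32_t)(a >> 32);
  if (low == 0) {
    if (high == 0)
      return 0;
    return __builtin_ctz(high) + 1 + 32;
  }
  return __builtin_ctz(low) + 1;
}

int main(void) {
  printf("%d %d %d\n", ffs64(0), ffs64(1), ffs64(1ULL << 40)); /* 0 1 41 */
  return 0;
}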

View File

@ -17,11 +17,8 @@
* the value zero if a is zero. The least significant bit is index one.
*/
COMPILER_RT_ABI si_int
__ffssi2(si_int a)
{
if (a == 0)
{
COMPILER_RT_ABI si_int __ffssi2(si_int a) {
if (a == 0) {
return 0;
}
return __builtin_ctz(a) + 1;

View File

@ -19,13 +19,10 @@
* the value zero if a is zero. The least significant bit is index one.
*/
COMPILER_RT_ABI si_int
__ffsti2(ti_int a)
{
COMPILER_RT_ABI si_int __ffsti2(ti_int a) {
twords x;
x.all = a;
if (x.s.low == 0)
{
if (x.s.low == 0) {
if (x.s.high == 0)
return 0;
return __builtin_ctzll(x.s.high) + (1 + sizeof(di_int) * CHAR_BIT);

View File

@ -17,9 +17,7 @@
COMPILER_RT_ABI du_int __fixunsdfdi(double a);
COMPILER_RT_ABI di_int
__fixdfdi(double a)
{
COMPILER_RT_ABI di_int __fixdfdi(double a) {
if (a < 0.0) {
return -__fixunsdfdi(-a);
}
@ -36,18 +34,13 @@ typedef di_int fixint_t;
typedef du_int fixuint_t;
#include "fp_fixint_impl.inc"
COMPILER_RT_ABI di_int
__fixdfdi(fp_t a) {
return __fixint(a);
}
COMPILER_RT_ABI di_int __fixdfdi(fp_t a) { return __fixint(a); }
#endif
#if defined(__ARM_EABI__)
#if defined(COMPILER_RT_ARMHF_TARGET)
AEABI_RTABI di_int __aeabi_d2lz(fp_t a) {
return __fixdfdi(a);
}
AEABI_RTABI di_int __aeabi_d2lz(fp_t a) { return __fixdfdi(a); }
#else
AEABI_RTABI di_int __aeabi_d2lz(fp_t a) COMPILER_RT_ALIAS(__fixdfdi);
#endif

View File

@ -13,16 +13,11 @@ typedef si_int fixint_t;
typedef su_int fixuint_t;
#include "fp_fixint_impl.inc"
COMPILER_RT_ABI si_int
__fixdfsi(fp_t a) {
return __fixint(a);
}
COMPILER_RT_ABI si_int __fixdfsi(fp_t a) { return __fixint(a); }
#if defined(__ARM_EABI__)
#if defined(COMPILER_RT_ARMHF_TARGET)
AEABI_RTABI si_int __aeabi_d2iz(fp_t a) {
return __fixdfsi(a);
}
AEABI_RTABI si_int __aeabi_d2iz(fp_t a) { return __fixdfsi(a); }
#else
AEABI_RTABI si_int __aeabi_d2iz(fp_t a) COMPILER_RT_ALIAS(__fixdfsi);
#endif

View File

@ -17,9 +17,6 @@ typedef ti_int fixint_t;
typedef tu_int fixuint_t;
#include "fp_fixint_impl.inc"
COMPILER_RT_ABI ti_int
__fixdfti(fp_t a) {
return __fixint(a);
}
COMPILER_RT_ABI ti_int __fixdfti(fp_t a) { return __fixint(a); }
#endif /* CRT_HAS_128BIT */

View File

@ -17,9 +17,7 @@
COMPILER_RT_ABI du_int __fixunssfdi(float a);
COMPILER_RT_ABI di_int
__fixsfdi(float a)
{
COMPILER_RT_ABI di_int __fixsfdi(float a) {
if (a < 0.0f) {
return -__fixunssfdi(-a);
}
@ -36,18 +34,13 @@ typedef di_int fixint_t;
typedef du_int fixuint_t;
#include "fp_fixint_impl.inc"
COMPILER_RT_ABI di_int
__fixsfdi(fp_t a) {
return __fixint(a);
}
COMPILER_RT_ABI di_int __fixsfdi(fp_t a) { return __fixint(a); }
#endif
#if defined(__ARM_EABI__)
#if defined(COMPILER_RT_ARMHF_TARGET)
AEABI_RTABI di_int __aeabi_f2lz(fp_t a) {
return __fixsfdi(a);
}
AEABI_RTABI di_int __aeabi_f2lz(fp_t a) { return __fixsfdi(a); }
#else
AEABI_RTABI di_int __aeabi_f2lz(fp_t a) COMPILER_RT_ALIAS(__fixsfdi);
#endif

View File

@ -13,16 +13,11 @@ typedef si_int fixint_t;
typedef su_int fixuint_t;
#include "fp_fixint_impl.inc"
COMPILER_RT_ABI si_int
__fixsfsi(fp_t a) {
return __fixint(a);
}
COMPILER_RT_ABI si_int __fixsfsi(fp_t a) { return __fixint(a); }
#if defined(__ARM_EABI__)
#if defined(COMPILER_RT_ARMHF_TARGET)
AEABI_RTABI si_int __aeabi_f2iz(fp_t a) {
return __fixsfsi(a);
}
AEABI_RTABI si_int __aeabi_f2iz(fp_t a) { return __fixsfsi(a); }
#else
AEABI_RTABI si_int __aeabi_f2iz(fp_t a) COMPILER_RT_ALIAS(__fixsfsi);
#endif

View File

@ -17,9 +17,6 @@ typedef ti_int fixint_t;
typedef tu_int fixuint_t;
#include "fp_fixint_impl.inc"
COMPILER_RT_ABI ti_int
__fixsfti(fp_t a) {
return __fixint(a);
}
COMPILER_RT_ABI ti_int __fixsfti(fp_t a) { return __fixint(a); }
#endif /* CRT_HAS_128BIT */

View File

@ -15,8 +15,5 @@ typedef di_int fixint_t;
typedef du_int fixuint_t;
#include "fp_fixint_impl.inc"
COMPILER_RT_ABI di_int
__fixtfdi(fp_t a) {
return __fixint(a);
}
COMPILER_RT_ABI di_int __fixtfdi(fp_t a) { return __fixint(a); }
#endif

View File

@ -15,8 +15,5 @@ typedef si_int fixint_t;
typedef su_int fixuint_t;
#include "fp_fixint_impl.inc"
COMPILER_RT_ABI si_int
__fixtfsi(fp_t a) {
return __fixint(a);
}
COMPILER_RT_ABI si_int __fixtfsi(fp_t a) { return __fixint(a); }
#endif

View File

@ -15,8 +15,5 @@ typedef ti_int fixint_t;
typedef tu_int fixuint_t;
#include "fp_fixint_impl.inc"
COMPILER_RT_ABI ti_int
__fixtfti(fp_t a) {
return __fixint(a);
}
COMPILER_RT_ABI ti_int __fixtfti(fp_t a) { return __fixint(a); }
#endif

View File

@ -15,10 +15,9 @@
* flag as a side-effect of computation.
*/
COMPILER_RT_ABI du_int
__fixunsdfdi(double a)
{
if (a <= 0.0) return 0;
COMPILER_RT_ABI du_int __fixunsdfdi(double a) {
if (a <= 0.0)
return 0;
su_int high = a / 4294967296.f; /* a / 0x1p32f; */
su_int low = a - (double)high * 4294967296.f; /* high * 0x1p32f; */
return ((du_int)high << 32) | low;
@ -33,18 +32,13 @@ __fixunsdfdi(double a)
typedef du_int fixuint_t;
#include "fp_fixuint_impl.inc"
COMPILER_RT_ABI du_int
__fixunsdfdi(fp_t a) {
return __fixuint(a);
}
COMPILER_RT_ABI du_int __fixunsdfdi(fp_t a) { return __fixuint(a); }
#endif
#if defined(__ARM_EABI__)
#if defined(COMPILER_RT_ARMHF_TARGET)
AEABI_RTABI du_int __aeabi_d2ulz(fp_t a) {
return __fixunsdfdi(a);
}
AEABI_RTABI du_int __aeabi_d2ulz(fp_t a) { return __fixunsdfdi(a); }
#else
AEABI_RTABI du_int __aeabi_d2ulz(fp_t a) COMPILER_RT_ALIAS(__fixunsdfdi);
#endif
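
The hardware-FP path of __fixunsdfdi above splits the double into 32-bit halves: dividing by 2^32 gives the upper word, subtracting it back out leaves the lower word, and the conversions raise the inexact flag as a side effect, exactly as the header comment says. A quick standalone check (the input is chosen so every step is exact):

#include <stdint.h>
#include <stdio.h>

int main(void) {
  double a = 4294967298.0;                     /* 2^32 + 2 */
  uint32_t high = (uint32_t)(a / 4294967296.0);
  uint32_t low = (uint32_t)(a - (double)high * 4294967296.0);
  uint64_t r = ((uint64_t)high << 32) | low;
  printf("%llu\n", (unsigned long long)r);     /* 4294967298 */
  return 0;
}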

View File

@ -12,16 +12,11 @@
typedef su_int fixuint_t;
#include "fp_fixuint_impl.inc"
COMPILER_RT_ABI su_int
__fixunsdfsi(fp_t a) {
return __fixuint(a);
}
COMPILER_RT_ABI su_int __fixunsdfsi(fp_t a) { return __fixuint(a); }
#if defined(__ARM_EABI__)
#if defined(COMPILER_RT_ARMHF_TARGET)
AEABI_RTABI su_int __aeabi_d2uiz(fp_t a) {
return __fixunsdfsi(a);
}
AEABI_RTABI su_int __aeabi_d2uiz(fp_t a) { return __fixunsdfsi(a); }
#else
AEABI_RTABI su_int __aeabi_d2uiz(fp_t a) COMPILER_RT_ALIAS(__fixunsdfsi);
#endif

View File

@ -15,8 +15,5 @@
typedef tu_int fixuint_t;
#include "fp_fixuint_impl.inc"
COMPILER_RT_ABI tu_int
__fixunsdfti(fp_t a) {
return __fixuint(a);
}
COMPILER_RT_ABI tu_int __fixunsdfti(fp_t a) { return __fixuint(a); }
#endif /* CRT_HAS_128BIT */

View File

@ -15,10 +15,9 @@
* flag as a side-effect of computation.
*/
COMPILER_RT_ABI du_int
__fixunssfdi(float a)
{
if (a <= 0.0f) return 0;
COMPILER_RT_ABI du_int __fixunssfdi(float a) {
if (a <= 0.0f)
return 0;
double da = a;
su_int high = da / 4294967296.f; /* da / 0x1p32f; */
su_int low = da - (double)high * 4294967296.f; /* high * 0x1p32f; */
@ -34,18 +33,13 @@ __fixunssfdi(float a)
typedef du_int fixuint_t;
#include "fp_fixuint_impl.inc"
COMPILER_RT_ABI du_int
__fixunssfdi(fp_t a) {
return __fixuint(a);
}
COMPILER_RT_ABI du_int __fixunssfdi(fp_t a) { return __fixuint(a); }
#endif
#if defined(__ARM_EABI__)
#if defined(COMPILER_RT_ARMHF_TARGET)
AEABI_RTABI du_int __aeabi_f2ulz(fp_t a) {
return __fixunssfdi(a);
}
AEABI_RTABI du_int __aeabi_f2ulz(fp_t a) { return __fixunssfdi(a); }
#else
AEABI_RTABI du_int __aeabi_f2ulz(fp_t a) COMPILER_RT_ALIAS(__fixunssfdi);
#endif

View File

@ -16,16 +16,11 @@
typedef su_int fixuint_t;
#include "fp_fixuint_impl.inc"
COMPILER_RT_ABI su_int
__fixunssfsi(fp_t a) {
return __fixuint(a);
}
COMPILER_RT_ABI su_int __fixunssfsi(fp_t a) { return __fixuint(a); }
#if defined(__ARM_EABI__)
#if defined(COMPILER_RT_ARMHF_TARGET)
AEABI_RTABI su_int __aeabi_f2uiz(fp_t a) {
return __fixunssfsi(a);
}
AEABI_RTABI su_int __aeabi_f2uiz(fp_t a) { return __fixunssfsi(a); }
#else
AEABI_RTABI su_int __aeabi_f2uiz(fp_t a) COMPILER_RT_ALIAS(__fixunssfsi);
#endif

View File

@ -18,8 +18,5 @@
typedef tu_int fixuint_t;
#include "fp_fixuint_impl.inc"
COMPILER_RT_ABI tu_int
__fixunssfti(fp_t a) {
return __fixuint(a);
}
COMPILER_RT_ABI tu_int __fixunssfti(fp_t a) { return __fixuint(a); }
#endif

View File

@ -14,8 +14,5 @@
typedef du_int fixuint_t;
#include "fp_fixuint_impl.inc"
COMPILER_RT_ABI du_int
__fixunstfdi(fp_t a) {
return __fixuint(a);
}
COMPILER_RT_ABI du_int __fixunstfdi(fp_t a) { return __fixuint(a); }
#endif

View File

@ -14,8 +14,5 @@
typedef su_int fixuint_t;
#include "fp_fixuint_impl.inc"
COMPILER_RT_ABI su_int
__fixunstfsi(fp_t a) {
return __fixuint(a);
}
COMPILER_RT_ABI su_int __fixunstfsi(fp_t a) { return __fixuint(a); }
#endif

View File

@ -14,8 +14,5 @@
typedef tu_int fixuint_t;
#include "fp_fixuint_impl.inc"
COMPILER_RT_ABI tu_int
__fixunstfti(fp_t a) {
return __fixuint(a);
}
COMPILER_RT_ABI tu_int __fixunstfti(fp_t a) { return __fixuint(a); }
#endif

View File

@ -19,19 +19,17 @@
* Negative values all become zero.
*/
/* Assumption: long double is an intel 80 bit floating point type padded with 6 bytes
* du_int is a 64 bit integral type
* value in long double is representable in du_int or is negative
* (no range checking performed)
/* Assumption: long double is an intel 80 bit floating point type padded with 6
* bytes du_int is a 64 bit integral type value in long double is representable
* in du_int or is negative (no range checking performed)
*/
/* gggg gggg gggg gggg gggg gggg gggg gggg | gggg gggg gggg gggg seee eeee eeee eeee |
* 1mmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm
/* gggg gggg gggg gggg gggg gggg gggg gggg | gggg gggg gggg gggg seee eeee eeee
* eeee | 1mmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm
* mmmm mmmm mmmm
*/
COMPILER_RT_ABI du_int
__fixunsxfdi(long double a)
{
COMPILER_RT_ABI du_int __fixunsxfdi(long double a) {
long_double_bits fb;
fb.f = a;
int e = (fb.u.high.s.low & 0x00007FFF) - 16383;

View File

@ -19,18 +19,17 @@
* Negative values all become zero.
*/
/* Assumption: long double is an intel 80 bit floating point type padded with 6 bytes
* su_int is a 32 bit integral type
* value in long double is representable in su_int or is negative
/* Assumption: long double is an intel 80 bit floating point type padded with 6
* bytes su_int is a 32 bit integral type value in long double is representable
* in su_int or is negative
*/
/* gggg gggg gggg gggg gggg gggg gggg gggg | gggg gggg gggg gggg seee eeee eeee eeee |
* 1mmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm
/* gggg gggg gggg gggg gggg gggg gggg gggg | gggg gggg gggg gggg seee eeee eeee
* eeee | 1mmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm
* mmmm mmmm mmmm
*/
COMPILER_RT_ABI su_int
__fixunsxfsi(long double a)
{
COMPILER_RT_ABI su_int __fixunsxfsi(long double a) {
long_double_bits fb;
fb.f = a;
int e = (fb.u.high.s.low & 0x00007FFF) - 16383;

View File

@ -19,18 +19,17 @@
* Negative values all become zero.
*/
/* Assumption: long double is an intel 80 bit floating point type padded with 6 bytes
* tu_int is a 128 bit integral type
* value in long double is representable in tu_int or is negative
/* Assumption: long double is an intel 80 bit floating point type padded with 6
* bytes tu_int is a 128 bit integral type value in long double is representable
* in tu_int or is negative
*/
/* gggg gggg gggg gggg gggg gggg gggg gggg | gggg gggg gggg gggg seee eeee eeee eeee |
* 1mmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm
/* gggg gggg gggg gggg gggg gggg gggg gggg | gggg gggg gggg gggg seee eeee eeee
* eeee | 1mmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm
* mmmm mmmm mmmm
*/
COMPILER_RT_ABI tu_int
__fixunsxfti(long double a)
{
COMPILER_RT_ABI tu_int __fixunsxfti(long double a) {
long_double_bits fb;
fb.f = a;
int e = (fb.u.high.s.low & 0x00007FFF) - 16383;

View File

@ -17,18 +17,17 @@
/* Returns: convert a to a signed long long, rounding toward zero. */
/* Assumption: long double is an intel 80 bit floating point type padded with 6 bytes
* di_int is a 64 bit integral type
* value in long double is representable in di_int (no range checking performed)
/* Assumption: long double is an intel 80 bit floating point type padded with 6
* bytes di_int is a 64 bit integral type value in long double is representable
* in di_int (no range checking performed)
*/
/* gggg gggg gggg gggg gggg gggg gggg gggg | gggg gggg gggg gggg seee eeee eeee eeee |
* 1mmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm
/* gggg gggg gggg gggg gggg gggg gggg gggg | gggg gggg gggg gggg seee eeee eeee
* eeee | 1mmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm
* mmmm mmmm mmmm
*/
COMPILER_RT_ABI di_int
__fixxfdi(long double a)
{
COMPILER_RT_ABI di_int __fixxfdi(long double a) {
const di_int di_max = (di_int)((~(du_int)0) / 2);
const di_int di_min = -di_max - 1;
long_double_bits fb;

View File

@ -17,18 +17,17 @@
/* Returns: convert a to a signed long long, rounding toward zero. */
/* Assumption: long double is an intel 80 bit floating point type padded with 6 bytes
* ti_int is a 128 bit integral type
* value in long double is representable in ti_int
/* Assumption: long double is an intel 80 bit floating point type padded with 6
* bytes ti_int is a 128 bit integral type value in long double is representable
* in ti_int
*/
/* gggg gggg gggg gggg gggg gggg gggg gggg | gggg gggg gggg gggg seee eeee eeee eeee |
* 1mmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm
/* gggg gggg gggg gggg gggg gggg gggg gggg | gggg gggg gggg gggg seee eeee eeee
* eeee | 1mmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm
* mmmm mmmm mmmm
*/
COMPILER_RT_ABI ti_int
__fixxfti(long double a)
{
COMPILER_RT_ABI ti_int __fixxfti(long double a) {
const ti_int ti_max = (ti_int)((~(tu_int)0) / 2);
const ti_int ti_min = -ti_max - 1;
long_double_bits fb;

View File

@ -19,20 +19,22 @@
* di_int is a 64 bit integral type
*/
/* seee eeee eeee mmmm mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm */
/* seee eeee eeee mmmm mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm mmmm mmmm
* mmmm */
#ifndef __SOFT_FP__
/* Support for systems that have hardware floating-point; we'll set the inexact flag
* as a side-effect of this computation.
/* Support for systems that have hardware floating-point; we'll set the inexact
* flag as a side-effect of this computation.
*/
COMPILER_RT_ABI double
__floatdidf(di_int a)
{
COMPILER_RT_ABI double __floatdidf(di_int a) {
static const double twop52 = 4503599627370496.0; // 0x1.0p52
static const double twop32 = 4294967296.0; // 0x1.0p32
union { int64_t x; double d; } low = { .d = twop52 };
union {
int64_t x;
double d;
} low = {.d = twop52};
const double high = (int32_t)(a >> 32) * twop32;
low.x |= a & INT64_C(0x00000000ffffffff);
@ -42,22 +44,20 @@ __floatdidf(di_int a)
}
#else
/* Support for systems that don't have hardware floating-point; there are no flags to
* set, and we don't want to code-gen to an unknown soft-float implementation.
/* Support for systems that don't have hardware floating-point; there are no
* flags to set, and we don't want to code-gen to an unknown soft-float
* implementation.
*/
COMPILER_RT_ABI double
__floatdidf(di_int a)
{
COMPILER_RT_ABI double __floatdidf(di_int a) {
if (a == 0)
return 0.0;
const unsigned N = sizeof(di_int) * CHAR_BIT;
const di_int s = a >> (N-1);
const di_int s = a >> (N - 1);
a = (a ^ s) - s;
int sd = N - __builtin_clzll(a); /* number of significant digits */
int e = sd - 1; /* exponent */
if (sd > DBL_MANT_DIG)
{
if (sd > DBL_MANT_DIG) {
/* start: 0000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQxxxxxxxxxxxxxxxxxx
* finish: 000000000000000000000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQR
* 12345678901234567890123456
@ -66,31 +66,27 @@ __floatdidf(di_int a)
* Q = bit DBL_MANT_DIG bits to the right of 1
* R = "or" of all bits to the right of Q
*/
switch (sd)
{
switch (sd) {
case DBL_MANT_DIG + 1:
a <<= 1;
break;
case DBL_MANT_DIG + 2:
break;
default:
a = ((du_int)a >> (sd - (DBL_MANT_DIG+2))) |
((a & ((du_int)(-1) >> ((N + DBL_MANT_DIG+2) - sd))) != 0);
a = ((du_int)a >> (sd - (DBL_MANT_DIG + 2))) |
((a & ((du_int)(-1) >> ((N + DBL_MANT_DIG + 2) - sd))) != 0);
};
/* finish: */
a |= (a & 4) != 0; /* Or P into R */
++a; /* round - this step may add a significant bit */
a >>= 2; /* dump Q and R */
/* a is now rounded to DBL_MANT_DIG or DBL_MANT_DIG+1 bits */
if (a & ((du_int)1 << DBL_MANT_DIG))
{
if (a & ((du_int)1 << DBL_MANT_DIG)) {
a >>= 1;
++e;
}
/* a is now rounded to DBL_MANT_DIG bits */
}
else
{
} else {
a <<= (DBL_MANT_DIG - sd);
/* a is now rounded to DBL_MANT_DIG bits */
}
@ -105,9 +101,7 @@ __floatdidf(di_int a)
#if defined(__ARM_EABI__)
#if defined(COMPILER_RT_ARMHF_TARGET)
AEABI_RTABI double __aeabi_l2d(di_int a) {
return __floatdidf(a);
}
AEABI_RTABI double __aeabi_l2d(di_int a) { return __floatdidf(a); }
#else
AEABI_RTABI double __aeabi_l2d(di_int a) COMPILER_RT_ALIAS(__floatdidf);
#endif
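
The soft-float branch of __floatdidf above rounds |a| to DBL_MANT_DIG bits with the P/Q/R scheme its comment describes: Q is the guard bit, R is the sticky OR of everything below it, and folding P (the would-be last mantissa bit) into R before the increment makes exact ties round to even. A small standalone model of that step (round_to_bits is a made-up helper; the exponent bump on carry-out is left out):

#include <stdint.h>
#include <stdio.h>

/* Round nonzero a to `bits` significant bits, nearest-even; returns the
   rounded significand only. */
static uint64_t round_to_bits(uint64_t a, int bits) {
  int sd = 64 - __builtin_clzll(a);      /* number of significant digits */
  if (sd <= bits)
    return a << (bits - sd);             /* already exact: just left-align */
  int shift = sd - (bits + 2);           /* keep value + guard Q + sticky R */
  uint64_t sticky = (a & ((UINT64_C(1) << shift) - 1)) != 0;
  a = (a >> shift) | sticky;
  a |= (a & 4) != 0;                     /* or P into R */
  ++a;                                   /* round */
  a >>= 2;                               /* dump Q and R */
  if (a & (UINT64_C(1) << bits))         /* rounding carried out one bit */
    a >>= 1;
  return a;
}

int main(void) {
  /* Six significant bits rounded to four:
     43 = 101011 -> 1011 (11), 42 = 101010 ties to even -> 1010 (10),
     46 = 101110 ties with an odd low bit -> 1100 (12). */
  printf("%llu %llu %llu\n",
         (unsigned long long)round_to_bits(43, 4),
         (unsigned long long)round_to_bits(42, 4),
         (unsigned long long)round_to_bits(46, 4));
  return 0;
}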

View File

@ -21,18 +21,15 @@
#include "int_lib.h"
COMPILER_RT_ABI float
__floatdisf(di_int a)
{
COMPILER_RT_ABI float __floatdisf(di_int a) {
if (a == 0)
return 0.0F;
const unsigned N = sizeof(di_int) * CHAR_BIT;
const di_int s = a >> (N-1);
const di_int s = a >> (N - 1);
a = (a ^ s) - s;
int sd = N - __builtin_clzll(a); /* number of significant digits */
int e = sd - 1; /* exponent */
if (sd > FLT_MANT_DIG)
{
if (sd > FLT_MANT_DIG) {
/* start: 0000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQxxxxxxxxxxxxxxxxxx
* finish: 000000000000000000000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQR
* 12345678901234567890123456
@ -41,31 +38,27 @@ __floatdisf(di_int a)
* Q = bit FLT_MANT_DIG bits to the right of 1
* R = "or" of all bits to the right of Q
*/
switch (sd)
{
switch (sd) {
case FLT_MANT_DIG + 1:
a <<= 1;
break;
case FLT_MANT_DIG + 2:
break;
default:
a = ((du_int)a >> (sd - (FLT_MANT_DIG+2))) |
((a & ((du_int)(-1) >> ((N + FLT_MANT_DIG+2) - sd))) != 0);
a = ((du_int)a >> (sd - (FLT_MANT_DIG + 2))) |
((a & ((du_int)(-1) >> ((N + FLT_MANT_DIG + 2) - sd))) != 0);
};
/* finish: */
a |= (a & 4) != 0; /* Or P into R */
++a; /* round - this step may add a significant bit */
a >>= 2; /* dump Q and R */
/* a is now rounded to FLT_MANT_DIG or FLT_MANT_DIG+1 bits */
if (a & ((du_int)1 << FLT_MANT_DIG))
{
if (a & ((du_int)1 << FLT_MANT_DIG)) {
a >>= 1;
++e;
}
/* a is now rounded to FLT_MANT_DIG bits */
}
else
{
} else {
a <<= (FLT_MANT_DIG - sd);
/* a is now rounded to FLT_MANT_DIG bits */
}
@ -78,9 +71,7 @@ __floatdisf(di_int a)
#if defined(__ARM_EABI__)
#if defined(COMPILER_RT_ARMHF_TARGET)
AEABI_RTABI float __aeabi_l2f(di_int a) {
return __floatdisf(a);
}
AEABI_RTABI float __aeabi_l2f(di_int a) { return __floatdisf(a); }
#else
AEABI_RTABI float __aeabi_l2f(di_int a) COMPILER_RT_ALIAS(__floatdisf);
#endif

View File

@ -17,24 +17,23 @@
/* Returns: convert a to a long double, rounding toward even. */
/* Assumption: long double is a IEEE 80 bit floating point type padded to 128 bits
* di_int is a 64 bit integral type
/* Assumption: long double is a IEEE 80 bit floating point type padded to 128
* bits di_int is a 64 bit integral type
*/
/* gggg gggg gggg gggg gggg gggg gggg gggg | gggg gggg gggg gggg seee eeee eeee eeee |
* 1mmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm
/* gggg gggg gggg gggg gggg gggg gggg gggg | gggg gggg gggg gggg seee eeee eeee
* eeee | 1mmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm
* mmmm mmmm mmmm
*/
COMPILER_RT_ABI long double
__floatdixf(di_int a)
{
COMPILER_RT_ABI long double __floatdixf(di_int a) {
if (a == 0)
return 0.0;
const unsigned N = sizeof(di_int) * CHAR_BIT;
const di_int s = a >> (N-1);
const di_int s = a >> (N - 1);
a = (a ^ s) - s;
int clz = __builtin_clzll(a);
int e = (N - 1) - clz ; /* exponent */
int e = (N - 1) - clz; /* exponent */
long_double_bits fb;
fb.u.high.s.low = ((su_int)s & 0x00008000) | /* sign */
(e + 16383); /* exponent */

View File

@ -17,8 +17,7 @@
#include "int_lib.h"
COMPILER_RT_ABI fp_t
__floatsidf(int a) {
COMPILER_RT_ABI fp_t __floatsidf(int a) {
const int aWidth = sizeof a * CHAR_BIT;
@ -51,9 +50,7 @@ __floatsidf(int a) {
#if defined(__ARM_EABI__)
#if defined(COMPILER_RT_ARMHF_TARGET)
AEABI_RTABI fp_t __aeabi_i2d(int a) {
return __floatsidf(a);
}
AEABI_RTABI fp_t __aeabi_i2d(int a) { return __floatsidf(a); }
#else
AEABI_RTABI fp_t __aeabi_i2d(int a) COMPILER_RT_ALIAS(__floatsidf);
#endif

View File

@ -17,8 +17,7 @@
#include "int_lib.h"
COMPILER_RT_ABI fp_t
__floatsisf(int a) {
COMPILER_RT_ABI fp_t __floatsisf(int a) {
const int aWidth = sizeof a * CHAR_BIT;
@ -45,8 +44,10 @@ __floatsisf(int a) {
const int shift = exponent - significandBits;
result = (rep_t)a >> shift ^ implicitBit;
rep_t round = (rep_t)a << (typeWidth - shift);
if (round > signBit) result++;
if (round == signBit) result += result & 1;
if (round > signBit)
result++;
if (round == signBit)
result += result & 1;
}
// Insert the exponent
@ -57,9 +58,7 @@ __floatsisf(int a) {
#if defined(__ARM_EABI__)
#if defined(COMPILER_RT_ARMHF_TARGET)
AEABI_RTABI fp_t __aeabi_i2f(int a) {
return __floatsisf(a);
}
AEABI_RTABI fp_t __aeabi_i2f(int a) { return __floatsisf(a); }
#else
AEABI_RTABI fp_t __aeabi_i2f(int a) COMPILER_RT_ALIAS(__floatsisf);
#endif

View File

@ -21,20 +21,18 @@
* ti_int is a 128 bit integral type
*/
/* seee eeee eeee mmmm mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm */
/* seee eeee eeee mmmm mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm mmmm mmmm
* mmmm */
COMPILER_RT_ABI double
__floattidf(ti_int a)
{
COMPILER_RT_ABI double __floattidf(ti_int a) {
if (a == 0)
return 0.0;
const unsigned N = sizeof(ti_int) * CHAR_BIT;
const ti_int s = a >> (N-1);
const ti_int s = a >> (N - 1);
a = (a ^ s) - s;
int sd = N - __clzti2(a); /* number of significant digits */
int e = sd - 1; /* exponent */
if (sd > DBL_MANT_DIG)
{
if (sd > DBL_MANT_DIG) {
/* start: 0000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQxxxxxxxxxxxxxxxxxx
* finish: 000000000000000000000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQR
* 12345678901234567890123456
@ -43,31 +41,27 @@ __floattidf(ti_int a)
* Q = bit DBL_MANT_DIG bits to the right of 1
* R = "or" of all bits to the right of Q
*/
switch (sd)
{
switch (sd) {
case DBL_MANT_DIG + 1:
a <<= 1;
break;
case DBL_MANT_DIG + 2:
break;
default:
a = ((tu_int)a >> (sd - (DBL_MANT_DIG+2))) |
((a & ((tu_int)(-1) >> ((N + DBL_MANT_DIG+2) - sd))) != 0);
a = ((tu_int)a >> (sd - (DBL_MANT_DIG + 2))) |
((a & ((tu_int)(-1) >> ((N + DBL_MANT_DIG + 2) - sd))) != 0);
};
/* finish: */
a |= (a & 4) != 0; /* Or P into R */
++a; /* round - this step may add a significant bit */
a >>= 2; /* dump Q and R */
/* a is now rounded to DBL_MANT_DIG or DBL_MANT_DIG+1 bits */
if (a & ((tu_int)1 << DBL_MANT_DIG))
{
if (a & ((tu_int)1 << DBL_MANT_DIG)) {
a >>= 1;
++e;
}
/* a is now rounded to DBL_MANT_DIG bits */
}
else
{
} else {
a <<= (DBL_MANT_DIG - sd);
/* a is now rounded to DBL_MANT_DIG bits */
}

View File

@ -23,18 +23,15 @@
/* seee eeee emmm mmmm mmmm mmmm mmmm mmmm */
COMPILER_RT_ABI float
__floattisf(ti_int a)
{
COMPILER_RT_ABI float __floattisf(ti_int a) {
if (a == 0)
return 0.0F;
const unsigned N = sizeof(ti_int) * CHAR_BIT;
const ti_int s = a >> (N-1);
const ti_int s = a >> (N - 1);
a = (a ^ s) - s;
int sd = N - __clzti2(a); /* number of significant digits */
int e = sd - 1; /* exponent */
if (sd > FLT_MANT_DIG)
{
if (sd > FLT_MANT_DIG) {
/* start: 0000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQxxxxxxxxxxxxxxxxxx
* finish: 000000000000000000000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQR
* 12345678901234567890123456
@ -43,31 +40,27 @@ __floattisf(ti_int a)
* Q = bit FLT_MANT_DIG bits to the right of 1
* R = "or" of all bits to the right of Q
*/
switch (sd)
{
switch (sd) {
case FLT_MANT_DIG + 1:
a <<= 1;
break;
case FLT_MANT_DIG + 2:
break;
default:
a = ((tu_int)a >> (sd - (FLT_MANT_DIG+2))) |
((a & ((tu_int)(-1) >> ((N + FLT_MANT_DIG+2) - sd))) != 0);
a = ((tu_int)a >> (sd - (FLT_MANT_DIG + 2))) |
((a & ((tu_int)(-1) >> ((N + FLT_MANT_DIG + 2) - sd))) != 0);
};
/* finish: */
a |= (a & 4) != 0; /* Or P into R */
++a; /* round - this step may add a significant bit */
a >>= 2; /* dump Q and R */
/* a is now rounded to FLT_MANT_DIG or FLT_MANT_DIG+1 bits */
if (a & ((tu_int)1 << FLT_MANT_DIG))
{
if (a & ((tu_int)1 << FLT_MANT_DIG)) {
a >>= 1;
++e;
}
/* a is now rounded to FLT_MANT_DIG bits */
}
else
{
} else {
a <<= (FLT_MANT_DIG - sd);
/* a is now rounded to FLT_MANT_DIG bits */
}

View File

@ -22,17 +22,17 @@
* ti_int is a 128 bit integral type
*/
/* seee eeee eeee eeee mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm |
* mmmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm
/* seee eeee eeee eeee mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm mmmm mmmm
* mmmm | mmmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm
* mmmm mmmm mmmm
*/
#if defined(CRT_HAS_128BIT) && defined(CRT_LDBL_128BIT)
COMPILER_RT_ABI fp_t
__floattitf(ti_int a) {
COMPILER_RT_ABI fp_t __floattitf(ti_int a) {
if (a == 0)
return 0.0;
const unsigned N = sizeof(ti_int) * CHAR_BIT;
const ti_int s = a >> (N-1);
const ti_int s = a >> (N - 1);
a = (a ^ s) - s;
int sd = N - __clzti2(a); /* number of significant digits */
int e = sd - 1; /* exponent */
@ -52,8 +52,8 @@ __floattitf(ti_int a) {
case LDBL_MANT_DIG + 2:
break;
default:
a = ((tu_int)a >> (sd - (LDBL_MANT_DIG+2))) |
((a & ((tu_int)(-1) >> ((N + LDBL_MANT_DIG+2) - sd))) != 0);
a = ((tu_int)a >> (sd - (LDBL_MANT_DIG + 2))) |
((a & ((tu_int)(-1) >> ((N + LDBL_MANT_DIG + 2) - sd))) != 0);
};
/* finish: */
a |= (a & 4) != 0; /* Or P into R */

View File

@ -17,26 +17,24 @@
/* Returns: convert a to a long double, rounding toward even. */
/* Assumption: long double is a IEEE 80 bit floating point type padded to 128 bits
* ti_int is a 128 bit integral type
/* Assumption: long double is a IEEE 80 bit floating point type padded to 128
* bits ti_int is a 128 bit integral type
*/
/* gggg gggg gggg gggg gggg gggg gggg gggg | gggg gggg gggg gggg seee eeee eeee eeee |
* 1mmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm
/* gggg gggg gggg gggg gggg gggg gggg gggg | gggg gggg gggg gggg seee eeee eeee
* eeee | 1mmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm
* mmmm mmmm mmmm
*/
COMPILER_RT_ABI long double
__floattixf(ti_int a)
{
COMPILER_RT_ABI long double __floattixf(ti_int a) {
if (a == 0)
return 0.0;
const unsigned N = sizeof(ti_int) * CHAR_BIT;
const ti_int s = a >> (N-1);
const ti_int s = a >> (N - 1);
a = (a ^ s) - s;
int sd = N - __clzti2(a); /* number of significant digits */
int e = sd - 1; /* exponent */
if (sd > LDBL_MANT_DIG)
{
if (sd > LDBL_MANT_DIG) {
/* start: 0000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQxxxxxxxxxxxxxxxxxx
* finish: 000000000000000000000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQR
* 12345678901234567890123456
@ -45,31 +43,27 @@ __floattixf(ti_int a)
* Q = bit LDBL_MANT_DIG bits to the right of 1
* R = "or" of all bits to the right of Q
*/
switch (sd)
{
switch (sd) {
case LDBL_MANT_DIG + 1:
a <<= 1;
break;
case LDBL_MANT_DIG + 2:
break;
default:
a = ((tu_int)a >> (sd - (LDBL_MANT_DIG+2))) |
((a & ((tu_int)(-1) >> ((N + LDBL_MANT_DIG+2) - sd))) != 0);
a = ((tu_int)a >> (sd - (LDBL_MANT_DIG + 2))) |
((a & ((tu_int)(-1) >> ((N + LDBL_MANT_DIG + 2) - sd))) != 0);
};
/* finish: */
a |= (a & 4) != 0; /* Or P into R */
++a; /* round - this step may add a significant bit */
a >>= 2; /* dump Q and R */
/* a is now rounded to LDBL_MANT_DIG or LDBL_MANT_DIG+1 bits */
if (a & ((tu_int)1 << LDBL_MANT_DIG))
{
if (a & ((tu_int)1 << LDBL_MANT_DIG)) {
a >>= 1;
++e;
}
/* a is now rounded to LDBL_MANT_DIG bits */
}
else
{
} else {
a <<= (LDBL_MANT_DIG - sd);
/* a is now rounded to LDBL_MANT_DIG bits */
}

View File

@ -17,24 +17,30 @@
* du_int is a 64 bit integral type
*/
/* seee eeee eeee mmmm mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm */
/* seee eeee eeee mmmm mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm mmmm mmmm
* mmmm */
#include "int_lib.h"
#ifndef __SOFT_FP__
/* Support for systems that have hardware floating-point; we'll set the inexact flag
* as a side-effect of this computation.
/* Support for systems that have hardware floating-point; we'll set the inexact
* flag as a side-effect of this computation.
*/
COMPILER_RT_ABI double
__floatundidf(du_int a)
{
COMPILER_RT_ABI double __floatundidf(du_int a) {
static const double twop52 = 4503599627370496.0; // 0x1.0p52
static const double twop84 = 19342813113834066795298816.0; // 0x1.0p84
static const double twop84_plus_twop52 = 19342813118337666422669312.0; // 0x1.00000001p84
static const double twop84_plus_twop52 =
19342813118337666422669312.0; // 0x1.00000001p84
union { uint64_t x; double d; } high = { .d = twop84 };
union { uint64_t x; double d; } low = { .d = twop52 };
union {
uint64_t x;
double d;
} high = {.d = twop84};
union {
uint64_t x;
double d;
} low = {.d = twop52};
high.x |= a >> 32;
low.x |= a & UINT64_C(0x00000000ffffffff);
@ -44,20 +50,18 @@ __floatundidf(du_int a)
}
#else
/* Support for systems that don't have hardware floating-point; there are no flags to
* set, and we don't want to code-gen to an unknown soft-float implementation.
/* Support for systems that don't have hardware floating-point; there are no
* flags to set, and we don't want to code-gen to an unknown soft-float
* implementation.
*/
COMPILER_RT_ABI double
__floatundidf(du_int a)
{
COMPILER_RT_ABI double __floatundidf(du_int a) {
if (a == 0)
return 0.0;
const unsigned N = sizeof(du_int) * CHAR_BIT;
int sd = N - __builtin_clzll(a); /* number of significant digits */
int e = sd - 1; /* exponent */
if (sd > DBL_MANT_DIG)
{
if (sd > DBL_MANT_DIG) {
/* start: 0000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQxxxxxxxxxxxxxxxxxx
* finish: 000000000000000000000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQR
* 12345678901234567890123456
@ -66,31 +70,27 @@ __floatundidf(du_int a)
* Q = bit DBL_MANT_DIG bits to the right of 1
* R = "or" of all bits to the right of Q
*/
switch (sd)
{
switch (sd) {
case DBL_MANT_DIG + 1:
a <<= 1;
break;
case DBL_MANT_DIG + 2:
break;
default:
a = (a >> (sd - (DBL_MANT_DIG+2))) |
((a & ((du_int)(-1) >> ((N + DBL_MANT_DIG+2) - sd))) != 0);
a = (a >> (sd - (DBL_MANT_DIG + 2))) |
((a & ((du_int)(-1) >> ((N + DBL_MANT_DIG + 2) - sd))) != 0);
};
/* finish: */
a |= (a & 4) != 0; /* Or P into R */
++a; /* round - this step may add a significant bit */
a >>= 2; /* dump Q and R */
/* a is now rounded to DBL_MANT_DIG or DBL_MANT_DIG+1 bits */
if (a & ((du_int)1 << DBL_MANT_DIG))
{
if (a & ((du_int)1 << DBL_MANT_DIG)) {
a >>= 1;
++e;
}
/* a is now rounded to DBL_MANT_DIG bits */
}
else
{
} else {
a <<= (DBL_MANT_DIG - sd);
/* a is now rounded to DBL_MANT_DIG bits */
}
@ -104,9 +104,7 @@ __floatundidf(du_int a)
#if defined(__ARM_EABI__)
#if defined(COMPILER_RT_ARMHF_TARGET)
AEABI_RTABI double __aeabi_ul2d(du_int a) {
return __floatundidf(a);
}
AEABI_RTABI double __aeabi_ul2d(du_int a) { return __floatundidf(a); }
#else
AEABI_RTABI double __aeabi_ul2d(du_int a) COMPILER_RT_ALIAS(__floatundidf);
#endif
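
The hardware-FP path of __floatundidf above converts a 64-bit unsigned integer with no 64-bit int-to-double instruction: OR-ing the low 32 bits into the mantissa of 0x1.0p52 yields exactly 2^52 + low, OR-ing the high 32 bits into 0x1.0p84 yields exactly 2^84 + high * 2^32, and one subtraction plus one addition then leave the correctly rounded result. A standalone replay of the trick, using memcpy for the bit fiddling instead of the unions in the source:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static double u64_to_double(uint64_t a) {
  const double twop52 = 4503599627370496.0;            /* 0x1.0p52 */
  const double twop84 = 19342813113834066795298816.0;  /* 0x1.0p84 */
  const double twop84_plus_twop52 = 19342813118337666422669312.0;
  uint64_t hi_bits, lo_bits;
  double hi, lo;

  memcpy(&hi_bits, &twop84, 8);
  memcpy(&lo_bits, &twop52, 8);
  hi_bits |= a >> 32;           /* high word lands in the 2^84 frame */
  lo_bits |= a & 0xffffffffu;   /* low word lands in the 2^52 frame  */
  memcpy(&hi, &hi_bits, 8);
  memcpy(&lo, &lo_bits, 8);
  return (hi - twop84_plus_twop52) + lo;
}

int main(void) {
  uint64_t x = 0x123456789abcdef0ULL;
  printf("%a\n%a\n", u64_to_double(x), (double)x);   /* identical */
  return 0;
}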

View File

@ -21,16 +21,13 @@
#include "int_lib.h"
COMPILER_RT_ABI float
__floatundisf(du_int a)
{
COMPILER_RT_ABI float __floatundisf(du_int a) {
if (a == 0)
return 0.0F;
const unsigned N = sizeof(du_int) * CHAR_BIT;
int sd = N - __builtin_clzll(a); /* number of significant digits */
int e = sd - 1; /* 8 exponent */
if (sd > FLT_MANT_DIG)
{
if (sd > FLT_MANT_DIG) {
/* start: 0000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQxxxxxxxxxxxxxxxxxx
* finish: 000000000000000000000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQR
* 12345678901234567890123456
@ -39,31 +36,27 @@ __floatundisf(du_int a)
* Q = bit FLT_MANT_DIG bits to the right of 1
* R = "or" of all bits to the right of Q
*/
switch (sd)
{
switch (sd) {
case FLT_MANT_DIG + 1:
a <<= 1;
break;
case FLT_MANT_DIG + 2:
break;
default:
a = (a >> (sd - (FLT_MANT_DIG+2))) |
((a & ((du_int)(-1) >> ((N + FLT_MANT_DIG+2) - sd))) != 0);
a = (a >> (sd - (FLT_MANT_DIG + 2))) |
((a & ((du_int)(-1) >> ((N + FLT_MANT_DIG + 2) - sd))) != 0);
};
/* finish: */
a |= (a & 4) != 0; /* Or P into R */
++a; /* round - this step may add a significant bit */
a >>= 2; /* dump Q and R */
/* a is now rounded to FLT_MANT_DIG or FLT_MANT_DIG+1 bits */
if (a & ((du_int)1 << FLT_MANT_DIG))
{
if (a & ((du_int)1 << FLT_MANT_DIG)) {
a >>= 1;
++e;
}
/* a is now rounded to FLT_MANT_DIG bits */
}
else
{
} else {
a <<= (FLT_MANT_DIG - sd);
/* a is now rounded to FLT_MANT_DIG bits */
}
@ -75,9 +68,7 @@ __floatundisf(du_int a)
#if defined(__ARM_EABI__)
#if defined(COMPILER_RT_ARMHF_TARGET)
AEABI_RTABI float __aeabi_ul2f(du_int a) {
return __floatundisf(a);
}
AEABI_RTABI float __aeabi_ul2f(du_int a) { return __floatundisf(a); }
#else
AEABI_RTABI float __aeabi_ul2f(du_int a) COMPILER_RT_ALIAS(__floatundisf);
#endif

View File

@ -21,7 +21,8 @@ COMPILER_RT_ABI fp_t __floatunditf(du_int a) {
const int aWidth = sizeof a * CHAR_BIT;
// Handle zero as a special case to protect clz
if (a == 0) return fromRep(0);
if (a == 0)
return fromRep(0);
// Exponent of (fp_t)a is the width of abs(a).
const int exponent = (aWidth - 1) - __builtin_clzll(a);

View File

@ -17,21 +17,20 @@
/* Returns: convert a to a long double, rounding toward even. */
/* Assumption: long double is a IEEE 80 bit floating point type padded to 128 bits
* du_int is a 64 bit integral type
/* Assumption: long double is a IEEE 80 bit floating point type padded to 128
* bits du_int is a 64 bit integral type
*/
/* gggg gggg gggg gggg gggg gggg gggg gggg | gggg gggg gggg gggg seee eeee eeee eeee |
* 1mmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm
/* gggg gggg gggg gggg gggg gggg gggg gggg | gggg gggg gggg gggg seee eeee eeee
* eeee | 1mmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm
* mmmm mmmm mmmm
*/
COMPILER_RT_ABI long double
__floatundixf(du_int a)
{
COMPILER_RT_ABI long double __floatundixf(du_int a) {
if (a == 0)
return 0.0;
const unsigned N = sizeof(du_int) * CHAR_BIT;
int clz = __builtin_clzll(a);
int e = (N - 1) - clz ; /* exponent */
int e = (N - 1) - clz; /* exponent */
long_double_bits fb;
fb.u.high.s.low = (e + 16383); /* exponent */
fb.u.low.all = a << clz; /* mantissa */

View File

@ -17,13 +17,13 @@
#include "int_lib.h"
COMPILER_RT_ABI fp_t
__floatunsidf(unsigned int a) {
COMPILER_RT_ABI fp_t __floatunsidf(unsigned int a) {
const int aWidth = sizeof a * CHAR_BIT;
// Handle zero as a special case to protect clz
if (a == 0) return fromRep(0);
if (a == 0)
return fromRep(0);
// Exponent of (fp_t)a is the width of abs(a).
const int exponent = (aWidth - 1) - __builtin_clz(a);
@ -40,9 +40,7 @@ __floatunsidf(unsigned int a) {
#if defined(__ARM_EABI__)
#if defined(COMPILER_RT_ARMHF_TARGET)
AEABI_RTABI fp_t __aeabi_ui2d(unsigned int a) {
return __floatunsidf(a);
}
AEABI_RTABI fp_t __aeabi_ui2d(unsigned int a) { return __floatunsidf(a); }
#else
AEABI_RTABI fp_t __aeabi_ui2d(unsigned int a) COMPILER_RT_ALIAS(__floatunsidf);
#endif

View File

@ -17,13 +17,13 @@
#include "int_lib.h"
COMPILER_RT_ABI fp_t
__floatunsisf(unsigned int a) {
COMPILER_RT_ABI fp_t __floatunsisf(unsigned int a) {
const int aWidth = sizeof a * CHAR_BIT;
// Handle zero as a special case to protect clz
if (a == 0) return fromRep(0);
if (a == 0)
return fromRep(0);
// Exponent of (fp_t)a is the width of abs(a).
const int exponent = (aWidth - 1) - __builtin_clz(a);
@ -37,8 +37,10 @@ __floatunsisf(unsigned int a) {
const int shift = exponent - significandBits;
result = (rep_t)a >> shift ^ implicitBit;
rep_t round = (rep_t)a << (typeWidth - shift);
if (round > signBit) result++;
if (round == signBit) result += result & 1;
if (round > signBit)
result++;
if (round == signBit)
result += result & 1;
}
// Insert the exponent
@ -48,9 +50,7 @@ __floatunsisf(unsigned int a) {
#if defined(__ARM_EABI__)
#if defined(COMPILER_RT_ARMHF_TARGET)
AEABI_RTABI fp_t __aeabi_ui2f(unsigned int a) {
return __floatunsisf(a);
}
AEABI_RTABI fp_t __aeabi_ui2f(unsigned int a) { return __floatunsisf(a); }
#else
AEABI_RTABI fp_t __aeabi_ui2f(unsigned int a) COMPILER_RT_ALIAS(__floatunsisf);
#endif
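The two comparisons against signBit above are the whole round-to-nearest, ties-to-even step: round holds the discarded bits left-justified, so a value above the sign bit means more than half an ULP was lost, and a value equal to it means exactly half. A minimal re-statement with plain integers (round_to_nearest_even is a hypothetical helper used only for illustration, not part of the runtime):

#include <stdint.h>
#include <stdio.h>

/* 'rest' holds the bits shifted out, left-justified in a 32-bit word,
   so 0x80000000 marks exactly one half ULP. */
static uint32_t round_to_nearest_even(uint32_t truncated, uint32_t rest) {
  if (rest > 0x80000000u)
    truncated++;                  /* more than half an ULP lost: round up */
  if (rest == 0x80000000u)
    truncated += truncated & 1;   /* exactly half: round to even */
  return truncated;
}

int main(void) {
  printf("%u\n", (unsigned)round_to_nearest_even(100u, 0x80000001u)); /* 101 */
  printf("%u\n", (unsigned)round_to_nearest_even(101u, 0x80000000u)); /* 102 */
  printf("%u\n", (unsigned)round_to_nearest_even(100u, 0x80000000u)); /* 100 */
  return 0;
}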

View File

@ -21,7 +21,8 @@ COMPILER_RT_ABI fp_t __floatunsitf(unsigned int a) {
const int aWidth = sizeof a * CHAR_BIT;
// Handle zero as a special case to protect clz
if (a == 0) return fromRep(0);
if (a == 0)
return fromRep(0);
// Exponent of (fp_t)a is the width of abs(a).
const int exponent = (aWidth - 1) - __builtin_clz(a);

View File

@ -21,18 +21,16 @@
* tu_int is a 128 bit integral type
*/
/* seee eeee eeee mmmm mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm */
/* seee eeee eeee mmmm mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm mmmm mmmm
* mmmm */
COMPILER_RT_ABI double
__floatuntidf(tu_int a)
{
COMPILER_RT_ABI double __floatuntidf(tu_int a) {
if (a == 0)
return 0.0;
const unsigned N = sizeof(tu_int) * CHAR_BIT;
int sd = N - __clzti2(a); /* number of significant digits */
int e = sd - 1; /* exponent */
if (sd > DBL_MANT_DIG)
{
if (sd > DBL_MANT_DIG) {
/* start: 0000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQxxxxxxxxxxxxxxxxxx
* finish: 000000000000000000000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQR
* 12345678901234567890123456
@ -41,31 +39,27 @@ __floatuntidf(tu_int a)
* Q = bit DBL_MANT_DIG bits to the right of 1
* R = "or" of all bits to the right of Q
*/
switch (sd)
{
switch (sd) {
case DBL_MANT_DIG + 1:
a <<= 1;
break;
case DBL_MANT_DIG + 2:
break;
default:
a = (a >> (sd - (DBL_MANT_DIG+2))) |
((a & ((tu_int)(-1) >> ((N + DBL_MANT_DIG+2) - sd))) != 0);
a = (a >> (sd - (DBL_MANT_DIG + 2))) |
((a & ((tu_int)(-1) >> ((N + DBL_MANT_DIG + 2) - sd))) != 0);
};
/* finish: */
a |= (a & 4) != 0; /* Or P into R */
++a; /* round - this step may add a significant bit */
a >>= 2; /* dump Q and R */
/* a is now rounded to DBL_MANT_DIG or DBL_MANT_DIG+1 bits */
if (a & ((tu_int)1 << DBL_MANT_DIG))
{
if (a & ((tu_int)1 << DBL_MANT_DIG)) {
a >>= 1;
++e;
}
/* a is now rounded to DBL_MANT_DIG bits */
}
else
{
} else {
a <<= (DBL_MANT_DIG - sd);
/* a is now rounded to DBL_MANT_DIG bits */
}
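The default case of the switch above compresses every bit below Q into the single sticky bit R, so the later increment-and-shift sequence can still distinguish an exact halfway case from one that is just over half. The same trick in a minimal 64-bit sketch (the real code operates on tu_int, a 128-bit type):

#include <stdint.h>
#include <stdio.h>

int main(void) {
  /* Shift right but remember whether any discarded bit was set, by OR-ing
     the "lost bits are nonzero" test into the lowest kept bit. */
  uint64_t a = 0x1234567890ABCDEFULL;
  int drop = 20;                                    /* bits to discard */
  uint64_t lost = a & ((~(uint64_t)0) >> (64 - drop));
  uint64_t kept = (a >> drop) | (lost != 0);        /* sticky bit in bit 0 */
  printf("kept=0x%llx sticky=%d\n",
         (unsigned long long)kept, (int)(lost != 0));
  return 0;
}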

View File

@ -23,16 +23,13 @@
/* seee eeee emmm mmmm mmmm mmmm mmmm mmmm */
COMPILER_RT_ABI float
__floatuntisf(tu_int a)
{
COMPILER_RT_ABI float __floatuntisf(tu_int a) {
if (a == 0)
return 0.0F;
const unsigned N = sizeof(tu_int) * CHAR_BIT;
int sd = N - __clzti2(a); /* number of significant digits */
int e = sd - 1; /* exponent */
if (sd > FLT_MANT_DIG)
{
if (sd > FLT_MANT_DIG) {
/* start: 0000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQxxxxxxxxxxxxxxxxxx
* finish: 000000000000000000000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQR
* 12345678901234567890123456
@ -41,31 +38,27 @@ __floatuntisf(tu_int a)
* Q = bit FLT_MANT_DIG bits to the right of 1
* R = "or" of all bits to the right of Q
*/
switch (sd)
{
switch (sd) {
case FLT_MANT_DIG + 1:
a <<= 1;
break;
case FLT_MANT_DIG + 2:
break;
default:
a = (a >> (sd - (FLT_MANT_DIG+2))) |
((a & ((tu_int)(-1) >> ((N + FLT_MANT_DIG+2) - sd))) != 0);
a = (a >> (sd - (FLT_MANT_DIG + 2))) |
((a & ((tu_int)(-1) >> ((N + FLT_MANT_DIG + 2) - sd))) != 0);
};
/* finish: */
a |= (a & 4) != 0; /* Or P into R */
++a; /* round - this step may add a significant bit */
a >>= 2; /* dump Q and R */
/* a is now rounded to FLT_MANT_DIG or FLT_MANT_DIG+1 bits */
if (a & ((tu_int)1 << FLT_MANT_DIG))
{
if (a & ((tu_int)1 << FLT_MANT_DIG)) {
a >>= 1;
++e;
}
/* a is now rounded to FLT_MANT_DIG bits */
}
else
{
} else {
a <<= (FLT_MANT_DIG - sd);
/* a is now rounded to FLT_MANT_DIG bits */
}

View File

@ -22,13 +22,13 @@
* tu_int is a 128 bit integral type
*/
/* seee eeee eeee eeee mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm |
* mmmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm
/* seee eeee eeee eeee mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm mmmm mmmm
* mmmm | mmmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm
* mmmm mmmm mmmm
*/
#if defined(CRT_HAS_128BIT) && defined(CRT_LDBL_128BIT)
COMPILER_RT_ABI fp_t
__floatuntitf(tu_int a) {
COMPILER_RT_ABI fp_t __floatuntitf(tu_int a) {
if (a == 0)
return 0.0;
const unsigned N = sizeof(tu_int) * CHAR_BIT;
@ -50,8 +50,8 @@ __floatuntitf(tu_int a) {
case LDBL_MANT_DIG + 2:
break;
default:
a = (a >> (sd - (LDBL_MANT_DIG+2))) |
((a & ((tu_int)(-1) >> ((N + LDBL_MANT_DIG+2) - sd))) != 0);
a = (a >> (sd - (LDBL_MANT_DIG + 2))) |
((a & ((tu_int)(-1) >> ((N + LDBL_MANT_DIG + 2) - sd))) != 0);
};
/* finish: */
a |= (a & 4) != 0; /* Or P into R */

View File

@ -17,24 +17,22 @@
/* Returns: convert a to a long double, rounding toward even. */
/* Assumption: long double is a IEEE 80 bit floating point type padded to 128 bits
* tu_int is a 128 bit integral type
/* Assumption: long double is a IEEE 80 bit floating point type padded to 128
* bits tu_int is a 128 bit integral type
*/
/* gggg gggg gggg gggg gggg gggg gggg gggg | gggg gggg gggg gggg seee eeee eeee eeee |
* 1mmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm
/* gggg gggg gggg gggg gggg gggg gggg gggg | gggg gggg gggg gggg seee eeee eeee
* eeee | 1mmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm
* mmmm mmmm mmmm
*/
COMPILER_RT_ABI long double
__floatuntixf(tu_int a)
{
COMPILER_RT_ABI long double __floatuntixf(tu_int a) {
if (a == 0)
return 0.0;
const unsigned N = sizeof(tu_int) * CHAR_BIT;
int sd = N - __clzti2(a); /* number of significant digits */
int e = sd - 1; /* exponent */
if (sd > LDBL_MANT_DIG)
{
if (sd > LDBL_MANT_DIG) {
/* start: 0000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQxxxxxxxxxxxxxxxxxx
* finish: 000000000000000000000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQR
* 12345678901234567890123456
@ -43,31 +41,27 @@ __floatuntixf(tu_int a)
* Q = bit LDBL_MANT_DIG bits to the right of 1
* R = "or" of all bits to the right of Q
*/
switch (sd)
{
switch (sd) {
case LDBL_MANT_DIG + 1:
a <<= 1;
break;
case LDBL_MANT_DIG + 2:
break;
default:
a = (a >> (sd - (LDBL_MANT_DIG+2))) |
((a & ((tu_int)(-1) >> ((N + LDBL_MANT_DIG+2) - sd))) != 0);
a = (a >> (sd - (LDBL_MANT_DIG + 2))) |
((a & ((tu_int)(-1) >> ((N + LDBL_MANT_DIG + 2) - sd))) != 0);
};
/* finish: */
a |= (a & 4) != 0; /* Or P into R */
++a; /* round - this step may add a significant bit */
a >>= 2; /* dump Q and R */
/* a is now rounded to LDBL_MANT_DIG or LDBL_MANT_DIG+1 bits */
if (a & ((tu_int)1 << LDBL_MANT_DIG))
{
if (a & ((tu_int)1 << LDBL_MANT_DIG)) {
a >>= 1;
++e;
}
/* a is now rounded to LDBL_MANT_DIG bits */
}
else
{
} else {
a <<= (LDBL_MANT_DIG - sd);
/* a is now rounded to LDBL_MANT_DIG bits */
}

View File

@ -23,29 +23,37 @@ static __inline fp_t __addXf3__(fp_t a, fp_t b) {
if (aAbs - REP_C(1) >= infRep - REP_C(1) ||
bAbs - REP_C(1) >= infRep - REP_C(1)) {
// NaN + anything = qNaN
if (aAbs > infRep) return fromRep(toRep(a) | quietBit);
if (aAbs > infRep)
return fromRep(toRep(a) | quietBit);
// anything + NaN = qNaN
if (bAbs > infRep) return fromRep(toRep(b) | quietBit);
if (bAbs > infRep)
return fromRep(toRep(b) | quietBit);
if (aAbs == infRep) {
// +/-infinity + -/+infinity = qNaN
if ((toRep(a) ^ toRep(b)) == signBit) return fromRep(qnanRep);
if ((toRep(a) ^ toRep(b)) == signBit)
return fromRep(qnanRep);
// +/-infinity + anything remaining = +/- infinity
else return a;
else
return a;
}
// anything remaining + +/-infinity = +/-infinity
if (bAbs == infRep) return b;
if (bAbs == infRep)
return b;
// zero + anything = anything
if (!aAbs) {
// but we need to get the sign right for zero + zero
if (!bAbs) return fromRep(toRep(a) & toRep(b));
else return b;
if (!bAbs)
return fromRep(toRep(a) & toRep(b));
else
return b;
}
// anything + zero = anything
if (!bAbs) return a;
if (!bAbs)
return a;
}
// Swap a and b if necessary so that a has the larger absolute value.
@ -62,8 +70,10 @@ static __inline fp_t __addXf3__(fp_t a, fp_t b) {
rep_t bSignificand = bRep & significandMask;
// Normalize any denormals, and adjust the exponent accordingly.
if (aExponent == 0) aExponent = normalize(&aSignificand);
if (bExponent == 0) bExponent = normalize(&bSignificand);
if (aExponent == 0)
aExponent = normalize(&aSignificand);
if (bExponent == 0)
bExponent = normalize(&bSignificand);
// The sign of the result is the sign of the larger operand, a. If they
// have opposite signs, we are performing a subtraction; otherwise addition.
@ -91,7 +101,8 @@ static __inline fp_t __addXf3__(fp_t a, fp_t b) {
if (subtraction) {
aSignificand -= bSignificand;
// If a == -b, return +zero.
if (aSignificand == 0) return fromRep(0);
if (aSignificand == 0)
return fromRep(0);
// If partial cancellation occured, we need to left-shift the result
// and adjust the exponent:
@ -100,8 +111,7 @@ static __inline fp_t __addXf3__(fp_t a, fp_t b) {
aSignificand <<= shift;
aExponent -= shift;
}
}
else /* addition */ {
} else /* addition */ {
aSignificand += bSignificand;
// If the addition carried up, we need to right-shift the result and
@ -114,7 +124,8 @@ static __inline fp_t __addXf3__(fp_t a, fp_t b) {
}
// If we have overflowed the type, return +/- infinity:
if (aExponent >= maxExponent) return fromRep(infRep | resultSign);
if (aExponent >= maxExponent)
return fromRep(infRep | resultSign);
if (aExponent <= 0) {
// Result is denormal before rounding; the exponent is zero and we
@ -137,7 +148,9 @@ static __inline fp_t __addXf3__(fp_t a, fp_t b) {
// Final rounding. The result may overflow to infinity, but that is the
// correct result in that case.
if (roundGuardSticky > 0x4) result++;
if (roundGuardSticky == 0x4) result += result & 1;
if (roundGuardSticky > 0x4)
result++;
if (roundGuardSticky == 0x4)
result += result & 1;
return fromRep(result);
}
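Two of the special cases above are easy to check against native IEEE-754 addition: zero plus zero keeps a sign only when both operands carry it (the toRep(a) & toRep(b) expression), and exact cancellation of finite values returns +0 (the aSignificand == 0 early return). A small sketch, assuming IEEE-754 doubles and the default rounding mode:

#include <math.h>
#include <stdio.h>

int main(void) {
  double pz = 0.0, nz = -0.0;
  printf("+0 + -0    -> signbit=%d\n", (int)signbit(pz + nz));      /* 0 */
  printf("-0 + -0    -> signbit=%d\n", (int)signbit(nz + nz));      /* 1 */
  printf("1.5 + -1.5 -> signbit=%d\n", (int)signbit(1.5 + -1.5));   /* 0: +0 */
  return 0;
}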

View File

@ -1,4 +1,5 @@
//===-lib/fp_extend.h - low precision -> high precision conversion -*- C -*-===//
//===-lib/fp_extend.h - low precision -> high precision conversion -*- C
//-*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@ -47,7 +48,7 @@ static const int srcSigBits = 10;
#else
#error Source should be half, single, or double precision!
#endif //end source precision
#endif // end source precision
#if defined DST_SINGLE
typedef float dst_t;
@ -69,20 +70,26 @@ static const int dstSigBits = 112;
#else
#error Destination should be single, double, or quad precision!
#endif //end destination precision
#endif // end destination precision
// End of specialization parameters. Two helper routines for conversion to and
// from the representation of floating-point data as integer values follow.
static __inline src_rep_t srcToRep(src_t x) {
const union { src_t f; src_rep_t i; } rep = {.f = x};
const union {
src_t f;
src_rep_t i;
} rep = {.f = x};
return rep.i;
}
static __inline dst_t dstFromRep(dst_rep_t x) {
const union { dst_t f; dst_rep_t i; } rep = {.i = x};
const union {
dst_t f;
dst_rep_t i;
} rep = {.i = x};
return rep.f;
}
// End helper routines. Conversion implementation follows.
#endif //FP_EXTEND_HEADER
#endif // FP_EXTEND_HEADER
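srcToRep and dstFromRep reinterpret the bytes of a floating-point value as an integer through a union, a form of type punning that avoids strict-aliasing problems. A standalone version of the same idea, assuming float is IEEE-754 binary32 and the same width as uint32_t:

#include <stdint.h>
#include <stdio.h>

static uint32_t float_to_rep(float x) {
  const union { float f; uint32_t i; } rep = {.f = x};
  return rep.i;
}

int main(void) {
  printf("rep(1.0f)  = 0x%08x\n", (unsigned)float_to_rep(1.0f));  /* 0x3f800000 */
  printf("rep(-2.0f) = 0x%08x\n", (unsigned)float_to_rep(-2.0f)); /* 0xc0000000 */
  return 0;
}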

Some files were not shown because too many files have changed in this diff.