[clang][ARM][NFC] Clean up signed conversion and undefined macros in builtin header

These warnings were identified while debugging modules with -Wsystem-headers enabled.
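
The signed-conversion fixes all follow one mechanical pattern: builtins such as __builtin_clz return a signed type, so returning their result from an ACLE wrapper declared with an unsigned fixed-width return type changes signedness implicitly, which a diagnostic like -Wsign-conversion reports once -Wsystem-headers stops suppressing warnings inside the header. A minimal standalone sketch of the before/after shape (the clz_before/clz_after names are illustrative, not part of arm_acle.h):

    #include <stdint.h>

    /* __builtin_clz returns int, but the ACLE __clz wrapper returns uint32_t. */
    static inline uint32_t clz_before(uint32_t t) {
      return __builtin_clz(t);            /* implicit int -> uint32_t conversion warns */
    }
    static inline uint32_t clz_after(uint32_t t) {
      return (uint32_t)__builtin_clz(t);  /* explicit cast makes the conversion intentional */
    }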

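The undefined-macro cleanups are equally uniform: a plain #if on an ACLE feature macro that the target does not predefine evaluates the identifier as 0, which -Wundef flags. Rewriting the test as defined(MACRO) && MACRO yields the same result, and because Clang's preprocessor short-circuits the &&, the unevaluated right-hand side is not diagnosed. A sketch of the pattern, using __ARM_FEATURE_DSP (one of the macros touched below) as the example:

    /* Before: -Wundef fires when __ARM_FEATURE_DSP is not defined. */
    #if __ARM_FEATURE_DSP
    /* ... DSP intrinsics ... */
    #endif

    /* After: defined() guards the test; the condition's value is unchanged. */
    #if defined(__ARM_FEATURE_DSP) && __ARM_FEATURE_DSP
    /* ... DSP intrinsics ... */
    #endif
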
Differential Revision: https://reviews.llvm.org/D132003
Dominic Chen 2022-08-16 16:41:26 -07:00
parent 5f3a8b585b
commit ac77b3fde1
1 changed file with 28 additions and 28 deletions

@@ -64,7 +64,7 @@ static __inline__ void __attribute__((__always_inline__, __nodebug__)) __yield(v
}
#endif
-#if __ARM_32BIT_STATE
+#if defined(__ARM_32BIT_STATE) && __ARM_32BIT_STATE
#define __dbg(t) __builtin_arm_dbg(t)
#endif
@@ -82,7 +82,7 @@ __swp(uint32_t __x, volatile uint32_t *__p) {
/* 8.6.1 Data prefetch */
#define __pld(addr) __pldx(0, 0, 0, addr)
-#if __ARM_32BIT_STATE
+#if defined(__ARM_32BIT_STATE) && __ARM_32BIT_STATE
#define __pldx(access_kind, cache_level, retention_policy, addr) \
__builtin_arm_prefetch(addr, access_kind, 1)
#else
@@ -93,7 +93,7 @@ __swp(uint32_t __x, volatile uint32_t *__p) {
/* 8.6.2 Instruction prefetch */
#define __pli(addr) __plix(0, 0, addr)
-#if __ARM_32BIT_STATE
+#if defined(__ARM_32BIT_STATE) && __ARM_32BIT_STATE
#define __plix(cache_level, retention_policy, addr) \
__builtin_arm_prefetch(addr, 0, 0)
#else
@@ -140,17 +140,17 @@ __rorl(unsigned long __x, uint32_t __y) {
/* CLZ */
static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
__clz(uint32_t __t) {
-return __builtin_clz(__t);
+return (uint32_t)__builtin_clz(__t);
}
static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__))
__clzl(unsigned long __t) {
-return __builtin_clzl(__t);
+return (unsigned long)__builtin_clzl(__t);
}
static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
__clzll(uint64_t __t) {
-return __builtin_clzll(__t);
+return (uint64_t)__builtin_clzll(__t);
}
/* CLS */
@@ -201,7 +201,7 @@ __rev16(uint32_t __t) {
static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
__rev16ll(uint64_t __t) {
-return (((uint64_t)__rev16(__t >> 32)) << 32) | __rev16(__t);
+return (((uint64_t)__rev16(__t >> 32)) << 32) | (uint64_t)__rev16((uint32_t)__t);
}
static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__))
@@ -216,7 +216,7 @@ __rev16l(unsigned long __t) {
/* REVSH */
static __inline__ int16_t __attribute__((__always_inline__, __nodebug__))
__revsh(int16_t __t) {
-return __builtin_bswap16(__t);
+return (int16_t)__builtin_bswap16((uint16_t)__t);
}
/* RBIT */
@@ -227,7 +227,7 @@ __rbit(uint32_t __t) {
static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
__rbitll(uint64_t __t) {
-#if __ARM_32BIT_STATE
+#if defined(__ARM_32BIT_STATE) && __ARM_32BIT_STATE
return (((uint64_t)__builtin_arm_rbit(__t)) << 32) |
__builtin_arm_rbit(__t >> 32);
#else
@@ -247,7 +247,7 @@ __rbitl(unsigned long __t) {
/*
* 9.3 16-bit multiplications
*/
-#if __ARM_FEATURE_DSP
+#if defined(__ARM_FEATURE_DSP) && __ARM_FEATURE_DSP
static __inline__ int32_t __attribute__((__always_inline__,__nodebug__))
__smulbb(int32_t __a, int32_t __b) {
return __builtin_arm_smulbb(__a, __b);
@@ -281,13 +281,13 @@ __smulwt(int32_t __a, int32_t __b) {
* intrinsics are implemented and the flag is enabled.
*/
/* 9.4.1 Width-specified saturation intrinsics */
-#if __ARM_FEATURE_SAT
+#if defined(__ARM_FEATURE_SAT) && __ARM_FEATURE_SAT
#define __ssat(x, y) __builtin_arm_ssat(x, y)
#define __usat(x, y) __builtin_arm_usat(x, y)
#endif
/* 9.4.2 Saturating addition and subtraction intrinsics */
-#if __ARM_FEATURE_DSP
+#if defined(__ARM_FEATURE_DSP) && __ARM_FEATURE_DSP
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
__qadd(int32_t __t, int32_t __v) {
return __builtin_arm_qadd(__t, __v);
@@ -305,7 +305,7 @@ __qdbl(int32_t __t) {
#endif
/* 9.4.3 Accumultating multiplications */
-#if __ARM_FEATURE_DSP
+#if defined(__ARM_FEATURE_DSP) && __ARM_FEATURE_DSP
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
__smlabb(int32_t __a, int32_t __b, int32_t __c) {
return __builtin_arm_smlabb(__a, __b, __c);
@@ -334,13 +334,13 @@ __smlawt(int32_t __a, int32_t __b, int32_t __c) {
/* 9.5.4 Parallel 16-bit saturation */
-#if __ARM_FEATURE_SIMD32
+#if defined(__ARM_FEATURE_SIMD32) && __ARM_FEATURE_SIMD32
#define __ssat16(x, y) __builtin_arm_ssat16(x, y)
#define __usat16(x, y) __builtin_arm_usat16(x, y)
#endif
/* 9.5.5 Packing and unpacking */
-#if __ARM_FEATURE_SIMD32
+#if defined(__ARM_FEATURE_SIMD32) && __ARM_FEATURE_SIMD32
typedef int32_t int8x4_t;
typedef int32_t int16x2_t;
typedef uint32_t uint8x4_t;
@@ -365,7 +365,7 @@ __uxtb16(int8x4_t __a) {
#endif
/* 9.5.6 Parallel selection */
-#if __ARM_FEATURE_SIMD32
+#if defined(__ARM_FEATURE_SIMD32) && __ARM_FEATURE_SIMD32
static __inline__ uint8x4_t __attribute__((__always_inline__, __nodebug__))
__sel(uint8x4_t __a, uint8x4_t __b) {
return __builtin_arm_sel(__a, __b);
@@ -373,7 +373,7 @@ __sel(uint8x4_t __a, uint8x4_t __b) {
#endif
/* 9.5.7 Parallel 8-bit addition and subtraction */
-#if __ARM_FEATURE_SIMD32
+#if defined(__ARM_FEATURE_SIMD32) && __ARM_FEATURE_SIMD32
static __inline__ int8x4_t __attribute__((__always_inline__, __nodebug__))
__qadd8(int8x4_t __a, int8x4_t __b) {
return __builtin_arm_qadd8(__a, __b);
@@ -425,7 +425,7 @@ __usub8(uint8x4_t __a, uint8x4_t __b) {
#endif
/* 9.5.8 Sum of 8-bit absolute differences */
-#if __ARM_FEATURE_SIMD32
+#if defined(__ARM_FEATURE_SIMD32) && __ARM_FEATURE_SIMD32
static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
__usad8(uint8x4_t __a, uint8x4_t __b) {
return __builtin_arm_usad8(__a, __b);
@@ -437,7 +437,7 @@ __usada8(uint8x4_t __a, uint8x4_t __b, uint32_t __c) {
#endif
/* 9.5.9 Parallel 16-bit addition and subtraction */
-#if __ARM_FEATURE_SIMD32
+#if defined(__ARM_FEATURE_SIMD32) && __ARM_FEATURE_SIMD32
static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
__qadd16(int16x2_t __a, int16x2_t __b) {
return __builtin_arm_qadd16(__a, __b);
@@ -537,7 +537,7 @@ __usub16(uint16x2_t __a, uint16x2_t __b) {
#endif
/* 9.5.10 Parallel 16-bit multiplications */
-#if __ARM_FEATURE_SIMD32
+#if defined(__ARM_FEATURE_SIMD32) && __ARM_FEATURE_SIMD32
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
__smlad(int16x2_t __a, int16x2_t __b, int32_t __c) {
return __builtin_arm_smlad(__a, __b, __c);
@@ -589,7 +589,7 @@ __smusdx(int16x2_t __a, int16x2_t __b) {
#endif
/* 9.7 CRC32 intrinsics */
-#if __ARM_FEATURE_CRC32
+#if defined(__ARM_FEATURE_CRC32) && __ARM_FEATURE_CRC32
static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
__crc32b(uint32_t __a, uint8_t __b) {
return __builtin_arm_crc32b(__a, __b);
@@ -632,7 +632,7 @@ __crc32cd(uint32_t __a, uint64_t __b) {
#endif
/* Armv8.3-A Javascript conversion intrinsic */
-#if __ARM_64BIT_STATE && defined(__ARM_FEATURE_JCVT)
+#if defined(__ARM_64BIT_STATE) && __ARM_64BIT_STATE && defined(__ARM_FEATURE_JCVT)
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
__jcvt(double __a) {
return __builtin_arm_jcvt(__a);
@@ -640,7 +640,7 @@ __jcvt(double __a) {
#endif
/* Armv8.5-A FP rounding intrinsics */
-#if __ARM_64BIT_STATE && defined(__ARM_FEATURE_FRINT)
+#if defined(__ARM_64BIT_STATE) && __ARM_64BIT_STATE && defined(__ARM_FEATURE_FRINT)
static __inline__ float __attribute__((__always_inline__, __nodebug__))
__frint32zf(float __a) {
return __builtin_arm_frint32zf(__a);
@@ -683,7 +683,7 @@ __frint64x(double __a) {
#endif
/* Armv8.7-A load/store 64-byte intrinsics */
-#if __ARM_64BIT_STATE && defined(__ARM_FEATURE_LS64)
+#if defined(__ARM_64BIT_STATE) && __ARM_64BIT_STATE && defined(__ARM_FEATURE_LS64)
typedef struct {
uint64_t val[8];
} data512_t;
@@ -721,7 +721,7 @@ __arm_st64bv0(void *__addr, data512_t __value) {
#define __arm_wsrf64(sysreg, v) __arm_wsr64(sysreg, __builtin_bit_cast(uint64_t, v))
/* Memory Tagging Extensions (MTE) Intrinsics */
-#if __ARM_FEATURE_MEMORY_TAGGING
+#if defined(__ARM_FEATURE_MEMORY_TAGGING) && __ARM_FEATURE_MEMORY_TAGGING
#define __arm_mte_create_random_tag(__ptr, __mask) __builtin_arm_irg(__ptr, __mask)
#define __arm_mte_increment_tag(__ptr, __tag_offset) __builtin_arm_addg(__ptr, __tag_offset)
#define __arm_mte_exclude_tag(__ptr, __excluded) __builtin_arm_gmi(__ptr, __excluded)
@@ -731,13 +731,13 @@ __arm_st64bv0(void *__addr, data512_t __value) {
#endif
/* Memory Operations Intrinsics */
-#if __ARM_FEATURE_MOPS && __ARM_FEATURE_MEMORY_TAGGING
+#if defined(__ARM_FEATURE_MOPS) && __ARM_FEATURE_MOPS && defined(__ARM_FEATURE_MEMORY_TAGGING) && __ARM_FEATURE_MEMORY_TAGGING
#define __arm_mops_memset_tag(__tagged_address, __value, __size) \
__builtin_arm_mops_memset_tag(__tagged_address, __value, __size)
#endif
/* Transactional Memory Extension (TME) Intrinsics */
-#if __ARM_FEATURE_TME
+#if defined(__ARM_FEATURE_TME) && __ARM_FEATURE_TME
#define _TMFAILURE_REASON 0x00007fffu
#define _TMFAILURE_RTRY 0x00008000u
@@ -759,7 +759,7 @@ __arm_st64bv0(void *__addr, data512_t __value) {
#endif /* __ARM_FEATURE_TME */
/* Armv8.5-A Random number generation intrinsics */
-#if __ARM_64BIT_STATE && defined(__ARM_FEATURE_RNG)
+#if defined(__ARM_64BIT_STATE) && __ARM_64BIT_STATE && defined(__ARM_FEATURE_RNG)
static __inline__ int __attribute__((__always_inline__, __nodebug__))
__rndr(uint64_t *__p) {
return __builtin_arm_rndr(__p);