forked from OSchip/llvm-project
[COFF, ARM64] Implement InterlockedOr*_* builtins
This is the fourth in a series of patches to move intrinsic definitions out of intrin.h. llvm-svn: 346190
This commit is contained in:
parent
6b880689f0
commit
ec62b31e2c
|
@@ -143,6 +143,19 @@ TARGET_HEADER_BUILTIN(_InterlockedCompareExchange64_acq, "LLiLLiD*LLiLLi", "nh",
|
|||
TARGET_HEADER_BUILTIN(_InterlockedCompareExchange64_nf, "LLiLLiD*LLiLLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
|
||||
TARGET_HEADER_BUILTIN(_InterlockedCompareExchange64_rel, "LLiLLiD*LLiLLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
|
||||
|
||||
// MSVC interlocked bitwise-OR intrinsics with explicit memory ordering,
// registered as header builtins (resolved via intrin.h in MS-compatible
// language modes). Suffixes: _acq = acquire, _rel = release,
// _nf = "no fence" (relaxed). Widths: 8/16/(none = long)/64.
// Type strings encode "ret fn(arg...)": e.g. "ccD*c" = char (char volatile *, char),
// "LiLiD*Li" = long (long volatile *, long), "LLi..." = __int64.
// NOTE(review): "nh" presumably flags nothrow + requires-header — confirm
// against the Builtins.def flag documentation.
TARGET_HEADER_BUILTIN(_InterlockedOr8_acq, "ccD*c", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
|
||||
TARGET_HEADER_BUILTIN(_InterlockedOr8_nf, "ccD*c", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
|
||||
TARGET_HEADER_BUILTIN(_InterlockedOr8_rel, "ccD*c", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
|
||||
TARGET_HEADER_BUILTIN(_InterlockedOr16_acq, "ssD*s", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
|
||||
TARGET_HEADER_BUILTIN(_InterlockedOr16_nf, "ssD*s", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
|
||||
TARGET_HEADER_BUILTIN(_InterlockedOr16_rel, "ssD*s", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
|
||||
TARGET_HEADER_BUILTIN(_InterlockedOr_acq, "LiLiD*Li", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
|
||||
TARGET_HEADER_BUILTIN(_InterlockedOr_nf, "LiLiD*Li", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
|
||||
TARGET_HEADER_BUILTIN(_InterlockedOr_rel, "LiLiD*Li", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
|
||||
TARGET_HEADER_BUILTIN(_InterlockedOr64_acq, "LLiLLiD*LLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
|
||||
TARGET_HEADER_BUILTIN(_InterlockedOr64_nf, "LLiLLiD*LLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
|
||||
TARGET_HEADER_BUILTIN(_InterlockedOr64_rel, "LLiLLiD*LLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
|
||||
|
||||
TARGET_HEADER_BUILTIN(_ReadWriteBarrier, "v", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
|
||||
TARGET_HEADER_BUILTIN(__getReg, "ULLii", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
|
||||
TARGET_HEADER_BUILTIN(_ReadStatusReg, "ii", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
|
||||
|
|
|
@@ -269,6 +269,19 @@ TARGET_HEADER_BUILTIN(_InterlockedCompareExchange64_acq, "LLiLLiD*LLiLLi", "nh",
|
|||
TARGET_HEADER_BUILTIN(_InterlockedCompareExchange64_nf, "LLiLLiD*LLiLLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
|
||||
TARGET_HEADER_BUILTIN(_InterlockedCompareExchange64_rel, "LLiLLiD*LLiLLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
|
||||
|
||||
// Second .def hunk (other target's builtin list): the same twelve
// _InterlockedOr variants — acquire (_acq), release (_rel), and relaxed
// ("no fence", _nf) orderings for 8-, 16-, 32- (long), and 64-bit operands —
// declared as intrin.h header builtins for MS-compatible language modes.
// Prototype encoding: return type, then volatile pointer (D*), then operand.
TARGET_HEADER_BUILTIN(_InterlockedOr8_acq, "ccD*c", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
|
||||
TARGET_HEADER_BUILTIN(_InterlockedOr8_nf, "ccD*c", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
|
||||
TARGET_HEADER_BUILTIN(_InterlockedOr8_rel, "ccD*c", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
|
||||
TARGET_HEADER_BUILTIN(_InterlockedOr16_acq, "ssD*s", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
|
||||
TARGET_HEADER_BUILTIN(_InterlockedOr16_nf, "ssD*s", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
|
||||
TARGET_HEADER_BUILTIN(_InterlockedOr16_rel, "ssD*s", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
|
||||
TARGET_HEADER_BUILTIN(_InterlockedOr_acq, "LiLiD*Li", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
|
||||
TARGET_HEADER_BUILTIN(_InterlockedOr_nf, "LiLiD*Li", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
|
||||
TARGET_HEADER_BUILTIN(_InterlockedOr_rel, "LiLiD*Li", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
|
||||
TARGET_HEADER_BUILTIN(_InterlockedOr64_acq, "LLiLLiD*LLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
|
||||
TARGET_HEADER_BUILTIN(_InterlockedOr64_nf, "LLiLLiD*LLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
|
||||
TARGET_HEADER_BUILTIN(_InterlockedOr64_rel, "LLiLLiD*LLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
|
||||
|
||||
#undef BUILTIN
|
||||
#undef LANGBUILTIN
|
||||
#undef TARGET_HEADER_BUILTIN
|
||||
|
|
|
@@ -799,6 +799,9 @@ enum class CodeGenFunction::MSVCIntrin {
|
|||
_InterlockedCompareExchange_acq,
|
||||
_InterlockedCompareExchange_rel,
|
||||
_InterlockedCompareExchange_nf,
|
||||
_InterlockedOr_acq,
|
||||
_InterlockedOr_rel,
|
||||
_InterlockedOr_nf,
|
||||
__fastfail,
|
||||
};
|
||||
|
||||
|
@@ -889,6 +892,15 @@ Value *CodeGenFunction::EmitMSVCBuiltinExpr(MSVCIntrin BuiltinID,
|
|||
return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Release);
|
||||
case MSVCIntrin::_InterlockedCompareExchange_nf:
|
||||
return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Monotonic);
|
||||
case MSVCIntrin::_InterlockedOr_acq:
|
||||
return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E,
|
||||
AtomicOrdering::Acquire);
|
||||
case MSVCIntrin::_InterlockedOr_rel:
|
||||
return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E,
|
||||
AtomicOrdering::Release);
|
||||
case MSVCIntrin::_InterlockedOr_nf:
|
||||
return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E,
|
||||
AtomicOrdering::Monotonic);
|
||||
|
||||
case MSVCIntrin::_InterlockedDecrement: {
|
||||
llvm::Type *IntTy = ConvertType(E->getType());
|
||||
|
@@ -6217,6 +6229,21 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
|
|||
case ARM::BI_InterlockedCompareExchange_nf:
|
||||
case ARM::BI_InterlockedCompareExchange64_nf:
|
||||
return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedCompareExchange_nf, E);
|
||||
case ARM::BI_InterlockedOr8_acq:
|
||||
case ARM::BI_InterlockedOr16_acq:
|
||||
case ARM::BI_InterlockedOr_acq:
|
||||
case ARM::BI_InterlockedOr64_acq:
|
||||
return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr_acq, E);
|
||||
case ARM::BI_InterlockedOr8_rel:
|
||||
case ARM::BI_InterlockedOr16_rel:
|
||||
case ARM::BI_InterlockedOr_rel:
|
||||
case ARM::BI_InterlockedOr64_rel:
|
||||
return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr_rel, E);
|
||||
case ARM::BI_InterlockedOr8_nf:
|
||||
case ARM::BI_InterlockedOr16_nf:
|
||||
case ARM::BI_InterlockedOr_nf:
|
||||
case ARM::BI_InterlockedOr64_nf:
|
||||
return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr_nf, E);
|
||||
}
|
||||
|
||||
// Get the last argument, which specifies the vector type.
|
||||
|
@@ -8748,6 +8775,21 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
|
|||
case AArch64::BI_InterlockedCompareExchange_nf:
|
||||
case AArch64::BI_InterlockedCompareExchange64_nf:
|
||||
return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedCompareExchange_nf, E);
|
||||
case AArch64::BI_InterlockedOr8_acq:
|
||||
case AArch64::BI_InterlockedOr16_acq:
|
||||
case AArch64::BI_InterlockedOr_acq:
|
||||
case AArch64::BI_InterlockedOr64_acq:
|
||||
return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr_acq, E);
|
||||
case AArch64::BI_InterlockedOr8_rel:
|
||||
case AArch64::BI_InterlockedOr16_rel:
|
||||
case AArch64::BI_InterlockedOr_rel:
|
||||
case AArch64::BI_InterlockedOr64_rel:
|
||||
return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr_rel, E);
|
||||
case AArch64::BI_InterlockedOr8_nf:
|
||||
case AArch64::BI_InterlockedOr16_nf:
|
||||
case AArch64::BI_InterlockedOr_nf:
|
||||
case AArch64::BI_InterlockedOr64_nf:
|
||||
return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr_nf, E);
|
||||
|
||||
case AArch64::BI_InterlockedAdd: {
|
||||
Value *Arg0 = EmitScalarExpr(E->getArg(0));
|
||||
|
|
|
@@ -498,54 +498,18 @@ unsigned char _interlockedbittestandreset_rel(long volatile *_BitBase,
|
|||
|* Interlocked Or
|
||||
\*----------------------------------------------------------------------------*/
|
||||
#if defined(__arm__) || defined(__aarch64__)
|
||||
/* NOTE(review): this flattened diff view contains BOTH sides of the change
   for the same _InterlockedOr* symbols: the old static inline definitions
   (removed by this commit) and the new plain declarations (added). In the
   resulting intrin.h only the declarations remain — Clang now recognizes
   these names as builtins (see the TARGET_HEADER_BUILTIN entries and the
   EmitMSVCBuiltinExpr hunks elsewhere in this commit) and emits the atomic
   operation itself. */
/* Old inline fallbacks: each forwards to __atomic_fetch_or with the ordering
   named by the suffix (_acq = __ATOMIC_ACQUIRE, _nf = __ATOMIC_RELAXED,
   _rel = __ATOMIC_RELEASE). */
static __inline__ char __DEFAULT_FN_ATTRS
|
||||
_InterlockedOr8_acq(char volatile *_Value, char _Mask) {
|
||||
return __atomic_fetch_or(_Value, _Mask, __ATOMIC_ACQUIRE);
|
||||
}
|
||||
static __inline__ char __DEFAULT_FN_ATTRS
|
||||
_InterlockedOr8_nf(char volatile *_Value, char _Mask) {
|
||||
return __atomic_fetch_or(_Value, _Mask, __ATOMIC_RELAXED);
|
||||
}
|
||||
static __inline__ char __DEFAULT_FN_ATTRS
|
||||
_InterlockedOr8_rel(char volatile *_Value, char _Mask) {
|
||||
return __atomic_fetch_or(_Value, _Mask, __ATOMIC_RELEASE);
|
||||
}
|
||||
static __inline__ short __DEFAULT_FN_ATTRS
|
||||
_InterlockedOr16_acq(short volatile *_Value, short _Mask) {
|
||||
return __atomic_fetch_or(_Value, _Mask, __ATOMIC_ACQUIRE);
|
||||
}
|
||||
static __inline__ short __DEFAULT_FN_ATTRS
|
||||
_InterlockedOr16_nf(short volatile *_Value, short _Mask) {
|
||||
return __atomic_fetch_or(_Value, _Mask, __ATOMIC_RELAXED);
|
||||
}
|
||||
static __inline__ short __DEFAULT_FN_ATTRS
|
||||
_InterlockedOr16_rel(short volatile *_Value, short _Mask) {
|
||||
return __atomic_fetch_or(_Value, _Mask, __ATOMIC_RELEASE);
|
||||
}
|
||||
static __inline__ long __DEFAULT_FN_ATTRS
|
||||
_InterlockedOr_acq(long volatile *_Value, long _Mask) {
|
||||
return __atomic_fetch_or(_Value, _Mask, __ATOMIC_ACQUIRE);
|
||||
}
|
||||
static __inline__ long __DEFAULT_FN_ATTRS
|
||||
_InterlockedOr_nf(long volatile *_Value, long _Mask) {
|
||||
return __atomic_fetch_or(_Value, _Mask, __ATOMIC_RELAXED);
|
||||
}
|
||||
static __inline__ long __DEFAULT_FN_ATTRS
|
||||
_InterlockedOr_rel(long volatile *_Value, long _Mask) {
|
||||
return __atomic_fetch_or(_Value, _Mask, __ATOMIC_RELEASE);
|
||||
}
|
||||
static __inline__ __int64 __DEFAULT_FN_ATTRS
|
||||
_InterlockedOr64_acq(__int64 volatile *_Value, __int64 _Mask) {
|
||||
return __atomic_fetch_or(_Value, _Mask, __ATOMIC_ACQUIRE);
|
||||
}
|
||||
static __inline__ __int64 __DEFAULT_FN_ATTRS
|
||||
_InterlockedOr64_nf(__int64 volatile *_Value, __int64 _Mask) {
|
||||
return __atomic_fetch_or(_Value, _Mask, __ATOMIC_RELAXED);
|
||||
}
|
||||
static __inline__ __int64 __DEFAULT_FN_ATTRS
|
||||
_InterlockedOr64_rel(__int64 volatile *_Value, __int64 _Mask) {
|
||||
return __atomic_fetch_or(_Value, _Mask, __ATOMIC_RELEASE);
|
||||
}
|
||||
/* New declarations: prototypes only; the compiler lowers calls to these
   names directly, so no inline definition is required. */
char _InterlockedOr8_acq(char volatile *_Value, char _Mask);
|
||||
char _InterlockedOr8_nf(char volatile *_Value, char _Mask);
|
||||
char _InterlockedOr8_rel(char volatile *_Value, char _Mask);
|
||||
short _InterlockedOr16_acq(short volatile *_Value, short _Mask);
|
||||
short _InterlockedOr16_nf(short volatile *_Value, short _Mask);
|
||||
short _InterlockedOr16_rel(short volatile *_Value, short _Mask);
|
||||
long _InterlockedOr_acq(long volatile *_Value, long _Mask);
|
||||
long _InterlockedOr_nf(long volatile *_Value, long _Mask);
|
||||
long _InterlockedOr_rel(long volatile *_Value, long _Mask);
|
||||
__int64 _InterlockedOr64_acq(__int64 volatile *_Value, __int64 _Mask);
|
||||
__int64 _InterlockedOr64_nf(__int64 volatile *_Value, __int64 _Mask);
|
||||
__int64 _InterlockedOr64_rel(__int64 volatile *_Value, __int64 _Mask);
|
||||
#endif
|
||||
/*----------------------------------------------------------------------------*\
|
||||
|* Interlocked Xor
|
||||
|
|
|
@@ -890,6 +890,102 @@ __int64 test_InterlockedCompareExchange64_nf(__int64 volatile *Destination, __in
|
|||
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = extractvalue { i64, i1 } [[TMP]], 0
|
||||
// CHECK-ARM-ARM64: ret i64 [[RESULT]]
|
||||
// CHECK-ARM-ARM64: }
|
||||
|
||||
// CodeGen tests for the MSVC _InterlockedOr*_{acq,rel,nf} intrinsics on
// ARM/ARM64: each wrapper must lower to a single `atomicrmw or` with the
// ordering matching its suffix (acquire / release / monotonic).
// The "CHECK-ARM-ARM64" lines are FileCheck directives that match the
// emitted LLVM IR — do not edit them casually.
char test_InterlockedOr8_acq(char volatile *value, char mask) {
|
||||
return _InterlockedOr8_acq(value, mask);
|
||||
}
|
||||
// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedOr8_acq(i8*{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
|
||||
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw or i8* %value, i8 %mask acquire
|
||||
// CHECK-ARM-ARM64: ret i8 [[RESULT:%[0-9]+]]
|
||||
// CHECK-ARM-ARM64: }
|
||||
|
||||
char test_InterlockedOr8_rel(char volatile *value, char mask) {
|
||||
return _InterlockedOr8_rel(value, mask);
|
||||
}
|
||||
// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedOr8_rel(i8*{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
|
||||
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw or i8* %value, i8 %mask release
|
||||
// CHECK-ARM-ARM64: ret i8 [[RESULT:%[0-9]+]]
|
||||
// CHECK-ARM-ARM64: }
|
||||
|
||||
char test_InterlockedOr8_nf(char volatile *value, char mask) {
|
||||
return _InterlockedOr8_nf(value, mask);
|
||||
}
|
||||
// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedOr8_nf(i8*{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
|
||||
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw or i8* %value, i8 %mask monotonic
|
||||
// CHECK-ARM-ARM64: ret i8 [[RESULT:%[0-9]+]]
|
||||
// CHECK-ARM-ARM64: }
|
||||
|
||||
short test_InterlockedOr16_acq(short volatile *value, short mask) {
|
||||
return _InterlockedOr16_acq(value, mask);
|
||||
}
|
||||
// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedOr16_acq(i16*{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
|
||||
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw or i16* %value, i16 %mask acquire
|
||||
// CHECK-ARM-ARM64: ret i16 [[RESULT:%[0-9]+]]
|
||||
// CHECK-ARM-ARM64: }
|
||||
|
||||
short test_InterlockedOr16_rel(short volatile *value, short mask) {
|
||||
return _InterlockedOr16_rel(value, mask);
|
||||
}
|
||||
// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedOr16_rel(i16*{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
|
||||
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw or i16* %value, i16 %mask release
|
||||
// CHECK-ARM-ARM64: ret i16 [[RESULT:%[0-9]+]]
|
||||
// CHECK-ARM-ARM64: }
|
||||
|
||||
short test_InterlockedOr16_nf(short volatile *value, short mask) {
|
||||
return _InterlockedOr16_nf(value, mask);
|
||||
}
|
||||
// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedOr16_nf(i16*{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
|
||||
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw or i16* %value, i16 %mask monotonic
|
||||
// CHECK-ARM-ARM64: ret i16 [[RESULT:%[0-9]+]]
|
||||
// CHECK-ARM-ARM64: }
|
||||
|
||||
long test_InterlockedOr_acq(long volatile *value, long mask) {
|
||||
return _InterlockedOr_acq(value, mask);
|
||||
}
|
||||
// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedOr_acq(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
|
||||
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw or i32* %value, i32 %mask acquire
|
||||
// CHECK-ARM-ARM64: ret i32 [[RESULT:%[0-9]+]]
|
||||
// CHECK-ARM-ARM64: }
|
||||
|
||||
long test_InterlockedOr_rel(long volatile *value, long mask) {
|
||||
return _InterlockedOr_rel(value, mask);
|
||||
}
|
||||
// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedOr_rel(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
|
||||
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw or i32* %value, i32 %mask release
|
||||
// CHECK-ARM-ARM64: ret i32 [[RESULT:%[0-9]+]]
|
||||
// CHECK-ARM-ARM64: }
|
||||
|
||||
long test_InterlockedOr_nf(long volatile *value, long mask) {
|
||||
return _InterlockedOr_nf(value, mask);
|
||||
}
|
||||
// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedOr_nf(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
|
||||
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw or i32* %value, i32 %mask monotonic
|
||||
// CHECK-ARM-ARM64: ret i32 [[RESULT:%[0-9]+]]
|
||||
// CHECK-ARM-ARM64: }
|
||||
|
||||
__int64 test_InterlockedOr64_acq(__int64 volatile *value, __int64 mask) {
|
||||
return _InterlockedOr64_acq(value, mask);
|
||||
}
|
||||
// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedOr64_acq(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
|
||||
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw or i64* %value, i64 %mask acquire
|
||||
// CHECK-ARM-ARM64: ret i64 [[RESULT:%[0-9]+]]
|
||||
// CHECK-ARM-ARM64: }
|
||||
|
||||
__int64 test_InterlockedOr64_rel(__int64 volatile *value, __int64 mask) {
|
||||
return _InterlockedOr64_rel(value, mask);
|
||||
}
|
||||
// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedOr64_rel(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
|
||||
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw or i64* %value, i64 %mask release
|
||||
// CHECK-ARM-ARM64: ret i64 [[RESULT:%[0-9]+]]
|
||||
// CHECK-ARM-ARM64: }
|
||||
|
||||
__int64 test_InterlockedOr64_nf(__int64 volatile *value, __int64 mask) {
|
||||
return _InterlockedOr64_nf(value, mask);
|
||||
}
|
||||
// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedOr64_nf(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
|
||||
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw or i64* %value, i64 %mask monotonic
|
||||
// CHECK-ARM-ARM64: ret i64 [[RESULT:%[0-9]+]]
|
||||
// CHECK-ARM-ARM64: }
|
||||
#endif
|
||||
|
||||
#if !defined(__aarch64__)
|
||||
|
|
Loading…
Reference in New Issue