[COFF, ARM64] Implement InterlockedIncrement*_* builtins
This is the seventh in a series of patches to move intrinsic definitions out of intrin.h.

Differential Revision: https://reviews.llvm.org/D54067
llvm-svn: 346207
commit fdf74d9751 (parent c89157b5c1)
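The nine new intrinsics combine an operand width (16-bit short, 32-bit long, 64-bit __int64) with one of MSVC's ARM memory-ordering suffixes: _acq (acquire), _rel (release), and _nf ("no fence", i.e. relaxed). Each atomically increments *_Value and returns the incremented value. A minimal usage sketch follows; the function names are illustrative and not part of the patch:

#include <intrin.h>

/* Hypothetical reference-count helpers built on the new builtins.
 * Each intrinsic atomically adds 1 to *Addend and returns the NEW value. */
long retain(long volatile *RefCount) {
  return _InterlockedIncrement_acq(RefCount);  /* acquire ordering */
}

short bump_stat(short volatile *Counter) {
  return _InterlockedIncrement16_nf(Counter);  /* relaxed: no fence */
}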
@@ -182,6 +182,16 @@ TARGET_HEADER_BUILTIN(_InterlockedAnd64_acq, "LLiLLiD*LLi", "nh", "intrin.h", AL
 TARGET_HEADER_BUILTIN(_InterlockedAnd64_nf, "LLiLLiD*LLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
 TARGET_HEADER_BUILTIN(_InterlockedAnd64_rel, "LLiLLiD*LLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
 
+TARGET_HEADER_BUILTIN(_InterlockedIncrement16_acq, "ssD*", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedIncrement16_nf, "ssD*", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedIncrement16_rel, "ssD*", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedIncrement_acq, "LiLiD*", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedIncrement_nf, "LiLiD*", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedIncrement_rel, "LiLiD*", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedIncrement64_acq, "LLiLLiD*", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedIncrement64_nf, "LLiLLiD*", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedIncrement64_rel, "LLiLLiD*", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
+
 TARGET_HEADER_BUILTIN(_ReadWriteBarrier, "v", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
 TARGET_HEADER_BUILTIN(__getReg, "ULLii", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
 TARGET_HEADER_BUILTIN(_ReadStatusReg, "ii", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
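For readers unfamiliar with Clang's builtin signature encoding: the string reads return type first, then argument types, where 's' is short, 'Li' is long, 'LLi' is long long, 'D' marks the pointee volatile, and '*' forms a pointer; as I read the attribute string, "nh" marks the builtin nothrow ('n') and available only through its header ('h'). Decoded as C prototypes (my reading, matching the intrin.h declarations later in this patch):

/* "ssD*"      */ short     _InterlockedIncrement16_acq(short volatile *);
/* "LiLiD*"    */ long      _InterlockedIncrement_acq(long volatile *);
/* "LLiLLiD*"  */ long long _InterlockedIncrement64_acq(long long volatile *);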
@@ -308,6 +308,16 @@ TARGET_HEADER_BUILTIN(_InterlockedAnd64_acq, "LLiLLiD*LLi", "nh", "intrin.h", AL
 TARGET_HEADER_BUILTIN(_InterlockedAnd64_nf, "LLiLLiD*LLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
 TARGET_HEADER_BUILTIN(_InterlockedAnd64_rel, "LLiLLiD*LLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
 
+TARGET_HEADER_BUILTIN(_InterlockedIncrement16_acq, "ssD*", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedIncrement16_nf, "ssD*", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedIncrement16_rel, "ssD*", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedIncrement_acq, "LiLiD*", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedIncrement_nf, "LiLiD*", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedIncrement_rel, "LiLiD*", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedIncrement64_acq, "LLiLLiD*", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedIncrement64_nf, "LLiLLiD*", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedIncrement64_rel, "LLiLLiD*", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
+
 #undef BUILTIN
 #undef LANGBUILTIN
 #undef TARGET_HEADER_BUILTIN
@@ -272,6 +272,19 @@ Value *EmitAtomicCmpXchgForMSIntrin(CodeGenFunction &CGF, const CallExpr *E,
   return CGF.Builder.CreateExtractValue(Result, 0);
 }
 
+static Value *EmitAtomicIncrementValue(CodeGenFunction &CGF, const CallExpr *E,
+    AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) {
+  assert(E->getArg(0)->getType()->isPointerType());
+
+  auto *IntTy = CGF.ConvertType(E->getType());
+  auto *Result = CGF.Builder.CreateAtomicRMW(
+                   AtomicRMWInst::Add,
+                   CGF.EmitScalarExpr(E->getArg(0)),
+                   ConstantInt::get(IntTy, 1),
+                   Ordering);
+  return CGF.Builder.CreateAdd(Result, ConstantInt::get(IntTy, 1));
+}
+
 // Emit a simple mangled intrinsic that has 1 argument and a return type
 // matching the argument type.
 static Value *emitUnaryBuiltin(CodeGenFunction &CGF,
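The trailing CreateAdd is the subtle part of this helper: LLVM's atomicrmw add yields the value the location held before the addition, whereas the _InterlockedIncrement family is defined to return the incremented value. A C sketch of the same semantics using the portable __atomic builtins (an illustration, not code from the patch):

/* Equivalent of EmitAtomicIncrementValue's output, in C terms:
 * fetch-add returns the OLD value, so add 1 once more for the NEW one. */
long increment_semantics(long volatile *Addend) {
  long Old = __atomic_fetch_add(Addend, 1, __ATOMIC_SEQ_CST);
  return Old + 1;  /* mirrors Builder.CreateAdd(Result, 1) */
}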
@@ -808,6 +821,9 @@ enum class CodeGenFunction::MSVCIntrin {
   _InterlockedAnd_acq,
   _InterlockedAnd_rel,
   _InterlockedAnd_nf,
+  _InterlockedIncrement_acq,
+  _InterlockedIncrement_rel,
+  _InterlockedIncrement_nf,
   __fastfail,
 };
 
@@ -925,6 +941,12 @@ Value *CodeGenFunction::EmitMSVCBuiltinExpr(MSVCIntrin BuiltinID,
   case MSVCIntrin::_InterlockedAnd_nf:
     return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E,
                                  AtomicOrdering::Monotonic);
+  case MSVCIntrin::_InterlockedIncrement_acq:
+    return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Acquire);
+  case MSVCIntrin::_InterlockedIncrement_rel:
+    return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Release);
+  case MSVCIntrin::_InterlockedIncrement_nf:
+    return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Monotonic);
 
   case MSVCIntrin::_InterlockedDecrement: {
     llvm::Type *IntTy = ConvertType(E->getType());
@@ -935,15 +957,8 @@ Value *CodeGenFunction::EmitMSVCBuiltinExpr(MSVCIntrin BuiltinID,
         llvm::AtomicOrdering::SequentiallyConsistent);
     return Builder.CreateSub(RMWI, ConstantInt::get(IntTy, 1));
   }
-  case MSVCIntrin::_InterlockedIncrement: {
-    llvm::Type *IntTy = ConvertType(E->getType());
-    AtomicRMWInst *RMWI = Builder.CreateAtomicRMW(
-        AtomicRMWInst::Add,
-        EmitScalarExpr(E->getArg(0)),
-        ConstantInt::get(IntTy, 1),
-        llvm::AtomicOrdering::SequentiallyConsistent);
-    return Builder.CreateAdd(RMWI, ConstantInt::get(IntTy, 1));
-  }
+  case MSVCIntrin::_InterlockedIncrement:
+    return EmitAtomicIncrementValue(*this, E);
 
   case MSVCIntrin::__fastfail: {
     // Request immediate process termination from the kernel. The instruction
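With the helper available, the pre-existing sequentially consistent _InterlockedIncrement case shrinks from an open-coded atomicrmw-plus-add to a single call that relies on the helper's default SequentiallyConsistent ordering argument.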
@@ -6298,6 +6313,18 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
   case ARM::BI_InterlockedAnd_nf:
   case ARM::BI_InterlockedAnd64_nf:
     return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd_nf, E);
+  case ARM::BI_InterlockedIncrement16_acq:
+  case ARM::BI_InterlockedIncrement_acq:
+  case ARM::BI_InterlockedIncrement64_acq:
+    return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement_acq, E);
+  case ARM::BI_InterlockedIncrement16_rel:
+  case ARM::BI_InterlockedIncrement_rel:
+  case ARM::BI_InterlockedIncrement64_rel:
+    return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement_rel, E);
+  case ARM::BI_InterlockedIncrement16_nf:
+  case ARM::BI_InterlockedIncrement_nf:
+  case ARM::BI_InterlockedIncrement64_nf:
+    return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement_nf, E);
   }
 
   // Get the last argument, which specifies the vector type.
@@ -8874,6 +8901,18 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
   case AArch64::BI_InterlockedAnd_nf:
   case AArch64::BI_InterlockedAnd64_nf:
     return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd_nf, E);
+  case AArch64::BI_InterlockedIncrement16_acq:
+  case AArch64::BI_InterlockedIncrement_acq:
+  case AArch64::BI_InterlockedIncrement64_acq:
+    return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement_acq, E);
+  case AArch64::BI_InterlockedIncrement16_rel:
+  case AArch64::BI_InterlockedIncrement_rel:
+  case AArch64::BI_InterlockedIncrement64_rel:
+    return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement_rel, E);
+  case AArch64::BI_InterlockedIncrement16_nf:
+  case AArch64::BI_InterlockedIncrement_nf:
+  case AArch64::BI_InterlockedIncrement64_nf:
+    return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement_nf, E);
 
   case AArch64::BI_InterlockedAdd: {
     Value *Arg0 = EmitScalarExpr(E->getArg(0));
@@ -346,42 +346,15 @@ __int64 _InterlockedExchangeAdd64_rel(__int64 volatile *_Addend, __int64 _Value)
 |* Interlocked Increment
 \*----------------------------------------------------------------------------*/
 #if defined(__arm__) || defined(__aarch64__)
-static __inline__ short __DEFAULT_FN_ATTRS
-_InterlockedIncrement16_acq(short volatile *_Value) {
-  return __atomic_add_fetch(_Value, 1, __ATOMIC_ACQUIRE);
-}
-static __inline__ short __DEFAULT_FN_ATTRS
-_InterlockedIncrement16_nf(short volatile *_Value) {
-  return __atomic_add_fetch(_Value, 1, __ATOMIC_RELAXED);
-}
-static __inline__ short __DEFAULT_FN_ATTRS
-_InterlockedIncrement16_rel(short volatile *_Value) {
-  return __atomic_add_fetch(_Value, 1, __ATOMIC_RELEASE);
-}
-static __inline__ long __DEFAULT_FN_ATTRS
-_InterlockedIncrement_acq(long volatile *_Value) {
-  return __atomic_add_fetch(_Value, 1, __ATOMIC_ACQUIRE);
-}
-static __inline__ long __DEFAULT_FN_ATTRS
-_InterlockedIncrement_nf(long volatile *_Value) {
-  return __atomic_add_fetch(_Value, 1, __ATOMIC_RELAXED);
-}
-static __inline__ long __DEFAULT_FN_ATTRS
-_InterlockedIncrement_rel(long volatile *_Value) {
-  return __atomic_add_fetch(_Value, 1, __ATOMIC_RELEASE);
-}
-static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedIncrement64_acq(__int64 volatile *_Value) {
-  return __atomic_add_fetch(_Value, 1, __ATOMIC_ACQUIRE);
-}
-static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedIncrement64_nf(__int64 volatile *_Value) {
-  return __atomic_add_fetch(_Value, 1, __ATOMIC_RELAXED);
-}
-static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedIncrement64_rel(__int64 volatile *_Value) {
-  return __atomic_add_fetch(_Value, 1, __ATOMIC_RELEASE);
-}
+short _InterlockedIncrement16_acq(short volatile *_Value);
+short _InterlockedIncrement16_nf(short volatile *_Value);
+short _InterlockedIncrement16_rel(short volatile *_Value);
+long _InterlockedIncrement_acq(long volatile *_Value);
+long _InterlockedIncrement_nf(long volatile *_Value);
+long _InterlockedIncrement_rel(long volatile *_Value);
+__int64 _InterlockedIncrement64_acq(__int64 volatile *_Value);
+__int64 _InterlockedIncrement64_nf(__int64 volatile *_Value);
+__int64 _InterlockedIncrement64_rel(__int64 volatile *_Value);
 #endif
 /*----------------------------------------------------------------------------*\
 |* Interlocked Decrement
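Because the increment intrinsics are now TARGET_HEADER_BUILTINs with dedicated code generation, intrin.h only needs to declare them: Clang recognizes the calls and emits the atomicrmw sequence itself, instead of relying on the old inline __atomic_add_fetch wrappers. The test updates below verify the emitted IR.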
@@ -1178,6 +1178,87 @@ __int64 test_InterlockedAnd64_nf(__int64 volatile *value, __int64 mask) {
 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw and i64* %value, i64 %mask monotonic
 // CHECK-ARM-ARM64: ret i64 [[RESULT:%[0-9]+]]
 // CHECK-ARM-ARM64: }
+
+short test_InterlockedIncrement16_acq(short volatile *Addend) {
+  return _InterlockedIncrement16_acq(Addend);
+}
+// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedIncrement16_acq(i16*{{[a-z_ ]*}}%Addend){{.*}}{
+// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw add i16* %Addend, i16 1 acquire
+// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i16 [[TMP]], 1
+// CHECK-ARM-ARM64: ret i16 [[RESULT]]
+// CHECK-ARM-ARM64: }
+
+short test_InterlockedIncrement16_rel(short volatile *Addend) {
+  return _InterlockedIncrement16_rel(Addend);
+}
+// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedIncrement16_rel(i16*{{[a-z_ ]*}}%Addend){{.*}}{
+// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw add i16* %Addend, i16 1 release
+// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i16 [[TMP]], 1
+// CHECK-ARM-ARM64: ret i16 [[RESULT]]
+// CHECK-ARM-ARM64: }
+
+short test_InterlockedIncrement16_nf(short volatile *Addend) {
+  return _InterlockedIncrement16_nf(Addend);
+}
+// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedIncrement16_nf(i16*{{[a-z_ ]*}}%Addend){{.*}}{
+// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw add i16* %Addend, i16 1 monotonic
+// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i16 [[TMP]], 1
+// CHECK-ARM-ARM64: ret i16 [[RESULT]]
+// CHECK-ARM-ARM64: }
+
+long test_InterlockedIncrement_acq(long volatile *Addend) {
+  return _InterlockedIncrement_acq(Addend);
+}
+// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedIncrement_acq(i32*{{[a-z_ ]*}}%Addend){{.*}}{
+// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw add i32* %Addend, i32 1 acquire
+// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i32 [[TMP]], 1
+// CHECK-ARM-ARM64: ret i32 [[RESULT]]
+// CHECK-ARM-ARM64: }
+
+long test_InterlockedIncrement_rel(long volatile *Addend) {
+  return _InterlockedIncrement_rel(Addend);
+}
+// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedIncrement_rel(i32*{{[a-z_ ]*}}%Addend){{.*}}{
+// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw add i32* %Addend, i32 1 release
+// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i32 [[TMP]], 1
+// CHECK-ARM-ARM64: ret i32 [[RESULT]]
+// CHECK-ARM-ARM64: }
+
+long test_InterlockedIncrement_nf(long volatile *Addend) {
+  return _InterlockedIncrement_nf(Addend);
+}
+// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedIncrement_nf(i32*{{[a-z_ ]*}}%Addend){{.*}}{
+// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw add i32* %Addend, i32 1 monotonic
+// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i32 [[TMP]], 1
+// CHECK-ARM-ARM64: ret i32 [[RESULT]]
+// CHECK-ARM-ARM64: }
+
+__int64 test_InterlockedIncrement64_acq(__int64 volatile *Addend) {
+  return _InterlockedIncrement64_acq(Addend);
+}
+// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedIncrement64_acq(i64*{{[a-z_ ]*}}%Addend){{.*}}{
+// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw add i64* %Addend, i64 1 acquire
+// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i64 [[TMP]], 1
+// CHECK-ARM-ARM64: ret i64 [[RESULT]]
+// CHECK-ARM-ARM64: }
+
+__int64 test_InterlockedIncrement64_rel(__int64 volatile *Addend) {
+  return _InterlockedIncrement64_rel(Addend);
+}
+// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedIncrement64_rel(i64*{{[a-z_ ]*}}%Addend){{.*}}{
+// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw add i64* %Addend, i64 1 release
+// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i64 [[TMP]], 1
+// CHECK-ARM-ARM64: ret i64 [[RESULT]]
+// CHECK-ARM-ARM64: }
+
+__int64 test_InterlockedIncrement64_nf(__int64 volatile *Addend) {
+  return _InterlockedIncrement64_nf(Addend);
+}
+// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedIncrement64_nf(i64*{{[a-z_ ]*}}%Addend){{.*}}{
+// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw add i64* %Addend, i64 1 monotonic
+// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i64 [[TMP]], 1
+// CHECK-ARM-ARM64: ret i64 [[RESULT]]
+// CHECK-ARM-ARM64: }
 #endif
 
 #if !defined(__aarch64__)