[MS] Make __iso_volatile_* available on all targets

Future versions of MSVC make these intrinsics available on x86 & x64,
according to: http://lists.llvm.org/pipermail/cfe-dev/2019-March/061711.html

The purpose of these builtins is to emit plain, non-atomic, volatile
stores when /volatile:ms (-cc1 -fms-volatile) is enabled.

llvm-svn: 357220

parent 6c82695753
commit 73253bdefc
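As a usage sketch (not part of the commit itself): with -fms-extensions the
builtins are available directly, and they are the escape hatch for code that
wants an ordinary volatile access even under /volatile:ms. The register name
below is hypothetical; the IR noted in the comments matches the FileCheck
patterns this commit adds to the tests further down.

    volatile int *g_reg; /* made-up example of a device register */

    void write_reg(int v) {
      /* Always lowers to a plain `store volatile i32`, even when
         /volatile:ms (-cc1 -fms-volatile) would give an ordinary
         volatile store release semantics. */
      __iso_volatile_store32(g_reg, v);
    }

    int read_reg(void) {
      /* Likewise, always a plain `load volatile i32`. */
      return __iso_volatile_load32(g_reg);
    }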
@@ -820,6 +820,14 @@ LANGBUILTIN(_interlockedbittestandset64, "UcWiD*Wi", "n", ALL_MS_LANGUAGES)
 LANGBUILTIN(_interlockedbittestandset_acq, "UcNiD*Ni", "n", ALL_MS_LANGUAGES)
 LANGBUILTIN(_interlockedbittestandset_nf, "UcNiD*Ni", "n", ALL_MS_LANGUAGES)
 LANGBUILTIN(_interlockedbittestandset_rel, "UcNiD*Ni", "n", ALL_MS_LANGUAGES)
+LANGBUILTIN(__iso_volatile_load8, "ccCD*", "n", ALL_MS_LANGUAGES)
+LANGBUILTIN(__iso_volatile_load16, "ssCD*", "n", ALL_MS_LANGUAGES)
+LANGBUILTIN(__iso_volatile_load32, "iiCD*", "n", ALL_MS_LANGUAGES)
+LANGBUILTIN(__iso_volatile_load64, "LLiLLiCD*", "n", ALL_MS_LANGUAGES)
+LANGBUILTIN(__iso_volatile_store8, "vcD*c", "n", ALL_MS_LANGUAGES)
+LANGBUILTIN(__iso_volatile_store16, "vsD*s", "n", ALL_MS_LANGUAGES)
+LANGBUILTIN(__iso_volatile_store32, "viD*i", "n", ALL_MS_LANGUAGES)
+LANGBUILTIN(__iso_volatile_store64, "vLLiD*LLi", "n", ALL_MS_LANGUAGES)
 LANGBUILTIN(__noop, "i.", "n", ALL_MS_LANGUAGES)
 LANGBUILTIN(__lzcnt16, "UsUs", "nc", ALL_MS_LANGUAGES)
 LANGBUILTIN(__lzcnt, "UiUi", "nc", ALL_MS_LANGUAGES)
@@ -78,16 +78,6 @@ LANGBUILTIN(__wfi, "v", "", ALL_MS_LANGUAGES)
 LANGBUILTIN(__sev, "v", "", ALL_MS_LANGUAGES)
 LANGBUILTIN(__sevl, "v", "", ALL_MS_LANGUAGES)
-
-// MSVC intrinsics for volatile but non-acquire/release loads and stores
-LANGBUILTIN(__iso_volatile_load8, "ccCD*", "n", ALL_MS_LANGUAGES)
-LANGBUILTIN(__iso_volatile_load16, "ssCD*", "n", ALL_MS_LANGUAGES)
-LANGBUILTIN(__iso_volatile_load32, "iiCD*", "n", ALL_MS_LANGUAGES)
-LANGBUILTIN(__iso_volatile_load64, "LLiLLiCD*", "n", ALL_MS_LANGUAGES)
-LANGBUILTIN(__iso_volatile_store8, "vcD*c", "n", ALL_MS_LANGUAGES)
-LANGBUILTIN(__iso_volatile_store16, "vsD*s", "n", ALL_MS_LANGUAGES)
-LANGBUILTIN(__iso_volatile_store32, "viD*i", "n", ALL_MS_LANGUAGES)
-LANGBUILTIN(__iso_volatile_store64, "vLLiD*LLi", "n", ALL_MS_LANGUAGES)
 
 TARGET_HEADER_BUILTIN(_BitScanForward, "UcUNi*UNi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
 TARGET_HEADER_BUILTIN(_BitScanReverse, "UcUNi*UNi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
 TARGET_HEADER_BUILTIN(_BitScanForward64, "UcUNi*ULLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
@@ -201,14 +201,6 @@ LANGBUILTIN(__sevl, "v", "", ALL_MS_LANGUAGES)
 LANGBUILTIN(__dmb, "vUi", "nc", ALL_MS_LANGUAGES)
 LANGBUILTIN(__dsb, "vUi", "nc", ALL_MS_LANGUAGES)
 LANGBUILTIN(__isb, "vUi", "nc", ALL_MS_LANGUAGES)
-LANGBUILTIN(__iso_volatile_load8, "ccCD*", "n", ALL_MS_LANGUAGES)
-LANGBUILTIN(__iso_volatile_load16, "ssCD*", "n", ALL_MS_LANGUAGES)
-LANGBUILTIN(__iso_volatile_load32, "iiCD*", "n", ALL_MS_LANGUAGES)
-LANGBUILTIN(__iso_volatile_load64, "LLiLLiCD*", "n", ALL_MS_LANGUAGES)
-LANGBUILTIN(__iso_volatile_store8, "vcD*c", "n", ALL_MS_LANGUAGES)
-LANGBUILTIN(__iso_volatile_store16, "vsD*s", "n", ALL_MS_LANGUAGES)
-LANGBUILTIN(__iso_volatile_store32, "viD*i", "n", ALL_MS_LANGUAGES)
-LANGBUILTIN(__iso_volatile_store64, "vLLiD*LLi", "n", ALL_MS_LANGUAGES)
 LANGBUILTIN(__ldrexd, "WiWiCD*", "", ALL_MS_LANGUAGES)
 LANGBUILTIN(_MoveFromCoprocessor, "UiIUiIUiIUiIUiIUi", "", ALL_MS_LANGUAGES)
 LANGBUILTIN(_MoveFromCoprocessor2, "UiIUiIUiIUiIUiIUi", "", ALL_MS_LANGUAGES)
@@ -298,6 +298,34 @@ static Value *EmitAtomicDecrementValue(CodeGenFunction &CGF, const CallExpr *E,
   return CGF.Builder.CreateSub(Result, ConstantInt::get(IntTy, 1));
 }
+
+// Build a plain volatile load.
+static Value *EmitISOVolatileLoad(CodeGenFunction &CGF, const CallExpr *E) {
+  Value *Ptr = CGF.EmitScalarExpr(E->getArg(0));
+  QualType ElTy = E->getArg(0)->getType()->getPointeeType();
+  CharUnits LoadSize = CGF.getContext().getTypeSizeInChars(ElTy);
+  llvm::Type *ITy =
+      llvm::IntegerType::get(CGF.getLLVMContext(), LoadSize.getQuantity() * 8);
+  Ptr = CGF.Builder.CreateBitCast(Ptr, ITy->getPointerTo());
+  llvm::LoadInst *Load = CGF.Builder.CreateAlignedLoad(Ptr, LoadSize);
+  Load->setVolatile(true);
+  return Load;
+}
+
+// Build a plain volatile store.
+static Value *EmitISOVolatileStore(CodeGenFunction &CGF, const CallExpr *E) {
+  Value *Ptr = CGF.EmitScalarExpr(E->getArg(0));
+  Value *Value = CGF.EmitScalarExpr(E->getArg(1));
+  QualType ElTy = E->getArg(0)->getType()->getPointeeType();
+  CharUnits StoreSize = CGF.getContext().getTypeSizeInChars(ElTy);
+  llvm::Type *ITy =
+      llvm::IntegerType::get(CGF.getLLVMContext(), StoreSize.getQuantity() * 8);
+  Ptr = CGF.Builder.CreateBitCast(Ptr, ITy->getPointerTo());
+  llvm::StoreInst *Store =
+      CGF.Builder.CreateAlignedStore(Value, Ptr, StoreSize);
+  Store->setVolatile(true);
+  return Store;
+}
 
 // Emit a simple mangled intrinsic that has 1 argument and a return type
 // matching the argument type.
 static Value *emitUnaryBuiltin(CodeGenFunction &CGF,
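A note on the helpers above (an illustrative sketch, not part of the commit):
the pointee size in chars is scaled to bits to pick the LLVM integer type, so
each builtin emits exactly one volatile access of exactly the matching width.
The function names below are made up; the IR in the comments mirrors the
FileCheck lines in the test updates later in this diff.

    short load16(short volatile *p) {
      return __iso_volatile_load16(p); /* -> load volatile i16, i16* %p */
    }

    void store64(__int64 volatile *p, __int64 v) {
      __iso_volatile_store64(p, v);    /* -> store volatile i64 %v, i64* %p */
    }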
@@ -3341,6 +3369,19 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
   case Builtin::BI_interlockedbittestandreset_nf:
     return RValue::get(EmitBitTestIntrinsic(*this, BuiltinID, E));
+
+    // These builtins exist to emit regular volatile loads and stores not
+    // affected by the -fms-volatile setting.
+  case Builtin::BI__iso_volatile_load8:
+  case Builtin::BI__iso_volatile_load16:
+  case Builtin::BI__iso_volatile_load32:
+  case Builtin::BI__iso_volatile_load64:
+    return RValue::get(EmitISOVolatileLoad(*this, E));
+  case Builtin::BI__iso_volatile_store8:
+  case Builtin::BI__iso_volatile_store16:
+  case Builtin::BI__iso_volatile_store32:
+  case Builtin::BI__iso_volatile_store64:
+    return RValue::get(EmitISOVolatileStore(*this, E));
+
   case Builtin::BI__exception_code:
   case Builtin::BI_exception_code:
     return RValue::get(EmitSEHExceptionCode());
@@ -5835,34 +5876,6 @@ static bool HasExtraNeonArgument(unsigned BuiltinID) {
   return true;
 }
-
-Value *CodeGenFunction::EmitISOVolatileLoad(const CallExpr *E) {
-  Value *Ptr = EmitScalarExpr(E->getArg(0));
-  QualType ElTy = E->getArg(0)->getType()->getPointeeType();
-  CharUnits LoadSize = getContext().getTypeSizeInChars(ElTy);
-  llvm::Type *ITy = llvm::IntegerType::get(getLLVMContext(),
-                                           LoadSize.getQuantity() * 8);
-  Ptr = Builder.CreateBitCast(Ptr, ITy->getPointerTo());
-  llvm::LoadInst *Load =
-      Builder.CreateAlignedLoad(Ptr, LoadSize);
-  Load->setVolatile(true);
-  return Load;
-}
-
-Value *CodeGenFunction::EmitISOVolatileStore(const CallExpr *E) {
-  Value *Ptr = EmitScalarExpr(E->getArg(0));
-  Value *Value = EmitScalarExpr(E->getArg(1));
-  QualType ElTy = E->getArg(0)->getType()->getPointeeType();
-  CharUnits StoreSize = getContext().getTypeSizeInChars(ElTy);
-  llvm::Type *ITy = llvm::IntegerType::get(getLLVMContext(),
-                                           StoreSize.getQuantity() * 8);
-  Ptr = Builder.CreateBitCast(Ptr, ITy->getPointerTo());
-  llvm::StoreInst *Store =
-      Builder.CreateAlignedStore(Value, Ptr,
-                                 StoreSize);
-  Store->setVolatile(true);
-  return Store;
-}
 
 Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
                                            const CallExpr *E,
                                            llvm::Triple::ArchType Arch) {
@@ -6102,19 +6115,6 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
     return Builder.CreateCall(F, {StoreVal, StoreAddr}, "strex");
   }
-
-  switch (BuiltinID) {
-  case ARM::BI__iso_volatile_load8:
-  case ARM::BI__iso_volatile_load16:
-  case ARM::BI__iso_volatile_load32:
-  case ARM::BI__iso_volatile_load64:
-    return EmitISOVolatileLoad(E);
-  case ARM::BI__iso_volatile_store8:
-  case ARM::BI__iso_volatile_store16:
-  case ARM::BI__iso_volatile_store32:
-  case ARM::BI__iso_volatile_store64:
-    return EmitISOVolatileStore(E);
-  }
 
   if (BuiltinID == ARM::BI__builtin_arm_clrex) {
     Function *F = CGM.getIntrinsic(Intrinsic::arm_clrex);
     return Builder.CreateCall(F);
@@ -8926,16 +8926,6 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
     Int = Intrinsic::aarch64_neon_suqadd;
     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vuqadd");
   }
-  case AArch64::BI__iso_volatile_load8:
-  case AArch64::BI__iso_volatile_load16:
-  case AArch64::BI__iso_volatile_load32:
-  case AArch64::BI__iso_volatile_load64:
-    return EmitISOVolatileLoad(E);
-  case AArch64::BI__iso_volatile_store8:
-  case AArch64::BI__iso_volatile_store16:
-  case AArch64::BI__iso_volatile_store32:
-  case AArch64::BI__iso_volatile_store64:
-    return EmitISOVolatileStore(E);
  case AArch64::BI_BitScanForward:
  case AArch64::BI_BitScanForward64:
    return EmitMSVCBuiltinExpr(MSVCIntrin::_BitScanForward, E);
@@ -3728,9 +3728,6 @@ public:
                                       Address PtrOp0, Address PtrOp1,
                                       llvm::Triple::ArchType Arch);
 
-  llvm::Value *EmitISOVolatileLoad(const CallExpr *E);
-  llvm::Value *EmitISOVolatileStore(const CallExpr *E);
-
   llvm::Function *LookupNeonLLVMIntrinsic(unsigned IntrinsicID,
                                           unsigned Modifier, llvm::Type *ArgTy,
                                           const CallExpr *E);
@@ -494,6 +494,35 @@ long test_InterlockedDecrement(long volatile *Addend) {
 // CHECK: ret i32 [[RESULT]]
 // CHECK: }
+
+char test_iso_volatile_load8(char volatile *p) { return __iso_volatile_load8(p); }
+short test_iso_volatile_load16(short volatile *p) { return __iso_volatile_load16(p); }
+int test_iso_volatile_load32(int volatile *p) { return __iso_volatile_load32(p); }
+__int64 test_iso_volatile_load64(__int64 volatile *p) { return __iso_volatile_load64(p); }
+
+// CHECK: define{{.*}}i8 @test_iso_volatile_load8(i8*{{[a-z_ ]*}}%p)
+// CHECK: = load volatile i8, i8* %p
+// CHECK: define{{.*}}i16 @test_iso_volatile_load16(i16*{{[a-z_ ]*}}%p)
+// CHECK: = load volatile i16, i16* %p
+// CHECK: define{{.*}}i32 @test_iso_volatile_load32(i32*{{[a-z_ ]*}}%p)
+// CHECK: = load volatile i32, i32* %p
+// CHECK: define{{.*}}i64 @test_iso_volatile_load64(i64*{{[a-z_ ]*}}%p)
+// CHECK: = load volatile i64, i64* %p
+
+void test_iso_volatile_store8(char volatile *p, char v) { __iso_volatile_store8(p, v); }
+void test_iso_volatile_store16(short volatile *p, short v) { __iso_volatile_store16(p, v); }
+void test_iso_volatile_store32(int volatile *p, int v) { __iso_volatile_store32(p, v); }
+void test_iso_volatile_store64(__int64 volatile *p, __int64 v) { __iso_volatile_store64(p, v); }
+
+// CHECK: define{{.*}}void @test_iso_volatile_store8(i8*{{[a-z_ ]*}}%p, i8 {{[a-z_ ]*}}%v)
+// CHECK: store volatile i8 %v, i8* %p
+// CHECK: define{{.*}}void @test_iso_volatile_store16(i16*{{[a-z_ ]*}}%p, i16 {{[a-z_ ]*}}%v)
+// CHECK: store volatile i16 %v, i16* %p
+// CHECK: define{{.*}}void @test_iso_volatile_store32(i32*{{[a-z_ ]*}}%p, i32 {{[a-z_ ]*}}%v)
+// CHECK: store volatile i32 %v, i32* %p
+// CHECK: define{{.*}}void @test_iso_volatile_store64(i64*{{[a-z_ ]*}}%p, i64 {{[a-z_ ]*}}%v)
+// CHECK: store volatile i64 %v, i64* %p
+
 #if defined(__x86_64__) || defined(__arm__) || defined(__aarch64__)
 __int64 test_InterlockedExchange64(__int64 volatile *value, __int64 mask) {
   return _InterlockedExchange64(value, mask);
@@ -1,13 +0,0 @@
-// REQUIRES: aarch64-registered-target
-// RUN: %clang_cc1 -triple aarch64-win32 -emit-llvm -fms-extensions -fms-volatile -o - < %s | FileCheck %s
-
-void test1(int volatile *p, int v) {
-  __iso_volatile_store32(p, v);
-  // CHECK-LABEL: @test1
-  // CHECK: store volatile {{.*}}, {{.*}}
-}
-int test2(const int volatile *p) {
-  return __iso_volatile_load32(p);
-  // CHECK-LABEL: @test2
-  // CHECK: load volatile {{.*}}
-}
@@ -1,13 +0,0 @@
-// REQUIRES: arm-registered-target
-// RUN: %clang_cc1 -triple thumbv7-win32 -emit-llvm -fms-extensions -fms-volatile -o - < %s | FileCheck %s
-
-void test1(int volatile *p, int v) {
-  __iso_volatile_store32(p, v);
-  // CHECK-LABEL: @test1
-  // CHECK: store volatile {{.*}}, {{.*}}
-}
-int test2(const int volatile *p) {
-  return __iso_volatile_load32(p);
-  // CHECK-LABEL: @test2
-  // CHECK: load volatile {{.*}}
-}