forked from OSchip/llvm-project
Fix atomic libcall.
This commit fixes a cast-instruction assertion failure caused by an incompatible type cast. The failure only occurs when the target requires atomic libcalls. llvm-svn: 204834
This commit is contained in:
parent
ed2cd39b81
commit
74798a34e6
|
@ -577,6 +577,8 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
|
|||
Args.add(RValue::get(EmitCastToVoidPtr(Ptr)), getContext().VoidPtrTy);
|
||||
|
||||
std::string LibCallName;
|
||||
QualType LoweredMemTy =
|
||||
MemTy->isPointerType() ? getContext().getIntPtrType() : MemTy;
|
||||
QualType RetTy;
|
||||
bool HaveRetTy = false;
|
||||
switch (E->getOp()) {
|
||||
|
@ -632,7 +634,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
|
|||
case AtomicExpr::AO__c11_atomic_fetch_add:
|
||||
case AtomicExpr::AO__atomic_fetch_add:
|
||||
LibCallName = "__atomic_fetch_add";
|
||||
AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
|
||||
AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, LoweredMemTy,
|
||||
E->getExprLoc());
|
||||
break;
|
||||
// T __atomic_fetch_and_N(T *mem, T val, int order)
|
||||
|
@ -653,7 +655,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
|
|||
case AtomicExpr::AO__c11_atomic_fetch_sub:
|
||||
case AtomicExpr::AO__atomic_fetch_sub:
|
||||
LibCallName = "__atomic_fetch_sub";
|
||||
AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
|
||||
AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, LoweredMemTy,
|
||||
E->getExprLoc());
|
||||
break;
|
||||
// T __atomic_fetch_xor_N(T *mem, T val, int order)
|
||||
|
|
|
@ -0,0 +1,37 @@
|
|||
// RUN: %clang_cc1 < %s -triple armv5e-none-linux-gnueabi -emit-llvm -O1 | FileCheck %s
|
||||
|
||||
// Local mirror of the C11 memory_order enumeration; declared here because
// this -cc1 test does not include any system headers.
enum memory_order {
  memory_order_relaxed, memory_order_consume, memory_order_acquire,
  memory_order_release, memory_order_acq_rel, memory_order_seq_cst
};
|
||||
|
||||
// C11 fetch_add on an _Atomic(int *): the addend must be scaled by
// sizeof(int), so "3" becomes "i32 12" in the sized libcall below.
int *test_c11_atomic_fetch_add_int_ptr(_Atomic(int *) *p) {
  // CHECK: test_c11_atomic_fetch_add_int_ptr
  // CHECK: {{%[^ ]*}} = tail call i32* @__atomic_fetch_add_4(i8* {{%[0-9]+}}, i32 12, i32 5)
  return __c11_atomic_fetch_add(p, 3, memory_order_seq_cst);
}
|
||||
|
||||
// C11 fetch_sub on an _Atomic(int *): the operand must be scaled by
// sizeof(int), so "5" becomes "i32 20" in the sized libcall below.
int *test_c11_atomic_fetch_sub_int_ptr(_Atomic(int *) *p) {
  // CHECK: test_c11_atomic_fetch_sub_int_ptr
  // CHECK: {{%[^ ]*}} = tail call i32* @__atomic_fetch_sub_4(i8* {{%[0-9]+}}, i32 20, i32 5)
  return __c11_atomic_fetch_sub(p, 5, memory_order_seq_cst);
}
|
||||
|
||||
// C11 fetch_add on a plain _Atomic(int): no scaling ("3" stays "i32 3");
// the call goes through a bitcast of the libcall so the result is i32,
// exercising the cast that previously triggered the assertion.
int test_c11_atomic_fetch_add_int(_Atomic(int) *p) {
  // CHECK: test_c11_atomic_fetch_add_int
  // CHECK: {{%[^ ]*}} = tail call i32 bitcast (i32* (i8*, i32, i32)* @__atomic_fetch_add_4 to i32 (i8*, i32, i32)*)(i8* {{%[0-9]+}}, i32 3, i32 5)
  return __c11_atomic_fetch_add(p, 3, memory_order_seq_cst);
}
|
||||
|
||||
// C11 fetch_sub on a plain _Atomic(int): no scaling ("5" stays "i32 5");
// like the add case above, the libcall result is bitcast back to i32.
int test_c11_atomic_fetch_sub_int(_Atomic(int) *p) {
  // CHECK: test_c11_atomic_fetch_sub_int
  // CHECK: {{%[^ ]*}} = tail call i32 bitcast (i32* (i8*, i32, i32)* @__atomic_fetch_sub_4 to i32 (i8*, i32, i32)*)(i8* {{%[0-9]+}}, i32 5, i32 5)
  return __c11_atomic_fetch_sub(p, 5, memory_order_seq_cst);
}
|
||||
|
||||
// GNU __atomic builtin on a pointer: the operand is NOT scaled by
// sizeof(T), so "4" stays "i32 4" (contrast with the __c11 cases above).
int *fp2a(int **p) {
  // CHECK: @fp2a
  // CHECK: {{%[^ ]*}} = tail call i32* @__atomic_fetch_sub_4(i8* {{%[0-9]+}}, i32 4, i32 0)
  // Note, the GNU builtins do not multiply by sizeof(T)!
  return __atomic_fetch_sub(p, 4, memory_order_relaxed);
}
|
Loading…
Reference in New Issue