ARM: implement low-level intrinsics for the atomic exclusive operations.

This adds three intrinsics to Clang, the first two overloaded on the pointee type T:
    T __builtin_arm_ldrex(const volatile T *addr)
    int __builtin_arm_strex(T val, volatile T *addr)
    void __builtin_arm_clrex()

The intent is that these do what users would expect when given most sensible
types. Currently, "sensible" translates to ints, floats and pointers.
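
For example, the canonical exclusive-access retry loop looks like this (a
sketch mirroring the atomic_inc CodeGen test added below):

    int atomic_inc(int *addr) {
      int Failure, OldVal;
      do {
        OldVal = __builtin_arm_ldrex(addr);              /* load-exclusive */
        Failure = __builtin_arm_strex(OldVal + 1, addr); /* 0 on success   */
      } while (Failure);
      return OldVal;
    }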

llvm-svn: 186394
Tim Northover  2013-07-16 09:47:53 +00:00
commit 6aacd49094 (parent 4245f78fdd)
9 changed files with 398 additions and 21 deletions


@@ -1656,6 +1656,36 @@ C11's ``<stdatomic.h>`` header. These builtins provide the semantics of the
* ``__c11_atomic_fetch_or``
* ``__c11_atomic_fetch_xor``
+
+Low-level ARM exclusive memory builtins
+---------------------------------------
+
+Clang provides overloaded builtins giving direct access to the three key ARM
+instructions for implementing atomic operations.
+
+.. code-block:: c
+
+  T __builtin_arm_ldrex(const volatile T *addr);
+  int __builtin_arm_strex(T val, volatile T *addr);
+  void __builtin_arm_clrex(void);
+
+The types ``T`` currently supported are:
+
+* Integer types with width at most 64 bits.
+* Floating-point types.
+* Pointer types.
+
+Note that the compiler does not guarantee it will not insert stores which clear
+the exclusive monitor in between an ``ldrex`` and its paired ``strex``. In
+practice this is usually only a risk when the extra store is on the same cache
+line as the variable being modified; since Clang will only insert stack stores
+on its own, it is best not to use these operations on variables with automatic
+storage duration.
+
+Also, loads and stores may be implicit in code written between the ``ldrex`` and
+``strex``. Clang will not necessarily mitigate the effects of these either, so
+care should be exercised.
+
+For these reasons the higher-level atomic primitives should be preferred where
+possible.
+
Non-standard C++11 Attributes
=============================
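
As an aside, the three builtins compose into the usual exclusive-access
patterns. A sketch of a compare-and-swap (illustrative only, not part of this
patch), where __builtin_arm_clrex abandons the reservation on the early-exit
path:

    int try_cas(int *addr, int expected, int desired) {
      int old = __builtin_arm_ldrex(addr);
      if (old != expected) {
        __builtin_arm_clrex(); /* drop the exclusive reservation */
        return 0;
      }
      return __builtin_arm_strex(desired, addr) == 0; /* 1 on success */
    }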


@@ -24,10 +24,14 @@ BUILTIN(__builtin_arm_qsub, "iii", "nc")
BUILTIN(__builtin_arm_ssat, "iiUi", "nc")
BUILTIN(__builtin_arm_usat, "UiUiUi", "nc")
-// Store and load exclusive doubleword
+// Store and load exclusive
BUILTIN(__builtin_arm_ldrexd, "LLUiv*", "")
BUILTIN(__builtin_arm_strexd, "iLLUiv*", "")
+BUILTIN(__builtin_arm_ldrex, "v.", "t")
+BUILTIN(__builtin_arm_strex, "i.", "t")
+BUILTIN(__builtin_arm_clrex, "v", "")
// VFP
BUILTIN(__builtin_arm_get_fpscr, "Ui", "nc")
BUILTIN(__builtin_arm_set_fpscr, "vUi", "nc")
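
The "t" attribute on the new entries requests custom type checking (handled by
CheckARMBuiltinExclusiveCall below); the "v." and "i." prototype strings are
placeholders. That is what lets a single builtin name cover several widths, as
in this sketch distilled from the CodeGen test:

    int widths(char *addr) {
      int sum = 0;
      sum += __builtin_arm_ldrex(addr);          /* 8-bit exclusive load */
      sum += __builtin_arm_ldrex((short *)addr); /* 16-bit               */
      sum += __builtin_arm_ldrex((int *)addr);   /* 32-bit               */
      return sum;
    }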


@@ -5384,27 +5384,33 @@ def err_builtin_fn_use : Error<"builtin functions must be directly called">;
def warn_call_wrong_number_of_arguments : Warning<
  "too %select{few|many}0 arguments in call to %1">;
def err_atomic_builtin_must_be_pointer : Error<
-  "first argument to atomic builtin must be a pointer (%0 invalid)">;
+  "address argument to atomic builtin must be a pointer (%0 invalid)">;
def err_atomic_builtin_must_be_pointer_intptr : Error<
-  "first argument to atomic builtin must be a pointer to integer or pointer"
+  "address argument to atomic builtin must be a pointer to integer or pointer"
  " (%0 invalid)">;
+def err_atomic_builtin_must_be_pointer_intfltptr : Error<
+  "address argument to atomic builtin must be a pointer to integer,"
+  " floating-point or pointer (%0 invalid)">;
def err_atomic_builtin_pointer_size : Error<
-  "first argument to atomic builtin must be a pointer to 1,2,4,8 or 16 byte "
+  "address argument to atomic builtin must be a pointer to 1,2,4,8 or 16 byte "
  "type (%0 invalid)">;
+def err_atomic_exclusive_builtin_pointer_size : Error<
+  "address argument to load or store exclusive builtin must be a pointer to"
+  " 1,2,4 or 8 byte type (%0 invalid)">;
def err_atomic_op_needs_atomic : Error<
-  "first argument to atomic operation must be a pointer to _Atomic "
+  "address argument to atomic operation must be a pointer to _Atomic "
  "type (%0 invalid)">;
def err_atomic_op_needs_non_const_atomic : Error<
-  "first argument to atomic operation must be a pointer to non-const _Atomic "
+  "address argument to atomic operation must be a pointer to non-const _Atomic "
  "type (%0 invalid)">;
def err_atomic_op_needs_trivial_copy : Error<
-  "first argument to atomic operation must be a pointer to a trivially-copyable"
-  " type (%0 invalid)">;
+  "address argument to atomic operation must be a pointer to a "
+  "trivially-copyable type (%0 invalid)">;
def err_atomic_op_needs_atomic_int_or_ptr : Error<
-  "first argument to atomic operation must be a pointer to %select{|atomic }0"
+  "address argument to atomic operation must be a pointer to %select{|atomic }0"
  "integer or pointer (%1 invalid)">;
def err_atomic_op_bitwise_needs_atomic_int : Error<
-  "first argument to bitwise atomic operation must be a pointer to "
+  "address argument to bitwise atomic operation must be a pointer to "
  "%select{|atomic }0integer (%1 invalid)">;
def err_atomic_load_store_uses_lib : Error<
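
An editorial note on the rewording: "first argument" becomes "address argument"
because for strex the pointer is the second argument, so the old wording would
point at the wrong operand. A sketch of a call hitting the new
err_atomic_builtin_must_be_pointer_intfltptr diagnostic:

    struct Pair { int a, b; };

    int bad(struct Pair v, struct Pair *addr) {
      /* error: address argument to atomic builtin must be a pointer to
         integer, floating-point or pointer ('struct Pair *' invalid) */
      return __builtin_arm_strex(v, addr);
    }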


@@ -7499,7 +7499,10 @@ private:
  bool CheckObjCString(Expr *Arg);
  ExprResult CheckBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
+  bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall);
  bool CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
  bool CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
  bool SemaBuiltinVAStart(CallExpr *TheCall);


@@ -1737,11 +1737,14 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
    return EmitNounwindRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Ops);
  }
-  if (BuiltinID == ARM::BI__builtin_arm_ldrexd) {
+  if (BuiltinID == ARM::BI__builtin_arm_ldrexd ||
+      (BuiltinID == ARM::BI__builtin_arm_ldrex &&
+       getContext().getTypeSize(E->getType()) == 64)) {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_ldrexd);
    Value *LdPtr = EmitScalarExpr(E->getArg(0));
-    Value *Val = Builder.CreateCall(F, LdPtr, "ldrexd");
+    Value *Val = Builder.CreateCall(F, Builder.CreateBitCast(LdPtr, Int8PtrTy),
+                                    "ldrexd");
    Value *Val0 = Builder.CreateExtractValue(Val, 1);
    Value *Val1 = Builder.CreateExtractValue(Val, 0);
@@ -1750,15 +1753,39 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
    Value *ShiftCst = llvm::ConstantInt::get(Int64Ty, 32);
    Val = Builder.CreateShl(Val0, ShiftCst, "shl", true /* nuw */);
-    return Builder.CreateOr(Val, Val1);
+    Val = Builder.CreateOr(Val, Val1);
+    return Builder.CreateBitCast(Val, ConvertType(E->getType()));
  }
-  if (BuiltinID == ARM::BI__builtin_arm_strexd) {
+  if (BuiltinID == ARM::BI__builtin_arm_ldrex) {
+    Value *LoadAddr = EmitScalarExpr(E->getArg(0));
+    QualType Ty = E->getType();
+    llvm::Type *RealResTy = ConvertType(Ty);
+    llvm::Type *IntResTy = llvm::IntegerType::get(getLLVMContext(),
+                                                  getContext().getTypeSize(Ty));
+    LoadAddr = Builder.CreateBitCast(LoadAddr, IntResTy->getPointerTo());
+    Function *F = CGM.getIntrinsic(Intrinsic::arm_ldrex, LoadAddr->getType());
+    Value *Val = Builder.CreateCall(F, LoadAddr, "ldrex");
+    if (RealResTy->isPointerTy())
+      return Builder.CreateIntToPtr(Val, RealResTy);
+    else {
+      Val = Builder.CreateTruncOrBitCast(Val, IntResTy);
+      return Builder.CreateBitCast(Val, RealResTy);
+    }
+  }
+  if (BuiltinID == ARM::BI__builtin_arm_strexd ||
+      (BuiltinID == ARM::BI__builtin_arm_strex &&
+       getContext().getTypeSize(E->getArg(0)->getType()) == 64)) {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_strexd);
    llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty, NULL);
    Value *One = llvm::ConstantInt::get(Int32Ty, 1);
-    Value *Tmp = Builder.CreateAlloca(Int64Ty, One);
+    Value *Tmp = Builder.CreateAlloca(ConvertType(E->getArg(0)->getType()),
+                                      One);
    Value *Val = EmitScalarExpr(E->getArg(0));
    Builder.CreateStore(Val, Tmp);
@@ -1767,10 +1794,35 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
    Value *Arg0 = Builder.CreateExtractValue(Val, 0);
    Value *Arg1 = Builder.CreateExtractValue(Val, 1);
-    Value *StPtr = EmitScalarExpr(E->getArg(1));
+    Value *StPtr = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)), Int8PtrTy);
    return Builder.CreateCall3(F, Arg0, Arg1, StPtr, "strexd");
  }
+  if (BuiltinID == ARM::BI__builtin_arm_strex) {
+    Value *StoreVal = EmitScalarExpr(E->getArg(0));
+    Value *StoreAddr = EmitScalarExpr(E->getArg(1));
+    QualType Ty = E->getArg(0)->getType();
+    llvm::Type *StoreTy = llvm::IntegerType::get(getLLVMContext(),
+                                                 getContext().getTypeSize(Ty));
+    StoreAddr = Builder.CreateBitCast(StoreAddr, StoreTy->getPointerTo());
+    if (StoreVal->getType()->isPointerTy())
+      StoreVal = Builder.CreatePtrToInt(StoreVal, Int32Ty);
+    else {
+      StoreVal = Builder.CreateBitCast(StoreVal, StoreTy);
+      StoreVal = Builder.CreateZExtOrBitCast(StoreVal, Int32Ty);
+    }
+    Function *F = CGM.getIntrinsic(Intrinsic::arm_strex, StoreAddr->getType());
+    return Builder.CreateCall2(F, StoreVal, StoreAddr, "strex");
+  }
+  if (BuiltinID == ARM::BI__builtin_arm_clrex) {
+    Function *F = CGM.getIntrinsic(Intrinsic::arm_clrex);
+    return Builder.CreateCall(F);
+  }
  SmallVector<Value*, 4> Ops;
  llvm::Value *Align = 0;
  for (unsigned i = 0, e = E->getNumArgs() - 1; i != e; i++) {
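
The strex path above normalises every value to an i32: pointers via ptrtoint,
narrower integers and floats via bitcast plus zero-extension. In C terms the
float case behaves roughly like this sketch, where llvm_arm_strex_i32 is a
hypothetical stand-in for the width-matched intrinsic:

    #include <string.h>

    /* Hypothetical stand-in for the 32-bit @llvm.arm.strex intrinsic. */
    extern int llvm_arm_strex_i32(unsigned val, unsigned *addr);

    int store_float_exclusive(float v, float *addr) {
      unsigned bits;
      memcpy(&bits, &v, sizeof bits); /* reinterpret the bits, no conversion */
      return llvm_arm_strex_i32(bits, (unsigned *)addr);
    }

This is why the CodeGen test below expects the raw bit pattern 1076754509 when
2.71828f is stored.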


@@ -371,9 +371,117 @@ static QualType getNeonEltType(NeonTypeFlags Flags, ASTContext &Context) {
  llvm_unreachable("Invalid NeonTypeFlag!");
}
+bool Sema::CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall) {
+  assert((BuiltinID == ARM::BI__builtin_arm_ldrex ||
+          BuiltinID == ARM::BI__builtin_arm_strex) &&
+         "unexpected ARM builtin");
+  bool IsLdrex = BuiltinID == ARM::BI__builtin_arm_ldrex;
+  DeclRefExpr *DRE =
+      cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
+  // Ensure that we have the proper number of arguments.
+  if (checkArgCount(*this, TheCall, IsLdrex ? 1 : 2))
+    return true;
+  // Inspect the pointer argument of the atomic builtin. This should always be
+  // a pointer type, whose element is an integral scalar or pointer type.
+  // Because it is a pointer type, we don't have to worry about any implicit
+  // casts here.
+  Expr *PointerArg = TheCall->getArg(IsLdrex ? 0 : 1);
+  ExprResult PointerArgRes = DefaultFunctionArrayLvalueConversion(PointerArg);
+  if (PointerArgRes.isInvalid())
+    return true;
+  PointerArg = PointerArgRes.take();
+  const PointerType *pointerType = PointerArg->getType()->getAs<PointerType>();
+  if (!pointerType) {
+    Diag(DRE->getLocStart(), diag::err_atomic_builtin_must_be_pointer)
+        << PointerArg->getType() << PointerArg->getSourceRange();
+    return true;
+  }
+  // ldrex takes a "const volatile T*" and strex takes a "volatile T*". Our next
+  // task is to insert the appropriate casts into the AST. First work out just
+  // what the appropriate type is.
+  QualType ValType = pointerType->getPointeeType();
+  QualType AddrType = ValType.getUnqualifiedType().withVolatile();
+  if (IsLdrex)
+    AddrType.addConst();
+  // Issue a warning if the cast is dodgy.
+  CastKind CastNeeded = CK_NoOp;
+  if (!AddrType.isAtLeastAsQualifiedAs(ValType)) {
+    CastNeeded = CK_BitCast;
+    Diag(DRE->getLocStart(), diag::ext_typecheck_convert_discards_qualifiers)
+        << PointerArg->getType()
+        << Context.getPointerType(AddrType)
+        << AA_Passing << PointerArg->getSourceRange();
+  }
+  // Finally, do the cast and replace the argument with the corrected version.
+  AddrType = Context.getPointerType(AddrType);
+  PointerArgRes = ImpCastExprToType(PointerArg, AddrType, CastNeeded);
+  if (PointerArgRes.isInvalid())
+    return true;
+  PointerArg = PointerArgRes.take();
+  TheCall->setArg(IsLdrex ? 0 : 1, PointerArg);
+  // In general, we allow ints, floats and pointers to be loaded and stored.
+  if (!ValType->isIntegerType() && !ValType->isAnyPointerType() &&
+      !ValType->isBlockPointerType() && !ValType->isFloatingType()) {
+    Diag(DRE->getLocStart(), diag::err_atomic_builtin_must_be_pointer_intfltptr)
+        << PointerArg->getType() << PointerArg->getSourceRange();
+    return true;
+  }
+  // But ARM doesn't have instructions to deal with 128-bit versions.
+  if (Context.getTypeSize(ValType) > 64) {
+    Diag(DRE->getLocStart(), diag::err_atomic_exclusive_builtin_pointer_size)
+        << PointerArg->getType() << PointerArg->getSourceRange();
+    return true;
+  }
+  switch (ValType.getObjCLifetime()) {
+  case Qualifiers::OCL_None:
+  case Qualifiers::OCL_ExplicitNone:
+    // okay
+    break;
+  case Qualifiers::OCL_Weak:
+  case Qualifiers::OCL_Strong:
+  case Qualifiers::OCL_Autoreleasing:
+    Diag(DRE->getLocStart(), diag::err_arc_atomic_ownership)
+        << ValType << PointerArg->getSourceRange();
+    return true;
+  }
+  if (IsLdrex) {
+    TheCall->setType(ValType);
+    return false;
+  }
+  // Initialize the argument to be stored.
+  ExprResult ValArg = TheCall->getArg(0);
+  InitializedEntity Entity = InitializedEntity::InitializeParameter(
+      Context, ValType, /*consume*/ false);
+  ValArg = PerformCopyInitialization(Entity, SourceLocation(), ValArg);
+  if (ValArg.isInvalid())
+    return true;
+  TheCall->setArg(0, ValArg.get());
+  return false;
+}
bool Sema::CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
  llvm::APSInt Result;
+  if (BuiltinID == ARM::BI__builtin_arm_ldrex ||
+      BuiltinID == ARM::BI__builtin_arm_strex) {
+    return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall);
+  }
  uint64_t mask = 0;
  unsigned TV = 0;
  int PtrArgNum = -1;
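
A consequence of the qualifier handling above: the address argument is
implicitly converted to "const volatile T *" for ldrex and "volatile T *" for
strex, so adding qualifiers is silent and only discarding them draws the
discards-qualifiers warning. A sketch:

    int qualifiers(int *plain, const volatile int *cv) {
      int v = __builtin_arm_ldrex(plain); /* int* gains const volatile: fine */
      v += __builtin_arm_ldrex(cv);       /* already fully qualified: fine   */
      /* warning: passing 'const int *' to parameter of type 'volatile int *'
         discards qualifiers */
      return __builtin_arm_strex(v, (const int *)plain);
    }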


@@ -0,0 +1,113 @@
// REQUIRES: arm-registered-target
// RUN: %clang_cc1 -Wall -Werror -triple thumbv7-linux-gnueabi -fno-signed-char -O3 -emit-llvm -o - %s | FileCheck %s

// Make sure the canonical use works before going into smaller details:
int atomic_inc(int *addr) {
  int Failure, OldVal;
  do {
    OldVal = __builtin_arm_ldrex(addr);
    Failure = __builtin_arm_strex(OldVal + 1, addr);
  } while (Failure);
  return OldVal;
}

// CHECK: @atomic_inc
// CHECK: [[OLDVAL:%.*]] = tail call i32 @llvm.arm.ldrex.p0i32(i32* %addr)
// CHECK: [[INC:%.*]] = add nsw i32 [[OLDVAL]], 1
// CHECK: [[FAILURE:%.*]] = tail call i32 @llvm.arm.strex.p0i32(i32 [[INC]], i32* %addr)
// CHECK: [[TST:%.*]] = icmp eq i32 [[FAILURE]], 0
// CHECK: br i1 [[TST]], label %[[LOOP_END:[a-zA-Z0-9.]+]], label {{%[a-zA-Z0-9.]+}}
// CHECK: [[LOOP_END]]:

struct Simple {
  char a, b;
};

int test_ldrex(char *addr, long long *addr64, float *addrfloat) {
// CHECK: @test_ldrex
  int sum = 0;
  sum += __builtin_arm_ldrex(addr);
// CHECK: [[INTRES:%.*]] = tail call i32 @llvm.arm.ldrex.p0i8(i8* %addr)
// CHECK: and i32 [[INTRES]], 255

  sum += __builtin_arm_ldrex((short *)addr);
// CHECK: [[ADDR16:%.*]] = bitcast i8* %addr to i16*
// CHECK: [[INTRES:%.*]] = tail call i32 @llvm.arm.ldrex.p0i16(i16* [[ADDR16]])
// CHECK: [[TMPSEXT:%.*]] = shl i32 [[INTRES]], 16
// CHECK: ashr exact i32 [[TMPSEXT]], 16

  sum += __builtin_arm_ldrex((int *)addr);
// CHECK: [[ADDR32:%.*]] = bitcast i8* %addr to i32*
// CHECK: call i32 @llvm.arm.ldrex.p0i32(i32* [[ADDR32]])

  sum += __builtin_arm_ldrex((long long *)addr);
// CHECK: call { i32, i32 } @llvm.arm.ldrexd(i8* %addr)

  sum += __builtin_arm_ldrex(addr64);
// CHECK: [[ADDR64_AS8:%.*]] = bitcast i64* %addr64 to i8*
// CHECK: call { i32, i32 } @llvm.arm.ldrexd(i8* [[ADDR64_AS8]])

  sum += __builtin_arm_ldrex(addrfloat);
// CHECK: [[INTADDR:%.*]] = bitcast float* %addrfloat to i32*
// CHECK: [[INTRES:%.*]] = tail call i32 @llvm.arm.ldrex.p0i32(i32* [[INTADDR]])
// CHECK: bitcast i32 [[INTRES]] to float

  sum += __builtin_arm_ldrex((double *)addr);
// CHECK: [[STRUCTRES:%.*]] = tail call { i32, i32 } @llvm.arm.ldrexd(i8* %addr)
// CHECK: [[RESHI:%.*]] = extractvalue { i32, i32 } [[STRUCTRES]], 1
// CHECK: [[RESLO:%.*]] = extractvalue { i32, i32 } [[STRUCTRES]], 0
// CHECK: [[RESHI64:%.*]] = zext i32 [[RESHI]] to i64
// CHECK: [[RESLO64:%.*]] = zext i32 [[RESLO]] to i64
// CHECK: [[RESHIHI:%.*]] = shl nuw i64 [[RESHI64]], 32
// CHECK: [[INTRES:%.*]] = or i64 [[RESHIHI]], [[RESLO64]]
// CHECK: bitcast i64 [[INTRES]] to double

  sum += *__builtin_arm_ldrex((int **)addr);
// CHECK: [[INTRES:%.*]] = tail call i32 @llvm.arm.ldrex.p0i32(i32* [[ADDR32]])
// CHECK: inttoptr i32 [[INTRES]] to i32*

  sum += __builtin_arm_ldrex((struct Simple **)addr)->a;
// CHECK: [[INTRES:%.*]] = tail call i32 @llvm.arm.ldrex.p0i32(i32* [[ADDR32]])
// CHECK: inttoptr i32 [[INTRES]] to %struct.Simple*
  return sum;
}

int test_strex(char *addr) {
// CHECK: @test_strex
  int res = 0;
  struct Simple var = {0};
  res |= __builtin_arm_strex(4, addr);
// CHECK: call i32 @llvm.arm.strex.p0i8(i32 4, i8* %addr)

  res |= __builtin_arm_strex(42, (short *)addr);
// CHECK: [[ADDR16:%.*]] = bitcast i8* %addr to i16*
// CHECK: call i32 @llvm.arm.strex.p0i16(i32 42, i16* [[ADDR16]])

  res |= __builtin_arm_strex(42, (int *)addr);
// CHECK: [[ADDR32:%.*]] = bitcast i8* %addr to i32*
// CHECK: call i32 @llvm.arm.strex.p0i32(i32 42, i32* [[ADDR32]])

  res |= __builtin_arm_strex(42, (long long *)addr);
// CHECK: call i32 @llvm.arm.strexd(i32 42, i32 0, i8* %addr)

  res |= __builtin_arm_strex(2.71828f, (float *)addr);
// CHECK: call i32 @llvm.arm.strex.p0i32(i32 1076754509, i32* [[ADDR32]])

  res |= __builtin_arm_strex(3.14159, (double *)addr);
// CHECK: call i32 @llvm.arm.strexd(i32 -266631570, i32 1074340345, i8* %addr)

  res |= __builtin_arm_strex(&var, (struct Simple **)addr);
// CHECK: [[INTVAL:%.*]] = ptrtoint i16* %var to i32
// CHECK: call i32 @llvm.arm.strex.p0i32(i32 [[INTVAL]], i32* [[ADDR32]])
  return res;
}

void test_clrex() {
// CHECK: @test_clrex
  __builtin_arm_clrex();
// CHECK: call void @llvm.arm.clrex()
}


@@ -86,8 +86,8 @@ void f(_Atomic(int) *i, _Atomic(int*) *p, _Atomic(float) *d,
  __c11_atomic_init(I, 5); // expected-error {{pointer to _Atomic}}
  __c11_atomic_load(0); // expected-error {{too few arguments to function}}
  __c11_atomic_load(0,0,0); // expected-error {{too many arguments to function}}
-  __c11_atomic_store(0,0,0); // expected-error {{first argument to atomic builtin must be a pointer}}
-  __c11_atomic_store((int*)0,0,0); // expected-error {{first argument to atomic operation must be a pointer to _Atomic}}
+  __c11_atomic_store(0,0,0); // expected-error {{address argument to atomic builtin must be a pointer}}
+  __c11_atomic_store((int*)0,0,0); // expected-error {{address argument to atomic operation must be a pointer to _Atomic}}
  __c11_atomic_load(i, memory_order_seq_cst);
  __c11_atomic_load(p, memory_order_seq_cst);
@@ -169,9 +169,9 @@ void f(_Atomic(int) *i, _Atomic(int*) *p, _Atomic(float) *d,
  (int)__atomic_clear(&flag, memory_order_seq_cst); // expected-error {{operand of type 'void'}}
  const _Atomic(int) const_atomic;
-  __c11_atomic_init(&const_atomic, 0); // expected-error {{first argument to atomic operation must be a pointer to non-const _Atomic type ('const _Atomic(int) *' invalid)}}
-  __c11_atomic_store(&const_atomic, 0, memory_order_release); // expected-error {{first argument to atomic operation must be a pointer to non-const _Atomic type ('const _Atomic(int) *' invalid)}}
-  __c11_atomic_load(&const_atomic, memory_order_acquire); // expected-error {{first argument to atomic operation must be a pointer to non-const _Atomic type ('const _Atomic(int) *' invalid)}}
+  __c11_atomic_init(&const_atomic, 0); // expected-error {{address argument to atomic operation must be a pointer to non-const _Atomic type ('const _Atomic(int) *' invalid)}}
+  __c11_atomic_store(&const_atomic, 0, memory_order_release); // expected-error {{address argument to atomic operation must be a pointer to non-const _Atomic type ('const _Atomic(int) *' invalid)}}
+  __c11_atomic_load(&const_atomic, memory_order_acquire); // expected-error {{address argument to atomic operation must be a pointer to non-const _Atomic type ('const _Atomic(int) *' invalid)}}
}

_Atomic(int*) PR12527_a;


@@ -0,0 +1,61 @@
// RUN: %clang_cc1 -triple armv7 -fsyntax-only -verify %s

struct Simple {
  char a, b;
};

int test_ldrex(char *addr) {
  int sum = 0;
  sum += __builtin_arm_ldrex(addr);
  sum += __builtin_arm_ldrex((short *)addr);
  sum += __builtin_arm_ldrex((int *)addr);
  sum += __builtin_arm_ldrex((long long *)addr);
  sum += __builtin_arm_ldrex((float *)addr);
  sum += __builtin_arm_ldrex((double *)addr);
  sum += *__builtin_arm_ldrex((int **)addr);
  sum += __builtin_arm_ldrex((struct Simple **)addr)->a;
  sum += __builtin_arm_ldrex((volatile char *)addr);
  sum += __builtin_arm_ldrex((const volatile char *)addr);

  // In principle this might be valid, but stick to ints and floats for scalar
  // types at the moment.
  sum += __builtin_arm_ldrex((struct Simple *)addr).a; // expected-error {{address argument to atomic builtin must be a pointer to}}
  sum += __builtin_arm_ldrex((__int128 *)addr); // expected-error {{__int128 is not supported on this target}} expected-error {{address argument to load or store exclusive builtin must be a pointer to 1,2,4 or 8 byte type}}

  __builtin_arm_ldrex(); // expected-error {{too few arguments to function call}}
  __builtin_arm_ldrex(1, 2); // expected-error {{too many arguments to function call}}
  return sum;
}

int test_strex(char *addr) {
  int res = 0;
  struct Simple var = {0};
  res |= __builtin_arm_strex(4, addr);
  res |= __builtin_arm_strex(42, (short *)addr);
  res |= __builtin_arm_strex(42, (int *)addr);
  res |= __builtin_arm_strex(42, (long long *)addr);
  res |= __builtin_arm_strex(2.71828f, (float *)addr);
  res |= __builtin_arm_strex(3.14159, (double *)addr);
  res |= __builtin_arm_strex(&var, (struct Simple **)addr);
  res |= __builtin_arm_strex(42, (volatile char *)addr);
  res |= __builtin_arm_strex(42, (char *const)addr);
  res |= __builtin_arm_strex(42, (const char *)addr); // expected-warning {{passing 'const char *' to parameter of type 'volatile char *' discards qualifiers}}

  res |= __builtin_arm_strex(var, (struct Simple *)addr); // expected-error {{address argument to atomic builtin must be a pointer to}}
  res |= __builtin_arm_strex(var, (struct Simple **)addr); // expected-error {{passing 'struct Simple' to parameter of incompatible type 'struct Simple *'}}
  res |= __builtin_arm_strex(&var, (struct Simple **)addr).a; // expected-error {{is not a structure or union}}
  res |= __builtin_arm_strex(1, (__int128 *)addr); // expected-error {{__int128 is not supported on this target}} expected-error {{address argument to load or store exclusive builtin must be a pointer to 1,2,4 or 8 byte type}}

  __builtin_arm_strex(1); // expected-error {{too few arguments to function call}}
  __builtin_arm_strex(1, 2, 3); // expected-error {{too many arguments to function call}}
  return res;
}

void test_clrex() {
  __builtin_arm_clrex();
  __builtin_arm_clrex(1); // expected-error {{too many arguments to function call}}
}