Some improvements to the handling of C11 atomic types:

- Add atomic-to/from-nonatomic cast types.
- Emit atomic operations for arithmetic on atomic types.
- Emit non-atomic stores for initialisation of atomic types, but atomic stores and loads for every other store / load.
- Add a __atomic_init() intrinsic which does a non-atomic store to an _Atomic() type. This is needed for the corresponding C11 stdatomic.h function.
- Enable the relevant __has_feature() checks. The feature isn't 100% complete yet, but it's done enough that we want people testing it.

Still to do:

- Make the arithmetic operations on atomic types (e.g. _Atomic(int) foo = 1; foo++;) use the correct LLVM intrinsic if one exists, not a loop with a cmpxchg.
- Add a signal fence builtin.
- Properly set the fenv state in atomic operations on floating point values.
- Correctly handle things like _Atomic(_Complex double) which are too large for an atomic cmpxchg on some platforms (this requires working out what 'correctly' means in this context).
- Fix the many remaining corner cases.

llvm-svn: 148242
commit fa35df628a (parent 44a2895a03)
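As a quick illustration of what the patch enables end to end, here is a small C example (hypothetical, not part of the patch) touching each piece: the _Atomic type syntax, the new __atomic_init() builtin, arithmetic on an atomic l-value, and an ordinary read:

  _Atomic(int) counter;

  void example(void) {
    __atomic_init(&counter, 0);   /* non-atomic store via the new builtin */
    counter += 5;                 /* currently emitted as a cmpxchg loop */
    int snapshot = counter;       /* plain reads become seq_cst atomic loads */
    (void)snapshot;
  }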
@@ -4410,7 +4410,7 @@ public:
 class AtomicExpr : public Expr {
 public:
   enum AtomicOp { Load, Store, CmpXchgStrong, CmpXchgWeak, Xchg,
-                  Add, Sub, And, Or, Xor };
+                  Add, Sub, And, Or, Xor, Init };
 private:
   enum { PTR, ORDER, VAL1, ORDER_FAIL, VAL2, END_EXPR };
   Stmt* SubExprs[END_EXPR];

@@ -4438,10 +4438,16 @@ public:
     SubExprs[ORDER] = E;
   }
   Expr *getVal1() const {
+    if (Op == Init)
+      return cast<Expr>(SubExprs[ORDER]);
     assert(NumSubExprs >= 3);
     return cast<Expr>(SubExprs[VAL1]);
   }
   void setVal1(Expr *E) {
+    if (Op == Init) {
+      SubExprs[ORDER] = E;
+      return;
+    }
     assert(NumSubExprs >= 3);
     SubExprs[VAL1] = E;
   }

@@ -267,7 +267,12 @@ enum CastKind {
   /// in ARC cause blocks to be copied; this is for cases where that
   /// would not otherwise be guaranteed, such as when casting to a
   /// non-block pointer type.
-  CK_ARCExtendBlockObject
+  CK_ARCExtendBlockObject,
+
+  /// \brief Converts from _Atomic(T) to T.
+  CK_AtomicToNonAtomic,
+  /// \brief Converts from T to _Atomic(T).
+  CK_NonAtomicToAtomic
 };

 #define CK_Invalid ((CastKind) -1)

@@ -599,6 +599,7 @@ BUILTIN(__atomic_fetch_or, "v.", "t")
 BUILTIN(__atomic_fetch_xor, "v.", "t")
 BUILTIN(__atomic_thread_fence, "vi", "n")
 BUILTIN(__atomic_signal_fence, "vi", "n")
+BUILTIN(__atomic_init, "v.", "t")
 BUILTIN(__atomic_is_lock_free, "iz", "n")

 // Non-overloaded atomic builtins.

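The new builtin exists so that a C11 <stdatomic.h> can implement atomic_init on top of it; a minimal sketch of such a mapping (illustrative, not the actual header):

  #define atomic_init(obj, value) __atomic_init(obj, value)

  _Atomic(int) ready;
  void setup(void) {
    atomic_init(&ready, 0);   /* plain store: the object is not yet shared */
  }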
@@ -1092,6 +1092,8 @@ void CastExpr::CheckCastConsistency() const {
   case CK_Dependent:
   case CK_LValueToRValue:
   case CK_NoOp:
+  case CK_AtomicToNonAtomic:
+  case CK_NonAtomicToAtomic:
   case CK_PointerToBoolean:
   case CK_IntegralToBoolean:
   case CK_FloatingToBoolean:

@@ -1204,6 +1206,10 @@ const char *CastExpr::getCastKindName() const {
     return "ARCReclaimReturnedObject";
   case CK_ARCExtendBlockObject:
     return "ARCCExtendBlockObject";
+  case CK_AtomicToNonAtomic:
+    return "AtomicToNonAtomic";
+  case CK_NonAtomicToAtomic:
+    return "NonAtomicToAtomic";
   }

   llvm_unreachable("Unhandled cast kind!");

@@ -2536,6 +2542,18 @@ bool Expr::isConstantInitializer(ASTContext &Ctx, bool IsForRef) const {
   case CStyleCastExprClass: {
     const CastExpr *CE = cast<CastExpr>(this);

+    // If we're promoting an integer to an _Atomic type then this is constant
+    // if the integer is constant.  We also need to check the converse in case
+    // someone does something like:
+    //
+    // int a = (_Atomic(int))42;
+    //
+    // I doubt anyone would write code like this directly, but it's quite
+    // possible as the result of macro expansions.
+    if (CE->getCastKind() == CK_NonAtomicToAtomic ||
+        CE->getCastKind() == CK_AtomicToNonAtomic)
+      return CE->getSubExpr()->isConstantInitializer(Ctx, false);
+
     // Handle bitcasts of vector constants.
     if (getType()->isVectorType() && CE->getCastKind() == CK_BitCast)
       return CE->getSubExpr()->isConstantInitializer(Ctx, false);

@@ -2277,6 +2277,8 @@ public:
     default:
       break;

+    case CK_AtomicToNonAtomic:
+    case CK_NonAtomicToAtomic:
     case CK_NoOp:
       return StmtVisitorTy::Visit(E->getSubExpr());

@@ -4531,6 +4533,8 @@ bool IntExprEvaluator::VisitCastExpr(const CastExpr *E) {
     return Error(E);

   case CK_LValueToRValue:
+  case CK_AtomicToNonAtomic:
+  case CK_NonAtomicToAtomic:
   case CK_NoOp:
     return ExprEvaluatorBaseTy::VisitCastExpr(E);

@@ -4997,6 +5001,8 @@ bool ComplexExprEvaluator::VisitCastExpr(const CastExpr *E) {
     llvm_unreachable("invalid cast kind for complex value");

   case CK_LValueToRValue:
+  case CK_AtomicToNonAtomic:
+  case CK_NonAtomicToAtomic:
   case CK_NoOp:
     return ExprEvaluatorBaseTy::VisitCastExpr(E);

@@ -5855,6 +5861,8 @@ static ICEDiag CheckICE(const Expr* E, ASTContext &Ctx) {
   }
   switch (cast<CastExpr>(E)->getCastKind()) {
   case CK_LValueToRValue:
+  case CK_AtomicToNonAtomic:
+  case CK_NonAtomicToAtomic:
   case CK_NoOp:
   case CK_IntegralToBoolean:
   case CK_IntegralCast:

@@ -1052,6 +1052,9 @@ void StmtPrinter::VisitPseudoObjectExpr(PseudoObjectExpr *Node) {
 void StmtPrinter::VisitAtomicExpr(AtomicExpr *Node) {
   const char *Name = 0;
   switch (Node->getOp()) {
+    case AtomicExpr::Init:
+      Name = "__atomic_init(";
+      break;
     case AtomicExpr::Load:
       Name = "__atomic_load(";
       break;

@@ -1094,7 +1097,8 @@ void StmtPrinter::VisitAtomicExpr(AtomicExpr *Node) {
     PrintExpr(Node->getVal2());
     OS << ", ";
   }
-  PrintExpr(Node->getOrder());
+  if (Node->getOp() != AtomicExpr::Init)
+    PrintExpr(Node->getOrder());
   if (Node->isCmpXChg()) {
     OS << ", ";
     PrintExpr(Node->getOrderFail());

@@ -494,7 +494,7 @@ void CodeGenFunction::EmitScalarInit(const Expr *init,
     llvm::Value *value = EmitScalarExpr(init);
     if (capturedByInit)
       drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
-    EmitStoreThroughLValue(RValue::get(value), lvalue);
+    EmitStoreThroughLValue(RValue::get(value), lvalue, true);
     return;
   }

@@ -535,7 +535,7 @@ void CodeGenFunction::EmitScalarInit(const Expr *init,

     // Otherwise just do a simple store.
     else
-      EmitStoreOfScalar(zero, tempLV);
+      EmitStoreOfScalar(zero, tempLV, /* isInitialization */ true);
   }

   // Emit the initializer.

@@ -581,19 +581,19 @@ void CodeGenFunction::EmitScalarInit(const Expr *init,
   // both __weak and __strong, but __weak got filtered out above.
   if (accessedByInit && lifetime == Qualifiers::OCL_Strong) {
     llvm::Value *oldValue = EmitLoadOfScalar(lvalue);
-    EmitStoreOfScalar(value, lvalue);
+    EmitStoreOfScalar(value, lvalue, /* isInitialization */ true);
     EmitARCRelease(oldValue, /*precise*/ false);
     return;
   }

-  EmitStoreOfScalar(value, lvalue);
+  EmitStoreOfScalar(value, lvalue, /* isInitialization */ true);
 }

 /// EmitScalarInit - Initialize the given lvalue with the given object.
 void CodeGenFunction::EmitScalarInit(llvm::Value *init, LValue lvalue) {
   Qualifiers::ObjCLifetime lifetime = lvalue.getObjCLifetime();
   if (!lifetime)
-    return EmitStoreThroughLValue(RValue::get(init), lvalue);
+    return EmitStoreThroughLValue(RValue::get(init), lvalue, true);

   switch (lifetime) {
   case Qualifiers::OCL_None:

@@ -617,7 +617,7 @@ void CodeGenFunction::EmitScalarInit(llvm::Value *init, LValue lvalue) {
     break;
   }

-  EmitStoreOfScalar(init, lvalue);
+  EmitStoreOfScalar(init, lvalue, /* isInitialization */ true);
 }

 /// canEmitInitWithFewStoresAfterMemset - Decide whether we can emit the

@@ -1045,7 +1045,7 @@ void CodeGenFunction::EmitExprAsInit(const Expr *init,
     RValue rvalue = EmitReferenceBindingToExpr(init, D);
     if (capturedByInit)
       drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
-    EmitStoreThroughLValue(rvalue, lvalue);
+    EmitStoreThroughLValue(rvalue, lvalue, true);
   } else if (!hasAggregateLLVMType(type)) {
     EmitScalarInit(init, D, lvalue, capturedByInit);
   } else if (type->isAnyComplexType()) {

@@ -1505,7 +1505,7 @@ void CodeGenFunction::EmitParmDecl(const VarDecl &D, llvm::Value *Arg,
   if (doStore) {
     LValue lv = MakeAddrLValue(DeclPtr, Ty,
                                getContext().getDeclAlign(&D));
-    EmitStoreOfScalar(Arg, lv);
+    EmitStoreOfScalar(Arg, lv, /* isInitialization */ true);
   }
 }

@@ -764,6 +764,9 @@ llvm::Value *CodeGenFunction::EmitLoadOfScalar(llvm::Value *Addr, bool Volatile,
   Load->setAlignment(Alignment);
   if (TBAAInfo)
     CGM.DecorateInstruction(Load, TBAAInfo);
+  // If this is an atomic type, all normal reads must be atomic
+  if (Ty->isAtomicType())
+    Load->setAtomic(llvm::SequentiallyConsistent);

   return EmitFromMemory(Load, Ty);
 }

@@ -800,7 +803,8 @@ llvm::Value *CodeGenFunction::EmitFromMemory(llvm::Value *Value, QualType Ty) {
 void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
                                         bool Volatile, unsigned Alignment,
                                         QualType Ty,
-                                        llvm::MDNode *TBAAInfo) {
+                                        llvm::MDNode *TBAAInfo,
+                                        bool isInit) {
   Value = EmitToMemory(Value, Ty);

   llvm::StoreInst *Store = Builder.CreateStore(Value, Addr, Volatile);

@@ -808,12 +812,15 @@ void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
     Store->setAlignment(Alignment);
   if (TBAAInfo)
     CGM.DecorateInstruction(Store, TBAAInfo);
+  if (!isInit && Ty->isAtomicType())
+    Store->setAtomic(llvm::SequentiallyConsistent);
 }

-void CodeGenFunction::EmitStoreOfScalar(llvm::Value *value, LValue lvalue) {
+void CodeGenFunction::EmitStoreOfScalar(llvm::Value *value, LValue lvalue,
+                                        bool isInit) {
   EmitStoreOfScalar(value, lvalue.getAddress(), lvalue.isVolatile(),
                     lvalue.getAlignment().getQuantity(), lvalue.getType(),
-                    lvalue.getTBAAInfo());
+                    lvalue.getTBAAInfo(), isInit);
 }

 /// EmitLoadOfLValue - Given an expression that represents a value lvalue, this

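Together with the load-side change above, the isInit flag produces the split the commit message describes; a sketch of how source lines map to loads and stores under this scheme (assuming the seq_cst default):

  void stores(_Atomic(int) *p) {
    _Atomic(int) local = 1;   /* initialization: plain, non-atomic store */
    *p = 2;                   /* assignment: seq_cst atomic store */
    int v = *p;               /* read: seq_cst atomic load */
    (void)v; (void)local;
  }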
@@ -961,7 +968,7 @@ RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV) {
 /// EmitStoreThroughLValue - Store the specified rvalue into the specified
 /// lvalue, where both are guaranteed to the have the same type, and that type
 /// is 'Ty'.
-void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst) {
+void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit) {
   if (!Dst.isSimple()) {
     if (Dst.isVectorElt()) {
       // Read/modify/write the vector, inserting the new element.

@@ -1041,7 +1048,7 @@ void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst) {
   }

   assert(Src.isScalar() && "Can't emit an agg store with this method");
-  EmitStoreOfScalar(Src.getScalarVal(), Dst);
+  EmitStoreOfScalar(Src.getScalarVal(), Dst, isInit);
 }

 void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,

@@ -2052,6 +2059,11 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {

   case CK_Dependent:
     llvm_unreachable("dependent cast kind in IR gen!");

+  // These two casts are currently treated as no-ops, although they could
+  // potentially be real operations depending on the target's ABI.
+  case CK_NonAtomicToAtomic:
+  case CK_AtomicToNonAtomic:
+
   case CK_NoOp:
   case CK_LValueToRValue:

@@ -2541,6 +2553,7 @@ EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, llvm::Value *Dest,
   case AtomicExpr::CmpXchgWeak:
   case AtomicExpr::CmpXchgStrong:
   case AtomicExpr::Store:
+  case AtomicExpr::Init:
   case AtomicExpr::Load: assert(0 && "Already handled!");
   case AtomicExpr::Add: Op = llvm::AtomicRMWInst::Add; break;
   case AtomicExpr::Sub: Op = llvm::AtomicRMWInst::Sub; break;

@@ -2588,8 +2601,20 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
     getContext().getTargetInfo().getMaxAtomicInlineWidth();
   bool UseLibcall = (Size != Align || Size > MaxInlineWidth);

   llvm::Value *Ptr, *Order, *OrderFail = 0, *Val1 = 0, *Val2 = 0;
   Ptr = EmitScalarExpr(E->getPtr());
+
+  if (E->getOp() == AtomicExpr::Init) {
+    assert(!Dest && "Init does not return a value");
+    Val1 = EmitScalarExpr(E->getVal1());
+    llvm::StoreInst *Store = Builder.CreateStore(Val1, Ptr);
+    Store->setAlignment(Size);
+    Store->setVolatile(E->isVolatile());
+    return RValue::get(0);
+  }
+
   Order = EmitScalarExpr(E->getOrder());
   if (E->isCmpXChg()) {
     Val1 = EmitScalarExpr(E->getVal1());

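A sketch of the distinction this early exit creates, assuming the builtin signatures shown in Builtins.def above and ordering values from the new predefined macros:

  _Atomic(int) x;
  void init_then_store(void) {
    __atomic_init(&x, 1);                     /* plain store, no ordering argument */
    __atomic_store(&x, 2, __ATOMIC_SEQ_CST);  /* real atomic store with ordering */
  }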
@@ -2703,7 +2728,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
     // enforce that in general.
     break;
   }
-  if (E->getOp() == AtomicExpr::Store)
+  if (E->getOp() == AtomicExpr::Store || E->getOp() == AtomicExpr::Init)
     return RValue::get(0);
   return ConvertTempToRValue(*this, E->getType(), OrigDest);
 }

@@ -337,6 +337,8 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {

   case CK_LValueToRValue: // hope for downstream optimization
   case CK_NoOp:
+  case CK_AtomicToNonAtomic:
+  case CK_NonAtomicToAtomic:
   case CK_UserDefinedConversion:
   case CK_ConstructorConversion:
     assert(CGF.getContext().hasSameUnqualifiedType(E->getSubExpr()->getType(),

@@ -358,6 +358,10 @@ ComplexPairTy ComplexExprEmitter::EmitCast(CastExpr::CastKind CK, Expr *Op,
   switch (CK) {
   case CK_Dependent: llvm_unreachable("dependent cast kind in IR gen!");

+  // Atomic to non-atomic casts may be more than a no-op for some platforms and
+  // for some types.
+  case CK_AtomicToNonAtomic:
+  case CK_NonAtomicToAtomic:
   case CK_NoOp:
   case CK_LValueToRValue:
   case CK_UserDefinedConversion:

@@ -624,6 +624,8 @@ public:
       return CGM.getCXXABI().EmitMemberPointerConversion(C, E);

     case CK_LValueToRValue:
+    case CK_AtomicToNonAtomic:
+    case CK_NonAtomicToAtomic:
     case CK_NoOp:
       return C;

@@ -1064,6 +1064,8 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
     Value *Src = Visit(const_cast<Expr*>(E));
     return Builder.CreateBitCast(Src, ConvertType(DestTy));
   }
+  case CK_AtomicToNonAtomic:
+  case CK_NonAtomicToAtomic:
   case CK_NoOp:
   case CK_UserDefinedConversion:
     return Visit(const_cast<Expr*>(E));

@@ -1293,9 +1295,21 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
   QualType type = E->getSubExpr()->getType();
   llvm::Value *value = EmitLoadOfLValue(LV);
   llvm::Value *input = value;
+  llvm::PHINode *atomicPHI = 0;

   int amount = (isInc ? 1 : -1);

+  if (const AtomicType *atomicTy = type->getAs<AtomicType>()) {
+    llvm::BasicBlock *startBB = Builder.GetInsertBlock();
+    llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn);
+    Builder.CreateBr(opBB);
+    Builder.SetInsertPoint(opBB);
+    atomicPHI = Builder.CreatePHI(value->getType(), 2);
+    atomicPHI->addIncoming(value, startBB);
+    type = atomicTy->getValueType();
+    value = atomicPHI;
+  }
+
   // Special case of integer increment that we have to check first: bool++.
   // Due to promotion rules, we get:
   //   bool++ -> bool = bool + 1

@@ -1415,6 +1429,18 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
     value = Builder.CreateInBoundsGEP(value, sizeValue, "incdec.objptr");
     value = Builder.CreateBitCast(value, input->getType());
   }

+  if (atomicPHI) {
+    llvm::BasicBlock *opBB = Builder.GetInsertBlock();
+    llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);
+    llvm::Value *old = Builder.CreateAtomicCmpXchg(LV.getAddress(), atomicPHI,
+                                                   value, llvm::SequentiallyConsistent);
+    atomicPHI->addIncoming(old, opBB);
+    llvm::Value *success = Builder.CreateICmpEQ(old, atomicPHI);
+    Builder.CreateCondBr(success, contBB, opBB);
+    Builder.SetInsertPoint(contBB);
+    return isPre ? value : input;
+  }
+
   // Store the updated result through the lvalue.
   if (LV.isBitField())

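In present-day C11 terms, the loop built above for ++i on an _Atomic(int) behaves roughly like this sketch (the helper is illustrative; the patch itself predates <stdatomic.h>):

  #include <stdatomic.h>

  static int pre_increment(_Atomic int *p) {
    int expected = atomic_load(p);        /* initial load, feeds the PHI */
    int desired;
    do {
      desired = expected + 1;             /* the ordinary arithmetic, inside the loop */
    } while (!atomic_compare_exchange_strong(p, &expected, desired));
    return desired;                       /* pre-increment yields the new value */
  }

On failure, atomic_compare_exchange_strong writes the observed value back into expected, which mirrors the PHI node's incoming edge from the cmpxchg result.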
@@ -1670,12 +1696,38 @@ LValue ScalarExprEmitter::EmitCompoundAssignLValue(
   OpInfo.LHS = EmitLoadOfLValue(LHSLV);
   OpInfo.LHS = EmitScalarConversion(OpInfo.LHS, LHSTy,
                                     E->getComputationLHSType());

+  llvm::PHINode *atomicPHI = 0;
+  if (const AtomicType *atomicTy = OpInfo.Ty->getAs<AtomicType>()) {
+    // FIXME: For floating point types, we should be saving and restoring the
+    // floating point environment in the loop.
+    llvm::BasicBlock *startBB = Builder.GetInsertBlock();
+    llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn);
+    Builder.CreateBr(opBB);
+    Builder.SetInsertPoint(opBB);
+    atomicPHI = Builder.CreatePHI(OpInfo.LHS->getType(), 2);
+    atomicPHI->addIncoming(OpInfo.LHS, startBB);
+    OpInfo.Ty = atomicTy->getValueType();
+    OpInfo.LHS = atomicPHI;
+  }
+
   // Expand the binary operator.
   Result = (this->*Func)(OpInfo);

   // Convert the result back to the LHS type.
   Result = EmitScalarConversion(Result, E->getComputationResultType(), LHSTy);

+  if (atomicPHI) {
+    llvm::BasicBlock *opBB = Builder.GetInsertBlock();
+    llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);
+    llvm::Value *old = Builder.CreateAtomicCmpXchg(LHSLV.getAddress(), atomicPHI,
+                                                   Result, llvm::SequentiallyConsistent);
+    atomicPHI->addIncoming(old, opBB);
+    llvm::Value *success = Builder.CreateICmpEQ(old, atomicPHI);
+    Builder.CreateCondBr(success, contBB, opBB);
+    Builder.SetInsertPoint(contBB);
+    return LHSLV;
+  }
+
   // Store the result value into the LHS lvalue. Bit-fields are handled
   // specially because the result is altered by the store, i.e., [C99 6.5.16p1]

@@ -2029,13 +2029,14 @@ public:
   /// the LLVM value representation.
   void EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
                          bool Volatile, unsigned Alignment, QualType Ty,
-                         llvm::MDNode *TBAAInfo = 0);
+                         llvm::MDNode *TBAAInfo = 0, bool isInit=false);

   /// EmitStoreOfScalar - Store a scalar value to an address, taking
   /// care to appropriately convert from the memory representation to
   /// the LLVM value representation.  The l-value must be a simple
-  /// l-value.
-  void EmitStoreOfScalar(llvm::Value *value, LValue lvalue);
+  /// l-value.  The isInit flag indicates whether this is an initialization.
+  /// If so, atomic qualifiers are ignored and the store is always non-atomic.
+  void EmitStoreOfScalar(llvm::Value *value, LValue lvalue, bool isInit=false);

   /// EmitLoadOfLValue - Given an expression that represents a value lvalue,
   /// this method emits the address of the lvalue, then loads the result as an

@@ -2047,7 +2048,7 @@ public:
   /// EmitStoreThroughLValue - Store the specified rvalue into the specified
   /// lvalue, where both are guaranteed to the have the same type, and that type
   /// is 'Ty'.
-  void EmitStoreThroughLValue(RValue Src, LValue Dst);
+  void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit=false);
   void EmitStoreThroughExtVectorComponentLValue(RValue Src, LValue Dst);

   /// EmitStoreThroughLValue - Store Src into Dst with same constraints as

@@ -325,6 +325,14 @@ static void InitializePredefinedMacros(const TargetInfo &TI,
   Builder.defineMacro("__GNUC__", "4");
   Builder.defineMacro("__GXX_ABI_VERSION", "1002");

+  // Define macros for the C11 / C++11 memory orderings
+  Builder.defineMacro("__ATOMIC_RELAXED", "0");
+  Builder.defineMacro("__ATOMIC_CONSUME", "1");
+  Builder.defineMacro("__ATOMIC_ACQUIRE", "2");
+  Builder.defineMacro("__ATOMIC_RELEASE", "3");
+  Builder.defineMacro("__ATOMIC_ACQ_REL", "4");
+  Builder.defineMacro("__ATOMIC_SEQ_CST", "5");
+
   // As sad as it is, enough software depends on the __VERSION__ for version
   // checks that it is necessary to report 4.2.1 (the base GCC version we claim
   // compatibility with) first.

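These values let a C11 <stdatomic.h> (or C++11 <atomic>) header define memory_order portably on top of the predefined macros; a minimal sketch, not the actual header:

  typedef enum {
    memory_order_relaxed = __ATOMIC_RELAXED,
    memory_order_consume = __ATOMIC_CONSUME,
    memory_order_acquire = __ATOMIC_ACQUIRE,
    memory_order_release = __ATOMIC_RELEASE,
    memory_order_acq_rel = __ATOMIC_ACQ_REL,
    memory_order_seq_cst = __ATOMIC_SEQ_CST
  } memory_order;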
@@ -626,12 +626,14 @@ static bool HasFeature(const Preprocessor &PP, const IdentifierInfo *II) {
            .Case("arc_cf_code_audited", true)
            // C11 features
            .Case("c_alignas", LangOpts.C11)
+           .Case("c_atomic", LangOpts.C11)
            .Case("c_generic_selections", LangOpts.C11)
            .Case("c_static_assert", LangOpts.C11)
            // C++0x features
            .Case("cxx_access_control_sfinae", LangOpts.CPlusPlus0x)
            .Case("cxx_alias_templates", LangOpts.CPlusPlus0x)
            .Case("cxx_alignas", LangOpts.CPlusPlus0x)
+           .Case("cxx_atomic", LangOpts.CPlusPlus0x)
            .Case("cxx_attributes", LangOpts.CPlusPlus0x)
            .Case("cxx_auto_type", LangOpts.CPlusPlus0x)
            //.Case("cxx_constexpr", false);

@@ -724,9 +726,11 @@ static bool HasExtension(const Preprocessor &PP, const IdentifierInfo *II) {
   return llvm::StringSwitch<bool>(II->getName())
            // C11 features supported by other languages as extensions.
            .Case("c_alignas", true)
+           .Case("c_atomic", true)
            .Case("c_generic_selections", true)
            .Case("c_static_assert", true)
            // C++0x features supported by other languages as extensions.
+           .Case("cxx_atomic", LangOpts.CPlusPlus)
            .Case("cxx_deleted_functions", LangOpts.CPlusPlus)
            .Case("cxx_explicit_conversions", LangOpts.CPlusPlus)
            .Case("cxx_inline_namespaces", LangOpts.CPlusPlus)

@@ -1897,6 +1897,11 @@ void CastOperation::CheckCStyleCast() {
   if (SrcExpr.isInvalid())
     return;
   QualType SrcType = SrcExpr.get()->getType();
+
+  // You can cast an _Atomic(T) to anything you can cast a T to.
+  if (const AtomicType *AtomicSrcType = SrcType->getAs<AtomicType>())
+    SrcType = AtomicSrcType->getValueType();
+
   assert(!SrcType->isPlaceholderType());

   if (Self.RequireCompleteType(OpRange.getBegin(), DestType,

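In practice this means an explicit cast now looks through the _Atomic qualifier; a small illustrative example:

  _Atomic(int) ai;
  double widen(void) {
    return (double)ai;   /* treated as a cast from int to double */
  }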
@@ -277,6 +277,8 @@ Sema::CheckBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
     return SemaAtomicOpsOverloaded(move(TheCallResult), AtomicExpr::Load);
   case Builtin::BI__atomic_store:
     return SemaAtomicOpsOverloaded(move(TheCallResult), AtomicExpr::Store);
+  case Builtin::BI__atomic_init:
+    return SemaAtomicOpsOverloaded(move(TheCallResult), AtomicExpr::Init);
   case Builtin::BI__atomic_exchange:
     return SemaAtomicOpsOverloaded(move(TheCallResult), AtomicExpr::Xchg);
   case Builtin::BI__atomic_compare_exchange_strong:

@@ -538,6 +540,8 @@ Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult, AtomicExpr::AtomicOp Op)
     NumVals = 2;
     NumOrders = 2;
   }
+  if (Op == AtomicExpr::Init)
+    NumOrders = 0;

   if (TheCall->getNumArgs() < NumVals+NumOrders+1) {
     Diag(TheCall->getLocEnd(), diag::err_typecheck_call_too_few_args)

@@ -600,7 +604,7 @@ Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult, AtomicExpr::AtomicOp Op)
   }

   QualType ResultType = ValType;
-  if (Op == AtomicExpr::Store)
+  if (Op == AtomicExpr::Store || Op == AtomicExpr::Init)
     ResultType = Context.VoidTy;
   else if (Op == AtomicExpr::CmpXchgWeak || Op == AtomicExpr::CmpXchgStrong)
     ResultType = Context.BoolTy;

@@ -641,6 +645,8 @@ Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult, AtomicExpr::AtomicOp Op)
   SubExprs.push_back(Ptr);
   if (Op == AtomicExpr::Load) {
     SubExprs.push_back(TheCall->getArg(1)); // Order
+  } else if (Op == AtomicExpr::Init) {
+    SubExprs.push_back(TheCall->getArg(1)); // Val1
   } else if (Op != AtomicExpr::CmpXchgWeak && Op != AtomicExpr::CmpXchgStrong) {
     SubExprs.push_back(TheCall->getArg(2)); // Order
     SubExprs.push_back(TheCall->getArg(1)); // Val1

@@ -4066,6 +4066,11 @@ CastKind Sema::PrepareScalarCast(ExprResult &Src, QualType DestTy) {
   // pointers.  Everything else should be possible.

   QualType SrcTy = Src.get()->getType();
+  if (const AtomicType *SrcAtomicTy = SrcTy->getAs<AtomicType>())
+    SrcTy = SrcAtomicTy->getValueType();
+  if (const AtomicType *DestAtomicTy = DestTy->getAs<AtomicType>())
+    DestTy = DestAtomicTy->getValueType();
+
   if (Context.hasSameUnqualifiedType(SrcTy, DestTy))
     return CK_NoOp;

@@ -5358,9 +5363,6 @@ Sema::CheckAssignmentConstraints(QualType LHSType, ExprResult &RHS,
   LHSType = Context.getCanonicalType(LHSType).getUnqualifiedType();
   RHSType = Context.getCanonicalType(RHSType).getUnqualifiedType();

-  // We can't do assignment from/to atomics yet.
-  if (LHSType->isAtomicType())
-    return Incompatible;
-
   // Common case: no conversion required.
   if (LHSType == RHSType) {

@@ -5368,6 +5370,21 @@ Sema::CheckAssignmentConstraints(QualType LHSType, ExprResult &RHS,
     return Compatible;
   }

+  if (const AtomicType *AtomicTy = dyn_cast<AtomicType>(LHSType)) {
+    if (AtomicTy->getValueType() == RHSType) {
+      Kind = CK_NonAtomicToAtomic;
+      return Compatible;
+    }
+  }
+
+  if (const AtomicType *AtomicTy = dyn_cast<AtomicType>(RHSType)) {
+    if (AtomicTy->getValueType() == LHSType) {
+      Kind = CK_AtomicToNonAtomic;
+      return Compatible;
+    }
+  }
+
   // If the left-hand side is a reference type, then we are in a
   // (rare!) case where we've allowed the use of references in C,
   // e.g., as a parameter type in a built-in function. In this case,

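With these checks, plain assignment converts implicitly in both directions; a small illustrative example:

  _Atomic(int) a;
  int plain;

  void assign(void) {
    a = plain;    /* CK_NonAtomicToAtomic: becomes a seq_cst atomic store */
    plain = a;    /* CK_AtomicToNonAtomic: becomes a seq_cst atomic load */
  }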
@@ -5906,9 +5923,15 @@ QualType Sema::CheckMultiplyDivideOperands(ExprResult &LHS, ExprResult &RHS,
   if (LHS.isInvalid() || RHS.isInvalid())
     return QualType();

   if (!LHS.get()->getType()->isArithmeticType() ||
-      !RHS.get()->getType()->isArithmeticType())
+      !RHS.get()->getType()->isArithmeticType()) {
+    if (IsCompAssign &&
+        LHS.get()->getType()->isAtomicType() &&
+        RHS.get()->getType()->isArithmeticType())
+      return compType;
     return InvalidOperands(Loc, LHS, RHS);
+  }

   // Check for division by zero.
   if (IsDiv &&

@@ -6134,6 +6157,12 @@ QualType Sema::CheckAdditionOperands( // C99 6.5.6
     return compType;
   }

+  if (LHS.get()->getType()->isAtomicType() &&
+      RHS.get()->getType()->isArithmeticType()) {
+    *CompLHSTy = LHS.get()->getType();
+    return compType;
+  }
+
   // Put any potential pointer into PExp
   Expr* PExp = LHS.get(), *IExp = RHS.get();
   if (IExp->getType()->isAnyPointerType())

@@ -6194,6 +6223,12 @@ QualType Sema::CheckSubtractionOperands(ExprResult &LHS, ExprResult &RHS,
     return compType;
   }

+  if (LHS.get()->getType()->isAtomicType() &&
+      RHS.get()->getType()->isArithmeticType()) {
+    *CompLHSTy = LHS.get()->getType();
+    return compType;
+  }
+
   // Either ptr - int or ptr - ptr.
   if (LHS.get()->getType()->isAnyPointerType()) {
     QualType lpointee = LHS.get()->getType()->getPointeeType();

@@ -7290,6 +7325,12 @@ static QualType CheckIncrementDecrementOperand(Sema &S, Expr *Op,
     return S.Context.DependentTy;

   QualType ResType = Op->getType();
+  // Atomic types can be used for increment / decrement where the non-atomic
+  // versions can, so ignore the _Atomic() specifier for the purpose of
+  // checking.
+  if (const AtomicType *ResAtomicType = ResType->getAs<AtomicType>())
+    ResType = ResAtomicType->getValueType();
+
   assert(!ResType.isNull() && "no type for increment/decrement expression");

   if (S.getLangOptions().CPlusPlus && ResType->isBooleanType()) {

@@ -218,6 +218,11 @@ void ExprEngine::VisitCast(const CastExpr *CastE, const Expr *Ex,
       case CK_ARCConsumeObject:
       case CK_ARCReclaimReturnedObject:
       case CK_ARCExtendBlockObject: // Fall-through.
+      // The analyser can ignore atomic casts for now, although some future
+      // checkers may want to make certain that you're not modifying the same
+      // value through atomic and nonatomic pointers.
+      case CK_AtomicToNonAtomic:
+      case CK_NonAtomicToAtomic:
       // True no-ops.
       case CK_NoOp:
       case CK_FunctionToPointerDecay: {

@@ -0,0 +1,14 @@
+// RUN: %clang_cc1 -emit-llvm %s -o - | FileCheck %s
+
+// Check that no atomic operations are used in any initialisation of _Atomic
+// types.
+
+_Atomic(int) i = 42;
+
+void foo()
+{
+  _Atomic(int) j = 12; // CHECK: store
+                       // CHECK-NOT: atomic
+  __atomic_init(&j, 42); // CHECK: store
+                         // CHECK-NOT: atomic
+}

@@ -0,0 +1,14 @@
+// RUN: %clang_cc1 -emit-llvm %s -o - | FileCheck %s
+
+void foo(void)
+{
+  _Atomic(int) i = 0;
+  // Check that multiply / divides on atomics produce a cmpxchg loop
+  i *= 2; // CHECK: cmpxchg
+  i /= 2; // CHECK: cmpxchg
+  // These should be emitting atomicrmw instructions, but they aren't yet
+  i += 2; // CHECK: cmpxchg
+  i -= 2; // CHECK: cmpxchg
+  i++; // CHECK: cmpxchg
+  i--; // CHECK: cmpxchg
+}

@@ -1,6 +1,15 @@
 // RUN: %clang_cc1 -E -std=c1x %s -o - | FileCheck --check-prefix=CHECK-1X %s
 // RUN: %clang_cc1 -E %s -o - | FileCheck --check-prefix=CHECK-NO-1X %s

+#if __has_feature(c_atomic)
+int has_atomic();
+#else
+int no_atomic();
+#endif
+
+// CHECK-1X: has_atomic
+// CHECK-NO-1X: no_atomic
+
 #if __has_feature(c_static_assert)
 int has_static_assert();
 #else

@@ -1,6 +1,15 @@
 // RUN: %clang_cc1 -E -std=c++11 %s -o - | FileCheck --check-prefix=CHECK-0X %s
 // RUN: %clang_cc1 -E %s -o - | FileCheck --check-prefix=CHECK-NO-0X %s

+#if __has_feature(cxx_atomic)
+int has_atomic();
+#else
+int no_atomic();
+#endif
+
+// CHECK-0X: has_atomic
+// CHECK-NO-0X: no_atomic
+
 #if __has_feature(cxx_lambdas)
 int has_lambdas();
 #else