From 4b72fddd9914d76370704c79f5fdedfbc6c6dedc Mon Sep 17 00:00:00 2001
From: Eli Friedman
Date: Fri, 14 Oct 2011 20:59:01 +0000
Subject: [PATCH] Misc fixes for atomics. Biggest fix is doing alignment
 correctly for _Atomic types.

llvm-svn: 142002
---
 clang/include/clang/Basic/Builtins.def |  4 ++--
 clang/include/clang/Basic/TargetInfo.h |  9 +++++++++
 clang/lib/AST/ASTContext.cpp           | 18 +++++++++++++++---
 clang/lib/AST/StmtProfile.cpp          |  1 +
 clang/lib/Basic/TargetInfo.cpp         |  1 +
 clang/lib/Basic/Targets.cpp            | 18 ++++++++++++++++++
 clang/lib/CodeGen/CGExpr.cpp           | 14 ++++++--------
 clang/test/CodeGen/atomic-ops.c        |  4 +---
 8 files changed, 53 insertions(+), 16 deletions(-)

diff --git a/clang/include/clang/Basic/Builtins.def b/clang/include/clang/Basic/Builtins.def
index 4f89f81341b6..f73065272b45 100644
--- a/clang/include/clang/Basic/Builtins.def
+++ b/clang/include/clang/Basic/Builtins.def
@@ -596,8 +596,8 @@ BUILTIN(__atomic_fetch_sub, "v.", "t")
 BUILTIN(__atomic_fetch_and, "v.", "t")
 BUILTIN(__atomic_fetch_or, "v.", "t")
 BUILTIN(__atomic_fetch_xor, "v.", "t")
-BUILTIN(__atomic_thread_fence, "vi", "t")
-BUILTIN(__atomic_signal_fence, "vi", "t")
+BUILTIN(__atomic_thread_fence, "vi", "n")
+BUILTIN(__atomic_signal_fence, "vi", "n")
 
 // Non-overloaded atomic builtins.
 BUILTIN(__sync_synchronize, "v.", "n")
diff --git a/clang/include/clang/Basic/TargetInfo.h b/clang/include/clang/Basic/TargetInfo.h
index e5a732a4cceb..0a7d2f1ea081 100644
--- a/clang/include/clang/Basic/TargetInfo.h
+++ b/clang/include/clang/Basic/TargetInfo.h
@@ -75,6 +75,7 @@ protected:
   unsigned char LargeArrayMinWidth, LargeArrayAlign;
   unsigned char LongWidth, LongAlign;
   unsigned char LongLongWidth, LongLongAlign;
+  unsigned char MaxAtomicPromoteWidth, MaxAtomicInlineWidth;
   const char *DescriptionString;
   const char *UserLabelPrefix;
   const char *MCountName;
@@ -246,6 +247,14 @@ public:
   unsigned getLargeArrayMinWidth() const { return LargeArrayMinWidth; }
   unsigned getLargeArrayAlign() const { return LargeArrayAlign; }
 
+  /// getMaxAtomicPromoteWidth - Return the maximum width lock-free atomic
+  /// operation which will ever be supported for the given target.
+  unsigned getMaxAtomicPromoteWidth() const { return MaxAtomicPromoteWidth; }
+  /// getMaxAtomicInlineWidth - Return the maximum width lock-free atomic
+  /// operation which can be inlined given the supported features of the
+  /// given target.
+  unsigned getMaxAtomicInlineWidth() const { return MaxAtomicInlineWidth; }
+
   /// getIntMaxTWidth - Return the size of intmax_t and uintmax_t for this
   /// target, in bits.
   unsigned getIntMaxTWidth() const {
diff --git a/clang/lib/AST/ASTContext.cpp b/clang/lib/AST/ASTContext.cpp
index b625655e45f9..ae96dfd14816 100644
--- a/clang/lib/AST/ASTContext.cpp
+++ b/clang/lib/AST/ASTContext.cpp
@@ -1064,13 +1064,25 @@ ASTContext::getTypeInfo(const Type *T) const {
   }
 
   case Type::Atomic: {
-    // FIXME: The alignment needs to be "fixed".
-    return getTypeInfo(cast<AtomicType>(T)->getValueType());
+    std::pair<uint64_t, unsigned> Info
+      = getTypeInfo(cast<AtomicType>(T)->getValueType());
+    Width = Info.first;
+    Align = Info.second;
+    if (Width != 0 && Width <= Target->getMaxAtomicPromoteWidth() &&
+        llvm::isPowerOf2_64(Width)) {
+      // We can potentially perform lock-free atomic operations for this
+      // type; promote the alignment appropriately.
+      // FIXME: We could potentially promote the width here as well...
+      // is that worthwhile? (Non-struct atomic types generally have
+      // power-of-two size anyway, but structs might not. Requires a bit
+      // of implementation work to make sure we zero out the extra bits.)
+      Align = static_cast<unsigned>(Width);
+    }
   }
   }
 
-  assert(Align && (Align & (Align-1)) == 0 && "Alignment must be power of 2");
+  assert(llvm::isPowerOf2_32(Align) && "Alignment must be power of 2");
   return std::make_pair(Width, Align);
 }
diff --git a/clang/lib/AST/StmtProfile.cpp b/clang/lib/AST/StmtProfile.cpp
index df49e843f969..214378a974a6 100644
--- a/clang/lib/AST/StmtProfile.cpp
+++ b/clang/lib/AST/StmtProfile.cpp
@@ -470,6 +470,7 @@ void StmtProfiler::VisitGenericSelectionExpr(const GenericSelectionExpr *S) {
 
 void StmtProfiler::VisitAtomicExpr(const AtomicExpr *S) {
   VisitExpr(S);
+  ID.AddInteger(S->getOp());
 }
 
 static Stmt::StmtClass DecodeOperatorCall(const CXXOperatorCallExpr *S,
diff --git a/clang/lib/Basic/TargetInfo.cpp b/clang/lib/Basic/TargetInfo.cpp
index d5dcf066754b..a9285e6e2dbd 100644
--- a/clang/lib/Basic/TargetInfo.cpp
+++ b/clang/lib/Basic/TargetInfo.cpp
@@ -42,6 +42,7 @@ TargetInfo::TargetInfo(const std::string &T) : Triple(T) {
   LongDoubleAlign = 64;
   LargeArrayMinWidth = 0;
   LargeArrayAlign = 0;
+  MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 0;
   SizeType = UnsignedLong;
   PtrDiffType = SignedLong;
   IntMaxType = SignedLongLong;
diff --git a/clang/lib/Basic/Targets.cpp b/clang/lib/Basic/Targets.cpp
index 648ed73f0fff..889518bb2a99 100644
--- a/clang/lib/Basic/Targets.cpp
+++ b/clang/lib/Basic/Targets.cpp
@@ -1984,6 +1984,11 @@ public:
     RealTypeUsesObjCFPRet = ((1 << TargetInfo::Float) |
                              (1 << TargetInfo::Double) |
                              (1 << TargetInfo::LongDouble));
+
+    // x86-32 has atomics up to 8 bytes
+    // FIXME: Check that we actually have cmpxchg8b before setting
+    // MaxAtomicInlineWidth. (cmpxchg8b is an i586 instruction.)
+    MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 64;
   }
   virtual const char *getVAListDeclaration() const {
     return "typedef char* __builtin_va_list;";
@@ -2219,6 +2224,12 @@ public:
 
     // Use fpret only for long double.
     RealTypeUsesObjCFPRet = (1 << TargetInfo::LongDouble);
+
+    // x86-64 has atomics up to 16 bytes.
+    // FIXME: Once the backend is fixed, increase MaxAtomicInlineWidth to 128
+    // on CPUs with cmpxchg16b
+    MaxAtomicPromoteWidth = 128;
+    MaxAtomicInlineWidth = 64;
   }
   virtual const char *getVAListDeclaration() const {
     return "typedef struct __va_list_tag {"
@@ -2391,6 +2402,10 @@ public:
 
     // ARM targets default to using the ARM C++ ABI.
     CXXABI = CXXABI_ARM;
+
+    // ARM has atomics up to 8 bytes
+    // FIXME: Set MaxAtomicInlineWidth if we have the feature v6e
+    MaxAtomicPromoteWidth = 64;
   }
   virtual const char *getABI() const { return ABI.c_str(); }
   virtual bool setABI(const std::string &Name) {
@@ -2708,6 +2723,9 @@ public:
   DarwinARMTargetInfo(const std::string& triple)
     : DarwinTargetInfo<ARMTargetInfo>(triple) {
     HasAlignMac68kSupport = true;
+    // iOS always has 64-bit atomic instructions.
+    // FIXME: This should be based off of the target features in ARMTargetInfo.
+    MaxAtomicInlineWidth = 64;
   }
 };
 } // end anonymous namespace.
diff --git a/clang/lib/CodeGen/CGExpr.cpp b/clang/lib/CodeGen/CGExpr.cpp
index ad27506d56ea..bd4e553991b3 100644
--- a/clang/lib/CodeGen/CGExpr.cpp
+++ b/clang/lib/CodeGen/CGExpr.cpp
@@ -2567,9 +2567,9 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
   uint64_t Size = sizeChars.getQuantity();
   CharUnits alignChars = getContext().getTypeAlignInChars(AtomicTy);
   unsigned Align = alignChars.getQuantity();
-  // FIXME: Bound on Size should not be hardcoded.
- bool UseLibcall = (sizeChars != alignChars || !llvm::isPowerOf2_64(Size) || - Size > 8); + unsigned MaxInlineWidth = + getContext().getTargetInfo().getMaxAtomicInlineWidth(); + bool UseLibcall = (Size != Align || Size > MaxInlineWidth); llvm::Value *Ptr, *Order, *OrderFail = 0, *Val1 = 0, *Val2 = 0; Ptr = EmitScalarExpr(E->getPtr()); @@ -2585,11 +2585,9 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) { // is not the same as adding 1 to a uintptr_t. QualType Val1Ty = E->getVal1()->getType(); llvm::Value *Val1Scalar = EmitScalarExpr(E->getVal1()); - uint64_t PointeeIncAmt = - getContext().getTypeSizeInChars(MemTy->getPointeeType()).getQuantity(); - llvm::Value *PointeeIncAmtVal = - llvm::ConstantInt::get(Val1Scalar->getType(), PointeeIncAmt); - Val1Scalar = Builder.CreateMul(Val1Scalar, PointeeIncAmtVal); + CharUnits PointeeIncAmt = + getContext().getTypeSizeInChars(MemTy->getPointeeType()); + Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt)); Val1 = CreateMemTemp(Val1Ty, ".atomictmp"); EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Val1, Val1Ty)); } else if (E->getOp() != AtomicExpr::Load) { diff --git a/clang/test/CodeGen/atomic-ops.c b/clang/test/CodeGen/atomic-ops.c index cb3a868b8089..e2904cf10e81 100644 --- a/clang/test/CodeGen/atomic-ops.c +++ b/clang/test/CodeGen/atomic-ops.c @@ -63,9 +63,7 @@ int* fp2(_Atomic(int*) *p) { return __atomic_fetch_add(p, 1, memory_order_relaxed); } -// FIXME: Alignment specification shouldn't be necessary -typedef _Complex float ComplexAligned __attribute((aligned(8))); -_Complex float fc(_Atomic(ComplexAligned) *c) { +_Complex float fc(_Atomic(_Complex float) *c) { // CHECK: @fc // CHECK: atomicrmw xchg i64* return __atomic_exchange(c, 2, memory_order_seq_cst);
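
As a rough illustration of the behavior the alignment change above is aiming for (this sketch is not part of the patch): the small C program below queries the size and alignment of two _Atomic types. The expected figures assume an x86-64 target, where MaxAtomicPromoteWidth is 128 bits; struct three_chars is a made-up type used only to show the non-power-of-two case, which is exactly the case where EmitAtomicExpr still takes the libcall path because size and alignment differ.

/* Illustration only; the printed values assume x86-64 and may differ elsewhere. */
#include <stddef.h>
#include <stdio.h>

struct three_chars { char c[3]; };  /* 3 bytes: width is not a power of two */

int main(void) {
  /* _Complex float is 8 bytes but only 4-byte aligned on its own; as an
     _Atomic type its alignment is promoted to its width, so it can be
     handled with an 8-byte atomicrmw (compare the atomic-ops.c test). */
  printf("%zu %zu\n", sizeof(_Atomic(_Complex float)),
         (size_t)__alignof__(_Atomic(_Complex float)));      /* expect: 8 8 */

  /* A 3-byte struct is not promoted: size stays 3 and alignment stays 1, so
     size != alignment and EmitAtomicExpr falls back to the atomic libcalls. */
  printf("%zu %zu\n", sizeof(_Atomic(struct three_chars)),
         (size_t)__alignof__(_Atomic(struct three_chars)));  /* expect: 3 1 */
  return 0;
}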