Make clang use Acquire loads and Release stores where necessary.

llvm-svn: 139643
Eli Friedman 2011-09-13 21:31:32 +00:00
parent 6eb3a6b192
commit f92b2e0714
5 changed files with 30 additions and 45 deletions
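For orientation, here is a minimal sketch (illustrative, not part of the commit) of the two source constructs whose codegen this touches; `Widget` is a hypothetical class with a dynamic initializer:

    // 1. __sync_lock_release: its null store becomes an atomic Release store.
    int flag;
    void unlock(void) {
      __sync_lock_release(&flag);
    }

    // 2. Thread-safe local statics: the guard-byte test becomes an Acquire load.
    struct Widget { Widget(); };
    Widget &instance(void) {
      static Widget w;
      return w;
    }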

clang/lib/CodeGen/CGBuiltin.cpp

@@ -900,13 +900,14 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
   case Builtin::BI__sync_lock_release_8:
   case Builtin::BI__sync_lock_release_16: {
     Value *Ptr = EmitScalarExpr(E->getArg(0));
-    llvm::Type *ElTy =
+    llvm::Type *ElLLVMTy =
       cast<llvm::PointerType>(Ptr->getType())->getElementType();
     llvm::StoreInst *Store =
-      Builder.CreateStore(llvm::Constant::getNullValue(ElTy), Ptr);
-    // FIXME: This is completely, utterly wrong; it only even sort-of works
-    // on x86.
-    Store->setVolatile(true);
+      Builder.CreateStore(llvm::Constant::getNullValue(ElLLVMTy), Ptr);
+    QualType ElTy = E->getArg(0)->getType()->getPointeeType();
+    CharUnits StoreSize = getContext().getTypeSizeInChars(ElTy);
+    Store->setAlignment(StoreSize.getQuantity());
+    Store->setAtomic(llvm::Release);
     return RValue::get(0);
   }
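For reference, the usual pairing this builtin serves, as a small assumed-usage sketch (names are illustrative): `__sync_lock_test_and_set` is the acquire half, and the Release store now emitted above guarantees that writes made inside the critical section are visible before the lock word reads 0.

    static int lock_word;

    static void spin_lock(void) {
      // Acquire half: loop until the previous value was 0 (lock was free).
      while (__sync_lock_test_and_set(&lock_word, 1))
        ;
    }

    static void spin_unlock(void) {
      // Release half: now lowers to
      //   store atomic i32 0, i32* @lock_word release, align 4
      // instead of a volatile store.
      __sync_lock_release(&lock_word);
    }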

clang/lib/CodeGen/ItaniumCXXABI.cpp

@@ -1130,20 +1130,27 @@ void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
   } else {
     // Load the first byte of the guard variable.
     llvm::Type *PtrTy = Builder.getInt8PtrTy();
-    llvm::Value *V =
+    llvm::LoadInst *LI =
       Builder.CreateLoad(Builder.CreateBitCast(GuardVariable, PtrTy), "tmp");
-    IsInitialized = Builder.CreateIsNull(V, "guard.uninitialized");
+    LI->setAlignment(1);
+    // Itanium ABI:
+    //   An implementation supporting thread-safety on multiprocessor
+    //   systems must also guarantee that references to the initialized
+    //   object do not occur before the load of the initialization flag.
+    //
+    // In LLVM, we do this by marking the load Acquire.
+    if (threadsafe)
+      LI->setAtomic(llvm::Acquire);
+    IsInitialized = Builder.CreateIsNull(LI, "guard.uninitialized");
   }

   llvm::BasicBlock *InitCheckBlock = CGF.createBasicBlock("init.check");
   llvm::BasicBlock *EndBlock = CGF.createBasicBlock("init.end");
-  llvm::BasicBlock *NoCheckBlock = EndBlock;
-  if (threadsafe) NoCheckBlock = CGF.createBasicBlock("init.barrier");

   // Check if the first byte of the guard variable is zero.
-  Builder.CreateCondBr(IsInitialized, InitCheckBlock, NoCheckBlock);
+  Builder.CreateCondBr(IsInitialized, InitCheckBlock, EndBlock);
   CGF.EmitBlock(InitCheckBlock);
@@ -1177,23 +1184,5 @@ void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
     Builder.CreateStore(llvm::ConstantInt::get(GuardTy, 1), GuardVariable);
   }

-  // Emit an acquire memory barrier if using thread-safe statics:
-  //   Itanium ABI:
-  //     An implementation supporting thread-safety on multiprocessor
-  //     systems must also guarantee that references to the initialized
-  //     object do not occur before the load of the initialization flag.
-  if (threadsafe) {
-    Builder.CreateBr(EndBlock);
-    CGF.EmitBlock(NoCheckBlock);
-    llvm::Value *_false = Builder.getFalse();
-    llvm::Value *_true = Builder.getTrue();
-    Builder.CreateCall5(CGM.getIntrinsic(llvm::Intrinsic::memory_barrier),
-                        /* load-load, load-store */ _true, _true,
-                        /* store-load, store-store */ _false, _false,
-                        /* device or I/O */ _false);
-  }
-
   CGF.EmitBlock(EndBlock);
 }
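The resulting control flow is ordinary double-checked initialization. An illustrative C++11 equivalent (the emitted code really tests the first byte of the Itanium guard word and serializes through __cxa_guard_acquire/__cxa_guard_release; the mutex below merely stands in for that):

    #include <atomic>
    #include <mutex>

    static std::atomic<unsigned char> guard_byte; // stands in for the guard's first byte
    static std::mutex guard_mutex;                // stands in for __cxa_guard_acquire

    void init_once(void (*ctor)()) {
      // Fast path: the Acquire load replaces the old plain load plus trailing
      // memory barrier, so reads of the object cannot float above this check.
      if (guard_byte.load(std::memory_order_acquire) == 0) {
        std::lock_guard<std::mutex> lock(guard_mutex);
        if (guard_byte.load(std::memory_order_relaxed) == 0) {
          ctor();
          guard_byte.store(1, std::memory_order_release);
        }
      }
    }

Because the ordering now lives on the load itself, the separate init.barrier block and the memory_barrier intrinsic call above can simply be deleted.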

clang/test/CodeGen/Atomics.c

@@ -192,13 +192,12 @@ void test_lock (void)
   __sync_synchronize (); // CHECK: fence seq_cst

-  // FIXME: These are wrong!
-  __sync_lock_release (&sc); // CHECK: store volatile
-  __sync_lock_release (&uc); // CHECK: store volatile
-  __sync_lock_release (&ss); // CHECK: store volatile
-  __sync_lock_release (&us); // CHECK: store volatile
-  __sync_lock_release (&si); // CHECK: store volatile
-  __sync_lock_release (&ui); // CHECK: store volatile
-  __sync_lock_release (&sll); // CHECK: store volatile
-  __sync_lock_release (&ull); // CHECK: store volatile
+  __sync_lock_release (&sc); // CHECK: store atomic {{.*}} release, align 1
+  __sync_lock_release (&uc); // CHECK: store atomic {{.*}} release, align 1
+  __sync_lock_release (&ss); // CHECK: store atomic {{.*}} release, align 2
+  __sync_lock_release (&us); // CHECK: store atomic {{.*}} release, align 2
+  __sync_lock_release (&si); // CHECK: store atomic {{.*}} release, align 4
+  __sync_lock_release (&ui); // CHECK: store atomic {{.*}} release, align 4
+  __sync_lock_release (&sll); // CHECK: store atomic {{.*}} release, align 8
+  __sync_lock_release (&ull); // CHECK: store atomic {{.*}} release, align 8
 }
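The alignment in each new CHECK line tracks the size of the pointee, per the getTypeSizeInChars call in CGBuiltin.cpp above; a quick sketch (not from the test):

    void release_short(short *p) {
      __sync_lock_release(p);   // store atomic i16 0, i16* %p release, align 2
    }
    void release_llong(long long *p) {
      __sync_lock_release(p);   // store atomic i64 0, i64* %p release, align 8
    }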

clang/test/CodeGen/atomic.c

@@ -75,8 +75,7 @@ int atomic(void) {
   // CHECK: cmpxchg i32* null, i32 0, i32 0 seq_cst

   __sync_lock_release(&val);
-  // FIXME: WRONG!
-  // CHECK: store volatile i32 0, i32*
+  // CHECK: store atomic {{.*}} release, align 4

   __sync_synchronize ();
   // CHECK: fence seq_cst
@@ -88,8 +87,7 @@ int atomic(void) {
 void release_return(int *lock) {
   // Ensure this is actually returning void all the way through.
   return __sync_lock_release(lock);
-  // FIXME: WRONG!
-  // CHECK: store volatile i32 0, i32*
+  // CHECK: store atomic {{.*}} release, align 4
 }

clang/test/CodeGenCXX/static-init.cpp

@@ -12,13 +12,11 @@ struct A {
 };

 void f() {
+  // CHECK: load atomic i8* bitcast (i64* @_ZGVZ1fvE1a to i8*) acquire, align 1
   // CHECK: call i32 @__cxa_guard_acquire
   // CHECK: call void @_ZN1AC1Ev
   // CHECK: call i32 @__cxa_atexit(void (i8*)* bitcast (void (%struct.A*)* @_ZN1AD1Ev to void (i8*)*), i8* getelementptr inbounds (%struct.A* @_ZZ1fvE1a, i32 0, i32 0), i8* bitcast (i8** @__dso_handle to i8*))
   // CHECK: call void @__cxa_guard_release
-  // rdar://problem/9496726
-  // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 false, i1 false, i1 false)
   static A a;
 }
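The bitcast-to-i8* and "align 1" in the new CHECK line follow from the Itanium ABI guard layout: a 64-bit guard word whose first (lowest-addressed) byte is the initialization flag. Roughly (illustrative sketch only):

    // Layout of the guard object @_ZGVZ1fvE1a; codegen acquire-loads just
    // the first byte of this 64-bit word.
    union GuardWord {
      unsigned long long whole;   // the i64 global emitted for the guard
      unsigned char initialized;  // first byte: nonzero once 'a' is constructed
    };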