[TSan] Add option for emitting compound read-write instrumentation

This adds the option -tsan-compound-read-before-write, which emits
different instrumentation for a write when the read preceding that
write has been omitted from instrumentation. The default TSan runtime
does not currently support this instrumentation, so the option is
disabled by default.

Alternative runtimes, such as the Kernel Concurrency Sanitizer (KCSAN),
can make use of this feature. Indeed, the initial motivation is KCSAN:
the Linux kernel still has a large number of unaddressed data races, so
distinguishing compound operations improves both performance and
reporting. Compound instrumentation is typically emitted for compound
operations such as ++, +=, and |=. Because such data races produce
distinct reports, they are easier to notice and can automatically be
bucketed differently by CI systems.
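
To illustrate with a hypothetical C-level example (the runtime call
names for the 4-byte case are taken from the test file below):

    int counter;                  // shared; races when updated concurrently
    void bump() { counter++; }    // IR: load i32, add, store i32

    // Default: read-before-write is omitted, only
    //   __tsan_write4(&counter);
    // With -tsan-instrument-read-before-write:
    //   __tsan_read4(&counter); __tsan_write4(&counter);
    // With -tsan-compound-read-before-write:
    //   __tsan_read_write4(&counter);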

Reviewed By: dvyukov, glider

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D83867
commit 785d41a261
parent 0db3ac3354
Author: Marco Elver
Date:   2020-07-17 08:53:56 +02:00

2 changed files with 177 additions and 66 deletions

llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp

@@ -19,7 +19,8 @@
 //===----------------------------------------------------------------------===//

 #include "llvm/Transforms/Instrumentation/ThreadSanitizer.h"
-#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/Optional.h"
 #include "llvm/ADT/SmallString.h"
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/ADT/Statistic.h"
@@ -52,30 +53,36 @@ using namespace llvm;
 #define DEBUG_TYPE "tsan"

 static cl::opt<bool> ClInstrumentMemoryAccesses(
     "tsan-instrument-memory-accesses", cl::init(true),
     cl::desc("Instrument memory accesses"), cl::Hidden);
-static cl::opt<bool> ClInstrumentFuncEntryExit(
-    "tsan-instrument-func-entry-exit", cl::init(true),
-    cl::desc("Instrument function entry and exit"), cl::Hidden);
+static cl::opt<bool>
+    ClInstrumentFuncEntryExit("tsan-instrument-func-entry-exit", cl::init(true),
+                              cl::desc("Instrument function entry and exit"),
+                              cl::Hidden);
 static cl::opt<bool> ClHandleCxxExceptions(
     "tsan-handle-cxx-exceptions", cl::init(true),
     cl::desc("Handle C++ exceptions (insert cleanup blocks for unwinding)"),
     cl::Hidden);
-static cl::opt<bool> ClInstrumentAtomics(
-    "tsan-instrument-atomics", cl::init(true),
-    cl::desc("Instrument atomics"), cl::Hidden);
+static cl::opt<bool> ClInstrumentAtomics("tsan-instrument-atomics",
+                                         cl::init(true),
+                                         cl::desc("Instrument atomics"),
+                                         cl::Hidden);
 static cl::opt<bool> ClInstrumentMemIntrinsics(
     "tsan-instrument-memintrinsics", cl::init(true),
     cl::desc("Instrument memintrinsics (memset/memcpy/memmove)"), cl::Hidden);
 static cl::opt<bool> ClDistinguishVolatile(
     "tsan-distinguish-volatile", cl::init(false),
     cl::desc("Emit special instrumentation for accesses to volatiles"),
     cl::Hidden);
 static cl::opt<bool> ClInstrumentReadBeforeWrite(
     "tsan-instrument-read-before-write", cl::init(false),
     cl::desc("Do not eliminate read instrumentation for read-before-writes"),
     cl::Hidden);
+static cl::opt<bool> ClCompoundReadBeforeWrite(
+    "tsan-compound-read-before-write", cl::init(false),
+    cl::desc("Emit special compound instrumentation for reads-before-writes"),
+    cl::Hidden);

 STATISTIC(NumInstrumentedReads, "Number of instrumented reads");
 STATISTIC(NumInstrumentedWrites, "Number of instrumented writes");
@@ -101,15 +108,37 @@ namespace {
 /// ensures the __tsan_init function is in the list of global constructors for
 /// the module.
 struct ThreadSanitizer {
+  ThreadSanitizer() {
+    // Sanity check options and warn user.
+    if (ClInstrumentReadBeforeWrite && ClCompoundReadBeforeWrite) {
+      errs()
+          << "warning: Option -tsan-compound-read-before-write has no effect "
+             "when -tsan-instrument-read-before-write is set.\n";
+    }
+  }
+
   bool sanitizeFunction(Function &F, const TargetLibraryInfo &TLI);

 private:
+  // Internal Instruction wrapper that contains more information about the
+  // Instruction from prior analysis.
+  struct InstructionInfo {
+    // Instrumentation emitted for this instruction is for a compounded set of
+    // read and write operations in the same basic block.
+    static constexpr unsigned kCompoundRW = (1U << 0);
+
+    explicit InstructionInfo(Instruction *Inst) : Inst(Inst) {}
+
+    Instruction *Inst;
+    unsigned Flags = 0;
+  };
+
   void initialize(Module &M);
-  bool instrumentLoadOrStore(Instruction *I, const DataLayout &DL);
+  bool instrumentLoadOrStore(const InstructionInfo &II, const DataLayout &DL);
   bool instrumentAtomic(Instruction *I, const DataLayout &DL);
   bool instrumentMemIntrinsic(Instruction *I);
   void chooseInstructionsToInstrument(SmallVectorImpl<Instruction *> &Local,
-                                      SmallVectorImpl<Instruction *> &All,
+                                      SmallVectorImpl<InstructionInfo> &All,
                                       const DataLayout &DL);
   bool addrPointsToConstantData(Value *Addr);
   int getMemoryAccessFuncIndex(Value *Addr, const DataLayout &DL);
@@ -130,6 +159,8 @@
   FunctionCallee TsanVolatileWrite[kNumberOfAccessSizes];
   FunctionCallee TsanUnalignedVolatileRead[kNumberOfAccessSizes];
   FunctionCallee TsanUnalignedVolatileWrite[kNumberOfAccessSizes];
+  FunctionCallee TsanCompoundRW[kNumberOfAccessSizes];
+  FunctionCallee TsanUnalignedCompoundRW[kNumberOfAccessSizes];
   FunctionCallee TsanAtomicLoad[kNumberOfAccessSizes];
   FunctionCallee TsanAtomicStore[kNumberOfAccessSizes];
   FunctionCallee TsanAtomicRMW[AtomicRMWInst::LAST_BINOP + 1]
@@ -268,6 +299,15 @@ void ThreadSanitizer::initialize(Module &M) {
     TsanUnalignedVolatileWrite[i] = M.getOrInsertFunction(
         UnalignedVolatileWriteName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());

+    SmallString<64> CompoundRWName("__tsan_read_write" + ByteSizeStr);
+    TsanCompoundRW[i] = M.getOrInsertFunction(
+        CompoundRWName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());
+
+    SmallString<64> UnalignedCompoundRWName("__tsan_unaligned_read_write" +
+                                            ByteSizeStr);
+    TsanUnalignedCompoundRW[i] = M.getOrInsertFunction(
+        UnalignedCompoundRWName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());
+
     Type *Ty = Type::getIntNTy(M.getContext(), BitSize);
     Type *PtrTy = Ty->getPointerTo();
     SmallString<32> AtomicLoadName("__tsan_atomic" + BitSizeStr + "_load");
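
Note: the pass only declares these entry points; a runtime that enables
-tsan-compound-read-before-write must define them. Below is a minimal sketch
of matching definitions for the 4-byte variants, with the signature implied
by the getOrInsertFunction calls above (void return, i8* argument); the
bodies are placeholders, not the actual KCSAN implementation:

    // Hypothetical runtime stubs; a real runtime would run its race-detection
    // logic for a combined read+write of the 4 bytes at `addr`.
    extern "C" void __tsan_read_write4(void *addr) {
      // e.g., KCSAN-style: check the access as both a read and a write.
    }
    extern "C" void __tsan_unaligned_read_write4(void *addr) {
      // Same check; `addr` may not be 4-byte aligned.
    }
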
@@ -402,34 +442,42 @@ bool ThreadSanitizer::addrPointsToConstantData(Value *Addr) {
 // 'Local' is a vector of insns within the same BB (no calls between).
 // 'All' is a vector of insns that will be instrumented.
 void ThreadSanitizer::chooseInstructionsToInstrument(
-    SmallVectorImpl<Instruction *> &Local, SmallVectorImpl<Instruction *> &All,
-    const DataLayout &DL) {
-  SmallPtrSet<Value*, 8> WriteTargets;
+    SmallVectorImpl<Instruction *> &Local,
+    SmallVectorImpl<InstructionInfo> &All, const DataLayout &DL) {
+  DenseMap<Value *, size_t> WriteTargets; // Map of addresses to index in All
   // Iterate from the end.
   for (Instruction *I : reverse(Local)) {
-    if (StoreInst *Store = dyn_cast<StoreInst>(I)) {
-      Value *Addr = Store->getPointerOperand();
-      if (!shouldInstrumentReadWriteFromAddress(I->getModule(), Addr))
-        continue;
-      WriteTargets.insert(Addr);
-    } else {
-      LoadInst *Load = cast<LoadInst>(I);
-      Value *Addr = Load->getPointerOperand();
-      if (!shouldInstrumentReadWriteFromAddress(I->getModule(), Addr))
-        continue;
-      if (!ClInstrumentReadBeforeWrite && WriteTargets.count(Addr)) {
-        // We will write to this temp, so no reason to analyze the read.
-        NumOmittedReadsBeforeWrite++;
-        continue;
+    const bool IsWrite = isa<StoreInst>(*I);
+    Value *Addr = IsWrite ? cast<StoreInst>(I)->getPointerOperand()
+                          : cast<LoadInst>(I)->getPointerOperand();
+
+    if (!shouldInstrumentReadWriteFromAddress(I->getModule(), Addr))
+      continue;
+
+    if (!IsWrite) {
+      const auto WriteEntry = WriteTargets.find(Addr);
+      if (!ClInstrumentReadBeforeWrite && WriteEntry != WriteTargets.end()) {
+        auto &WI = All[WriteEntry->second];
+        // If we distinguish volatile accesses and if either the read or write
+        // is volatile, do not omit any instrumentation.
+        const bool AnyVolatile =
+            ClDistinguishVolatile && (cast<LoadInst>(I)->isVolatile() ||
+                                      cast<StoreInst>(WI.Inst)->isVolatile());
+        if (!AnyVolatile) {
+          // We will write to this temp, so no reason to analyze the read.
+          // Mark the write instruction as compound.
+          WI.Flags |= InstructionInfo::kCompoundRW;
+          NumOmittedReadsBeforeWrite++;
+          continue;
+        }
       }
+
       if (addrPointsToConstantData(Addr)) {
         // Addr points to some constant data -- it can not race with any writes.
         continue;
       }
     }
-    Value *Addr = isa<StoreInst>(*I)
-        ? cast<StoreInst>(I)->getPointerOperand()
-        : cast<LoadInst>(I)->getPointerOperand();
+
     if (isa<AllocaInst>(GetUnderlyingObject(Addr, DL)) &&
         !PointerMayBeCaptured(Addr, true, true)) {
       // The variable is addressable but not captured, so it cannot be
@@ -438,7 +486,14 @@ void ThreadSanitizer::chooseInstructionsToInstrument(
       NumOmittedNonCaptured++;
       continue;
     }
-    All.push_back(I);
+
+    // Instrument this instruction.
+    All.emplace_back(I);
+    if (IsWrite) {
+      // For read-before-write and compound instrumentation we only need one
+      // write target, and we can override any previous entry if it exists.
+      WriteTargets[Addr] = All.size() - 1;
+    }
   }
   Local.clear();
 }
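
To make the selection loop concrete, here is a small self-contained model of
the reverse scan above (illustrative types and a toy address; the real pass
works on llvm::Instruction* and additionally handles the read-before-write
and volatile options, constant data, and escape analysis):

    #include <cstdio>
    #include <map>
    #include <vector>

    struct Access { bool IsWrite; unsigned Addr; bool CompoundRW = false; };

    int main() {
      // A basic block doing: load %p; add; store %p => {read p, write p}.
      std::vector<Access> Local = {{false, 0xA}, {true, 0xA}};
      std::vector<Access> All;
      std::map<unsigned, size_t> WriteTargets; // address -> index in All

      // Scan from the end so each read sees the writes that follow it.
      for (auto It = Local.rbegin(); It != Local.rend(); ++It) {
        if (!It->IsWrite) {
          auto W = WriteTargets.find(It->Addr);
          if (W != WriteTargets.end()) {
            All[W->second].CompoundRW = true; // fold the read into the write
            continue;                         // omit a separate read entry
          }
        }
        All.push_back(*It);
        if (It->IsWrite)
          WriteTargets[It->Addr] = All.size() - 1;
      }

      for (const Access &A : All) // prints: read_write 0xA
        std::printf("%s 0x%X\n",
                    A.CompoundRW ? "read_write" : A.IsWrite ? "write" : "read",
                    A.Addr);
    }
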
@@ -479,7 +534,7 @@ bool ThreadSanitizer::sanitizeFunction(Function &F,
   if (F.hasFnAttribute(Attribute::Naked))
     return false;
   initialize(*F.getParent());
-  SmallVector<Instruction*, 8> AllLoadsAndStores;
+  SmallVector<InstructionInfo, 8> AllLoadsAndStores;
   SmallVector<Instruction*, 8> LocalLoadsAndStores;
   SmallVector<Instruction*, 8> AtomicAccesses;
   SmallVector<Instruction*, 8> MemIntrinCalls;
@@ -514,8 +569,8 @@ bool ThreadSanitizer::sanitizeFunction(Function &F,

   // Instrument memory accesses only if we want to report bugs in the function.
   if (ClInstrumentMemoryAccesses && SanitizeFunction)
-    for (auto Inst : AllLoadsAndStores) {
-      Res |= instrumentLoadOrStore(Inst, DL);
+    for (const auto &II : AllLoadsAndStores) {
+      Res |= instrumentLoadOrStore(II, DL);
     }

   // Instrument atomic memory accesses in any case (they can be used to
@@ -553,13 +608,12 @@ bool ThreadSanitizer::sanitizeFunction(Function &F,
   return Res;
 }

-bool ThreadSanitizer::instrumentLoadOrStore(Instruction *I,
+bool ThreadSanitizer::instrumentLoadOrStore(const InstructionInfo &II,
                                             const DataLayout &DL) {
-  IRBuilder<> IRB(I);
-  bool IsWrite = isa<StoreInst>(*I);
-  Value *Addr = IsWrite
-      ? cast<StoreInst>(I)->getPointerOperand()
-      : cast<LoadInst>(I)->getPointerOperand();
+  IRBuilder<> IRB(II.Inst);
+  const bool IsWrite = isa<StoreInst>(*II.Inst);
+  Value *Addr = IsWrite ? cast<StoreInst>(II.Inst)->getPointerOperand()
+                        : cast<LoadInst>(II.Inst)->getPointerOperand();

   // swifterror memory addresses are mem2reg promoted by instruction selection.
   // As such they cannot have regular uses like an instrumentation function and
@@ -570,9 +624,9 @@ bool ThreadSanitizer::instrumentLoadOrStore(Instruction *I,
   int Idx = getMemoryAccessFuncIndex(Addr, DL);
   if (Idx < 0)
     return false;
-  if (IsWrite && isVtableAccess(I)) {
-    LLVM_DEBUG(dbgs() << "  VPTR : " << *I << "\n");
-    Value *StoredValue = cast<StoreInst>(I)->getValueOperand();
+  if (IsWrite && isVtableAccess(II.Inst)) {
+    LLVM_DEBUG(dbgs() << "  VPTR : " << *II.Inst << "\n");
+    Value *StoredValue = cast<StoreInst>(II.Inst)->getValueOperand();
     // StoredValue may be a vector type if we are storing several vptrs at once.
     // In this case, just take the first element of the vector since this is
     // enough to find vptr races.
@@ -588,36 +642,46 @@ bool ThreadSanitizer::instrumentLoadOrStore(Instruction *I,
     NumInstrumentedVtableWrites++;
     return true;
   }
-  if (!IsWrite && isVtableAccess(I)) {
+  if (!IsWrite && isVtableAccess(II.Inst)) {
     IRB.CreateCall(TsanVptrLoad,
                    IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()));
     NumInstrumentedVtableReads++;
     return true;
   }
-  const unsigned Alignment = IsWrite
-      ? cast<StoreInst>(I)->getAlignment()
-      : cast<LoadInst>(I)->getAlignment();
-  const bool IsVolatile =
-      ClDistinguishVolatile && (IsWrite ? cast<StoreInst>(I)->isVolatile()
-                                        : cast<LoadInst>(I)->isVolatile());
+
+  const unsigned Alignment = IsWrite ? cast<StoreInst>(II.Inst)->getAlignment()
+                                     : cast<LoadInst>(II.Inst)->getAlignment();
+  const bool IsCompoundRW =
+      ClCompoundReadBeforeWrite && (II.Flags & InstructionInfo::kCompoundRW);
+  const bool IsVolatile = ClDistinguishVolatile &&
+                          (IsWrite ? cast<StoreInst>(II.Inst)->isVolatile()
+                                   : cast<LoadInst>(II.Inst)->isVolatile());
+  assert((!IsVolatile || !IsCompoundRW) && "Compound volatile invalid!");
+
   Type *OrigTy = cast<PointerType>(Addr->getType())->getElementType();
   const uint32_t TypeSize = DL.getTypeStoreSizeInBits(OrigTy);
   FunctionCallee OnAccessFunc = nullptr;
   if (Alignment == 0 || Alignment >= 8 || (Alignment % (TypeSize / 8)) == 0) {
-    if (IsVolatile)
+    if (IsCompoundRW)
+      OnAccessFunc = TsanCompoundRW[Idx];
+    else if (IsVolatile)
       OnAccessFunc = IsWrite ? TsanVolatileWrite[Idx] : TsanVolatileRead[Idx];
     else
       OnAccessFunc = IsWrite ? TsanWrite[Idx] : TsanRead[Idx];
   } else {
-    if (IsVolatile)
+    if (IsCompoundRW)
+      OnAccessFunc = TsanUnalignedCompoundRW[Idx];
+    else if (IsVolatile)
       OnAccessFunc = IsWrite ? TsanUnalignedVolatileWrite[Idx]
                              : TsanUnalignedVolatileRead[Idx];
     else
       OnAccessFunc = IsWrite ? TsanUnalignedWrite[Idx] : TsanUnalignedRead[Idx];
   }
   IRB.CreateCall(OnAccessFunc, IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()));
-  if (IsWrite) NumInstrumentedWrites++;
-  else NumInstrumentedReads++;
+  if (IsCompoundRW || IsWrite)
+    NumInstrumentedWrites++;
+  if (IsCompoundRW || !IsWrite)
+    NumInstrumentedReads++;
   return true;
 }

llvm/test/Instrumentation/ThreadSanitizer/read_before_write.ll

@ -1,5 +1,7 @@
; RUN: opt < %s -tsan -S | FileCheck %s ; RUN: opt < %s -tsan -S | FileCheck --check-prefixes=CHECK,CHECK-OPT %s
; RUN: opt < %s -tsan -tsan-instrument-read-before-write -S | FileCheck %s --check-prefixes=CHECK,CHECK-UNOPT ; RUN: opt < %s -tsan -tsan-instrument-read-before-write -S | FileCheck %s --check-prefixes=CHECK,CHECK-UNOPT
; RUN: opt < %s -tsan -tsan-compound-read-before-write -S | FileCheck %s --check-prefixes=CHECK,CHECK-COMPOUND
; RUN: opt < %s -tsan -tsan-distinguish-volatile -tsan-compound-read-before-write -S | FileCheck %s --check-prefixes=CHECK,CHECK-COMPOUND-VOLATILE
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128" target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
@@ -10,10 +12,13 @@ entry:
   store i32 %inc, i32* %ptr, align 4
   ret void
 }
-; CHECK: define void @IncrementMe
-; CHECK-NOT: __tsan_read
-; CHECK-UNOPT: __tsan_read
-; CHECK: __tsan_write
+; CHECK-LABEL: define void @IncrementMe
+; CHECK-OPT-NOT: __tsan_read4
+; CHECK-COMPOUND-NOT: __tsan_read4
+; CHECK-UNOPT: __tsan_read4
+; CHECK-OPT: __tsan_write4
+; CHECK-UNOPT: __tsan_write4
+; CHECK-COMPOUND: __tsan_read_write4
 ; CHECK: ret void

 define void @IncrementMeWithCallInBetween(i32* nocapture %ptr) nounwind uwtable sanitize_thread {
@@ -25,10 +30,52 @@ entry:
   ret void
 }
-; CHECK: define void @IncrementMeWithCallInBetween
-; CHECK: __tsan_read
-; CHECK: __tsan_write
+; CHECK-LABEL: define void @IncrementMeWithCallInBetween
+; CHECK: __tsan_read4
+; CHECK: __tsan_write4
 ; CHECK: ret void

 declare void @foo()
+
+define void @VolatileLoad(i32* nocapture %ptr) nounwind uwtable sanitize_thread {
+entry:
+  %0 = load volatile i32, i32* %ptr, align 4
+  %inc = add nsw i32 %0, 1
+  store i32 %inc, i32* %ptr, align 4
+  ret void
+}
+; CHECK-LABEL: define void @VolatileLoad
+; CHECK-COMPOUND-NOT: __tsan_read4
+; CHECK-COMPOUND-VOLATILE: __tsan_volatile_read4
+; CHECK-COMPOUND: __tsan_read_write4
+; CHECK-COMPOUND-VOLATILE: __tsan_write4
+; CHECK: ret void
+
+define void @VolatileStore(i32* nocapture %ptr) nounwind uwtable sanitize_thread {
+entry:
+  %0 = load i32, i32* %ptr, align 4
+  %inc = add nsw i32 %0, 1
+  store volatile i32 %inc, i32* %ptr, align 4
+  ret void
+}
+; CHECK-LABEL: define void @VolatileStore
+; CHECK-COMPOUND-NOT: __tsan_read4
+; CHECK-COMPOUND-VOLATILE: __tsan_read4
+; CHECK-COMPOUND: __tsan_read_write4
+; CHECK-COMPOUND-VOLATILE: __tsan_volatile_write4
+; CHECK: ret void
+
+define void @VolatileBoth(i32* nocapture %ptr) nounwind uwtable sanitize_thread {
+entry:
+  %0 = load volatile i32, i32* %ptr, align 4
+  %inc = add nsw i32 %0, 1
+  store volatile i32 %inc, i32* %ptr, align 4
+  ret void
+}
+; CHECK-LABEL: define void @VolatileBoth
+; CHECK-COMPOUND-NOT: __tsan_read4
+; CHECK-COMPOUND-VOLATILE: __tsan_volatile_read4
+; CHECK-COMPOUND: __tsan_read_write4
+; CHECK-COMPOUND-VOLATILE: __tsan_volatile_write4
+; CHECK: ret void