[TSAN] Add optional support for distinguishing volatiles

Add support to optionally emit different instrumentation for accesses to
volatile variables. While the default TSAN runtime likely will never
require this feature, other runtimes for different environments that
have subtly different memory models or assumptions may require
distinguishing volatiles.

One such environment is OS kernels, where volatile is still used in
various places for various reasons, and is often declared to be
"safe enough" even in multi-threaded contexts. One such example is the
Linux kernel, which implements various synchronization primitives using
volatile (READ_ONCE(), WRITE_ONCE()). Here the Kernel Concurrency
Sanitizer (KCSAN) [1], is a runtime that uses TSAN instrumentation but
otherwise implements a very different approach to race detection from
TSAN.

While in the Linux kernel it is generally discouraged to use volatiles
explicitly, the topic will likely come up again, and we will eventually
need to distinguish volatile accesses [2]. The other use-case is
ignoring data races on specially marked variables in the kernel, for
example bit-flags (here we may hide 'volatile' behind a different name
such as 'no_data_race').

[1] https://github.com/google/ktsan/wiki/KCSAN
[2] https://lkml.kernel.org/r/CANpmjNOfXNE-Zh3MNP=-gmnhvKbsfUfTtWkyg_=VqTxS4nnptQ@mail.gmail.com

Author: melver (Marco Elver)
Reviewed-in: https://reviews.llvm.org/D78554
This commit is contained in:
Dmitry Vyukov 2020-04-22 16:01:33 +02:00
parent ec16df7066
commit 5a2c31116f
2 changed files with 216 additions and 4 deletions

View File

@@ -68,6 +68,10 @@ static cl::opt<bool> ClInstrumentAtomics(
static cl::opt<bool> ClInstrumentMemIntrinsics(
"tsan-instrument-memintrinsics", cl::init(true),
cl::desc("Instrument memintrinsics (memset/memcpy/memmove)"), cl::Hidden);
// Command-line switch for runtimes (e.g. KCSAN-style kernel runtimes) that
// need accesses to volatile objects reported through dedicated
// __tsan_volatile_* callbacks instead of the plain read/write hooks.
// Off by default; the stock TSAN runtime does not need the distinction.
static cl::opt<bool> ClDistinguishVolatile(
    "tsan-distinguish-volatile", cl::Hidden,
    cl::desc("Emit special instrumentation for accesses to volatiles"),
    cl::init(false));
STATISTIC(NumInstrumentedReads, "Number of instrumented reads");
STATISTIC(NumInstrumentedWrites, "Number of instrumented writes");
@@ -118,6 +122,10 @@ private:
// Per-size callbacks for plain (aligned, non-volatile) accesses.
FunctionCallee TsanWrite[kNumberOfAccessSizes];
// Per-size callbacks for accesses whose alignment is smaller than the
// access size.
FunctionCallee TsanUnalignedRead[kNumberOfAccessSizes];
FunctionCallee TsanUnalignedWrite[kNumberOfAccessSizes];
// Counterparts of the four callback families above, used instead of them
// when -tsan-distinguish-volatile is set and the access is volatile.
FunctionCallee TsanVolatileRead[kNumberOfAccessSizes];
FunctionCallee TsanVolatileWrite[kNumberOfAccessSizes];
FunctionCallee TsanUnalignedVolatileRead[kNumberOfAccessSizes];
FunctionCallee TsanUnalignedVolatileWrite[kNumberOfAccessSizes];
// Callbacks for atomic loads/stores, indexed by access size.
FunctionCallee TsanAtomicLoad[kNumberOfAccessSizes];
FunctionCallee TsanAtomicStore[kNumberOfAccessSizes];
FunctionCallee TsanAtomicRMW[AtomicRMWInst::LAST_BINOP + 1]
@@ -236,6 +244,24 @@ void ThreadSanitizer::initialize(Module &M) {
TsanUnalignedWrite[i] = M.getOrInsertFunction(
UnalignedWriteName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());
// Declare the volatile counterparts of the plain/unaligned access
// callbacks. All four share the same signature as the non-volatile
// versions: void(i8* addr). They are always declared here, but are only
// emitted at access sites when -tsan-distinguish-volatile is enabled.
SmallString<64> VolatileReadName("__tsan_volatile_read" + ByteSizeStr);
TsanVolatileRead[i] = M.getOrInsertFunction(
VolatileReadName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());
SmallString<64> VolatileWriteName("__tsan_volatile_write" + ByteSizeStr);
TsanVolatileWrite[i] = M.getOrInsertFunction(
VolatileWriteName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());
SmallString<64> UnalignedVolatileReadName("__tsan_unaligned_volatile_read" +
ByteSizeStr);
TsanUnalignedVolatileRead[i] = M.getOrInsertFunction(
UnalignedVolatileReadName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());
SmallString<64> UnalignedVolatileWriteName(
"__tsan_unaligned_volatile_write" + ByteSizeStr);
TsanUnalignedVolatileWrite[i] = M.getOrInsertFunction(
UnalignedVolatileWriteName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());
// Integer type of the access size, and a pointer to it, used below for
// the atomic callback signatures.
Type *Ty = Type::getIntNTy(M.getContext(), BitSize);
Type *PtrTy = Ty->getPointerTo();
SmallString<32> AtomicLoadName("__tsan_atomic" + BitSizeStr + "_load");
@@ -565,13 +591,24 @@ bool ThreadSanitizer::instrumentLoadOrStore(Instruction *I,
const unsigned Alignment = IsWrite
? cast<StoreInst>(I)->getAlignment()
: cast<LoadInst>(I)->getAlignment();
// The access counts as volatile only when the option is on AND the
// instruction itself carries the volatile flag; with the option off the
// volatile callbacks are never selected and codegen is unchanged.
const bool IsVolatile =
ClDistinguishVolatile && (IsWrite ? cast<StoreInst>(I)->isVolatile()
: cast<LoadInst>(I)->isVolatile());
Type *OrigTy = cast<PointerType>(Addr->getType())->getElementType();
const uint32_t TypeSize = DL.getTypeStoreSizeInBits(OrigTy);
FunctionCallee OnAccessFunc = nullptr;
// (Pre-patch selection, replaced by the volatile-aware version below.)
if (Alignment == 0 || Alignment >= 8 || (Alignment % (TypeSize / 8)) == 0)
OnAccessFunc = IsWrite ? TsanWrite[Idx] : TsanRead[Idx];
else
OnAccessFunc = IsWrite ? TsanUnalignedWrite[Idx] : TsanUnalignedRead[Idx];
// An access is treated as aligned when no alignment is recorded (0),
// the alignment is at least 8, or it evenly divides the access size;
// otherwise the unaligned callback family is used. Within each family,
// IsVolatile further selects the volatile variant.
if (Alignment == 0 || Alignment >= 8 || (Alignment % (TypeSize / 8)) == 0) {
if (IsVolatile)
OnAccessFunc = IsWrite ? TsanVolatileWrite[Idx] : TsanVolatileRead[Idx];
else
OnAccessFunc = IsWrite ? TsanWrite[Idx] : TsanRead[Idx];
} else {
if (IsVolatile)
OnAccessFunc = IsWrite ? TsanUnalignedVolatileWrite[Idx]
: TsanUnalignedVolatileRead[Idx];
else
OnAccessFunc = IsWrite ? TsanUnalignedWrite[Idx] : TsanUnalignedRead[Idx];
}
// All callbacks take the address as i8*, so cast before the call.
IRB.CreateCall(OnAccessFunc, IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()));
if (IsWrite) NumInstrumentedWrites++;
else NumInstrumentedReads++;

View File

@@ -0,0 +1,175 @@
; RUN: opt < %s -tsan -tsan-distinguish-volatile -S | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
; Aligned volatile loads of each power-of-two size (2/4/8/16 bytes) must be
; instrumented with the size-specific __tsan_volatile_readN callback instead
; of the plain __tsan_readN.
define i16 @test_volatile_read2(i16* %a) sanitize_thread {
entry:
%tmp1 = load volatile i16, i16* %a, align 2
ret i16 %tmp1
}
; CHECK-LABEL: define i16 @test_volatile_read2(i16* %a)
; CHECK: call void @__tsan_func_entry(i8* %0)
; CHECK-NEXT: %1 = bitcast i16* %a to i8*
; CHECK-NEXT: call void @__tsan_volatile_read2(i8* %1)
; CHECK-NEXT: %tmp1 = load volatile i16, i16* %a, align 2
; CHECK-NEXT: call void @__tsan_func_exit()
; CHECK: ret i16
define i32 @test_volatile_read4(i32* %a) sanitize_thread {
entry:
%tmp1 = load volatile i32, i32* %a, align 4
ret i32 %tmp1
}
; CHECK-LABEL: define i32 @test_volatile_read4(i32* %a)
; CHECK: call void @__tsan_func_entry(i8* %0)
; CHECK-NEXT: %1 = bitcast i32* %a to i8*
; CHECK-NEXT: call void @__tsan_volatile_read4(i8* %1)
; CHECK-NEXT: %tmp1 = load volatile i32, i32* %a, align 4
; CHECK-NEXT: call void @__tsan_func_exit()
; CHECK: ret i32
define i64 @test_volatile_read8(i64* %a) sanitize_thread {
entry:
%tmp1 = load volatile i64, i64* %a, align 8
ret i64 %tmp1
}
; CHECK-LABEL: define i64 @test_volatile_read8(i64* %a)
; CHECK: call void @__tsan_func_entry(i8* %0)
; CHECK-NEXT: %1 = bitcast i64* %a to i8*
; CHECK-NEXT: call void @__tsan_volatile_read8(i8* %1)
; CHECK-NEXT: %tmp1 = load volatile i64, i64* %a, align 8
; CHECK-NEXT: call void @__tsan_func_exit()
; CHECK: ret i64
define i128 @test_volatile_read16(i128* %a) sanitize_thread {
entry:
%tmp1 = load volatile i128, i128* %a, align 16
ret i128 %tmp1
}
; CHECK-LABEL: define i128 @test_volatile_read16(i128* %a)
; CHECK: call void @__tsan_func_entry(i8* %0)
; CHECK-NEXT: %1 = bitcast i128* %a to i8*
; CHECK-NEXT: call void @__tsan_volatile_read16(i8* %1)
; CHECK-NEXT: %tmp1 = load volatile i128, i128* %a, align 16
; CHECK-NEXT: call void @__tsan_func_exit()
; CHECK: ret i128
; Aligned volatile stores of each power-of-two size (2/4/8/16 bytes) must be
; instrumented with the size-specific __tsan_volatile_writeN callback instead
; of the plain __tsan_writeN.
define void @test_volatile_write2(i16* %a) sanitize_thread {
entry:
store volatile i16 1, i16* %a, align 2
ret void
}
; CHECK-LABEL: define void @test_volatile_write2(i16* %a)
; CHECK: call void @__tsan_func_entry(i8* %0)
; CHECK-NEXT: %1 = bitcast i16* %a to i8*
; CHECK-NEXT: call void @__tsan_volatile_write2(i8* %1)
; CHECK-NEXT: store volatile i16 1, i16* %a, align 2
; CHECK-NEXT: call void @__tsan_func_exit()
; CHECK: ret void
define void @test_volatile_write4(i32* %a) sanitize_thread {
entry:
store volatile i32 1, i32* %a, align 4
ret void
}
; CHECK-LABEL: define void @test_volatile_write4(i32* %a)
; CHECK: call void @__tsan_func_entry(i8* %0)
; CHECK-NEXT: %1 = bitcast i32* %a to i8*
; CHECK-NEXT: call void @__tsan_volatile_write4(i8* %1)
; CHECK-NEXT: store volatile i32 1, i32* %a, align 4
; CHECK-NEXT: call void @__tsan_func_exit()
; CHECK: ret void
define void @test_volatile_write8(i64* %a) sanitize_thread {
entry:
store volatile i64 1, i64* %a, align 8
ret void
}
; CHECK-LABEL: define void @test_volatile_write8(i64* %a)
; CHECK: call void @__tsan_func_entry(i8* %0)
; CHECK-NEXT: %1 = bitcast i64* %a to i8*
; CHECK-NEXT: call void @__tsan_volatile_write8(i8* %1)
; CHECK-NEXT: store volatile i64 1, i64* %a, align 8
; CHECK-NEXT: call void @__tsan_func_exit()
; CHECK: ret void
define void @test_volatile_write16(i128* %a) sanitize_thread {
entry:
store volatile i128 1, i128* %a, align 16
ret void
}
; CHECK-LABEL: define void @test_volatile_write16(i128* %a)
; CHECK: call void @__tsan_func_entry(i8* %0)
; CHECK-NEXT: %1 = bitcast i128* %a to i8*
; CHECK-NEXT: call void @__tsan_volatile_write16(i8* %1)
; CHECK-NEXT: store volatile i128 1, i128* %a, align 16
; CHECK-NEXT: call void @__tsan_func_exit()
; CHECK: ret void
; Check unaligned volatile accesses: when the recorded alignment is smaller
; than the access size, the pass must select the
; __tsan_unaligned_volatile_{read,write}N callbacks rather than the aligned
; volatile or plain unaligned ones.
define i32 @test_unaligned_read4(i32* %a) sanitize_thread {
entry:
%tmp1 = load volatile i32, i32* %a, align 2
ret i32 %tmp1
}
; CHECK-LABEL: define i32 @test_unaligned_read4(i32* %a)
; CHECK: call void @__tsan_func_entry(i8* %0)
; CHECK-NEXT: %1 = bitcast i32* %a to i8*
; CHECK-NEXT: call void @__tsan_unaligned_volatile_read4(i8* %1)
; CHECK-NEXT: %tmp1 = load volatile i32, i32* %a, align 2
; CHECK-NEXT: call void @__tsan_func_exit()
; CHECK: ret i32
define void @test_unaligned_write4(i32* %a) sanitize_thread {
entry:
store volatile i32 1, i32* %a, align 1
ret void
}
; CHECK-LABEL: define void @test_unaligned_write4(i32* %a)
; CHECK: call void @__tsan_func_entry(i8* %0)
; CHECK-NEXT: %1 = bitcast i32* %a to i8*
; CHECK-NEXT: call void @__tsan_unaligned_volatile_write4(i8* %1)
; CHECK-NEXT: store volatile i32 1, i32* %a, align 1
; CHECK-NEXT: call void @__tsan_func_exit()
; CHECK: ret void
; Check that regular (non-volatile) aligned accesses are unaffected: even with
; -tsan-distinguish-volatile enabled they keep the plain __tsan_{read,write}N
; callbacks.
define i32 @test_read4(i32* %a) sanitize_thread {
entry:
%tmp1 = load i32, i32* %a, align 4
ret i32 %tmp1
}
; CHECK-LABEL: define i32 @test_read4(i32* %a)
; CHECK: call void @__tsan_func_entry(i8* %0)
; CHECK-NEXT: %1 = bitcast i32* %a to i8*
; CHECK-NEXT: call void @__tsan_read4(i8* %1)
; CHECK-NEXT: %tmp1 = load i32, i32* %a, align 4
; CHECK-NEXT: call void @__tsan_func_exit()
; CHECK: ret i32
define void @test_write4(i32* %a) sanitize_thread {
entry:
store i32 1, i32* %a, align 4
ret void
}
; CHECK-LABEL: define void @test_write4(i32* %a)
; CHECK: call void @__tsan_func_entry(i8* %0)
; CHECK-NEXT: %1 = bitcast i32* %a to i8*
; CHECK-NEXT: call void @__tsan_write4(i8* %1)
; CHECK-NEXT: store i32 1, i32* %a, align 4
; CHECK-NEXT: call void @__tsan_func_exit()
; CHECK: ret void