Revert "[MSAN] Instrument libatomic load/store calls"

Instrumenting atomic_load runs into problems when the call has no
successor instruction, which is blocking the compiler roll.

This reverts commit 33d239513c.
Gui Andrade 2020-08-07 19:30:15 +00:00
parent b53fd9cdba
commit 17ff170e3a
3 changed files with 0 additions and 223 deletions
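
For context on the failure mode named in the message: the reverted
visitLibAtomicLoad (visible in the diff below) anchors an IRBuilder on
CB.getNextNode() to emit the shadow copy after the call. A minimal sketch
of why that anchor can be missing, assuming the libatomic call reaches the
pass as a block terminator such as an invoke; the helper name is
hypothetical and not part of the reverted patch:

#include "llvm/IR/InstrTypes.h"

using namespace llvm;

// Hypothetical guard, for illustration only. A plain CallInst always has a
// following instruction (at least the block terminator), but an InvokeInst
// is itself a terminator, so getNextNode() returns null and
//   IRBuilder<> NextIRB(CB.getNextNode());
// constructs a builder from a null insertion point.
static bool hasInstrumentationAnchor(const CallBase &CB) {
  return CB.getNextNode() != nullptr;
}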

compiler-rt/test/msan/libatomic.c (deleted)

@@ -1,41 +0,0 @@
// RUN: %clang_msan -fsanitize-memory-track-origins=2 -latomic -DTEST_STORE -O0 %s -o %t && %run %t 2>&1
// RUN: %clang_msan -fsanitize-memory-track-origins=0 -latomic -DTEST_LOAD -O0 %s -o %t && not %run %t 2>&1 | FileCheck %s --check-prefix=CHECK
// RUN: %clang_msan -fsanitize-memory-track-origins=2 -latomic -DTEST_LOAD -O0 %s -o %t && not %run %t 2>&1 | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-SHADOW
// PPC has no libatomic
// UNSUPPORTED: powerpc64-target-arch
// UNSUPPORTED: powerpc64le-target-arch
#include <sanitizer/msan_interface.h>
#include <stdatomic.h>
typedef struct __attribute((packed)) {
uint8_t val[3];
} i24;
void copy(i24 *dst, i24 *src);
int main() {
i24 uninit;
i24 init = {0};
__msan_check_mem_is_initialized(&init, 3);
copy(&init, &uninit);
__msan_check_mem_is_initialized(&init, 3);
}
void copy(i24 *dst, i24 *src) {
#ifdef TEST_LOAD
__atomic_load(src, dst, __ATOMIC_RELAXED);
// CHECK: MemorySanitizer: use-of-uninitialized-value
// CHECK: #0 {{0x[a-f0-9]+}} in main{{.*}}libatomic.c:[[@LINE-8]]
// CHECK-SHADOW: Uninitialized value was stored to memory at
// CHECK-SHADOW: #0 {{0x[a-f0-9]+}} in copy{{.*}}libatomic.c:[[@LINE-6]]
#endif
#ifdef TEST_STORE
// Store always writes a clean shadow
__atomic_store(src, dst, __ATOMIC_RELAXED);
#endif
}
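
For reference while reading the hunks that follow: the generic libatomic
entry points this test links against take (size, pointer, pointer,
ordering) arguments. A paraphrase of the declarations in the IR test at
the bottom of this commit, with parameter names of my own choosing, not an
authoritative ABI statement:

#include <cstddef>

// Shapes inferred from the IR declarations and from which operands the
// reverted visitors read: operand 0 is the byte size, operand 3 the memory
// order; __atomic_load reads from src into the dst buffer, and the store
// visitor treats operand 2 as the written-to object.
extern "C" void __atomic_load(size_t size, void *src, void *dst, int order);
extern "C" void __atomic_store(size_t size, void *src, void *dst, int order);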

llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp

@@ -573,9 +573,6 @@ private:
/// uninitialized value and returns an updated origin id encoding this info.
FunctionCallee MsanChainOriginFn;
/// Run-time helper that paints an origin over a region.
FunctionCallee MsanSetOriginFn;
/// MSan runtime replacements for memmove, memcpy and memset.
FunctionCallee MemmoveFn, MemcpyFn, MemsetFn;
@@ -854,9 +851,6 @@ void MemorySanitizer::initializeCallbacks(Module &M) {
// instrumentation.
MsanChainOriginFn = M.getOrInsertFunction(
"__msan_chain_origin", IRB.getInt32Ty(), IRB.getInt32Ty());
MsanSetOriginFn =
M.getOrInsertFunction("__msan_set_origin", IRB.getVoidTy(),
IRB.getInt8PtrTy(), IntptrTy, IRB.getInt32Ty());
MemmoveFn = M.getOrInsertFunction(
"__msan_memmove", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
IRB.getInt8PtrTy(), IntptrTy);
@@ -1824,24 +1818,6 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
llvm_unreachable("Unknown ordering");
}
Value *makeAddReleaseOrderingTable(IRBuilder<> &IRB) {
constexpr int NumOrderings = (int)AtomicOrderingCABI::seq_cst + 1;
uint32_t OrderingTable[NumOrderings] = {};
OrderingTable[(int)AtomicOrderingCABI::relaxed] =
OrderingTable[(int)AtomicOrderingCABI::release] =
(int)AtomicOrderingCABI::release;
OrderingTable[(int)AtomicOrderingCABI::consume] =
OrderingTable[(int)AtomicOrderingCABI::acquire] =
OrderingTable[(int)AtomicOrderingCABI::acq_rel] =
(int)AtomicOrderingCABI::acq_rel;
OrderingTable[(int)AtomicOrderingCABI::seq_cst] =
(int)AtomicOrderingCABI::seq_cst;
return ConstantDataVector::get(IRB.getContext(),
makeArrayRef(OrderingTable, NumOrderings));
}
AtomicOrdering addAcquireOrdering(AtomicOrdering a) {
switch (a) {
case AtomicOrdering::NotAtomic:
@@ -1859,24 +1835,6 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
llvm_unreachable("Unknown ordering");
}
Value *makeAddAcquireOrderingTable(IRBuilder<> &IRB) {
constexpr int NumOrderings = (int)AtomicOrderingCABI::seq_cst + 1;
uint32_t OrderingTable[NumOrderings] = {};
OrderingTable[(int)AtomicOrderingCABI::relaxed] =
OrderingTable[(int)AtomicOrderingCABI::acquire] =
OrderingTable[(int)AtomicOrderingCABI::consume] =
(int)AtomicOrderingCABI::acquire;
OrderingTable[(int)AtomicOrderingCABI::release] =
OrderingTable[(int)AtomicOrderingCABI::acq_rel] =
(int)AtomicOrderingCABI::acq_rel;
OrderingTable[(int)AtomicOrderingCABI::seq_cst] =
(int)AtomicOrderingCABI::seq_cst;
return ConstantDataVector::get(IRB.getContext(),
makeArrayRef(OrderingTable, NumOrderings));
}
// ------------------- Visitors.
using InstVisitor<MemorySanitizerVisitor>::visit;
void visit(Instruction &I) {
@@ -3493,60 +3451,6 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
}
}
void visitLibAtomicLoad(CallBase &CB) {
IRBuilder<> IRB(&CB);
Value *Size = CB.getArgOperand(0);
Value *SrcPtr = CB.getArgOperand(1);
Value *DstPtr = CB.getArgOperand(2);
Value *Ordering = CB.getArgOperand(3);
// Convert the call to have at least Acquire ordering to make sure
// the shadow operations aren't reordered before it.
Value *NewOrdering =
IRB.CreateExtractElement(makeAddAcquireOrderingTable(IRB), Ordering);
CB.setArgOperand(3, NewOrdering);
IRBuilder<> NextIRB(CB.getNextNode());
NextIRB.SetCurrentDebugLocation(CB.getDebugLoc());
Value *SrcShadowPtr, *SrcOriginPtr;
std::tie(SrcShadowPtr, SrcOriginPtr) =
getShadowOriginPtr(SrcPtr, NextIRB, NextIRB.getInt8Ty(), Align(1),
/*isStore*/ false);
Value *DstShadowPtr =
getShadowOriginPtr(DstPtr, NextIRB, NextIRB.getInt8Ty(), Align(1),
/*isStore*/ true)
.first;
NextIRB.CreateMemCpy(DstShadowPtr, Align(1), SrcShadowPtr, Align(1), Size);
if (MS.TrackOrigins) {
Value *SrcOrigin = NextIRB.CreateAlignedLoad(MS.OriginTy, SrcOriginPtr,
kMinOriginAlignment);
Value *NewOrigin = updateOrigin(SrcOrigin, NextIRB);
NextIRB.CreateCall(MS.MsanSetOriginFn, {DstPtr, Size, NewOrigin});
}
}
void visitLibAtomicStore(CallBase &CB) {
IRBuilder<> IRB(&CB);
Value *Size = CB.getArgOperand(0);
Value *DstPtr = CB.getArgOperand(2);
Value *Ordering = CB.getArgOperand(3);
// Convert the call to have at least Release ordering to make sure
// the shadow operations aren't reordered after it.
Value *NewOrdering =
IRB.CreateExtractElement(makeAddReleaseOrderingTable(IRB), Ordering);
CB.setArgOperand(3, NewOrdering);
Value *DstShadowPtr =
getShadowOriginPtr(DstPtr, IRB, IRB.getInt8Ty(), Align(1),
/*isStore*/ true)
.first;
// Atomic store always paints clean shadow/origin. See file header.
IRB.CreateMemSet(DstShadowPtr, getCleanShadow(IRB.getInt8Ty()), Size,
Align(1));
}
void visitCallBase(CallBase &CB) {
assert(!CB.getMetadata("nosanitize"));
if (CB.isInlineAsm()) {
@@ -3560,23 +3464,6 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
visitInstruction(CB);
return;
}
LibFunc LF;
if (TLI->getLibFunc(CB, LF)) {
// libatomic.a functions need to have special handling because there isn't
// a good way to intercept them or compile the library with
// instrumentation.
switch (LF) {
case LibFunc_atomic_load:
visitLibAtomicLoad(CB);
return;
case LibFunc_atomic_store:
visitLibAtomicStore(CB);
return;
default:
break;
}
}
if (auto *Call = dyn_cast<CallInst>(&CB)) {
assert(!isa<IntrinsicInst>(Call) && "intrinsics are handled elsewhere");
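
The two ordering tables deleted above encode a simple monotone
strengthening. Written as an ordinary function purely as a reading aid
(hypothetical helper, equivalent to makeAddAcquireOrderingTable's table):

#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/ErrorHandling.h"

using llvm::AtomicOrderingCABI;

// Reading aid only: the acquire-side table. relaxed/consume/acquire are
// raised to acquire and release/acq_rel to acq_rel, so the shadow memcpy
// emitted after the call cannot float above the atomic load. The release
// table does the analogous strengthening for stores.
static AtomicOrderingCABI addAcquire(AtomicOrderingCABI O) {
  switch (O) {
  case AtomicOrderingCABI::relaxed:
  case AtomicOrderingCABI::consume:
  case AtomicOrderingCABI::acquire:
    return AtomicOrderingCABI::acquire;
  case AtomicOrderingCABI::release:
  case AtomicOrderingCABI::acq_rel:
    return AtomicOrderingCABI::acq_rel;
  case AtomicOrderingCABI::seq_cst:
    return AtomicOrderingCABI::seq_cst;
  }
  llvm_unreachable("Unknown ordering");
}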

llvm/test/Instrumentation/MemorySanitizer/libatomic.ll (deleted)

@@ -1,69 +0,0 @@
; RUN: opt < %s -msan-check-access-address=0 -S -passes=msan 2>&1 | FileCheck %s
; RUN: opt < %s -msan-check-access-address=0 -msan-track-origins=2 -S -passes=msan 2>&1 | FileCheck %s -check-prefixes=CHECK,CHECK-ORIGIN
; RUN: opt < %s -msan -msan-check-access-address=0 -S | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
declare void @__atomic_load(i64, i8*, i8*, i32)
declare void @__atomic_store(i64, i8*, i8*, i32)
define i24 @odd_sized_load(i24* %ptr) sanitize_memory {
; CHECK: @odd_sized_load(i24* {{.*}}[[PTR:%.+]])
; CHECK: [[VAL_PTR:%.*]] = alloca i24, align 1
; CHECK-ORIGIN: @__msan_set_alloca_origin
; CHECK: [[VAL_PTR_I8:%.*]] = bitcast i24* [[VAL_PTR]] to i8*
; CHECK: [[PTR_I8:%.*]] = bitcast i24* [[PTR]] to i8*
; CHECK: call void @__atomic_load(i64 3, i8* [[PTR_I8]], i8* [[VAL_PTR_I8]], i32 2)
; CHECK: ptrtoint i8* [[PTR_I8]]
; CHECK: xor
; CHECK: [[SPTR_I8:%.*]] = inttoptr
; CHECK-ORIGIN: add
; CHECK-ORIGIN: and
; CHECK-ORIGIN: [[OPTR:%.*]] = inttoptr
; CHECK: ptrtoint i8* [[VAL_PTR_I8]]
; CHECK: xor
; CHECK: [[VAL_SPTR_I8:%.*]] = inttoptr
; CHECK-ORIGIN: add
; CHECK-ORIGIN: and
; CHECK-ORIGIN: [[VAL_OPTR:%.*]] = inttoptr
; CHECK: call void @llvm.memcpy{{.*}}(i8* align 1 [[VAL_SPTR_I8]], i8* align 1 [[SPTR_I8]], i64 3
; CHECK-ORIGIN: [[ARG_ORIGIN:%.*]] = load i32, i32* [[OPTR]]
; CHECK-ORIGIN: [[VAL_ORIGIN:%.*]] = call i32 @__msan_chain_origin(i32 [[ARG_ORIGIN]])
; CHECK-ORIGIN: call void @__msan_set_origin(i8* [[VAL_PTR_I8]], i64 3, i32 [[VAL_ORIGIN]])
; CHECK: [[VAL:%.*]] = load i24, i24* [[VAL_PTR]]
; CHECK: ret i24 [[VAL]]
%val_ptr = alloca i24, align 1
%val_ptr_i8 = bitcast i24* %val_ptr to i8*
%ptr_i8 = bitcast i24* %ptr to i8*
call void @__atomic_load(i64 3, i8* %ptr_i8, i8* %val_ptr_i8, i32 0)
%val = load i24, i24* %val_ptr
ret i24 %val
}
define void @odd_sized_store(i24* %ptr, i24 %val) sanitize_memory {
; CHECK: @odd_sized_store(i24* {{.*}}[[PTR:%.+]], i24 {{.*}}[[VAL:%.+]])
; CHECK: [[VAL_PTR:%.*]] = alloca i24, align 1
; CHECK: store i24 [[VAL]], i24* [[VAL_PTR]]
; CHECK: [[VAL_PTR_I8:%.*]] = bitcast i24* [[VAL_PTR]] to i8*
; CHECK: [[PTR_I8:%.*]] = bitcast i24* [[PTR]] to i8*
; CHECK: ptrtoint i8* [[PTR_I8]]
; CHECK: xor
; CHECK: [[SPTR_I8:%.*]] = inttoptr
; CHECK: call void @llvm.memset{{.*}}(i8* align 1 [[SPTR_I8]], i8 0, i64 3
; CHECK: call void @__atomic_store(i64 3, i8* [[VAL_PTR_I8]], i8* [[PTR_I8]], i32 3)
; CHECK: ret void
%val_ptr = alloca i24, align 1
store i24 %val, i24* %val_ptr
%val_ptr_i8 = bitcast i24* %val_ptr to i8*
%ptr_i8 = bitcast i24* %ptr to i8*
call void @__atomic_store(i64 3, i8* %val_ptr_i8, i8* %ptr_i8, i32 0)
ret void
}
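
Read back into plain code, the CHECK lines above describe the following
shadow-level behavior for the deleted load instrumentation. shadow_of and
origin_of are hypothetical stand-ins for the ptrtoint/xor/inttoptr address
arithmetic, not runtime functions, and the extern signatures are
approximations of the runtime entry points named in the test:

#include <cstddef>
#include <cstdint>
#include <cstring>

// Hypothetical helpers standing in for the shadow/origin address
// computations spelled out by the CHECK lines above.
uint8_t *shadow_of(void *p);
uint32_t *origin_of(void *p);

// MSan runtime and libatomic entry points named in the test
// (signatures approximate).
extern "C" uint32_t __msan_chain_origin(uint32_t origin);
extern "C" void __msan_set_origin(void *p, size_t size, uint32_t origin);
extern "C" void __atomic_load(size_t size, void *src, void *dst, int order);

// Sketch of what the reverted instrumentation made an odd-sized atomic
// load do: perform the load (with ordering already raised to at least
// acquire), copy the source's shadow bytes over the destination's, and,
// when origins are tracked, chain and repaint the origin of the region.
void instrumented_atomic_load(size_t size, void *src, void *dst, int order) {
  __atomic_load(size, src, dst, order);
  memcpy(shadow_of(dst), shadow_of(src), size);
  uint32_t o = __msan_chain_origin(*origin_of(src));
  __msan_set_origin(dst, size, o);
}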