diff --git a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
index ade0d2058e0d..4d15b784f486 100644
--- a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -1758,8 +1758,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
           break;
         }
 
-        if (!FArgEagerCheck)
-          ArgOffset += alignTo(Size, kShadowTLSAlignment);
+        ArgOffset += alignTo(Size, kShadowTLSAlignment);
       }
       assert(*ShadowPtr && "Could not find shadow for an argument");
       return *ShadowPtr;
@@ -3709,42 +3708,48 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
 
       if (EagerCheck) {
         insertShadowCheck(A, &CB);
-        continue;
-      }
-      if (ByVal) {
-        // ByVal requires some special handling as it's too big for a single
-        // load
-        assert(A->getType()->isPointerTy() &&
-               "ByVal argument is not a pointer!");
-        Size = DL.getTypeAllocSize(CB.getParamByValType(i));
-        if (ArgOffset + Size > kParamTLSSize) break;
-        const MaybeAlign ParamAlignment(CB.getParamAlign(i));
-        MaybeAlign Alignment = llvm::None;
-        if (ParamAlignment)
-          Alignment = std::min(*ParamAlignment, kShadowTLSAlignment);
-        Value *AShadowPtr =
-            getShadowOriginPtr(A, IRB, IRB.getInt8Ty(), Alignment,
-                               /*isStore*/ false)
-                .first;
-
-        Store = IRB.CreateMemCpy(ArgShadowBase, Alignment, AShadowPtr,
-                                 Alignment, Size);
-        // TODO(glider): need to copy origins.
-      } else {
-        // Any other parameters mean we need bit-grained tracking of uninit data
         Size = DL.getTypeAllocSize(A->getType());
-        if (ArgOffset + Size > kParamTLSSize) break;
-        Store = IRB.CreateAlignedStore(ArgShadow, ArgShadowBase,
-                                       kShadowTLSAlignment);
-        Constant *Cst = dyn_cast<Constant>(ArgShadow);
-        if (Cst && Cst->isNullValue()) ArgIsInitialized = true;
+      } else {
+        if (ByVal) {
+          // ByVal requires some special handling as it's too big for a single
+          // load
+          assert(A->getType()->isPointerTy() &&
+                 "ByVal argument is not a pointer!");
+          Size = DL.getTypeAllocSize(CB.getParamByValType(i));
+          if (ArgOffset + Size > kParamTLSSize)
+            break;
+          const MaybeAlign ParamAlignment(CB.getParamAlign(i));
+          MaybeAlign Alignment = llvm::None;
+          if (ParamAlignment)
+            Alignment = std::min(*ParamAlignment, kShadowTLSAlignment);
+          Value *AShadowPtr =
+              getShadowOriginPtr(A, IRB, IRB.getInt8Ty(), Alignment,
+                                 /*isStore*/ false)
+                  .first;
+
+          Store = IRB.CreateMemCpy(ArgShadowBase, Alignment, AShadowPtr,
+                                   Alignment, Size);
+          // TODO(glider): need to copy origins.
+        } else {
+          // Any other parameters mean we need bit-grained tracking of uninit
+          // data
+          Size = DL.getTypeAllocSize(A->getType());
+          if (ArgOffset + Size > kParamTLSSize)
+            break;
+          Store = IRB.CreateAlignedStore(ArgShadow, ArgShadowBase,
+                                         kShadowTLSAlignment);
+          Constant *Cst = dyn_cast<Constant>(ArgShadow);
+          if (Cst && Cst->isNullValue())
+            ArgIsInitialized = true;
+        }
+        if (MS.TrackOrigins && !ArgIsInitialized)
+          IRB.CreateStore(getOrigin(A),
+                          getOriginPtrForArgument(A, IRB, ArgOffset));
+        (void)Store;
+        assert(Store != nullptr);
+        LLVM_DEBUG(dbgs() << "  Param:" << *Store << "\n");
       }
-      if (MS.TrackOrigins && !ArgIsInitialized)
-        IRB.CreateStore(getOrigin(A),
-                        getOriginPtrForArgument(A, IRB, ArgOffset));
-      (void)Store;
-      assert(Size != 0 && Store != nullptr);
-      LLVM_DEBUG(dbgs() << "  Param:" << *Store << "\n");
+      assert(Size != 0);
       ArgOffset += alignTo(Size, kShadowTLSAlignment);
     }
     LLVM_DEBUG(dbgs() << "  done with call args\n");
diff --git a/llvm/test/Instrumentation/MemorySanitizer/msan_eager.ll b/llvm/test/Instrumentation/MemorySanitizer/msan_eager.ll
index 582d9a926344..10ab029b5e2c 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/msan_eager.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/msan_eager.ll
@@ -69,8 +69,8 @@ define void @NormalArg(i32 noundef %a) nounwind uwtable sanitize_memory {
 
 define void @NormalArgAfterNoUndef(i32 noundef %a, i32 %b) nounwind uwtable sanitize_memory {
 ; CHECK-LABEL: @NormalArgAfterNoUndef(
-; CHECK-NEXT:    [[TMP1:%.*]] = load i32, i32* bitcast ([100 x i64]* @__msan_param_tls to i32*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__msan_param_origin_tls, i32 0, i32 0), align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = load i32, i32* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 8) to i32*), align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, i32* inttoptr (i64 add (i64 ptrtoint ([200 x i32]* @__msan_param_origin_tls to i64), i64 8) to i32*), align 4
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[P:%.*]] = inttoptr i64 0 to i32*
 ; CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint i32* [[P]] to i64
@@ -135,7 +135,7 @@ define void @CallNormalArgAfterNoUndef() nounwind uwtable sanitize_memory {
 ; CHECK-LABEL: @CallNormalArgAfterNoUndef(
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[R:%.*]] = call i32 @NormalRet() #[[ATTR0]]
-; CHECK-NEXT:    store i32 0, i32* bitcast ([100 x i64]* @__msan_param_tls to i32*), align 8
+; CHECK-NEXT:    store i32 0, i32* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 8) to i32*), align 8
 ; CHECK-NEXT:    call void @NormalArgAfterNoUndef(i32 [[R]], i32 [[R]]) #[[ATTR0]]
 ; CHECK-NEXT:    ret void
 ;