diff --git a/llvm/lib/Analysis/Loads.cpp b/llvm/lib/Analysis/Loads.cpp index 10a71de82e39..fda1395cf3ed 100644 --- a/llvm/lib/Analysis/Loads.cpp +++ b/llvm/lib/Analysis/Loads.cpp @@ -107,11 +107,14 @@ static bool isDereferenceableAndAlignedPointer( return isDereferenceableAndAlignedPointer(ASC->getOperand(0), Align, Size, DL, CtxI, DT, Visited); - if (auto CS = ImmutableCallSite(V)) + if (auto CS = ImmutableCallSite(V)) { if (const Value *RV = CS.getReturnedArgOperand()) return isDereferenceableAndAlignedPointer(RV, Align, Size, DL, CtxI, DT, Visited); - + if (CS.getIntrinsicID() == Intrinsic::launder_invariant_group) + return isDereferenceableAndAlignedPointer(CS->getOperand(0), Align, Size, + DL, CtxI, DT, Visited); + } // If we don't know, assume the worst. return false; } diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp index 05246a288177..811c50165298 100644 --- a/llvm/lib/Analysis/ValueTracking.cpp +++ b/llvm/lib/Analysis/ValueTracking.cpp @@ -1953,9 +1953,12 @@ bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q) { if (LI->getMetadata(LLVMContext::MD_nonnull)) return true; - if (auto CS = ImmutableCallSite(V)) + if (auto CS = ImmutableCallSite(V)) { if (CS.isReturnNonNull()) return true; + if (CS.getIntrinsicID() == Intrinsic::launder_invariant_group) + return isKnownNonZero(CS->getOperand(0), Depth + 1, Q); + } } // The remaining tests are all recursive, so bail out if we hit the limit. 
diff --git a/llvm/test/Analysis/ValueTracking/deref-bitcast-of-gep.ll b/llvm/test/Analysis/ValueTracking/deref-bitcast-of-gep.ll index 38a8da8c0982..69c00819aa38 100644 --- a/llvm/test/Analysis/ValueTracking/deref-bitcast-of-gep.ll +++ b/llvm/test/Analysis/ValueTracking/deref-bitcast-of-gep.ll @@ -80,3 +80,28 @@ loop: leave: ret void } + +define void @checkLaunder(i8* align 4 dereferenceable(1024) %p) { +; CHECK-LABEL: @checkLaunder( +; CHECK: entry: +; CHECK: %l = call i8* @llvm.launder.invariant.group.p0i8(i8* %p) +; CHECK: %val = load i8, i8* %l +; CHECK: br label %loop +; CHECK: loop: +; CHECK: call void @use(i32 0) +; CHECK-NEXT: call void @use8(i8 %val) + +entry: + %l = call i8* @llvm.launder.invariant.group.p0i8(i8* %p) + br label %loop + +loop: + call void @use(i32 0) + %val = load i8, i8* %l, !invariant.load !{} + call void @use8(i8 %val) + br label %loop +} + +declare i8* @llvm.launder.invariant.group.p0i8(i8*) + +declare void @use8(i8) diff --git a/llvm/test/Analysis/ValueTracking/invariant.group.ll b/llvm/test/Analysis/ValueTracking/invariant.group.ll new file mode 100644 index 000000000000..82bbe724c82a --- /dev/null +++ b/llvm/test/Analysis/ValueTracking/invariant.group.ll @@ -0,0 +1,19 @@ +; RUN: opt -S -instsimplify -instcombine < %s | FileCheck %s + +; CHECK-LABEL: define void @checkNonnull() +define void @checkNonnull() { +; CHECK: %p = call i8* @llvm.launder.invariant.group.p0i8(i8* nonnull %0) +; CHECK: %p2 = call i8* @llvm.launder.invariant.group.p0i8(i8* nonnull %p) +; CHECK: call void @use(i8* nonnull %p2) +entry: + %0 = alloca i8, align 8 + + %p = call i8* @llvm.launder.invariant.group.p0i8(i8* %0) + %p2 = call i8* @llvm.launder.invariant.group.p0i8(i8* %p) + call void @use(i8* %p2) + + ret void +} + +declare i8* @llvm.launder.invariant.group.p0i8(i8*) +declare void @use(i8*)