commit 4aeb7e1ef4
parent 7093b92a13

[AssumeBundles] Preserve information in EarlyCSE

Summary: This patch preserves information from various places in EarlyCSE into assume bundles.

Reviewers: jdoerfert

Subscribers: hiraditya, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D76769
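For context, the effect of the new behaviour shows up in the updated tests below: when EarlyCSE erases an instruction, salvageKnowledge builds an llvm.assume whose operand bundles retain what the erased instruction implied about its operands. The following is a minimal sketch of the resulting IR under --enable-knowledge-retention; the function and value names are illustrative, and the bundle contents mirror the USE_ASSUME check lines in the test diffs.

; Input: the load is redundant (store-to-load forwarding), but it implied
; that %ptr is dereferenceable and nonnull.
define i32 @example(i32* %ptr) {
  store i32 40, i32* %ptr
  %v = load i32, i32* %ptr
  ret i32 %v
}

; After -early-cse --enable-knowledge-retention: the load is gone, and the
; knowledge it carried is preserved in an assume operand bundle.
define i32 @example.cse(i32* %ptr) {
  store i32 40, i32* %ptr
  call void @llvm.assume(i1 true) [ "dereferenceable"(i32* %ptr, i64 4), "nonnull"(i32* %ptr) ]
  ret i32 40
}

declare void @llvm.assume(i1)
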
@@ -30,6 +30,11 @@ class IntrinsicInst;
/// The returned instruction is not inserted anywhere.
IntrinsicInst *buildAssumeFromInst(Instruction *I);

/// Calls BuildAssumeFromInst and if the resulting llvm.assume is valid insert
/// if before I. This is usually what need to be done to salvage the knowledge
/// contained in the instruction I.
void salvageKnowledge(Instruction* I);

/// It is possible to have multiple Value for the argument of an attribute in
/// the same llvm.assume on the same llvm::Value. This is rare but need to be
/// dealt with.

@@ -232,6 +232,11 @@ IntrinsicInst *llvm::buildAssumeFromInst(Instruction *I) {
  return Builder.build();
}

void llvm::salvageKnowledge(Instruction* I) {
  if (Instruction* Intr = buildAssumeFromInst(I))
    Intr->insertBefore(I);
}

static bool bundleHasArgument(const CallBase::BundleOpInfo &BOI,
                              unsigned Idx) {
  return BOI.End - BOI.Begin > Idx;

@@ -38,6 +38,7 @@
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/KnowledgeRetention.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/PatternMatch.h"

@@ -947,6 +948,7 @@ bool EarlyCSE::processNode(DomTreeNode *Node) {
  continue;
}

salvageKnowledge(&Inst);
salvageDebugInfoOrMarkUndef(Inst);
removeMSSA(Inst);
Inst.eraseFromParent();

@@ -1013,6 +1015,7 @@ bool EarlyCSE::processNode(DomTreeNode *Node) {
    cast<ConstantInt>(KnownCond)->isOne()) {
  LLVM_DEBUG(dbgs()
             << "EarlyCSE removing guard: " << Inst << '\n');
  salvageKnowledge(&Inst);
  removeMSSA(Inst);
  Inst.eraseFromParent();
  Changed = true;

@@ -1048,6 +1051,7 @@ bool EarlyCSE::processNode(DomTreeNode *Node) {
  Changed = true;
}
if (isInstructionTriviallyDead(&Inst, &TLI)) {
  salvageKnowledge(&Inst);
  removeMSSA(Inst);
  Inst.eraseFromParent();
  Changed = true;

@@ -1073,6 +1077,7 @@ bool EarlyCSE::processNode(DomTreeNode *Node) {
if (auto *I = dyn_cast<Instruction>(V))
  I->andIRFlags(&Inst);
Inst.replaceAllUsesWith(V);
salvageKnowledge(&Inst);
removeMSSA(Inst);
Inst.eraseFromParent();
Changed = true;

@@ -1133,6 +1138,7 @@ bool EarlyCSE::processNode(DomTreeNode *Node) {
}
if (!Inst.use_empty())
  Inst.replaceAllUsesWith(Op);
salvageKnowledge(&Inst);
removeMSSA(Inst);
Inst.eraseFromParent();
Changed = true;

@@ -1176,6 +1182,7 @@ bool EarlyCSE::processNode(DomTreeNode *Node) {
}
if (!Inst.use_empty())
  Inst.replaceAllUsesWith(InVal.first);
salvageKnowledge(&Inst);
removeMSSA(Inst);
Inst.eraseFromParent();
Changed = true;

@@ -1228,6 +1235,7 @@ bool EarlyCSE::processNode(DomTreeNode *Node) {
  LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
  continue;
}
salvageKnowledge(&Inst);
removeMSSA(Inst);
Inst.eraseFromParent();
Changed = true;

@@ -1263,6 +1271,7 @@ bool EarlyCSE::processNode(DomTreeNode *Node) {
if (!DebugCounter::shouldExecute(CSECounter)) {
  LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
} else {
  salvageKnowledge(&Inst);
  removeMSSA(*LastStore);
  LastStore->eraseFromParent();
  Changed = true;

@@ -1,5 +1,7 @@
; RUN: opt -S -early-cse < %s | FileCheck %s
; RUN: opt < %s -S -basicaa -early-cse-memssa | FileCheck %s
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -S -early-cse < %s | FileCheck %s --check-prefixes=CHECK,NO_ASSUME
; RUN: opt < %s -S -basicaa -early-cse-memssa | FileCheck %s --check-prefixes=CHECK,NO_ASSUME
; RUN: opt < %s -S -basicaa -early-cse-memssa --enable-knowledge-retention | FileCheck %s --check-prefixes=CHECK,USE_ASSUME

declare void @llvm.experimental.guard(i1,...)

@@ -8,11 +10,17 @@ declare void @llvm.assume(i1)
define i32 @test0(i32* %ptr, i1 %cond) {
; We can do store to load forwarding over a guard, since it does not
; clobber memory

; CHECK-LABEL: @test0(
; CHECK-NEXT: store i32 40, i32* %ptr
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 %cond) [ "deopt"() ]
; CHECK-NEXT: ret i32 40
; NO_ASSUME-LABEL: @test0(
; NO_ASSUME-NEXT: store i32 40, i32* [[PTR:%.*]]
; NO_ASSUME-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[COND:%.*]]) [ "deopt"() ]
; NO_ASSUME-NEXT: ret i32 40
;
; USE_ASSUME-LABEL: @test0(
; USE_ASSUME-NEXT: store i32 40, i32* [[PTR:%.*]]
; USE_ASSUME-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[COND:%.*]]) [ "deopt"() ]
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[PTR]], i64 4), "nonnull"(i32* [[PTR]]) ]
; USE_ASSUME-NEXT: ret i32 40
;

store i32 40, i32* %ptr
call void(i1,...) @llvm.experimental.guard(i1 %cond) [ "deopt"() ]

@ -22,11 +30,17 @@ define i32 @test0(i32* %ptr, i1 %cond) {
|
|||
|
||||
define i32 @test1(i32* %val, i1 %cond) {
|
||||
; We can CSE loads over a guard, since it does not clobber memory
|
||||
|
||||
; CHECK-LABEL: @test1(
|
||||
; CHECK-NEXT: %val0 = load i32, i32* %val
|
||||
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 %cond) [ "deopt"() ]
|
||||
; CHECK-NEXT: ret i32 0
|
||||
; NO_ASSUME-LABEL: @test1(
|
||||
; NO_ASSUME-NEXT: [[VAL0:%.*]] = load i32, i32* [[VAL:%.*]]
|
||||
; NO_ASSUME-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[COND:%.*]]) [ "deopt"() ]
|
||||
; NO_ASSUME-NEXT: ret i32 0
|
||||
;
|
||||
; USE_ASSUME-LABEL: @test1(
|
||||
; USE_ASSUME-NEXT: [[VAL0:%.*]] = load i32, i32* [[VAL:%.*]]
|
||||
; USE_ASSUME-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[COND:%.*]]) [ "deopt"() ]
|
||||
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[VAL]], i64 4), "nonnull"(i32* [[VAL]]) ]
|
||||
; USE_ASSUME-NEXT: ret i32 0
|
||||
;
|
||||
|
||||
%val0 = load i32, i32* %val
|
||||
call void(i1,...) @llvm.experimental.guard(i1 %cond) [ "deopt"() ]
|
||||
|
@ -37,9 +51,9 @@ define i32 @test1(i32* %val, i1 %cond) {
|
|||
|
||||
define i32 @test2() {
|
||||
; Guards on "true" get removed
|
||||
|
||||
; CHECK-LABEL: @test2(
|
||||
; CHECK-NEXT: ret i32 0
|
||||
; CHECK-NEXT: ret i32 0
|
||||
;
|
||||
call void(i1, ...) @llvm.experimental.guard(i1 true) [ "deopt"() ]
|
||||
ret i32 0
|
||||
}
|
||||
|
@ -47,11 +61,11 @@ define i32 @test2() {
|
|||
define i32 @test3(i32 %val) {
|
||||
; After a guard has executed the condition it was guarding is known to
|
||||
; be true.
|
||||
|
||||
; CHECK-LABEL: @test3(
|
||||
; CHECK-NEXT: %cond0 = icmp slt i32 %val, 40
|
||||
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 %cond0) [ "deopt"() ]
|
||||
; CHECK-NEXT: ret i32 -1
|
||||
; CHECK-NEXT: [[COND0:%.*]] = icmp slt i32 [[VAL:%.*]], 40
|
||||
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[COND0]]) [ "deopt"() ]
|
||||
; CHECK-NEXT: ret i32 -1
|
||||
;
|
||||
|
||||
%cond0 = icmp slt i32 %val, 40
|
||||
call void(i1,...) @llvm.experimental.guard(i1 %cond0) [ "deopt"() ]
|
||||
|
@ -85,23 +99,23 @@ define i32 @test3.unhandled(i32 %val) {
|
|||
|
||||
define i32 @test4(i32 %val, i1 %c) {
|
||||
; Same as test3, but with some control flow involved.
|
||||
|
||||
; CHECK-LABEL: @test4(
|
||||
; CHECK: entry:
|
||||
; CHECK-NEXT: %cond0 = icmp slt i32 %val, 40
|
||||
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 %cond0
|
||||
; CHECK-NEXT: br label %bb0
|
||||
; CHECK-NEXT: entry:
|
||||
; CHECK-NEXT: [[COND0:%.*]] = icmp slt i32 [[VAL:%.*]], 40
|
||||
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[COND0]]) [ "deopt"() ]
|
||||
; CHECK-NEXT: br label [[BB0:%.*]]
|
||||
; CHECK: bb0:
|
||||
; CHECK-NEXT: [[COND2:%.*]] = icmp ult i32 [[VAL]], 200
|
||||
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[COND2]]) [ "deopt"() ]
|
||||
; CHECK-NEXT: br i1 [[C:%.*]], label [[LEFT:%.*]], label [[RIGHT:%.*]]
|
||||
; CHECK: left:
|
||||
; CHECK-NEXT: ret i32 0
|
||||
; CHECK: right:
|
||||
; CHECK-NEXT: ret i32 20
|
||||
;
|
||||
|
||||
; CHECK: bb0:
|
||||
; CHECK-NEXT: %cond2 = icmp ult i32 %val, 200
|
||||
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 %cond2
|
||||
; CHECK-NEXT: br i1 %c, label %left, label %right
|
||||
|
||||
; CHECK: left:
|
||||
; CHECK-NEXT: ret i32 0
|
||||
|
||||
; CHECK: right:
|
||||
; CHECK-NEXT: ret i32 20
|
||||
|
||||
entry:
|
||||
%cond0 = icmp slt i32 %val, 40
|
||||
|
@ -121,29 +135,29 @@ left:
|
|||
ret i32 0
|
||||
|
||||
right:
|
||||
ret i32 20
|
||||
ret i32 20
|
||||
}
|
||||
|
||||
define i32 @test5(i32 %val, i1 %c) {
|
||||
; Same as test4, but the %left block has mutliple predecessors.
|
||||
|
||||
; CHECK-LABEL: @test5(
|
||||
; CHECK-NEXT: entry:
|
||||
; CHECK-NEXT: [[COND0:%.*]] = icmp slt i32 [[VAL:%.*]], 40
|
||||
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[COND0]]) [ "deopt"() ]
|
||||
; CHECK-NEXT: br label [[BB0:%.*]]
|
||||
; CHECK: bb0:
|
||||
; CHECK-NEXT: [[COND2:%.*]] = icmp ult i32 [[VAL]], 200
|
||||
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[COND2]]) [ "deopt"() ]
|
||||
; CHECK-NEXT: br i1 [[C:%.*]], label [[LEFT:%.*]], label [[RIGHT:%.*]]
|
||||
; CHECK: left:
|
||||
; CHECK-NEXT: br label [[RIGHT]]
|
||||
; CHECK: right:
|
||||
; CHECK-NEXT: br label [[LEFT]]
|
||||
;
|
||||
|
||||
; CHECK: entry:
|
||||
; CHECK-NEXT: %cond0 = icmp slt i32 %val, 40
|
||||
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 %cond0
|
||||
; CHECK-NEXT: br label %bb0
|
||||
|
||||
; CHECK: bb0:
|
||||
; CHECK-NEXT: %cond2 = icmp ult i32 %val, 200
|
||||
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 %cond2
|
||||
; CHECK-NEXT: br i1 %c, label %left, label %right
|
||||
|
||||
; CHECK: left:
|
||||
; CHECK-NEXT: br label %right
|
||||
|
||||
; CHECK: right:
|
||||
; CHECK-NEXT: br label %left
|
||||
|
||||
entry:
|
||||
%cond0 = icmp slt i32 %val, 40
|
||||
|
@ -170,11 +184,12 @@ define void @test6(i1 %c, i32* %ptr) {
|
|||
; Check that we do not DSE over calls to @llvm.experimental.guard.
|
||||
; Guard intrinsics do _read_ memory, so th call to guard below needs
|
||||
; to see the store of 500 to %ptr
|
||||
|
||||
; CHECK-LABEL: @test6(
|
||||
; CHECK-NEXT: store i32 500, i32* %ptr
|
||||
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 %c) [ "deopt"() ]
|
||||
; CHECK-NEXT: store i32 600, i32* %ptr
|
||||
; CHECK-NEXT: store i32 500, i32* [[PTR:%.*]]
|
||||
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[C:%.*]]) [ "deopt"() ]
|
||||
; CHECK-NEXT: store i32 600, i32* [[PTR]]
|
||||
; CHECK-NEXT: ret void
|
||||
;
|
||||
|
||||
|
||||
store i32 500, i32* %ptr
|
||||
|
@ -186,11 +201,11 @@ define void @test6(i1 %c, i32* %ptr) {
|
|||
define void @test07(i32 %a, i32 %b) {
|
||||
; Check that we are able to remove the guards on the same condition even if the
|
||||
; condition is not being recalculated.
|
||||
|
||||
; CHECK-LABEL: @test07(
|
||||
; CHECK-NEXT: %cmp = icmp eq i32 %a, %b
|
||||
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 %cmp) [ "deopt"() ]
|
||||
; CHECK-NEXT: ret void
|
||||
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[A:%.*]], [[B:%.*]]
|
||||
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[CMP]]) [ "deopt"() ]
|
||||
; CHECK-NEXT: ret void
|
||||
;
|
||||
|
||||
%cmp = icmp eq i32 %a, %b
|
||||
call void (i1, ...) @llvm.experimental.guard(i1 %cmp) [ "deopt"() ]
|
||||
|
@ -202,13 +217,22 @@ define void @test07(i32 %a, i32 %b) {
|
|||
define void @test08(i32 %a, i32 %b, i32* %ptr) {
|
||||
; Check that we deal correctly with stores when removing guards in the same
|
||||
; block in case when the condition is not recalculated.
|
||||
|
||||
; CHECK-LABEL: @test08(
|
||||
; CHECK-NEXT: %cmp = icmp eq i32 %a, %b
|
||||
; CHECK-NEXT: store i32 100, i32* %ptr
|
||||
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 %cmp) [ "deopt"() ]
|
||||
; CHECK-NEXT: store i32 400, i32* %ptr
|
||||
; CHECK-NEXT: ret void
|
||||
; NO_ASSUME-LABEL: @test08(
|
||||
; NO_ASSUME-NEXT: [[CMP:%.*]] = icmp eq i32 [[A:%.*]], [[B:%.*]]
|
||||
; NO_ASSUME-NEXT: store i32 100, i32* [[PTR:%.*]]
|
||||
; NO_ASSUME-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[CMP]]) [ "deopt"() ]
|
||||
; NO_ASSUME-NEXT: store i32 400, i32* [[PTR]]
|
||||
; NO_ASSUME-NEXT: ret void
|
||||
;
|
||||
; USE_ASSUME-LABEL: @test08(
|
||||
; USE_ASSUME-NEXT: [[CMP:%.*]] = icmp eq i32 [[A:%.*]], [[B:%.*]]
|
||||
; USE_ASSUME-NEXT: store i32 100, i32* [[PTR:%.*]]
|
||||
; USE_ASSUME-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[CMP]]) [ "deopt"() ]
|
||||
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[PTR]], i64 4), "nonnull"(i32* [[PTR]]) ]
|
||||
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[PTR]], i64 4), "nonnull"(i32* [[PTR]]) ]
|
||||
; USE_ASSUME-NEXT: store i32 400, i32* [[PTR]]
|
||||
; USE_ASSUME-NEXT: ret void
|
||||
;
|
||||
|
||||
%cmp = icmp eq i32 %a, %b
|
||||
store i32 100, i32* %ptr
|
||||
|
@ -225,22 +249,40 @@ define void @test09(i32 %a, i32 %b, i1 %c, i32* %ptr) {
|
|||
; Similar to test08, but with more control flow.
|
||||
; TODO: Can we get rid of the store in the end of entry given that it is
|
||||
; post-dominated by other stores?
|
||||
|
||||
; CHECK-LABEL: @test09(
|
||||
; CHECK: entry:
|
||||
; CHECK-NEXT: %cmp = icmp eq i32 %a, %b
|
||||
; CHECK-NEXT: store i32 100, i32* %ptr
|
||||
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 %cmp) [ "deopt"() ]
|
||||
; CHECK-NEXT: store i32 400, i32* %ptr
|
||||
; CHECK-NEXT: br i1 %c, label %if.true, label %if.false
|
||||
; CHECK: if.true:
|
||||
; CHECK-NEXT: store i32 500, i32* %ptr
|
||||
; CHECK-NEXT: br label %merge
|
||||
; CHECK: if.false:
|
||||
; CHECK-NEXT: store i32 600, i32* %ptr
|
||||
; CHECK-NEXT: br label %merge
|
||||
; CHECK: merge:
|
||||
; CHECK-NEXT: ret void
|
||||
; NO_ASSUME-LABEL: @test09(
|
||||
; NO_ASSUME-NEXT: entry:
|
||||
; NO_ASSUME-NEXT: [[CMP:%.*]] = icmp eq i32 [[A:%.*]], [[B:%.*]]
|
||||
; NO_ASSUME-NEXT: store i32 100, i32* [[PTR:%.*]]
|
||||
; NO_ASSUME-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[CMP]]) [ "deopt"() ]
|
||||
; NO_ASSUME-NEXT: store i32 400, i32* [[PTR]]
|
||||
; NO_ASSUME-NEXT: br i1 [[C:%.*]], label [[IF_TRUE:%.*]], label [[IF_FALSE:%.*]]
|
||||
; NO_ASSUME: if.true:
|
||||
; NO_ASSUME-NEXT: store i32 500, i32* [[PTR]]
|
||||
; NO_ASSUME-NEXT: br label [[MERGE:%.*]]
|
||||
; NO_ASSUME: if.false:
|
||||
; NO_ASSUME-NEXT: store i32 600, i32* [[PTR]]
|
||||
; NO_ASSUME-NEXT: br label [[MERGE]]
|
||||
; NO_ASSUME: merge:
|
||||
; NO_ASSUME-NEXT: ret void
|
||||
;
|
||||
; USE_ASSUME-LABEL: @test09(
|
||||
; USE_ASSUME-NEXT: entry:
|
||||
; USE_ASSUME-NEXT: [[CMP:%.*]] = icmp eq i32 [[A:%.*]], [[B:%.*]]
|
||||
; USE_ASSUME-NEXT: store i32 100, i32* [[PTR:%.*]]
|
||||
; USE_ASSUME-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[CMP]]) [ "deopt"() ]
|
||||
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[PTR]], i64 4), "nonnull"(i32* [[PTR]]) ]
|
||||
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[PTR]], i64 4), "nonnull"(i32* [[PTR]]) ]
|
||||
; USE_ASSUME-NEXT: store i32 400, i32* [[PTR]]
|
||||
; USE_ASSUME-NEXT: br i1 [[C:%.*]], label [[IF_TRUE:%.*]], label [[IF_FALSE:%.*]]
|
||||
; USE_ASSUME: if.true:
|
||||
; USE_ASSUME-NEXT: store i32 500, i32* [[PTR]]
|
||||
; USE_ASSUME-NEXT: br label [[MERGE:%.*]]
|
||||
; USE_ASSUME: if.false:
|
||||
; USE_ASSUME-NEXT: store i32 600, i32* [[PTR]]
|
||||
; USE_ASSUME-NEXT: br label [[MERGE]]
|
||||
; USE_ASSUME: merge:
|
||||
; USE_ASSUME-NEXT: ret void
|
||||
;
|
||||
|
||||
entry:
|
||||
%cmp = icmp eq i32 %a, %b
|
||||
|
@ -269,23 +311,23 @@ merge:
|
|||
|
||||
define void @test10(i32 %a, i32 %b, i1 %c, i32* %ptr) {
|
||||
; Make sure that non-dominating guards do not cause other guards removal.
|
||||
|
||||
; CHECK-LABEL: @test10(
|
||||
; CHECK: entry:
|
||||
; CHECK-NEXT: %cmp = icmp eq i32 %a, %b
|
||||
; CHECK-NEXT: br i1 %c, label %if.true, label %if.false
|
||||
; CHECK-NEXT: entry:
|
||||
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[A:%.*]], [[B:%.*]]
|
||||
; CHECK-NEXT: br i1 [[C:%.*]], label [[IF_TRUE:%.*]], label [[IF_FALSE:%.*]]
|
||||
; CHECK: if.true:
|
||||
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 %cmp) [ "deopt"() ]
|
||||
; CHECK-NEXT: store i32 100, i32* %ptr
|
||||
; CHECK-NEXT: br label %merge
|
||||
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[CMP]]) [ "deopt"() ]
|
||||
; CHECK-NEXT: store i32 100, i32* [[PTR:%.*]]
|
||||
; CHECK-NEXT: br label [[MERGE:%.*]]
|
||||
; CHECK: if.false:
|
||||
; CHECK-NEXT: store i32 200, i32* %ptr
|
||||
; CHECK-NEXT: br label %merge
|
||||
; CHECK-NEXT: store i32 200, i32* [[PTR]]
|
||||
; CHECK-NEXT: br label [[MERGE]]
|
||||
; CHECK: merge:
|
||||
; CHECK-NEXT: store i32 300, i32* %ptr
|
||||
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 %cmp) [ "deopt"() ]
|
||||
; CHECK-NEXT: store i32 400, i32* %ptr
|
||||
; CHECK-NEXT: store i32 300, i32* [[PTR]]
|
||||
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[CMP]]) [ "deopt"() ]
|
||||
; CHECK-NEXT: store i32 400, i32* [[PTR]]
|
||||
; CHECK-NEXT: ret void
|
||||
;
|
||||
|
||||
entry:
|
||||
%cmp = icmp eq i32 %a, %b
|
||||
|
@ -310,18 +352,18 @@ merge:
|
|||
|
||||
define void @test11(i32 %a, i32 %b, i32* %ptr) {
|
||||
; Make sure that branching condition is applied to guards.
|
||||
|
||||
; CHECK-LABEL: @test11(
|
||||
; CHECK: entry:
|
||||
; CHECK-NEXT: %cmp = icmp eq i32 %a, %b
|
||||
; CHECK-NEXT: br i1 %cmp, label %if.true, label %if.false
|
||||
; CHECK-NEXT: entry:
|
||||
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[A:%.*]], [[B:%.*]]
|
||||
; CHECK-NEXT: br i1 [[CMP]], label [[IF_TRUE:%.*]], label [[IF_FALSE:%.*]]
|
||||
; CHECK: if.true:
|
||||
; CHECK-NEXT: br label %merge
|
||||
; CHECK-NEXT: br label [[MERGE:%.*]]
|
||||
; CHECK: if.false:
|
||||
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 false) [ "deopt"() ]
|
||||
; CHECK-NEXT: br label %merge
|
||||
; CHECK-NEXT: br label [[MERGE]]
|
||||
; CHECK: merge:
|
||||
; CHECK-NEXT: ret void
|
||||
;
|
||||
|
||||
entry:
|
||||
%cmp = icmp eq i32 %a, %b
|
||||
|
@ -342,11 +384,11 @@ merge:
|
|||
define void @test12(i32 %a, i32 %b) {
|
||||
; Check that the assume marks its condition as being true (and thus allows to
|
||||
; eliminate the dominated guards).
|
||||
|
||||
; CHECK-LABEL: @test12(
|
||||
; CHECK-NEXT: %cmp = icmp eq i32 %a, %b
|
||||
; CHECK-NEXT: call void @llvm.assume(i1 %cmp)
|
||||
; CHECK-NEXT: ret void
|
||||
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[A:%.*]], [[B:%.*]]
|
||||
; CHECK-NEXT: call void @llvm.assume(i1 [[CMP]])
|
||||
; CHECK-NEXT: ret void
|
||||
;
|
||||
|
||||
%cmp = icmp eq i32 %a, %b
|
||||
call void @llvm.assume(i1 %cmp)
|
||||
|
@ -358,12 +400,21 @@ define void @test12(i32 %a, i32 %b) {
|
|||
|
||||
define void @test13(i32 %a, i32 %b, i32* %ptr) {
|
||||
; Check that we deal correctly with stores when removing guards due to assume.
|
||||
|
||||
; CHECK-LABEL: @test13(
|
||||
; CHECK-NEXT: %cmp = icmp eq i32 %a, %b
|
||||
; CHECK-NEXT: call void @llvm.assume(i1 %cmp)
|
||||
; CHECK-NEXT: store i32 400, i32* %ptr
|
||||
; CHECK-NEXT: ret void
|
||||
; NO_ASSUME-LABEL: @test13(
|
||||
; NO_ASSUME-NEXT: [[CMP:%.*]] = icmp eq i32 [[A:%.*]], [[B:%.*]]
|
||||
; NO_ASSUME-NEXT: call void @llvm.assume(i1 [[CMP]])
|
||||
; NO_ASSUME-NEXT: store i32 400, i32* [[PTR:%.*]]
|
||||
; NO_ASSUME-NEXT: ret void
|
||||
;
|
||||
; USE_ASSUME-LABEL: @test13(
|
||||
; USE_ASSUME-NEXT: [[CMP:%.*]] = icmp eq i32 [[A:%.*]], [[B:%.*]]
|
||||
; USE_ASSUME-NEXT: call void @llvm.assume(i1 [[CMP]])
|
||||
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[PTR:%.*]], i64 4), "nonnull"(i32* [[PTR]]) ]
|
||||
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[PTR]], i64 4), "nonnull"(i32* [[PTR]]) ]
|
||||
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[PTR]], i64 4), "nonnull"(i32* [[PTR]]) ]
|
||||
; USE_ASSUME-NEXT: store i32 400, i32* [[PTR]]
|
||||
; USE_ASSUME-NEXT: ret void
|
||||
;
|
||||
|
||||
%cmp = icmp eq i32 %a, %b
|
||||
call void @llvm.assume(i1 %cmp)
|
||||
|
@ -381,21 +432,39 @@ define void @test14(i32 %a, i32 %b, i1 %c, i32* %ptr) {
|
|||
; Similar to test13, but with more control flow.
|
||||
; TODO: Can we get rid of the store in the end of entry given that it is
|
||||
; post-dominated by other stores?
|
||||
|
||||
; CHECK-LABEL: @test14(
|
||||
; CHECK: entry:
|
||||
; CHECK-NEXT: %cmp = icmp eq i32 %a, %b
|
||||
; CHECK-NEXT: call void @llvm.assume(i1 %cmp)
|
||||
; CHECK-NEXT: store i32 400, i32* %ptr
|
||||
; CHECK-NEXT: br i1 %c, label %if.true, label %if.false
|
||||
; CHECK: if.true:
|
||||
; CHECK-NEXT: store i32 500, i32* %ptr
|
||||
; CHECK-NEXT: br label %merge
|
||||
; CHECK: if.false:
|
||||
; CHECK-NEXT: store i32 600, i32* %ptr
|
||||
; CHECK-NEXT: br label %merge
|
||||
; CHECK: merge:
|
||||
; CHECK-NEXT: ret void
|
||||
; NO_ASSUME-LABEL: @test14(
|
||||
; NO_ASSUME-NEXT: entry:
|
||||
; NO_ASSUME-NEXT: [[CMP:%.*]] = icmp eq i32 [[A:%.*]], [[B:%.*]]
|
||||
; NO_ASSUME-NEXT: call void @llvm.assume(i1 [[CMP]])
|
||||
; NO_ASSUME-NEXT: store i32 400, i32* [[PTR:%.*]]
|
||||
; NO_ASSUME-NEXT: br i1 [[C:%.*]], label [[IF_TRUE:%.*]], label [[IF_FALSE:%.*]]
|
||||
; NO_ASSUME: if.true:
|
||||
; NO_ASSUME-NEXT: store i32 500, i32* [[PTR]]
|
||||
; NO_ASSUME-NEXT: br label [[MERGE:%.*]]
|
||||
; NO_ASSUME: if.false:
|
||||
; NO_ASSUME-NEXT: store i32 600, i32* [[PTR]]
|
||||
; NO_ASSUME-NEXT: br label [[MERGE]]
|
||||
; NO_ASSUME: merge:
|
||||
; NO_ASSUME-NEXT: ret void
|
||||
;
|
||||
; USE_ASSUME-LABEL: @test14(
|
||||
; USE_ASSUME-NEXT: entry:
|
||||
; USE_ASSUME-NEXT: [[CMP:%.*]] = icmp eq i32 [[A:%.*]], [[B:%.*]]
|
||||
; USE_ASSUME-NEXT: call void @llvm.assume(i1 [[CMP]])
|
||||
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[PTR:%.*]], i64 4), "nonnull"(i32* [[PTR]]) ]
|
||||
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[PTR]], i64 4), "nonnull"(i32* [[PTR]]) ]
|
||||
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[PTR]], i64 4), "nonnull"(i32* [[PTR]]) ]
|
||||
; USE_ASSUME-NEXT: store i32 400, i32* [[PTR]]
|
||||
; USE_ASSUME-NEXT: br i1 [[C:%.*]], label [[IF_TRUE:%.*]], label [[IF_FALSE:%.*]]
|
||||
; USE_ASSUME: if.true:
|
||||
; USE_ASSUME-NEXT: store i32 500, i32* [[PTR]]
|
||||
; USE_ASSUME-NEXT: br label [[MERGE:%.*]]
|
||||
; USE_ASSUME: if.false:
|
||||
; USE_ASSUME-NEXT: store i32 600, i32* [[PTR]]
|
||||
; USE_ASSUME-NEXT: br label [[MERGE]]
|
||||
; USE_ASSUME: merge:
|
||||
; USE_ASSUME-NEXT: ret void
|
||||
;
|
||||
|
||||
entry:
|
||||
%cmp = icmp eq i32 %a, %b
|
||||
|
@ -425,23 +494,23 @@ merge:
|
|||
|
||||
define void @test15(i32 %a, i32 %b, i1 %c, i32* %ptr) {
|
||||
; Make sure that non-dominating assumes do not cause guards removal.
|
||||
|
||||
; CHECK-LABEL: @test15(
|
||||
; CHECK: entry:
|
||||
; CHECK-NEXT: %cmp = icmp eq i32 %a, %b
|
||||
; CHECK-NEXT: br i1 %c, label %if.true, label %if.false
|
||||
; CHECK-NEXT: entry:
|
||||
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[A:%.*]], [[B:%.*]]
|
||||
; CHECK-NEXT: br i1 [[C:%.*]], label [[IF_TRUE:%.*]], label [[IF_FALSE:%.*]]
|
||||
; CHECK: if.true:
|
||||
; CHECK-NEXT: call void @llvm.assume(i1 %cmp)
|
||||
; CHECK-NEXT: store i32 100, i32* %ptr
|
||||
; CHECK-NEXT: br label %merge
|
||||
; CHECK-NEXT: call void @llvm.assume(i1 [[CMP]])
|
||||
; CHECK-NEXT: store i32 100, i32* [[PTR:%.*]]
|
||||
; CHECK-NEXT: br label [[MERGE:%.*]]
|
||||
; CHECK: if.false:
|
||||
; CHECK-NEXT: store i32 200, i32* %ptr
|
||||
; CHECK-NEXT: br label %merge
|
||||
; CHECK-NEXT: store i32 200, i32* [[PTR]]
|
||||
; CHECK-NEXT: br label [[MERGE]]
|
||||
; CHECK: merge:
|
||||
; CHECK-NEXT: store i32 300, i32* %ptr
|
||||
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 %cmp) [ "deopt"() ]
|
||||
; CHECK-NEXT: store i32 400, i32* %ptr
|
||||
; CHECK-NEXT: store i32 300, i32* [[PTR]]
|
||||
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[CMP]]) [ "deopt"() ]
|
||||
; CHECK-NEXT: store i32 400, i32* [[PTR]]
|
||||
; CHECK-NEXT: ret void
|
||||
;
|
||||
|
||||
entry:
|
||||
%cmp = icmp eq i32 %a, %b
|
||||
|
@ -467,12 +536,12 @@ merge:
|
|||
define void @test16(i32 %a, i32 %b) {
|
||||
; Check that we don't bother to do anything with assumes even if we know the
|
||||
; condition being true.
|
||||
|
||||
; CHECK-LABEL: @test16(
|
||||
; CHECK-NEXT: %cmp = icmp eq i32 %a, %b
|
||||
; CHECK-NEXT: call void @llvm.assume(i1 %cmp)
|
||||
; CHECK-NEXT: call void @llvm.assume(i1 %cmp)
|
||||
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[A:%.*]], [[B:%.*]]
|
||||
; CHECK-NEXT: call void @llvm.assume(i1 [[CMP]])
|
||||
; CHECK-NEXT: call void @llvm.assume(i1 [[CMP]])
|
||||
; CHECK-NEXT: ret void
|
||||
;
|
||||
|
||||
%cmp = icmp eq i32 %a, %b
|
||||
call void @llvm.assume(i1 %cmp)
|
||||
|
@ -483,19 +552,19 @@ define void @test16(i32 %a, i32 %b) {
|
|||
define void @test17(i32 %a, i32 %b, i1 %c, i32* %ptr) {
|
||||
; Check that we don't bother to do anything with assumes even if we know the
|
||||
; condition being true or false (includes come control flow).
|
||||
|
||||
; CHECK-LABEL: @test17(
|
||||
; CHECK: entry:
|
||||
; CHECK-NEXT: %cmp = icmp eq i32 %a, %b
|
||||
; CHECK-NEXT: br i1 %c, label %if.true, label %if.false
|
||||
; CHECK-NEXT: entry:
|
||||
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[A:%.*]], [[B:%.*]]
|
||||
; CHECK-NEXT: br i1 [[C:%.*]], label [[IF_TRUE:%.*]], label [[IF_FALSE:%.*]]
|
||||
; CHECK: if.true:
|
||||
; CHECK-NEXT: call void @llvm.assume(i1 %cmp)
|
||||
; CHECK-NEXT: br label %merge
|
||||
; CHECK-NEXT: call void @llvm.assume(i1 [[CMP]])
|
||||
; CHECK-NEXT: br label [[MERGE:%.*]]
|
||||
; CHECK: if.false:
|
||||
; CHECK-NEXT: call void @llvm.assume(i1 %cmp)
|
||||
; CHECK-NEXT: br label %merge
|
||||
; CHECK-NEXT: call void @llvm.assume(i1 [[CMP]])
|
||||
; CHECK-NEXT: br label [[MERGE]]
|
||||
; CHECK: merge:
|
||||
; CHECK-NEXT: ret void
|
||||
;
|
||||
|
||||
entry:
|
||||
%cmp = icmp eq i32 %a, %b
|
||||
|
@ -516,11 +585,11 @@ merge:
|
|||
define void @test18(i1 %c) {
|
||||
; Check that we don't bother to do anything with assumes even if we know the
|
||||
; condition being true and not being an instruction.
|
||||
|
||||
; CHECK-LABEL: @test18(
|
||||
; CHECK-NEXT: call void @llvm.assume(i1 %c)
|
||||
; CHECK-NEXT: call void @llvm.assume(i1 %c)
|
||||
; CHECK-NEXT: call void @llvm.assume(i1 [[C:%.*]])
|
||||
; CHECK-NEXT: call void @llvm.assume(i1 [[C]])
|
||||
; CHECK-NEXT: ret void
|
||||
;
|
||||
|
||||
call void @llvm.assume(i1 %c)
|
||||
call void @llvm.assume(i1 %c)
|
||||
@@ -1,15 +1,27 @@
; RUN: opt -S -early-cse < %s | FileCheck %s
; RUN: opt -S -basicaa -early-cse-memssa < %s | FileCheck %s
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -S -early-cse < %s | FileCheck %s --check-prefixes=CHECK,NO_ASSUME
; RUN: opt -S -basicaa -early-cse-memssa < %s | FileCheck %s --check-prefixes=CHECK,NO_ASSUME
; RUN: opt -S -basicaa -early-cse-memssa --enable-knowledge-retention < %s | FileCheck %s --check-prefixes=CHECK,USE_ASSUME

declare void @clobber_and_use(i32)

define void @f_0(i32* %ptr) {
|
||||
; CHECK-LABEL: @f_0(
|
||||
; CHECK: %val0 = load i32, i32* %ptr, !invariant.load !0
|
||||
; CHECK: call void @clobber_and_use(i32 %val0)
|
||||
; CHECK: call void @clobber_and_use(i32 %val0)
|
||||
; CHECK: call void @clobber_and_use(i32 %val0)
|
||||
; CHECK: ret void
|
||||
; NO_ASSUME-LABEL: @f_0(
|
||||
; NO_ASSUME-NEXT: [[VAL0:%.*]] = load i32, i32* [[PTR:%.*]], !invariant.load !0
|
||||
; NO_ASSUME-NEXT: call void @clobber_and_use(i32 [[VAL0]])
|
||||
; NO_ASSUME-NEXT: call void @clobber_and_use(i32 [[VAL0]])
|
||||
; NO_ASSUME-NEXT: call void @clobber_and_use(i32 [[VAL0]])
|
||||
; NO_ASSUME-NEXT: ret void
|
||||
;
|
||||
; USE_ASSUME-LABEL: @f_0(
|
||||
; USE_ASSUME-NEXT: [[VAL0:%.*]] = load i32, i32* [[PTR:%.*]], !invariant.load !0
|
||||
; USE_ASSUME-NEXT: call void @clobber_and_use(i32 [[VAL0]])
|
||||
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[PTR]], i64 4), "nonnull"(i32* [[PTR]]) ]
|
||||
; USE_ASSUME-NEXT: call void @clobber_and_use(i32 [[VAL0]])
|
||||
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[PTR]], i64 4), "nonnull"(i32* [[PTR]]) ]
|
||||
; USE_ASSUME-NEXT: call void @clobber_and_use(i32 [[VAL0]])
|
||||
; USE_ASSUME-NEXT: ret void
|
||||
;
|
||||
|
||||
%val0 = load i32, i32* %ptr, !invariant.load !{}
|
||||
call void @clobber_and_use(i32 %val0)
|
||||
|
@ -22,11 +34,19 @@ define void @f_0(i32* %ptr) {
|
|||
|
||||
define void @f_1(i32* %ptr) {
|
||||
; We can forward invariant loads to non-invariant loads.
|
||||
|
||||
; CHECK-LABEL: @f_1(
|
||||
; CHECK: %val0 = load i32, i32* %ptr, !invariant.load !0
|
||||
; CHECK: call void @clobber_and_use(i32 %val0)
|
||||
; CHECK: call void @clobber_and_use(i32 %val0)
|
||||
; NO_ASSUME-LABEL: @f_1(
|
||||
; NO_ASSUME-NEXT: [[VAL0:%.*]] = load i32, i32* [[PTR:%.*]], !invariant.load !0
|
||||
; NO_ASSUME-NEXT: call void @clobber_and_use(i32 [[VAL0]])
|
||||
; NO_ASSUME-NEXT: call void @clobber_and_use(i32 [[VAL0]])
|
||||
; NO_ASSUME-NEXT: ret void
|
||||
;
|
||||
; USE_ASSUME-LABEL: @f_1(
|
||||
; USE_ASSUME-NEXT: [[VAL0:%.*]] = load i32, i32* [[PTR:%.*]], !invariant.load !0
|
||||
; USE_ASSUME-NEXT: call void @clobber_and_use(i32 [[VAL0]])
|
||||
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[PTR]], i64 4), "nonnull"(i32* [[PTR]]) ]
|
||||
; USE_ASSUME-NEXT: call void @clobber_and_use(i32 [[VAL0]])
|
||||
; USE_ASSUME-NEXT: ret void
|
||||
;
|
||||
|
||||
%val0 = load i32, i32* %ptr, !invariant.load !{}
|
||||
call void @clobber_and_use(i32 %val0)
|
||||
|
@ -37,11 +57,19 @@ define void @f_1(i32* %ptr) {
|
|||
|
||||
define void @f_2(i32* %ptr) {
|
||||
; We can forward a non-invariant load into an invariant load.
|
||||
|
||||
; CHECK-LABEL: @f_2(
|
||||
; CHECK: %val0 = load i32, i32* %ptr
|
||||
; CHECK: call void @clobber_and_use(i32 %val0)
|
||||
; CHECK: call void @clobber_and_use(i32 %val0)
|
||||
; NO_ASSUME-LABEL: @f_2(
|
||||
; NO_ASSUME-NEXT: [[VAL0:%.*]] = load i32, i32* [[PTR:%.*]]
|
||||
; NO_ASSUME-NEXT: call void @clobber_and_use(i32 [[VAL0]])
|
||||
; NO_ASSUME-NEXT: call void @clobber_and_use(i32 [[VAL0]])
|
||||
; NO_ASSUME-NEXT: ret void
|
||||
;
|
||||
; USE_ASSUME-LABEL: @f_2(
|
||||
; USE_ASSUME-NEXT: [[VAL0:%.*]] = load i32, i32* [[PTR:%.*]]
|
||||
; USE_ASSUME-NEXT: call void @clobber_and_use(i32 [[VAL0]])
|
||||
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[PTR]], i64 4), "nonnull"(i32* [[PTR]]) ]
|
||||
; USE_ASSUME-NEXT: call void @clobber_and_use(i32 [[VAL0]])
|
||||
; USE_ASSUME-NEXT: ret void
|
||||
;
|
||||
|
||||
%val0 = load i32, i32* %ptr
|
||||
call void @clobber_and_use(i32 %val0)
|
||||
|
@ -51,14 +79,31 @@ define void @f_2(i32* %ptr) {
|
|||
}
|
||||
|
||||
define void @f_3(i1 %cond, i32* %ptr) {
|
||||
; CHECK-LABEL: @f_3(
|
||||
; NO_ASSUME-LABEL: @f_3(
|
||||
; NO_ASSUME-NEXT: [[VAL0:%.*]] = load i32, i32* [[PTR:%.*]], !invariant.load !0
|
||||
; NO_ASSUME-NEXT: call void @clobber_and_use(i32 [[VAL0]])
|
||||
; NO_ASSUME-NEXT: br i1 [[COND:%.*]], label [[LEFT:%.*]], label [[RIGHT:%.*]]
|
||||
; NO_ASSUME: left:
|
||||
; NO_ASSUME-NEXT: call void @clobber_and_use(i32 [[VAL0]])
|
||||
; NO_ASSUME-NEXT: ret void
|
||||
; NO_ASSUME: right:
|
||||
; NO_ASSUME-NEXT: ret void
|
||||
;
|
||||
; USE_ASSUME-LABEL: @f_3(
|
||||
; USE_ASSUME-NEXT: [[VAL0:%.*]] = load i32, i32* [[PTR:%.*]], !invariant.load !0
|
||||
; USE_ASSUME-NEXT: call void @clobber_and_use(i32 [[VAL0]])
|
||||
; USE_ASSUME-NEXT: br i1 [[COND:%.*]], label [[LEFT:%.*]], label [[RIGHT:%.*]]
|
||||
; USE_ASSUME: left:
|
||||
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[PTR]], i64 4), "nonnull"(i32* [[PTR]]) ]
|
||||
; USE_ASSUME-NEXT: call void @clobber_and_use(i32 [[VAL0]])
|
||||
; USE_ASSUME-NEXT: ret void
|
||||
; USE_ASSUME: right:
|
||||
; USE_ASSUME-NEXT: ret void
|
||||
;
|
||||
%val0 = load i32, i32* %ptr, !invariant.load !{}
|
||||
call void @clobber_and_use(i32 %val0)
|
||||
br i1 %cond, label %left, label %right
|
||||
|
||||
; CHECK: %val0 = load i32, i32* %ptr, !invariant.load !0
|
||||
; CHECK: left:
|
||||
; CHECK-NEXT: call void @clobber_and_use(i32 %val0)
|
||||
|
||||
left:
|
||||
%val1 = load i32, i32* %ptr
|
||||
|
@ -72,23 +117,26 @@ right:
|
|||
define void @f_4(i1 %cond, i32* %ptr) {
|
||||
; Negative test -- can't forward %val0 to %va1 because that'll break
|
||||
; def-dominates-use.
|
||||
|
||||
; CHECK-LABEL: @f_4(
|
||||
; CHECK-NEXT: br i1 [[COND:%.*]], label [[LEFT:%.*]], label [[MERGE:%.*]]
|
||||
; CHECK: left:
|
||||
; CHECK-NEXT: [[VAL0:%.*]] = load i32, i32* [[PTR:%.*]], !invariant.load !0
|
||||
; CHECK-NEXT: call void @clobber_and_use(i32 [[VAL0]])
|
||||
; CHECK-NEXT: br label [[MERGE]]
|
||||
; CHECK: merge:
|
||||
; CHECK-NEXT: [[VAL1:%.*]] = load i32, i32* [[PTR]]
|
||||
; CHECK-NEXT: call void @clobber_and_use(i32 [[VAL1]])
|
||||
; CHECK-NEXT: ret void
|
||||
;
|
||||
br i1 %cond, label %left, label %merge
|
||||
|
||||
left:
|
||||
; CHECK: left:
|
||||
; CHECK-NEXT: %val0 = load i32, i32* %ptr, !invariant.load !
|
||||
; CHECK-NEXT: call void @clobber_and_use(i32 %val0)
|
||||
|
||||
%val0 = load i32, i32* %ptr, !invariant.load !{}
|
||||
call void @clobber_and_use(i32 %val0)
|
||||
br label %merge
|
||||
|
||||
merge:
|
||||
; CHECK: merge:
|
||||
; CHECK-NEXT: %val1 = load i32, i32* %ptr
|
||||
; CHECK-NEXT: call void @clobber_and_use(i32 %val1)
|
||||
|
||||
%val1 = load i32, i32* %ptr
|
||||
call void @clobber_and_use(i32 %val1)
|
||||
|
@ -100,8 +148,17 @@ merge:
|
|||
; for the moment we chose to be conservative and just assume it's valid
|
||||
; to restore the same unchanging value.
|
||||
define void @test_dse1(i32* %p) {
|
||||
; CHECK-LABEL: @test_dse1
|
||||
; CHECK-NOT: store
|
||||
; NO_ASSUME-LABEL: @test_dse1(
|
||||
; NO_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P:%.*]], !invariant.load !0
|
||||
; NO_ASSUME-NEXT: call void @clobber_and_use(i32 [[V1]])
|
||||
; NO_ASSUME-NEXT: ret void
|
||||
;
|
||||
; USE_ASSUME-LABEL: @test_dse1(
|
||||
; USE_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P:%.*]], !invariant.load !0
|
||||
; USE_ASSUME-NEXT: call void @clobber_and_use(i32 [[V1]])
|
||||
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P]], i64 4), "nonnull"(i32* [[P]]) ]
|
||||
; USE_ASSUME-NEXT: ret void
|
||||
;
|
||||
%v1 = load i32, i32* %p, !invariant.load !{}
|
||||
call void @clobber_and_use(i32 %v1)
|
||||
store i32 %v1, i32* %p
|
||||
|
@ -110,8 +167,12 @@ define void @test_dse1(i32* %p) {
|
|||
|
||||
; By assumption, v1 must equal v2 (TODO)
|
||||
define void @test_false_negative_dse2(i32* %p, i32 %v2) {
|
||||
; CHECK-LABEL: @test_false_negative_dse2
|
||||
; CHECK: store
|
||||
; CHECK-LABEL: @test_false_negative_dse2(
|
||||
; CHECK-NEXT: [[V1:%.*]] = load i32, i32* [[P:%.*]], !invariant.load !0
|
||||
; CHECK-NEXT: call void @clobber_and_use(i32 [[V1]])
|
||||
; CHECK-NEXT: store i32 [[V2:%.*]], i32* [[P]]
|
||||
; CHECK-NEXT: ret void
|
||||
;
|
||||
%v1 = load i32, i32* %p, !invariant.load !{}
|
||||
call void @clobber_and_use(i32 %v1)
|
||||
store i32 %v2, i32* %p
|
||||
|
@ -121,12 +182,22 @@ define void @test_false_negative_dse2(i32* %p, i32 %v2) {
|
|||
; If we remove the load, we still start an invariant scope since
|
||||
; it lets us remove later loads not explicitly marked invariant
|
||||
define void @test_scope_start_without_load(i32* %p) {
|
||||
; CHECK-LABEL: @test_scope_start_without_load
|
||||
; CHECK: %v1 = load i32, i32* %p
|
||||
; CHECK: %add = add i32 %v1, %v1
|
||||
; CHECK: call void @clobber_and_use(i32 %add)
|
||||
; CHECK: call void @clobber_and_use(i32 %v1)
|
||||
; CHECK: ret void
|
||||
; NO_ASSUME-LABEL: @test_scope_start_without_load(
|
||||
; NO_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P:%.*]]
|
||||
; NO_ASSUME-NEXT: [[ADD:%.*]] = add i32 [[V1]], [[V1]]
|
||||
; NO_ASSUME-NEXT: call void @clobber_and_use(i32 [[ADD]])
|
||||
; NO_ASSUME-NEXT: call void @clobber_and_use(i32 [[V1]])
|
||||
; NO_ASSUME-NEXT: ret void
|
||||
;
|
||||
; USE_ASSUME-LABEL: @test_scope_start_without_load(
|
||||
; USE_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P:%.*]]
|
||||
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P]], i64 4), "nonnull"(i32* [[P]]) ]
|
||||
; USE_ASSUME-NEXT: [[ADD:%.*]] = add i32 [[V1]], [[V1]]
|
||||
; USE_ASSUME-NEXT: call void @clobber_and_use(i32 [[ADD]])
|
||||
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P]], i64 4), "nonnull"(i32* [[P]]) ]
|
||||
; USE_ASSUME-NEXT: call void @clobber_and_use(i32 [[V1]])
|
||||
; USE_ASSUME-NEXT: ret void
|
||||
;
|
||||
%v1 = load i32, i32* %p
|
||||
%v2 = load i32, i32* %p, !invariant.load !{}
|
||||
%add = add i32 %v1, %v2
|
||||
|
@ -140,13 +211,24 @@ define void @test_scope_start_without_load(i32* %p) {
|
|||
; with a potentially greater generation. This hides the earlier invariant
|
||||
; load
|
||||
define void @test_scope_restart(i32* %p) {
|
||||
; CHECK-LABEL: @test_scope_restart
|
||||
; CHECK: %v1 = load i32, i32* %p
|
||||
; CHECK: call void @clobber_and_use(i32 %v1)
|
||||
; CHECK: %add = add i32 %v1, %v1
|
||||
; CHECK: call void @clobber_and_use(i32 %add)
|
||||
; CHECK: call void @clobber_and_use(i32 %v1)
|
||||
; CHECK: ret void
|
||||
; NO_ASSUME-LABEL: @test_scope_restart(
|
||||
; NO_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P:%.*]], !invariant.load !0
|
||||
; NO_ASSUME-NEXT: call void @clobber_and_use(i32 [[V1]])
|
||||
; NO_ASSUME-NEXT: [[ADD:%.*]] = add i32 [[V1]], [[V1]]
|
||||
; NO_ASSUME-NEXT: call void @clobber_and_use(i32 [[ADD]])
|
||||
; NO_ASSUME-NEXT: call void @clobber_and_use(i32 [[V1]])
|
||||
; NO_ASSUME-NEXT: ret void
|
||||
;
|
||||
; USE_ASSUME-LABEL: @test_scope_restart(
|
||||
; USE_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P:%.*]], !invariant.load !0
|
||||
; USE_ASSUME-NEXT: call void @clobber_and_use(i32 [[V1]])
|
||||
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P]], i64 4), "nonnull"(i32* [[P]]) ]
|
||||
; USE_ASSUME-NEXT: [[ADD:%.*]] = add i32 [[V1]], [[V1]]
|
||||
; USE_ASSUME-NEXT: call void @clobber_and_use(i32 [[ADD]])
|
||||
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P]], i64 4), "nonnull"(i32* [[P]]) ]
|
||||
; USE_ASSUME-NEXT: call void @clobber_and_use(i32 [[V1]])
|
||||
; USE_ASSUME-NEXT: ret void
|
||||
;
|
||||
%v1 = load i32, i32* %p, !invariant.load !{}
|
||||
call void @clobber_and_use(i32 %v1)
|
||||
%v2 = load i32, i32* %p, !invariant.load !{}
|
||||
|
@@ -1,5 +1,7 @@
; RUN: opt < %s -S -early-cse | FileCheck %s
; RUN: opt < %s -S -passes=early-cse | FileCheck %s
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature
; RUN: opt < %s -S -early-cse | FileCheck %s --check-prefixes=CHECK,NO_ASSUME
; RUN: opt < %s -S -early-cse --enable-knowledge-retention | FileCheck %s --check-prefixes=CHECK,USE_ASSUME
; RUN: opt < %s -S -passes=early-cse | FileCheck %s --check-prefixes=CHECK,NO_ASSUME

declare {}* @llvm.invariant.start.p0i8(i64, i8* nocapture) nounwind readonly
declare void @llvm.invariant.end.p0i8({}*, i64, i8* nocapture) nounwind

@ -7,10 +9,19 @@ declare void @llvm.invariant.end.p0i8({}*, i64, i8* nocapture) nounwind
|
|||
; Check that we do load-load forwarding over invariant.start, since it does not
|
||||
; clobber memory
|
||||
define i8 @test_bypass1(i8 *%P) {
|
||||
; CHECK-LABEL: @test_bypass1(
|
||||
; CHECK-NEXT: %V1 = load i8, i8* %P
|
||||
; CHECK-NEXT: %i = call {}* @llvm.invariant.start.p0i8(i64 1, i8* %P)
|
||||
; CHECK-NEXT: ret i8 0
|
||||
; NO_ASSUME-LABEL: define {{[^@]+}}@test_bypass1
|
||||
; NO_ASSUME-SAME: (i8* [[P:%.*]])
|
||||
; NO_ASSUME-NEXT: [[V1:%.*]] = load i8, i8* [[P]]
|
||||
; NO_ASSUME-NEXT: [[I:%.*]] = call {}* @llvm.invariant.start.p0i8(i64 1, i8* [[P]])
|
||||
; NO_ASSUME-NEXT: ret i8 0
|
||||
;
|
||||
; USE_ASSUME-LABEL: define {{[^@]+}}@test_bypass1
|
||||
; USE_ASSUME-SAME: (i8* [[P:%.*]])
|
||||
; USE_ASSUME-NEXT: [[V1:%.*]] = load i8, i8* [[P]]
|
||||
; USE_ASSUME-NEXT: [[I:%.*]] = call {}* @llvm.invariant.start.p0i8(i64 1, i8* [[P]])
|
||||
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i8* [[P]], i64 1), "nonnull"(i8* [[P]]) ]
|
||||
; USE_ASSUME-NEXT: ret i8 0
|
||||
;
|
||||
|
||||
%V1 = load i8, i8* %P
|
||||
%i = call {}* @llvm.invariant.start.p0i8(i64 1, i8* %P)
|
||||
|
@ -22,10 +33,19 @@ define i8 @test_bypass1(i8 *%P) {
|
|||
|
||||
; Trivial Store->load forwarding over invariant.start
|
||||
define i8 @test_bypass2(i8 *%P) {
|
||||
; CHECK-LABEL: @test_bypass2(
|
||||
; CHECK-NEXT: store i8 42, i8* %P
|
||||
; CHECK-NEXT: %i = call {}* @llvm.invariant.start.p0i8(i64 1, i8* %P)
|
||||
; CHECK-NEXT: ret i8 42
|
||||
; NO_ASSUME-LABEL: define {{[^@]+}}@test_bypass2
|
||||
; NO_ASSUME-SAME: (i8* [[P:%.*]])
|
||||
; NO_ASSUME-NEXT: store i8 42, i8* [[P]]
|
||||
; NO_ASSUME-NEXT: [[I:%.*]] = call {}* @llvm.invariant.start.p0i8(i64 1, i8* [[P]])
|
||||
; NO_ASSUME-NEXT: ret i8 42
|
||||
;
|
||||
; USE_ASSUME-LABEL: define {{[^@]+}}@test_bypass2
|
||||
; USE_ASSUME-SAME: (i8* [[P:%.*]])
|
||||
; USE_ASSUME-NEXT: store i8 42, i8* [[P]]
|
||||
; USE_ASSUME-NEXT: [[I:%.*]] = call {}* @llvm.invariant.start.p0i8(i64 1, i8* [[P]])
|
||||
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i8* [[P]], i64 1), "nonnull"(i8* [[P]]) ]
|
||||
; USE_ASSUME-NEXT: ret i8 42
|
||||
;
|
||||
|
||||
store i8 42, i8* %P
|
||||
%i = call {}* @llvm.invariant.start.p0i8(i64 1, i8* %P)
|
||||
|
@ -37,9 +57,19 @@ define i8 @test_bypass2(i8 *%P) {
|
|||
; %P is valid, and the second store is actually unreachable based on semantics
|
||||
; of invariant.start.
|
||||
define void @test_bypass3(i8* %P) {
|
||||
; CHECK-LABEL: @test_bypass3(
|
||||
; CHECK-NEXT: %i = call {}* @llvm.invariant.start.p0i8(i64 1, i8* %P)
|
||||
; CHECK-NEXT: store i8 60, i8* %P
|
||||
; NO_ASSUME-LABEL: define {{[^@]+}}@test_bypass3
|
||||
; NO_ASSUME-SAME: (i8* [[P:%.*]])
|
||||
; NO_ASSUME-NEXT: [[I:%.*]] = call {}* @llvm.invariant.start.p0i8(i64 1, i8* [[P]])
|
||||
; NO_ASSUME-NEXT: store i8 60, i8* [[P]]
|
||||
; NO_ASSUME-NEXT: ret void
|
||||
;
|
||||
; USE_ASSUME-LABEL: define {{[^@]+}}@test_bypass3
|
||||
; USE_ASSUME-SAME: (i8* [[P:%.*]])
|
||||
; USE_ASSUME-NEXT: [[I:%.*]] = call {}* @llvm.invariant.start.p0i8(i64 1, i8* [[P]])
|
||||
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i8* [[P]], i64 1), "nonnull"(i8* [[P]]) ]
|
||||
; USE_ASSUME-NEXT: store i8 60, i8* [[P]]
|
||||
; USE_ASSUME-NEXT: ret void
|
||||
;
|
||||
|
||||
store i8 50, i8* %P
|
||||
%i = call {}* @llvm.invariant.start.p0i8(i64 1, i8* %P)
|
||||
|
@ -51,12 +81,14 @@ define void @test_bypass3(i8* %P) {
|
|||
; FIXME: Now the first store can actually be eliminated, since there is no read within
|
||||
; the invariant region, between start and end.
|
||||
define void @test_bypass4(i8* %P) {
|
||||
|
||||
; CHECK-LABEL: @test_bypass4(
|
||||
; CHECK-NEXT: store i8 50, i8* %P
|
||||
; CHECK-NEXT: %i = call {}* @llvm.invariant.start.p0i8(i64 1, i8* %P)
|
||||
; CHECK-NEXT: call void @llvm.invariant.end.p0i8({}* %i, i64 1, i8* %P)
|
||||
; CHECK-NEXT: store i8 60, i8* %P
|
||||
; CHECK-LABEL: define {{[^@]+}}@test_bypass4
|
||||
; CHECK-SAME: (i8* [[P:%.*]])
|
||||
; CHECK-NEXT: store i8 50, i8* [[P]]
|
||||
; CHECK-NEXT: [[I:%.*]] = call {}* @llvm.invariant.start.p0i8(i64 1, i8* [[P]])
|
||||
; CHECK-NEXT: call void @llvm.invariant.end.p0i8({}* [[I]], i64 1, i8* [[P]])
|
||||
; CHECK-NEXT: store i8 60, i8* [[P]]
|
||||
; CHECK-NEXT: ret void
|
||||
;
|
||||
|
||||
|
||||
store i8 50, i8* %P
|
||||
|
@ -72,8 +104,21 @@ declare {}* @llvm.invariant.start.p0i32(i64 %size, i32* nocapture %ptr)
|
|||
declare void @llvm.invariant.end.p0i32({}*, i64, i32* nocapture) nounwind
|
||||
|
||||
define i32 @test_before_load(i32* %p) {
|
||||
; CHECK-LABEL: @test_before_load
|
||||
; CHECK: ret i32 0
|
||||
; NO_ASSUME-LABEL: define {{[^@]+}}@test_before_load
|
||||
; NO_ASSUME-SAME: (i32* [[P:%.*]])
|
||||
; NO_ASSUME-NEXT: [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]])
|
||||
; NO_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P]]
|
||||
; NO_ASSUME-NEXT: call void @clobber()
|
||||
; NO_ASSUME-NEXT: ret i32 0
|
||||
;
|
||||
; USE_ASSUME-LABEL: define {{[^@]+}}@test_before_load
|
||||
; USE_ASSUME-SAME: (i32* [[P:%.*]])
|
||||
; USE_ASSUME-NEXT: [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]])
|
||||
; USE_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P]]
|
||||
; USE_ASSUME-NEXT: call void @clobber()
|
||||
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P]], i64 4), "nonnull"(i32* [[P]]) ]
|
||||
; USE_ASSUME-NEXT: ret i32 0
|
||||
;
|
||||
call {}* @llvm.invariant.start.p0i32(i64 4, i32* %p)
|
||||
%v1 = load i32, i32* %p
|
||||
call void @clobber()
|
||||
|
@ -83,8 +128,21 @@ define i32 @test_before_load(i32* %p) {
|
|||
}
|
||||
|
||||
define i32 @test_before_clobber(i32* %p) {
|
||||
; CHECK-LABEL: @test_before_clobber
|
||||
; CHECK: ret i32 0
|
||||
; NO_ASSUME-LABEL: define {{[^@]+}}@test_before_clobber
|
||||
; NO_ASSUME-SAME: (i32* [[P:%.*]])
|
||||
; NO_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P]]
|
||||
; NO_ASSUME-NEXT: [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]])
|
||||
; NO_ASSUME-NEXT: call void @clobber()
|
||||
; NO_ASSUME-NEXT: ret i32 0
|
||||
;
|
||||
; USE_ASSUME-LABEL: define {{[^@]+}}@test_before_clobber
|
||||
; USE_ASSUME-SAME: (i32* [[P:%.*]])
|
||||
; USE_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P]]
|
||||
; USE_ASSUME-NEXT: [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]])
|
||||
; USE_ASSUME-NEXT: call void @clobber()
|
||||
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P]], i64 4), "nonnull"(i32* [[P]]) ]
|
||||
; USE_ASSUME-NEXT: ret i32 0
|
||||
;
|
||||
%v1 = load i32, i32* %p
|
||||
call {}* @llvm.invariant.start.p0i32(i64 4, i32* %p)
|
||||
call void @clobber()
|
||||
|
@ -94,8 +152,23 @@ define i32 @test_before_clobber(i32* %p) {
|
|||
}
|
||||
|
||||
define i32 @test_duplicate_scope(i32* %p) {
|
||||
; CHECK-LABEL: @test_duplicate_scope
|
||||
; CHECK: ret i32 0
|
||||
; NO_ASSUME-LABEL: define {{[^@]+}}@test_duplicate_scope
|
||||
; NO_ASSUME-SAME: (i32* [[P:%.*]])
|
||||
; NO_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P]]
|
||||
; NO_ASSUME-NEXT: [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]])
|
||||
; NO_ASSUME-NEXT: call void @clobber()
|
||||
; NO_ASSUME-NEXT: [[TMP2:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]])
|
||||
; NO_ASSUME-NEXT: ret i32 0
|
||||
;
|
||||
; USE_ASSUME-LABEL: define {{[^@]+}}@test_duplicate_scope
|
||||
; USE_ASSUME-SAME: (i32* [[P:%.*]])
|
||||
; USE_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P]]
|
||||
; USE_ASSUME-NEXT: [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]])
|
||||
; USE_ASSUME-NEXT: call void @clobber()
|
||||
; USE_ASSUME-NEXT: [[TMP2:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]])
|
||||
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P]], i64 4), "nonnull"(i32* [[P]]) ]
|
||||
; USE_ASSUME-NEXT: ret i32 0
|
||||
;
|
||||
%v1 = load i32, i32* %p
|
||||
call {}* @llvm.invariant.start.p0i32(i64 4, i32* %p)
|
||||
call void @clobber()
|
||||
|
@ -106,8 +179,23 @@ define i32 @test_duplicate_scope(i32* %p) {
|
|||
}
|
||||
|
||||
define i32 @test_unanalzyable_load(i32* %p) {
|
||||
; CHECK-LABEL: @test_unanalzyable_load
|
||||
; CHECK: ret i32 0
|
||||
; NO_ASSUME-LABEL: define {{[^@]+}}@test_unanalzyable_load
|
||||
; NO_ASSUME-SAME: (i32* [[P:%.*]])
|
||||
; NO_ASSUME-NEXT: [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]])
|
||||
; NO_ASSUME-NEXT: call void @clobber()
|
||||
; NO_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P]]
|
||||
; NO_ASSUME-NEXT: call void @clobber()
|
||||
; NO_ASSUME-NEXT: ret i32 0
|
||||
;
|
||||
; USE_ASSUME-LABEL: define {{[^@]+}}@test_unanalzyable_load
|
||||
; USE_ASSUME-SAME: (i32* [[P:%.*]])
|
||||
; USE_ASSUME-NEXT: [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]])
|
||||
; USE_ASSUME-NEXT: call void @clobber()
|
||||
; USE_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P]]
|
||||
; USE_ASSUME-NEXT: call void @clobber()
|
||||
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P]], i64 4), "nonnull"(i32* [[P]]) ]
|
||||
; USE_ASSUME-NEXT: ret i32 0
|
||||
;
|
||||
call {}* @llvm.invariant.start.p0i32(i64 4, i32* %p)
|
||||
call void @clobber()
|
||||
%v1 = load i32, i32* %p
|
||||
|
@ -118,8 +206,15 @@ define i32 @test_unanalzyable_load(i32* %p) {
|
|||
}
|
||||
|
||||
define i32 @test_negative_after_clobber(i32* %p) {
|
||||
; CHECK-LABEL: @test_negative_after_clobber
|
||||
; CHECK: ret i32 %sub
|
||||
; CHECK-LABEL: define {{[^@]+}}@test_negative_after_clobber
|
||||
; CHECK-SAME: (i32* [[P:%.*]])
|
||||
; CHECK-NEXT: [[V1:%.*]] = load i32, i32* [[P]]
|
||||
; CHECK-NEXT: call void @clobber()
|
||||
; CHECK-NEXT: [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]])
|
||||
; CHECK-NEXT: [[V2:%.*]] = load i32, i32* [[P]]
|
||||
; CHECK-NEXT: [[SUB:%.*]] = sub i32 [[V1]], [[V2]]
|
||||
; CHECK-NEXT: ret i32 [[SUB]]
|
||||
;
|
||||
%v1 = load i32, i32* %p
|
||||
call void @clobber()
|
||||
call {}* @llvm.invariant.start.p0i32(i64 4, i32* %p)
|
||||
|
@ -129,8 +224,29 @@ define i32 @test_negative_after_clobber(i32* %p) {
|
|||
}
|
||||
|
||||
define i32 @test_merge(i32* %p, i1 %cnd) {
|
||||
; CHECK-LABEL: @test_merge
|
||||
; CHECK: ret i32 0
|
||||
; NO_ASSUME-LABEL: define {{[^@]+}}@test_merge
|
||||
; NO_ASSUME-SAME: (i32* [[P:%.*]], i1 [[CND:%.*]])
|
||||
; NO_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P]]
|
||||
; NO_ASSUME-NEXT: [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]])
|
||||
; NO_ASSUME-NEXT: br i1 [[CND]], label [[MERGE:%.*]], label [[TAKEN:%.*]]
|
||||
; NO_ASSUME: taken:
|
||||
; NO_ASSUME-NEXT: call void @clobber()
|
||||
; NO_ASSUME-NEXT: br label [[MERGE]]
|
||||
; NO_ASSUME: merge:
|
||||
; NO_ASSUME-NEXT: ret i32 0
|
||||
;
|
||||
; USE_ASSUME-LABEL: define {{[^@]+}}@test_merge
|
||||
; USE_ASSUME-SAME: (i32* [[P:%.*]], i1 [[CND:%.*]])
|
||||
; USE_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P]]
|
||||
; USE_ASSUME-NEXT: [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]])
|
||||
; USE_ASSUME-NEXT: br i1 [[CND]], label [[MERGE:%.*]], label [[TAKEN:%.*]]
|
||||
; USE_ASSUME: taken:
|
||||
; USE_ASSUME-NEXT: call void @clobber()
|
||||
; USE_ASSUME-NEXT: br label [[MERGE]]
|
||||
; USE_ASSUME: merge:
|
||||
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P]], i64 4), "nonnull"(i32* [[P]]) ]
|
||||
; USE_ASSUME-NEXT: ret i32 0
|
||||
;
|
||||
%v1 = load i32, i32* %p
|
||||
call {}* @llvm.invariant.start.p0i32(i64 4, i32* %p)
|
||||
br i1 %cnd, label %merge, label %taken
|
||||
|
@ -145,8 +261,19 @@ merge:
|
|||
}
|
||||
|
||||
define i32 @test_negative_after_mergeclobber(i32* %p, i1 %cnd) {
|
||||
; CHECK-LABEL: @test_negative_after_mergeclobber
|
||||
; CHECK: ret i32 %sub
|
||||
; CHECK-LABEL: define {{[^@]+}}@test_negative_after_mergeclobber
|
||||
; CHECK-SAME: (i32* [[P:%.*]], i1 [[CND:%.*]])
|
||||
; CHECK-NEXT: [[V1:%.*]] = load i32, i32* [[P]]
|
||||
; CHECK-NEXT: br i1 [[CND]], label [[MERGE:%.*]], label [[TAKEN:%.*]]
|
||||
; CHECK: taken:
|
||||
; CHECK-NEXT: call void @clobber()
|
||||
; CHECK-NEXT: br label [[MERGE]]
|
||||
; CHECK: merge:
|
||||
; CHECK-NEXT: [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]])
|
||||
; CHECK-NEXT: [[V2:%.*]] = load i32, i32* [[P]]
|
||||
; CHECK-NEXT: [[SUB:%.*]] = sub i32 [[V1]], [[V2]]
|
||||
; CHECK-NEXT: ret i32 [[SUB]]
|
||||
;
|
||||
%v1 = load i32, i32* %p
|
||||
br i1 %cnd, label %merge, label %taken
|
||||
|
||||
|
@ -161,10 +288,21 @@ merge:
|
|||
}
|
||||
|
||||
; In theory, this version could work, but earlycse is incapable of
|
||||
; merging facts along distinct paths.
|
||||
; merging facts along distinct paths.
|
||||
define i32 @test_false_negative_merge(i32* %p, i1 %cnd) {
|
||||
; CHECK-LABEL: @test_false_negative_merge
|
||||
; CHECK: ret i32 %sub
|
||||
; CHECK-LABEL: define {{[^@]+}}@test_false_negative_merge
|
||||
; CHECK-SAME: (i32* [[P:%.*]], i1 [[CND:%.*]])
|
||||
; CHECK-NEXT: [[V1:%.*]] = load i32, i32* [[P]]
|
||||
; CHECK-NEXT: br i1 [[CND]], label [[MERGE:%.*]], label [[TAKEN:%.*]]
|
||||
; CHECK: taken:
|
||||
; CHECK-NEXT: [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]])
|
||||
; CHECK-NEXT: call void @clobber()
|
||||
; CHECK-NEXT: br label [[MERGE]]
|
||||
; CHECK: merge:
|
||||
; CHECK-NEXT: [[V2:%.*]] = load i32, i32* [[P]]
|
||||
; CHECK-NEXT: [[SUB:%.*]] = sub i32 [[V1]], [[V2]]
|
||||
; CHECK-NEXT: ret i32 [[SUB]]
|
||||
;
|
||||
%v1 = load i32, i32* %p
|
||||
br i1 %cnd, label %merge, label %taken
|
||||
|
||||
|
@ -179,8 +317,31 @@ merge:
|
|||
}

define i32 @test_merge_unanalyzable_load(i32* %p, i1 %cnd) {
; CHECK-LABEL: @test_merge_unanalyzable_load
; CHECK: ret i32 0
; NO_ASSUME-LABEL: define {{[^@]+}}@test_merge_unanalyzable_load
; NO_ASSUME-SAME: (i32* [[P:%.*]], i1 [[CND:%.*]])
; NO_ASSUME-NEXT: [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]])
; NO_ASSUME-NEXT: call void @clobber()
; NO_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P]]
; NO_ASSUME-NEXT: br i1 [[CND]], label [[MERGE:%.*]], label [[TAKEN:%.*]]
; NO_ASSUME: taken:
; NO_ASSUME-NEXT: call void @clobber()
; NO_ASSUME-NEXT: br label [[MERGE]]
; NO_ASSUME: merge:
; NO_ASSUME-NEXT: ret i32 0
;
; USE_ASSUME-LABEL: define {{[^@]+}}@test_merge_unanalyzable_load
; USE_ASSUME-SAME: (i32* [[P:%.*]], i1 [[CND:%.*]])
; USE_ASSUME-NEXT: [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]])
; USE_ASSUME-NEXT: call void @clobber()
; USE_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P]]
; USE_ASSUME-NEXT: br i1 [[CND]], label [[MERGE:%.*]], label [[TAKEN:%.*]]
; USE_ASSUME: taken:
; USE_ASSUME-NEXT: call void @clobber()
; USE_ASSUME-NEXT: br label [[MERGE]]
; USE_ASSUME: merge:
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P]], i64 4), "nonnull"(i32* [[P]]) ]
; USE_ASSUME-NEXT: ret i32 0
;
  call {}* @llvm.invariant.start.p0i32(i64 4, i32* %p)
  call void @clobber()
  %v1 = load i32, i32* %p
@@ -196,8 +357,21 @@ merge:
}

define void @test_dse_before_load(i32* %p, i1 %cnd) {
; CHECK-LABEL: @test_dse_before_load
; CHECK-NOT: store
; NO_ASSUME-LABEL: define {{[^@]+}}@test_dse_before_load
; NO_ASSUME-SAME: (i32* [[P:%.*]], i1 [[CND:%.*]])
; NO_ASSUME-NEXT: [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]])
; NO_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P]]
; NO_ASSUME-NEXT: call void @clobber()
; NO_ASSUME-NEXT: ret void
;
; USE_ASSUME-LABEL: define {{[^@]+}}@test_dse_before_load
; USE_ASSUME-SAME: (i32* [[P:%.*]], i1 [[CND:%.*]])
; USE_ASSUME-NEXT: [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]])
; USE_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P]]
; USE_ASSUME-NEXT: call void @clobber()
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P]], i64 4), "nonnull"(i32* [[P]]) ]
; USE_ASSUME-NEXT: ret void
;
  call {}* @llvm.invariant.start.p0i32(i64 4, i32* %p)
  %v1 = load i32, i32* %p
  call void @clobber()
@@ -206,8 +380,21 @@ define void @test_dse_before_load(i32* %p, i1 %cnd) {
}

define void @test_dse_after_load(i32* %p, i1 %cnd) {
; CHECK-LABEL: @test_dse_after_load
; CHECK-NOT: store
; NO_ASSUME-LABEL: define {{[^@]+}}@test_dse_after_load
; NO_ASSUME-SAME: (i32* [[P:%.*]], i1 [[CND:%.*]])
; NO_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P]]
; NO_ASSUME-NEXT: [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]])
; NO_ASSUME-NEXT: call void @clobber()
; NO_ASSUME-NEXT: ret void
;
; USE_ASSUME-LABEL: define {{[^@]+}}@test_dse_after_load
; USE_ASSUME-SAME: (i32* [[P:%.*]], i1 [[CND:%.*]])
; USE_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P]]
; USE_ASSUME-NEXT: [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]])
; USE_ASSUME-NEXT: call void @clobber()
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P]], i64 4), "nonnull"(i32* [[P]]) ]
; USE_ASSUME-NEXT: ret void
;
  %v1 = load i32, i32* %p
  call {}* @llvm.invariant.start.p0i32(i64 4, i32* %p)
  call void @clobber()
@@ -220,8 +407,17 @@ define void @test_dse_after_load(i32* %p, i1 %cnd) {
; typed due to the use of a Value to represent the address. Note that other
; passes will canonicalize away the bitcasts in this example (a canonicalized
; sketch follows this test).
define i32 @test_false_negative_types(i32* %p) {
; CHECK-LABEL: @test_false_negative_types
; CHECK: ret i32 %sub
; CHECK-LABEL: define {{[^@]+}}@test_false_negative_types
; CHECK-SAME: (i32* [[P:%.*]])
; CHECK-NEXT: [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]])
; CHECK-NEXT: [[V1:%.*]] = load i32, i32* [[P]]
; CHECK-NEXT: call void @clobber()
; CHECK-NEXT: [[PF:%.*]] = bitcast i32* [[P]] to float*
; CHECK-NEXT: [[V2F:%.*]] = load float, float* [[PF]]
; CHECK-NEXT: [[V2:%.*]] = bitcast float [[V2F]] to i32
; CHECK-NEXT: [[SUB:%.*]] = sub i32 [[V1]], [[V2]]
; CHECK-NEXT: ret i32 [[SUB]]
;
  call {}* @llvm.invariant.start.p0i32(i64 4, i32* %p)
  %v1 = load i32, i32* %p
  call void @clobber()
@@ -233,8 +429,15 @@ define i32 @test_false_negative_types(i32* %p) {
}
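
For context, once the usual load-type canonicalization rewrites the float access as an i32 load of the same pointer, the access matches the type the invariant scope was registered with and EarlyCSE can forward the first load. A minimal sketch of that canonicalized input, with a made-up function name, assuming only the canonicalization mentioned in the comment above:

; Hypothetical canonicalized form of test_false_negative_types: the pointer
; bitcast and float load have been rewritten as a plain i32 load, so both
; accesses use the same Value and type and the covering scope applies.
define i32 @canonicalized_types_sketch(i32* %p) {
  call {}* @llvm.invariant.start.p0i32(i64 4, i32* %p)
  %v1 = load i32, i32* %p
  call void @clobber()
  %v2 = load i32, i32* %p   ; same pointer and type as %v1, so EarlyCSE can reuse %v1
  %sub = sub i32 %v1, %v2
  ret i32 %sub
}

declare {}* @llvm.invariant.start.p0i32(i64, i32* nocapture)
declare void @clobber()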

define i32 @test_negative_size1(i32* %p) {
; CHECK-LABEL: @test_negative_size1
; CHECK: ret i32 %sub
; CHECK-LABEL: define {{[^@]+}}@test_negative_size1
; CHECK-SAME: (i32* [[P:%.*]])
; CHECK-NEXT: [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 3, i32* [[P]])
; CHECK-NEXT: [[V1:%.*]] = load i32, i32* [[P]]
; CHECK-NEXT: call void @clobber()
; CHECK-NEXT: [[V2:%.*]] = load i32, i32* [[P]]
; CHECK-NEXT: [[SUB:%.*]] = sub i32 [[V1]], [[V2]]
; CHECK-NEXT: ret i32 [[SUB]]
;
  call {}* @llvm.invariant.start.p0i32(i64 3, i32* %p)
  %v1 = load i32, i32* %p
  call void @clobber()
@@ -244,8 +447,15 @@ define i32 @test_negative_size1(i32* %p) {
}

define i32 @test_negative_size2(i32* %p) {
; CHECK-LABEL: @test_negative_size2
; CHECK: ret i32 %sub
; CHECK-LABEL: define {{[^@]+}}@test_negative_size2
; CHECK-SAME: (i32* [[P:%.*]])
; CHECK-NEXT: [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 0, i32* [[P]])
; CHECK-NEXT: [[V1:%.*]] = load i32, i32* [[P]]
; CHECK-NEXT: call void @clobber()
; CHECK-NEXT: [[V2:%.*]] = load i32, i32* [[P]]
; CHECK-NEXT: [[SUB:%.*]] = sub i32 [[V1]], [[V2]]
; CHECK-NEXT: ret i32 [[SUB]]
;
  call {}* @llvm.invariant.start.p0i32(i64 0, i32* %p)
  %v1 = load i32, i32* %p
  call void @clobber()
@@ -255,8 +465,16 @@ define i32 @test_negative_size2(i32* %p) {
}

define i32 @test_negative_scope(i32* %p) {
; CHECK-LABEL: @test_negative_scope
; CHECK: ret i32 %sub
; CHECK-LABEL: define {{[^@]+}}@test_negative_scope
; CHECK-SAME: (i32* [[P:%.*]])
; CHECK-NEXT: [[SCOPE:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]])
; CHECK-NEXT: call void @llvm.invariant.end.p0i32({}* [[SCOPE]], i64 4, i32* [[P]])
; CHECK-NEXT: [[V1:%.*]] = load i32, i32* [[P]]
; CHECK-NEXT: call void @clobber()
; CHECK-NEXT: [[V2:%.*]] = load i32, i32* [[P]]
; CHECK-NEXT: [[SUB:%.*]] = sub i32 [[V1]], [[V2]]
; CHECK-NEXT: ret i32 [[SUB]]
;
  %scope = call {}* @llvm.invariant.start.p0i32(i64 4, i32* %p)
  call void @llvm.invariant.end.p0i32({}* %scope, i64 4, i32* %p)
  %v1 = load i32, i32* %p
@@ -267,8 +485,16 @@ define i32 @test_negative_scope(i32* %p) {
}

define i32 @test_false_negative_scope(i32* %p) {
; CHECK-LABEL: @test_false_negative_scope
; CHECK: ret i32 %sub
; CHECK-LABEL: define {{[^@]+}}@test_false_negative_scope
; CHECK-SAME: (i32* [[P:%.*]])
; CHECK-NEXT: [[SCOPE:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]])
; CHECK-NEXT: [[V1:%.*]] = load i32, i32* [[P]]
; CHECK-NEXT: call void @clobber()
; CHECK-NEXT: [[V2:%.*]] = load i32, i32* [[P]]
; CHECK-NEXT: call void @llvm.invariant.end.p0i32({}* [[SCOPE]], i64 4, i32* [[P]])
; CHECK-NEXT: [[SUB:%.*]] = sub i32 [[V1]], [[V2]]
; CHECK-NEXT: ret i32 [[SUB]]
;
  %scope = call {}* @llvm.invariant.start.p0i32(i64 4, i32* %p)
  %v1 = load i32, i32* %p
  call void @clobber()
@@ -280,8 +506,19 @@ define i32 @test_false_negative_scope(i32* %p) {

; Invariant load de facto starts an invariant.start scope of the appropriate size
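
For reference, the equivalence that comment relies on can be written out directly. The sketch below, with made-up function names, pairs a load tagged !invariant.load with the explicit scope it behaves like for the purposes of these tests:

; A load tagged !invariant.load promises the loaded memory does not change once
; the load executes, which these tests treat like an invariant.start scope of
; the loaded size opening at that point.
define i32 @with_invariant_metadata_sketch(i32* %p) {
  %v = load i32, i32* %p, !invariant.load !0
  ret i32 %v
}

define i32 @with_explicit_scope_sketch(i32* %p) {
  call {}* @llvm.invariant.start.p0i32(i64 4, i32* %p) ; 4 bytes = size of the i32 load
  %v = load i32, i32* %p
  ret i32 %v
}

declare {}* @llvm.invariant.start.p0i32(i64, i32* nocapture)

!0 = !{}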

define i32 @test_invariant_load_scope(i32* %p) {
; CHECK-LABEL: @test_invariant_load_scope
; CHECK: ret i32 0
; NO_ASSUME-LABEL: define {{[^@]+}}@test_invariant_load_scope
; NO_ASSUME-SAME: (i32* [[P:%.*]])
; NO_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P]], !invariant.load !0
; NO_ASSUME-NEXT: call void @clobber()
; NO_ASSUME-NEXT: ret i32 0
;
; USE_ASSUME-LABEL: define {{[^@]+}}@test_invariant_load_scope
; USE_ASSUME-SAME: (i32* [[P:%.*]])
; USE_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P]], !invariant.load !0
; USE_ASSUME-NEXT: call void @clobber()
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P]], i64 4), "nonnull"(i32* [[P]]) ]
; USE_ASSUME-NEXT: ret i32 0
;
  %v1 = load i32, i32* %p, !invariant.load !{}
  call void @clobber()
  %v2 = load i32, i32* %p