[ARM][MVE] Tail-predication: support constant trip count

We had support for runtime trip count values, but not constants, and this adds
support for that.

And added a minor optimisation while I was at it: don't invoke Cleanup when
there's nothing to clean up.

Differential Revision: https://reviews.llvm.org/D73198
This commit is contained in:
Sjoerd Meijer 2020-01-27 11:05:26 +00:00
parent 6c2df5d14f
commit b567ff2fa0
2 changed files with 499 additions and 91 deletions

View File

@ -55,6 +55,27 @@ DisableTailPredication("disable-mve-tail-predication", cl::Hidden,
cl::desc("Disable MVE Tail Predication"));
namespace {
// Bookkeeping for pattern matching the loop trip count and the number of
// elements processed by the loop.
struct TripCountPattern {
// The predicate used by the masked loads/stores, i.e. an icmp instruction
// which calculates active/inactive lanes.
Instruction *Predicate = nullptr;
// The loop trip count value supplied by the caller. NOTE(review): the
// original note called this "the add instruction that increments the IV";
// in the IR it may indeed be an add — confirm against callers.
Value *TripCount = nullptr;
// The number of elements processed by the vector loop.
Value *NumElements = nullptr;
// Vector type of the masked load/store the predicate feeds.
VectorType *VecTy = nullptr;
// The splat shuffle feeding the predicate; filled in while pattern matching.
Instruction *Shuffle = nullptr;
// The vector induction variable compared by the predicate; filled in while
// pattern matching.
Instruction *Induction = nullptr;
// Predicate, trip count and vector type are known up front; the remaining
// fields are populated by the matching helpers.
TripCountPattern(Instruction *P, Value *TC, VectorType *VT)
: Predicate(P), TripCount(TC), VecTy(VT){};
};
class MVETailPredication : public LoopPass {
SmallVector<IntrinsicInst*, 4> MaskedInsts;
Loop *L = nullptr;
@ -85,7 +106,6 @@ public:
bool runOnLoop(Loop *L, LPPassManager&) override;
private:
/// Perform the relevant checks on the loop and convert if possible.
bool TryConvert(Value *TripCount);
@ -94,18 +114,16 @@ private:
bool IsPredicatedVectorLoop();
/// Compute a value for the total number of elements that the predicated
/// loop will process.
Value *ComputeElements(Value *TripCount, VectorType *VecTy);
/// loop will process if it is a runtime value.
bool ComputeRuntimeElements(TripCountPattern &TCP);
/// Is the icmp that generates an i1 vector, based upon a loop counter
/// and a limit that is defined outside the loop.
bool isTailPredicate(Instruction *Predicate, Value *NumElements);
bool isTailPredicate(TripCountPattern &TCP);
/// Insert the intrinsic to represent the effect of tail predication.
void InsertVCTPIntrinsic(Instruction *Predicate,
DenseMap<Instruction*, Instruction*> &NewPredicates,
VectorType *VecTy,
Value *NumElements);
void InsertVCTPIntrinsic(TripCountPattern &TCP,
DenseMap<Instruction *, Instruction *> &NewPredicates);
/// Rematerialize the iteration count in exit blocks, which enables
/// ARMLowOverheadLoops to better optimise away loop update statements inside
@ -213,6 +231,7 @@ bool MVETailPredication::runOnLoop(Loop *L, LPPassManager&) {
if (!Decrement)
return false;
ClonedVCTPInExitBlock = false;
LLVM_DEBUG(dbgs() << "ARM TP: Running on Loop: " << *L << *Setup << "\n"
<< *Decrement << "\n");
@ -225,17 +244,17 @@ bool MVETailPredication::runOnLoop(Loop *L, LPPassManager&) {
return false;
}
bool MVETailPredication::isTailPredicate(Instruction *I, Value *NumElements) {
// Look for the following:
// Pattern match predicates/masks and determine if they use the loop induction
// variable to control the number of elements processed by the loop. If so,
// the loop is a candidate for tail-predication.
bool MVETailPredication::isTailPredicate(TripCountPattern &TCP) {
using namespace PatternMatch;
// %trip.count.minus.1 = add i32 %N, -1
// %broadcast.splatinsert10 = insertelement <4 x i32> undef,
// i32 %trip.count.minus.1, i32 0
// %broadcast.splat11 = shufflevector <4 x i32> %broadcast.splatinsert10,
// <4 x i32> undef,
// <4 x i32> zeroinitializer
// ...
// ...
// Pattern match the loop body and find the add which takes the index iv
// and adds a constant vector to it:
//
// vector.body:
// ..
// %index = phi i32
// %broadcast.splatinsert = insertelement <4 x i32> undef, i32 %index, i32 0
// %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert,
@ -244,49 +263,11 @@ bool MVETailPredication::isTailPredicate(Instruction *I, Value *NumElements) {
// %induction = add <4 x i32> %broadcast.splat, <i32 0, i32 1, i32 2, i32 3>
// %pred = icmp ule <4 x i32> %induction, %broadcast.splat11
// And return whether V == %pred.
using namespace PatternMatch;
CmpInst::Predicate Pred;
Instruction *Shuffle = nullptr;
Instruction *Induction = nullptr;
// The vector icmp
if (!match(I, m_ICmp(Pred, m_Instruction(Induction),
m_Instruction(Shuffle))) ||
Pred != ICmpInst::ICMP_ULE)
return false;
// First find the stuff outside the loop which is setting up the limit
// vector....
// The invariant shuffle that broadcast the limit into a vector.
Instruction *Insert = nullptr;
if (!match(Shuffle, m_ShuffleVector(m_Instruction(Insert), m_Undef(),
m_Zero())))
return false;
// Insert the limit into a vector.
Instruction *BECount = nullptr;
if (!match(Insert, m_InsertElement(m_Undef(), m_Instruction(BECount),
m_Zero())))
return false;
// The limit calculation, backedge count.
Value *TripCount = nullptr;
if (!match(BECount, m_Add(m_Value(TripCount), m_AllOnes())))
return false;
if (TripCount != NumElements || !L->isLoopInvariant(BECount))
return false;
// Now back to searching inside the loop body...
// Find the add which takes the index iv and adds a constant vector to it.
Instruction *BroadcastSplat = nullptr;
Constant *Const = nullptr;
if (!match(Induction, m_Add(m_Instruction(BroadcastSplat),
m_Constant(Const))))
return false;
if (!match(TCP.Induction,
m_Add(m_Instruction(BroadcastSplat), m_Constant(Const))))
return false;
// Check that we're adding <0, 1, 2, 3...
if (auto *CDS = dyn_cast<ConstantDataSequential>(Const)) {
@ -297,9 +278,10 @@ bool MVETailPredication::isTailPredicate(Instruction *I, Value *NumElements) {
} else
return false;
Instruction *Insert = nullptr;
// The shuffle which broadcasts the index iv into a vector.
if (!match(BroadcastSplat, m_ShuffleVector(m_Instruction(Insert), m_Undef(),
m_Zero())))
if (!match(BroadcastSplat,
m_ShuffleVector(m_Instruction(Insert), m_Undef(), m_Zero())))
return false;
// The insert element which initialises a vector with the index iv.
@ -327,7 +309,7 @@ bool MVETailPredication::isTailPredicate(Instruction *I, Value *NumElements) {
return LHS == Phi;
}
static VectorType* getVectorType(IntrinsicInst *I) {
static VectorType *getVectorType(IntrinsicInst *I) {
unsigned TypeOp = I->getIntrinsicID() == Intrinsic::masked_load ? 0 : 1;
auto *PtrTy = cast<PointerType>(I->getOperand(TypeOp)->getType());
return cast<VectorType>(PtrTy->getElementType());
@ -361,17 +343,108 @@ bool MVETailPredication::IsPredicatedVectorLoop() {
return !MaskedInsts.empty();
}
Value* MVETailPredication::ComputeElements(Value *TripCount,
VectorType *VecTy) {
const SCEV *TripCountSE = SE->getSCEV(TripCount);
ConstantInt *VF = ConstantInt::get(cast<IntegerType>(TripCount->getType()),
VecTy->getNumElements());
// Pattern match the predicate, which is an icmp with a constant vector of this
// form:
//
// icmp ult <4 x i32> %induction, <i32 32002, i32 32002, i32 32002, i32 32002>
//
// and return the constant, i.e. 32002 in this example. This is assumed to be
// the scalar loop iteration count: the number of loop elements processed by
// the vector loop. Further checks are performed in function isTailPredicate(),
// to verify 'induction' behaves as an induction variable.
//
// Try to extract a constant element count from the predicate in TCP.
//
// Only applies when the trip count is itself a compile-time constant; the
// predicate must be an 'icmp ult' of the vector induction variable against a
// constant splat vector, whose splat value is the element count.
//
// On success sets TCP.Induction and TCP.NumElements and returns true.
static bool ComputeConstElements(TripCountPattern &TCP) {
  // Use isa<>, not dyn_cast<>, for a pure null/type test.
  if (!isa<ConstantInt>(TCP.TripCount))
    return false;

  ConstantInt *VF = ConstantInt::get(
      cast<IntegerType>(TCP.TripCount->getType()), TCP.VecTy->getNumElements());

  using namespace PatternMatch;
  CmpInst::Predicate CC;
  // Match: icmp ult <N x iM> %induction, <constant splat>
  if (!match(TCP.Predicate, m_ICmp(CC, m_Instruction(TCP.Induction),
                                   m_AnyIntegralConstant())) ||
      CC != ICmpInst::ICMP_ULT)
    return false;

  LLVM_DEBUG(dbgs() << "ARM TP: icmp with constants: "; TCP.Predicate->dump(););
  Value *ConstVec = TCP.Predicate->getOperand(1);

  // The constant operand must have as many lanes as the vectorisation factor.
  auto *CDS = dyn_cast<ConstantDataSequential>(ConstVec);
  if (!CDS || CDS->getNumElements() != VF->getSExtValue())
    return false;

  // A splat value only exists when all lanes are equal; a non-uniform vector
  // yields nullptr and we bail out.
  if ((TCP.NumElements = CDS->getSplatValue())) {
    // cast<> (not dyn_cast<>) here: the splat of an integral constant vector
    // is a ConstantInt, and dereferencing a failed dyn_cast would be UB.
    assert(cast<ConstantInt>(TCP.NumElements)->getSExtValue() %
                   VF->getSExtValue() !=
               0 &&
           "tail-predication: trip count should not be a multiple of the VF");
    LLVM_DEBUG(dbgs() << "ARM TP: Found const elem count: " << *TCP.NumElements
                      << "\n");
    return true;
  }
  return false;
}
// Pattern match the loop iteration count setup:
//
// %trip.count.minus.1 = add i32 %N, -1
// %broadcast.splatinsert10 = insertelement <4 x i32> undef,
// i32 %trip.count.minus.1, i32 0
// %broadcast.splat11 = shufflevector <4 x i32> %broadcast.splatinsert10,
// <4 x i32> undef,
// <4 x i32> zeroinitializer
// ..
// vector.body:
// ..
//
static bool MatchElemCountLoopSetup(Loop *L, Instruction *Shuffle,
                                    Value *NumElements) {
  using namespace PatternMatch;

  // The invariant shuffle that broadcasts the limit into every lane.
  Instruction *SplatInsert = nullptr;
  if (!match(Shuffle,
             m_ShuffleVector(m_Instruction(SplatInsert), m_Undef(), m_Zero())))
    return false;

  // The insertelement that places the limit into lane 0.
  Instruction *BTC = nullptr;
  if (!match(SplatInsert,
             m_InsertElement(m_Undef(), m_Instruction(BTC), m_Zero())))
    return false;

  // The limit itself: the backedge count, i.e. trip count minus one.
  Value *TC = nullptr;
  if (!match(BTC, m_Add(m_Value(TC), m_AllOnes())))
    return false;

  // The trip count feeding the setup must be the element count we computed,
  // and the backedge count must be loop invariant.
  return TC == NumElements && L->isLoopInvariant(BTC);
}
bool MVETailPredication::ComputeRuntimeElements(TripCountPattern &TCP) {
using namespace PatternMatch;
const SCEV *TripCountSE = SE->getSCEV(TCP.TripCount);
ConstantInt *VF = ConstantInt::get(
cast<IntegerType>(TCP.TripCount->getType()), TCP.VecTy->getNumElements());
if (VF->equalsInt(1))
return nullptr;
return false;
// TODO: Support constant trip counts.
auto VisitAdd = [&](const SCEVAddExpr *S) -> const SCEVMulExpr* {
CmpInst::Predicate Pred;
if (!match(TCP.Predicate, m_ICmp(Pred, m_Instruction(TCP.Induction),
m_Instruction(TCP.Shuffle))) ||
Pred != ICmpInst::ICMP_ULE)
return false;
LLVM_DEBUG(dbgs() << "Computing number of elements for vector trip count: ";
TCP.TripCount->dump());
// Otherwise, continue and try to pattern match the vector iteration
// count expression
auto VisitAdd = [&](const SCEVAddExpr *S) -> const SCEVMulExpr * {
if (auto *Const = dyn_cast<SCEVConstant>(S->getOperand(0))) {
if (Const->getAPInt() != -VF->getValue())
return nullptr;
@ -380,7 +453,7 @@ Value* MVETailPredication::ComputeElements(Value *TripCount,
return dyn_cast<SCEVMulExpr>(S->getOperand(1));
};
auto VisitMul = [&](const SCEVMulExpr *S) -> const SCEVUDivExpr* {
auto VisitMul = [&](const SCEVMulExpr *S) -> const SCEVUDivExpr * {
if (auto *Const = dyn_cast<SCEVConstant>(S->getOperand(0))) {
if (Const->getValue() != VF)
return nullptr;
@ -389,7 +462,7 @@ Value* MVETailPredication::ComputeElements(Value *TripCount,
return dyn_cast<SCEVUDivExpr>(S->getOperand(1));
};
auto VisitDiv = [&](const SCEVUDivExpr *S) -> const SCEV* {
auto VisitDiv = [&](const SCEVUDivExpr *S) -> const SCEV * {
if (auto *Const = dyn_cast<SCEVConstant>(S->getRHS())) {
if (Const->getValue() != VF)
return nullptr;
@ -426,15 +499,20 @@ Value* MVETailPredication::ComputeElements(Value *TripCount,
Elems = Res;
if (!Elems)
return nullptr;
return false;
Instruction *InsertPt = L->getLoopPreheader()->getTerminator();
if (!isSafeToExpandAt(Elems, InsertPt, *SE))
return nullptr;
return false;
auto DL = L->getHeader()->getModule()->getDataLayout();
SCEVExpander Expander(*SE, DL, "elements");
return Expander.expandCodeFor(Elems, Elems->getType(), InsertPt);
TCP.NumElements = Expander.expandCodeFor(Elems, Elems->getType(), InsertPt);
if (!MatchElemCountLoopSetup(L, TCP.Shuffle, TCP.NumElements))
return false;
return true;
}
// Look through the exit block to see whether there's a duplicate predicate
@ -499,24 +577,23 @@ static bool Cleanup(DenseMap<Instruction*, Instruction*> &NewPredicates,
return ClonedVCTPInExitBlock;
}
void MVETailPredication::InsertVCTPIntrinsic(Instruction *Predicate,
DenseMap<Instruction*, Instruction*> &NewPredicates,
VectorType *VecTy, Value *NumElements) {
void MVETailPredication::InsertVCTPIntrinsic(TripCountPattern &TCP,
DenseMap<Instruction*, Instruction*> &NewPredicates) {
IRBuilder<> Builder(L->getHeader()->getFirstNonPHI());
Module *M = L->getHeader()->getModule();
Type *Ty = IntegerType::get(M->getContext(), 32);
// Insert a phi to count the number of elements processed by the loop.
PHINode *Processed = Builder.CreatePHI(Ty, 2);
Processed->addIncoming(NumElements, L->getLoopPreheader());
Processed->addIncoming(TCP.NumElements, L->getLoopPreheader());
// Insert the intrinsic to represent the effect of tail predication.
Builder.SetInsertPoint(cast<Instruction>(Predicate));
Builder.SetInsertPoint(cast<Instruction>(TCP.Predicate));
ConstantInt *Factor =
ConstantInt::get(cast<IntegerType>(Ty), VecTy->getNumElements());
ConstantInt::get(cast<IntegerType>(Ty), TCP.VecTy->getNumElements());
Intrinsic::ID VCTPID;
switch (VecTy->getNumElements()) {
switch (TCP.VecTy->getNumElements()) {
default:
llvm_unreachable("unexpected number of lanes");
case 4: VCTPID = Intrinsic::arm_mve_vctp32; break;
@ -531,8 +608,8 @@ void MVETailPredication::InsertVCTPIntrinsic(Instruction *Predicate,
}
Function *VCTP = Intrinsic::getDeclaration(M, VCTPID);
Value *TailPredicate = Builder.CreateCall(VCTP, Processed);
Predicate->replaceAllUsesWith(TailPredicate);
NewPredicates[Predicate] = cast<Instruction>(TailPredicate);
TCP.Predicate->replaceAllUsesWith(TailPredicate);
NewPredicates[TCP.Predicate] = cast<Instruction>(TailPredicate);
// Add the incoming value to the new phi.
// TODO: This add likely already exists in the loop.
@ -545,7 +622,7 @@ void MVETailPredication::InsertVCTPIntrinsic(Instruction *Predicate,
bool MVETailPredication::TryConvert(Value *TripCount) {
if (!IsPredicatedVectorLoop()) {
LLVM_DEBUG(dbgs() << "ARM TP: no masked instructions in loop");
LLVM_DEBUG(dbgs() << "ARM TP: no masked instructions in loop.\n");
return false;
}
@ -563,22 +640,24 @@ bool MVETailPredication::TryConvert(Value *TripCount) {
if (!Predicate || Predicates.count(Predicate))
continue;
VectorType *VecTy = getVectorType(I);
Value *NumElements = ComputeElements(TripCount, VecTy);
if (!NumElements)
TripCountPattern TCP(Predicate, TripCount, getVectorType(I));
if (!(ComputeConstElements(TCP) || ComputeRuntimeElements(TCP)))
continue;
if (!isTailPredicate(Predicate, NumElements)) {
if (!isTailPredicate(TCP)) {
LLVM_DEBUG(dbgs() << "ARM TP: Not tail predicate: " << *Predicate << "\n");
continue;
}
LLVM_DEBUG(dbgs() << "ARM TP: Found tail predicate: " << *Predicate << "\n");
Predicates.insert(Predicate);
InsertVCTPIntrinsic(Predicate, NewPredicates, VecTy, NumElements);
InsertVCTPIntrinsic(TCP, NewPredicates);
}
if (!NewPredicates.size())
return false;
// Now clean up.
ClonedVCTPInExitBlock = Cleanup(NewPredicates, Predicates, L);
return true;

View File

@ -0,0 +1,329 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -mtriple=thumbv8.1m.main -mve-tail-predication -disable-mve-tail-predication=false -mattr=+mve %s -S -o - | FileCheck %s
; Constant trip count: 32002 elements with VF = 4 (32002 is not a multiple of
; 4), so per the CHECK lines the 'icmp ult' against the 32002-splat is
; replaced by @llvm.arm.mve.vctp32 on a new element-counting phi.
define dso_local void @foo(i32* noalias nocapture %A, i32* noalias nocapture readonly %B, i32* noalias nocapture readonly %C, i32* noalias nocapture readnone %D, i32 %N) local_unnamed_addr #0 {
; CHECK-LABEL: @foo(
; CHECK-NEXT: entry:
; CHECK-NEXT: call void @llvm.set.loop.iterations.i32(i32 8001)
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[LSR_IV14:%.*]] = phi i32* [ [[SCEVGEP15:%.*]], [[VECTOR_BODY]] ], [ [[A:%.*]], [[ENTRY:%.*]] ]
; CHECK-NEXT: [[LSR_IV11:%.*]] = phi i32* [ [[SCEVGEP12:%.*]], [[VECTOR_BODY]] ], [ [[C:%.*]], [[ENTRY]] ]
; CHECK-NEXT: [[LSR_IV:%.*]] = phi i32* [ [[SCEVGEP:%.*]], [[VECTOR_BODY]] ], [ [[B:%.*]], [[ENTRY]] ]
; CHECK-NEXT: [[TMP0:%.*]] = phi i32 [ 8001, [[ENTRY]] ], [ [[TMP5:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP1:%.*]] = phi i32 [ 32002, [[ENTRY]] ], [ [[TMP3:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[LSR_IV1416:%.*]] = bitcast i32* [[LSR_IV14]] to <4 x i32>*
; CHECK-NEXT: [[LSR_IV1113:%.*]] = bitcast i32* [[LSR_IV11]] to <4 x i32>*
; CHECK-NEXT: [[LSR_IV10:%.*]] = bitcast i32* [[LSR_IV]] to <4 x i32>*
; CHECK-NEXT: [[TMP2:%.*]] = call <4 x i1> @llvm.arm.mve.vctp32(i32 [[TMP1]])
; CHECK-NEXT: [[TMP3]] = sub i32 [[TMP1]], 4
; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* [[LSR_IV10]], i32 4, <4 x i1> [[TMP2]], <4 x i32> undef)
; CHECK-NEXT: [[WIDE_MASKED_LOAD9:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* [[LSR_IV1113]], i32 4, <4 x i1> [[TMP2]], <4 x i32> undef)
; CHECK-NEXT: [[TMP4:%.*]] = add nsw <4 x i32> [[WIDE_MASKED_LOAD9]], [[WIDE_MASKED_LOAD]]
; CHECK-NEXT: call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> [[TMP4]], <4 x i32>* [[LSR_IV1416]], i32 4, <4 x i1> [[TMP2]])
; CHECK-NEXT: [[SCEVGEP]] = getelementptr i32, i32* [[LSR_IV]], i32 4
; CHECK-NEXT: [[SCEVGEP12]] = getelementptr i32, i32* [[LSR_IV11]], i32 4
; CHECK-NEXT: [[SCEVGEP15]] = getelementptr i32, i32* [[LSR_IV14]], i32 4
; CHECK-NEXT: [[TMP5]] = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 [[TMP0]], i32 1)
; CHECK-NEXT: [[TMP6:%.*]] = icmp ne i32 [[TMP5]], 0
; CHECK-NEXT: br i1 [[TMP6]], label [[VECTOR_BODY]], label [[FOR_COND_CLEANUP:%.*]]
; CHECK: for.cond.cleanup:
; CHECK-NEXT: ret void
;
entry:
call void @llvm.set.loop.iterations.i32(i32 8001)
br label %vector.body
vector.body:
%lsr.iv14 = phi i32* [ %scevgep15, %vector.body ], [ %A, %entry ]
%lsr.iv11 = phi i32* [ %scevgep12, %vector.body ], [ %C, %entry ]
%lsr.iv = phi i32* [ %scevgep, %vector.body ], [ %B, %entry ]
%index = phi i32 [ 0, %entry ], [ %index.next, %vector.body ]
%0 = phi i32 [ 8001, %entry ], [ %3, %vector.body ]
%lsr.iv1416 = bitcast i32* %lsr.iv14 to <4 x i32>*
%lsr.iv1113 = bitcast i32* %lsr.iv11 to <4 x i32>*
%lsr.iv10 = bitcast i32* %lsr.iv to <4 x i32>*
%broadcast.splatinsert = insertelement <4 x i32> undef, i32 %index, i32 0
%broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> undef, <4 x i32> zeroinitializer
%induction = add <4 x i32> %broadcast.splat, <i32 0, i32 1, i32 2, i32 3>
%1 = icmp ult <4 x i32> %induction, <i32 32002, i32 32002, i32 32002, i32 32002>
%wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %lsr.iv10, i32 4, <4 x i1> %1, <4 x i32> undef)
%wide.masked.load9 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %lsr.iv1113, i32 4, <4 x i1> %1, <4 x i32> undef)
%2 = add nsw <4 x i32> %wide.masked.load9, %wide.masked.load
call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %2, <4 x i32>* %lsr.iv1416, i32 4, <4 x i1> %1)
%index.next = add i32 %index, 4
%scevgep = getelementptr i32, i32* %lsr.iv, i32 4
%scevgep12 = getelementptr i32, i32* %lsr.iv11, i32 4
%scevgep15 = getelementptr i32, i32* %lsr.iv14, i32 4
%3 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %0, i32 1)
%4 = icmp ne i32 %3, 0
br i1 %4, label %vector.body, label %for.cond.cleanup
for.cond.cleanup:
ret void
}
; Silly test case: the loop count is constant and a multiple of the vectorisation
; factor. So, the vectoriser should not produce masked loads/stores and there's
; nothing to tail-predicate here, just checking.
; Per the CHECK lines: the loads/stores here are unmasked, so no vctp is
; inserted and the body is left unchanged.
define dso_local void @foo2(i32* noalias nocapture %A, i32* noalias nocapture readonly %B, i32* noalias nocapture readonly %C, i32* noalias nocapture readnone %D, i32 %N) local_unnamed_addr #0 {
; CHECK-LABEL: @foo2(
; CHECK-NEXT: entry:
; CHECK-NEXT: call void @llvm.set.loop.iterations.i32(i32 2000)
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[LSR_IV14:%.*]] = phi i32* [ [[SCEVGEP15:%.*]], [[VECTOR_BODY]] ], [ [[A:%.*]], [[ENTRY:%.*]] ]
; CHECK-NEXT: [[LSR_IV11:%.*]] = phi i32* [ [[SCEVGEP12:%.*]], [[VECTOR_BODY]] ], [ [[C:%.*]], [[ENTRY]] ]
; CHECK-NEXT: [[LSR_IV:%.*]] = phi i32* [ [[SCEVGEP:%.*]], [[VECTOR_BODY]] ], [ [[B:%.*]], [[ENTRY]] ]
; CHECK-NEXT: [[TMP0:%.*]] = phi i32 [ 2000, [[ENTRY]] ], [ [[TMP2:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[LSR_IV1416:%.*]] = bitcast i32* [[LSR_IV14]] to <4 x i32>*
; CHECK-NEXT: [[LSR_IV1113:%.*]] = bitcast i32* [[LSR_IV11]] to <4 x i32>*
; CHECK-NEXT: [[LSR_IV10:%.*]] = bitcast i32* [[LSR_IV]] to <4 x i32>*
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, <4 x i32>* [[LSR_IV10]], align 4
; CHECK-NEXT: [[WIDE_LOAD9:%.*]] = load <4 x i32>, <4 x i32>* [[LSR_IV1113]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = add nsw <4 x i32> [[WIDE_LOAD9]], [[WIDE_LOAD]]
; CHECK-NEXT: store <4 x i32> [[TMP1]], <4 x i32>* [[LSR_IV1416]], align 4
; CHECK-NEXT: [[SCEVGEP]] = getelementptr i32, i32* [[LSR_IV]], i32 4
; CHECK-NEXT: [[SCEVGEP12]] = getelementptr i32, i32* [[LSR_IV11]], i32 4
; CHECK-NEXT: [[SCEVGEP15]] = getelementptr i32, i32* [[LSR_IV14]], i32 4
; CHECK-NEXT: [[TMP2]] = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 [[TMP0]], i32 1)
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne i32 [[TMP2]], 0
; CHECK-NEXT: br i1 [[TMP3]], label [[VECTOR_BODY]], label [[FOR_COND_CLEANUP:%.*]]
; CHECK: for.cond.cleanup:
; CHECK-NEXT: ret void
;
entry:
call void @llvm.set.loop.iterations.i32(i32 2000)
br label %vector.body
vector.body:
%lsr.iv14 = phi i32* [ %scevgep15, %vector.body ], [ %A, %entry ]
%lsr.iv11 = phi i32* [ %scevgep12, %vector.body ], [ %C, %entry ]
%lsr.iv = phi i32* [ %scevgep, %vector.body ], [ %B, %entry ]
%0 = phi i32 [ 2000, %entry ], [ %2, %vector.body ]
%lsr.iv1416 = bitcast i32* %lsr.iv14 to <4 x i32>*
%lsr.iv1113 = bitcast i32* %lsr.iv11 to <4 x i32>*
%lsr.iv10 = bitcast i32* %lsr.iv to <4 x i32>*
%wide.load = load <4 x i32>, <4 x i32>* %lsr.iv10, align 4
%wide.load9 = load <4 x i32>, <4 x i32>* %lsr.iv1113, align 4
%1 = add nsw <4 x i32> %wide.load9, %wide.load
store <4 x i32> %1, <4 x i32>* %lsr.iv1416, align 4
%scevgep = getelementptr i32, i32* %lsr.iv, i32 4
%scevgep12 = getelementptr i32, i32* %lsr.iv11, i32 4
%scevgep15 = getelementptr i32, i32* %lsr.iv14, i32 4
%2 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %0, i32 1)
%3 = icmp ne i32 %2, 0
br i1 %3, label %vector.body, label %for.cond.cleanup
for.cond.cleanup:
ret void
}
; Check that the icmp is a ult
; Negative test: the predicate is 'icmp ugt', not 'ult', so the pattern must
; not match — per the CHECK lines the icmp ugt survives and no vctp appears.
define dso_local void @foo3(i32* noalias nocapture %A, i32* noalias nocapture readonly %B, i32* noalias nocapture readonly %C, i32* noalias nocapture readnone %D, i32 %N) local_unnamed_addr #0 {
; CHECK-LABEL: @foo3(
; CHECK-NEXT: entry:
; CHECK-NEXT: call void @llvm.set.loop.iterations.i32(i32 8001)
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[LSR_IV14:%.*]] = phi i32* [ [[SCEVGEP15:%.*]], [[VECTOR_BODY]] ], [ [[A:%.*]], [[ENTRY:%.*]] ]
; CHECK-NEXT: [[LSR_IV11:%.*]] = phi i32* [ [[SCEVGEP12:%.*]], [[VECTOR_BODY]] ], [ [[C:%.*]], [[ENTRY]] ]
; CHECK-NEXT: [[LSR_IV:%.*]] = phi i32* [ [[SCEVGEP:%.*]], [[VECTOR_BODY]] ], [ [[B:%.*]], [[ENTRY]] ]
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP0:%.*]] = phi i32 [ 8001, [[ENTRY]] ], [ [[TMP3:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[LSR_IV1416:%.*]] = bitcast i32* [[LSR_IV14]] to <4 x i32>*
; CHECK-NEXT: [[LSR_IV1113:%.*]] = bitcast i32* [[LSR_IV11]] to <4 x i32>*
; CHECK-NEXT: [[LSR_IV10:%.*]] = bitcast i32* [[LSR_IV]] to <4 x i32>*
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> undef, i32 [[INDEX]], i32 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> undef, <4 x i32> zeroinitializer
; CHECK-NEXT: [[INDUCTION:%.*]] = add <4 x i32> [[BROADCAST_SPLAT]], <i32 0, i32 1, i32 2, i32 3>
; CHECK-NEXT: [[TMP1:%.*]] = icmp ugt <4 x i32> [[INDUCTION]], <i32 32002, i32 32002, i32 32002, i32 32002>
; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* [[LSR_IV10]], i32 4, <4 x i1> [[TMP1]], <4 x i32> undef)
; CHECK-NEXT: [[WIDE_MASKED_LOAD9:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* [[LSR_IV1113]], i32 4, <4 x i1> [[TMP1]], <4 x i32> undef)
; CHECK-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> [[WIDE_MASKED_LOAD9]], [[WIDE_MASKED_LOAD]]
; CHECK-NEXT: call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> [[TMP2]], <4 x i32>* [[LSR_IV1416]], i32 4, <4 x i1> [[TMP1]])
; CHECK-NEXT: [[INDEX_NEXT]] = add i32 [[INDEX]], 4
; CHECK-NEXT: [[SCEVGEP]] = getelementptr i32, i32* [[LSR_IV]], i32 4
; CHECK-NEXT: [[SCEVGEP12]] = getelementptr i32, i32* [[LSR_IV11]], i32 4
; CHECK-NEXT: [[SCEVGEP15]] = getelementptr i32, i32* [[LSR_IV14]], i32 4
; CHECK-NEXT: [[TMP3]] = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 [[TMP0]], i32 1)
; CHECK-NEXT: [[TMP4:%.*]] = icmp ne i32 [[TMP3]], 0
; CHECK-NEXT: br i1 [[TMP4]], label [[VECTOR_BODY]], label [[FOR_COND_CLEANUP:%.*]]
; CHECK: for.cond.cleanup:
; CHECK-NEXT: ret void
;
entry:
call void @llvm.set.loop.iterations.i32(i32 8001)
br label %vector.body
vector.body:
%lsr.iv14 = phi i32* [ %scevgep15, %vector.body ], [ %A, %entry ]
%lsr.iv11 = phi i32* [ %scevgep12, %vector.body ], [ %C, %entry ]
%lsr.iv = phi i32* [ %scevgep, %vector.body ], [ %B, %entry ]
%index = phi i32 [ 0, %entry ], [ %index.next, %vector.body ]
%0 = phi i32 [ 8001, %entry ], [ %3, %vector.body ]
%lsr.iv1416 = bitcast i32* %lsr.iv14 to <4 x i32>*
%lsr.iv1113 = bitcast i32* %lsr.iv11 to <4 x i32>*
%lsr.iv10 = bitcast i32* %lsr.iv to <4 x i32>*
%broadcast.splatinsert = insertelement <4 x i32> undef, i32 %index, i32 0
%broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> undef, <4 x i32> zeroinitializer
%induction = add <4 x i32> %broadcast.splat, <i32 0, i32 1, i32 2, i32 3>
; UGT here:
%1 = icmp ugt <4 x i32> %induction, <i32 32002, i32 32002, i32 32002, i32 32002>
%wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %lsr.iv10, i32 4, <4 x i1> %1, <4 x i32> undef)
%wide.masked.load9 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %lsr.iv1113, i32 4, <4 x i1> %1, <4 x i32> undef)
%2 = add nsw <4 x i32> %wide.masked.load9, %wide.masked.load
call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %2, <4 x i32>* %lsr.iv1416, i32 4, <4 x i1> %1)
%index.next = add i32 %index, 4
%scevgep = getelementptr i32, i32* %lsr.iv, i32 4
%scevgep12 = getelementptr i32, i32* %lsr.iv11, i32 4
%scevgep15 = getelementptr i32, i32* %lsr.iv14, i32 4
%3 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %0, i32 1)
%4 = icmp ne i32 %3, 0
br i1 %4, label %vector.body, label %for.cond.cleanup
for.cond.cleanup:
ret void
}
; Check that this loop behaves as expected, i.e, that the loop increment is
; an increment and not a decrement.
; Negative test: the index is decremented ('sub i32 %index, 4') instead of
; incremented, so per the CHECK lines the icmp ult is left untouched and no
; vctp is inserted.
define dso_local void @foo4(i32* noalias nocapture %A, i32* noalias nocapture readonly %B, i32* noalias nocapture readonly %C, i32* noalias nocapture readnone %D, i32 %N) local_unnamed_addr #0 {
; CHECK-LABEL: @foo4(
; CHECK-NEXT: entry:
; CHECK-NEXT: call void @llvm.set.loop.iterations.i32(i32 8001)
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[LSR_IV14:%.*]] = phi i32* [ [[SCEVGEP15:%.*]], [[VECTOR_BODY]] ], [ [[A:%.*]], [[ENTRY:%.*]] ]
; CHECK-NEXT: [[LSR_IV11:%.*]] = phi i32* [ [[SCEVGEP12:%.*]], [[VECTOR_BODY]] ], [ [[C:%.*]], [[ENTRY]] ]
; CHECK-NEXT: [[LSR_IV:%.*]] = phi i32* [ [[SCEVGEP:%.*]], [[VECTOR_BODY]] ], [ [[B:%.*]], [[ENTRY]] ]
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP0:%.*]] = phi i32 [ 8001, [[ENTRY]] ], [ [[TMP3:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[LSR_IV1416:%.*]] = bitcast i32* [[LSR_IV14]] to <4 x i32>*
; CHECK-NEXT: [[LSR_IV1113:%.*]] = bitcast i32* [[LSR_IV11]] to <4 x i32>*
; CHECK-NEXT: [[LSR_IV10:%.*]] = bitcast i32* [[LSR_IV]] to <4 x i32>*
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> undef, i32 [[INDEX]], i32 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> undef, <4 x i32> zeroinitializer
; CHECK-NEXT: [[INDUCTION:%.*]] = add <4 x i32> [[BROADCAST_SPLAT]], <i32 0, i32 1, i32 2, i32 3>
; CHECK-NEXT: [[TMP1:%.*]] = icmp ult <4 x i32> [[INDUCTION]], <i32 32002, i32 32002, i32 32002, i32 32002>
; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* [[LSR_IV10]], i32 4, <4 x i1> [[TMP1]], <4 x i32> undef)
; CHECK-NEXT: [[WIDE_MASKED_LOAD9:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* [[LSR_IV1113]], i32 4, <4 x i1> [[TMP1]], <4 x i32> undef)
; CHECK-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> [[WIDE_MASKED_LOAD9]], [[WIDE_MASKED_LOAD]]
; CHECK-NEXT: call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> [[TMP2]], <4 x i32>* [[LSR_IV1416]], i32 4, <4 x i1> [[TMP1]])
; CHECK-NEXT: [[INDEX_NEXT]] = sub i32 [[INDEX]], 4
; CHECK-NEXT: [[SCEVGEP]] = getelementptr i32, i32* [[LSR_IV]], i32 4
; CHECK-NEXT: [[SCEVGEP12]] = getelementptr i32, i32* [[LSR_IV11]], i32 4
; CHECK-NEXT: [[SCEVGEP15]] = getelementptr i32, i32* [[LSR_IV14]], i32 4
; CHECK-NEXT: [[TMP3]] = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 [[TMP0]], i32 1)
; CHECK-NEXT: [[TMP4:%.*]] = icmp ne i32 [[TMP3]], 0
; CHECK-NEXT: br i1 [[TMP4]], label [[VECTOR_BODY]], label [[FOR_COND_CLEANUP:%.*]]
; CHECK: for.cond.cleanup:
; CHECK-NEXT: ret void
;
entry:
call void @llvm.set.loop.iterations.i32(i32 8001)
br label %vector.body
vector.body:
%lsr.iv14 = phi i32* [ %scevgep15, %vector.body ], [ %A, %entry ]
%lsr.iv11 = phi i32* [ %scevgep12, %vector.body ], [ %C, %entry ]
%lsr.iv = phi i32* [ %scevgep, %vector.body ], [ %B, %entry ]
%index = phi i32 [ 0, %entry ], [ %index.next, %vector.body ]
%0 = phi i32 [ 8001, %entry ], [ %3, %vector.body ]
%lsr.iv1416 = bitcast i32* %lsr.iv14 to <4 x i32>*
%lsr.iv1113 = bitcast i32* %lsr.iv11 to <4 x i32>*
%lsr.iv10 = bitcast i32* %lsr.iv to <4 x i32>*
%broadcast.splatinsert = insertelement <4 x i32> undef, i32 %index, i32 0
%broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> undef, <4 x i32> zeroinitializer
%induction = add <4 x i32> %broadcast.splat, <i32 0, i32 1, i32 2, i32 3>
%1 = icmp ult <4 x i32> %induction, <i32 32002, i32 32002, i32 32002, i32 32002>
%wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %lsr.iv10, i32 4, <4 x i1> %1, <4 x i32> undef)
%wide.masked.load9 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %lsr.iv1113, i32 4, <4 x i1> %1, <4 x i32> undef)
%2 = add nsw <4 x i32> %wide.masked.load9, %wide.masked.load
call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %2, <4 x i32>* %lsr.iv1416, i32 4, <4 x i1> %1)
; Counting down:
%index.next = sub i32 %index, 4
%scevgep = getelementptr i32, i32* %lsr.iv, i32 4
%scevgep12 = getelementptr i32, i32* %lsr.iv11, i32 4
%scevgep15 = getelementptr i32, i32* %lsr.iv14, i32 4
%3 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %0, i32 1)
%4 = icmp ne i32 %3, 0
br i1 %4, label %vector.body, label %for.cond.cleanup
for.cond.cleanup:
ret void
}
; Negative test: the icmp compares against a NON-uniform constant vector
; (<0, 3200, 32002, 32002>), which has no splat value, so per the CHECK lines
; the icmp is left untouched and no vctp is inserted.
define dso_local void @foo5(i32* noalias nocapture %A, i32* noalias nocapture readonly %B, i32* noalias nocapture readonly %C, i32* noalias nocapture readnone %D, i32 %N) local_unnamed_addr #0 {
; CHECK-LABEL: @foo5(
; CHECK-NEXT: entry:
; CHECK-NEXT: call void @llvm.set.loop.iterations.i32(i32 8001)
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[LSR_IV14:%.*]] = phi i32* [ [[SCEVGEP15:%.*]], [[VECTOR_BODY]] ], [ [[A:%.*]], [[ENTRY:%.*]] ]
; CHECK-NEXT: [[LSR_IV11:%.*]] = phi i32* [ [[SCEVGEP12:%.*]], [[VECTOR_BODY]] ], [ [[C:%.*]], [[ENTRY]] ]
; CHECK-NEXT: [[LSR_IV:%.*]] = phi i32* [ [[SCEVGEP:%.*]], [[VECTOR_BODY]] ], [ [[B:%.*]], [[ENTRY]] ]
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP0:%.*]] = phi i32 [ 8001, [[ENTRY]] ], [ [[TMP3:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[LSR_IV1416:%.*]] = bitcast i32* [[LSR_IV14]] to <4 x i32>*
; CHECK-NEXT: [[LSR_IV1113:%.*]] = bitcast i32* [[LSR_IV11]] to <4 x i32>*
; CHECK-NEXT: [[LSR_IV10:%.*]] = bitcast i32* [[LSR_IV]] to <4 x i32>*
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> undef, i32 [[INDEX]], i32 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> undef, <4 x i32> zeroinitializer
; CHECK-NEXT: [[INDUCTION:%.*]] = add <4 x i32> [[BROADCAST_SPLAT]], <i32 0, i32 1, i32 2, i32 3>
; CHECK-NEXT: [[TMP1:%.*]] = icmp ult <4 x i32> [[INDUCTION]], <i32 0, i32 3200, i32 32002, i32 32002>
; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* [[LSR_IV10]], i32 4, <4 x i1> [[TMP1]], <4 x i32> undef)
; CHECK-NEXT: [[WIDE_MASKED_LOAD9:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* [[LSR_IV1113]], i32 4, <4 x i1> [[TMP1]], <4 x i32> undef)
; CHECK-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> [[WIDE_MASKED_LOAD9]], [[WIDE_MASKED_LOAD]]
; CHECK-NEXT: call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> [[TMP2]], <4 x i32>* [[LSR_IV1416]], i32 4, <4 x i1> [[TMP1]])
; CHECK-NEXT: [[INDEX_NEXT]] = add i32 [[INDEX]], 4
; CHECK-NEXT: [[SCEVGEP]] = getelementptr i32, i32* [[LSR_IV]], i32 4
; CHECK-NEXT: [[SCEVGEP12]] = getelementptr i32, i32* [[LSR_IV11]], i32 4
; CHECK-NEXT: [[SCEVGEP15]] = getelementptr i32, i32* [[LSR_IV14]], i32 4
; CHECK-NEXT: [[TMP3]] = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 [[TMP0]], i32 1)
; CHECK-NEXT: [[TMP4:%.*]] = icmp ne i32 [[TMP3]], 0
; CHECK-NEXT: br i1 [[TMP4]], label [[VECTOR_BODY]], label [[FOR_COND_CLEANUP:%.*]]
; CHECK: for.cond.cleanup:
; CHECK-NEXT: ret void
;
entry:
call void @llvm.set.loop.iterations.i32(i32 8001)
br label %vector.body
vector.body:
%lsr.iv14 = phi i32* [ %scevgep15, %vector.body ], [ %A, %entry ]
%lsr.iv11 = phi i32* [ %scevgep12, %vector.body ], [ %C, %entry ]
%lsr.iv = phi i32* [ %scevgep, %vector.body ], [ %B, %entry ]
%index = phi i32 [ 0, %entry ], [ %index.next, %vector.body ]
%0 = phi i32 [ 8001, %entry ], [ %3, %vector.body ]
%lsr.iv1416 = bitcast i32* %lsr.iv14 to <4 x i32>*
%lsr.iv1113 = bitcast i32* %lsr.iv11 to <4 x i32>*
%lsr.iv10 = bitcast i32* %lsr.iv to <4 x i32>*
%broadcast.splatinsert = insertelement <4 x i32> undef, i32 %index, i32 0
%broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> undef, <4 x i32> zeroinitializer
%induction = add <4 x i32> %broadcast.splat, <i32 0, i32 1, i32 2, i32 3>
; non-uniform constant vector here:
%1 = icmp ult <4 x i32> %induction, <i32 0, i32 3200, i32 32002, i32 32002>
%wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %lsr.iv10, i32 4, <4 x i1> %1, <4 x i32> undef)
%wide.masked.load9 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %lsr.iv1113, i32 4, <4 x i1> %1, <4 x i32> undef)
%2 = add nsw <4 x i32> %wide.masked.load9, %wide.masked.load
call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %2, <4 x i32>* %lsr.iv1416, i32 4, <4 x i1> %1)
%index.next = add i32 %index, 4
%scevgep = getelementptr i32, i32* %lsr.iv, i32 4
%scevgep12 = getelementptr i32, i32* %lsr.iv11, i32 4
%scevgep15 = getelementptr i32, i32* %lsr.iv14, i32 4
%3 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %0, i32 1)
%4 = icmp ne i32 %3, 0
br i1 %4, label %vector.body, label %for.cond.cleanup
for.cond.cleanup:
ret void
}
declare <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>*, i32 immarg, <4 x i1>, <4 x i32>) #1
declare void @llvm.masked.store.v4i32.p0v4i32(<4 x i32>, <4 x i32>*, i32 immarg, <4 x i1>) #2
declare i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 , i32 )
declare void @llvm.set.loop.iterations.i32(i32)