[VPlan] Introduce recipe to build scalar steps.

This patch adds a new VPScalarIVStepsRecipe to handle building scalar
steps.

In this first patch, it only handles the case where no vector induction
variable is needed.
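
For illustration: the "scalar steps" are the per-lane induction values within
one vector iteration. For an induction with start Start and stride Step,
original iteration i has the value Start + i * Step, so part P, lane L of a
vector iteration beginning at canonical index N gets
Start + (N + P * VF + L) * Step. A minimal standalone sketch of that
arithmetic (plain C++, not part of the patch; names are illustrative):

#include <cstdio>

// Scalar step for unrolled part P, lane L of the vector iteration whose
// canonical index is N: Start + (N + P * VF + L) * Step.
long scalarStep(long Start, long Step, long N, unsigned VF, unsigned P,
                unsigned L) {
  return Start + (N + (long)(P * VF + L)) * Step;
}

int main() {
  // Canonical IV (Start = 0, Step = 1), VF = 4, UF = 2, first vector
  // iteration: prints 0..7, one value per (part, lane).
  for (unsigned P = 0; P < 2; ++P)
    for (unsigned L = 0; L < 4; ++L)
      printf("part %u lane %u -> %ld\n", P, L, scalarStep(0, 1, 0, 4, P, L));
  return 0;
}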

Reviewed By: Ayal

Differential Revision: https://reviews.llvm.org/D115953
Florian Hahn 2022-02-27 17:32:41 +00:00
parent b6d75682f9
commit 49b23f451c
21 changed files with 312 additions and 141 deletions


@@ -2642,7 +2642,7 @@ void InnerLoopVectorizer::widenIntOrFpInduction(
TruncInst *Trunc = Def->getTruncInst();
IRBuilderBase &Builder = State.Builder;
assert(IV->getType() == ID.getStartValue()->getType() && "Types must match");
assert(!State.VF.isZero() && "VF must be non-zero");
assert(State.VF.isVector() && "must have vector VF");
// The value from the original loop to which we are mapping the new induction
// variable.
@@ -2695,37 +2695,11 @@ void InnerLoopVectorizer::widenIntOrFpInduction(
// Now do the actual transformations, and start with creating the step value.
Value *Step = CreateStepValue(ID.getStep());
if (State.VF.isScalar()) {
Value *ScalarIV = CreateScalarIV(Step);
Type *ScalarTy = IntegerType::get(ScalarIV->getContext(),
Step->getType()->getScalarSizeInBits());
Instruction::BinaryOps IncOp = ID.getInductionOpcode();
if (IncOp == Instruction::BinaryOpsEnd)
IncOp = Instruction::Add;
for (unsigned Part = 0; Part < UF; ++Part) {
Value *StartIdx = ConstantInt::get(ScalarTy, Part);
Instruction::BinaryOps MulOp = Instruction::Mul;
if (Step->getType()->isFloatingPointTy()) {
StartIdx = Builder.CreateUIToFP(StartIdx, Step->getType());
MulOp = Instruction::FMul;
}
Value *Mul = Builder.CreateBinOp(MulOp, StartIdx, Step);
Value *EntryPart = Builder.CreateBinOp(IncOp, ScalarIV, Mul, "induction");
State.set(Def, EntryPart, Part);
if (Trunc) {
assert(!Step->getType()->isFloatingPointTy() &&
"fp inductions shouldn't be truncated");
addMetadata(EntryPart, Trunc);
}
}
return;
}
// Create a new independent vector induction variable, if one is needed.
if (Def->needsVectorIV())
createVectorIntOrFpInductionPHI(ID, Step, Start, EntryVal, Def, State);
// Create a new independent vector induction variable. Later VPlan2VPlan
// optimizations will remove it if it is not needed, e.g. because all its
// users access scalar values.
createVectorIntOrFpInductionPHI(ID, Step, Start, EntryVal, Def, State);
if (Def->needsScalarIV()) {
// Create scalar steps that can be used by instructions we will later
@@ -9328,6 +9302,7 @@ VPlanPtr LoopVectorizationPlanner::buildVPlanWithVPRecipes(
// in ways that accessing values using original IR values is incorrect.
Plan->disableValue2VPValue();
VPlanTransforms::optimizeInductions(*Plan, *PSE.getSE());
VPlanTransforms::sinkScalarOperands(*Plan);
VPlanTransforms::mergeReplicateRegions(*Plan);
VPlanTransforms::removeDeadRecipes(*Plan, *OrigLoop);
@@ -9754,6 +9729,69 @@ void VPWidenIntOrFpInductionRecipe::execute(VPTransformState &State) {
State.ILV->widenIntOrFpInduction(IV, this, State, CanonicalIV);
}
void VPScalarIVStepsRecipe::execute(VPTransformState &State) {
assert(!State.Instance && "VPScalarIVStepsRecipe being replicated.");
// Fast-math-flags propagate from the original induction instruction.
IRBuilder<>::FastMathFlagGuard FMFG(State.Builder);
if (IndDesc.getInductionBinOp() &&
isa<FPMathOperator>(IndDesc.getInductionBinOp()))
State.Builder.setFastMathFlags(
IndDesc.getInductionBinOp()->getFastMathFlags());
Value *Step = State.get(getStepValue(), VPIteration(0, 0));
auto *Trunc = dyn_cast<TruncInst>(getUnderlyingValue());
auto CreateScalarIV = [&](Value *&Step) -> Value * {
Value *ScalarIV = State.get(getCanonicalIV(), VPIteration(0, 0));
auto *CanonicalIV = State.get(getParent()->getPlan()->getCanonicalIV(), 0);
if (!isCanonical() || CanonicalIV->getType() != IV->getType()) {
ScalarIV = IV->getType()->isIntegerTy()
? State.Builder.CreateSExtOrTrunc(ScalarIV, IV->getType())
: State.Builder.CreateCast(Instruction::SIToFP, ScalarIV,
IV->getType());
ScalarIV = emitTransformedIndex(State.Builder, ScalarIV,
getStartValue()->getLiveInIRValue(), Step,
IndDesc);
ScalarIV->setName("offset.idx");
}
if (Trunc) {
auto *TruncType = cast<IntegerType>(Trunc->getType());
assert(Step->getType()->isIntegerTy() &&
"Truncation requires an integer step");
ScalarIV = State.Builder.CreateTrunc(ScalarIV, TruncType);
Step = State.Builder.CreateTrunc(Step, TruncType);
}
return ScalarIV;
};
Value *ScalarIV = CreateScalarIV(Step);
if (State.VF.isVector()) {
buildScalarSteps(ScalarIV, Step, IV, IndDesc, this, State);
return;
}
for (unsigned Part = 0; Part < State.UF; ++Part) {
assert(!State.VF.isScalable() && "scalable vectors not yet supported.");
Value *EntryPart;
if (Step->getType()->isFloatingPointTy()) {
Value *StartIdx =
getRuntimeVFAsFloat(State.Builder, Step->getType(), State.VF * Part);
// Floating-point operations inherit FMF via the builder's flags.
Value *MulOp = State.Builder.CreateFMul(StartIdx, Step);
EntryPart = State.Builder.CreateBinOp(IndDesc.getInductionOpcode(),
ScalarIV, MulOp);
} else {
Value *StartIdx =
getRuntimeVF(State.Builder, Step->getType(), State.VF * Part);
EntryPart = State.Builder.CreateAdd(
ScalarIV, State.Builder.CreateMul(StartIdx, Step), "induction");
}
State.set(this, EntryPart, Part);
if (Trunc)
State.ILV->addMetadata(EntryPart, Trunc);
}
}
void VPWidenPHIRecipe::execute(VPTransformState &State) {
State.ILV->widenPHIInstruction(cast<PHINode>(getUnderlyingValue()), this,
State);
@@ -10161,7 +10199,8 @@ Value *VPTransformState::get(VPValue *Def, unsigned Part) {
// Check if there is a scalar value for the selected lane.
if (!hasScalarValue(Def, {Part, LastLane})) {
// At the moment, VPWidenIntOrFpInductionRecipes can also be uniform.
assert(isa<VPWidenIntOrFpInductionRecipe>(Def->getDef()) &&
assert((isa<VPWidenIntOrFpInductionRecipe>(Def->getDef()) ||
isa<VPScalarIVStepsRecipe>(Def->getDef())) &&
"unexpected recipe found to be invariant");
IsUniform = true;
LastLane = 0;
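
In the VF-is-scalar path of VPScalarIVStepsRecipe::execute above, each
unrolled part receives ScalarIV combined with (VF * Part) * Step, via the
induction opcode for floating-point inductions and an add/mul for integers.
A runnable sketch of the integer case (plain C++, illustrative; not LLVM
code):

#include <cstdio>

// Mirrors the Part loop above for an integer add induction:
// EntryPart(Part) = ScalarIV + (VF * Part) * Step.
void emitParts(long ScalarIV, long Step, unsigned VF, unsigned UF) {
  for (unsigned Part = 0; Part < UF; ++Part)
    printf("part %u -> %ld\n", Part, ScalarIV + (long)(VF * Part) * Step);
}

int main() {
  // With VF = 1, UF = 2, Step = 1 the parts are index + 0 and index + 1,
  // matching the INDUCTION/INDUCTION2 pairs in the VEC1_INTERL2 tests below.
  emitParts(/*ScalarIV=*/0, /*Step=*/1, /*VF=*/1, /*UF=*/2);
  return 0;
}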


@@ -583,7 +583,8 @@ bool VPRecipeBase::mayHaveSideEffects() const {
case VPWidenSC:
case VPWidenGEPSC:
case VPReductionSC:
case VPWidenSelectSC: {
case VPWidenSelectSC:
case VPScalarIVStepsSC: {
const Instruction *I =
dyn_cast_or_null<Instruction>(getVPSingleValue()->getUnderlyingValue());
(void)I;
@@ -608,6 +609,14 @@ void VPRecipeBase::insertBefore(VPRecipeBase *InsertPos) {
Parent->getRecipeList().insert(InsertPos->getIterator(), this);
}
void VPRecipeBase::insertBefore(VPBasicBlock &BB,
iplist<VPRecipeBase>::iterator I) {
assert(!Parent && "Recipe already in some VPBasicBlock");
assert(I == BB.end() || I->getParent() == &BB);
Parent = &BB;
BB.getRecipeList().insert(I, this);
}
void VPRecipeBase::insertAfter(VPRecipeBase *InsertPos) {
assert(!Parent && "Recipe already in some VPBasicBlock");
assert(InsertPos->getParent() &&
@@ -634,10 +643,8 @@ void VPRecipeBase::moveAfter(VPRecipeBase *InsertPos) {
void VPRecipeBase::moveBefore(VPBasicBlock &BB,
iplist<VPRecipeBase>::iterator I) {
assert(I == BB.end() || I->getParent() == &BB);
removeFromParent();
Parent = &BB;
BB.getRecipeList().insert(I, this);
insertBefore(BB, I);
}
void VPInstruction::generateInstruction(VPTransformState &State,
@@ -875,13 +882,16 @@ void VPlan::prepareToExecute(Value *TripCountV, Value *VectorTripCountV,
auto *IV = getCanonicalIV();
assert(all_of(IV->users(),
[](const VPUser *U) {
if (isa<VPScalarIVStepsRecipe>(U))
return true;
auto *VPI = cast<VPInstruction>(U);
return VPI->getOpcode() ==
VPInstruction::CanonicalIVIncrement ||
VPI->getOpcode() ==
VPInstruction::CanonicalIVIncrementNUW;
}) &&
"the canonical IV should only be used by its increments when "
"the canonical IV should only be used by its increments or "
"ScalarIVSteps when "
"resetting the start value");
IV->setOperand(0, VPV);
}
@@ -1272,7 +1282,32 @@ bool VPWidenIntOrFpInductionRecipe::isCanonical() const {
return StartC && StartC->isZero() && StepC && StepC->isOne();
}
VPCanonicalIVPHIRecipe *VPScalarIVStepsRecipe::getCanonicalIV() const {
return cast<VPCanonicalIVPHIRecipe>(getOperand(0));
}
bool VPScalarIVStepsRecipe::isCanonical() const {
auto *CanIV = getCanonicalIV();
// The start value of the steps-recipe must match the start value of the
// canonical induction and it must step by 1.
if (CanIV->getStartValue() != getStartValue())
return false;
auto *StepVPV = getStepValue();
if (StepVPV->getDef())
return false;
auto *StepC = dyn_cast_or_null<ConstantInt>(StepVPV->getLiveInIRValue());
return StepC && StepC->isOne();
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void VPScalarIVStepsRecipe::print(raw_ostream &O, const Twine &Indent,
VPSlotTracker &SlotTracker) const {
O << Indent;
printAsOperand(O, SlotTracker);
O << Indent << "= SCALAR-STEPS ";
printOperands(O, SlotTracker);
}
void VPWidenGEPRecipe::print(raw_ostream &O, const Twine &Indent,
VPSlotTracker &SlotTracker) const {
O << Indent << "WIDEN-GEP ";
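
Two reading notes on the recipe above: isCanonical() reduces to "the start
matches the canonical IV's start and the step is a live-in integer constant
one", and print() emits lines of the form
ir<%iv> = SCALAR-STEPS vp<%2>, ir<0>, ir<1> (operands: canonical IV, start,
step), as the vplan-printing tests below check. A standalone sketch of the
canonicality predicate (plain C++, illustrative types):

#include <optional>

// Mirrors VPScalarIVStepsRecipe::isCanonical: canonical iff the start value
// matches the canonical IV's start and the step is a live-in (not defined by
// any recipe) integer constant equal to 1.
struct StepsInfo {
  long Start;
  bool StepHasDef;               // is the step defined inside the plan?
  std::optional<long> StepConst; // constant step value, if known
};

bool isCanonicalSteps(long CanIVStart, const StepsInfo &S) {
  if (S.Start != CanIVStart || S.StepHasDef)
    return false;
  return S.StepConst && *S.StepConst == 1;
}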


@@ -700,6 +700,9 @@ public:
/// Insert an unlinked recipe into a basic block immediately before
/// the specified recipe.
void insertBefore(VPRecipeBase *InsertPos);
/// Insert an unlinked recipe into \p BB immediately before the insertion
/// point \p IP.
void insertBefore(VPBasicBlock &BB, iplist<VPRecipeBase>::iterator IP);
/// Insert an unlinked Recipe into a basic block immediately after
/// the specified Recipe.
@@ -1103,6 +1106,8 @@ public:
return dyn_cast_or_null<TruncInst>(getVPValue(0)->getUnderlyingValue());
}
PHINode *getPHINode() { return IV; }
/// Returns the induction descriptor for the recipe.
const InductionDescriptor &getInductionDescriptor() const { return IndDesc; }
@@ -1769,6 +1774,12 @@ public:
static inline bool classof(const VPDef *D) {
return D->getVPDefID() == VPCanonicalIVPHISC;
}
static inline bool classof(const VPHeaderPHIRecipe *D) {
return D->getVPDefID() == VPCanonicalIVPHISC;
}
static inline bool classof(const VPValue *V) {
return V->getVPValueID() == VPValue::VPVCanonicalIVPHISC;
}
/// Generate the canonical scalar induction phi of the vector loop.
void execute(VPTransformState &State) override;
@@ -1834,6 +1845,53 @@ public:
}
};
/// A recipe for handling phi nodes of integer and floating-point inductions,
/// producing their scalar values.
class VPScalarIVStepsRecipe : public VPRecipeBase, public VPValue {
PHINode *IV;
const InductionDescriptor &IndDesc;
public:
VPScalarIVStepsRecipe(PHINode *IV, const InductionDescriptor &IndDesc,
VPValue *CanonicalIV, VPValue *Start, VPValue *Step,
Instruction *Trunc)
: VPRecipeBase(VPScalarIVStepsSC, {CanonicalIV, Start, Step}),
VPValue(Trunc ? Trunc : IV, this), IV(IV), IndDesc(IndDesc) {}
~VPScalarIVStepsRecipe() override = default;
/// Method to support type inquiry through isa, cast, and dyn_cast.
static inline bool classof(const VPDef *D) {
return D->getVPDefID() == VPRecipeBase::VPScalarIVStepsSC;
}
/// Extra classof implementations to allow directly casting from VPUser ->
/// VPScalarIVStepsRecipe.
static inline bool classof(const VPUser *U) {
auto *R = dyn_cast<VPRecipeBase>(U);
return R && R->getVPDefID() == VPRecipeBase::VPScalarIVStepsSC;
}
static inline bool classof(const VPRecipeBase *R) {
return R->getVPDefID() == VPRecipeBase::VPScalarIVStepsSC;
}
/// Generate the scalarized versions of the phi node as needed by its users.
void execute(VPTransformState &State) override;
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// Print the recipe.
void print(raw_ostream &O, const Twine &Indent,
VPSlotTracker &SlotTracker) const override;
#endif
/// Returns true if the induction is canonical, i.e. starting at 0 and
/// incremented by UF * VF (= the original IV is incremented by 1).
bool isCanonical() const;
VPCanonicalIVPHIRecipe *getCanonicalIV() const;
VPValue *getStartValue() const { return getOperand(1); }
VPValue *getStepValue() const { return getOperand(2); }
};
/// VPBasicBlock serves as the leaf of the Hierarchical Control-Flow Graph. It
/// holds a sequence of zero or more VPRecipe's each representing a sequence of
/// output IR instructions. All PHI-like recipes must come before any non-PHI recipes.


@@ -378,3 +378,39 @@ void VPlanTransforms::removeDeadRecipes(VPlan &Plan, Loop &OrigLoop) {
R.eraseFromParent();
}
}
void VPlanTransforms::optimizeInductions(VPlan &Plan, ScalarEvolution &SE) {
SmallVector<VPRecipeBase *> ToRemove;
VPBasicBlock *HeaderVPBB = Plan.getVectorLoopRegion()->getEntryBasicBlock();
for (VPRecipeBase &Phi : HeaderVPBB->phis()) {
auto *IV = dyn_cast<VPWidenIntOrFpInductionRecipe>(&Phi);
if (!IV || IV->needsVectorIV())
continue;
const InductionDescriptor &ID = IV->getInductionDescriptor();
const SCEV *StepSCEV = ID.getStep();
VPValue *Step = nullptr;
if (auto *E = dyn_cast<SCEVConstant>(StepSCEV)) {
Step = new VPValue(E->getValue());
Plan.addExternalDef(Step);
} else if (auto *E = dyn_cast<SCEVUnknown>(StepSCEV)) {
Step = new VPValue(E->getValue());
Plan.addExternalDef(Step);
} else {
Step = new VPExpandSCEVRecipe(StepSCEV, SE);
}
VPScalarIVStepsRecipe *Steps = new VPScalarIVStepsRecipe(
IV->getPHINode(), ID, Plan.getCanonicalIV(), IV->getStartValue(), Step,
IV->getTruncInst());
HeaderVPBB->insert(Steps, HeaderVPBB->getFirstNonPhi());
if (Step->getDef()) {
// TODO: Place the step in the preheader, once it is explicitly modeled in
// VPlan.
HeaderVPBB->insert(cast<VPRecipeBase>(Step->getDef()),
HeaderVPBB->getFirstNonPhi());
}
IV->replaceAllUsesWith(Steps);
}
}
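
The net effect of optimizeInductions is visible in the vplan-printing tests
further down: a widened induction whose vector parts are never used is
rewritten into scalar steps off the canonical IV. Schematically, in the
plan-dump notation used by those tests (value numbering illustrative):

  before:  WIDEN-INDUCTION %iv = phi %iv.next, 0
  after:   ir<%iv> = SCALAR-STEPS vp<[[CAN_IV]]>, ir<0>, ir<1>

where the start (ir<0>) and step (ir<1>) come from the induction descriptor,
expanded via VPExpandSCEVRecipe when the step is neither a constant nor a
plain IR value.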


@@ -54,6 +54,10 @@ struct VPlanTransforms {
/// Try to remove dead recipes. At the moment, only dead header recipes are
/// removed.
static void removeDeadRecipes(VPlan &Plan, Loop &OrigLoop);
/// If all users of a vector IV need scalar values, provide them by building
/// scalar steps off of the canonical scalar IV, and remove the vector IV.
static void optimizeInductions(VPlan &Plan, ScalarEvolution &SE);
};
} // namespace llvm


@@ -332,6 +332,7 @@ public:
VPInterleaveSC,
VPReductionSC,
VPReplicateSC,
VPScalarIVStepsSC,
VPWidenCallSC,
VPWidenCanonicalIVSC,
VPWidenGEPSC,


@@ -34,7 +34,7 @@ define void @outside_user_blocks_tail_folding(i8* nocapture readonly %ptr, i32 %
; CHECK-NEXT: store <16 x i8> [[WIDE_LOAD]], <16 x i8>* [[TMP6]], align 1
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 16
; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop !0
; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[SIZE]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[END:%.*]], label [[SCALAR_PH]]
@@ -50,7 +50,7 @@ define void @outside_user_blocks_tail_folding(i8* nocapture readonly %ptr, i32 %
; CHECK-NEXT: [[TMP8:%.*]] = load i8, i8* [[INCDEC_PTR]], align 1
; CHECK-NEXT: store i8 [[TMP8]], i8* [[BUFF]], align 1
; CHECK-NEXT: [[TOBOOL11:%.*]] = icmp eq i32 [[DEC]], 0
; CHECK-NEXT: br i1 [[TOBOOL11]], label [[END]], label [[BODY]], !llvm.loop !2
; CHECK-NEXT: br i1 [[TOBOOL11]], label [[END]], label [[BODY]], !llvm.loop [[LOOP2:![0-9]+]]
; CHECK: end:
; CHECK-NEXT: [[INCDEC_PTR_LCSSA:%.*]] = phi i8* [ [[INCDEC_PTR]], [[BODY]] ], [ [[IND_END2]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT: store i8* [[INCDEC_PTR_LCSSA]], i8** [[POS]], align 4


@@ -555,14 +555,6 @@ define dso_local signext i32 @f2(float* noalias %A, float* noalias %B, i32 signe
; VF-TWO-CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; VF-TWO-CHECK: vector.body:
; VF-TWO-CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; VF-TWO-CHECK-NEXT: [[TMP12:%.*]] = add i64 [[INDEX]], 0
; VF-TWO-CHECK-NEXT: [[TMP13:%.*]] = add i64 [[INDEX]], 4
; VF-TWO-CHECK-NEXT: [[TMP14:%.*]] = add i64 [[INDEX]], 8
; VF-TWO-CHECK-NEXT: [[TMP15:%.*]] = add i64 [[INDEX]], 12
; VF-TWO-CHECK-NEXT: [[TMP16:%.*]] = add i64 [[INDEX]], 16
; VF-TWO-CHECK-NEXT: [[TMP17:%.*]] = add i64 [[INDEX]], 20
; VF-TWO-CHECK-NEXT: [[TMP18:%.*]] = add i64 [[INDEX]], 24
; VF-TWO-CHECK-NEXT: [[TMP19:%.*]] = add i64 [[INDEX]], 28
; VF-TWO-CHECK-NEXT: [[OFFSET_IDX:%.*]] = trunc i64 [[INDEX]] to i32
; VF-TWO-CHECK-NEXT: [[TMP20:%.*]] = add i32 [[OFFSET_IDX]], 0
; VF-TWO-CHECK-NEXT: [[TMP21:%.*]] = add i32 [[OFFSET_IDX]], 4
@@ -572,6 +564,14 @@ define dso_local signext i32 @f2(float* noalias %A, float* noalias %B, i32 signe
; VF-TWO-CHECK-NEXT: [[TMP25:%.*]] = add i32 [[OFFSET_IDX]], 20
; VF-TWO-CHECK-NEXT: [[TMP26:%.*]] = add i32 [[OFFSET_IDX]], 24
; VF-TWO-CHECK-NEXT: [[TMP27:%.*]] = add i32 [[OFFSET_IDX]], 28
; VF-TWO-CHECK-NEXT: [[TMP12:%.*]] = add i64 [[INDEX]], 0
; VF-TWO-CHECK-NEXT: [[TMP13:%.*]] = add i64 [[INDEX]], 4
; VF-TWO-CHECK-NEXT: [[TMP14:%.*]] = add i64 [[INDEX]], 8
; VF-TWO-CHECK-NEXT: [[TMP15:%.*]] = add i64 [[INDEX]], 12
; VF-TWO-CHECK-NEXT: [[TMP16:%.*]] = add i64 [[INDEX]], 16
; VF-TWO-CHECK-NEXT: [[TMP17:%.*]] = add i64 [[INDEX]], 20
; VF-TWO-CHECK-NEXT: [[TMP18:%.*]] = add i64 [[INDEX]], 24
; VF-TWO-CHECK-NEXT: [[TMP19:%.*]] = add i64 [[INDEX]], 28
; VF-TWO-CHECK-NEXT: [[TMP28:%.*]] = xor i32 [[TMP20]], -1
; VF-TWO-CHECK-NEXT: [[TMP29:%.*]] = xor i32 [[TMP21]], -1
; VF-TWO-CHECK-NEXT: [[TMP30:%.*]] = xor i32 [[TMP22]], -1
@@ -703,9 +703,9 @@ define dso_local signext i32 @f2(float* noalias %A, float* noalias %B, i32 signe
; VF-TWO-CHECK-NEXT: br label [[VEC_EPILOG_VECTOR_BODY:%.*]]
; VF-TWO-CHECK: vec.epilog.vector.body:
; VF-TWO-CHECK-NEXT: [[INDEX18:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], [[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT19:%.*]], [[VEC_EPILOG_VECTOR_BODY]] ]
; VF-TWO-CHECK-NEXT: [[TMP117:%.*]] = add i64 [[INDEX18]], 0
; VF-TWO-CHECK-NEXT: [[OFFSET_IDX23:%.*]] = trunc i64 [[INDEX18]] to i32
; VF-TWO-CHECK-NEXT: [[TMP118:%.*]] = add i32 [[OFFSET_IDX23]], 0
; VF-TWO-CHECK-NEXT: [[TMP117:%.*]] = add i64 [[INDEX18]], 0
; VF-TWO-CHECK-NEXT: [[TMP119:%.*]] = xor i32 [[TMP118]], -1
; VF-TWO-CHECK-NEXT: [[TMP120:%.*]] = add i32 [[TMP119]], [[N]]
; VF-TWO-CHECK-NEXT: [[TMP121:%.*]] = sext i32 [[TMP120]] to i64
@@ -782,14 +782,6 @@ define dso_local signext i32 @f2(float* noalias %A, float* noalias %B, i32 signe
; VF-FOUR-CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; VF-FOUR-CHECK: vector.body:
; VF-FOUR-CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; VF-FOUR-CHECK-NEXT: [[TMP12:%.*]] = add i64 [[INDEX]], 0
; VF-FOUR-CHECK-NEXT: [[TMP13:%.*]] = add i64 [[INDEX]], 4
; VF-FOUR-CHECK-NEXT: [[TMP14:%.*]] = add i64 [[INDEX]], 8
; VF-FOUR-CHECK-NEXT: [[TMP15:%.*]] = add i64 [[INDEX]], 12
; VF-FOUR-CHECK-NEXT: [[TMP16:%.*]] = add i64 [[INDEX]], 16
; VF-FOUR-CHECK-NEXT: [[TMP17:%.*]] = add i64 [[INDEX]], 20
; VF-FOUR-CHECK-NEXT: [[TMP18:%.*]] = add i64 [[INDEX]], 24
; VF-FOUR-CHECK-NEXT: [[TMP19:%.*]] = add i64 [[INDEX]], 28
; VF-FOUR-CHECK-NEXT: [[OFFSET_IDX:%.*]] = trunc i64 [[INDEX]] to i32
; VF-FOUR-CHECK-NEXT: [[TMP20:%.*]] = add i32 [[OFFSET_IDX]], 0
; VF-FOUR-CHECK-NEXT: [[TMP21:%.*]] = add i32 [[OFFSET_IDX]], 4
@@ -799,6 +791,14 @@ define dso_local signext i32 @f2(float* noalias %A, float* noalias %B, i32 signe
; VF-FOUR-CHECK-NEXT: [[TMP25:%.*]] = add i32 [[OFFSET_IDX]], 20
; VF-FOUR-CHECK-NEXT: [[TMP26:%.*]] = add i32 [[OFFSET_IDX]], 24
; VF-FOUR-CHECK-NEXT: [[TMP27:%.*]] = add i32 [[OFFSET_IDX]], 28
; VF-FOUR-CHECK-NEXT: [[TMP12:%.*]] = add i64 [[INDEX]], 0
; VF-FOUR-CHECK-NEXT: [[TMP13:%.*]] = add i64 [[INDEX]], 4
; VF-FOUR-CHECK-NEXT: [[TMP14:%.*]] = add i64 [[INDEX]], 8
; VF-FOUR-CHECK-NEXT: [[TMP15:%.*]] = add i64 [[INDEX]], 12
; VF-FOUR-CHECK-NEXT: [[TMP16:%.*]] = add i64 [[INDEX]], 16
; VF-FOUR-CHECK-NEXT: [[TMP17:%.*]] = add i64 [[INDEX]], 20
; VF-FOUR-CHECK-NEXT: [[TMP18:%.*]] = add i64 [[INDEX]], 24
; VF-FOUR-CHECK-NEXT: [[TMP19:%.*]] = add i64 [[INDEX]], 28
; VF-FOUR-CHECK-NEXT: [[TMP28:%.*]] = xor i32 [[TMP20]], -1
; VF-FOUR-CHECK-NEXT: [[TMP29:%.*]] = xor i32 [[TMP21]], -1
; VF-FOUR-CHECK-NEXT: [[TMP30:%.*]] = xor i32 [[TMP22]], -1
@@ -930,9 +930,9 @@ define dso_local signext i32 @f2(float* noalias %A, float* noalias %B, i32 signe
; VF-FOUR-CHECK-NEXT: br label [[VEC_EPILOG_VECTOR_BODY:%.*]]
; VF-FOUR-CHECK: vec.epilog.vector.body:
; VF-FOUR-CHECK-NEXT: [[INDEX18:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], [[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT19:%.*]], [[VEC_EPILOG_VECTOR_BODY]] ]
; VF-FOUR-CHECK-NEXT: [[TMP117:%.*]] = add i64 [[INDEX18]], 0
; VF-FOUR-CHECK-NEXT: [[OFFSET_IDX23:%.*]] = trunc i64 [[INDEX18]] to i32
; VF-FOUR-CHECK-NEXT: [[TMP118:%.*]] = add i32 [[OFFSET_IDX23]], 0
; VF-FOUR-CHECK-NEXT: [[TMP117:%.*]] = add i64 [[INDEX18]], 0
; VF-FOUR-CHECK-NEXT: [[TMP119:%.*]] = xor i32 [[TMP118]], -1
; VF-FOUR-CHECK-NEXT: [[TMP120:%.*]] = add i32 [[TMP119]], [[N]]
; VF-FOUR-CHECK-NEXT: [[TMP121:%.*]] = sext i32 [[TMP120]] to i64


@@ -8,13 +8,13 @@ define void @foo(i64* %ptr, i32* %ptr.2) {
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY:%.*]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 2, i64 3, i64 4, i64 5>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND_TRUNC:%.+]] = phi <4 x i32> [ <i32 2, i32 3, i32 4, i32 5>, %vector.ph ], [ [[VEC_IND_TRUNC_NEXT:%.+]], %vector.body ]
; CHECK-NEXT: = add i64 [[INDEX]], 0
; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i64 2, [[INDEX]]
; CHECK-NEXT: [[TRUNC:%.+]] = trunc i64 [[OFFSET_IDX]] to i32
; CHECK-NEXT: [[TMP7:%.*]] = add i32 [[TRUNC]], 0
; CHECK-NEXT: [[TMP8:%.*]] = add i32 [[TRUNC]], 1
; CHECK-NEXT: [[TMP9:%.*]] = add i32 [[TRUNC]], 2
; CHECK-NEXT: [[TMP10:%.*]] = add i32 [[TRUNC]], 3
; CHECK-NEXT: = add i64 [[INDEX]], 0
; CHECK-NEXT: store i32 [[TMP7]], i32* %ptr.2, align 4
; CHECK-NEXT: store i32 [[TMP8]], i32* %ptr.2, align 4
; CHECK-NEXT: store i32 [[TMP9]], i32* %ptr.2, align 4


@@ -157,11 +157,11 @@ define void @fp_iv_loop1_fast_FMF(float %init, float* noalias nocapture %A, i32
; VEC1_INTERL2-NEXT: br label [[VECTOR_BODY:%.*]]
; VEC1_INTERL2: vector.body:
; VEC1_INTERL2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; VEC1_INTERL2-NEXT: [[INDUCTION2:%.*]] = or i64 [[INDEX]], 1
; VEC1_INTERL2-NEXT: [[TMP4:%.*]] = sitofp i64 [[INDEX]] to float
; VEC1_INTERL2-NEXT: [[TMP5:%.*]] = fmul fast float [[FPINC]], [[TMP4]]
; VEC1_INTERL2-NEXT: [[OFFSET_IDX:%.*]] = fsub fast float [[INIT]], [[TMP5]]
; VEC1_INTERL2-NEXT: [[TMP6:%.*]] = fsub fast float [[OFFSET_IDX]], [[FPINC]]
; VEC1_INTERL2-NEXT: [[INDUCTION2:%.*]] = or i64 [[INDEX]], 1
; VEC1_INTERL2-NEXT: [[TMP7:%.*]] = getelementptr inbounds float, float* [[A:%.*]], i64 [[INDEX]]
; VEC1_INTERL2-NEXT: [[TMP8:%.*]] = getelementptr inbounds float, float* [[A]], i64 [[INDUCTION2]]
; VEC1_INTERL2-NEXT: store float [[OFFSET_IDX]], float* [[TMP7]], align 4
@@ -416,13 +416,13 @@ define void @fp_iv_loop1_reassoc_FMF(float %init, float* noalias nocapture %A, i
; VEC1_INTERL2-NEXT: br label [[VECTOR_BODY:%.*]]
; VEC1_INTERL2: vector.body:
; VEC1_INTERL2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; VEC1_INTERL2-NEXT: [[INDUCTION2:%.*]] = or i64 [[INDEX]], 1
; VEC1_INTERL2-NEXT: [[TMP4:%.*]] = sitofp i64 [[INDEX]] to float
; VEC1_INTERL2-NEXT: [[TMP5:%.*]] = fmul reassoc float [[FPINC]], [[TMP4]]
; VEC1_INTERL2-NEXT: [[OFFSET_IDX:%.*]] = fsub reassoc float [[INIT]], [[TMP5]]
; VEC1_INTERL2-NEXT: [[TMP6:%.*]] = fmul reassoc float [[FPINC]], 0.000000e+00
; VEC1_INTERL2-NEXT: [[TMP7:%.*]] = fsub reassoc float [[OFFSET_IDX]], [[TMP6]]
; VEC1_INTERL2-NEXT: [[TMP8:%.*]] = fsub reassoc float [[OFFSET_IDX]], [[FPINC]]
; VEC1_INTERL2-NEXT: [[INDUCTION2:%.*]] = or i64 [[INDEX]], 1
; VEC1_INTERL2-NEXT: [[TMP9:%.*]] = getelementptr inbounds float, float* [[A:%.*]], i64 [[INDEX]]
; VEC1_INTERL2-NEXT: [[TMP10:%.*]] = getelementptr inbounds float, float* [[A]], i64 [[INDUCTION2]]
; VEC1_INTERL2-NEXT: store float [[TMP7]], float* [[TMP9]], align 4
@@ -664,11 +664,11 @@ define void @fp_iv_loop2(float %init, float* noalias nocapture %A, i32 %N) #0 {
; VEC1_INTERL2-NEXT: br label [[VECTOR_BODY:%.*]]
; VEC1_INTERL2: vector.body:
; VEC1_INTERL2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; VEC1_INTERL2-NEXT: [[INDUCTION2:%.*]] = or i64 [[INDEX]], 1
; VEC1_INTERL2-NEXT: [[TMP4:%.*]] = sitofp i64 [[INDEX]] to float
; VEC1_INTERL2-NEXT: [[TMP5:%.*]] = fmul fast float [[TMP4]], 5.000000e-01
; VEC1_INTERL2-NEXT: [[OFFSET_IDX:%.*]] = fadd fast float [[TMP5]], [[INIT]]
; VEC1_INTERL2-NEXT: [[TMP6:%.*]] = fadd fast float [[OFFSET_IDX]], 5.000000e-01
; VEC1_INTERL2-NEXT: [[INDUCTION2:%.*]] = or i64 [[INDEX]], 1
; VEC1_INTERL2-NEXT: [[TMP7:%.*]] = getelementptr inbounds float, float* [[A:%.*]], i64 [[INDEX]]
; VEC1_INTERL2-NEXT: [[TMP8:%.*]] = getelementptr inbounds float, float* [[A]], i64 [[INDUCTION2]]
; VEC1_INTERL2-NEXT: store float [[OFFSET_IDX]], float* [[TMP7]], align 4
@@ -984,29 +984,29 @@ define void @fp_iv_loop3(float %init, float* noalias nocapture %A, float* noalia
; VEC1_INTERL2-NEXT: br label [[VECTOR_BODY:%.*]]
; VEC1_INTERL2: vector.body:
; VEC1_INTERL2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; VEC1_INTERL2-NEXT: [[INDUCTION5:%.*]] = or i64 [[INDEX]], 1
; VEC1_INTERL2-NEXT: [[TMP6:%.*]] = sitofp i64 [[INDEX]] to float
; VEC1_INTERL2-NEXT: [[TMP7:%.*]] = fmul fast float [[TMP6]], -5.000000e-01
; VEC1_INTERL2-NEXT: [[TMP8:%.*]] = sitofp i64 [[INDEX]] to float
; VEC1_INTERL2-NEXT: [[TMP9:%.*]] = fmul fast float [[TMP0]], [[TMP8]]
; VEC1_INTERL2-NEXT: [[OFFSET_IDX8:%.*]] = fadd fast float [[TMP9]], [[INIT]]
; VEC1_INTERL2-NEXT: [[TMP10:%.*]] = fadd fast float [[OFFSET_IDX8]], [[TMP0]]
; VEC1_INTERL2-NEXT: [[TMP7:%.*]] = fmul fast float [[TMP0]], [[TMP6]]
; VEC1_INTERL2-NEXT: [[OFFSET_IDX:%.*]] = fadd fast float [[TMP7]], [[INIT]]
; VEC1_INTERL2-NEXT: [[TMP8:%.*]] = fadd fast float [[OFFSET_IDX]], [[TMP0]]
; VEC1_INTERL2-NEXT: [[TMP9:%.*]] = sitofp i64 [[INDEX]] to float
; VEC1_INTERL2-NEXT: [[TMP10:%.*]] = fmul fast float [[TMP9]], -5.000000e-01
; VEC1_INTERL2-NEXT: [[INDUCTION6:%.*]] = or i64 [[INDEX]], 1
; VEC1_INTERL2-NEXT: [[TMP11:%.*]] = getelementptr inbounds float, float* [[A:%.*]], i64 [[INDEX]]
; VEC1_INTERL2-NEXT: [[TMP12:%.*]] = getelementptr inbounds float, float* [[A]], i64 [[INDUCTION5]]
; VEC1_INTERL2-NEXT: store float [[OFFSET_IDX8]], float* [[TMP11]], align 4
; VEC1_INTERL2-NEXT: store float [[TMP10]], float* [[TMP12]], align 4
; VEC1_INTERL2-NEXT: [[TMP13:%.*]] = fadd fast float [[OFFSET_IDX8]], [[TMP0]]
; VEC1_INTERL2-NEXT: [[TMP14:%.*]] = fadd fast float [[TMP10]], [[TMP0]]
; VEC1_INTERL2-NEXT: [[TMP15:%.*]] = fadd fast float [[TMP7]], 0xBFD99999A0000000
; VEC1_INTERL2-NEXT: [[TMP16:%.*]] = fadd fast float [[TMP7]], 0xBFECCCCCC0000000
; VEC1_INTERL2-NEXT: [[TMP12:%.*]] = getelementptr inbounds float, float* [[A]], i64 [[INDUCTION6]]
; VEC1_INTERL2-NEXT: store float [[OFFSET_IDX]], float* [[TMP11]], align 4
; VEC1_INTERL2-NEXT: store float [[TMP8]], float* [[TMP12]], align 4
; VEC1_INTERL2-NEXT: [[TMP13:%.*]] = fadd fast float [[OFFSET_IDX]], [[TMP0]]
; VEC1_INTERL2-NEXT: [[TMP14:%.*]] = fadd fast float [[TMP8]], [[TMP0]]
; VEC1_INTERL2-NEXT: [[TMP15:%.*]] = fadd fast float [[TMP10]], 0xBFD99999A0000000
; VEC1_INTERL2-NEXT: [[TMP16:%.*]] = fadd fast float [[TMP10]], 0xBFECCCCCC0000000
; VEC1_INTERL2-NEXT: [[TMP17:%.*]] = fadd fast float [[TMP15]], [[TMP13]]
; VEC1_INTERL2-NEXT: [[TMP18:%.*]] = fadd fast float [[TMP16]], [[TMP14]]
; VEC1_INTERL2-NEXT: [[TMP19:%.*]] = getelementptr inbounds float, float* [[B:%.*]], i64 [[INDEX]]
; VEC1_INTERL2-NEXT: [[TMP20:%.*]] = getelementptr inbounds float, float* [[B]], i64 [[INDUCTION5]]
; VEC1_INTERL2-NEXT: [[TMP20:%.*]] = getelementptr inbounds float, float* [[B]], i64 [[INDUCTION6]]
; VEC1_INTERL2-NEXT: store float [[TMP17]], float* [[TMP19]], align 4
; VEC1_INTERL2-NEXT: store float [[TMP18]], float* [[TMP20]], align 4
; VEC1_INTERL2-NEXT: [[TMP21:%.*]] = getelementptr inbounds float, float* [[C:%.*]], i64 [[INDEX]]
; VEC1_INTERL2-NEXT: [[TMP22:%.*]] = getelementptr inbounds float, float* [[C]], i64 [[INDUCTION5]]
; VEC1_INTERL2-NEXT: [[TMP22:%.*]] = getelementptr inbounds float, float* [[C]], i64 [[INDUCTION6]]
; VEC1_INTERL2-NEXT: store float [[TMP15]], float* [[TMP21]], align 4
; VEC1_INTERL2-NEXT: store float [[TMP16]], float* [[TMP22]], align 4
; VEC1_INTERL2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
@@ -1279,11 +1279,11 @@ define void @fp_iv_loop4(float* noalias nocapture %A, i32 %N) {
; VEC1_INTERL2-NEXT: br label [[VECTOR_BODY:%.*]]
; VEC1_INTERL2: vector.body:
; VEC1_INTERL2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; VEC1_INTERL2-NEXT: [[INDUCTION2:%.*]] = or i64 [[INDEX]], 1
; VEC1_INTERL2-NEXT: [[TMP4:%.*]] = sitofp i64 [[INDEX]] to float
; VEC1_INTERL2-NEXT: [[TMP5:%.*]] = fmul fast float [[TMP4]], 5.000000e-01
; VEC1_INTERL2-NEXT: [[OFFSET_IDX:%.*]] = fadd fast float [[TMP5]], 1.000000e+00
; VEC1_INTERL2-NEXT: [[TMP6:%.*]] = fadd fast float [[TMP5]], 1.500000e+00
; VEC1_INTERL2-NEXT: [[INDUCTION2:%.*]] = or i64 [[INDEX]], 1
; VEC1_INTERL2-NEXT: [[TMP7:%.*]] = getelementptr inbounds float, float* [[A:%.*]], i64 [[INDEX]]
; VEC1_INTERL2-NEXT: [[TMP8:%.*]] = getelementptr inbounds float, float* [[A]], i64 [[INDUCTION2]]
; VEC1_INTERL2-NEXT: store float [[OFFSET_IDX]], float* [[TMP7]], align 4
@@ -1471,8 +1471,8 @@ define void @non_primary_iv_float_scalar(float* %A, i64 %N) {
; VEC4_INTERL2-NEXT: br label [[VECTOR_BODY:%.*]]
; VEC4_INTERL2: vector.body:
; VEC4_INTERL2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE17:%.*]] ]
; VEC4_INTERL2-NEXT: [[TMP0:%.*]] = or i64 [[INDEX]], 4
; VEC4_INTERL2-NEXT: [[TMP1:%.*]] = sitofp i64 [[INDEX]] to float
; VEC4_INTERL2-NEXT: [[TMP0:%.*]] = sitofp i64 [[INDEX]] to float
; VEC4_INTERL2-NEXT: [[TMP1:%.*]] = or i64 [[INDEX]], 4
; VEC4_INTERL2-NEXT: [[TMP2:%.*]] = getelementptr inbounds float, float* [[A:%.*]], i64 [[INDEX]]
; VEC4_INTERL2-NEXT: [[TMP3:%.*]] = bitcast float* [[TMP2]] to <4 x float>*
; VEC4_INTERL2-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, <4 x float>* [[TMP3]], align 4
@@ -1485,13 +1485,13 @@ define void @non_primary_iv_float_scalar(float* %A, i64 %N) {
; VEC4_INTERL2-NEXT: br i1 [[TMP8]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]]
; VEC4_INTERL2: pred.store.if:
; VEC4_INTERL2-NEXT: [[TMP9:%.*]] = getelementptr inbounds float, float* [[A]], i64 [[INDEX]]
; VEC4_INTERL2-NEXT: store float [[TMP1]], float* [[TMP9]], align 4
; VEC4_INTERL2-NEXT: store float [[TMP0]], float* [[TMP9]], align 4
; VEC4_INTERL2-NEXT: br label [[PRED_STORE_CONTINUE]]
; VEC4_INTERL2: pred.store.continue:
; VEC4_INTERL2-NEXT: [[TMP10:%.*]] = extractelement <4 x i1> [[TMP6]], i64 1
; VEC4_INTERL2-NEXT: br i1 [[TMP10]], label [[PRED_STORE_IF4:%.*]], label [[PRED_STORE_CONTINUE5:%.*]]
; VEC4_INTERL2: pred.store.if4:
; VEC4_INTERL2-NEXT: [[TMP11:%.*]] = fadd fast float [[TMP1]], 1.000000e+00
; VEC4_INTERL2-NEXT: [[TMP11:%.*]] = fadd fast float [[TMP0]], 1.000000e+00
; VEC4_INTERL2-NEXT: [[TMP12:%.*]] = or i64 [[INDEX]], 1
; VEC4_INTERL2-NEXT: [[TMP13:%.*]] = getelementptr inbounds float, float* [[A]], i64 [[TMP12]]
; VEC4_INTERL2-NEXT: store float [[TMP11]], float* [[TMP13]], align 4
@@ -1500,7 +1500,7 @@ define void @non_primary_iv_float_scalar(float* %A, i64 %N) {
; VEC4_INTERL2-NEXT: [[TMP14:%.*]] = extractelement <4 x i1> [[TMP6]], i64 2
; VEC4_INTERL2-NEXT: br i1 [[TMP14]], label [[PRED_STORE_IF6:%.*]], label [[PRED_STORE_CONTINUE7:%.*]]
; VEC4_INTERL2: pred.store.if6:
; VEC4_INTERL2-NEXT: [[TMP15:%.*]] = fadd fast float [[TMP1]], 2.000000e+00
; VEC4_INTERL2-NEXT: [[TMP15:%.*]] = fadd fast float [[TMP0]], 2.000000e+00
; VEC4_INTERL2-NEXT: [[TMP16:%.*]] = or i64 [[INDEX]], 2
; VEC4_INTERL2-NEXT: [[TMP17:%.*]] = getelementptr inbounds float, float* [[A]], i64 [[TMP16]]
; VEC4_INTERL2-NEXT: store float [[TMP15]], float* [[TMP17]], align 4
@@ -1509,7 +1509,7 @@ define void @non_primary_iv_float_scalar(float* %A, i64 %N) {
; VEC4_INTERL2-NEXT: [[TMP18:%.*]] = extractelement <4 x i1> [[TMP6]], i64 3
; VEC4_INTERL2-NEXT: br i1 [[TMP18]], label [[PRED_STORE_IF8:%.*]], label [[PRED_STORE_CONTINUE9:%.*]]
; VEC4_INTERL2: pred.store.if8:
; VEC4_INTERL2-NEXT: [[TMP19:%.*]] = fadd fast float [[TMP1]], 3.000000e+00
; VEC4_INTERL2-NEXT: [[TMP19:%.*]] = fadd fast float [[TMP0]], 3.000000e+00
; VEC4_INTERL2-NEXT: [[TMP20:%.*]] = or i64 [[INDEX]], 3
; VEC4_INTERL2-NEXT: [[TMP21:%.*]] = getelementptr inbounds float, float* [[A]], i64 [[TMP20]]
; VEC4_INTERL2-NEXT: store float [[TMP19]], float* [[TMP21]], align 4
@@ -1518,15 +1518,15 @@ define void @non_primary_iv_float_scalar(float* %A, i64 %N) {
; VEC4_INTERL2-NEXT: [[TMP22:%.*]] = extractelement <4 x i1> [[TMP7]], i64 0
; VEC4_INTERL2-NEXT: br i1 [[TMP22]], label [[PRED_STORE_IF10:%.*]], label [[PRED_STORE_CONTINUE11:%.*]]
; VEC4_INTERL2: pred.store.if10:
; VEC4_INTERL2-NEXT: [[TMP23:%.*]] = fadd fast float [[TMP1]], 4.000000e+00
; VEC4_INTERL2-NEXT: [[TMP24:%.*]] = getelementptr inbounds float, float* [[A]], i64 [[TMP0]]
; VEC4_INTERL2-NEXT: [[TMP23:%.*]] = fadd fast float [[TMP0]], 4.000000e+00
; VEC4_INTERL2-NEXT: [[TMP24:%.*]] = getelementptr inbounds float, float* [[A]], i64 [[TMP1]]
; VEC4_INTERL2-NEXT: store float [[TMP23]], float* [[TMP24]], align 4
; VEC4_INTERL2-NEXT: br label [[PRED_STORE_CONTINUE11]]
; VEC4_INTERL2: pred.store.continue11:
; VEC4_INTERL2-NEXT: [[TMP25:%.*]] = extractelement <4 x i1> [[TMP7]], i64 1
; VEC4_INTERL2-NEXT: br i1 [[TMP25]], label [[PRED_STORE_IF12:%.*]], label [[PRED_STORE_CONTINUE13:%.*]]
; VEC4_INTERL2: pred.store.if12:
; VEC4_INTERL2-NEXT: [[TMP26:%.*]] = fadd fast float [[TMP1]], 5.000000e+00
; VEC4_INTERL2-NEXT: [[TMP26:%.*]] = fadd fast float [[TMP0]], 5.000000e+00
; VEC4_INTERL2-NEXT: [[TMP27:%.*]] = or i64 [[INDEX]], 5
; VEC4_INTERL2-NEXT: [[TMP28:%.*]] = getelementptr inbounds float, float* [[A]], i64 [[TMP27]]
; VEC4_INTERL2-NEXT: store float [[TMP26]], float* [[TMP28]], align 4
@@ -1535,7 +1535,7 @@ define void @non_primary_iv_float_scalar(float* %A, i64 %N) {
; VEC4_INTERL2-NEXT: [[TMP29:%.*]] = extractelement <4 x i1> [[TMP7]], i64 2
; VEC4_INTERL2-NEXT: br i1 [[TMP29]], label [[PRED_STORE_IF14:%.*]], label [[PRED_STORE_CONTINUE15:%.*]]
; VEC4_INTERL2: pred.store.if14:
; VEC4_INTERL2-NEXT: [[TMP30:%.*]] = fadd fast float [[TMP1]], 6.000000e+00
; VEC4_INTERL2-NEXT: [[TMP30:%.*]] = fadd fast float [[TMP0]], 6.000000e+00
; VEC4_INTERL2-NEXT: [[TMP31:%.*]] = or i64 [[INDEX]], 6
; VEC4_INTERL2-NEXT: [[TMP32:%.*]] = getelementptr inbounds float, float* [[A]], i64 [[TMP31]]
; VEC4_INTERL2-NEXT: store float [[TMP30]], float* [[TMP32]], align 4
@@ -1544,7 +1544,7 @@ define void @non_primary_iv_float_scalar(float* %A, i64 %N) {
; VEC4_INTERL2-NEXT: [[TMP33:%.*]] = extractelement <4 x i1> [[TMP7]], i64 3
; VEC4_INTERL2-NEXT: br i1 [[TMP33]], label [[PRED_STORE_IF16:%.*]], label [[PRED_STORE_CONTINUE17]]
; VEC4_INTERL2: pred.store.if16:
; VEC4_INTERL2-NEXT: [[TMP34:%.*]] = fadd fast float [[TMP1]], 7.000000e+00
; VEC4_INTERL2-NEXT: [[TMP34:%.*]] = fadd fast float [[TMP0]], 7.000000e+00
; VEC4_INTERL2-NEXT: [[TMP35:%.*]] = or i64 [[INDEX]], 7
; VEC4_INTERL2-NEXT: [[TMP36:%.*]] = getelementptr inbounds float, float* [[A]], i64 [[TMP35]]
; VEC4_INTERL2-NEXT: store float [[TMP34]], float* [[TMP36]], align 4
@@ -1588,9 +1588,9 @@ define void @non_primary_iv_float_scalar(float* %A, i64 %N) {
; VEC1_INTERL2-NEXT: [[CAST_CRD:%.*]] = sitofp i64 [[N_VEC]] to float
; VEC1_INTERL2-NEXT: br label [[VECTOR_BODY:%.*]]
; VEC1_INTERL2: vector.body:
; VEC1_INTERL2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE5:%.*]] ]
; VEC1_INTERL2-NEXT: [[INDUCTION2:%.*]] = or i64 [[INDEX]], 1
; VEC1_INTERL2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE4:%.*]] ]
; VEC1_INTERL2-NEXT: [[TMP0:%.*]] = sitofp i64 [[INDEX]] to float
; VEC1_INTERL2-NEXT: [[INDUCTION2:%.*]] = or i64 [[INDEX]], 1
; VEC1_INTERL2-NEXT: [[TMP1:%.*]] = getelementptr inbounds float, float* [[A:%.*]], i64 [[INDEX]]
; VEC1_INTERL2-NEXT: [[TMP2:%.*]] = getelementptr inbounds float, float* [[A]], i64 [[INDUCTION2]]
; VEC1_INTERL2-NEXT: [[TMP3:%.*]] = load float, float* [[TMP1]], align 4
@@ -1602,12 +1602,12 @@ define void @non_primary_iv_float_scalar(float* %A, i64 %N) {
; VEC1_INTERL2-NEXT: store float [[TMP0]], float* [[TMP1]], align 4
; VEC1_INTERL2-NEXT: br label [[PRED_STORE_CONTINUE]]
; VEC1_INTERL2: pred.store.continue:
; VEC1_INTERL2-NEXT: br i1 [[TMP6]], label [[PRED_STORE_IF4:%.*]], label [[PRED_STORE_CONTINUE5]]
; VEC1_INTERL2: pred.store.if6:
; VEC1_INTERL2-NEXT: br i1 [[TMP6]], label [[PRED_STORE_IF3:%.*]], label [[PRED_STORE_CONTINUE4]]
; VEC1_INTERL2: pred.store.if3:
; VEC1_INTERL2-NEXT: [[TMP7:%.*]] = fadd fast float [[TMP0]], 1.000000e+00
; VEC1_INTERL2-NEXT: store float [[TMP7]], float* [[TMP2]], align 4
; VEC1_INTERL2-NEXT: br label [[PRED_STORE_CONTINUE5]]
; VEC1_INTERL2: pred.store.continue7:
; VEC1_INTERL2-NEXT: br label [[PRED_STORE_CONTINUE4]]
; VEC1_INTERL2: pred.store.continue4:
; VEC1_INTERL2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
; VEC1_INTERL2-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; VEC1_INTERL2-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]


@@ -463,14 +463,14 @@ define void @minimal_bit_widths(i1 %c) {
; UNROLL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE6:%.*]] ]
; UNROLL-NEXT: br i1 [[C:%.*]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE6]]
; UNROLL: pred.store.if:
; UNROLL-NEXT: [[INDUCTION:%.*]] = add i64 [[INDEX]], 0
; UNROLL-NEXT: [[TMP0:%.*]] = getelementptr i8, i8* undef, i64 [[INDUCTION]]
; UNROLL-NEXT: [[INDUCTION3:%.*]] = add i64 [[INDEX]], 0
; UNROLL-NEXT: [[TMP0:%.*]] = getelementptr i8, i8* undef, i64 [[INDUCTION3]]
; UNROLL-NEXT: [[TMP1:%.*]] = load i8, i8* [[TMP0]], align 1
; UNROLL-NEXT: [[TMP2:%.*]] = zext i8 [[TMP1]] to i32
; UNROLL-NEXT: [[TMP3:%.*]] = trunc i32 [[TMP2]] to i8
; UNROLL-NEXT: store i8 [[TMP3]], i8* [[TMP0]], align 1
; UNROLL-NEXT: [[INDUCTION2:%.*]] = add i64 [[INDEX]], 1
; UNROLL-NEXT: [[TMP4:%.*]] = getelementptr i8, i8* undef, i64 [[INDUCTION2]]
; UNROLL-NEXT: [[INDUCTION4:%.*]] = add i64 [[INDEX]], 1
; UNROLL-NEXT: [[TMP4:%.*]] = getelementptr i8, i8* undef, i64 [[INDUCTION4]]
; UNROLL-NEXT: [[TMP5:%.*]] = load i8, i8* [[TMP4]], align 1
; UNROLL-NEXT: [[TMP6:%.*]] = zext i8 [[TMP5]] to i32
; UNROLL-NEXT: [[TMP7:%.*]] = trunc i32 [[TMP6]] to i8
@@ -511,8 +511,8 @@ define void @minimal_bit_widths(i1 %c) {
; UNROLL-NOSIMPLIFY-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE6:%.*]] ]
; UNROLL-NOSIMPLIFY-NEXT: br i1 [[C:%.*]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]]
; UNROLL-NOSIMPLIFY: pred.store.if:
; UNROLL-NOSIMPLIFY-NEXT: [[INDUCTION:%.*]] = add i64 [[INDEX]], 0
; UNROLL-NOSIMPLIFY-NEXT: [[TMP0:%.*]] = getelementptr i8, i8* undef, i64 [[INDUCTION]]
; UNROLL-NOSIMPLIFY-NEXT: [[INDUCTION3:%.*]] = add i64 [[INDEX]], 0
; UNROLL-NOSIMPLIFY-NEXT: [[TMP0:%.*]] = getelementptr i8, i8* undef, i64 [[INDUCTION3]]
; UNROLL-NOSIMPLIFY-NEXT: [[TMP1:%.*]] = load i8, i8* [[TMP0]], align 1
; UNROLL-NOSIMPLIFY-NEXT: [[TMP2:%.*]] = zext i8 [[TMP1]] to i32
; UNROLL-NOSIMPLIFY-NEXT: [[TMP3:%.*]] = trunc i32 [[TMP2]] to i8


@@ -159,9 +159,9 @@ define void @multi_int_induction(i32* %A, i32 %N) {
; UNROLL-NO-IC: vector.body:
; UNROLL-NO-IC-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; UNROLL-NO-IC-NEXT: [[VEC_IND:%.*]] = phi <2 x i32> [ <i32 190, i32 191>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; UNROLL-NO-IC-NEXT: [[STEP_ADD:%.*]] = add <2 x i32> [[VEC_IND]], <i32 2, i32 2>
; UNROLL-NO-IC-NEXT: [[TMP3:%.*]] = add i64 [[INDEX]], 0
; UNROLL-NO-IC-NEXT: [[TMP4:%.*]] = add i64 [[INDEX]], 2
; UNROLL-NO-IC-NEXT: [[STEP_ADD:%.*]] = add <2 x i32> [[VEC_IND]], <i32 2, i32 2>
; UNROLL-NO-IC-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 [[TMP3]]
; UNROLL-NO-IC-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[TMP4]]
; UNROLL-NO-IC-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, i32* [[TMP5]], i32 0
@@ -5202,9 +5202,9 @@ define void @non_primary_iv_trunc(i32* %a, i64 %n) {
; UNROLL-NO-IC: vector.body:
; UNROLL-NO-IC-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; UNROLL-NO-IC-NEXT: [[VEC_IND3:%.*]] = phi <2 x i32> [ <i32 0, i32 2>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT6:%.*]], [[VECTOR_BODY]] ]
; UNROLL-NO-IC-NEXT: [[STEP_ADD4:%.*]] = add <2 x i32> [[VEC_IND3]], <i32 4, i32 4>
; UNROLL-NO-IC-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 0
; UNROLL-NO-IC-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 2
; UNROLL-NO-IC-NEXT: [[STEP_ADD4:%.*]] = add <2 x i32> [[VEC_IND3]], <i32 4, i32 4>
; UNROLL-NO-IC-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 [[TMP0]]
; UNROLL-NO-IC-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[TMP1]]
; UNROLL-NO-IC-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, i32* [[TMP2]], i32 0
@@ -6716,9 +6716,9 @@ define void @test_optimized_cast_induction_feeding_first_order_recurrence(i64 %n
; UNROLL-NO-IC-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; UNROLL-NO-IC-NEXT: [[VECTOR_RECUR:%.*]] = phi <2 x i32> [ <i32 poison, i32 0>, [[VECTOR_PH]] ], [ [[STEP_ADD:%.*]], [[VECTOR_BODY]] ]
; UNROLL-NO-IC-NEXT: [[VEC_IND:%.*]] = phi <2 x i32> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; UNROLL-NO-IC-NEXT: [[STEP_ADD]] = add <2 x i32> [[VEC_IND]], [[DOTSPLAT3]]
; UNROLL-NO-IC-NEXT: [[TMP21:%.*]] = add i64 [[INDEX]], 0
; UNROLL-NO-IC-NEXT: [[TMP22:%.*]] = add i64 [[INDEX]], 2
; UNROLL-NO-IC-NEXT: [[STEP_ADD]] = add <2 x i32> [[VEC_IND]], [[DOTSPLAT3]]
; UNROLL-NO-IC-NEXT: [[TMP23:%.*]] = shufflevector <2 x i32> [[VECTOR_RECUR]], <2 x i32> [[VEC_IND]], <2 x i32> <i32 1, i32 2>
; UNROLL-NO-IC-NEXT: [[TMP24:%.*]] = shufflevector <2 x i32> [[VEC_IND]], <2 x i32> [[STEP_ADD]], <2 x i32> <i32 1, i32 2>
; UNROLL-NO-IC-NEXT: [[TMP25:%.*]] = getelementptr inbounds i32, i32* [[PTR:%.*]], i64 [[TMP21]]


@@ -7,15 +7,13 @@ target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3
define void @array_at_plus_one(i32 %n) {
; CHECK-LABEL: @array_at_plus_one(
; CHECK: %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
; CHECK-NEXT: [[VEC_IV_1:%.+]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %vector.ph ], [ [[VEC_IV_1_NEXT:%.+]], %vector.body ]
; CHECK-NEXT: [[VEC_IV_TRUNC:%.+]] = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, %vector.ph ], [ [[VEC_IV_TRUNC_NEXT:%.+]], %vector.body ]
; CHECK: [[VEC_IV_TRUNC:%.+]] = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, %vector.ph ], [ [[VEC_IV_TRUNC_NEXT:%.+]], %vector.body ]
; CHECK: [[T1:%.+]] = add i64 %index, 0
; CHECK: [[T2:%.+]] = add nsw i64 [[T1]], 12
; CHECK-NEXT: [[GEP:%.+]] = getelementptr inbounds [1024 x i32], [1024 x i32]* @array, i64 0, i64 [[T2]]
; CHECK-NEXT: [[GEP0:%.+]] = getelementptr inbounds i32, i32* [[GEP]], i32 0
; CHECK-NEXT: [[BC:%.+]] = bitcast i32* [[GEP0]] to <4 x i32>*
; CHECK-NEXT: store <4 x i32> [[VEC_IV_TRUNC]], <4 x i32>* [[BC]]
; CHECK: [[VEC_IV_1_NEXT]] = add <4 x i64> [[VEC_IV_1]], <i64 4, i64 4, i64 4, i64 4>
; CHECK: [[VEC_IV_TRUNC_NEXT]] = add <4 x i32> [[VEC_IV_TRUNC]], <i32 4, i32 4, i32 4, i32 4>
; CHECK: ret void
;


@@ -186,10 +186,10 @@ define dso_local signext i32 @f2(float* noalias %A, float* noalias %B, i32 signe
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP12:%.*]] = add i64 [[INDEX]], 0
; CHECK-NEXT: [[OFFSET_IDX:%.*]] = trunc i64 [[INDEX]] to i32
; CHECK-NEXT: [[TMP13:%.*]] = add i32 [[OFFSET_IDX]], 0
; CHECK-NEXT: [[TMP14:%.*]] = xor i32 [[TMP13]], -1
; CHECK-NEXT: [[TMP12:%.*]] = add i32 [[OFFSET_IDX]], 0
; CHECK-NEXT: [[TMP13:%.*]] = add i64 [[INDEX]], 0
; CHECK-NEXT: [[TMP14:%.*]] = xor i32 [[TMP12]], -1
; CHECK-NEXT: [[TMP15:%.*]] = add i32 [[TMP14]], [[N]]
; CHECK-NEXT: [[TMP16:%.*]] = sext i32 [[TMP15]] to i64
; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds float, float* [[B:%.*]], i64 [[TMP16]]
@@ -199,7 +199,7 @@ define dso_local signext i32 @f2(float* noalias %A, float* noalias %B, i32 signe
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, <4 x float>* [[TMP20]], align 4
; CHECK-NEXT: [[REVERSE:%.*]] = shufflevector <4 x float> [[WIDE_LOAD]], <4 x float> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
; CHECK-NEXT: [[TMP21:%.*]] = fadd fast <4 x float> [[REVERSE]], <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>
; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds float, float* [[A:%.*]], i64 [[TMP12]]
; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds float, float* [[A:%.*]], i64 [[TMP13]]
; CHECK-NEXT: [[TMP23:%.*]] = getelementptr inbounds float, float* [[TMP22]], i32 0
; CHECK-NEXT: [[TMP24:%.*]] = bitcast float* [[TMP23]] to <4 x float>*
; CHECK-NEXT: store <4 x float> [[TMP21]], <4 x float>* [[TMP24]], align 4
@@ -222,10 +222,10 @@ define dso_local signext i32 @f2(float* noalias %A, float* noalias %B, i32 signe
; CHECK-NEXT: br label [[VEC_EPILOG_VECTOR_BODY:%.*]]
; CHECK: vec.epilog.vector.body:
; CHECK-NEXT: [[INDEX4:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], [[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT5:%.*]], [[VEC_EPILOG_VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP26:%.*]] = add i64 [[INDEX4]], 0
; CHECK-NEXT: [[OFFSET_IDX9:%.*]] = trunc i64 [[INDEX4]] to i32
; CHECK-NEXT: [[TMP27:%.*]] = add i32 [[OFFSET_IDX9]], 0
; CHECK-NEXT: [[TMP28:%.*]] = xor i32 [[TMP27]], -1
; CHECK-NEXT: [[TMP26:%.*]] = add i32 [[OFFSET_IDX9]], 0
; CHECK-NEXT: [[TMP27:%.*]] = add i64 [[INDEX4]], 0
; CHECK-NEXT: [[TMP28:%.*]] = xor i32 [[TMP26]], -1
; CHECK-NEXT: [[TMP29:%.*]] = add i32 [[TMP28]], [[N]]
; CHECK-NEXT: [[TMP30:%.*]] = sext i32 [[TMP29]] to i64
; CHECK-NEXT: [[TMP31:%.*]] = getelementptr inbounds float, float* [[B]], i64 [[TMP30]]
@@ -235,7 +235,7 @@ define dso_local signext i32 @f2(float* noalias %A, float* noalias %B, i32 signe
; CHECK-NEXT: [[WIDE_LOAD10:%.*]] = load <4 x float>, <4 x float>* [[TMP34]], align 4
; CHECK-NEXT: [[REVERSE11:%.*]] = shufflevector <4 x float> [[WIDE_LOAD10]], <4 x float> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
; CHECK-NEXT: [[TMP35:%.*]] = fadd fast <4 x float> [[REVERSE11]], <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>
; CHECK-NEXT: [[TMP36:%.*]] = getelementptr inbounds float, float* [[A]], i64 [[TMP26]]
; CHECK-NEXT: [[TMP36:%.*]] = getelementptr inbounds float, float* [[A]], i64 [[TMP27]]
; CHECK-NEXT: [[TMP37:%.*]] = getelementptr inbounds float, float* [[TMP36]], i32 0
; CHECK-NEXT: [[TMP38:%.*]] = bitcast float* [[TMP37]] to <4 x float>*
; CHECK-NEXT: store <4 x float> [[TMP35]], <4 x float>* [[TMP38]], align 4


@@ -151,7 +151,7 @@ define void @pointer_induction_used_as_vector(i8** noalias %start.1, i8* noalias
; CHECK-NEXT: [[TMP4:%.*]] = getelementptr i8*, i8** [[NEXT_GEP]], i32 0
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i8** [[TMP4]] to <4 x i8*>*
; CHECK-NEXT: store <4 x i8*> [[TMP3]], <4 x i8*>* [[TMP5]], align 8
; CHECK-NEXT: [[TMP6:%.*]] = extractelement <4 x i8*> [[TMP2]], i32 0
; CHECK-NEXT: [[TMP6:%.*]] = extractelement <4 x i8*> [[TMP1]], i32 0
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i8, i8* [[TMP6]], i32 0
; CHECK-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to <4 x i8>*
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i8>, <4 x i8>* [[TMP8]], align 1


@@ -32,9 +32,9 @@ loopend:
; CHECK-LABEL: @reverse_induction_i128(
; CHECK: %index = phi i128 [ 0, %vector.ph ], [ %index.next, %vector.body ]
; CHECK: %offset.idx = sub i128 %startval, %index
; CHECK: %[[a0:.+]] = add i128 %offset.idx, 0
; CHECK: %[[a4:.+]] = add i128 %offset.idx, -4
; CHECK: [[OFFSET_IDX:%.+]] = sub i128 %startval, %index
; CHECK: %[[a0:.+]] = add i128 [[OFFSET_IDX]], 0
; CHECK: %[[a4:.+]] = add i128 [[OFFSET_IDX]], -4
define i32 @reverse_induction_i128(i128 %startval, i32 * %ptr) {
entry:


@@ -12,18 +12,18 @@ define void @test(float* %A, i32 %x) {
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 0
; CHECK-NEXT: [[TMP9:%.*]] = trunc i64 [[INDEX]] to i32
; CHECK-NEXT: [[TMP10:%.*]] = add i32 [[TMP9]], 0
; CHECK-NEXT: [[TMP2:%.*]] = add nuw nsw i64 [[TMP1]], 1
; CHECK-NEXT: [[TMP3:%.*]] = trunc i64 [[TMP2]] to i32
; CHECK-NEXT: [[TMP4:%.*]] = mul i32 [[TMP3]], [[X]]
; CHECK-NEXT: [[TMP5:%.*]] = zext i32 [[TMP4]] to i64
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds float, float* [[A:%.*]], i64 [[TMP5]]
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds float, float* [[TMP6]], i32 0
; CHECK-NEXT: [[TMP8:%.*]] = bitcast float* [[TMP7]] to <4 x float>*
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, <4 x float>* [[TMP8]], align 4
; CHECK-NEXT: [[TMP11:%.*]] = mul i32 [[TMP10]], [[X]]
; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[INDEX]] to i32
; CHECK-NEXT: [[TMP2:%.*]] = add i32 [[TMP1]], 0
; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[INDEX]], 0
; CHECK-NEXT: [[TMP4:%.*]] = add nuw nsw i64 [[TMP3]], 1
; CHECK-NEXT: [[TMP5:%.*]] = trunc i64 [[TMP4]] to i32
; CHECK-NEXT: [[TMP6:%.*]] = mul i32 [[TMP5]], [[X]]
; CHECK-NEXT: [[TMP7:%.*]] = zext i32 [[TMP6]] to i64
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds float, float* [[A:%.*]], i64 [[TMP7]]
; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds float, float* [[TMP8]], i32 0
; CHECK-NEXT: [[TMP10:%.*]] = bitcast float* [[TMP9]] to <4 x float>*
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, <4 x float>* [[TMP10]], align 4
; CHECK-NEXT: [[TMP11:%.*]] = mul i32 [[TMP2]], [[X]]
; CHECK-NEXT: [[TMP12:%.*]] = zext i32 [[TMP11]] to i64
; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds float, float* [[A]], i64 [[TMP12]]
; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds float, float* [[TMP13]], i32 0


@@ -37,7 +37,7 @@ define void @basic_loop(i8* nocapture readonly %ptr, i32 %size, i8** %pos) {
; CHECK-NEXT: store <4 x i8> [[WIDE_LOAD]], <4 x i8>* [[TMP6]], align 1
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop !0
; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[SIZE]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[END:%.*]], label [[SCALAR_PH]]
@@ -53,7 +53,7 @@ define void @basic_loop(i8* nocapture readonly %ptr, i32 %size, i8** %pos) {
; CHECK-NEXT: [[TMP8:%.*]] = load i8, i8* [[INCDEC_PTR]], align 1
; CHECK-NEXT: store i8 [[TMP8]], i8* [[BUFF]], align 1
; CHECK-NEXT: [[TOBOOL11:%.*]] = icmp eq i32 [[DEC]], 0
; CHECK-NEXT: br i1 [[TOBOOL11]], label [[END]], label [[BODY]], !llvm.loop !2
; CHECK-NEXT: br i1 [[TOBOOL11]], label [[END]], label [[BODY]], !llvm.loop [[LOOP2:![0-9]+]]
; CHECK: end:
; CHECK-NEXT: [[INCDEC_PTR_LCSSA:%.*]] = phi i8* [ [[INCDEC_PTR]], [[BODY]] ], [ [[IND_END2]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT: store i8* [[INCDEC_PTR_LCSSA]], i8** [[POS]], align 4
@@ -103,7 +103,7 @@ define void @metadata(i8* nocapture readonly %ptr, i32 %size, i8** %pos) {
; CHECK-NEXT: store <4 x i8> [[WIDE_LOAD]], <4 x i8>* [[TMP6]], align 1
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop !4
; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[SIZE]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[END:%.*]], label [[SCALAR_PH]]
@@ -119,7 +119,7 @@ define void @metadata(i8* nocapture readonly %ptr, i32 %size, i8** %pos) {
; CHECK-NEXT: [[TMP8:%.*]] = load i8, i8* [[INCDEC_PTR]], align 1
; CHECK-NEXT: store i8 [[TMP8]], i8* [[BUFF]], align 1
; CHECK-NEXT: [[TOBOOL11:%.*]] = icmp eq i32 [[DEC]], 0
; CHECK-NEXT: br i1 [[TOBOOL11]], label [[END]], label [[BODY]], !llvm.loop !5
; CHECK-NEXT: br i1 [[TOBOOL11]], label [[END]], label [[BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: end:
; CHECK-NEXT: [[INCDEC_PTR_LCSSA:%.*]] = phi i8* [ [[INCDEC_PTR]], [[BODY]] ], [ [[IND_END2]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT: store i8* [[INCDEC_PTR_LCSSA]], i8** [[POS]], align 4


@@ -13,7 +13,7 @@ define void @print_call_and_memory(i64 %n, float* noalias %y, float* noalias %x)
; CHECK-NEXT: N1 [label =
; CHECK-NEXT: "for.body:\l" +
; CHECK-NEXT: " EMIT vp\<[[CAN_IV:%.+]]\> = CANONICAL-INDUCTION\l" +
; CHECK-NEXT: " WIDEN-INDUCTION %iv = phi %iv.next, 0\l" +
; CHECK-NEXT: " ir\<%iv\> = SCALAR-STEPS vp\<[[CAN_IV]]\>, ir\<0\>, ir\<1\>\l" +
; CHECK-NEXT: " CLONE ir\<%arrayidx\> = getelementptr ir\<%y\>, ir\<%iv\>\l" +
; CHECK-NEXT: " WIDEN ir\<%lv\> = load ir\<%arrayidx\>\l" +
; CHECK-NEXT: " WIDEN-CALL ir\<%call\> = call @llvm.sqrt.f32(ir\<%lv\>)\l" +


@@ -14,7 +14,7 @@ define void @print_call_and_memory(i64 %n, float* noalias %y, float* noalias %x)
; CHECK-NEXT: <x1> vector loop: {
; CHECK-NEXT: for.body:
; CHECK-NEXT: EMIT vp<[[CAN_IV:%.+]]> = CANONICAL-INDUCTION
; CHECK-NEXT: WIDEN-INDUCTION %iv = phi %iv.next, 0
; CHECK-NEXT: SCALAR-STEPS vp<[[CAN_IV]]>, ir<0>, ir<1>
; CHECK-NEXT: CLONE ir<%arrayidx> = getelementptr ir<%y>, ir<%iv>
; CHECK-NEXT: WIDEN ir<%lv> = load ir<%arrayidx>
; CHECK-NEXT: WIDEN-CALL ir<%call> = call @llvm.sqrt.f32(ir<%lv>)
@@ -98,8 +98,8 @@ define float @print_reduction(i64 %n, float* noalias %y) {
; CHECK-NEXT: <x1> vector loop: {
; CHECK-NEXT: for.body:
; CHECK-NEXT: EMIT vp<[[CAN_IV:%.+]]> = CANONICAL-INDUCTION
; CHECK-NEXT: WIDEN-INDUCTION %iv = phi %iv.next, 0
; CHECK-NEXT: WIDEN-REDUCTION-PHI ir<%red> = phi ir<0.000000e+00>, ir<%red.next>
; CHECK-NEXT: SCALAR-STEPS vp<[[CAN_IV]]>, ir<0>, ir<1>
; CHECK-NEXT: CLONE ir<%arrayidx> = getelementptr ir<%y>, ir<%iv>
; CHECK-NEXT: WIDEN ir<%lv> = load ir<%arrayidx>
; CHECK-NEXT: REDUCE ir<%red.next> = ir<%red> + fast reduce.fadd (ir<%lv>)
@@ -208,7 +208,7 @@ define void @print_interleave_groups(i32 %C, i32 %D) {
; CHECK-NEXT: <x1> vector loop: {
; CHECK-NEXT: for.body:
; CHECK-NEXT: EMIT vp<[[CAN_IV:%.+]]> = CANONICAL-INDUCTION
; CHECK-NEXT: WIDEN-INDUCTION %iv = phi 0, %iv.next
; CHECK-NEXT: SCALAR-STEPS vp<[[CAN_IV]]>, ir<0>, ir<4>
; CHECK-NEXT: CLONE ir<%gep.AB.0> = getelementptr ir<@AB>, ir<0>, ir<%iv>
; CHECK-NEXT: INTERLEAVE-GROUP with factor 4 at %AB.0, ir<%gep.AB.0>
; CHECK-NEXT: ir<%AB.0> = load from index 0
@@ -268,8 +268,8 @@ define float @print_fmuladd_strict(float* %a, float* %b, i64 %n) {
; CHECK-NEXT: <x1> vector loop: {
; CHECK-NEXT: for.body:
; CHECK-NEXT: EMIT vp<[[CAN_IV:%.+]]> = CANONICAL-INDUCTION
; CHECK-NEXT: WIDEN-INDUCTION %iv = phi 0, %iv.next
; CHECK-NEXT: WIDEN-REDUCTION-PHI ir<%sum.07> = phi ir<0.000000e+00>, ir<%muladd>
; CHECK-NEXT: SCALAR-STEPS vp<[[CAN_IV]]>, ir<0>, ir<1>
; CHECK-NEXT: CLONE ir<%arrayidx> = getelementptr ir<%a>, ir<%iv>
; CHECK-NEXT: WIDEN ir<%l.a> = load ir<%arrayidx>
; CHECK-NEXT: CLONE ir<%arrayidx2> = getelementptr ir<%b>, ir<%iv>
@@ -308,7 +308,7 @@ define void @debug_loc_vpinstruction(i32* nocapture %asd, i32* nocapture %bsd) !
; CHECK-NEXT: <x1> vector loop: {
; CHECK-NEXT: loop:
; CHECK-NEXT: EMIT vp<[[CAN_IV:%.+]]> = CANONICAL-INDUCTION
; CHECK-NEXT: WIDEN-INDUCTION %iv = phi 0, %iv.next
; CHECK-NEXT: SCALAR-STEPS vp<[[CAN_IV]]>, ir<0>, ir<1>
; CHECK-NEXT: CLONE ir<%isd> = getelementptr ir<%asd>, ir<%iv>
; CHECK-NEXT: WIDEN ir<%lsd> = load ir<%isd>
; CHECK-NEXT: WIDEN ir<%psd> = add ir<%lsd>, ir<23>


@@ -13,7 +13,7 @@ define void @sink_with_sideeffects(i1 %c, i8* %ptr) {
; CHECK-NEXT: <x1> vector loop: {
; CHECK-NEXT: for.body:
; CHECK-NEXT: EMIT vp<[[CAN_IV:%.+]]> = CANONICAL-INDUCTION
; CHECK-NEXT: WIDEN-INDUCTION %tmp0 = phi %tmp6, 0
; CHECK-NEXT: ir<%tmp0> = SCALAR-STEPS vp<[[CAN_IV]]>, ir<0>, ir<1>
; CHECK-NEXT: CLONE ir<%tmp2> = getelementptr ir<%ptr>, ir<%tmp0>
; CHECK-NEXT: CLONE ir<%tmp3> = load ir<%tmp2>
; CHECK-NEXT: CLONE store ir<0>, ir<%tmp2>