Revert "[VPlan] Add VPDerivedIVRecipe, use for VPScalarIVStepsRecipe."
This reverts commit 0fa666eced.
This triggers an assertion during AArch64 stage2 builds. Revert while I
investigate.
See https://lab.llvm.org/buildbot/#/builders/179/builds/4967/steps/11/logs/stdio
Parent: 07008a8df5
Commit: bf15f1e489
@@ -2333,19 +2333,22 @@ static Value *getStepVector(Value *Val, Value *StartIdx, Value *Step,
/// variable on which to base the steps, \p Step is the size of the step.
static void buildScalarSteps(Value *ScalarIV, Value *Step,
const InductionDescriptor &ID, VPValue *Def,
VPTransformState &State) {
Type *TruncToTy, VPTransformState &State) {
IRBuilderBase &Builder = State.Builder;

// Ensure step has the same type as that of scalar IV.
Type *ScalarIVTy = ScalarIV->getType()->getScalarType();
if (ScalarIVTy != Step->getType()) {
// TODO: Also use VPDerivedIVRecipe when only the step needs truncating, to
// avoid separate truncate here.
if (TruncToTy) {
assert(Step->getType()->isIntegerTy() &&
"Truncation requires an integer step");
Step = State.Builder.CreateTrunc(Step, ScalarIVTy);
ScalarIV = State.Builder.CreateTrunc(ScalarIV, TruncToTy);
Step = State.Builder.CreateTrunc(Step, TruncToTy);
ScalarIVTy = ScalarIV->getType()->getScalarType();
}

// We shouldn't have to build scalar steps if we aren't vectorizing.
// Get the value type and ensure it and the step have the same integer type.
assert(ScalarIVTy == Step->getType() &&
"Val and Step should have the same type");

// We build scalar steps for both integer and floating-point induction
// variables. Here, we determine the kind of arithmetic we will perform.
Instruction::BinaryOps AddOp;
@@ -9530,32 +9533,6 @@ void VPWidenPointerInductionRecipe::execute(VPTransformState &State) {
}
}

void VPDerivedIVRecipe::execute(VPTransformState &State) {
assert(!State.Instance && "VPDerivedIVRecipe being replicated.");

// Fast-math-flags propagate from the original induction instruction.
IRBuilder<>::FastMathFlagGuard FMFG(State.Builder);
if (IndDesc.getInductionBinOp() &&
isa<FPMathOperator>(IndDesc.getInductionBinOp()))
State.Builder.setFastMathFlags(
IndDesc.getInductionBinOp()->getFastMathFlags());

Value *Step = State.get(getStepValue(), VPIteration(0, 0));
Value *CanonicalIV = State.get(getCanonicalIV(), VPIteration(0, 0));
Value *DerivedIV =
emitTransformedIndex(State.Builder, CanonicalIV,
getStartValue()->getLiveInIRValue(), Step, IndDesc);
DerivedIV->setName("offset.idx");
if (ResultTy != DerivedIV->getType()) {
assert(Step->getType()->isIntegerTy() &&
"Truncation requires an integer step");
DerivedIV = State.Builder.CreateTrunc(DerivedIV, ResultTy);
}
assert(DerivedIV != CanonicalIV && "IV didn't need transforming?");

State.set(this, DerivedIV, VPIteration(0, 0));
}

void VPScalarIVStepsRecipe::execute(VPTransformState &State) {
assert(!State.Instance && "VPScalarIVStepsRecipe being replicated.");

@@ -9566,10 +9543,21 @@ void VPScalarIVStepsRecipe::execute(VPTransformState &State) {
State.Builder.setFastMathFlags(
IndDesc.getInductionBinOp()->getFastMathFlags());

Value *BaseIV = State.get(getOperand(0), VPIteration(0, 0));
Value *Step = State.get(getStepValue(), VPIteration(0, 0));
auto CreateScalarIV = [&](Value *&Step) -> Value * {
Value *ScalarIV = State.get(getCanonicalIV(), VPIteration(0, 0));
auto *CanonicalIV = State.get(getParent()->getPlan()->getCanonicalIV(), 0);
if (!isCanonical() || CanonicalIV->getType() != Step->getType()) {
ScalarIV = emitTransformedIndex(State.Builder, ScalarIV,
getStartValue()->getLiveInIRValue(), Step,
IndDesc);
ScalarIV->setName("offset.idx");
}
return ScalarIV;
};

buildScalarSteps(BaseIV, Step, IndDesc, this, State);
Value *ScalarIV = CreateScalarIV(Step);
buildScalarSteps(ScalarIV, Step, IndDesc, this, TruncToTy, State);
}

void VPInterleaveRecipe::execute(VPTransformState &State) {

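Note (not part of the patch): the buildScalarSteps change above concerns materializing per-lane scalar values of an induction. A minimal, self-contained C++ sketch of the arithmetic it models, with illustrative names only, is:

#include <cstdint>
#include <vector>

// Illustrative sketch only: for a base induction value and a step, the scalar
// value used by lane L is BaseIV + L * Step. buildScalarSteps emits IR that
// computes these per-lane values; this loop just mirrors the arithmetic.
std::vector<int64_t> scalarStepValues(int64_t BaseIV, int64_t Step, unsigned VF) {
  std::vector<int64_t> Lanes(VF);
  for (unsigned L = 0; L < VF; ++L)
    Lanes[L] = BaseIV + static_cast<int64_t>(L) * Step;
  return Lanes;
}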
@@ -648,14 +648,12 @@ void VPlan::prepareToExecute(Value *TripCountV, Value *VectorTripCountV,

// When vectorizing the epilogue loop, the canonical induction start value
// needs to be changed from zero to the value after the main vector loop.
// FIXME: Improve modeling for canonical IV start values in the epilogue loop.
if (CanonicalIVStartValue) {
VPValue *VPV = getOrAddExternalDef(CanonicalIVStartValue);
auto *IV = getCanonicalIV();
assert(all_of(IV->users(),
[](const VPUser *U) {
if (isa<VPScalarIVStepsRecipe>(U) ||
isa<VPDerivedIVRecipe>(U))
if (isa<VPScalarIVStepsRecipe>(U))
return true;
auto *VPI = cast<VPInstruction>(U);
return VPI->getOpcode() ==

@@ -1869,10 +1869,6 @@ public:
"Op must be an operand of the recipe");
return true;
}

/// Check if the induction described by \p ID is canonical, i.e. has the same
/// start, step (of 1), and type as the canonical IV.
bool isCanonical(const InductionDescriptor &ID, Type *Ty) const;
};

/// A recipe for generating the active lane mask for the vector loop that is
@@ -1953,76 +1949,19 @@ public:
}
};

/// A recipe for converting the canonical IV value to the corresponding value of
/// an IV with different start and step values, using Start + CanonicalIV *
/// Step.
class VPDerivedIVRecipe : public VPRecipeBase, public VPValue {
/// The type of the result value. It may be smaller than the type of the
/// induction and in this case it will get truncated to ResultTy.
Type *ResultTy;

/// Induction descriptor for the induction the canonical IV is transformed to.
const InductionDescriptor &IndDesc;

public:
VPDerivedIVRecipe(const InductionDescriptor &IndDesc, VPValue *Start,
VPCanonicalIVPHIRecipe *CanonicalIV, VPValue *Step,
Type *ResultTy)
: VPRecipeBase(VPDerivedIVSC, {Start, CanonicalIV, Step}),
VPValue(VPVDerivedIVSC, nullptr, this), ResultTy(ResultTy),
IndDesc(IndDesc) {}

~VPDerivedIVRecipe() override = default;

/// Method to support type inquiry through isa, cast, and dyn_cast.
static inline bool classof(const VPDef *D) {
return D->getVPDefID() == VPRecipeBase::VPDerivedIVSC;
}
/// Extra classof implementations to allow directly casting from VPUser ->
/// VPDerivedIVRecipe.
static inline bool classof(const VPUser *U) {
auto *R = dyn_cast<VPRecipeBase>(U);
return R && R->getVPDefID() == VPRecipeBase::VPDerivedIVSC;
}
static inline bool classof(const VPRecipeBase *R) {
return R->getVPDefID() == VPRecipeBase::VPDerivedIVSC;
}
static inline bool classof(const VPValue *V) {
return V->getVPValueID() == VPValue::VPVDerivedIVSC;
}

/// Generate the transformed value of the induction at offset StartValue (1.
/// operand) + IV (2. operand) * StepValue (3, operand).
void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// Print the recipe.
void print(raw_ostream &O, const Twine &Indent,
VPSlotTracker &SlotTracker) const override;
#endif

VPValue *getStartValue() const { return getOperand(0); }
VPValue *getCanonicalIV() const { return getOperand(1); }
VPValue *getStepValue() const { return getOperand(2); }

/// Returns true if the recipe only uses the first lane of operand \p Op.
bool onlyFirstLaneUsed(const VPValue *Op) const override {
assert(is_contained(operands(), Op) &&
"Op must be an operand of the recipe");
return true;
}
};

/// A recipe for handling phi nodes of integer and floating-point inductions,
/// producing their scalar values.
class VPScalarIVStepsRecipe : public VPRecipeBase, public VPValue {
/// If not nullptr, truncate the generated values to TruncToTy.
Type *TruncToTy;
const InductionDescriptor &IndDesc;

public:
VPScalarIVStepsRecipe(const InductionDescriptor &IndDesc, VPValue *IV,
VPValue *Step)
: VPRecipeBase(VPScalarIVStepsSC, {IV, Step}), VPValue(nullptr, this),
IndDesc(IndDesc) {}
VPScalarIVStepsRecipe(const InductionDescriptor &IndDesc,
VPValue *CanonicalIV, VPValue *Start, VPValue *Step,
Type *TruncToTy)
: VPRecipeBase(VPScalarIVStepsSC, {CanonicalIV, Start, Step}),
VPValue(nullptr, this), TruncToTy(TruncToTy), IndDesc(IndDesc) {}

~VPScalarIVStepsRecipe() override = default;

@@ -2049,7 +1988,13 @@ public:
VPSlotTracker &SlotTracker) const override;
#endif

VPValue *getStepValue() const { return getOperand(1); }
/// Returns true if the induction is canonical, i.e. starting at 0 and
/// incremented by UF * VF (= the original IV is incremented by 1).
bool isCanonical() const;

VPCanonicalIVPHIRecipe *getCanonicalIV() const;
VPValue *getStartValue() const { return getOperand(1); }
VPValue *getStepValue() const { return getOperand(2); }

/// Returns true if the recipe only uses the first lane of operand \p Op.
bool onlyFirstLaneUsed(const VPValue *Op) const override {

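Note (not part of the patch): the removed VPDerivedIVRecipe documents its result as Start + CanonicalIV * Step, optionally truncated to a narrower ResultTy. A hedged, standalone C++ sketch of that scalar transformation for integer inductions (names and the truncation handling are illustrative, not the recipe's actual codegen):

#include <cstdint>

// Illustrative sketch only: derive a non-canonical IV value from the canonical
// IV (which counts 0, 1, 2, ...) as Start + CanonicalIV * Step, then keep only
// the low ResultBits bits to model truncation to a narrower result type.
uint64_t derivedIVValue(uint64_t Start, uint64_t CanonicalIV, uint64_t Step,
                        unsigned ResultBits) {
  uint64_t Derived = Start + CanonicalIV * Step;
  if (ResultBits < 64)
    Derived &= (uint64_t(1) << ResultBits) - 1; // truncate to ResultBits bits
  return Derived;
}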
@@ -103,7 +103,6 @@ bool VPRecipeBase::mayReadFromMemory() const {

bool VPRecipeBase::mayHaveSideEffects() const {
switch (getVPDefID()) {
case VPDerivedIVSC:
case VPPredInstPHISC:
return false;
case VPWidenIntOrFpInductionSC:
@@ -713,22 +712,22 @@ bool VPWidenIntOrFpInductionRecipe::isCanonical() const {
return StartC && StartC->isZero() && StepC && StepC->isOne();
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void VPDerivedIVRecipe::print(raw_ostream &O, const Twine &Indent,
VPSlotTracker &SlotTracker) const {
O << Indent;
printAsOperand(O, SlotTracker);
O << Indent << "= DERIVED-IV ";
getStartValue()->printAsOperand(O, SlotTracker);
O << " + ";
getCanonicalIV()->printAsOperand(O, SlotTracker);
O << " * ";
getStepValue()->printAsOperand(O, SlotTracker);

if (IndDesc.getStep()->getType() != ResultTy)
O << " (truncated to " << *ResultTy << ")";
VPCanonicalIVPHIRecipe *VPScalarIVStepsRecipe::getCanonicalIV() const {
return cast<VPCanonicalIVPHIRecipe>(getOperand(0));
}

bool VPScalarIVStepsRecipe::isCanonical() const {
auto *CanIV = getCanonicalIV();
// The start value of the steps-recipe must match the start value of the
// canonical induction and it must step by 1.
if (CanIV->getStartValue() != getStartValue())
return false;
auto *StepVPV = getStepValue();
if (StepVPV->hasDefiningRecipe())
return false;
auto *StepC = dyn_cast_or_null<ConstantInt>(StepVPV->getLiveInIRValue());
return StepC && StepC->isOne();
}
#endif

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void VPScalarIVStepsRecipe::print(raw_ostream &O, const Twine &Indent,
@@ -1051,20 +1050,6 @@ void VPCanonicalIVPHIRecipe::print(raw_ostream &O, const Twine &Indent,
}
#endif

bool VPCanonicalIVPHIRecipe::isCanonical(const InductionDescriptor &ID,
Type *Ty) const {
if (Ty != getScalarType())
return false;
// The start value of ID must match the start value of this canonical
// induction.
if (getStartValue()->getLiveInIRValue() != ID.getStartValue())
return false;

ConstantInt *Step = ID.getConstIntStepValue();
// ID must also be incremented by one.
return ID.getInductionOpcode() == Instruction::Add && Step && Step->isOne();
}

bool VPWidenPointerInductionRecipe::onlyScalarsGenerated(ElementCount VF) {
return IsScalarAfterVectorization &&
(!VF.isScalable() || vputils::onlyFirstLaneUsed(this));

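Note (not part of the patch): VPCanonicalIVPHIRecipe::isCanonical above and the removed VPScalarIVStepsRecipe::isCanonical boil down to the same question: does the induction start where the canonical IV starts and advance by an integer add of one? A simplified sketch of that predicate over plain values (field names are assumed for illustration; type checks are omitted):

#include <cstdint>

// Assumed, simplified description of an induction, for illustration only.
struct SimpleInduction {
  int64_t StartValue;
  int64_t StepValue;
  bool IsIntegerAdd; // update operation is an integer add
};

// "Canonical" here means: same start value as the canonical IV and a unit step
// produced by an integer add, mirroring the checks in the diff above.
bool isCanonicalInduction(const SimpleInduction &Ind, int64_t CanonicalStart) {
  return Ind.IsIntegerAdd && Ind.StartValue == CanonicalStart &&
         Ind.StepValue == 1;
}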
@@ -382,40 +382,30 @@ void VPlanTransforms::optimizeInductions(VPlan &Plan, ScalarEvolution &SE) {
VPBasicBlock *HeaderVPBB = Plan.getVectorLoopRegion()->getEntryBasicBlock();
bool HasOnlyVectorVFs = !Plan.hasVF(ElementCount::getFixed(1));
for (VPRecipeBase &Phi : HeaderVPBB->phis()) {
auto *WideIV = dyn_cast<VPWidenIntOrFpInductionRecipe>(&Phi);
if (!WideIV)
auto *IV = dyn_cast<VPWidenIntOrFpInductionRecipe>(&Phi);
if (!IV)
continue;
if (HasOnlyVectorVFs && none_of(WideIV->users(), [WideIV](VPUser *U) {
return U->usesScalars(WideIV);
}))
if (HasOnlyVectorVFs &&
none_of(IV->users(), [IV](VPUser *U) { return U->usesScalars(IV); }))
continue;

auto IP = HeaderVPBB->getFirstNonPhi();
VPCanonicalIVPHIRecipe *CanonicalIV = Plan.getCanonicalIV();
Type *ResultTy = WideIV->getPHINode()->getType();
if (Instruction *TruncI = WideIV->getTruncInst())
ResultTy = TruncI->getType();
const InductionDescriptor &ID = WideIV->getInductionDescriptor();
const InductionDescriptor &ID = IV->getInductionDescriptor();
VPValue *Step =
vputils::getOrCreateVPValueForSCEVExpr(Plan, ID.getStep(), SE);
VPValue *BaseIV = CanonicalIV;
if (!CanonicalIV->isCanonical(ID, ResultTy)) {
BaseIV = new VPDerivedIVRecipe(ID, WideIV->getStartValue(), CanonicalIV,
Step, ResultTy);
HeaderVPBB->insert(BaseIV->getDefiningRecipe(), IP);
}

VPScalarIVStepsRecipe *Steps = new VPScalarIVStepsRecipe(ID, BaseIV, Step);
HeaderVPBB->insert(Steps, IP);
Instruction *TruncI = IV->getTruncInst();
VPScalarIVStepsRecipe *Steps = new VPScalarIVStepsRecipe(
ID, Plan.getCanonicalIV(), IV->getStartValue(), Step,
TruncI ? TruncI->getType() : nullptr);
HeaderVPBB->insert(Steps, HeaderVPBB->getFirstNonPhi());

// Update scalar users of IV to use Step instead. Use SetVector to ensure
// the list of users doesn't contain duplicates.
SetVector<VPUser *> Users(WideIV->user_begin(), WideIV->user_end());
SetVector<VPUser *> Users(IV->user_begin(), IV->user_end());
for (VPUser *U : Users) {
if (HasOnlyVectorVFs && !U->usesScalars(WideIV))
if (HasOnlyVectorVFs && !U->usesScalars(IV))
continue;
for (unsigned I = 0, E = U->getNumOperands(); I != E; I++) {
if (U->getOperand(I) != WideIV)
if (U->getOperand(I) != IV)
continue;
U->setOperand(I, Steps);
}

@@ -90,7 +90,6 @@ public:
/// type identification.
enum {
VPValueSC,
VPVDerivedIVSC,
VPVInstructionSC,
VPVMemoryInstructionSC,
VPVReductionSC,
@@ -356,7 +355,6 @@ public:
/// type identification.
using VPRecipeTy = enum {
VPBranchOnMaskSC,
VPDerivedIVSC,
VPExpandSCEVSC,
VPInstructionSC,
VPInterleaveSC,

@@ -19,7 +19,7 @@ target triple = "aarch64-unknown-linux-gnu"
; VPLANS-NEXT: vector.body:
; VPLANS-NEXT: EMIT vp<%4> = CANONICAL-INDUCTION
; VPLANS-NEXT: ACTIVE-LANE-MASK-PHI vp<%5> = phi vp<%3>, vp<%10>
; VPLANS-NEXT: vp<%6> = SCALAR-STEPS vp<%4>, ir<1>
; VPLANS-NEXT: vp<%6> = SCALAR-STEPS vp<%4>, ir<0>, ir<1>
; VPLANS-NEXT: CLONE ir<%gep> = getelementptr ir<%ptr>, vp<%6>
; VPLANS-NEXT: WIDEN store ir<%gep>, ir<%val>, vp<%5>
; VPLANS-NEXT: EMIT vp<%8> = VF * UF + vp<%4>

@@ -15,7 +15,7 @@ target triple = "arm64-apple-ios"
; CHECK-NEXT: <x1> vector loop: {
; CHECK-NEXT: vector.body:
; CHECK-NEXT: EMIT vp<%2> = CANONICAL-INDUCTION
; CHECK-NEXT: vp<%3> = SCALAR-STEPS vp<%2>, ir<1>
; CHECK-NEXT: vp<%3> = SCALAR-STEPS vp<%2>, ir<0>, ir<1>
; CHECK-NEXT: CLONE ir<%gep.src> = getelementptr ir<%src>, vp<%3>
; CHECK-NEXT: WIDEN ir<%l> = load ir<%gep.src>
; CHECK-NEXT: WIDEN ir<%conv> = fpext ir<%l>
@@ -41,7 +41,7 @@ target triple = "arm64-apple-ios"
; CHECK-NEXT: <x1> vector loop: {
; CHECK-NEXT: vector.body:
; CHECK-NEXT: EMIT vp<%2> = CANONICAL-INDUCTION
; CHECK-NEXT: vp<%3> = SCALAR-STEPS vp<%2>, ir<1>
; CHECK-NEXT: vp<%3> = SCALAR-STEPS vp<%2>, ir<0>, ir<1>
; CHECK-NEXT: CLONE ir<%gep.src> = getelementptr ir<%src>, vp<%3>
; CHECK-NEXT: WIDEN ir<%l> = load ir<%gep.src>
; CHECK-NEXT: WIDEN ir<%conv> = fpext ir<%l>

@@ -56,18 +56,17 @@ define void @vector_reverse_i64(ptr nocapture noundef writeonly %A, ptr nocaptur
; CHECK-NEXT: Successor(s): vector loop
; CHECK: <x1> vector loop: {
; CHECK-NEXT: vector.body:
; CHECK-NEXT: EMIT vp<[[CAN_IV:%.+]]> = CANONICAL-INDUCTION
; CHECK-NEXT: vp<[[TRANS_IV:%.+]]> = DERIVED-IV ir<%n> + vp<[[CAN_IV]]> * ir<-1>
; CHECK-NEXT: vp<[[SCALAR_STEPS:%.+]]> = SCALAR-STEPS vp<[[TRANS_IV]]>, ir<-1>
; CHECK-NEXT: CLONE ir<%i.0> = add vp<[[SCALAR_STEPS]]>, ir<-1>
; CHECK-NEXT: EMIT vp<%3> = CANONICAL-INDUCTION
; CHECK-NEXT: vp<%4> = SCALAR-STEPS vp<%3>, ir<%n>, ir<-1>
; CHECK-NEXT: CLONE ir<%i.0> = add vp<%4>, ir<-1>
; CHECK-NEXT: CLONE ir<%idxprom> = zext ir<%i.0>
; CHECK-NEXT: CLONE ir<%arrayidx> = getelementptr ir<%B>, ir<%idxprom>
; CHECK-NEXT: WIDEN ir<%1> = load ir<%arrayidx>
; CHECK-NEXT: WIDEN ir<%add9> = add ir<%1>, ir<1>
; CHECK-NEXT: CLONE ir<%arrayidx3> = getelementptr ir<%A>, ir<%idxprom>
; CHECK-NEXT: WIDEN store ir<%arrayidx3>, ir<%add9>
; CHECK-NEXT: EMIT vp<[[IV_INC:%.+]]> = VF * UF +(nuw) vp<[[CAN_IV]]>
; CHECK-NEXT: EMIT branch-on-count vp<[[IV_INC]]> vp<%2>
; CHECK-NEXT: EMIT vp<%11> = VF * UF +(nuw) vp<%3>
; CHECK-NEXT: EMIT branch-on-count vp<%11> vp<%2>
; CHECK-NEXT: No successors
; CHECK-NEXT: }
; CHECK-NEXT: Successor(s): middle.block
@@ -188,18 +187,17 @@ define void @vector_reverse_f32(ptr nocapture noundef writeonly %A, ptr nocaptur
; CHECK-NEXT: Successor(s): vector loop
; CHECK: <x1> vector loop: {
; CHECK-NEXT: vector.body:
; CHECK-NEXT: EMIT vp<[[CAN_IV:%.+]]> = CANONICAL-INDUCTION
; CHECK-NEXT: vp<[[TRANS_IV:%.+]]> = DERIVED-IV ir<%n> + vp<[[CAN_IV]]> * ir<-1>
; CHECK-NEXT: vp<[[SCALAR_STEPS:%.+]]> = SCALAR-STEPS vp<[[TRANS_IV]]>, ir<-1>
; CHECK-NEXT: CLONE ir<%i.0> = add vp<[[SCALAR_STEPS]]>, ir<-1>
; CHECK-NEXT: EMIT vp<%3> = CANONICAL-INDUCTION
; CHECK-NEXT: vp<%4> = SCALAR-STEPS vp<%3>, ir<%n>, ir<-1>
; CHECK-NEXT: CLONE ir<%i.0> = add vp<%4>, ir<-1>
; CHECK-NEXT: CLONE ir<%idxprom> = zext ir<%i.0>
; CHECK-NEXT: CLONE ir<%arrayidx> = getelementptr ir<%B>, ir<%idxprom>
; CHECK-NEXT: WIDEN ir<%1> = load ir<%arrayidx>
; CHECK-NEXT: WIDEN ir<%conv1> = fadd ir<%1>, ir<1.000000e+00>
; CHECK-NEXT: CLONE ir<%arrayidx3> = getelementptr ir<%A>, ir<%idxprom>
; CHECK-NEXT: WIDEN store ir<%arrayidx3>, ir<%conv1>
; CHECK-NEXT: EMIT vp<[[IV_INC:%.+]]> = VF * UF +(nuw) vp<[[CAN_IV]]>
; CHECK-NEXT: EMIT branch-on-count vp<[[IV_INC]]> vp<%2>
; CHECK-NEXT: EMIT vp<%11> = VF * UF +(nuw) vp<%3>
; CHECK-NEXT: EMIT branch-on-count vp<%11> vp<%2>
; CHECK-NEXT: No successors
; CHECK-NEXT: }
; CHECK-NEXT: Successor(s): middle.block

@@ -15,7 +15,7 @@ define void @test_chained_first_order_recurrences_1(i16* %ptr) {
; CHECK-NEXT: EMIT vp<%2> = CANONICAL-INDUCTION
; CHECK-NEXT: FIRST-ORDER-RECURRENCE-PHI ir<%for.1> = phi ir<22>, ir<%for.1.next>
; CHECK-NEXT: FIRST-ORDER-RECURRENCE-PHI ir<%for.2> = phi ir<33>, vp<%8>
; CHECK-NEXT: vp<%5> = SCALAR-STEPS vp<%2>, ir<1>
; CHECK-NEXT: vp<%5> = SCALAR-STEPS vp<%2>, ir<0>, ir<1>
; CHECK-NEXT: CLONE ir<%gep.ptr> = getelementptr ir<%ptr>, vp<%5>
; CHECK-NEXT: WIDEN ir<%for.1.next> = load ir<%gep.ptr>
; CHECK-NEXT: EMIT vp<%8> = first-order splice ir<%for.1> ir<%for.1.next>
@@ -65,7 +65,7 @@ define void @test_chained_first_order_recurrences_3(i16* %ptr) {
; CHECK-NEXT: FIRST-ORDER-RECURRENCE-PHI ir<%for.1> = phi ir<22>, ir<%for.1.next>
; CHECK-NEXT: FIRST-ORDER-RECURRENCE-PHI ir<%for.2> = phi ir<33>, vp<%9>
; CHECK-NEXT: FIRST-ORDER-RECURRENCE-PHI ir<%for.3> = phi ir<33>, vp<%10>
; CHECK-NEXT: vp<%6> = SCALAR-STEPS vp<%2>, ir<1>
; CHECK-NEXT: vp<%6> = SCALAR-STEPS vp<%2>, ir<0>, ir<1>
; CHECK-NEXT: CLONE ir<%gep.ptr> = getelementptr ir<%ptr>, vp<%6>
; CHECK-NEXT: WIDEN ir<%for.1.next> = load ir<%gep.ptr>
; CHECK-NEXT: EMIT vp<%9> = first-order splice ir<%for.1> ir<%for.1.next>

@@ -21,7 +21,7 @@ define void @sink_replicate_region_1(i32 %x, i8* %ptr, i32* noalias %dst) optsiz
; CHECK-NEXT: EMIT vp<[[CAN_IV:%.+]]> = CANONICAL-INDUCTION
; CHECK-NEXT: FIRST-ORDER-RECURRENCE-PHI ir<%0> = phi ir<0>, ir<%conv>
; CHECK-NEXT: WIDEN-INDUCTION %iv = phi 0, %iv.next, ir<1>
; CHECK-NEXT: vp<[[STEPS:%.]]> = SCALAR-STEPS vp<[[CAN_IV]]>, ir<1>
; CHECK-NEXT: vp<[[STEPS:%.]]> = SCALAR-STEPS vp<[[CAN_IV]]>, ir<0>, ir<1>
; CHECK-NEXT: EMIT vp<[[MASK:%.+]]> = icmp ule ir<%iv> vp<[[BTC]]>
; CHECK-NEXT: Successor(s): loop.0
; CHECK-EMPTY:
@@ -117,7 +117,7 @@ define void @sink_replicate_region_2(i32 %x, i8 %y, i32* %ptr) optsize {
; CHECK-NEXT: EMIT vp<[[CAN_IV:%.+]]> = CANONICAL-INDUCTION
; CHECK-NEXT: FIRST-ORDER-RECURRENCE-PHI ir<%recur> = phi ir<0>, ir<%recur.next>
; CHECK-NEXT: WIDEN-INDUCTION %iv = phi 0, %iv.next, ir<1>
; CHECK-NEXT: vp<[[STEPS:%.+]]> = SCALAR-STEPS vp<[[CAN_IV]]>, ir<1>
; CHECK-NEXT: vp<[[STEPS:%.+]]> = SCALAR-STEPS vp<[[CAN_IV]]>, ir<0>, ir<1>
; CHECK-NEXT: EMIT vp<[[MASK:%.+]]> = icmp ule ir<%iv> vp<[[BTC]]>
; CHECK-NEXT: Successor(s): loop.0
; CHECK-EMPTY:
@@ -269,7 +269,7 @@ define void @sink_replicate_region_4_requires_split_at_end_of_block(i32 %x, i8*
; CHECK-NEXT: EMIT vp<[[CAN_IV:%.+]]> = CANONICAL-INDUCTION
; CHECK-NEXT: FIRST-ORDER-RECURRENCE-PHI ir<%0> = phi ir<0>, ir<%conv>
; CHECK-NEXT: WIDEN-INDUCTION %iv = phi 0, %iv.next, ir<1>
; CHECK-NEXT: vp<[[STEPS:%.+]]> = SCALAR-STEPS vp<[[CAN_IV]]>, ir<1>
; CHECK-NEXT: vp<[[STEPS:%.+]]> = SCALAR-STEPS vp<[[CAN_IV]]>, ir<0>, ir<1>
; CHECK-NEXT: EMIT vp<[[MASK:%.+]]> = icmp ule ir<%iv> vp<[[BTC]]>
; CHECK-NEXT: REPLICATE ir<%gep> = getelementptr ir<%ptr>, vp<[[STEPS]]>
; CHECK-NEXT: Successor(s): loop.0
@@ -376,7 +376,7 @@ define void @sink_replicate_region_after_replicate_region(i32* %ptr, i32 %x, i8
; CHECK-NEXT: EMIT vp<[[CAN_IV:%.+]]> = CANONICAL-INDUCTION
; CHECK-NEXT: FIRST-ORDER-RECURRENCE-PHI ir<%recur> = phi ir<0>, ir<%recur.next>
; CHECK-NEXT: WIDEN-INDUCTION %iv = phi 0, %iv.next, ir<1>
; CHECK-NEXT: vp<[[STEPS:%.+]]> = SCALAR-STEPS vp<[[CAN_IV]]>, ir<1>
; CHECK-NEXT: vp<[[STEPS:%.+]]> = SCALAR-STEPS vp<[[CAN_IV]]>, ir<0>, ir<1>
; CHECK-NEXT: EMIT vp<[[MASK:%.+]]> = icmp ule ir<%iv> vp<[[BTC]]>
; CHECK-NEXT: Successor(s): loop.0
; CHECK-EMPTY:
@@ -467,8 +467,7 @@ define void @need_new_block_after_sinking_pr56146(i32 %x, i32* %src, i32* noalia
; CHECK-NEXT: vector.body:
; CHECK-NEXT: EMIT vp<[[CAN_IV:%.+]]> = CANONICAL-INDUCTION
; CHECK-NEXT: FIRST-ORDER-RECURRENCE-PHI ir<%.pn> = phi ir<0>, ir<[[L:%.+]]>
; CHECK-NEXT: vp<[[DERIVED_IV:%.+]]> = DERIVED-IV ir<2> + vp<[[CAN_IV]]> * ir<1>
; CHECK-NEXT: vp<[[SCALAR_STEPS:%.+]]> = SCALAR-STEPS vp<[[DERIVED_IV]]>, ir<1>
; CHECK-NEXT: vp<[[SCALAR_STEPS:%.+]]> = SCALAR-STEPS vp<[[CAN_IV]]>, ir<2>, ir<1>
; CHECK-NEXT: EMIT vp<[[WIDE_IV:%.+]]> = WIDEN-CANONICAL-INDUCTION vp<[[CAN_IV]]>
; CHECK-NEXT: EMIT vp<[[CMP:%.+]]> = icmp ule vp<[[WIDE_IV]]> vp<[[BTC]]>
; CHECK-NEXT: Successor(s): loop.0
@@ -488,7 +487,7 @@ define void @need_new_block_after_sinking_pr56146(i32 %x, i32* %src, i32* noalia
; CHECK-EMPTY:
; CHECK-NEXT: pred.store.if:
; CHECK-NEXT: REPLICATE ir<%val> = sdiv vp<[[SPLICE]]>, ir<%x>
; CHECK-NEXT: REPLICATE ir<%gep.dst> = getelementptr ir<%dst>, vp<[[SCALAR_STEPS]]>
; CHECK-NEXT: REPLICATE ir<%gep.dst> = getelementptr ir<%dst>, vp<%5>
; CHECK-NEXT: REPLICATE store ir<%val>, ir<%gep.dst>
; CHECK-NEXT: Successor(s): pred.store.continue
; CHECK-EMPTY:

@@ -48,7 +48,7 @@ for.end:
; CHECK-NEXT: vector.body:
; CHECK-NEXT: EMIT vp<[[CAN_IV:%.+]]> = CANONICAL-INDUCTION
; CHECK-NEXT: WIDEN-INDUCTION %iv = phi 0, %iv.next, ir<1>
; CHECK-NEXT: vp<[[STEPS:%.+]]> = SCALAR-STEPS vp<[[CAN_IV]]>, ir<1>
; CHECK-NEXT: vp<[[STEPS:%.+]]> = SCALAR-STEPS vp<[[CAN_IV]]>, ir<0>, ir<1>
; CHECK-NEXT: EMIT vp<[[COND:%.+]]> = icmp ule ir<%iv> vp<[[BTC]]>
; CHECK-NEXT: WIDEN ir<%cond0> = icmp ult ir<%iv>, ir<13>
; CHECK-NEXT: WIDEN-SELECT ir<%s> = select ir<%cond0>, ir<10>, ir<20>

@@ -13,8 +13,7 @@
; DBG-NEXT: <x1> vector loop: {
; DBG-NEXT: vector.body:
; DBG-NEXT: EMIT vp<[[CAN_IV:%.+]]> = CANONICAL-INDUCTION
; DBG-NEXT: vp<[[DERIVED_IV:%.+]]> = DERIVED-IV ir<%start> + vp<[[CAN_IV]]> * ir<1>
; DBG-NEXT: vp<[[IV_STEPS:%.]]> = SCALAR-STEPS vp<[[DERIVED_IV]]>, ir<1>
; DBG-NEXT: vp<[[IV_STEPS:%.]]> = SCALAR-STEPS vp<[[CAN_IV]]>, ir<%start>, ir<1>
; DBG-NEXT: CLONE ir<%min> = call @llvm.smin.i32(vp<[[IV_STEPS]]>, ir<65535>)
; DBG-NEXT: CLONE ir<%arrayidx> = getelementptr ir<%dst>, vp<[[IV_STEPS]]>
; DBG-NEXT: CLONE store ir<%min>, ir<%arrayidx>
@@ -70,9 +69,8 @@ declare i32 @llvm.smin.i32(i32, i32)
; DBG-NEXT: <x1> vector loop: {
; DBG-NEXT: vector.body:
; DBG-NEXT: EMIT vp<[[CAN_IV:%.+]]> = CANONICAL-INDUCTION
; DBG-NEXT: vp<[[DERIVED_IV:%.+]]> = DERIVED-IV ir<false> + vp<[[CAN_IV]]> * ir<true>
; DBG-NEXT: vp<[[STEPS1:%.+]]> = SCALAR-STEPS vp<[[DERIVED_IV]]>, ir<true>
; DBG-NEXT: vp<[[STEPS2:%.+]]> = SCALAR-STEPS vp<[[CAN_IV]]>, ir<1>
; DBG-NEXT: vp<[[STEPS1:%.+]]> = SCALAR-STEPS vp<[[CAN_IV]]>, ir<false>, ir<true>
; DBG-NEXT: vp<[[STEPS2:%.+]]> = SCALAR-STEPS vp<[[CAN_IV]]>, ir<0>, ir<1>
; DBG-NEXT: Successor(s): cond.false
; DBG-EMPTY:
; DBG-NEXT: cond.false:
@@ -183,14 +181,13 @@ exit:
; DBG-EMPTY:
; DBG-NEXT: <x1> vector loop: {
; DBG-NEXT: vector.body:
; DBG-NEXT: EMIT vp<[[CAN_IV:%.+]]> = CANONICAL-INDUCTION
; DBG-NEXT: FIRST-ORDER-RECURRENCE-PHI ir<%for> = phi ir<0>, vp<[[SCALAR_STEPS:.+]]>
; DBG-NEXT: vp<[[DERIVED_IV:%.+]]> = DERIVED-IV ir<0> + vp<[[CAN_IV]]> * ir<1> (truncated to i32)
; DBG-NEXT: vp<[[SCALAR_STEPS]]> = SCALAR-STEPS vp<[[DERIVED_IV]]>, ir<1>
; DBG-NEXT: EMIT vp<[[SPLICE:%.+]]> = first-order splice ir<%for> vp<[[SCALAR_STEPS]]>
; DBG-NEXT: CLONE store vp<[[SPLICE]]>, ir<%dst>
; DBG-NEXT: EMIT vp<[[IV_INC:%.+]]> = VF * UF +(nuw) vp<[[CAN_IV]]>
; DBG-NEXT: EMIT branch-on-count vp<[[IV_INC]]> vp<%1>
; DBG-NEXT: EMIT vp<%2> = CANONICAL-INDUCTION
; DBG-NEXT: FIRST-ORDER-RECURRENCE-PHI ir<%for> = phi ir<0>, vp<%4>
; DBG-NEXT: vp<%4> = SCALAR-STEPS vp<%2>, ir<0>, ir<1>
; DBG-NEXT: EMIT vp<%5> = first-order splice ir<%for> vp<%4>
; DBG-NEXT: CLONE store vp<%5>, ir<%dst>
; DBG-NEXT: EMIT vp<%7> = VF * UF +(nuw) vp<%2>
; DBG-NEXT: EMIT branch-on-count vp<%7> vp<%1>
; DBG-NEXT: No successors
; DBG-NEXT: }
; DBG-NEXT: Successor(s): middle.block

@@ -23,7 +23,7 @@ define void @print_call_and_memory(i64 %n, float* noalias %y, float* noalias %x)
; CHECK-NEXT: N1 [label =
; CHECK-NEXT: "vector.body:\l" +
; CHECK-NEXT: " EMIT vp\<[[CAN_IV:%.+]]\> = CANONICAL-INDUCTION\l" +
; CHECK-NEXT: " vp\<[[STEPS:%.+]]\> = SCALAR-STEPS vp\<[[CAN_IV]]\>, ir\<1\>\l" +
; CHECK-NEXT: " vp\<[[STEPS:%.+]]\> = SCALAR-STEPS vp\<[[CAN_IV]]\>, ir\<0\>, ir\<1\>\l" +
; CHECK-NEXT: " CLONE ir\<%arrayidx\> = getelementptr ir\<%y\>, vp\<[[STEPS]]\>\l" +
; CHECK-NEXT: " WIDEN ir\<%lv\> = load ir\<%arrayidx\>\l" +
; CHECK-NEXT: " WIDEN-CALL ir\<%call\> = call @llvm.sqrt.f32(ir\<%lv\>) (using vector intrinsic)\l" +

@@ -17,7 +17,7 @@ define void @print_call_and_memory(i64 %n, float* noalias %y, float* noalias %x)
; CHECK-NEXT: <x1> vector loop: {
; CHECK-NEXT: vector.body:
; CHECK-NEXT: EMIT vp<[[CAN_IV:%.+]]> = CANONICAL-INDUCTION
; CHECK-NEXT: vp<[[STEPS:%.+]]> = SCALAR-STEPS vp<[[CAN_IV]]>, ir<1>
; CHECK-NEXT: vp<[[STEPS:%.+]]> = SCALAR-STEPS vp<[[CAN_IV]]>, ir<0>, ir<1>
; CHECK-NEXT: CLONE ir<%arrayidx> = getelementptr ir<%y>, vp<[[STEPS]]>
; CHECK-NEXT: WIDEN ir<%lv> = load ir<%arrayidx>
; CHECK-NEXT: WIDEN-CALL ir<%call> = call @llvm.sqrt.f32(ir<%lv>)
@@ -64,7 +64,7 @@ define void @print_widen_gep_and_select(i64 %n, float* noalias %y, float* noalia
; CHECK-NEXT: vector.body:
; CHECK-NEXT: EMIT vp<[[CAN_IV:%.+]]> = CANONICAL-INDUCTION
; CHECK-NEXT: WIDEN-INDUCTION %iv = phi %iv.next, 0, ir<1>
; CHECK-NEXT: vp<[[STEPS:%.+]]> = SCALAR-STEPS vp<[[CAN_IV]]>, ir<1>
; CHECK-NEXT: vp<[[STEPS:%.+]]> = SCALAR-STEPS vp<[[CAN_IV]]>, ir<0>, ir<1>
; CHECK-NEXT: WIDEN-GEP Inv[Var] ir<%arrayidx> = getelementptr ir<%y>, ir<%iv>
; CHECK-NEXT: WIDEN ir<%lv> = load ir<%arrayidx>
; CHECK-NEXT: WIDEN ir<%cmp> = icmp eq ir<%arrayidx>, ir<%z>
@@ -115,7 +115,7 @@ define float @print_reduction(i64 %n, float* noalias %y) {
; CHECK-NEXT: vector.body:
; CHECK-NEXT: EMIT vp<[[CAN_IV:%.+]]> = CANONICAL-INDUCTION
; CHECK-NEXT: WIDEN-REDUCTION-PHI ir<%red> = phi ir<0.000000e+00>, ir<%red.next>
; CHECK-NEXT: vp<[[STEPS:%.+]]> = SCALAR-STEPS vp<[[CAN_IV]]>, ir<1>
; CHECK-NEXT: vp<[[STEPS:%.+]]> = SCALAR-STEPS vp<[[CAN_IV]]>, ir<0>, ir<1>
; CHECK-NEXT: CLONE ir<%arrayidx> = getelementptr ir<%y>, vp<[[STEPS]]>
; CHECK-NEXT: WIDEN ir<%lv> = load ir<%arrayidx>
; CHECK-NEXT: REDUCE ir<%red.next> = ir<%red> + fast reduce.fadd (ir<%lv>)
@@ -160,7 +160,7 @@ define void @print_reduction_with_invariant_store(i64 %n, float* noalias %y, flo
; CHECK-NEXT: vector.body:
; CHECK-NEXT: EMIT vp<[[CAN_IV:%.+]]> = CANONICAL-INDUCTION
; CHECK-NEXT: WIDEN-REDUCTION-PHI ir<%red> = phi ir<0.000000e+00>, ir<%red.next>
; CHECK-NEXT: vp<[[IV:%.+]]> = SCALAR-STEPS vp<[[CAN_IV]]>, ir<1>
; CHECK-NEXT: vp<[[IV:%.+]]> = SCALAR-STEPS vp<[[CAN_IV]]>, ir<0>, ir<1>
; CHECK-NEXT: CLONE ir<%arrayidx> = getelementptr ir<%y>, vp<[[IV]]>
; CHECK-NEXT: WIDEN ir<%lv> = load ir<%arrayidx>
; CHECK-NEXT: REDUCE ir<%red.next> = ir<%red> + fast reduce.fadd (ir<%lv>) (with final reduction value stored in invariant address sank outside of loop)
@@ -204,7 +204,7 @@ define void @print_replicate_predicated_phi(i64 %n, i64* %x) {
; CHECK-NEXT: vector.body:
; CHECK-NEXT: EMIT vp<[[CAN_IV:%.+]]> = CANONICAL-INDUCTION
; CHECK-NEXT: WIDEN-INDUCTION %i = phi 0, %i.next, ir<1>
; CHECK-NEXT: vp<[[STEPS:%.+]]> = SCALAR-STEPS vp<[[CAN_IV]]>, ir<1>
; CHECK-NEXT: vp<[[STEPS:%.+]]> = SCALAR-STEPS vp<[[CAN_IV]]>, ir<0>, ir<1>
; CHECK-NEXT: WIDEN ir<%cmp> = icmp ult ir<%i>, ir<5>
; CHECK-NEXT: Successor(s): if.then
; CHECK-EMPTY:
@@ -282,8 +282,7 @@ define void @print_interleave_groups(i32 %C, i32 %D) {
; CHECK-NEXT: <x1> vector loop: {
; CHECK-NEXT: vector.body:
; CHECK-NEXT: EMIT vp<[[CAN_IV:%.+]]> = CANONICAL-INDUCTION
; CHECK-NEXT: vp<[[DERIVED_IV:%.+]]> = DERIVED-IV ir<0> + vp<[[CAN_IV]]> * ir<4>
; CHECK-NEXT: vp<[[STEPS:%.+]]> = SCALAR-STEPS vp<[[DERIVED_IV]]>, ir<4>
; CHECK-NEXT: vp<[[STEPS:%.+]]> = SCALAR-STEPS vp<[[CAN_IV]]>, ir<0>, ir<4>
; CHECK-NEXT: CLONE ir<%gep.AB.0> = getelementptr ir<@AB>, ir<0>, vp<[[STEPS]]>
; CHECK-NEXT: INTERLEAVE-GROUP with factor 4 at %AB.0, ir<%gep.AB.0>
; CHECK-NEXT: ir<%AB.0> = load from index 0
@@ -350,7 +349,7 @@ define float @print_fmuladd_strict(float* %a, float* %b, i64 %n) {
; CHECK-NEXT: vector.body:
; CHECK-NEXT: EMIT vp<[[CAN_IV:%.+]]> = CANONICAL-INDUCTION
; CHECK-NEXT: WIDEN-REDUCTION-PHI ir<%sum.07> = phi ir<0.000000e+00>, ir<%muladd>
; CHECK-NEXT: vp<[[STEPS:%.+]]> = SCALAR-STEPS vp<[[CAN_IV]]>, ir<1>
; CHECK-NEXT: vp<[[STEPS:%.+]]> = SCALAR-STEPS vp<[[CAN_IV]]>, ir<0>, ir<1>
; CHECK-NEXT: CLONE ir<%arrayidx> = getelementptr ir<%a>, vp<[[STEPS]]>
; CHECK-NEXT: WIDEN ir<%l.a> = load ir<%arrayidx>
; CHECK-NEXT: CLONE ir<%arrayidx2> = getelementptr ir<%b>, vp<[[STEPS]]>
@@ -399,7 +398,7 @@ define void @debug_loc_vpinstruction(i32* nocapture %asd, i32* nocapture %bsd) !
; CHECK-NEXT: <x1> vector loop: {
; CHECK-NEXT: vector.body:
; CHECK-NEXT: EMIT vp<[[CAN_IV:%.+]]> = CANONICAL-INDUCTION
; CHECK-NEXT: vp<[[STEPS:%.+]]> = SCALAR-STEPS vp<[[CAN_IV]]>, ir<1>
; CHECK-NEXT: vp<[[STEPS:%.+]]> = SCALAR-STEPS vp<[[CAN_IV]]>, ir<0>, ir<1>
; CHECK-NEXT: CLONE ir<%isd> = getelementptr ir<%asd>, vp<[[STEPS]]>
; CHECK-NEXT: WIDEN ir<%lsd> = load ir<%isd>
; CHECK-NEXT: WIDEN ir<%psd> = add ir<%lsd>, ir<23>
@@ -497,8 +496,7 @@ define void @print_expand_scev(i64 %y, i8* %ptr) {
; CHECK-NEXT: WIDEN-INDUCTION\l" +
; CHECK-NEXT: " %iv = phi %iv.next, 0\l" +
; CHECK-NEXT: " ir<%v2>, vp<[[EXP_SCEV]]>
; CHECK-NEXT: vp<[[DERIVED_IV:%.+]]> = DERIVED-IV ir<0> + vp<[[CAN_IV]]> * vp<[[EXP_SCEV]]>
; CHECK-NEXT: vp<[[STEPS:%.+]]> = SCALAR-STEPS vp<[[DERIVED_IV]]>, vp<[[EXP_SCEV]]>
; CHECK-NEXT: vp<[[STEPS:%.+]]> = SCALAR-STEPS vp<[[CAN_IV]]>, ir<0>, vp<[[EXP_SCEV]]>
; CHECK-NEXT: WIDEN ir<%v3> = add ir<%v2>, ir<1>
; CHECK-NEXT: REPLICATE ir<%gep> = getelementptr ir<%ptr>, vp<[[STEPS]]>
; CHECK-NEXT: REPLICATE store ir<%v3>, ir<%gep>
@@ -544,7 +542,7 @@ define i32 @print_exit_value(i8* %ptr, i32 %off) {
; CHECK-NEXT: vector.body:
; CHECK-NEXT: EMIT vp<[[CAN_IV:%.+]]> = CANONICAL-INDUCTION
; CHECK-NEXT: WIDEN-INDUCTION %iv = phi 0, %iv.next, ir<1>
; CHECK-NEXT: vp<[[STEPS:%.+]]> = SCALAR-STEPS vp<[[CAN_IV]]>, ir<1>
; CHECK-NEXT: vp<[[STEPS:%.+]]> = SCALAR-STEPS vp<[[CAN_IV]]>, ir<0>, ir<1>
; CHECK-NEXT: CLONE ir<%gep> = getelementptr ir<%ptr>, vp<[[STEPS]]>
; CHECK-NEXT: WIDEN ir<%add> = add ir<%iv>, ir<%off>
; CHECK-NEXT: WIDEN store ir<%gep>, ir<0>

@@ -16,7 +16,7 @@ define void @sink_with_sideeffects(i1 %c, i8* %ptr) {
; CHECK-NEXT: <x1> vector loop: {
; CHECK-NEXT: vector.body:
; CHECK-NEXT: EMIT vp<[[CAN_IV:%.+]]> = CANONICAL-INDUCTION
; CHECK-NEXT: vp<[[STEPS:%.+]]> = SCALAR-STEPS vp<[[CAN_IV]]>, ir<1>
; CHECK-NEXT: vp<[[STEPS:%.+]]> = SCALAR-STEPS vp<[[CAN_IV]]>, ir<0>, ir<1>
; CHECK-NEXT: CLONE ir<%tmp2> = getelementptr ir<%ptr>, vp<[[STEPS]]>
; CHECK-NEXT: CLONE ir<%tmp3> = load ir<%tmp2>
; CHECK-NEXT: CLONE store ir<0>, ir<%tmp2>

@@ -22,7 +22,7 @@ target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3
; CHECK-NEXT: vector.body:
; CHECK-NEXT: EMIT vp<[[CAN_IV:%.+]]> = CANONICAL-INDUCTION
; CHECK-NEXT: WIDEN-INDUCTION %iv = phi 0, %iv.next, ir<1>
; CHECK-NEXT: vp<[[STEPS:%.+]]> = SCALAR-STEPS vp<[[CAN_IV]]>, ir<1>
; CHECK-NEXT: vp<[[STEPS:%.+]]> = SCALAR-STEPS vp<[[CAN_IV]]>, ir<0>, ir<1>
; CHECK-NEXT: EMIT vp<[[MASK:%.+]]> = icmp ule ir<%iv> vp<[[BTC]]>
; CHECK-NEXT: Successor(s): loop.0

@@ -89,7 +89,7 @@ exit:
; CHECK-NEXT: vector.body:
; CHECK-NEXT: EMIT vp<[[CAN_IV:%.+]]> = CANONICAL-INDUCTION
; CHECK-NEXT: WIDEN-INDUCTION %iv = phi 0, %iv.next, ir<1>
; CHECK-NEXT: vp<[[STEPS:%.+]]> = SCALAR-STEPS vp<[[CAN_IV]]>, ir<1>
; CHECK-NEXT: vp<[[STEPS:%.+]]> = SCALAR-STEPS vp<[[CAN_IV]]>, ir<0>, ir<1>
; CHECK-NEXT: EMIT vp<[[MASK:%.+]]> = icmp ule ir<%iv> vp<[[BTC]]>
; CHECK-NEXT: Successor(s): pred.load

@@ -168,7 +168,7 @@ exit:
; CHECK-NEXT: vector.body:
; CHECK-NEXT: EMIT vp<[[CAN_IV:%.+]]> = CANONICAL-INDUCTION
; CHECK-NEXT: WIDEN-INDUCTION %iv = phi 0, %iv.next, ir<1>
; CHECK-NEXT: vp<[[STEPS:%.+]]> = SCALAR-STEPS vp<[[CAN_IV]]>, ir<1>
; CHECK-NEXT: vp<[[STEPS:%.+]]> = SCALAR-STEPS vp<[[CAN_IV]]>, ir<0>, ir<1>
; CHECK-NEXT: EMIT vp<[[MASK:%.+]]> = icmp ule ir<%iv> vp<[[BTC]]>
; CHECK-NEXT: Successor(s): pred.load

@@ -249,8 +249,7 @@ define void @uniform_gep(i64 %k, i16* noalias %A, i16* noalias %B) {
; CHECK-NEXT: vector.body:
; CHECK-NEXT: EMIT vp<[[CAN_IV:%.+]]> = CANONICAL-INDUCTION
; CHECK-NEXT: WIDEN-INDUCTION %iv = phi 21, %iv.next, ir<1>
; CHECK-NEXT: vp<[[DERIVED_IV:%.+]]> = DERIVED-IV ir<21> + vp<[[CAN_IV]]> * ir<1>
; CHECK-NEXT: vp<[[STEPS:%.+]]> = SCALAR-STEPS vp<[[DERIVED_IV]]>, ir<1>
; CHECK-NEXT: vp<[[STEPS:%.+]]> = SCALAR-STEPS vp<[[CAN_IV]]>, ir<21>, ir<1>
; CHECK-NEXT: EMIT vp<[[WIDE_CAN_IV:%.+]]> = WIDEN-CANONICAL-INDUCTION vp<[[CAN_IV]]>
; CHECK-NEXT: EMIT vp<[[MASK:%.+]]> = icmp ule vp<[[WIDE_CAN_IV]]> vp<[[BTC]]>
; CHECK-NEXT: CLONE ir<%gep.A.uniform> = getelementptr ir<%A>, ir<0>
@@ -325,7 +324,7 @@ define void @pred_cfg1(i32 %k, i32 %j) {
; CHECK-NEXT: vector.body:
; CHECK-NEXT: EMIT vp<[[CAN_IV:%.+]]> = CANONICAL-INDUCTION
; CHECK-NEXT: WIDEN-INDUCTION %iv = phi 0, %iv.next, ir<1>
; CHECK-NEXT: vp<[[STEPS:%.+]]> = SCALAR-STEPS vp<[[CAN_IV]]>, ir<1>
; CHECK-NEXT: vp<[[STEPS:%.+]]> = SCALAR-STEPS vp<[[CAN_IV]]>, ir<0>, ir<1>
; CHECK-NEXT: EMIT vp<[[MASK1:%.+]]> = icmp ule ir<%iv> vp<[[BTC]]>
; CHECK-NEXT: WIDEN ir<%c.1> = icmp ult ir<%iv>, ir<%j>
; CHECK-NEXT: WIDEN ir<%mul> = mul ir<%iv>, ir<10>
@@ -426,7 +425,7 @@ define void @pred_cfg2(i32 %k, i32 %j) {
; CHECK-NEXT: vector.body:
; CHECK-NEXT: EMIT vp<[[CAN_IV:%.+]]> = CANONICAL-INDUCTION
; CHECK-NEXT: WIDEN-INDUCTION %iv = phi 0, %iv.next, ir<1>
; CHECK-NEXT: vp<[[STEPS:%.+]]> = SCALAR-STEPS vp<[[CAN_IV]]>, ir<1>
; CHECK-NEXT: vp<[[STEPS:%.+]]> = SCALAR-STEPS vp<[[CAN_IV]]>, ir<0>, ir<1>
; CHECK-NEXT: EMIT vp<[[MASK1:%.+]]> = icmp ule ir<%iv> vp<[[BTC]]>
; CHECK-NEXT: WIDEN ir<%mul> = mul ir<%iv>, ir<10>
; CHECK-NEXT: WIDEN ir<%c.0> = icmp ult ir<%iv>, ir<%j>
@@ -542,7 +541,7 @@ define void @pred_cfg3(i32 %k, i32 %j) {
; CHECK-NEXT: vector.body:
; CHECK-NEXT: EMIT vp<[[CAN_IV:%.+]]> = CANONICAL-INDUCTION
; CHECK-NEXT: WIDEN-INDUCTION %iv = phi 0, %iv.next, ir<1>
; CHECK-NEXT: vp<[[STEPS:%.+]]> = SCALAR-STEPS vp<[[CAN_IV]]>, ir<1>
; CHECK-NEXT: vp<[[STEPS:%.+]]> = SCALAR-STEPS vp<[[CAN_IV]]>, ir<0>, ir<1>
; CHECK-NEXT: EMIT vp<[[MASK1:%.+]]> = icmp ule ir<%iv> vp<[[BTC]]>
; CHECK-NEXT: WIDEN ir<%mul> = mul ir<%iv>, ir<10>
; CHECK-NEXT: WIDEN ir<%c.0> = icmp ult ir<%iv>, ir<%j>
@@ -661,7 +660,7 @@ define void @merge_3_replicate_region(i32 %k, i32 %j) {
; CHECK-NEXT: vector.body:
; CHECK-NEXT: EMIT vp<[[CAN_IV:%.+]]> = CANONICAL-INDUCTION
; CHECK-NEXT: WIDEN-INDUCTION %iv = phi 0, %iv.next, ir<1>
; CHECK-NEXT: vp<[[STEPS:%.+]]> = SCALAR-STEPS vp<[[CAN_IV]]>, ir<1>
; CHECK-NEXT: vp<[[STEPS:%.+]]> = SCALAR-STEPS vp<[[CAN_IV]]>, ir<0>, ir<1>
; CHECK-NEXT: EMIT vp<[[MASK:%.+]]> = icmp ule ir<%iv> vp<[[BTC]]>
; CHECK-NEXT: REPLICATE ir<%gep.a> = getelementptr ir<@a>, ir<0>, vp<[[STEPS]]>
; CHECK-NEXT: Successor(s): loop.0
@@ -776,7 +775,7 @@ define void @update_2_uses_in_same_recipe_in_merged_block(i32 %k) {
; CHECK-NEXT: vector.body:
; CHECK-NEXT: EMIT vp<[[CAN_IV:%.+]]> = CANONICAL-INDUCTION
; CHECK-NEXT: WIDEN-INDUCTION %iv = phi 0, %iv.next, ir<1>
; CHECK-NEXT: vp<[[STEPS:%.+]]> = SCALAR-STEPS vp<[[CAN_IV]]>, ir<1>
; CHECK-NEXT: vp<[[STEPS:%.+]]> = SCALAR-STEPS vp<[[CAN_IV]]>, ir<0>, ir<1>
; CHECK-NEXT: EMIT vp<[[MASK:%.+]]> = icmp ule ir<%iv> vp<[[BTC]]>
; CHECK-NEXT: REPLICATE ir<%gep.a> = getelementptr ir<@a>, ir<0>, vp<[[STEPS]]>
; CHECK-NEXT: Successor(s): loop.0
@@ -845,7 +844,7 @@ define void @recipe_in_merge_candidate_used_by_first_order_recurrence(i32 %k) {
; CHECK-NEXT: EMIT vp<[[CAN_IV:%.+]]> = CANONICAL-INDUCTION
; CHECK-NEXT: WIDEN-INDUCTION %iv = phi 0, %iv.next, ir<1>
; CHECK-NEXT: FIRST-ORDER-RECURRENCE-PHI ir<%for> = phi ir<0>, vp<[[PRED:%.+]]>
; CHECK-NEXT: vp<[[STEPS:%.+]]> = SCALAR-STEPS vp<[[CAN_IV]]>, ir<1>
; CHECK-NEXT: vp<[[STEPS:%.+]]> = SCALAR-STEPS vp<[[CAN_IV]]>, ir<0>, ir<1>
; CHECK-NEXT: EMIT vp<[[MASK:%.+]]> = icmp ule ir<%iv> vp<[[BTC]]>
; CHECK-NEXT: REPLICATE ir<%gep.a> = getelementptr ir<@a>, ir<0>, vp<[[STEPS]]>
; CHECK-NEXT: Successor(s): pred.load
@@ -997,7 +996,7 @@ define void @sinking_requires_duplication(float* %addr) {
; CHECK-NEXT: <x1> vector loop: {
; CHECK-NEXT: vector.body:
; CHECK-NEXT: EMIT vp<[[CAN_IV:%.+]]> = CANONICAL-INDUCTION
; CHECK-NEXT: vp<[[STEPS:%.+]]> = SCALAR-STEPS vp<[[CAN_IV]]>, ir<1>
; CHECK-NEXT: vp<[[STEPS:%.+]]> = SCALAR-STEPS vp<[[CAN_IV]]>, ir<0>, ir<1>
; CHECK-NEXT: CLONE ir<%gep> = getelementptr ir<%addr>, vp<[[STEPS]]>
; CHECK-NEXT: Successor(s): loop.body
; CHECK-EMPTY:
@@ -1075,8 +1074,7 @@ define void @merge_with_dead_gep_between_regions(i32 %n, i32* noalias %src, i32*
; CHECK-NEXT: <x1> vector loop: {
; CHECK-NEXT: vector.body:
; CHECK-NEXT: EMIT vp<[[CAN_IV:%.+]]> = CANONICAL-INDUCTION
; CHECK-NEXT: vp<[[DERIVED_IV:%.+]]> = DERIVED-IV ir<%n> + vp<[[CAN_IV]]> * ir<-1>
; CHECK-NEXT: vp<[[SCALAR_STEPS:%.+]]> = SCALAR-STEPS vp<[[DERIVED_IV]]>, ir<-1>
; CHECK-NEXT: vp<[[SCALAR_STEPS:%.+]]> = SCALAR-STEPS vp<[[CAN_IV]]>, ir<%n>, ir<-1>
; CHECK-NEXT: EMIT vp<[[WIDE_IV:%.+]]> = WIDEN-CANONICAL-INDUCTION vp<[[CAN_IV]]>
; CHECK-NEXT: EMIT vp<[[MASK:%.+]]> = icmp ule vp<[[WIDE_IV]]> vp<[[BTC]]>
; CHECK-NEXT: Successor(s): loop.0