[SVE][LoopVectorize] Add support for scalable vectorization of first-order recurrences
Adds support for scalable vectorization of loops containing first-order recurrences, e.g.:

```c
for (int i = 0; i < n; i++)
  b[i] = a[i] + a[i - 1];
```

This patch changes fixFirstOrderRecurrence for scalable vectors to take vscale into account when inserting into and extracting from the last lane of a vector. CreateVectorSplice has been added to construct a vector for the recurrence; it returns a splice intrinsic for scalable types. For fixed-width vectors the behaviour remains unchanged, as CreateVectorSplice returns a shufflevector instead.

The tests included here are the same as test/Transforms/LoopVectorize/first-order-recurrence.ll.

Reviewed By: david-arm, fhahn

Differential Revision: https://reviews.llvm.org/D101076
parent cdf33962d9
commit 8c9742bd23
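To see what this enables, here is a minimal sketch, not taken from the diff below, of one vector iteration of the recurrence for the loop in the commit message, assuming VF = vscale x 4 and i32 elements (the function name @recurrence_step is hypothetical):

```llvm
; Illustrative sketch only. %recur carries the a[i] values of the previous
; vector iteration; splicing with Imm = -1 yields
; [ recur[VL-1], cur[0], ..., cur[VL-2] ], i.e. the lane-wise a[i-1]
; needed to compute b[i] = a[i] + a[i-1].
define <vscale x 4 x i32> @recurrence_step(<vscale x 4 x i32> %recur, <vscale x 4 x i32> %cur) {
  %a.prev = call <vscale x 4 x i32> @llvm.experimental.vector.splice.nxv4i32(<vscale x 4 x i32> %recur, <vscale x 4 x i32> %cur, i32 -1)
  %b = add <vscale x 4 x i32> %cur, %a.prev
  ret <vscale x 4 x i32> %b
}

declare <vscale x 4 x i32> @llvm.experimental.vector.splice.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, i32)
```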
```diff
@@ -2510,6 +2510,16 @@ public:
   /// Return a vector value that contains the vector V reversed
   Value *CreateVectorReverse(Value *V, const Twine &Name = "");
 
+  /// Return a vector splice intrinsic if using scalable vectors, otherwise
+  /// return a shufflevector. If the immediate is positive, a vector is
+  /// extracted from concat(V1, V2), starting at Imm. If the immediate
+  /// is negative, we extract -Imm elements from V1 and the remaining
+  /// elements from V2. Imm is a signed integer in the range
+  /// -VL <= Imm < VL (where VL is the runtime vector length of the
+  /// source/result vector)
+  Value *CreateVectorSplice(Value *V1, Value *V2, int64_t Imm,
+                            const Twine &Name = "");
+
   /// Return a vector value that contains \arg V broadcasted to \p
   /// NumElts elements.
   Value *CreateVectorSplat(unsigned NumElts, Value *V, const Twine &Name = "");
```
```diff
@@ -1027,6 +1027,34 @@ Value *IRBuilderBase::CreateVectorReverse(Value *V, const Twine &Name) {
   return CreateShuffleVector(V, ShuffleMask, Name);
 }
 
+Value *IRBuilderBase::CreateVectorSplice(Value *V1, Value *V2, int64_t Imm,
+                                         const Twine &Name) {
+  assert(isa<VectorType>(V1->getType()) && "Unexpected type");
+  assert(V1->getType() == V2->getType() &&
+         "Splice expects matching operand types!");
+
+  if (auto *VTy = dyn_cast<ScalableVectorType>(V1->getType())) {
+    Module *M = BB->getParent()->getParent();
+    Function *F = Intrinsic::getDeclaration(
+        M, Intrinsic::experimental_vector_splice, VTy);
+
+    Value *Ops[] = {V1, V2, getInt32(Imm)};
+    return Insert(CallInst::Create(F, Ops), Name);
+  }
+
+  unsigned NumElts = cast<FixedVectorType>(V1->getType())->getNumElements();
+  assert(((-Imm <= NumElts) || (Imm < NumElts)) &&
+         "Invalid immediate for vector splice!");
+
+  // Keep the original behaviour for fixed vector
+  unsigned Idx = (NumElts + Imm) % NumElts;
+  SmallVector<int, 8> Mask;
+  for (unsigned I = 0; I < NumElts; ++I)
+    Mask.push_back(Idx + I);
+
+  return CreateShuffleVector(V1, V2, Mask);
+}
+
 Value *IRBuilderBase::CreateVectorSplat(unsigned NumElts, Value *V,
                                         const Twine &Name) {
   auto EC = ElementCount::getFixed(NumElts);
```
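As a cross-check of the fixed-width fallback above: for Imm = -1 on <4 x i32>, Idx = (4 + -1) % 4 = 3, so the mask is <3, 4, 5, 6>, i.e. the last element of V1 followed by the first three of V2. A hand-written equivalent of what the builder would emit (the function name is hypothetical):

```llvm
; Illustrative only: the shufflevector the fixed-width path produces for
; CreateVectorSplice(V1, V2, -1) on <4 x i32>. For Imm = 1 the mask would
; instead be <1, 2, 3, 4>.
define <4 x i32> @splice_minus_one(<4 x i32> %v1, <4 x i32> %v2) {
  %res = shufflevector <4 x i32> %v1, <4 x i32> %v2, <4 x i32> <i32 3, i32 4, i32 5, i32 6>
  ret <4 x i32> %res
}
```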
```diff
@@ -4173,14 +4173,18 @@ void InnerLoopVectorizer::fixFirstOrderRecurrence(PHINode *Phi,
   auto *ScalarInit = Phi->getIncomingValueForBlock(Preheader);
   auto *Previous = Phi->getIncomingValueForBlock(Latch);
 
+  auto *IdxTy = Builder.getInt32Ty();
+  auto *One = ConstantInt::get(IdxTy, 1);
+
   // Create a vector from the initial value.
   auto *VectorInit = ScalarInit;
   if (VF.isVector()) {
     Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
-    assert(!VF.isScalable() && "VF is assumed to be non scalable.");
+    auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, VF);
+    auto *LastIdx = Builder.CreateSub(RuntimeVF, One);
     VectorInit = Builder.CreateInsertElement(
-        PoisonValue::get(VectorType::get(VectorInit->getType(), VF)), VectorInit,
-        Builder.getInt32(VF.getKnownMinValue() - 1), "vector.recur.init");
+        PoisonValue::get(VectorType::get(VectorInit->getType(), VF)),
+        VectorInit, LastIdx, "vector.recur.init");
   }
 
   VPValue *PhiDef = State.Plan->getVPValue(Phi);
```
```diff
@@ -4220,14 +4224,6 @@ void InnerLoopVectorizer::fixFirstOrderRecurrence(PHINode *Phi,
   }
   Builder.SetInsertPoint(&*InsertPt);
 
-  // We will construct a vector for the recurrence by combining the values for
-  // the current and previous iterations. This is the required shuffle mask.
-  assert(!VF.isScalable());
-  SmallVector<int, 8> ShuffleMask(VF.getKnownMinValue());
-  ShuffleMask[0] = VF.getKnownMinValue() - 1;
-  for (unsigned I = 1; I < VF.getKnownMinValue(); ++I)
-    ShuffleMask[I] = I + VF.getKnownMinValue() - 1;
-
   // The vector from which to take the initial value for the current iteration
   // (actual or unrolled). Initially, this is the vector phi node.
   Value *Incoming = VecPhi;
```
```diff
@@ -4236,9 +4232,8 @@ void InnerLoopVectorizer::fixFirstOrderRecurrence(PHINode *Phi,
   for (unsigned Part = 0; Part < UF; ++Part) {
     Value *PreviousPart = State.get(PreviousDef, Part);
     Value *PhiPart = State.get(PhiDef, Part);
-    auto *Shuffle =
-        VF.isVector()
-            ? Builder.CreateShuffleVector(Incoming, PreviousPart, ShuffleMask)
-            : Incoming;
+    auto *Shuffle = VF.isVector()
+                        ? Builder.CreateVectorSplice(Incoming, PreviousPart, -1)
+                        : Incoming;
     PhiPart->replaceAllUsesWith(Shuffle);
     cast<Instruction>(PhiPart)->eraseFromParent();
```
```diff
@@ -4254,8 +4249,9 @@ void InnerLoopVectorizer::fixFirstOrderRecurrence(PHINode *Phi,
   auto *ExtractForScalar = Incoming;
   if (VF.isVector()) {
     Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
-    ExtractForScalar = Builder.CreateExtractElement(
-        ExtractForScalar, Builder.getInt32(VF.getKnownMinValue() - 1),
-        "vector.recur.extract");
+    auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, VF);
+    auto *LastIdx = Builder.CreateSub(RuntimeVF, One);
+    ExtractForScalar = Builder.CreateExtractElement(ExtractForScalar, LastIdx,
+                                                    "vector.recur.extract");
   }
   // Extract the second last element in the middle block if the
```
```diff
@@ -4264,15 +4260,16 @@ void InnerLoopVectorizer::fixFirstOrderRecurrence(PHINode *Phi,
   // will be the value when jumping to the exit block from the LoopMiddleBlock,
   // when the scalar loop is not run at all.
   Value *ExtractForPhiUsedOutsideLoop = nullptr;
-  if (VF.isVector())
+  if (VF.isVector()) {
+    auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, VF);
+    auto *Idx = Builder.CreateSub(RuntimeVF, ConstantInt::get(IdxTy, 2));
     ExtractForPhiUsedOutsideLoop = Builder.CreateExtractElement(
-        Incoming, Builder.getInt32(VF.getKnownMinValue() - 2),
-        "vector.recur.extract.for.phi");
-  // When loop is unrolled without vectorizing, initialize
-  // ExtractForPhiUsedOutsideLoop with the value just prior to unrolled value of
-  // `Incoming`. This is analogous to the vectorized case above: extracting the
-  // second last element when VF > 1.
-  else if (UF > 1)
+        Incoming, Idx, "vector.recur.extract.for.phi");
+  } else if (UF > 1)
+    // When loop is unrolled without vectorizing, initialize
+    // ExtractForPhiUsedOutsideLoop with the value just prior to unrolled value
+    // of `Incoming`. This is analogous to the vectorized case above: extracting
+    // the second last element when VF > 1.
     ExtractForPhiUsedOutsideLoop = State.get(PreviousDef, UF - 2);
 
   // Fix the initial value of the original recurrence in the scalar loop.
```
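The net effect of the fixFirstOrderRecurrence changes is that the constant lane indices VF - 1 and VF - 2 become runtime values derived from vscale, which is what the new tests below check for. A sketch of the index computation, assuming VF = vscale x 4 (the function name is hypothetical):

```llvm
; Illustrative only: the runtime lane indices that replace the old constants.
; getRuntimeVF(Builder, IdxTy, VF) expands to vscale * 4 here.
define { i32, i32 } @recur_lane_indices() {
  %vscale = call i32 @llvm.vscale.i32()
  %runtime.vf = mul i32 %vscale, 4
  %last = sub i32 %runtime.vf, 1    ; for vector.recur.init / vector.recur.extract
  %penult = sub i32 %runtime.vf, 2  ; for vector.recur.extract.for.phi
  %r0 = insertvalue { i32, i32 } undef, i32 %last, 0
  %r1 = insertvalue { i32, i32 } %r0, i32 %penult, 1
  ret { i32, i32 } %r1
}

declare i32 @llvm.vscale.i32()
```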
@@ -0,0 +1,104 @@ (new test file)
```llvm
; RUN: opt -loop-vectorize -force-vector-width=4 -force-vector-interleave=1 -mtriple aarch64-unknown-linux-gnu -mattr=+sve -S < %s | FileCheck %s --check-prefix=CHECK-VF4UF1
; RUN: opt -loop-vectorize -force-vector-width=4 -force-vector-interleave=2 -mtriple aarch64-unknown-linux-gnu -mattr=+sve -S < %s | FileCheck %s --check-prefix=CHECK-VF4UF2

; We vectorize this first order recurrence, with a set of insertelements for
; each unrolled part. Make sure these insertelements are generated in-order,
; because the shuffle of the first order recurrence will be added after the
; insertelement of the last part UF - 1, assuming the latter appears after the
; insertelements of all other parts.
;
; int PR33613(double *b, double j, int d) {
;   int a = 0;
;   for(int i = 0; i < 10240; i++, b+=25) {
;     double f = b[d]; // Scalarize to form insertelements
;     if (j * f)
;       a++;
;     j = f;
;   }
;   return a;
; }
;
define i32 @PR33613(double* %b, double %j, i32 %d) {
; CHECK-VF4UF2-LABEL: @PR33613
; CHECK-VF4UF2: vector.body
; CHECK-VF4UF2: %[[VEC_RECUR:.*]] = phi <vscale x 4 x double> [ {{.*}}, %vector.ph ], [ {{.*}}, %vector.body ]
; CHECK-VF4UF2: %[[SPLICE1:.*]] = call <vscale x 4 x double> @llvm.experimental.vector.splice.nxv4f64(<vscale x 4 x double> %[[VEC_RECUR]], <vscale x 4 x double> {{.*}}, i32 -1)
; CHECK-VF4UF2-NEXT: %[[SPLICE2:.*]] = call <vscale x 4 x double> @llvm.experimental.vector.splice.nxv4f64(<vscale x 4 x double> %{{.*}}, <vscale x 4 x double> %{{.*}}, i32 -1)
; CHECK-VF4UF2-NOT: insertelement <vscale x 4 x double>
; CHECK-VF4UF2: middle.block
entry:
  %idxprom = sext i32 %d to i64
  br label %for.body

for.cond.cleanup:
  %a.1.lcssa = phi i32 [ %a.1, %for.body ]
  ret i32 %a.1.lcssa

for.body:
  %b.addr.012 = phi double* [ %b, %entry ], [ %add.ptr, %for.body ]
  %i.011 = phi i32 [ 0, %entry ], [ %inc1, %for.body ]
  %a.010 = phi i32 [ 0, %entry ], [ %a.1, %for.body ]
  %j.addr.09 = phi double [ %j, %entry ], [ %0, %for.body ]
  %arrayidx = getelementptr inbounds double, double* %b.addr.012, i64 %idxprom
  %0 = load double, double* %arrayidx, align 8
  %mul = fmul double %j.addr.09, %0
  %tobool = fcmp une double %mul, 0.000000e+00
  %inc = zext i1 %tobool to i32
  %a.1 = add nsw i32 %a.010, %inc
  %inc1 = add nuw nsw i32 %i.011, 1
  %add.ptr = getelementptr inbounds double, double* %b.addr.012, i64 25
  %exitcond = icmp eq i32 %inc1, 10240
  br i1 %exitcond, label %for.cond.cleanup, label %for.body, !llvm.loop !0
}

; PR34711: given three consecutive instructions such that the first will be
; widened, the second is a cast that will be widened and needs to sink after the
; third, and the third is a first-order-recurring load that will be replicated
; instead of widened. Although the cast and the first instruction will both be
; widened, and are originally adjacent to each other, make sure the replicated
; load ends up appearing between them.
;
; void PR34711(short[2] *a, int *b, int *c, int n) {
;   for(int i = 0; i < n; i++) {
;     c[i] = 7;
;     b[i] = (a[i][0] * a[i][1]);
;   }
; }
;
; Check that the sext sank after the load in the vector loop.
define void @PR34711([2 x i16]* %a, i32* %b, i32* %c, i64 %n) {
; CHECK-VF4UF1-LABEL: @PR34711
; CHECK-VF4UF1: vector.body
; CHECK-VF4UF1: %[[VEC_RECUR:.*]] = phi <vscale x 4 x i16> [ %vector.recur.init, %vector.ph ], [ %[[MGATHER:.*]], %vector.body ]
; CHECK-VF4UF1: %[[MGATHER]] = call <vscale x 4 x i16> @llvm.masked.gather.nxv4i16.nxv4p0i16(<vscale x 4 x i16*> {{.*}}, i32 2, <vscale x 4 x i1> shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> undef, i1 true, i32 0), <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer), <vscale x 4 x i16> undef)
; CHECK-VF4UF1-NEXT: %[[SPLICE:.*]] = call <vscale x 4 x i16> @llvm.experimental.vector.splice.nxv4i16(<vscale x 4 x i16> %[[VEC_RECUR]], <vscale x 4 x i16> %[[MGATHER]], i32 -1)
; CHECK-VF4UF1-NEXT: %[[SXT1:.*]] = sext <vscale x 4 x i16> %[[SPLICE]] to <vscale x 4 x i32>
; CHECK-VF4UF1-NEXT: %[[SXT2:.*]] = sext <vscale x 4 x i16> %[[MGATHER]] to <vscale x 4 x i32>
; CHECK-VF4UF1-NEXT: mul nsw <vscale x 4 x i32> %[[SXT2]], %[[SXT1]]
entry:
  %pre.index = getelementptr inbounds [2 x i16], [2 x i16]* %a, i64 0, i64 0
  %.pre = load i16, i16* %pre.index
  br label %for.body

for.body:
  %0 = phi i16 [ %.pre, %entry ], [ %1, %for.body ]
  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
  %arraycidx = getelementptr inbounds i32, i32* %c, i64 %indvars.iv
  %cur.index = getelementptr inbounds [2 x i16], [2 x i16]* %a, i64 %indvars.iv, i64 1
  store i32 7, i32* %arraycidx   ; 1st instruction, to be widened.
  %conv = sext i16 %0 to i32     ; 2nd, cast to sink after third.
  %1 = load i16, i16* %cur.index ; 3rd, first-order-recurring load not widened.
  %conv3 = sext i16 %1 to i32
  %mul = mul nsw i32 %conv3, %conv
  %arrayidx5 = getelementptr inbounds i32, i32* %b, i64 %indvars.iv
  store i32 %mul, i32* %arrayidx5
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %exitcond = icmp eq i64 %indvars.iv.next, %n
  br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !0

for.end:
  ret void
}

!0 = distinct !{!0, !1}
!1 = !{!"llvm.loop.vectorize.scalable.enable", i1 true}
```
@@ -0,0 +1,274 @@ (new test file)
```llvm
; RUN: opt -loop-vectorize -force-vector-width=4 -force-vector-interleave=1 -force-target-supports-scalable-vectors=true -S < %s | FileCheck %s --check-prefix=CHECK-VF4UF1
; RUN: opt -loop-vectorize -force-vector-width=4 -force-vector-interleave=2 -force-target-supports-scalable-vectors=true -S < %s | FileCheck %s --check-prefix=CHECK-VF4UF2

; void recurrence_1(int *a, int *b, int n) {
;   for(int i = 0; i < n; i++)
;     b[i] = a[i] + a[i - 1]
; }
;
define void @recurrence_1(i32* nocapture readonly %a, i32* nocapture %b, i32 %n) {
; CHECK-VF4UF1-LABEL: @recurrence_1
; CHECK-VF4UF1: for.preheader
; CHECK-VF4UF1: %[[SUB_1:.*]] = add i32 %n, -1
; CHECK-VF4UF1: %[[ZEXT:.*]] = zext i32 %[[SUB_1]] to i64
; CHECK-VF4UF1: %[[ADD:.*]] = add nuw nsw i64 %[[ZEXT]], 1
; CHECK-VF4UF1: vector.ph:
; CHECK-VF4UF1: %[[VSCALE1:.*]] = call i32 @llvm.vscale.i32()
; CHECK-VF4UF1: %[[MUL1:.*]] = mul i32 %[[VSCALE1]], 4
; CHECK-VF4UF1: %[[SUB1:.*]] = sub i32 %[[MUL1]], 1
; CHECK-VF4UF1: %[[VEC_RECUR_INIT:.*]] = insertelement <vscale x 4 x i32> poison, i32 %pre_load, i32 %[[SUB1]]
; CHECK-VF4UF1: vector.body:
; CHECK-VF4UF1: %[[INDEX:.*]] = phi i64 [ 0, %vector.ph ], [ %[[NEXT_IDX:.*]], %vector.body ]
; CHECK-VF4UF1: %[[VEC_RECUR:.*]] = phi <vscale x 4 x i32> [ %[[VEC_RECUR_INIT]], %vector.ph ], [ %[[LOAD:.*]], %vector.body ]
; CHECK-VF4UF1: %[[LOAD]] = load <vscale x 4 x i32>, <vscale x 4 x i32>*
; CHECK-VF4UF1: %[[SPLICE:.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.splice.nxv4i32(<vscale x 4 x i32> %[[VEC_RECUR]], <vscale x 4 x i32> %[[LOAD]], i32 -1)
; CHECK-VF4UF1: middle.block:
; CHECK-VF4UF1: %[[VSCALE2:.*]] = call i32 @llvm.vscale.i32()
; CHECK-VF4UF1: %[[MUL2:.*]] = mul i32 %[[VSCALE2]], 4
; CHECK-VF4UF1: %[[SUB2:.*]] = sub i32 %[[MUL2]], 1
; CHECK-VF4UF1: %[[VEC_RECUR_EXT:.*]] = extractelement <vscale x 4 x i32> %[[LOAD]], i32 %[[SUB2]]
; CHECK-VF4UF1: %[[VSCALE3:.*]] = call i32 @llvm.vscale.i32()
; CHECK-VF4UF1: %[[MUL3:.*]] = mul i32 %[[VSCALE3]], 4
; CHECK-VF4UF1: %[[SUB3:.*]] = sub i32 %[[MUL3]], 2
; CHECK-VF4UF1: %[[VEC_RECUR_FOR_PHI:.*]] = extractelement <vscale x 4 x i32> %[[LOAD]], i32 %[[SUB3]]
entry:
  br label %for.preheader

for.preheader:
  %arrayidx.phi.trans.insert = getelementptr inbounds i32, i32* %a, i64 0
  %pre_load = load i32, i32* %arrayidx.phi.trans.insert
  br label %scalar.body

scalar.body:
  %0 = phi i32 [ %pre_load, %for.preheader ], [ %1, %scalar.body ]
  %indvars.iv = phi i64 [ 0, %for.preheader ], [ %indvars.iv.next, %scalar.body ]
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %arrayidx32 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.next
  %1 = load i32, i32* %arrayidx32
  %arrayidx34 = getelementptr inbounds i32, i32* %b, i64 %indvars.iv
  %add35 = add i32 %1, %0
  store i32 %add35, i32* %arrayidx34
  %lftr.wideiv = trunc i64 %indvars.iv.next to i32
  %exitcond = icmp eq i32 %lftr.wideiv, %n
  br i1 %exitcond, label %for.exit, label %scalar.body, !llvm.loop !0

for.exit:
  ret void
}

; int recurrence_2(int *a, int n) {
;   int minmax;
;   for (int i = 0; i < n; ++i)
;     minmax = min(minmax, max(a[i] - a[i-1], 0));
;   return minmax;
; }
;
define i32 @recurrence_2(i32* nocapture readonly %a, i32 %n) {
; CHECK-VF4UF1-LABEL: @recurrence_2
; CHECK-VF4UF1: vector.ph:
; CHECK-VF4UF1: %[[VSCALE1:.*]] = call i32 @llvm.vscale.i32()
; CHECK-VF4UF1: %[[MUL1:.*]] = mul i32 %[[VSCALE1]], 4
; CHECK-VF4UF1: %[[SUB1:.*]] = sub i32 %[[MUL1]], 1
; CHECK-VF4UF1: %[[VEC_RECUR_INIT:.*]] = insertelement <vscale x 4 x i32> poison, i32 %.pre, i32 %[[SUB1]]
; CHECK-VF4UF1: vector.body:
; CHECK-VF4UF1: %[[VEC_RECUR:.*]] = phi <vscale x 4 x i32> [ %[[VEC_RECUR_INIT]], %vector.ph ], [ %[[LOAD:.*]], %vector.body ]
; CHECK-VF4UF1: %[[LOAD]] = load <vscale x 4 x i32>, <vscale x 4 x i32>*
; CHECK-VF4UF1: %[[REVERSE:.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.splice.nxv4i32(<vscale x 4 x i32> %[[VEC_RECUR]], <vscale x 4 x i32> %[[LOAD]], i32 -1)
; CHECK-VF4UF1: middle.block:
; CHECK-VF4UF1: %[[VSCALE2:.*]] = call i32 @llvm.vscale.i32()
; CHECK-VF4UF1: %[[MUL2:.*]] = mul i32 %[[VSCALE2]], 4
; CHECK-VF4UF1: %[[SUB2:.*]] = sub i32 %[[MUL2]], 1
; CHECK-VF4UF1: %[[VEC_RECUR_EXT:.*]] = extractelement <vscale x 4 x i32> %[[LOAD]], i32 %[[SUB2]]
entry:
  %cmp27 = icmp sgt i32 %n, 0
  br i1 %cmp27, label %for.preheader, label %for.cond.cleanup

for.preheader:
  %arrayidx2.phi.trans.insert = getelementptr inbounds i32, i32* %a, i64 -1
  %.pre = load i32, i32* %arrayidx2.phi.trans.insert, align 4
  br label %scalar.body

for.cond.cleanup.loopexit:
  %minmax.0.cond.lcssa = phi i32 [ %minmax.0.cond, %scalar.body ]
  br label %for.cond.cleanup

for.cond.cleanup:
  %minmax.0.lcssa = phi i32 [ undef, %entry ], [ %minmax.0.cond.lcssa, %for.cond.cleanup.loopexit ]
  ret i32 %minmax.0.lcssa

scalar.body:
  %0 = phi i32 [ %.pre, %for.preheader ], [ %1, %scalar.body ]
  %indvars.iv = phi i64 [ 0, %for.preheader ], [ %indvars.iv.next, %scalar.body ]
  %minmax.028 = phi i32 [ undef, %for.preheader ], [ %minmax.0.cond, %scalar.body ]
  %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
  %1 = load i32, i32* %arrayidx, align 4
  %sub3 = sub nsw i32 %1, %0
  %cmp4 = icmp sgt i32 %sub3, 0
  %cond = select i1 %cmp4, i32 %sub3, i32 0
  %cmp5 = icmp slt i32 %minmax.028, %cond
  %minmax.0.cond = select i1 %cmp5, i32 %minmax.028, i32 %cond
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %lftr.wideiv = trunc i64 %indvars.iv.next to i32
  %exitcond = icmp eq i32 %lftr.wideiv, %n
  br i1 %exitcond, label %for.cond.cleanup.loopexit, label %scalar.body, !llvm.loop !0
}

define void @recurrence_3(i16* nocapture readonly %a, double* nocapture %b, i32 %n, float %f, i16 %p) {
; CHECK-VF4UF1: vector.ph:
; CHECK-VF4UF1: %[[VSCALE1:.*]] = call i32 @llvm.vscale.i32()
; CHECK-VF4UF1: %[[MUL1:.*]] = mul i32 %[[VSCALE1]], 4
; CHECK-VF4UF1: %[[SUB1:.*]] = sub i32 %[[MUL1]], 1
; CHECK-VF4UF1: %vector.recur.init = insertelement <vscale x 4 x i16> poison, i16 %0, i32 %[[SUB1]]
; CHECK-VF4UF1: vector.body:
; CHECK-VF4UF1: %vector.recur = phi <vscale x 4 x i16> [ %vector.recur.init, %vector.ph ], [ %[[L1:.*]], %vector.body ]
; CHECK-VF4UF1: %[[L1]] = load <vscale x 4 x i16>, <vscale x 4 x i16>*
; CHECK-VF4UF1: %[[SPLICE:.*]] = call <vscale x 4 x i16> @llvm.experimental.vector.splice.nxv4i16(<vscale x 4 x i16> %vector.recur, <vscale x 4 x i16> %[[L1]], i32 -1)
; Check also that the casts were not moved needlessly.
; CHECK-VF4UF1: sitofp <vscale x 4 x i16> %[[L1]] to <vscale x 4 x double>
; CHECK-VF4UF1: sitofp <vscale x 4 x i16> %[[SPLICE]] to <vscale x 4 x double>
; CHECK-VF4UF1: middle.block:
; CHECK-VF4UF1: %[[VSCALE2:.*]] = call i32 @llvm.vscale.i32()
; CHECK-VF4UF1: %[[MUL2:.*]] = mul i32 %[[VSCALE2]], 4
; CHECK-VF4UF1: %[[SUB2:.*]] = sub i32 %[[MUL2]], 1
; CHECK-VF4UF1: %vector.recur.extract = extractelement <vscale x 4 x i16> %[[L1]], i32 %[[SUB2]]
entry:
  %0 = load i16, i16* %a, align 2
  %conv = sitofp i16 %0 to double
  %conv1 = fpext float %f to double
  %conv2 = sitofp i16 %p to double
  %mul = fmul fast double %conv2, %conv1
  %sub = fsub fast double %conv, %mul
  store double %sub, double* %b, align 8
  %cmp25 = icmp sgt i32 %n, 1
  br i1 %cmp25, label %for.preheader, label %for.end

for.preheader:
  br label %scalar.body

scalar.body:
  %1 = phi i16 [ %0, %for.preheader ], [ %2, %scalar.body ]
  %iv = phi i64 [ %iv.next, %scalar.body ], [ 1, %for.preheader ]
  %arrayidx5 = getelementptr inbounds i16, i16* %a, i64 %iv
  %2 = load i16, i16* %arrayidx5, align 2
  %conv6 = sitofp i16 %2 to double
  %conv11 = sitofp i16 %1 to double
  %mul12 = fmul fast double %conv11, %conv1
  %sub13 = fsub fast double %conv6, %mul12
  %arrayidx15 = getelementptr inbounds double, double* %b, i64 %iv
  store double %sub13, double* %arrayidx15, align 8
  %iv.next = add nuw nsw i64 %iv, 1
  %lftr.wideiv = trunc i64 %iv.next to i32
  %exitcond = icmp eq i32 %lftr.wideiv, %n
  br i1 %exitcond, label %for.end.loopexit, label %scalar.body, !llvm.loop !0

for.end.loopexit:
  br label %for.end

for.end:
  ret void
}

define void @constant_folded_previous_value() {
; CHECK-VF4UF2-LABEL: @constant_folded_previous_value
; CHECK-VF4UF2: vector.body
; CHECK-VF4UF2: %[[VECTOR_RECUR:.*]] = phi <vscale x 4 x i64> [ %vector.recur.init, %vector.ph ], [ shufflevector (<vscale x 4 x i64> insertelement (<vscale x 4 x i64> undef, i64 1, i32 0), <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer), %vector.body ]
; CHECK-VF4UF2-NEXT: %[[SPLICE1:.*]] = call <vscale x 4 x i64> @llvm.experimental.vector.splice.nxv4i64(<vscale x 4 x i64> %vector.recur, <vscale x 4 x i64> shufflevector (<vscale x 4 x i64> insertelement (<vscale x 4 x i64> undef, i64 1, i32 0), <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer), i32 -1)
; CHECK-VF4UF2: %[[SPLICE2:.*]] = call <vscale x 4 x i64> @llvm.experimental.vector.splice.nxv4i64(<vscale x 4 x i64> shufflevector (<vscale x 4 x i64> insertelement (<vscale x 4 x i64> undef, i64 1, i32 0), <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer), <vscale x 4 x i64> shufflevector (<vscale x 4 x i64> insertelement (<vscale x 4 x i64> undef, i64 1, i32 0), <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer), i32 -1)
entry:
  br label %scalar.body

scalar.body:
  %i = phi i64 [ 0, %entry ], [ %i.next, %scalar.body ]
  %tmp2 = phi i64 [ 0, %entry ], [ %tmp3, %scalar.body ]
  %tmp3 = add i64 0, 1
  %i.next = add nuw nsw i64 %i, 1
  %cond = icmp eq i64 %i.next, undef
  br i1 %cond, label %for.end, label %scalar.body, !llvm.loop !0

for.end:
  ret void
}

; We vectorize this first order recurrence, by generating two
; extracts for the phi `val.phi` - one at the last index and
; another at the second last index. We need these 2 extracts because
; the first order recurrence phi is used outside the loop, so we require the phi
; itself and not its update (addx).
define i32 @extract_second_last_iteration(i32* %cval, i32 %x) {
; CHECK-VF4UF2-LABEL: @extract_second_last_iteration
; CHECK-VF4UF2: vector.ph
; CHECK-VF4UF2: %[[SPLAT_INS1:.*]] = insertelement <vscale x 4 x i32> poison, i32 %x, i32 0
; CHECK-VF4UF2: %[[SPLAT1:.*]] = shufflevector <vscale x 4 x i32> %[[SPLAT_INS1]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-VF4UF2: %[[SPLAT_INS2:.*]] = insertelement <vscale x 4 x i32> poison, i32 %x, i32 0
; CHECK-VF4UF2: %[[SPLAT2:.*]] = shufflevector <vscale x 4 x i32> %[[SPLAT_INS2]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-VF4UF2: %[[VSCALE1:.*]] = call i32 @llvm.vscale.i32()
; CHECK-VF4UF2: %[[MUL1:.*]] = mul i32 %[[VSCALE1]], 4
; CHECK-VF4UF2: %[[SUB1:.*]] = sub i32 %[[MUL1]], 1
; CHECK-VF4UF2: %[[VEC_RECUR_INIT:.*]] = insertelement <vscale x 4 x i32> poison, i32 0, i32 %[[SUB1]]
; CHECK-VF4UF2: vector.body
; CHECK-VF4UF2: %[[VEC_RECUR:.*]] = phi <vscale x 4 x i32> [ %[[VEC_RECUR_INIT]], %vector.ph ], [ %[[ADD2:.*]], %vector.body ]
; CHECK-VF4UF2: %[[ADD1:.*]] = add <vscale x 4 x i32> %{{.*}}, %[[SPLAT1]]
; CHECK-VF4UF2: %[[ADD2]] = add <vscale x 4 x i32> %{{.*}}, %[[SPLAT2]]
; CHECK-VF4UF2: middle.block
; CHECK-VF4UF2: %[[VSCALE2:.*]] = call i32 @llvm.vscale.i32()
; CHECK-VF4UF2: %[[MUL2:.*]] = mul i32 %[[VSCALE2]], 4
; CHECK-VF4UF2: %[[SUB2:.*]] = sub i32 %[[MUL2]], 1
; CHECK-VF4UF2: %vector.recur.extract = extractelement <vscale x 4 x i32> %[[ADD2]], i32 %[[SUB2]]
; CHECK-VF4UF2: %[[VSCALE3:.*]] = call i32 @llvm.vscale.i32()
; CHECK-VF4UF2: %[[MUL3:.*]] = mul i32 %[[VSCALE3]], 4
; CHECK-VF4UF2: %[[SUB3:.*]] = sub i32 %[[MUL3]], 2
; CHECK-VF4UF2: %vector.recur.extract.for.phi = extractelement <vscale x 4 x i32> %[[ADD2]], i32 %[[SUB3]]
entry:
  br label %for.body

for.body:
  %inc.phi = phi i32 [ 0, %entry ], [ %inc, %for.body ]
  %val.phi = phi i32 [ 0, %entry ], [ %addx, %for.body ]
  %inc = add i32 %inc.phi, 1
  %bc = zext i32 %inc.phi to i64
  %addx = add i32 %inc.phi, %x
  %cmp = icmp eq i32 %inc.phi, 95
  br i1 %cmp, label %for.end, label %for.body, !llvm.loop !0

for.end:
  ret i32 %val.phi
}

; void sink_after(short *a, int n, int *b) {
;   for(int i = 0; i < n; i++)
;     b[i] = (a[i] * a[i + 1]);
; }

; Check that the sext sank after the load in the vector loop.
define void @sink_after(i16* %a, i32* %b, i64 %n) {
; CHECK-VF4UF1-LABEL: @sink_after
; CHECK-VF4UF1: vector.body
; CHECK-VF4UF1: %[[VEC_RECUR:.*]] = phi <vscale x 4 x i16> [ %vector.recur.init, %vector.ph ], [ %[[LOAD:.*]], %vector.body ]
; CHECK-VF4UF1: %[[LOAD]] = load <vscale x 4 x i16>, <vscale x 4 x i16>*
; CHECK-VF4UF1-NEXT: %[[SPLICE:.*]] = call <vscale x 4 x i16> @llvm.experimental.vector.splice.nxv4i16(<vscale x 4 x i16> %[[VEC_RECUR]], <vscale x 4 x i16> %[[LOAD]], i32 -1)
; CHECK-VF4UF1-NEXT: sext <vscale x 4 x i16> %[[SPLICE]] to <vscale x 4 x i32>
; CHECK-VF4UF1-NEXT: sext <vscale x 4 x i16> %[[LOAD]] to <vscale x 4 x i32>
entry:
  %.pre = load i16, i16* %a
  br label %for.body

for.body:
  %0 = phi i16 [ %.pre, %entry ], [ %1, %for.body ]
  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
  %conv = sext i16 %0 to i32
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %arrayidx2 = getelementptr inbounds i16, i16* %a, i64 %indvars.iv.next
  %1 = load i16, i16* %arrayidx2
  %conv3 = sext i16 %1 to i32
  %mul = mul nsw i32 %conv3, %conv
  %arrayidx5 = getelementptr inbounds i32, i32* %b, i64 %indvars.iv
  store i32 %mul, i32* %arrayidx5
  %exitcond = icmp eq i64 %indvars.iv.next, %n
  br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !0

for.end:
  ret void
}

!0 = distinct !{!0, !1}
!1 = !{!"llvm.loop.vectorize.scalable.enable", i1 true}
```