[SVE][LoopVectorize] Add masked load/store and gather/scatter support for SVE
This patch updates IRBuilder::CreateMaskedGather/Scatter to work with ScalableVectorType and adds isLegalMaskedGather/Scatter functions to AArch64TargetTransformInfo. In addition, I've fixed up isLegalMaskedLoad/Store to return true for supported scalar types, since this is what the vectorizer asks for.

In LoopVectorize.cpp I've changed LoopVectorizationCostModel::getInterleaveGroupCost to return an invalid cost for scalable vectors, since currently this relies upon using shuffle vector for reversing vectors. In addition, in LoopVectorizationCostModel::setCostBasedWideningDecision I have assumed that the cost of scalarising memory ops is infinitely expensive.

I have added some simple masked load/store and gather/scatter tests, including cases where we use gathers and scatters for conditional invariant loads and stores.

Differential Revision: https://reviews.llvm.org/D95350
parent 679ef22f2e
commit d4d4ceeb8f
llvm/lib/IR/IRBuilder.cpp

@@ -522,14 +522,14 @@ CallInst *IRBuilderBase::CreateMaskedIntrinsic(Intrinsic::ID Id,
 CallInst *IRBuilderBase::CreateMaskedGather(Value *Ptrs, Align Alignment,
                                             Value *Mask, Value *PassThru,
                                             const Twine &Name) {
-  auto *PtrsTy = cast<FixedVectorType>(Ptrs->getType());
+  auto *PtrsTy = cast<VectorType>(Ptrs->getType());
   auto *PtrTy = cast<PointerType>(PtrsTy->getElementType());
-  unsigned NumElts = PtrsTy->getNumElements();
-  auto *DataTy = FixedVectorType::get(PtrTy->getElementType(), NumElts);
+  ElementCount NumElts = PtrsTy->getElementCount();
+  auto *DataTy = VectorType::get(PtrTy->getElementType(), NumElts);

   if (!Mask)
     Mask = Constant::getAllOnesValue(
-        FixedVectorType::get(Type::getInt1Ty(Context), NumElts));
+        VectorType::get(Type::getInt1Ty(Context), NumElts));

   if (!PassThru)
     PassThru = UndefValue::get(DataTy);

@@ -552,20 +552,20 @@ CallInst *IRBuilderBase::CreateMaskedGather(Value *Ptrs, Align Alignment,
 /// be accessed in memory
 CallInst *IRBuilderBase::CreateMaskedScatter(Value *Data, Value *Ptrs,
                                              Align Alignment, Value *Mask) {
-  auto *PtrsTy = cast<FixedVectorType>(Ptrs->getType());
-  auto *DataTy = cast<FixedVectorType>(Data->getType());
-  unsigned NumElts = PtrsTy->getNumElements();
+  auto *PtrsTy = cast<VectorType>(Ptrs->getType());
+  auto *DataTy = cast<VectorType>(Data->getType());
+  ElementCount NumElts = PtrsTy->getElementCount();

 #ifndef NDEBUG
   auto PtrTy = cast<PointerType>(PtrsTy->getElementType());
-  assert(NumElts == DataTy->getNumElements() &&
+  assert(NumElts == DataTy->getElementCount() &&
          PtrTy->getElementType() == DataTy->getElementType() &&
          "Incompatible pointer and data types");
 #endif

   if (!Mask)
     Mask = Constant::getAllOnesValue(
-        FixedVectorType::get(Type::getInt1Ty(Context), NumElts));
+        VectorType::get(Type::getInt1Ty(Context), NumElts));

   Type *OverloadedTypes[] = {DataTy, PtrsTy};
   Value *Ops[] = {Data, Ptrs, getInt32(Alignment.value()), Mask};
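As a rough illustration of the IRBuilder change above (this snippet is not part of the patch; the helper name and the assumption that the pointer operand has type <vscale x 4 x float*> are mine), a caller can now pass a scalable vector of pointers and get back a call to @llvm.masked.gather.nxv4f32.nxv4p0f32:

#include "llvm/IR/IRBuilder.h"

using namespace llvm;

// Hypothetical helper: Ptrs is a <vscale x 4 x float*> value. Before this
// patch the cast<FixedVectorType> would have failed for a scalable type; now
// the VectorType/ElementCount path handles it. With no explicit mask,
// CreateMaskedGather builds an all-true <vscale x 4 x i1> predicate.
static Value *emitScalableGather(IRBuilderBase &Builder, Value *Ptrs) {
  return Builder.CreateMaskedGather(Ptrs, Align(4), /*Mask=*/nullptr,
                                    /*PassThru=*/nullptr, "wide.masked.gather");
}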
llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h

@@ -186,11 +186,7 @@ public:
   bool getTgtMemIntrinsic(IntrinsicInst *Inst, MemIntrinsicInfo &Info);

-  bool isLegalMaskedLoadStore(Type *DataType, Align Alignment) {
-    if (!isa<ScalableVectorType>(DataType) || !ST->hasSVE())
-      return false;
-
-    Type *Ty = cast<ScalableVectorType>(DataType)->getElementType();
+  bool isLegalScalarTypeForSVEMaskedMemOp(Type *Ty) const {
     if (Ty->isPointerTy())
       return true;

@@ -205,6 +201,13 @@ public:
     return false;
   }

+  bool isLegalMaskedLoadStore(Type *DataType, Align Alignment) {
+    if (isa<FixedVectorType>(DataType) || !ST->hasSVE())
+      return false;
+
+    return isLegalScalarTypeForSVEMaskedMemOp(DataType->getScalarType());
+  }
+
   bool isLegalMaskedLoad(Type *DataType, Align Alignment) {
     return isLegalMaskedLoadStore(DataType, Alignment);
   }

@@ -213,6 +216,20 @@ public:
     return isLegalMaskedLoadStore(DataType, Alignment);
   }

+  bool isLegalMaskedGatherScatter(Type *DataType) const {
+    if (isa<FixedVectorType>(DataType) || !ST->hasSVE())
+      return false;
+
+    return isLegalScalarTypeForSVEMaskedMemOp(DataType->getScalarType());
+  }
+
+  bool isLegalMaskedGather(Type *DataType, Align Alignment) const {
+    return isLegalMaskedGatherScatter(DataType);
+  }
+  bool isLegalMaskedScatter(Type *DataType, Align Alignment) const {
+    return isLegalMaskedGatherScatter(DataType);
+  }
+
   bool isLegalNTStore(Type *DataType, Align Alignment) {
     // NOTE: The logic below is mostly geared towards LV, which calls it with
     // vectors with 2 elements. We might want to improve that, if other
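A short sketch of how these hooks are typically exercised (the function below is illustrative only, not code from the patch): the loop vectorizer hands TTI the scalable vector type it wants to widen to, and the AArch64 implementation above accepts it when SVE is available and the scalar element type is supported:

#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/Alignment.h"

using namespace llvm;

// Hypothetical query: is a masked gather of <vscale x 4 x float> legal?
// Fixed-width vector types (and subtargets without SVE) still return false.
static bool canUseSVEGather(const TargetTransformInfo &TTI, LLVMContext &Ctx) {
  auto *DataTy = ScalableVectorType::get(Type::getFloatTy(Ctx), /*MinNumElts=*/4);
  return TTI.isLegalMaskedGather(DataTy, Align(4));
}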
llvm/lib/Transforms/Vectorize/LoopVectorize.cpp

@@ -6792,6 +6792,11 @@ LoopVectorizationCostModel::getGatherScatterCost(Instruction *I,
 InstructionCost
 LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I,
                                                    ElementCount VF) {
+  // TODO: Once we have support for interleaving with scalable vectors
+  // we can calculate the cost properly here.
+  if (VF.isScalable())
+    return InstructionCost::getInvalid();
+
   Type *ValTy = getMemInstValueType(I);
   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
   unsigned AS = getLoadStoreAddressSpace(I);

@@ -6800,7 +6805,6 @@ LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I,
   assert(Group && "Fail to get an interleaved access group.");

   unsigned InterleaveFactor = Group->getFactor();
-  assert(!VF.isScalable() && "scalable vectors not yet supported.");
   auto *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor);

   // Holds the indices of existing members in an interleaved load group.

@@ -7064,7 +7068,7 @@ void LoopVectorizationCostModel::setCostBasedWideningDecision(ElementCount VF) {
       }

       // Choose between Interleaving, Gather/Scatter or Scalarization.
-      InstructionCost InterleaveCost = std::numeric_limits<int>::max();
+      InstructionCost InterleaveCost = InstructionCost::getInvalid();
       unsigned NumAccesses = 1;
       if (isAccessInterleaved(&I)) {
         auto Group = getInterleavedAccessGroup(&I);

@@ -7082,10 +7086,11 @@ void LoopVectorizationCostModel::setCostBasedWideningDecision(ElementCount VF) {
       InstructionCost GatherScatterCost =
           isLegalGatherOrScatter(&I)
               ? getGatherScatterCost(&I, VF) * NumAccesses
-              : std::numeric_limits<int>::max();
+              : InstructionCost::getInvalid();

       InstructionCost ScalarizationCost =
-          getMemInstScalarizationCost(&I, VF) * NumAccesses;
+          !VF.isScalable() ? getMemInstScalarizationCost(&I, VF) * NumAccesses
+                           : InstructionCost::getInvalid();

       // Choose better solution for the current VF,
       // write down this decision and use it during vectorization.

@@ -7099,6 +7104,8 @@ void LoopVectorizationCostModel::setCostBasedWideningDecision(ElementCount VF) {
         Decision = CM_GatherScatter;
         Cost = GatherScatterCost;
       } else {
+        assert(!VF.isScalable() &&
+               "We cannot yet scalarise for scalable vectors");
         Decision = CM_Scalarize;
         Cost = ScalarizationCost;
       }

@@ -7448,8 +7455,12 @@ LoopVectorizationCostModel::getInstructionCost(Instruction *I, ElementCount VF,
       }
     }

-    assert(!VF.isScalable() && "VF is assumed to be non scalable");
-    unsigned N = isScalarAfterVectorization(I, VF) ? VF.getKnownMinValue() : 1;
+    unsigned N;
+    if (isScalarAfterVectorization(I, VF)) {
+      assert(!VF.isScalable() && "VF is assumed to be non scalable");
+      N = VF.getKnownMinValue();
+    } else
+      N = 1;
     return N *
            TTI.getCastInstrCost(Opcode, VectorTy, SrcVecTy, CCH, CostKind, I);
   }
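The LoopVectorize.cpp hunks above replace the old std::numeric_limits<int>::max() sentinels with InstructionCost::getInvalid(). A minimal sketch of the pattern this relies on (the helper is illustrative, not from the patch, and assumes, as the unchanged comparison logic in setCostBasedWideningDecision does, that an invalid cost never beats a valid one):

#include "llvm/Support/InstructionCost.h"

using namespace llvm;

// Pick the cheapest widening strategy for a memory access. Strategies that
// are not possible for a scalable VF (interleaving, scalarization) report an
// invalid cost, so they are never chosen while a valid alternative exists.
static InstructionCost cheapestStrategy(InstructionCost InterleaveCost,
                                        InstructionCost GatherScatterCost,
                                        InstructionCost ScalarizationCost) {
  InstructionCost Best = InterleaveCost;
  if (GatherScatterCost < Best)
    Best = GatherScatterCost;
  if (ScalarizationCost < Best)
    Best = ScalarizationCost;
  return Best;
}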
New test (LoopVectorize/AArch64, SVE gather/scatter):

@@ -0,0 +1,130 @@
; RUN: opt -loop-vectorize -dce -instcombine -mtriple aarch64-linux-gnu -mattr=+sve -S %s -o - | FileCheck %s

define void @gather_nxv4i32_ind64(float* noalias nocapture readonly %a, i64* noalias nocapture readonly %b, float* noalias nocapture %c, i64 %n) {
; CHECK-LABEL: @gather_nxv4i32_ind64
; CHECK: vector.body:
; CHECK: %[[IND:.*]] = load <vscale x 4 x i64>, <vscale x 4 x i64>*
; CHECK: %[[PTRS:.*]] = getelementptr inbounds float, float* %a, <vscale x 4 x i64> %[[IND]]
; CHECK: %[[GLOAD:.*]] = call <vscale x 4 x float> @llvm.masked.gather.nxv4f32.nxv4p0f32(<vscale x 4 x float*> %[[PTRS]]
; CHECK: store <vscale x 4 x float> %[[GLOAD]], <vscale x 4 x float>*
entry:
  br label %for.body

for.body: ; preds = %entry, %for.body
  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
  %arrayidx = getelementptr inbounds i64, i64* %b, i64 %indvars.iv
  %0 = load i64, i64* %arrayidx, align 8
  %arrayidx3 = getelementptr inbounds float, float* %a, i64 %0
  %1 = load float, float* %arrayidx3, align 4
  %arrayidx5 = getelementptr inbounds float, float* %c, i64 %indvars.iv
  store float %1, float* %arrayidx5, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %exitcond.not = icmp eq i64 %indvars.iv.next, %n
  br i1 %exitcond.not, label %for.cond.cleanup, label %for.body, !llvm.loop !0

for.cond.cleanup: ; preds = %for.cond.cleanup.loopexit, %entry
  ret void
}

; NOTE: I deliberately chose '%b' as an array of i32 indices, since the
; additional 'sext' in the for.body loop exposes additional code paths
; during vectorisation.
define void @scatter_nxv4i32_ind32(float* noalias nocapture %a, i32* noalias nocapture readonly %b, float* noalias nocapture readonly %c, i64 %n) {
; CHECK-LABEL: @scatter_nxv4i32_ind32
; CHECK: vector.body:
; CHECK: %[[VALS:.*]] = load <vscale x 4 x float>
; CHECK: %[[IND:.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* %7, align 4
; CHECK: %[[EXTIND:.*]] = sext <vscale x 4 x i32> %[[IND]] to <vscale x 4 x i64>
; CHECK: %[[PTRS:.*]] = getelementptr inbounds float, float* %a, <vscale x 4 x i64> %[[EXTIND]]
; CHECK: call void @llvm.masked.scatter.nxv4f32.nxv4p0f32(<vscale x 4 x float> %[[VALS]], <vscale x 4 x float*> %[[PTRS]]
entry:
  br label %for.body

for.body: ; preds = %entry, %for.body
  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
  %arrayidx = getelementptr inbounds float, float* %c, i64 %indvars.iv
  %0 = load float, float* %arrayidx, align 4
  %arrayidx3 = getelementptr inbounds i32, i32* %b, i64 %indvars.iv
  %1 = load i32, i32* %arrayidx3, align 4
  %idxprom4 = sext i32 %1 to i64
  %arrayidx5 = getelementptr inbounds float, float* %a, i64 %idxprom4
  store float %0, float* %arrayidx5, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %exitcond.not = icmp eq i64 %indvars.iv.next, %n
  br i1 %exitcond.not, label %for.cond.cleanup, label %for.body, !llvm.loop !0

for.cond.cleanup: ; preds = %for.body, %entry
  ret void
}

define void @scatter_inv_nxv4i32(i32* noalias nocapture %inv, i32* noalias nocapture readonly %b, i64 %n) {
; CHECK-LABEL: @scatter_inv_nxv4i32
; CHECK: vector.ph:
; CHECK: %[[INS:.*]] = insertelement <vscale x 4 x i32*> poison, i32* %inv, i32 0
; CHECK: %[[PTRSPLAT:.*]] = shufflevector <vscale x 4 x i32*> %[[INS]], <vscale x 4 x i32*> poison, <vscale x 4 x i32> zeroinitializer
; CHECK: vector.body:
; CHECK: %[[VALS:.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* %5, align 4
; CHECK: %[[MASK:.*]] = icmp ne <vscale x 4 x i32> %[[VALS]],
; CHECK: call void @llvm.masked.scatter.nxv4i32.nxv4p0i32({{.*}}, <vscale x 4 x i32*> %[[PTRSPLAT]], i32 4, <vscale x 4 x i1> %[[MASK]])
entry:
  br label %for.body

for.body: ; preds = %entry, %for.inc
  %indvars.iv = phi i64 [ %indvars.iv.next, %for.inc ], [ 0, %entry ]
  %arrayidx = getelementptr inbounds i32, i32* %b, i64 %indvars.iv
  %0 = load i32, i32* %arrayidx, align 4
  %tobool.not = icmp eq i32 %0, 0
  br i1 %tobool.not, label %for.inc, label %if.then

if.then: ; preds = %for.body
  store i32 3, i32* %inv, align 4
  br label %for.inc

for.inc: ; preds = %for.body, %if.then
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %exitcond.not = icmp eq i64 %indvars.iv.next, %n
  br i1 %exitcond.not, label %for.cond.cleanup, label %for.body, !llvm.loop !0

for.cond.cleanup: ; preds = %for.cond.cleanup.loopexit, %entry
  ret void
}

define void @gather_inv_nxv4i32(i32* noalias nocapture %a, i32* noalias nocapture readonly %inv, i64 %n) {
; CHECK-LABEL: @gather_inv_nxv4i32
; CHECK: vector.ph:
; CHECK: %[[INS:.*]] = insertelement <vscale x 4 x i32*> poison, i32* %inv, i32 0
; CHECK: %[[PTRSPLAT:.*]] = shufflevector <vscale x 4 x i32*> %[[INS]], <vscale x 4 x i32*> poison, <vscale x 4 x i32> zeroinitializer
; CHECK: vector.body:
; CHECK: %[[VALS:.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* %5, align 4
; CHECK: %[[MASK:.*]] = icmp sgt <vscale x 4 x i32> %[[VALS]],
; CHECK: %{{.*}} = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0i32(<vscale x 4 x i32*> %[[PTRSPLAT]], i32 4, <vscale x 4 x i1> %[[MASK]]
entry:
  br label %for.body

for.body: ; preds = %entry, %for.inc
  %indvars.iv = phi i64 [ %indvars.iv.next, %for.inc ], [ 0, %entry ]
  %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
  %0 = load i32, i32* %arrayidx, align 4
  %cmp2 = icmp sgt i32 %0, 3
  br i1 %cmp2, label %if.then, label %for.inc

if.then: ; preds = %for.body
  %1 = load i32, i32* %inv, align 4
  store i32 %1, i32* %arrayidx, align 4
  br label %for.inc

for.inc: ; preds = %for.body, %if.then
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %exitcond.not = icmp eq i64 %indvars.iv.next, %n
  br i1 %exitcond.not, label %for.cond.cleanup, label %for.body, !llvm.loop !0

for.cond.cleanup: ; preds = %for.inc, %entry
  ret void
}

!0 = distinct !{!0, !1, !2, !3, !4, !5}
!1 = !{!"llvm.loop.mustprogress"}
!2 = !{!"llvm.loop.vectorize.width", i32 4}
!3 = !{!"llvm.loop.vectorize.scalable.enable", i1 true}
!4 = !{!"llvm.loop.interleave.count", i32 1}
!5 = !{!"llvm.loop.vectorize.enable", i1 true}
New test (LoopVectorize/AArch64, SVE masked load/store):

@@ -0,0 +1,82 @@
; RUN: opt -loop-vectorize -dce -instcombine -mtriple aarch64-linux-gnu -mattr=+sve -S %s -o - | FileCheck %s

define void @mloadstore_f32(float* noalias nocapture %a, float* noalias nocapture readonly %b, i64 %n) {
; CHECK-LABEL: @mloadstore_f32
; CHECK: vector.body:
; CHECK: %[[LOAD1:.*]] = load <vscale x 4 x float>, <vscale x 4 x float>*
; CHECK-NEXT: %[[MASK:.*]] = fcmp ogt <vscale x 4 x float> %[[LOAD1]],
; CHECK-NEXT: %[[GEPA:.*]] = getelementptr inbounds float, float* %a,
; CHECK-NEXT: %[[MLOAD_PTRS:.*]] = bitcast float* %[[GEPA]] to <vscale x 4 x float>*
; CHECK-NEXT: %[[LOAD2:.*]] = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0nxv4f32(<vscale x 4 x float>* %[[MLOAD_PTRS]], i32 4, <vscale x 4 x i1> %[[MASK]]
; CHECK-NEXT: %[[FADD:.*]] = fadd <vscale x 4 x float> %[[LOAD1]], %[[LOAD2]]
; CHECK-NEXT: %[[MSTORE_PTRS:.*]] = bitcast float* %[[GEPA]] to <vscale x 4 x float>*
; CHECK-NEXT: call void @llvm.masked.store.nxv4f32.p0nxv4f32(<vscale x 4 x float> %[[FADD]], <vscale x 4 x float>* %[[MSTORE_PTRS]], i32 4, <vscale x 4 x i1> %[[MASK]])
entry:
  br label %for.body

for.body: ; preds = %entry, %for.inc
  %i.011 = phi i64 [ %inc, %for.inc ], [ 0, %entry ]
  %arrayidx = getelementptr inbounds float, float* %b, i64 %i.011
  %0 = load float, float* %arrayidx, align 4
  %cmp1 = fcmp ogt float %0, 0.000000e+00
  br i1 %cmp1, label %if.then, label %for.inc

if.then: ; preds = %for.body
  %arrayidx3 = getelementptr inbounds float, float* %a, i64 %i.011
  %1 = load float, float* %arrayidx3, align 4
  %add = fadd float %0, %1
  store float %add, float* %arrayidx3, align 4
  br label %for.inc

for.inc: ; preds = %for.body, %if.then
  %inc = add nuw nsw i64 %i.011, 1
  %exitcond.not = icmp eq i64 %inc, %n
  br i1 %exitcond.not, label %exit, label %for.body, !llvm.loop !0

exit: ; preds = %for.inc
  ret void
}

define void @mloadstore_i32(i32* noalias nocapture %a, i32* noalias nocapture readonly %b, i64 %n) {
; CHECK-LABEL: @mloadstore_i32
; CHECK: vector.body:
; CHECK: %[[LOAD1:.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>*
; CHECK-NEXT: %[[MASK:.*]] = icmp ne <vscale x 4 x i32> %[[LOAD1]],
; CHECK-NEXT: %[[GEPA:.*]] = getelementptr inbounds i32, i32* %a,
; CHECK-NEXT: %[[MLOAD_PTRS:.*]] = bitcast i32* %[[GEPA]] to <vscale x 4 x i32>*
; CHECK-NEXT: %[[LOAD2:.*]] = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0nxv4i32(<vscale x 4 x i32>* %[[MLOAD_PTRS]], i32 4, <vscale x 4 x i1> %[[MASK]]
; CHECK-NEXT: %[[FADD:.*]] = add <vscale x 4 x i32> %[[LOAD1]], %[[LOAD2]]
; CHECK-NEXT: %[[MSTORE_PTRS:.*]] = bitcast i32* %[[GEPA]] to <vscale x 4 x i32>*
; CHECK-NEXT: call void @llvm.masked.store.nxv4i32.p0nxv4i32(<vscale x 4 x i32> %[[FADD]], <vscale x 4 x i32>* %[[MSTORE_PTRS]], i32 4, <vscale x 4 x i1> %[[MASK]])
entry:
  br label %for.body

for.body: ; preds = %entry, %for.inc
  %i.011 = phi i64 [ %inc, %for.inc ], [ 0, %entry ]
  %arrayidx = getelementptr inbounds i32, i32* %b, i64 %i.011
  %0 = load i32, i32* %arrayidx, align 4
  %cmp1 = icmp ne i32 %0, 0
  br i1 %cmp1, label %if.then, label %for.inc

if.then: ; preds = %for.body
  %arrayidx3 = getelementptr inbounds i32, i32* %a, i64 %i.011
  %1 = load i32, i32* %arrayidx3, align 4
  %add = add i32 %0, %1
  store i32 %add, i32* %arrayidx3, align 4
  br label %for.inc

for.inc: ; preds = %for.body, %if.then
  %inc = add nuw nsw i64 %i.011, 1
  %exitcond.not = icmp eq i64 %inc, %n
  br i1 %exitcond.not, label %exit, label %for.body, !llvm.loop !0

exit: ; preds = %for.inc
  ret void
}

!0 = distinct !{!0, !1, !2, !3, !4, !5}
!1 = !{!"llvm.loop.mustprogress"}
!2 = !{!"llvm.loop.vectorize.width", i32 4}
!3 = !{!"llvm.loop.vectorize.scalable.enable", i1 true}
!4 = !{!"llvm.loop.interleave.count", i32 1}
!5 = !{!"llvm.loop.vectorize.enable", i1 true}