[ARM] Fix GatherScatter AddLikeOr condition

The add-like-or check in MVEGatherScatterLowering::getVarAndConst was missing a negation, so genuine add-like-or instructions were rejected while other non-add instructions were allowed through. Add the missing '!' and update the gather/scatter lowering tests.

David Green 2021-11-15 09:44:41 +00:00
parent 2d9bdd9dba
commit 4c3bfdc7f1
2 changed files with 188 additions and 23 deletions

View File

@@ -376,7 +376,7 @@ MVEGatherScatterLowering::getVarAndConst(Value *Inst, int TypeScale) {
// add-like-or.
Instruction *Add = dyn_cast<Instruction>(Inst);
if (Add == nullptr ||
(Add->getOpcode() != Instruction::Add && isAddLikeOr(Add, *DL)))
(Add->getOpcode() != Instruction::Add && !isAddLikeOr(Add, *DL)))
return ReturnFalse;
Value *Summand;
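For reference, an "add-like-or" is an or whose operands have no common bits set, so it computes the same value as an add; the fixed condition lets getVarAndConst treat such an or like the add it is equivalent to. A minimal IR sketch of an index built that way (illustrative only, not part of this patch):

define i32 @add_like_or_sketch(i32 %i) {
entry:
  ; the shl leaves the two low bits of %base as zero, so or-ing in 3 touches
  ; only known-zero bits and is equivalent to add i32 %base, 3
  %base = shl i32 %i, 2
  %idx = or i32 %base, 3
  ret i32 %idx
}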

View File

@@ -1,27 +1,36 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt --arm-mve-gather-scatter-lowering -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp %s -S -o - | FileCheck %s
target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64"
define arm_aapcs_vfpcc void @push_out_add_sub_block(i32* noalias nocapture readonly %data, i32* noalias nocapture %dst, i32 %n.vec) {
; CHECK-LABEL: @push_out_add_sub_block(
; CHECK-NEXT: vector.ph:
; CHECK-NEXT: [[PUSHEDOUTADD:%.*]] = add <4 x i32> <i32 0, i32 2, i32 4, i32 6>, <i32 6, i32 6, i32 6, i32 6>
; CHECK-NEXT: [[SCALEDINDEX:%.*]] = shl <4 x i32> [[PUSHEDOUTADD]], <i32 2, i32 2, i32 2, i32 2>
; CHECK-NEXT: [[TMP0:%.*]] = ptrtoint i32* [[DATA:%.*]] to i32
; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[TMP0]], i32 0
; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x i32> [[DOTSPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
; CHECK-NEXT: [[STARTINDEX:%.*]] = add <4 x i32> [[SCALEDINDEX]], [[DOTSPLAT]]
; CHECK-NEXT: [[PREINCREMENTSTARTINDEX:%.*]] = sub <4 x i32> [[STARTINDEX]], <i32 32, i32 32, i32 32, i32 32>
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY_END:%.*]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i32> [ [[PUSHEDOUTADD]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY_END]] ]
; CHECK-NEXT: [[TMP0:%.*]] = icmp eq i32 [[INDEX]], 48
; CHECK-NEXT: br i1 [[TMP0]], label [[LOWER_BLOCK:%.*]], label [[END:%.*]]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i32> [ [[PREINCREMENTSTARTINDEX]], [[VECTOR_PH]] ], [ [[TMP4:%.*]], [[VECTOR_BODY_END]] ]
; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i32 [[INDEX]], 48
; CHECK-NEXT: br i1 [[TMP1]], label [[LOWER_BLOCK:%.*]], label [[END:%.*]]
; CHECK: lower.block:
; CHECK-NEXT: [[TMP1:%.*]] = call <4 x i32> @llvm.arm.mve.vldr.gather.offset.v4i32.p0i32.v4i32(i32* [[DATA:%.*]], <4 x i32> [[VEC_IND]], i32 32, i32 2, i32 1)
; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, i32* [[DST:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[TMP3:%.*]] = bitcast i32* [[TMP2]] to <4 x i32>*
; CHECK-NEXT: store <4 x i32> [[TMP1]], <4 x i32>* [[TMP3]], align 4
; CHECK-NEXT: [[TMP2:%.*]] = call { <4 x i32>, <4 x i32> } @llvm.arm.mve.vldr.gather.base.wb.v4i32.v4i32(<4 x i32> [[VEC_IND]], i32 32)
; CHECK-NEXT: [[TMP3:%.*]] = extractvalue { <4 x i32>, <4 x i32> } [[TMP2]], 0
; CHECK-NEXT: [[TMP4]] = extractvalue { <4 x i32>, <4 x i32> } [[TMP2]], 1
; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, i32* [[DST:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[TMP6:%.*]] = bitcast i32* [[TMP5]] to <4 x i32>*
; CHECK-NEXT: store <4 x i32> [[TMP3]], <4 x i32>* [[TMP6]], align 4
; CHECK-NEXT: [[INDEX_NEXT]] = add i32 [[INDEX]], 4
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i32> [[VEC_IND]], <i32 8, i32 8, i32 8, i32 8>
; CHECK-NEXT: br label [[VECTOR_BODY_END]]
; CHECK: vector.body.end:
; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC:%.*]]
; CHECK-NEXT: br i1 [[TMP4]], label [[END]], label [[VECTOR_BODY]]
; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC:%.*]]
; CHECK-NEXT: br i1 [[TMP7]], label [[END]], label [[VECTOR_BODY]]
; CHECK: end:
; CHECK-NEXT: ret void
;
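The updated checks above now expect the writeback form of the MVE base gather, which returns a two-element struct: element 0 is the loaded data and element 1 is the incremented address vector that feeds the next iteration's phi. A minimal standalone sketch of that shape (not part of the test; the signature is taken from the call in the checks and the i32 32 increment just mirrors the immediate used there):

declare { <4 x i32>, <4 x i32> } @llvm.arm.mve.vldr.gather.base.wb.v4i32.v4i32(<4 x i32>, i32)

define <4 x i32> @gather_wb_sketch(<4 x i32> %addrs, <4 x i32>* %next) {
entry:
  ; gather through the address vector with a constant offset, also producing
  ; the written-back (incremented) addresses as the second struct element
  %gw = call { <4 x i32>, <4 x i32> } @llvm.arm.mve.vldr.gather.base.wb.v4i32.v4i32(<4 x i32> %addrs, i32 32)
  %data = extractvalue { <4 x i32>, <4 x i32> } %gw, 0
  %inc = extractvalue { <4 x i32>, <4 x i32> } %gw, 1
  store <4 x i32> %inc, <4 x i32>* %next
  ret <4 x i32> %data
}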
@@ -60,23 +69,30 @@ define arm_aapcs_vfpcc void @push_out_mul_sub_block(i32* noalias nocapture reado
; CHECK-NEXT: [[PUSHEDOUTMUL:%.*]] = mul <4 x i32> <i32 0, i32 2, i32 4, i32 6>, <i32 3, i32 3, i32 3, i32 3>
; CHECK-NEXT: [[PRODUCT:%.*]] = mul <4 x i32> <i32 8, i32 8, i32 8, i32 8>, <i32 3, i32 3, i32 3, i32 3>
; CHECK-NEXT: [[PUSHEDOUTADD:%.*]] = add <4 x i32> [[PUSHEDOUTMUL]], <i32 6, i32 6, i32 6, i32 6>
; CHECK-NEXT: [[SCALEDINDEX:%.*]] = shl <4 x i32> [[PUSHEDOUTADD]], <i32 2, i32 2, i32 2, i32 2>
; CHECK-NEXT: [[TMP0:%.*]] = ptrtoint i32* [[DATA:%.*]] to i32
; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[TMP0]], i32 0
; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x i32> [[DOTSPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
; CHECK-NEXT: [[STARTINDEX:%.*]] = add <4 x i32> [[SCALEDINDEX]], [[DOTSPLAT]]
; CHECK-NEXT: [[PREINCREMENTSTARTINDEX:%.*]] = sub <4 x i32> [[STARTINDEX]], <i32 96, i32 96, i32 96, i32 96>
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY_END:%.*]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i32> [ [[PUSHEDOUTADD]], [[VECTOR_PH]] ], [ [[INCREMENTPUSHEDOUTMUL:%.*]], [[VECTOR_BODY_END]] ]
; CHECK-NEXT: [[TMP0:%.*]] = icmp eq i32 [[INDEX]], 48
; CHECK-NEXT: br i1 [[TMP0]], label [[LOWER_BLOCK:%.*]], label [[END:%.*]]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i32> [ [[PREINCREMENTSTARTINDEX]], [[VECTOR_PH]] ], [ [[TMP4:%.*]], [[VECTOR_BODY_END]] ]
; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i32 [[INDEX]], 48
; CHECK-NEXT: br i1 [[TMP1]], label [[LOWER_BLOCK:%.*]], label [[END:%.*]]
; CHECK: lower.block:
; CHECK-NEXT: [[TMP1:%.*]] = call <4 x i32> @llvm.arm.mve.vldr.gather.offset.v4i32.p0i32.v4i32(i32* [[DATA:%.*]], <4 x i32> [[VEC_IND]], i32 32, i32 2, i32 1)
; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, i32* [[DST:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[TMP3:%.*]] = bitcast i32* [[TMP2]] to <4 x i32>*
; CHECK-NEXT: store <4 x i32> [[TMP1]], <4 x i32>* [[TMP3]], align 4
; CHECK-NEXT: [[TMP2:%.*]] = call { <4 x i32>, <4 x i32> } @llvm.arm.mve.vldr.gather.base.wb.v4i32.v4i32(<4 x i32> [[VEC_IND]], i32 96)
; CHECK-NEXT: [[TMP3:%.*]] = extractvalue { <4 x i32>, <4 x i32> } [[TMP2]], 0
; CHECK-NEXT: [[TMP4]] = extractvalue { <4 x i32>, <4 x i32> } [[TMP2]], 1
; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, i32* [[DST:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[TMP6:%.*]] = bitcast i32* [[TMP5]] to <4 x i32>*
; CHECK-NEXT: store <4 x i32> [[TMP3]], <4 x i32>* [[TMP6]], align 4
; CHECK-NEXT: [[INDEX_NEXT]] = add i32 [[INDEX]], 4
; CHECK-NEXT: br label [[VECTOR_BODY_END]]
; CHECK: vector.body.end:
; CHECK-NEXT: [[INCREMENTPUSHEDOUTMUL]] = add <4 x i32> [[VEC_IND]], [[PRODUCT]]
; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC:%.*]]
; CHECK-NEXT: br i1 [[TMP4]], label [[END]], label [[VECTOR_BODY]]
; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC:%.*]]
; CHECK-NEXT: br i1 [[TMP7]], label [[END]], label [[VECTOR_BODY]]
; CHECK: end:
; CHECK-NEXT: ret void
;
@@ -123,8 +139,12 @@ define arm_aapcs_vfpcc void @push_out_mul_sub_loop(i32* noalias nocapture readon
; CHECK-NEXT: br label [[VECTOR_2_BODY:%.*]]
; CHECK: vector.2.body:
; CHECK-NEXT: [[TMP0:%.*]] = mul <4 x i32> [[VEC_IND]], <i32 3, i32 3, i32 3, i32 3>
; CHECK-NEXT: [[TMP1:%.*]] = add <4 x i32> [[TMP0]], <i32 6, i32 6, i32 6, i32 6>
; CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vldr.gather.offset.v4i32.p0i32.v4i32(i32* [[DATA:%.*]], <4 x i32> [[TMP1]], i32 32, i32 2, i32 1)
; CHECK-NEXT: [[SCALEDINDEX:%.*]] = shl <4 x i32> [[TMP0]], <i32 2, i32 2, i32 2, i32 2>
; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint i32* [[DATA:%.*]] to i32
; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[TMP1]], i32 0
; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x i32> [[DOTSPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
; CHECK-NEXT: [[STARTINDEX:%.*]] = add <4 x i32> [[SCALEDINDEX]], [[DOTSPLAT]]
; CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vldr.gather.base.v4i32.v4i32(<4 x i32> [[STARTINDEX]], i32 24)
; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, i32* [[DST:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[TMP4:%.*]] = bitcast i32* [[TMP3]] to <4 x i32>*
; CHECK-NEXT: store <4 x i32> [[TMP2]], <4 x i32>* [[TMP4]], align 4
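These checks show the pass rewriting a gather through per-element GEP offsets into a gather from a vector of absolute 32-bit addresses: the indices are scaled to byte offsets, the base pointer is ptrtoint'ed and splatted, and the sum feeds the base-addressed gather, with the leftover constant folded into the immediate (24 here, i.e. the original +6 scaled by 4). A rough before/after sketch of that rewrite (illustrative names, immediate 0 for simplicity):

declare <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*>, i32, <4 x i1>, <4 x i32>)
declare <4 x i32> @llvm.arm.mve.vldr.gather.base.v4i32.v4i32(<4 x i32>, i32)

; before: a masked gather through a GEP of i32 indices
define <4 x i32> @gep_gather(i32* %data, <4 x i32> %idx) {
  %ptrs = getelementptr inbounds i32, i32* %data, <4 x i32> %idx
  %g = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
  ret <4 x i32> %g
}

; after: the same addresses materialised as i32s for the MVE base gather
define <4 x i32> @base_gather(i32* %data, <4 x i32> %idx) {
  %scaled = shl <4 x i32> %idx, <i32 2, i32 2, i32 2, i32 2>
  %base = ptrtoint i32* %data to i32
  %base.ins = insertelement <4 x i32> poison, i32 %base, i32 0
  %base.splat = shufflevector <4 x i32> %base.ins, <4 x i32> poison, <4 x i32> zeroinitializer
  %addrs = add <4 x i32> %scaled, %base.splat
  %g = call <4 x i32> @llvm.arm.mve.vldr.gather.base.v4i32.v4i32(<4 x i32> %addrs, i32 0)
  ret <4 x i32> %g
}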
@@ -221,5 +241,150 @@ end:
ret void;
}
define void @gatherload(i32 %n, i32 %m, i32* nocapture %a, i32* nocapture readonly %b, i32 %call.us.us) {
; CHECK-LABEL: @gatherload(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[A57:%.*]] = bitcast i32* [[A:%.*]] to i8*
; CHECK-NEXT: [[CMP38:%.*]] = icmp sgt i32 [[N:%.*]], 0
; CHECK-NEXT: br i1 [[CMP38]], label [[FOR_BODY_LR_PH:%.*]], label [[FOR_END16:%.*]]
; CHECK: for.body.lr.ph:
; CHECK-NEXT: [[CMP636:%.*]] = icmp sgt i32 [[M:%.*]], 0
; CHECK-NEXT: br i1 [[CMP636]], label [[FOR_BODY_US_US_PREHEADER:%.*]], label [[FOR_BODY:%.*]]
; CHECK: for.body.us.us.preheader:
; CHECK-NEXT: [[TMP0:%.*]] = shl nuw i32 [[M]], 2
; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i32, i32* [[A]], i32 [[M]]
; CHECK-NEXT: [[SCEVGEP64:%.*]] = getelementptr i32, i32* [[B:%.*]], i32 [[M]]
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 [[M]], 4
; CHECK-NEXT: [[BOUND0:%.*]] = icmp ugt i32* [[SCEVGEP64]], [[A]]
; CHECK-NEXT: [[BOUND1:%.*]] = icmp ugt i32* [[SCEVGEP]], [[B]]
; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; CHECK-NEXT: [[N_VEC:%.*]] = and i32 [[M]], -4
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N_VEC]], [[M]]
; CHECK-NEXT: br label [[FOR_BODY_US_US:%.*]]
; CHECK: for.body.us.us:
; CHECK-NEXT: [[I_039_US_US:%.*]] = phi i32 [ [[INC15_US_US:%.*]], [[FOR_COND5_FOR_END13_CRIT_EDGE_US_US:%.*]] ], [ 0, [[FOR_BODY_US_US_PREHEADER]] ]
; CHECK-NEXT: [[VLA_US_US:%.*]] = alloca i32, i32 [[CALL_US_US:%.*]], align 4
; CHECK-NEXT: [[VLA_US_US56:%.*]] = bitcast i32* [[VLA_US_US]] to i8*
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* nonnull align 4 [[VLA_US_US56]], i8* align 4 [[A57]], i32 [[TMP0]], i1 false)
; CHECK-NEXT: [[BRMERGE:%.*]] = select i1 [[MIN_ITERS_CHECK]], i1 true, i1 [[FOUND_CONFLICT]]
; CHECK-NEXT: br i1 [[BRMERGE]], label [[FOR_BODY7_US_US_PREHEADER:%.*]], label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ], [ 0, [[FOR_BODY_US_US]] ]
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, i32* [[B]], i32 [[INDEX]]
; CHECK-NEXT: [[TMP2:%.*]] = bitcast i32* [[TMP1]] to <4 x i32>*
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, <4 x i32>* [[TMP2]], align 4
; CHECK-NEXT: [[TMP3:%.*]] = call <4 x i32> @llvm.arm.mve.vldr.gather.offset.v4i32.p0i32.v4i32(i32* [[VLA_US_US]], <4 x i32> [[WIDE_LOAD]], i32 32, i32 2, i32 1)
; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, i32* [[A]], i32 [[INDEX]]
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i32* [[TMP4]] to <4 x i32>*
; CHECK-NEXT: store <4 x i32> [[TMP3]], <4 x i32>* [[TMP5]], align 4
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]]
; CHECK: middle.block:
; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_COND5_FOR_END13_CRIT_EDGE_US_US]], label [[FOR_BODY7_US_US_PREHEADER]]
; CHECK: for.body7.us.us.preheader:
; CHECK-NEXT: [[J_137_US_US_PH:%.*]] = phi i32 [ 0, [[FOR_BODY_US_US]] ], [ [[N_VEC]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT: br label [[FOR_BODY7_US_US:%.*]]
; CHECK: for.body7.us.us:
; CHECK-NEXT: [[J_137_US_US:%.*]] = phi i32 [ [[INC12_US_US:%.*]], [[FOR_BODY7_US_US]] ], [ [[J_137_US_US_PH]], [[FOR_BODY7_US_US_PREHEADER]] ]
; CHECK-NEXT: [[ARRAYIDX8_US_US:%.*]] = getelementptr inbounds i32, i32* [[B]], i32 [[J_137_US_US]]
; CHECK-NEXT: [[TMP7:%.*]] = load i32, i32* [[ARRAYIDX8_US_US]], align 4
; CHECK-NEXT: [[ARRAYIDX9_US_US:%.*]] = getelementptr inbounds i32, i32* [[VLA_US_US]], i32 [[TMP7]]
; CHECK-NEXT: [[TMP8:%.*]] = load i32, i32* [[ARRAYIDX9_US_US]], align 4
; CHECK-NEXT: [[ARRAYIDX10_US_US:%.*]] = getelementptr inbounds i32, i32* [[A]], i32 [[J_137_US_US]]
; CHECK-NEXT: store i32 [[TMP8]], i32* [[ARRAYIDX10_US_US]], align 4
; CHECK-NEXT: [[INC12_US_US]] = add nuw nsw i32 [[J_137_US_US]], 1
; CHECK-NEXT: [[EXITCOND58_NOT:%.*]] = icmp eq i32 [[INC12_US_US]], [[M]]
; CHECK-NEXT: br i1 [[EXITCOND58_NOT]], label [[FOR_COND5_FOR_END13_CRIT_EDGE_US_US]], label [[FOR_BODY7_US_US]]
; CHECK: for.cond5.for.end13_crit_edge.us.us:
; CHECK-NEXT: [[INC15_US_US]] = add nuw nsw i32 [[I_039_US_US]], 1
; CHECK-NEXT: [[EXITCOND59_NOT:%.*]] = icmp eq i32 [[INC15_US_US]], [[N]]
; CHECK-NEXT: br i1 [[EXITCOND59_NOT]], label [[FOR_END16]], label [[FOR_BODY_US_US]]
; CHECK: for.body:
; CHECK-NEXT: [[I_039:%.*]] = phi i32 [ [[INC15:%.*]], [[FOR_BODY]] ], [ 0, [[FOR_BODY_LR_PH]] ]
; CHECK-NEXT: [[INC15]] = add nuw nsw i32 [[I_039]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[INC15]], [[N]]
; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END16]], label [[FOR_BODY]]
; CHECK: for.end16:
; CHECK-NEXT: ret void
;
entry:
%a57 = bitcast i32* %a to i8*
%cmp38 = icmp sgt i32 %n, 0
br i1 %cmp38, label %for.body.lr.ph, label %for.end16
for.body.lr.ph: ; preds = %entry
%cmp636 = icmp sgt i32 %m, 0
br i1 %cmp636, label %for.body.us.us.preheader, label %for.body
for.body.us.us.preheader: ; preds = %for.body.lr.ph
%0 = shl nuw i32 %m, 2
%scevgep = getelementptr i32, i32* %a, i32 %m
%scevgep64 = getelementptr i32, i32* %b, i32 %m
%min.iters.check = icmp ult i32 %m, 4
%bound0 = icmp ugt i32* %scevgep64, %a
%bound1 = icmp ugt i32* %scevgep, %b
%found.conflict = and i1 %bound0, %bound1
%n.vec = and i32 %m, -4
%cmp.n = icmp eq i32 %n.vec, %m
br label %for.body.us.us
for.body.us.us: ; preds = %for.body.us.us.preheader, %for.cond5.for.end13_crit_edge.us.us
%i.039.us.us = phi i32 [ %inc15.us.us, %for.cond5.for.end13_crit_edge.us.us ], [ 0, %for.body.us.us.preheader ]
%1 = add i32 0, 0
%vla.us.us = alloca i32, i32 %call.us.us, align 4
%vla.us.us56 = bitcast i32* %vla.us.us to i8*
call void @llvm.memcpy.p0i8.p0i8.i32(i8* nonnull align 4 %vla.us.us56, i8* align 4 %a57, i32 %0, i1 false)
%brmerge = select i1 %min.iters.check, i1 true, i1 %found.conflict
br i1 %brmerge, label %for.body7.us.us.preheader, label %vector.body
vector.body: ; preds = %for.body.us.us, %vector.body
%index = phi i32 [ %index.next, %vector.body ], [ 0, %for.body.us.us ]
%2 = getelementptr inbounds i32, i32* %b, i32 %index
%3 = bitcast i32* %2 to <4 x i32>*
%wide.load = load <4 x i32>, <4 x i32>* %3, align 4
%4 = getelementptr inbounds i32, i32* %vla.us.us, <4 x i32> %wide.load
%wide.masked.gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %4, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
%5 = getelementptr inbounds i32, i32* %a, i32 %index
%6 = bitcast i32* %5 to <4 x i32>*
store <4 x i32> %wide.masked.gather, <4 x i32>* %6, align 4
%index.next = add nuw i32 %index, 4
%7 = icmp eq i32 %index.next, %n.vec
br i1 %7, label %middle.block, label %vector.body
middle.block: ; preds = %vector.body
br i1 %cmp.n, label %for.cond5.for.end13_crit_edge.us.us, label %for.body7.us.us.preheader
for.body7.us.us.preheader: ; preds = %for.body.us.us, %middle.block
%j.137.us.us.ph = phi i32 [ 0, %for.body.us.us ], [ %n.vec, %middle.block ]
br label %for.body7.us.us
for.body7.us.us: ; preds = %for.body7.us.us.preheader, %for.body7.us.us
%j.137.us.us = phi i32 [ %inc12.us.us, %for.body7.us.us ], [ %j.137.us.us.ph, %for.body7.us.us.preheader ]
%arrayidx8.us.us = getelementptr inbounds i32, i32* %b, i32 %j.137.us.us
%8 = load i32, i32* %arrayidx8.us.us, align 4
%arrayidx9.us.us = getelementptr inbounds i32, i32* %vla.us.us, i32 %8
%9 = load i32, i32* %arrayidx9.us.us, align 4
%arrayidx10.us.us = getelementptr inbounds i32, i32* %a, i32 %j.137.us.us
store i32 %9, i32* %arrayidx10.us.us, align 4
%inc12.us.us = add nuw nsw i32 %j.137.us.us, 1
%exitcond58.not = icmp eq i32 %inc12.us.us, %m
br i1 %exitcond58.not, label %for.cond5.for.end13_crit_edge.us.us, label %for.body7.us.us
for.cond5.for.end13_crit_edge.us.us: ; preds = %for.body7.us.us, %middle.block
%inc15.us.us = add nuw nsw i32 %i.039.us.us, 1
%exitcond59.not = icmp eq i32 %inc15.us.us, %n
br i1 %exitcond59.not, label %for.end16, label %for.body.us.us
for.body: ; preds = %for.body.lr.ph, %for.body
%i.039 = phi i32 [ %inc15, %for.body ], [ 0, %for.body.lr.ph ]
%inc15 = add nuw nsw i32 %i.039, 1
%exitcond.not = icmp eq i32 %inc15, %n
br i1 %exitcond.not, label %for.end16, label %for.body
for.end16: ; preds = %for.body, %for.cond5.for.end13_crit_edge.us.us, %entry
ret void
}
declare <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*>, i32, <4 x i1>, <4 x i32>)
declare void @llvm.memcpy.p0i8.p0i8.i32(i8*, i8*, i32, i1)