[LoopVectorizer] Lower uniform loads as a single load (instead of relying on CSE)

A uniform load is one whose address is uniform (identical) across all lanes. As currently implemented, we cost model such loads as a single scalar load plus a broadcast, but the actual lowering replicates the load once per lane.
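
For illustration, a minimal IR sketch of such a load (hypothetical function and value names; typed pointers to match the tests below) -- the pointer operand does not depend on the induction variable, so every lane reads the same address:

define i32 @sum_uniform(i32* %addr, i64 %n) {
entry:
  br label %loop
loop:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
  %acc = phi i32 [ 0, %entry ], [ %acc.next, %loop ]
  ; loop-invariant address: a uniform load
  %v = load i32, i32* %addr, align 4
  %acc.next = add i32 %acc, %v
  %iv.next = add nuw nsw i64 %iv, 1
  %done = icmp eq i64 %iv.next, %n
  br i1 %done, label %exit, label %loop
exit:
  ret i32 %acc.next
}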

This change tweaks the lowering to use the REPLICATE strategy by marking such loads (and the computation leading to their memory operand) as uniform after vectorization. This is a useful change in itself, but its real purpose is to pave the way for a follow-on change which will generalize our uniformity logic.
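
Roughly, at VF=4 this replaces the current per-lane replication with a single scalar load plus a broadcast, as in this sketch (illustrative names; it mirrors the test diffs below):

; before: one load per lane, then insertelements to rebuild the vector
  %l0 = load i32, i32* %addr, align 4
  %l1 = load i32, i32* %addr, align 4
  %l2 = load i32, i32* %addr, align 4
  %l3 = load i32, i32* %addr, align 4
; after: one load, splat to all lanes
  %l = load i32, i32* %addr, align 4
  %ins = insertelement <4 x i32> undef, i32 %l, i32 0
  %splat = shufflevector <4 x i32> %ins, <4 x i32> undef, <4 x i32> zeroinitializer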

In the review discussion, an issue was raised about coupling the cost model to the lowering strategy for uniform inputs. That discussion remains unsettled and is pending a larger architectural discussion. We decided to move forward with this patch as is, and revise as warranted once the bigger-picture design questions are settled.

Differential Revision: https://reviews.llvm.org/D91398
Philip Reames 2020-11-23 15:23:46 -08:00
parent f7d033f4d8
commit b06a2ad94f
4 changed files with 127 additions and 148 deletions


@@ -2661,7 +2661,12 @@ void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr, VPUser &User,
// Replace the operands of the cloned instructions with their scalar
// equivalents in the new loop.
for (unsigned op = 0, e = User.getNumOperands(); op != e; ++op) {
auto *NewOp = State.get(User.getOperand(op), Instance);
auto *Operand = dyn_cast<Instruction>(Instr->getOperand(op));
auto InputInstance = Instance;
if (!Operand || !OrigLoop->contains(Operand) ||
(Cost->isUniformAfterVectorization(Operand, State.VF)))
InputInstance.Lane = 0;
auto *NewOp = State.get(User.getOperand(op), InputInstance);
Cloned->setOperand(op, NewOp);
}
addNewMetadata(Cloned, Instr);
@@ -5031,6 +5036,11 @@ void LoopVectorizationCostModel::collectLoopUniforms(ElementCount VF) {
// replicating region where only a single instance out of VF should be formed.
// TODO: optimize such seldom cases if found important, see PR40816.
auto addToWorklistIfAllowed = [&](Instruction *I) -> void {
if (isOutOfScope(I)) {
LLVM_DEBUG(dbgs() << "LV: Found not uniform due to scope: "
<< *I << "\n");
return;
}
if (isScalarWithPredication(I, VF)) {
LLVM_DEBUG(dbgs() << "LV: Found not uniform being ScalarWithPredication: "
<< *I << "\n");
@@ -5051,16 +5061,25 @@ void LoopVectorizationCostModel::collectLoopUniforms(ElementCount VF) {
// are pointers that are treated like consecutive pointers during
// vectorization. The pointer operands of interleaved accesses are an
// example.
SmallSetVector<Instruction *, 8> ConsecutiveLikePtrs;
SmallSetVector<Value *, 8> ConsecutiveLikePtrs;
// Holds pointer operands of instructions that are possibly non-uniform.
SmallPtrSet<Instruction *, 8> PossibleNonUniformPtrs;
SmallPtrSet<Value *, 8> PossibleNonUniformPtrs;
auto isUniformDecision = [&](Instruction *I, ElementCount VF) {
InstWidening WideningDecision = getWideningDecision(I, VF);
assert(WideningDecision != CM_Unknown &&
"Widening decision should be ready at this moment");
// The address of a uniform mem op is itself uniform. We exclude stores
// here as there's an assumption in the current code that all uses of
// uniform instructions are uniform and, as noted below, uniform stores are
// still handled via replication (i.e. aren't uniform after vectorization).
if (isa<LoadInst>(I) && Legal->isUniformMemOp(*I)) {
assert(WideningDecision == CM_Scalarize);
return true;
}
return (WideningDecision == CM_Widen ||
WideningDecision == CM_Widen_Reverse ||
WideningDecision == CM_Interleave);
@@ -5076,10 +5095,21 @@ void LoopVectorizationCostModel::collectLoopUniforms(ElementCount VF) {
for (auto *BB : TheLoop->blocks())
for (auto &I : *BB) {
// If there's no pointer operand, there's nothing to do.
auto *Ptr = dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I));
auto *Ptr = getLoadStorePointerOperand(&I);
if (!Ptr)
continue;
// For now, avoid walking use lists in other functions.
// TODO: Rewrite this algorithm from uses up.
if (!isa<Instruction>(Ptr) && !isa<Argument>(Ptr))
continue;
// A uniform memory op is itself uniform. We exclude stores here as we
// haven't yet added dedicated logic in the CLONE path and rely on
// REPLICATE + DSE for correctness.
if (isa<LoadInst>(I) && Legal->isUniformMemOp(I))
addToWorklistIfAllowed(&I);
// True if all users of Ptr are memory accesses that have Ptr as their
// pointer operand.
auto UsersAreMemAccesses =
@@ -5105,7 +5135,8 @@ void LoopVectorizationCostModel::collectLoopUniforms(ElementCount VF) {
// aren't also identified as possibly non-uniform.
for (auto *V : ConsecutiveLikePtrs)
if (!PossibleNonUniformPtrs.count(V))
addToWorklistIfAllowed(V);
if (auto *I = dyn_cast<Instruction>(V))
addToWorklistIfAllowed(I);
// Expand Worklist in topological order: whenever a new instruction
// is added , its users should be already inside Worklist. It ensures
@@ -6228,6 +6259,8 @@ unsigned LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I,
unsigned LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I,
ElementCount VF) {
assert(Legal->isUniformMemOp(*I));
Type *ValTy = getMemInstValueType(I);
auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
const Align Alignment = getLoadStoreAlignment(I);


@@ -28,34 +28,29 @@ define void @cff_index_load_offsets(i1 %cond, i8 %x, i8* %p) #0 {
; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, i8* null, i64 [[TMP1]]
; CHECK-NEXT: [[TMP2:%.*]] = zext <4 x i8> [[BROADCAST_SPLAT]] to <4 x i32>
; CHECK-NEXT: [[TMP3:%.*]] = shl nuw <4 x i32> [[TMP2]], <i32 24, i32 24, i32 24, i32 24>
; CHECK-NEXT: [[TMP4:%.*]] = load i8, i8* [[P:%.*]], align 1, !tbaa !1
; CHECK-NEXT: [[TMP5:%.*]] = load i8, i8* [[P]], align 1, !tbaa !1
; CHECK-NEXT: [[TMP6:%.*]] = load i8, i8* [[P]], align 1, !tbaa !1
; CHECK-NEXT: [[TMP7:%.*]] = load i8, i8* [[P]], align 1, !tbaa !1
; CHECK-NEXT: [[TMP8:%.*]] = insertelement <4 x i8> undef, i8 [[TMP4]], i32 0
; CHECK-NEXT: [[TMP9:%.*]] = insertelement <4 x i8> [[TMP8]], i8 [[TMP5]], i32 1
; CHECK-NEXT: [[TMP10:%.*]] = insertelement <4 x i8> [[TMP9]], i8 [[TMP6]], i32 2
; CHECK-NEXT: [[TMP11:%.*]] = insertelement <4 x i8> [[TMP10]], i8 [[TMP7]], i32 3
; CHECK-NEXT: [[TMP12:%.*]] = zext <4 x i8> [[TMP11]] to <4 x i32>
; CHECK-NEXT: [[TMP13:%.*]] = shl nuw nsw <4 x i32> [[TMP12]], <i32 16, i32 16, i32 16, i32 16>
; CHECK-NEXT: [[TMP14:%.*]] = or <4 x i32> [[TMP13]], [[TMP3]]
; CHECK-NEXT: [[TMP15:%.*]] = load i8, i8* undef, align 1, !tbaa !1
; CHECK-NEXT: [[TMP16:%.*]] = load i8, i8* undef, align 1, !tbaa !1
; CHECK-NEXT: [[TMP17:%.*]] = load i8, i8* undef, align 1, !tbaa !1
; CHECK-NEXT: [[TMP18:%.*]] = load i8, i8* undef, align 1, !tbaa !1
; CHECK-NEXT: [[TMP19:%.*]] = or <4 x i32> [[TMP14]], zeroinitializer
; CHECK-NEXT: [[TMP20:%.*]] = or <4 x i32> [[TMP19]], zeroinitializer
; CHECK-NEXT: [[TMP21:%.*]] = extractelement <4 x i32> [[TMP20]], i32 0
; CHECK-NEXT: store i32 [[TMP21]], i32* undef, align 4, !tbaa !4
; CHECK-NEXT: [[TMP22:%.*]] = extractelement <4 x i32> [[TMP20]], i32 1
; CHECK-NEXT: store i32 [[TMP22]], i32* undef, align 4, !tbaa !4
; CHECK-NEXT: [[TMP23:%.*]] = extractelement <4 x i32> [[TMP20]], i32 2
; CHECK-NEXT: store i32 [[TMP23]], i32* undef, align 4, !tbaa !4
; CHECK-NEXT: [[TMP24:%.*]] = extractelement <4 x i32> [[TMP20]], i32 3
; CHECK-NEXT: store i32 [[TMP24]], i32* undef, align 4, !tbaa !4
; CHECK-NEXT: [[TMP4:%.*]] = load i8, i8* [[P:%.*]], align 1, [[TBAA1:!tbaa !.*]]
; CHECK-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <4 x i8> undef, i8 [[TMP4]], i32 0
; CHECK-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <4 x i8> [[BROADCAST_SPLATINSERT1]], <4 x i8> undef, <4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP5:%.*]] = zext <4 x i8> [[BROADCAST_SPLAT2]] to <4 x i32>
; CHECK-NEXT: [[TMP6:%.*]] = shl nuw nsw <4 x i32> [[TMP5]], <i32 16, i32 16, i32 16, i32 16>
; CHECK-NEXT: [[TMP7:%.*]] = or <4 x i32> [[TMP6]], [[TMP3]]
; CHECK-NEXT: [[TMP8:%.*]] = load i8, i8* undef, align 1, [[TBAA1]]
; CHECK-NEXT: [[TMP9:%.*]] = load i8, i8* undef, align 1, [[TBAA1]]
; CHECK-NEXT: [[TMP10:%.*]] = load i8, i8* undef, align 1, [[TBAA1]]
; CHECK-NEXT: [[TMP11:%.*]] = load i8, i8* undef, align 1, [[TBAA1]]
; CHECK-NEXT: [[TMP12:%.*]] = or <4 x i32> [[TMP7]], zeroinitializer
; CHECK-NEXT: [[TMP13:%.*]] = or <4 x i32> [[TMP12]], zeroinitializer
; CHECK-NEXT: [[TMP14:%.*]] = extractelement <4 x i32> [[TMP13]], i32 0
; CHECK-NEXT: store i32 [[TMP14]], i32* undef, align 4, [[TBAA4:!tbaa !.*]]
; CHECK-NEXT: [[TMP15:%.*]] = extractelement <4 x i32> [[TMP13]], i32 1
; CHECK-NEXT: store i32 [[TMP15]], i32* undef, align 4, [[TBAA4]]
; CHECK-NEXT: [[TMP16:%.*]] = extractelement <4 x i32> [[TMP13]], i32 2
; CHECK-NEXT: store i32 [[TMP16]], i32* undef, align 4, [[TBAA4]]
; CHECK-NEXT: [[TMP17:%.*]] = extractelement <4 x i32> [[TMP13]], i32 3
; CHECK-NEXT: store i32 [[TMP17]], i32* undef, align 4, [[TBAA4]]
; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP25:%.*]] = icmp eq i64 [[INDEX_NEXT]], 0
; CHECK-NEXT: br i1 [[TMP25]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop !6
; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], 0
; CHECK-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], [[LOOP6:!llvm.loop !.*]]
; CHECK: middle.block:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1, 0
; CHECK-NEXT: br i1 [[CMP_N]], label [[SW_EPILOG:%.*]], label [[SCALAR_PH]]
@@ -66,19 +61,19 @@ define void @cff_index_load_offsets(i1 %cond, i8 %x, i8* %p) #0 {
; CHECK-NEXT: [[P_359:%.*]] = phi i8* [ [[ADD_PTR86:%.*]], [[FOR_BODY68]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[CONV70:%.*]] = zext i8 [[X]] to i32
; CHECK-NEXT: [[SHL71:%.*]] = shl nuw i32 [[CONV70]], 24
; CHECK-NEXT: [[TMP26:%.*]] = load i8, i8* [[P]], align 1, !tbaa !1
; CHECK-NEXT: [[CONV73:%.*]] = zext i8 [[TMP26]] to i32
; CHECK-NEXT: [[TMP19:%.*]] = load i8, i8* [[P]], align 1, [[TBAA1]]
; CHECK-NEXT: [[CONV73:%.*]] = zext i8 [[TMP19]] to i32
; CHECK-NEXT: [[SHL74:%.*]] = shl nuw nsw i32 [[CONV73]], 16
; CHECK-NEXT: [[OR75:%.*]] = or i32 [[SHL74]], [[SHL71]]
; CHECK-NEXT: [[TMP27:%.*]] = load i8, i8* undef, align 1, !tbaa !1
; CHECK-NEXT: [[TMP20:%.*]] = load i8, i8* undef, align 1, [[TBAA1]]
; CHECK-NEXT: [[SHL78:%.*]] = shl nuw nsw i32 undef, 8
; CHECK-NEXT: [[OR79:%.*]] = or i32 [[OR75]], [[SHL78]]
; CHECK-NEXT: [[CONV81:%.*]] = zext i8 undef to i32
; CHECK-NEXT: [[OR83:%.*]] = or i32 [[OR79]], [[CONV81]]
; CHECK-NEXT: store i32 [[OR83]], i32* undef, align 4, !tbaa !4
; CHECK-NEXT: store i32 [[OR83]], i32* undef, align 4, [[TBAA4]]
; CHECK-NEXT: [[ADD_PTR86]] = getelementptr inbounds i8, i8* [[P_359]], i64 4
; CHECK-NEXT: [[CMP66:%.*]] = icmp ult i8* [[ADD_PTR86]], undef
; CHECK-NEXT: br i1 [[CMP66]], label [[FOR_BODY68]], label [[SW_EPILOG]], !llvm.loop !8
; CHECK-NEXT: br i1 [[CMP66]], label [[FOR_BODY68]], label [[SW_EPILOG]], [[LOOP8:!llvm.loop !.*]]
; CHECK: sw.epilog:
; CHECK-NEXT: unreachable
; CHECK: Exit:


@@ -24,21 +24,9 @@ define i32 @uniform_load(i32* align(4) %addr) {
; CHECK-NEXT: [[TMP5:%.*]] = load i32, i32* [[ADDR]], align 4
; CHECK-NEXT: [[TMP6:%.*]] = load i32, i32* [[ADDR]], align 4
; CHECK-NEXT: [[TMP7:%.*]] = load i32, i32* [[ADDR]], align 4
; CHECK-NEXT: [[TMP8:%.*]] = load i32, i32* [[ADDR]], align 4
; CHECK-NEXT: [[TMP9:%.*]] = load i32, i32* [[ADDR]], align 4
; CHECK-NEXT: [[TMP10:%.*]] = load i32, i32* [[ADDR]], align 4
; CHECK-NEXT: [[TMP11:%.*]] = load i32, i32* [[ADDR]], align 4
; CHECK-NEXT: [[TMP12:%.*]] = load i32, i32* [[ADDR]], align 4
; CHECK-NEXT: [[TMP13:%.*]] = load i32, i32* [[ADDR]], align 4
; CHECK-NEXT: [[TMP14:%.*]] = load i32, i32* [[ADDR]], align 4
; CHECK-NEXT: [[TMP15:%.*]] = load i32, i32* [[ADDR]], align 4
; CHECK-NEXT: [[TMP16:%.*]] = load i32, i32* [[ADDR]], align 4
; CHECK-NEXT: [[TMP17:%.*]] = load i32, i32* [[ADDR]], align 4
; CHECK-NEXT: [[TMP18:%.*]] = load i32, i32* [[ADDR]], align 4
; CHECK-NEXT: [[TMP19:%.*]] = load i32, i32* [[ADDR]], align 4
; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 16
; CHECK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], 4096
; CHECK-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], [[LOOP0:!llvm.loop !.*]]
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 4096
; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], [[LOOP0:!llvm.loop !.*]]
; CHECK: middle.block:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 4097, 4096
; CHECK-NEXT: br i1 [[CMP_N]], label [[LOOPEXIT:%.*]], label [[SCALAR_PH]]
@@ -52,7 +40,7 @@ define i32 @uniform_load(i32* align(4) %addr) {
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV]], 4096
; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOPEXIT]], label [[FOR_BODY]], [[LOOP2:!llvm.loop !.*]]
; CHECK: loopexit:
; CHECK-NEXT: [[LOAD_LCSSA:%.*]] = phi i32 [ [[LOAD]], [[FOR_BODY]] ], [ [[TMP19]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT: [[LOAD_LCSSA:%.*]] = phi i32 [ [[LOAD]], [[FOR_BODY]] ], [ [[TMP7]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret i32 [[LOAD_LCSSA]]
;
entry:
@@ -77,63 +65,43 @@ define i32 @uniform_load2(i32* align(4) %addr) {
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP36:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP37:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI2:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP38:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI3:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP39:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP8:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP9:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI2:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP10:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI3:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP11:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 0
; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[INDEX]], 8
; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[INDEX]], 12
; CHECK-NEXT: [[TMP4:%.*]] = load i32, i32* [[ADDR:%.*]], align 4
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> undef, i32 [[TMP4]], i32 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> undef, <4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP5:%.*]] = load i32, i32* [[ADDR]], align 4
; CHECK-NEXT: [[BROADCAST_SPLATINSERT4:%.*]] = insertelement <4 x i32> undef, i32 [[TMP5]], i32 0
; CHECK-NEXT: [[BROADCAST_SPLAT5:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT4]], <4 x i32> undef, <4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP6:%.*]] = load i32, i32* [[ADDR]], align 4
; CHECK-NEXT: [[BROADCAST_SPLATINSERT6:%.*]] = insertelement <4 x i32> undef, i32 [[TMP6]], i32 0
; CHECK-NEXT: [[BROADCAST_SPLAT7:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT6]], <4 x i32> undef, <4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP7:%.*]] = load i32, i32* [[ADDR]], align 4
; CHECK-NEXT: [[TMP8:%.*]] = insertelement <4 x i32> undef, i32 [[TMP4]], i32 0
; CHECK-NEXT: [[TMP9:%.*]] = insertelement <4 x i32> [[TMP8]], i32 [[TMP5]], i32 1
; CHECK-NEXT: [[TMP10:%.*]] = insertelement <4 x i32> [[TMP9]], i32 [[TMP6]], i32 2
; CHECK-NEXT: [[TMP11:%.*]] = insertelement <4 x i32> [[TMP10]], i32 [[TMP7]], i32 3
; CHECK-NEXT: [[TMP12:%.*]] = load i32, i32* [[ADDR]], align 4
; CHECK-NEXT: [[TMP13:%.*]] = load i32, i32* [[ADDR]], align 4
; CHECK-NEXT: [[TMP14:%.*]] = load i32, i32* [[ADDR]], align 4
; CHECK-NEXT: [[TMP15:%.*]] = load i32, i32* [[ADDR]], align 4
; CHECK-NEXT: [[TMP16:%.*]] = insertelement <4 x i32> undef, i32 [[TMP12]], i32 0
; CHECK-NEXT: [[TMP17:%.*]] = insertelement <4 x i32> [[TMP16]], i32 [[TMP13]], i32 1
; CHECK-NEXT: [[TMP18:%.*]] = insertelement <4 x i32> [[TMP17]], i32 [[TMP14]], i32 2
; CHECK-NEXT: [[TMP19:%.*]] = insertelement <4 x i32> [[TMP18]], i32 [[TMP15]], i32 3
; CHECK-NEXT: [[TMP20:%.*]] = load i32, i32* [[ADDR]], align 4
; CHECK-NEXT: [[TMP21:%.*]] = load i32, i32* [[ADDR]], align 4
; CHECK-NEXT: [[TMP22:%.*]] = load i32, i32* [[ADDR]], align 4
; CHECK-NEXT: [[TMP23:%.*]] = load i32, i32* [[ADDR]], align 4
; CHECK-NEXT: [[TMP24:%.*]] = insertelement <4 x i32> undef, i32 [[TMP20]], i32 0
; CHECK-NEXT: [[TMP25:%.*]] = insertelement <4 x i32> [[TMP24]], i32 [[TMP21]], i32 1
; CHECK-NEXT: [[TMP26:%.*]] = insertelement <4 x i32> [[TMP25]], i32 [[TMP22]], i32 2
; CHECK-NEXT: [[TMP27:%.*]] = insertelement <4 x i32> [[TMP26]], i32 [[TMP23]], i32 3
; CHECK-NEXT: [[TMP28:%.*]] = load i32, i32* [[ADDR]], align 4
; CHECK-NEXT: [[TMP29:%.*]] = load i32, i32* [[ADDR]], align 4
; CHECK-NEXT: [[TMP30:%.*]] = load i32, i32* [[ADDR]], align 4
; CHECK-NEXT: [[TMP31:%.*]] = load i32, i32* [[ADDR]], align 4
; CHECK-NEXT: [[TMP32:%.*]] = insertelement <4 x i32> undef, i32 [[TMP28]], i32 0
; CHECK-NEXT: [[TMP33:%.*]] = insertelement <4 x i32> [[TMP32]], i32 [[TMP29]], i32 1
; CHECK-NEXT: [[TMP34:%.*]] = insertelement <4 x i32> [[TMP33]], i32 [[TMP30]], i32 2
; CHECK-NEXT: [[TMP35:%.*]] = insertelement <4 x i32> [[TMP34]], i32 [[TMP31]], i32 3
; CHECK-NEXT: [[TMP36]] = add <4 x i32> [[VEC_PHI]], [[TMP11]]
; CHECK-NEXT: [[TMP37]] = add <4 x i32> [[VEC_PHI1]], [[TMP19]]
; CHECK-NEXT: [[TMP38]] = add <4 x i32> [[VEC_PHI2]], [[TMP27]]
; CHECK-NEXT: [[TMP39]] = add <4 x i32> [[VEC_PHI3]], [[TMP35]]
; CHECK-NEXT: [[BROADCAST_SPLATINSERT8:%.*]] = insertelement <4 x i32> undef, i32 [[TMP7]], i32 0
; CHECK-NEXT: [[BROADCAST_SPLAT9:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT8]], <4 x i32> undef, <4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP8]] = add <4 x i32> [[VEC_PHI]], [[BROADCAST_SPLAT]]
; CHECK-NEXT: [[TMP9]] = add <4 x i32> [[VEC_PHI1]], [[BROADCAST_SPLAT5]]
; CHECK-NEXT: [[TMP10]] = add <4 x i32> [[VEC_PHI2]], [[BROADCAST_SPLAT7]]
; CHECK-NEXT: [[TMP11]] = add <4 x i32> [[VEC_PHI3]], [[BROADCAST_SPLAT9]]
; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 16
; CHECK-NEXT: [[TMP40:%.*]] = icmp eq i64 [[INDEX_NEXT]], 4096
; CHECK-NEXT: br i1 [[TMP40]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], [[LOOP4:!llvm.loop !.*]]
; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], 4096
; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], [[LOOP4:!llvm.loop !.*]]
; CHECK: middle.block:
; CHECK-NEXT: [[BIN_RDX:%.*]] = add <4 x i32> [[TMP37]], [[TMP36]]
; CHECK-NEXT: [[BIN_RDX4:%.*]] = add <4 x i32> [[TMP38]], [[BIN_RDX]]
; CHECK-NEXT: [[BIN_RDX5:%.*]] = add <4 x i32> [[TMP39]], [[BIN_RDX4]]
; CHECK-NEXT: [[TMP41:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX5]])
; CHECK-NEXT: [[BIN_RDX:%.*]] = add <4 x i32> [[TMP9]], [[TMP8]]
; CHECK-NEXT: [[BIN_RDX10:%.*]] = add <4 x i32> [[TMP10]], [[BIN_RDX]]
; CHECK-NEXT: [[BIN_RDX11:%.*]] = add <4 x i32> [[TMP11]], [[BIN_RDX10]]
; CHECK-NEXT: [[TMP13:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX11]])
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 4097, 4096
; CHECK-NEXT: br i1 [[CMP_N]], label [[LOOPEXIT:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 4096, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[TMP41]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[TMP13]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
@@ -144,7 +112,7 @@ define i32 @uniform_load2(i32* align(4) %addr) {
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV]], 4096
; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOPEXIT]], label [[FOR_BODY]], [[LOOP5:!llvm.loop !.*]]
; CHECK: loopexit:
; CHECK-NEXT: [[ACCUM_NEXT_LCSSA:%.*]] = phi i32 [ [[ACCUM_NEXT]], [[FOR_BODY]] ], [ [[TMP41]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT: [[ACCUM_NEXT_LCSSA:%.*]] = phi i32 [ [[ACCUM_NEXT]], [[FOR_BODY]] ], [ [[TMP13]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret i32 [[ACCUM_NEXT_LCSSA]]
;
entry:
@@ -383,37 +351,25 @@ define void @uniform_copy(i32* %A, i32* %B) {
; CHECK-NEXT: [[TMP5:%.*]] = load i32, i32* [[A]], align 4, !alias.scope !10
; CHECK-NEXT: [[TMP6:%.*]] = load i32, i32* [[A]], align 4, !alias.scope !10
; CHECK-NEXT: [[TMP7:%.*]] = load i32, i32* [[A]], align 4, !alias.scope !10
; CHECK-NEXT: [[TMP8:%.*]] = load i32, i32* [[A]], align 4, !alias.scope !10
; CHECK-NEXT: [[TMP9:%.*]] = load i32, i32* [[A]], align 4, !alias.scope !10
; CHECK-NEXT: [[TMP10:%.*]] = load i32, i32* [[A]], align 4, !alias.scope !10
; CHECK-NEXT: [[TMP11:%.*]] = load i32, i32* [[A]], align 4, !alias.scope !10
; CHECK-NEXT: [[TMP12:%.*]] = load i32, i32* [[A]], align 4, !alias.scope !10
; CHECK-NEXT: [[TMP13:%.*]] = load i32, i32* [[A]], align 4, !alias.scope !10
; CHECK-NEXT: [[TMP14:%.*]] = load i32, i32* [[A]], align 4, !alias.scope !10
; CHECK-NEXT: [[TMP15:%.*]] = load i32, i32* [[A]], align 4, !alias.scope !10
; CHECK-NEXT: [[TMP16:%.*]] = load i32, i32* [[A]], align 4, !alias.scope !10
; CHECK-NEXT: [[TMP17:%.*]] = load i32, i32* [[A]], align 4, !alias.scope !10
; CHECK-NEXT: [[TMP18:%.*]] = load i32, i32* [[A]], align 4, !alias.scope !10
; CHECK-NEXT: [[TMP19:%.*]] = load i32, i32* [[A]], align 4, !alias.scope !10
; CHECK-NEXT: store i32 [[TMP4]], i32* [[B]], align 4, !alias.scope !13, !noalias !10
; CHECK-NEXT: store i32 [[TMP4]], i32* [[B]], align 4, !alias.scope !13, !noalias !10
; CHECK-NEXT: store i32 [[TMP4]], i32* [[B]], align 4, !alias.scope !13, !noalias !10
; CHECK-NEXT: store i32 [[TMP4]], i32* [[B]], align 4, !alias.scope !13, !noalias !10
; CHECK-NEXT: store i32 [[TMP5]], i32* [[B]], align 4, !alias.scope !13, !noalias !10
; CHECK-NEXT: store i32 [[TMP5]], i32* [[B]], align 4, !alias.scope !13, !noalias !10
; CHECK-NEXT: store i32 [[TMP5]], i32* [[B]], align 4, !alias.scope !13, !noalias !10
; CHECK-NEXT: store i32 [[TMP5]], i32* [[B]], align 4, !alias.scope !13, !noalias !10
; CHECK-NEXT: store i32 [[TMP6]], i32* [[B]], align 4, !alias.scope !13, !noalias !10
; CHECK-NEXT: store i32 [[TMP6]], i32* [[B]], align 4, !alias.scope !13, !noalias !10
; CHECK-NEXT: store i32 [[TMP6]], i32* [[B]], align 4, !alias.scope !13, !noalias !10
; CHECK-NEXT: store i32 [[TMP6]], i32* [[B]], align 4, !alias.scope !13, !noalias !10
; CHECK-NEXT: store i32 [[TMP7]], i32* [[B]], align 4, !alias.scope !13, !noalias !10
; CHECK-NEXT: store i32 [[TMP8]], i32* [[B]], align 4, !alias.scope !13, !noalias !10
; CHECK-NEXT: store i32 [[TMP9]], i32* [[B]], align 4, !alias.scope !13, !noalias !10
; CHECK-NEXT: store i32 [[TMP10]], i32* [[B]], align 4, !alias.scope !13, !noalias !10
; CHECK-NEXT: store i32 [[TMP11]], i32* [[B]], align 4, !alias.scope !13, !noalias !10
; CHECK-NEXT: store i32 [[TMP12]], i32* [[B]], align 4, !alias.scope !13, !noalias !10
; CHECK-NEXT: store i32 [[TMP13]], i32* [[B]], align 4, !alias.scope !13, !noalias !10
; CHECK-NEXT: store i32 [[TMP14]], i32* [[B]], align 4, !alias.scope !13, !noalias !10
; CHECK-NEXT: store i32 [[TMP15]], i32* [[B]], align 4, !alias.scope !13, !noalias !10
; CHECK-NEXT: store i32 [[TMP16]], i32* [[B]], align 4, !alias.scope !13, !noalias !10
; CHECK-NEXT: store i32 [[TMP17]], i32* [[B]], align 4, !alias.scope !13, !noalias !10
; CHECK-NEXT: store i32 [[TMP18]], i32* [[B]], align 4, !alias.scope !13, !noalias !10
; CHECK-NEXT: store i32 [[TMP19]], i32* [[B]], align 4, !alias.scope !13, !noalias !10
; CHECK-NEXT: store i32 [[TMP7]], i32* [[B]], align 4, !alias.scope !13, !noalias !10
; CHECK-NEXT: store i32 [[TMP7]], i32* [[B]], align 4, !alias.scope !13, !noalias !10
; CHECK-NEXT: store i32 [[TMP7]], i32* [[B]], align 4, !alias.scope !13, !noalias !10
; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 16
; CHECK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], 4096
; CHECK-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], [[LOOP15:!llvm.loop !.*]]
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 4096
; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], [[LOOP15:!llvm.loop !.*]]
; CHECK: middle.block:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 4097, 4096
; CHECK-NEXT: br i1 [[CMP_N]], label [[LOOPEXIT:%.*]], label [[SCALAR_PH]]


@@ -68,24 +68,19 @@ define void @Test(%struct.s* nocapture %obj, i64 %z) #0 {
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i32* [[TMP4]] to <4 x i32>*
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, <4 x i32>* [[TMP5]], align 4, !alias.scope !0
; CHECK-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP1]], align 4, !alias.scope !3
; CHECK-NEXT: [[TMP7:%.*]] = load i32, i32* [[TMP1]], align 4, !alias.scope !3
; CHECK-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP1]], align 4, !alias.scope !3
; CHECK-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP1]], align 4, !alias.scope !3
; CHECK-NEXT: [[TMP10:%.*]] = insertelement <4 x i32> undef, i32 [[TMP6]], i32 0
; CHECK-NEXT: [[TMP11:%.*]] = insertelement <4 x i32> [[TMP10]], i32 [[TMP7]], i32 1
; CHECK-NEXT: [[TMP12:%.*]] = insertelement <4 x i32> [[TMP11]], i32 [[TMP8]], i32 2
; CHECK-NEXT: [[TMP13:%.*]] = insertelement <4 x i32> [[TMP12]], i32 [[TMP9]], i32 3
; CHECK-NEXT: [[TMP14:%.*]] = add nsw <4 x i32> [[TMP13]], [[WIDE_LOAD]]
; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.s* [[OBJ]], i64 0, i32 2, i64 [[I]], i64 [[TMP2]]
; CHECK-NEXT: [[TMP16:%.*]] = getelementptr inbounds i32, i32* [[TMP15]], i32 0
; CHECK-NEXT: [[TMP17:%.*]] = bitcast i32* [[TMP16]] to <4 x i32>*
; CHECK-NEXT: [[WIDE_LOAD12:%.*]] = load <4 x i32>, <4 x i32>* [[TMP17]], align 4, !alias.scope !5, !noalias !7
; CHECK-NEXT: [[TMP18:%.*]] = add nsw <4 x i32> [[TMP14]], [[WIDE_LOAD12]]
; CHECK-NEXT: [[TMP19:%.*]] = bitcast i32* [[TMP16]] to <4 x i32>*
; CHECK-NEXT: store <4 x i32> [[TMP18]], <4 x i32>* [[TMP19]], align 4, !alias.scope !5, !noalias !7
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> undef, i32 [[TMP6]], i32 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> undef, <4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP7:%.*]] = add nsw <4 x i32> [[BROADCAST_SPLAT]], [[WIDE_LOAD]]
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.s* [[OBJ]], i64 0, i32 2, i64 [[I]], i64 [[TMP2]]
; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, i32* [[TMP8]], i32 0
; CHECK-NEXT: [[TMP10:%.*]] = bitcast i32* [[TMP9]] to <4 x i32>*
; CHECK-NEXT: [[WIDE_LOAD12:%.*]] = load <4 x i32>, <4 x i32>* [[TMP10]], align 4, !alias.scope !5, !noalias !7
; CHECK-NEXT: [[TMP11:%.*]] = add nsw <4 x i32> [[TMP7]], [[WIDE_LOAD12]]
; CHECK-NEXT: [[TMP12:%.*]] = bitcast i32* [[TMP9]] to <4 x i32>*
; CHECK-NEXT: store <4 x i32> [[TMP11]], <4 x i32>* [[TMP12]], align 4, !alias.scope !5, !noalias !7
; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop !8
; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], [[LOOP8:!llvm.loop !.*]]
; CHECK: middle.block:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[Z]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[DOTOUTER]], label [[SCALAR_PH]]
@@ -100,17 +95,17 @@ define void @Test(%struct.s* nocapture %obj, i64 %z) #0 {
; CHECK-NEXT: br i1 [[EXITCOND_OUTER]], label [[DOTEXIT:%.*]], label [[DOTOUTER_PREHEADER]]
; CHECK: .inner:
; CHECK-NEXT: [[J:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[J_NEXT:%.*]], [[DOTINNER]] ]
; CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.s* [[OBJ]], i64 0, i32 0, i64 [[J]]
; CHECK-NEXT: [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
; CHECK-NEXT: [[TMP23:%.*]] = load i32, i32* [[TMP1]], align 4
; CHECK-NEXT: [[TMP24:%.*]] = add nsw i32 [[TMP23]], [[TMP22]]
; CHECK-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.s* [[OBJ]], i64 0, i32 2, i64 [[I]], i64 [[J]]
; CHECK-NEXT: [[TMP26:%.*]] = load i32, i32* [[TMP25]], align 4
; CHECK-NEXT: [[TMP27:%.*]] = add nsw i32 [[TMP24]], [[TMP26]]
; CHECK-NEXT: store i32 [[TMP27]], i32* [[TMP25]]
; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.s* [[OBJ]], i64 0, i32 0, i64 [[J]]
; CHECK-NEXT: [[TMP15:%.*]] = load i32, i32* [[TMP14]], align 4
; CHECK-NEXT: [[TMP16:%.*]] = load i32, i32* [[TMP1]], align 4
; CHECK-NEXT: [[TMP17:%.*]] = add nsw i32 [[TMP16]], [[TMP15]]
; CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.s* [[OBJ]], i64 0, i32 2, i64 [[I]], i64 [[J]]
; CHECK-NEXT: [[TMP19:%.*]] = load i32, i32* [[TMP18]], align 4
; CHECK-NEXT: [[TMP20:%.*]] = add nsw i32 [[TMP17]], [[TMP19]]
; CHECK-NEXT: store i32 [[TMP20]], i32* [[TMP18]], align 4
; CHECK-NEXT: [[J_NEXT]] = add nuw nsw i64 [[J]], 1
; CHECK-NEXT: [[EXITCOND_INNER:%.*]] = icmp eq i64 [[J_NEXT]], [[Z]]
; CHECK-NEXT: br i1 [[EXITCOND_INNER]], label [[DOTOUTER]], label [[DOTINNER]], !llvm.loop !10
; CHECK-NEXT: br i1 [[EXITCOND_INNER]], label [[DOTOUTER]], label [[DOTINNER]], [[LOOP10:!llvm.loop !.*]]
;
br label %.outer.preheader