[SLP] Distinguish "demanded and shrinkable" from "demanded and not shrinkable" values when determining the minimum bitwidth
We use two approaches for determining the minimum bitwidth:

* Demanded bits
* Value tracking

If demanded bits doesn't result in a narrower type, we then try value tracking. We need this if we want to root SLP trees with the indices of getelementptr instructions, since all the bits of the indices are demanded.

But there is a missing piece: we need to be able to distinguish "demanded and shrinkable" from "demanded and not shrinkable". For example, the bits of %i in

%i = sext i32 %e1 to i64
%gep = getelementptr inbounds i64, i64* %p, i64 %i

are demanded, but we can shrink %i's type to i32 because it won't change the result of the getelementptr. On the other hand, in

%tmp15 = sext i32 %tmp14 to i64
%tmp16 = insertvalue { i64, i64 } undef, i64 %tmp15, 0

it doesn't make sense to shrink %tmp15, and we can skip the value tracking.

Ideas are from Matthew Simpson!

Differential Revision: https://reviews.llvm.org/D44868

llvm-svn: 329035
commit 7f0daaeb86 (parent 7d6131a898)
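As a standalone illustration of the gate this patch adds (a minimal sketch; the helper name rootsOnlyFeedGEPIndices and the free-standing form are hypothetical, not part of SLPVectorizer.cpp):

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

// "Demanded and shrinkable": every root has exactly one user and that user is
// a getelementptr, so even though all index bits are demanded, the index can
// be recomputed in a narrower type and sign-extended without changing the
// resulting address. Only then is the value-tracking fallback worth trying.
static bool rootsOnlyFeedGEPIndices(ArrayRef<Value *> Roots) {
  return all_of(Roots, [](Value *R) {
    return R->hasOneUse() && isa<GetElementPtrInst>(R->user_back());
  });
}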
@@ -4430,24 +4430,9 @@ void BoUpSLP::computeMinimumValueSizes() {
   // additional roots that require investigating in Roots.
   SmallVector<Value *, 32> ToDemote;
   SmallVector<Value *, 4> Roots;
-  for (auto *Root : TreeRoot) {
-    // Do not include top zext/sext/trunc operations to those to be demoted, it
-    // produces noise cast<vect>, trunc <vect>, exctract <vect>, cast <extract>
-    // sequence.
-    if (isa<Constant>(Root))
-      continue;
-    auto *I = dyn_cast<Instruction>(Root);
-    if (!I || !I->hasOneUse() || !Expr.count(I))
-      return;
-    if (isa<ZExtInst>(I) || isa<SExtInst>(I))
-      continue;
-    if (auto *TI = dyn_cast<TruncInst>(I)) {
-      Roots.push_back(TI->getOperand(0));
-      continue;
-    }
+  for (auto *Root : TreeRoot)
     if (!collectValuesToDemote(Root, Expr, ToDemote, Roots))
       return;
-  }
 
   // The maximum bit width required to represent all the values that can be
   // demoted without loss of precision. It would be safe to truncate the roots
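For context, the new loop above hands every root straight to collectValuesToDemote, which decides whether the expression rooted there can be recomputed in a narrower type. A heavily simplified conceptual sketch of that kind of walk is below (illustrative only; the actual collectValuesToDemote also checks tree membership and use counts and handles further instruction kinds):

#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

// A value can be demoted when its low bits are fully determined by the low
// bits of its operands, so the whole expression can be evaluated in a
// narrower type and extended once at the root.
static bool canDemote(Value *V, SmallPtrSetImpl<Value *> &Visited) {
  if (!Visited.insert(V).second)
    return true;
  if (isa<Constant>(V))
    return true;
  auto *I = dyn_cast<Instruction>(V);
  if (!I)
    return false;
  switch (I->getOpcode()) {
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
    // Casts only move between widths; in this sketch they terminate the walk.
    return true;
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
    // Low bits of the result depend only on low bits of the operands.
    return canDemote(I->getOperand(0), Visited) &&
           canDemote(I->getOperand(1), Visited);
  default:
    return false;
  }
}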
@@ -4476,7 +4461,11 @@ void BoUpSLP::computeMinimumValueSizes() {
   // We start by looking at each entry that can be demoted. We compute the
   // maximum bit width required to store the scalar by using ValueTracking to
   // compute the number of high-order bits we can truncate.
-  if (MaxBitWidth == DL->getTypeSizeInBits(TreeRoot[0]->getType())) {
+  if (MaxBitWidth == DL->getTypeSizeInBits(TreeRoot[0]->getType()) &&
+      llvm::all_of(TreeRoot, [](Value *R) {
+        assert(R->hasOneUse() && "Root should have only one use!");
+        return isa<GetElementPtrInst>(R->user_back());
+      })) {
     MaxBitWidth = 8u;
 
     // Determine if the sign bit of all the roots is known to be zero. If not,
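With the roots gated on getelementptr users, MaxBitWidth is seeded at 8 and the value-tracking code that follows in computeMinimumValueSizes (not shown in this diff) grows it back to the smallest width that still preserves every demotable scalar. A rough sketch of that step, assuming the standard ValueTracking helper ComputeNumSignBits; the growMaxBitWidth wrapper is hypothetical, and the real code additionally accounts for a possibly set sign bit (see the comment in the hunk above) and rounds the result up to a power of two:

#include <algorithm>
#include "llvm/ADT/ArrayRef.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Value.h"

using namespace llvm;

// Bits that are known to be copies of the sign bit never need to be stored,
// so each scalar only requires (type width - known sign bits) bits.
static unsigned growMaxBitWidth(ArrayRef<Value *> ToDemote,
                                const DataLayout &DL, unsigned MaxBitWidth) {
  for (Value *Scalar : ToDemote) {
    unsigned NumSignBits = ComputeNumSignBits(Scalar, DL);
    unsigned NumTypeBits = DL.getTypeSizeInBits(Scalar->getType());
    MaxBitWidth = std::max(NumTypeBits - NumSignBits, MaxBitWidth);
  }
  return MaxBitWidth;
}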
@@ -9,20 +9,24 @@ define void @test(<4 x i16> %a, <4 x i16> %b, i64* %p) {
 ; Make sure types of sub and its sources are not extended.
 ; CHECK-LABEL: @test(
 ; CHECK-NEXT: entry:
-; CHECK-NEXT: [[Z0:%.*]] = zext <4 x i16> [[A:%.*]] to <4 x i64>
-; CHECK-NEXT: [[Z1:%.*]] = zext <4 x i16> [[B:%.*]] to <4 x i64>
-; CHECK-NEXT: [[SUB0:%.*]] = sub nsw <4 x i64> [[Z0]], [[Z1]]
-; CHECK-NEXT: [[TMP0:%.*]] = extractelement <4 x i64> [[SUB0]], i32 0
-; CHECK-NEXT: [[GEP0:%.*]] = getelementptr inbounds i64, i64* [[P:%.*]], i64 [[TMP0]]
+; CHECK-NEXT: [[Z0:%.*]] = zext <4 x i16> [[A:%.*]] to <4 x i32>
+; CHECK-NEXT: [[Z1:%.*]] = zext <4 x i16> [[B:%.*]] to <4 x i32>
+; CHECK-NEXT: [[SUB0:%.*]] = sub nsw <4 x i32> [[Z0]], [[Z1]]
+; CHECK-NEXT: [[TMP0:%.*]] = extractelement <4 x i32> [[SUB0]], i32 0
+; CHECK-NEXT: [[TMP1:%.*]] = sext i32 [[TMP0]] to i64
+; CHECK-NEXT: [[GEP0:%.*]] = getelementptr inbounds i64, i64* [[P:%.*]], i64 [[TMP1]]
 ; CHECK-NEXT: [[LOAD0:%.*]] = load i64, i64* [[GEP0]], align 4
-; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x i64> [[SUB0]], i32 1
-; CHECK-NEXT: [[GEP1:%.*]] = getelementptr inbounds i64, i64* [[P]], i64 [[TMP1]]
+; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x i32> [[SUB0]], i32 1
+; CHECK-NEXT: [[TMP3:%.*]] = sext i32 [[TMP2]] to i64
+; CHECK-NEXT: [[GEP1:%.*]] = getelementptr inbounds i64, i64* [[P]], i64 [[TMP3]]
 ; CHECK-NEXT: [[LOAD1:%.*]] = load i64, i64* [[GEP1]], align 4
-; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x i64> [[SUB0]], i32 2
-; CHECK-NEXT: [[GEP2:%.*]] = getelementptr inbounds i64, i64* [[P]], i64 [[TMP2]]
+; CHECK-NEXT: [[TMP4:%.*]] = extractelement <4 x i32> [[SUB0]], i32 2
+; CHECK-NEXT: [[TMP5:%.*]] = sext i32 [[TMP4]] to i64
+; CHECK-NEXT: [[GEP2:%.*]] = getelementptr inbounds i64, i64* [[P]], i64 [[TMP5]]
 ; CHECK-NEXT: [[LOAD2:%.*]] = load i64, i64* [[GEP2]], align 4
-; CHECK-NEXT: [[TMP3:%.*]] = extractelement <4 x i64> [[SUB0]], i32 3
-; CHECK-NEXT: [[GEP3:%.*]] = getelementptr inbounds i64, i64* [[P]], i64 [[TMP3]]
+; CHECK-NEXT: [[TMP6:%.*]] = extractelement <4 x i32> [[SUB0]], i32 3
+; CHECK-NEXT: [[TMP7:%.*]] = sext i32 [[TMP6]] to i64
+; CHECK-NEXT: [[GEP3:%.*]] = getelementptr inbounds i64, i64* [[P]], i64 [[TMP7]]
 ; CHECK-NEXT: [[LOAD3:%.*]] = load i64, i64* [[GEP3]], align 4
 ; CHECK-NEXT: call void @foo(i64 [[LOAD0]], i64 [[LOAD1]], i64 [[LOAD2]], i64 [[LOAD3]])
 ; CHECK-NEXT: ret void
@@ -25,14 +25,19 @@ define void @get_block(i32 %y_pos) local_unnamed_addr #0 {
 ; CHECK-NEXT: [[TMP8:%.*]] = icmp slt <4 x i32> [[TMP7]], undef
 ; CHECK-NEXT: [[TMP9:%.*]] = select <4 x i1> [[TMP8]], <4 x i32> [[TMP7]], <4 x i32> undef
 ; CHECK-NEXT: [[TMP10:%.*]] = sext <4 x i32> [[TMP9]] to <4 x i64>
-; CHECK-NEXT: [[TMP11:%.*]] = extractelement <4 x i64> [[TMP10]], i32 0
-; CHECK-NEXT: [[ARRAYIDX31:%.*]] = getelementptr inbounds i16*, i16** undef, i64 [[TMP11]]
-; CHECK-NEXT: [[TMP12:%.*]] = extractelement <4 x i64> [[TMP10]], i32 1
-; CHECK-NEXT: [[ARRAYIDX31_1:%.*]] = getelementptr inbounds i16*, i16** undef, i64 [[TMP12]]
-; CHECK-NEXT: [[TMP13:%.*]] = extractelement <4 x i64> [[TMP10]], i32 2
-; CHECK-NEXT: [[ARRAYIDX31_2:%.*]] = getelementptr inbounds i16*, i16** undef, i64 [[TMP13]]
-; CHECK-NEXT: [[TMP14:%.*]] = extractelement <4 x i64> [[TMP10]], i32 3
-; CHECK-NEXT: [[ARRAYIDX31_3:%.*]] = getelementptr inbounds i16*, i16** undef, i64 [[TMP14]]
+; CHECK-NEXT: [[TMP11:%.*]] = trunc <4 x i64> [[TMP10]] to <4 x i32>
+; CHECK-NEXT: [[TMP12:%.*]] = extractelement <4 x i32> [[TMP11]], i32 0
+; CHECK-NEXT: [[TMP13:%.*]] = sext i32 [[TMP12]] to i64
+; CHECK-NEXT: [[ARRAYIDX31:%.*]] = getelementptr inbounds i16*, i16** undef, i64 [[TMP13]]
+; CHECK-NEXT: [[TMP14:%.*]] = extractelement <4 x i32> [[TMP11]], i32 1
+; CHECK-NEXT: [[TMP15:%.*]] = sext i32 [[TMP14]] to i64
+; CHECK-NEXT: [[ARRAYIDX31_1:%.*]] = getelementptr inbounds i16*, i16** undef, i64 [[TMP15]]
+; CHECK-NEXT: [[TMP16:%.*]] = extractelement <4 x i32> [[TMP11]], i32 2
+; CHECK-NEXT: [[TMP17:%.*]] = sext i32 [[TMP16]] to i64
+; CHECK-NEXT: [[ARRAYIDX31_2:%.*]] = getelementptr inbounds i16*, i16** undef, i64 [[TMP17]]
+; CHECK-NEXT: [[TMP18:%.*]] = extractelement <4 x i32> [[TMP11]], i32 3
+; CHECK-NEXT: [[TMP19:%.*]] = sext i32 [[TMP18]] to i64
+; CHECK-NEXT: [[ARRAYIDX31_3:%.*]] = getelementptr inbounds i16*, i16** undef, i64 [[TMP19]]
 ; CHECK-NEXT: unreachable
 ;
 entry: