Teach SCEV normalization to de/normalize non-affine add recs

Summary:
Before this change, SCEV normalization would incorrectly normalize
non-affine add recurrences.  To work around this, there was (and still
is) a check in place to make sure we only tried to normalize affine add
recurrences.

We recently found a bug in the aforementioned check to bail out of
normalizing non-affine add recurrences.  However, instead of fixing
the bailout, I have decided to teach SCEV normalization to work
correctly with non-affine add recurrences, making the bailout
unnecessary (I'll remove it in a subsequent change).

I've also added some unit tests (which would have failed before this
change).
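
For illustration (this worked example is mine, not part of the original
commit message): under the new scheme, normalizing the non-affine add
recurrence {a,+,b,+,c}<%L> with respect to %L subtracts the normalized
step from each operand, starting from the least significant operand:

  normalize({a,+,b,+,c}<%L>)                    = {(a - b + c),+,(b - c),+,c}<%L>
  denormalize({(a - b + c),+,(b - c),+,c}<%L>)  = {a,+,b,+,c}<%L>

The round trip is the identity, which is exactly the property the new
unit tests assert.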

Reviewers: atrick, sunfish, efriedma

Reviewed By: atrick

Subscribers: mcrosier, mzolotukhin, llvm-commits

Differential Revision: https://reviews.llvm.org/D32104

llvm-svn: 301281
commit bbebcb6c4d (parent 831c72f4a9)
Author: Sanjoy Das
Date:   2017-04-25 00:09:19 +00:00

2 changed files with 137 additions and 33 deletions

lib/Analysis/ScalarEvolutionNormalization.cpp

@@ -51,40 +51,47 @@ NormalizeDenormalizeRewriter::visitAddRecExpr(const SCEVAddRecExpr *AR) {
   transform(AR->operands(), std::back_inserter(Operands),
             [&](const SCEV *Op) { return visit(Op); });
 
-  // Conservatively use AnyWrap until/unless we need FlagNW.
-  const SCEV *Result =
-      SE.getAddRecExpr(Operands, AR->getLoop(), SCEV::FlagAnyWrap);
-  switch (Kind) {
-  case Normalize:
-    // We want to normalize step expression, because otherwise we might not be
-    // able to denormalize to the original expression.
+  if (!Pred(AR))
+    return SE.getAddRecExpr(Operands, AR->getLoop(), SCEV::FlagAnyWrap);
+
+  // Normalization and denormalization are fancy names for decrementing and
+  // incrementing a SCEV expression with respect to a set of loops.  Since
+  // Pred(AR) has returned true, we know we need to normalize or denormalize AR
+  // with respect to its loop.
+
+  if (Kind == Denormalize) {
+    // Denormalization / "partial increment" is essentially the same as \c
+    // SCEVAddRecExpr::getPostIncExpr.  Here we use an explicit loop to make the
+    // symmetry with Normalization clear.
+    for (int i = 0, e = Operands.size() - 1; i < e; i++)
+      Operands[i] = SE.getAddExpr(Operands[i], Operands[i + 1]);
+  } else {
+    assert(Kind == Normalize && "Only two possibilities!");
+
+    // Normalization / "partial decrement" is a bit more subtle.  Since
+    // incrementing a SCEV expression (in general) changes the step of the SCEV
+    // expression as well, we cannot use the step of the current expression.
+    // Instead, we have to use the step of the very expression we're trying to
+    // compute!
     //
-    // Here is an example what will happen if we don't normalize step:
-    //   ORIGINAL ISE:
-    //     {(100 /u {1,+,1}<%bb16>),+,(100 /u {1,+,1}<%bb16>)}<%bb25>
-    //   NORMALIZED ISE:
-    //     {((-1 * (100 /u {1,+,1}<%bb16>)) + (100 /u {0,+,1}<%bb16>)),+,
-    //      (100 /u {0,+,1}<%bb16>)}<%bb25>
-    //   DENORMALIZED BACK ISE:
-    //     {((2 * (100 /u {1,+,1}<%bb16>)) + (-1 * (100 /u {2,+,1}<%bb16>))),+,
-    //      (100 /u {1,+,1}<%bb16>)}<%bb25>
-    //   Note that the initial value changes after normalization +
-    //   denormalization, which isn't correct.
-    if (Pred(AR)) {
-      const SCEV *TransformedStep = visit(AR->getStepRecurrence(SE));
-      Result = SE.getMinusSCEV(Result, TransformedStep);
-    }
-    break;
-  case Denormalize:
-    // Here we want to normalize step expressions for the same reasons, as
-    // stated above.
-    if (Pred(AR)) {
-      const SCEV *TransformedStep = visit(AR->getStepRecurrence(SE));
-      Result = SE.getAddExpr(Result, TransformedStep);
-    }
-    break;
+    // We solve the issue by recursively building up the result, starting from
+    // the "least significant" operand in the add recurrence:
+    //
+    // Base case:
+    //   Single operand add recurrence.  It's its own normalization.
+    //
+    // N-operand case:
+    //   {S_{N-1},+,S_{N-2},+,...,+,S_0} = S
+    //
+    //   Since the step recurrence of S is {S_{N-2},+,...,+,S_0}, we know its
+    //   normalization by induction.  We subtract the normalized step
+    //   recurrence from S_{N-1} to get the normalization of S.
+
+    for (int i = Operands.size() - 2; i >= 0; i--)
+      Operands[i] = SE.getMinusSCEV(Operands[i], Operands[i + 1]);
   }
-  return Result;
+
+  return SE.getAddRecExpr(Operands, AR->getLoop(), SCEV::FlagAnyWrap);
 }
 
 const SCEV *llvm::normalizeForPostIncUse(const SCEV *S,
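
The inverse relationship between the two loops does not depend on any
SCEV-specific reasoning; it holds for anything you can add and subtract.
Here is a minimal standalone sketch (mine, not part of the commit) that
replays the partial-increment and partial-decrement loops on plain
integers standing in for the operands:

  // inverse_check.cpp -- hypothetical demo, not LLVM code.
  #include <cassert>
  #include <vector>

  // Mirrors the Normalize branch: "partial decrement", walking from the
  // least significant operand toward the most significant one, so each
  // step uses the already-normalized step recurrence.
  static void normalize(std::vector<long> &Ops) {
    for (int i = (int)Ops.size() - 2; i >= 0; i--)
      Ops[i] -= Ops[i + 1];
  }

  // Mirrors the Denormalize branch: "partial increment", the analogue of
  // SCEVAddRecExpr::getPostIncExpr.
  static void denormalize(std::vector<long> &Ops) {
    for (int i = 0, e = (int)Ops.size() - 1; i < e; i++)
      Ops[i] += Ops[i + 1];
  }

  int main() {
    // Operands of a cubic (hence non-affine) add rec {7,+,3,+,5,+,2}.
    std::vector<long> Ops = {7, 3, 5, 2};
    const std::vector<long> Orig = Ops;

    normalize(Ops);   // {7, 0, 3, 2}
    denormalize(Ops); // back to {7, 3, 5, 2}
    assert(Ops == Orig);

    denormalize(Ops); // {10, 8, 7, 2}
    normalize(Ops);   // back to {7, 3, 5, 2} again
    assert(Ops == Orig);
    return 0;
  }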

unittests/Analysis/ScalarEvolutionTest.cpp

@@ -640,6 +640,25 @@ TEST_F(ScalarEvolutionsTest, SCEVNormalization) {
       "for.end.loopexit: "
       "  ret void "
       "} "
+      " "
+      "define void @f_2(i32 %a, i32 %b, i32 %c, i32 %d) "
+      "    local_unnamed_addr { "
+      "entry: "
+      "  br label %loop_0 "
+      " "
+      "loop_0: "
+      "  br i1 undef, label %loop_0, label %loop_1 "
+      " "
+      "loop_1: "
+      "  br i1 undef, label %loop_2, label %loop_1 "
+      " "
+      " "
+      "loop_2: "
+      "  br i1 undef, label %end, label %loop_2 "
+      " "
+      "end: "
+      "  ret void "
+      "} "
       ,
       Err, C);
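
(A gloss on the new test input, mine rather than the commit's: the body
of @f_2 is deliberately trivial.  The single-block loops branching on
undef compute nothing; they exist only to give LoopInfo three top-level
loops (%loop_0, %loop_1, %loop_2) around which the unit test below can
build add recurrences.)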
@@ -664,6 +683,85 @@ TEST_F(ScalarEvolutionsTest, SCEVNormalization) {
     auto *D1 = denormalizeForPostIncUse(N1, Loops, SE);
     EXPECT_EQ(S1, D1) << *S1 << " " << *D1;
   });
+
+  runWithSE(*M, "f_2", [&](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
+    auto *L2 = *LI.begin();
+    auto *L1 = *std::next(LI.begin());
+    auto *L0 = *std::next(LI.begin(), 2);
+
+    auto GetAddRec = [&SE](const Loop *L,
+                           std::initializer_list<const SCEV *> Ops) {
+      SmallVector<const SCEV *, 4> OpsCopy(Ops);
+      return SE.getAddRecExpr(OpsCopy, L, SCEV::FlagAnyWrap);
+    };
+
+    auto GetAdd = [&SE](std::initializer_list<const SCEV *> Ops) {
+      SmallVector<const SCEV *, 4> OpsCopy(Ops);
+      return SE.getAddExpr(OpsCopy, SCEV::FlagAnyWrap);
+    };
+
+    // We first populate the Exprs vector with a few "interesting" SCEV
+    // expressions, and then we go through the list and assert that each
+    // expression in it has an invertible normalization.
+    std::vector<const SCEV *> Exprs;
+    {
+      const SCEV *V0 = SE.getSCEV(&*F.arg_begin());
+      const SCEV *V1 = SE.getSCEV(&*std::next(F.arg_begin(), 1));
+      const SCEV *V2 = SE.getSCEV(&*std::next(F.arg_begin(), 2));
+      const SCEV *V3 = SE.getSCEV(&*std::next(F.arg_begin(), 3));
+
+      Exprs.push_back(GetAddRec(L0, {V0}));             // 0
+      Exprs.push_back(GetAddRec(L0, {V0, V1}));         // 1
+      Exprs.push_back(GetAddRec(L0, {V0, V1, V2}));     // 2
+      Exprs.push_back(GetAddRec(L0, {V0, V1, V2, V3})); // 3
+
+      Exprs.push_back(
+          GetAddRec(L1, {Exprs[1], Exprs[2], Exprs[3], Exprs[0]})); // 4
+      Exprs.push_back(
+          GetAddRec(L1, {Exprs[1], Exprs[2], Exprs[0], Exprs[3]})); // 5
+      Exprs.push_back(
+          GetAddRec(L1, {Exprs[1], Exprs[3], Exprs[3], Exprs[1]})); // 6
+
+      Exprs.push_back(GetAdd({Exprs[6], Exprs[3], V2})); // 7
+
+      Exprs.push_back(
+          GetAddRec(L2, {Exprs[4], Exprs[3], Exprs[3], Exprs[5]})); // 8
+
+      Exprs.push_back(
+          GetAddRec(L2, {Exprs[4], Exprs[6], Exprs[7], Exprs[3], V0})); // 9
+    }
+
+    std::vector<PostIncLoopSet> LoopSets;
+    for (int i = 0; i < 8; i++) {
+      LoopSets.emplace_back();
+      if (i & 1)
+        LoopSets.back().insert(L0);
+      if (i & 2)
+        LoopSets.back().insert(L1);
+      if (i & 4)
+        LoopSets.back().insert(L2);
+    }
+
+    for (const auto &LoopSet : LoopSets)
+      for (auto *S : Exprs) {
+        {
+          auto *N = llvm::normalizeForPostIncUse(S, LoopSet, SE);
+          auto *D = llvm::denormalizeForPostIncUse(N, LoopSet, SE);
+
+          // Normalizing and then denormalizing had better give us back the
+          // same value.
+          EXPECT_EQ(S, D) << "S = " << *S << "  D = " << *D << "  N = " << *N;
+        }
+        {
+          auto *D = llvm::denormalizeForPostIncUse(S, LoopSet, SE);
+          auto *N = llvm::normalizeForPostIncUse(D, LoopSet, SE);
+
+          // Denormalizing and then normalizing had better give us back the
+          // same value.
+          EXPECT_EQ(S, N) << "S = " << *S << "  N = " << *N;
+        }
+      }
+  });
 }
 
 // Expect the call of getZeroExtendExpr will not cost exponential time.
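
(Two details of the test worth noting; this gloss is mine, not the
commit's.  First, binding L2 to *LI.begin() relies on LoopInfo visiting
top-level loops in reverse program order, so the first loop it yields is
the last one in the function, %loop_2.  Second, the bitmask loop builds
all eight subsets of {L0, L1, L2} as PostIncLoopSets, so the round-trip
EXPECT_EQ checks run against every possible combination of normalization
loops.)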
@@ -755,6 +853,5 @@ TEST_F(ScalarEvolutionsTest, SCEVZeroExtendExpr) {
   Type *I128Ty = Type::getInt128Ty(Context);
   SE.getZeroExtendExpr(S, I128Ty);
 }
 
 } // end anonymous namespace
 } // end namespace llvm