2007-04-07 09:25:15 +08:00
|
|
|
//===- LoopRotation.cpp - Loop Rotation Pass ------------------------------===//
|
|
|
|
//
|
|
|
|
// The LLVM Compiler Infrastructure
|
|
|
|
//
|
2007-12-30 04:36:04 +08:00
|
|
|
// This file is distributed under the University of Illinois Open Source
|
|
|
|
// License. See LICENSE.TXT for details.
|
2007-04-07 09:25:15 +08:00
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
//
|
|
|
|
// This file implements Loop Rotation Pass.
|
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2007-04-10 00:11:48 +08:00
|
|
|
#define DEBUG_TYPE "loop-rotate"
|
2007-04-07 09:25:15 +08:00
|
|
|
#include "llvm/Transforms/Scalar.h"
|
2012-12-04 00:50:05 +08:00
|
|
|
#include "llvm/ADT/Statistic.h"
|
2011-01-02 15:35:53 +08:00
|
|
|
#include "llvm/Analysis/CodeMetrics.h"
|
2011-01-08 16:24:46 +08:00
|
|
|
#include "llvm/Analysis/InstructionSimplify.h"
|
2012-12-04 00:50:05 +08:00
|
|
|
#include "llvm/Analysis/LoopPass.h"
|
2007-07-12 07:47:28 +08:00
|
|
|
#include "llvm/Analysis/ScalarEvolution.h"
|
2013-01-21 21:04:33 +08:00
|
|
|
#include "llvm/Analysis/TargetTransformInfo.h"
|
2012-02-14 08:00:23 +08:00
|
|
|
#include "llvm/Analysis/ValueTracking.h"
|
2013-01-02 19:36:10 +08:00
|
|
|
#include "llvm/IR/Function.h"
|
|
|
|
#include "llvm/IR/IntrinsicInst.h"
|
2012-12-04 00:50:05 +08:00
|
|
|
#include "llvm/Support/CFG.h"
|
|
|
|
#include "llvm/Support/Debug.h"
|
2007-07-12 07:47:28 +08:00
|
|
|
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
|
2012-12-04 00:50:05 +08:00
|
|
|
#include "llvm/Transforms/Utils/Local.h"
|
2009-10-25 07:19:52 +08:00
|
|
|
#include "llvm/Transforms/Utils/SSAUpdater.h"
|
2011-01-08 15:21:31 +08:00
|
|
|
#include "llvm/Transforms/Utils/ValueMapper.h"
|
2007-04-07 09:25:15 +08:00
|
|
|
using namespace llvm;
|
|
|
|
|
|
|
|
#define MAX_HEADER_SIZE 16
|
|
|
|
|
|
|
|
STATISTIC(NumRotated, "Number of loops rotated");
|
|
|
|
namespace {
  /// Loop rotation pass. For each loop, runOnLoop first tries to fold the
  /// loop latch into the exiting block (simplifyLoopLatch) and then calls
  /// rotateLoop repeatedly until it reports no further change.
  class LoopRotate : public LoopPass {
  public:
    static char ID; // Pass ID, replacement for typeid
    LoopRotate() : LoopPass(ID) {
      initializeLoopRotatePass(*PassRegistry::getPassRegistry());
    }

    // LCSSA form makes instruction renaming easier.
    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.addPreserved<DominatorTree>();
      AU.addRequired<LoopInfo>();
      AU.addPreserved<LoopInfo>();
      AU.addRequiredID(LoopSimplifyID);
      AU.addPreservedID(LoopSimplifyID);
      AU.addRequiredID(LCSSAID);
      AU.addPreservedID(LCSSAID);
      AU.addPreserved<ScalarEvolution>();
      AU.addRequired<TargetTransformInfo>();
    }

    /// Entry point; returns true if the loop was changed.
    bool runOnLoop(Loop *L, LPPassManager &LPM);
    /// Fold the loop latch into the exiting block when cheap and safe.
    bool simplifyLoopLatch(Loop *L);
    /// Perform one rotation of loop L; returns true on success.
    bool rotateLoop(Loop *L, bool SimplifiedLatch);

  private:
    LoopInfo *LI;                   // Set at the start of each runOnLoop.
    const TargetTransformInfo *TTI; // Cost model used to size the header.
  };
}
|
2012-02-14 08:00:19 +08:00
|
|
|
|
2008-05-13 08:00:25 +08:00
|
|
|
char LoopRotate::ID = 0;

// Register the pass and its analysis dependencies with the PassRegistry.
INITIALIZE_PASS_BEGIN(LoopRotate, "loop-rotate", "Rotate Loops", false, false)
INITIALIZE_AG_DEPENDENCY(TargetTransformInfo)
INITIALIZE_PASS_DEPENDENCY(LoopInfo)
INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
INITIALIZE_PASS_DEPENDENCY(LCSSA)
INITIALIZE_PASS_END(LoopRotate, "loop-rotate", "Rotate Loops", false, false)

// Public factory used by the pass-manager builder.
Pass *llvm::createLoopRotatePass() { return new LoopRotate(); }
|
2007-04-07 09:25:15 +08:00
|
|
|
|
2007-04-10 00:11:48 +08:00
|
|
|
/// Rotate Loop L as many times as possible. Return true if
|
2009-06-25 08:22:44 +08:00
|
|
|
/// the loop is rotated at least once.
|
2011-01-09 01:48:33 +08:00
|
|
|
bool LoopRotate::runOnLoop(Loop *L, LPPassManager &LPM) {
|
2011-01-09 01:38:45 +08:00
|
|
|
LI = &getAnalysis<LoopInfo>();
|
2013-01-21 21:04:33 +08:00
|
|
|
TTI = &getAnalysis<TargetTransformInfo>();
|
2007-07-12 07:47:28 +08:00
|
|
|
|
2012-02-14 08:00:23 +08:00
|
|
|
// Simplify the loop latch before attempting to rotate the header
|
|
|
|
// upward. Rotation may not be needed if the loop tail can be folded into the
|
|
|
|
// loop exit.
|
2013-05-07 01:58:18 +08:00
|
|
|
bool SimplifiedLatch = simplifyLoopLatch(L);
|
2012-02-14 08:00:23 +08:00
|
|
|
|
2007-04-07 09:25:15 +08:00
|
|
|
// One loop can be rotated multiple times.
|
2011-01-09 01:38:45 +08:00
|
|
|
bool MadeChange = false;
|
2013-05-07 01:58:18 +08:00
|
|
|
while (rotateLoop(L, SimplifiedLatch)) {
|
2011-01-09 01:38:45 +08:00
|
|
|
MadeChange = true;
|
2013-05-07 01:58:18 +08:00
|
|
|
SimplifiedLatch = false;
|
|
|
|
}
|
2011-01-09 01:38:45 +08:00
|
|
|
return MadeChange;
|
2007-04-07 09:25:15 +08:00
|
|
|
}
|
|
|
|
|
2011-01-09 03:26:33 +08:00
|
|
|
/// RewriteUsesOfClonedInstructions - We just cloned the instructions from the
/// old header into the preheader. If there were uses of the values produced by
/// these instruction that were outside of the loop, we have to insert PHI nodes
/// to merge the two values. Do this now.
///
/// \param OrigHeader    old loop header; still holds the original definitions.
/// \param OrigPreheader old preheader, now holding the cloned definitions.
/// \param ValueMap      maps each original instruction to its clone.
static void RewriteUsesOfClonedInstructions(BasicBlock *OrigHeader,
                                            BasicBlock *OrigPreheader,
                                            ValueToValueMapTy &ValueMap) {
  // Remove PHI node entries that are no longer live.
  BasicBlock::iterator I, E = OrigHeader->end();
  for (I = OrigHeader->begin(); PHINode *PN = dyn_cast<PHINode>(I); ++I)
    PN->removeIncomingValue(PN->getBasicBlockIndex(OrigPreheader));

  // Now fix up users of the instructions in OrigHeader, inserting PHI nodes
  // as necessary.
  SSAUpdater SSA;
  for (I = OrigHeader->begin(); I != E; ++I) {
    Value *OrigHeaderVal = I;

    // If there are no uses of the value (e.g. because it returns void), there
    // is nothing to rewrite.
    if (OrigHeaderVal->use_empty())
      continue;

    Value *OrigPreHeaderVal = ValueMap[OrigHeaderVal];

    // The value now exists in two versions: the initial value in the preheader
    // and the loop "next" value in the original header.
    SSA.Initialize(OrigHeaderVal->getType(), OrigHeaderVal->getName());
    SSA.AddAvailableValue(OrigHeader, OrigHeaderVal);
    SSA.AddAvailableValue(OrigPreheader, OrigPreHeaderVal);

    // Visit each use of the OrigHeader instruction.
    for (Value::use_iterator UI = OrigHeaderVal->use_begin(),
         UE = OrigHeaderVal->use_end(); UI != UE; ) {
      // Grab the use before incrementing the iterator.
      Use &U = UI.getUse();

      // Increment the iterator before removing the use from the list.
      // (RewriteUse below may unlink U from the use list, which would
      // invalidate UI if it still pointed at U.)
      ++UI;

      // SSAUpdater can't handle a non-PHI use in the same block as an
      // earlier def. We can easily handle those cases manually.
      Instruction *UserInst = cast<Instruction>(U.getUser());
      if (!isa<PHINode>(UserInst)) {
        BasicBlock *UserBB = UserInst->getParent();

        // The original users in the OrigHeader are already using the
        // original definitions.
        if (UserBB == OrigHeader)
          continue;

        // Users in the OrigPreHeader need to use the value to which the
        // original definitions are mapped.
        if (UserBB == OrigPreheader) {
          U = OrigPreHeaderVal;
          continue;
        }
      }

      // Anything else can be handled by SSAUpdater.
      SSA.RewriteUse(U);
    }
  }
}
|
2011-01-09 03:26:33 +08:00
|
|
|
|
2012-02-14 08:00:23 +08:00
|
|
|
/// Determine whether the instructions in this range my be safely and cheaply
|
|
|
|
/// speculated. This is not an important enough situation to develop complex
|
|
|
|
/// heuristics. We handle a single arithmetic instruction along with any type
|
|
|
|
/// conversions.
|
|
|
|
static bool shouldSpeculateInstrs(BasicBlock::iterator Begin,
|
|
|
|
BasicBlock::iterator End) {
|
|
|
|
bool seenIncrement = false;
|
|
|
|
for (BasicBlock::iterator I = Begin; I != End; ++I) {
|
|
|
|
|
|
|
|
if (!isSafeToSpeculativelyExecute(I))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
if (isa<DbgInfoIntrinsic>(I))
|
|
|
|
continue;
|
|
|
|
|
|
|
|
switch (I->getOpcode()) {
|
|
|
|
default:
|
|
|
|
return false;
|
|
|
|
case Instruction::GetElementPtr:
|
|
|
|
// GEPs are cheap if all indices are constant.
|
|
|
|
if (!cast<GEPOperator>(I)->hasAllConstantIndices())
|
|
|
|
return false;
|
|
|
|
// fall-thru to increment case
|
|
|
|
case Instruction::Add:
|
|
|
|
case Instruction::Sub:
|
|
|
|
case Instruction::And:
|
|
|
|
case Instruction::Or:
|
|
|
|
case Instruction::Xor:
|
|
|
|
case Instruction::Shl:
|
|
|
|
case Instruction::LShr:
|
|
|
|
case Instruction::AShr:
|
|
|
|
if (seenIncrement)
|
|
|
|
return false;
|
|
|
|
seenIncrement = true;
|
|
|
|
break;
|
|
|
|
case Instruction::Trunc:
|
|
|
|
case Instruction::ZExt:
|
|
|
|
case Instruction::SExt:
|
|
|
|
// ignore type conversions
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Fold the loop tail into the loop exit by speculating the loop tail
/// instructions. Typically, this is a single post-increment. In the case of a
/// simple 2-block loop, hoisting the increment can be much better than
/// duplicating the entire loop header. In the case of loops with early exits,
/// rotation will not work anyway, but simplifyLoopLatch will put the loop in
/// canonical form so downstream passes can handle it.
///
/// I don't believe this invalidates SCEV.
///
/// \returns true if the latch block was folded away (the CFG changed).
bool LoopRotate::simplifyLoopLatch(Loop *L) {
  // Need a unique latch whose address is not taken; an address-taken block
  // may be reached indirectly, making the surgery below unsound.
  BasicBlock *Latch = L->getLoopLatch();
  if (!Latch || Latch->hasAddressTaken())
    return false;

  // The latch must end in an unconditional branch (the backedge).
  BranchInst *Jmp = dyn_cast<BranchInst>(Latch->getTerminator());
  if (!Jmp || !Jmp->isUnconditional())
    return false;

  // The latch's single predecessor must be an exiting block, so the tail can
  // be merged into the block holding the exit test.
  BasicBlock *LastExit = Latch->getSinglePredecessor();
  if (!LastExit || !L->isLoopExiting(LastExit))
    return false;

  BranchInst *BI = dyn_cast<BranchInst>(LastExit->getTerminator());
  if (!BI)
    return false;

  // Every instruction in the latch (excluding the terminator Jmp) must be
  // cheap and safe to speculate, since it will now execute on the exit path
  // as well.
  if (!shouldSpeculateInstrs(Latch->begin(), Jmp))
    return false;

  DEBUG(dbgs() << "Folding loop latch " << Latch->getName() << " into "
        << LastExit->getName() << "\n");

  // Hoist the instructions from Latch into LastExit.
  LastExit->getInstList().splice(BI, Latch->getInstList(), Latch->begin(), Jmp);

  unsigned FallThruPath = BI->getSuccessor(0) == Latch ? 0 : 1;
  BasicBlock *Header = Jmp->getSuccessor(0);
  assert(Header == L->getHeader() && "expected a backward branch");

  // Remove Latch from the CFG so that LastExit becomes the new Latch.
  BI->setSuccessor(FallThruPath, Header);
  Latch->replaceSuccessorsPhiUsesWith(LastExit);
  Jmp->eraseFromParent();

  // Nuke the Latch block.
  assert(Latch->empty() && "unable to evacuate Latch");
  LI->removeBlock(Latch);
  if (DominatorTree *DT = getAnalysisIfAvailable<DominatorTree>())
    DT->eraseNode(Latch);
  Latch->eraseFromParent();
  return true;
}
|
|
|
|
|
2007-05-12 05:10:54 +08:00
|
|
|
/// Rotate loop LP. Return true if the loop is rotated.
|
2013-05-07 01:58:18 +08:00
|
|
|
///
|
|
|
|
/// \param SimplifiedLatch is true if the latch was just folded into the final
|
|
|
|
/// loop exit. In this case we may want to rotate even though the new latch is
|
|
|
|
/// now an exiting branch. This rotation would have happened had the latch not
|
|
|
|
/// been simplified. However, if SimplifiedLatch is false, then we avoid
|
|
|
|
/// rotating loops in which the latch exits to avoid excessive or endless
|
|
|
|
/// rotation. LoopRotate should be repeatable and converge to a canonical
|
|
|
|
/// form. This property is satisfied because simplifying the loop latch can only
|
|
|
|
/// happen once across multiple invocations of the LoopRotate pass.
|
|
|
|
bool LoopRotate::rotateLoop(Loop *L, bool SimplifiedLatch) {
|
2009-06-25 08:22:44 +08:00
|
|
|
// If the loop has only one block then there is not much to rotate.
|
2007-04-10 00:11:48 +08:00
|
|
|
if (L->getBlocks().size() == 1)
|
2007-04-07 09:25:15 +08:00
|
|
|
return false;
|
2012-02-14 08:00:19 +08:00
|
|
|
|
2011-01-09 02:06:22 +08:00
|
|
|
BasicBlock *OrigHeader = L->getHeader();
|
2012-08-30 23:39:42 +08:00
|
|
|
BasicBlock *OrigLatch = L->getLoopLatch();
|
2012-02-14 08:00:19 +08:00
|
|
|
|
2011-01-09 02:06:22 +08:00
|
|
|
BranchInst *BI = dyn_cast<BranchInst>(OrigHeader->getTerminator());
|
|
|
|
if (BI == 0 || BI->isUnconditional())
|
|
|
|
return false;
|
2012-02-14 08:00:19 +08:00
|
|
|
|
2009-06-25 08:22:44 +08:00
|
|
|
// If the loop header is not one of the loop exiting blocks then
|
|
|
|
// either this loop is already rotated or it is not
|
2007-04-07 09:25:15 +08:00
|
|
|
// suitable for loop rotation transformations.
|
2009-10-25 07:34:26 +08:00
|
|
|
if (!L->isLoopExiting(OrigHeader))
|
2007-04-07 09:25:15 +08:00
|
|
|
return false;
|
|
|
|
|
2012-08-30 23:39:42 +08:00
|
|
|
// If the loop latch already contains a branch that leaves the loop then the
|
|
|
|
// loop is already rotated.
|
2013-05-07 01:58:18 +08:00
|
|
|
if (OrigLatch == 0)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
// Rotate if either the loop latch does *not* exit the loop, or if the loop
|
|
|
|
// latch was just simplified.
|
|
|
|
if (L->isLoopExiting(OrigLatch) && !SimplifiedLatch)
|
2007-04-07 09:25:15 +08:00
|
|
|
return false;
|
|
|
|
|
2012-12-21 00:04:27 +08:00
|
|
|
// Check size of original header and reject loop if it is very big or we can't
|
|
|
|
// duplicate blocks inside it.
|
2011-01-02 15:35:53 +08:00
|
|
|
{
|
|
|
|
CodeMetrics Metrics;
|
2013-01-21 21:04:33 +08:00
|
|
|
Metrics.analyzeBasicBlock(OrigHeader, *TTI);
|
2012-12-21 00:04:27 +08:00
|
|
|
if (Metrics.notDuplicatable) {
|
|
|
|
DEBUG(dbgs() << "LoopRotation: NOT rotating - contains non duplicatable"
|
|
|
|
<< " instructions: "; L->dump());
|
|
|
|
return false;
|
|
|
|
}
|
2011-01-02 15:35:53 +08:00
|
|
|
if (Metrics.NumInsts > MAX_HEADER_SIZE)
|
|
|
|
return false;
|
2009-03-06 11:51:30 +08:00
|
|
|
}
|
|
|
|
|
2007-07-12 07:47:28 +08:00
|
|
|
// Now, this loop is suitable for rotation.
|
2011-01-09 03:26:33 +08:00
|
|
|
BasicBlock *OrigPreheader = L->getLoopPreheader();
|
2012-02-14 08:00:19 +08:00
|
|
|
|
2011-04-09 15:25:58 +08:00
|
|
|
// If the loop could not be converted to canonical form, it must have an
|
|
|
|
// indirectbr in it, just give up.
|
2012-08-30 23:39:42 +08:00
|
|
|
if (OrigPreheader == 0)
|
2011-04-09 15:25:58 +08:00
|
|
|
return false;
|
2007-07-12 07:47:28 +08:00
|
|
|
|
2009-09-27 23:37:03 +08:00
|
|
|
// Anything ScalarEvolution may know about this loop or the PHI nodes
|
|
|
|
// in its header will soon be invalidated.
|
|
|
|
if (ScalarEvolution *SE = getAnalysisIfAvailable<ScalarEvolution>())
|
2009-10-31 23:04:55 +08:00
|
|
|
SE->forgetLoop(L);
|
2009-09-27 23:37:03 +08:00
|
|
|
|
2012-08-30 23:39:42 +08:00
|
|
|
DEBUG(dbgs() << "LoopRotation: rotating "; L->dump());
|
|
|
|
|
2007-04-07 09:25:15 +08:00
|
|
|
// Find new Loop header. NewHeader is a Header's one and only successor
|
2009-01-26 09:57:01 +08:00
|
|
|
// that is inside loop. Header's other successor is outside the
|
|
|
|
// loop. Otherwise loop is not suitable for rotation.
|
2011-01-09 01:48:33 +08:00
|
|
|
BasicBlock *Exit = BI->getSuccessor(0);
|
|
|
|
BasicBlock *NewHeader = BI->getSuccessor(1);
|
2007-04-10 00:11:48 +08:00
|
|
|
if (L->contains(Exit))
|
|
|
|
std::swap(Exit, NewHeader);
|
2009-01-26 09:38:24 +08:00
|
|
|
assert(NewHeader && "Unable to determine new loop header");
|
2012-02-14 08:00:19 +08:00
|
|
|
assert(L->contains(NewHeader) && !L->contains(Exit) &&
|
2007-04-10 00:11:48 +08:00
|
|
|
"Unable to determine loop header and exit blocks");
|
2012-02-14 08:00:19 +08:00
|
|
|
|
2009-06-25 08:22:44 +08:00
|
|
|
// This code assumes that the new header has exactly one predecessor.
|
|
|
|
// Remove any single-entry PHI nodes in it.
|
2009-01-26 10:11:30 +08:00
|
|
|
assert(NewHeader->getSinglePredecessor() &&
|
|
|
|
"New header doesn't have one pred!");
|
|
|
|
FoldSingleEntryPHINodes(NewHeader);
|
2007-04-07 09:25:15 +08:00
|
|
|
|
2009-10-25 07:19:52 +08:00
|
|
|
// Begin by walking OrigHeader and populating ValueMap with an entry for
|
|
|
|
// each Instruction.
|
2007-04-10 00:11:48 +08:00
|
|
|
BasicBlock::iterator I = OrigHeader->begin(), E = OrigHeader->end();
|
2011-01-08 15:21:31 +08:00
|
|
|
ValueToValueMapTy ValueMap;
|
2007-04-10 00:11:48 +08:00
|
|
|
|
2009-10-25 07:19:52 +08:00
|
|
|
// For PHI nodes, the value available in OldPreHeader is just the
|
|
|
|
// incoming value from OldPreHeader.
|
|
|
|
for (; PHINode *PN = dyn_cast<PHINode>(I); ++I)
|
2011-06-20 22:18:48 +08:00
|
|
|
ValueMap[PN] = PN->getIncomingValueForBlock(OrigPreheader);
|
2007-04-10 03:04:21 +08:00
|
|
|
|
2010-09-06 09:10:22 +08:00
|
|
|
// For the rest of the instructions, either hoist to the OrigPreheader if
|
|
|
|
// possible or create a clone in the OldPreHeader if not.
|
2011-01-09 03:26:33 +08:00
|
|
|
TerminatorInst *LoopEntryBranch = OrigPreheader->getTerminator();
|
2010-09-06 09:10:22 +08:00
|
|
|
while (I != E) {
|
|
|
|
Instruction *Inst = I++;
|
2012-02-14 08:00:19 +08:00
|
|
|
|
2010-09-06 09:10:22 +08:00
|
|
|
// If the instruction's operands are invariant and it doesn't read or write
|
|
|
|
// memory, then it is safe to hoist. Doing this doesn't change the order of
|
|
|
|
// execution in the preheader, but does prevent the instruction from
|
|
|
|
// executing in each iteration of the loop. This means it is safe to hoist
|
|
|
|
// something that might trap, but isn't safe to hoist something that reads
|
|
|
|
// memory (without proving that the loop doesn't write).
|
|
|
|
if (L->hasLoopInvariantOperands(Inst) &&
|
|
|
|
!Inst->mayReadFromMemory() && !Inst->mayWriteToMemory() &&
|
2012-02-16 08:41:10 +08:00
|
|
|
!isa<TerminatorInst>(Inst) && !isa<DbgInfoIntrinsic>(Inst) &&
|
|
|
|
!isa<AllocaInst>(Inst)) {
|
2010-09-06 09:10:22 +08:00
|
|
|
Inst->moveBefore(LoopEntryBranch);
|
|
|
|
continue;
|
|
|
|
}
|
2012-02-14 08:00:19 +08:00
|
|
|
|
2010-09-06 09:10:22 +08:00
|
|
|
// Otherwise, create a duplicate of the instruction.
|
|
|
|
Instruction *C = Inst->clone();
|
2012-02-14 08:00:19 +08:00
|
|
|
|
2011-01-08 16:24:46 +08:00
|
|
|
// Eagerly remap the operands of the instruction.
|
|
|
|
RemapInstruction(C, ValueMap,
|
|
|
|
RF_NoModuleLevelChanges|RF_IgnoreMissingEntries);
|
2012-02-14 08:00:19 +08:00
|
|
|
|
2011-01-08 16:24:46 +08:00
|
|
|
// With the operands remapped, see if the instruction constant folds or is
|
|
|
|
// otherwise simplifyable. This commonly occurs because the entry from PHI
|
|
|
|
// nodes allows icmps and other instructions to fold.
|
2011-01-09 01:38:45 +08:00
|
|
|
Value *V = SimplifyInstruction(C);
|
|
|
|
if (V && LI->replacementPreservesLCSSAForm(C, V)) {
|
2011-01-08 16:24:46 +08:00
|
|
|
// If so, then delete the temporary instruction and stick the folded value
|
|
|
|
// in the map.
|
|
|
|
delete C;
|
|
|
|
ValueMap[Inst] = V;
|
|
|
|
} else {
|
|
|
|
// Otherwise, stick the new instruction into the new block!
|
|
|
|
C->setName(Inst->getName());
|
|
|
|
C->insertBefore(LoopEntryBranch);
|
|
|
|
ValueMap[Inst] = C;
|
|
|
|
}
|
2007-04-07 09:25:15 +08:00
|
|
|
}
|
|
|
|
|
2009-10-25 07:19:52 +08:00
|
|
|
// Along with all the other instructions, we just cloned OrigHeader's
|
|
|
|
// terminator into OrigPreHeader. Fix up the PHI nodes in each of OrigHeader's
|
|
|
|
// successors by duplicating their incoming values for OrigHeader.
|
|
|
|
TerminatorInst *TI = OrigHeader->getTerminator();
|
|
|
|
for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i)
|
|
|
|
for (BasicBlock::iterator BI = TI->getSuccessor(i)->begin();
|
|
|
|
PHINode *PN = dyn_cast<PHINode>(BI); ++BI)
|
2011-01-09 03:26:33 +08:00
|
|
|
PN->addIncoming(PN->getIncomingValueForBlock(OrigHeader), OrigPreheader);
|
2009-10-25 07:19:52 +08:00
|
|
|
|
|
|
|
// Now that OrigPreHeader has a clone of OrigHeader's terminator, remove
|
|
|
|
// OrigPreHeader's old terminator (the original branch into the loop), and
|
|
|
|
// remove the corresponding incoming values from the PHI nodes in OrigHeader.
|
|
|
|
LoopEntryBranch->eraseFromParent();
|
|
|
|
|
2011-01-09 03:26:33 +08:00
|
|
|
// If there were any uses of instructions in the duplicated block outside the
|
|
|
|
// loop, update them, inserting PHI nodes as required
|
|
|
|
RewriteUsesOfClonedInstructions(OrigHeader, OrigPreheader, ValueMap);
|
2007-04-07 09:25:15 +08:00
|
|
|
|
2009-10-25 07:19:52 +08:00
|
|
|
// NewHeader is now the header of the loop.
|
2007-04-07 09:25:15 +08:00
|
|
|
L->moveToHeader(NewHeader);
|
2011-01-09 03:10:28 +08:00
|
|
|
assert(L->getHeader() == NewHeader && "Latch block is our new header");
|
2007-04-07 09:25:15 +08:00
|
|
|
|
2012-02-14 08:00:19 +08:00
|
|
|
|
When loop rotation happens, it is *very* common for the duplicated condbr
to be foldable into an uncond branch. When this happens, we can make a
much simpler CFG for the loop, which is important for nested loop cases
where we want the outer loop to be aggressively optimized.
Handle this case more aggressively. For example, previously on
phi-duplicate.ll we would get this:
define void @test(i32 %N, double* %G) nounwind ssp {
entry:
%cmp1 = icmp slt i64 1, 1000
br i1 %cmp1, label %bb.nph, label %for.end
bb.nph: ; preds = %entry
br label %for.body
for.body: ; preds = %bb.nph, %for.cond
%j.02 = phi i64 [ 1, %bb.nph ], [ %inc, %for.cond ]
%arrayidx = getelementptr inbounds double* %G, i64 %j.02
%tmp3 = load double* %arrayidx
%sub = sub i64 %j.02, 1
%arrayidx6 = getelementptr inbounds double* %G, i64 %sub
%tmp7 = load double* %arrayidx6
%add = fadd double %tmp3, %tmp7
%arrayidx10 = getelementptr inbounds double* %G, i64 %j.02
store double %add, double* %arrayidx10
%inc = add nsw i64 %j.02, 1
br label %for.cond
for.cond: ; preds = %for.body
%cmp = icmp slt i64 %inc, 1000
br i1 %cmp, label %for.body, label %for.cond.for.end_crit_edge
for.cond.for.end_crit_edge: ; preds = %for.cond
br label %for.end
for.end: ; preds = %for.cond.for.end_crit_edge, %entry
ret void
}
Now we get the much nicer:
define void @test(i32 %N, double* %G) nounwind ssp {
entry:
br label %for.body
for.body: ; preds = %entry, %for.body
%j.01 = phi i64 [ 1, %entry ], [ %inc, %for.body ]
%arrayidx = getelementptr inbounds double* %G, i64 %j.01
%tmp3 = load double* %arrayidx
%sub = sub i64 %j.01, 1
%arrayidx6 = getelementptr inbounds double* %G, i64 %sub
%tmp7 = load double* %arrayidx6
%add = fadd double %tmp3, %tmp7
%arrayidx10 = getelementptr inbounds double* %G, i64 %j.01
store double %add, double* %arrayidx10
%inc = add nsw i64 %j.01, 1
%cmp = icmp slt i64 %inc, 1000
br i1 %cmp, label %for.body, label %for.end
for.end: ; preds = %for.body
ret void
}
With all of these recent changes, we are now able to compile:
void foo(char *X) {
for (int i = 0; i != 100; ++i)
for (int j = 0; j != 100; ++j)
X[j+i*100] = 0;
}
into a single memset of 10000 bytes. This series of changes
should also be helpful for other nested loop scenarios as well.
llvm-svn: 123079
2011-01-09 03:59:06 +08:00
|
|
|
// At this point, we've finished our major CFG changes. As part of cloning
|
|
|
|
// the loop into the preheader we've simplified instructions and the
|
|
|
|
// duplicated conditional branch may now be branching on a constant. If it is
|
|
|
|
// branching on a constant and if that constant means that we enter the loop,
|
|
|
|
// then we fold away the cond branch to an uncond branch. This simplifies the
|
|
|
|
// loop in cases important for nested loops, and it also means we don't have
|
|
|
|
// to split as many edges.
|
|
|
|
BranchInst *PHBI = cast<BranchInst>(OrigPreheader->getTerminator());
|
|
|
|
assert(PHBI->isConditional() && "Should be clone of BI condbr!");
|
|
|
|
if (!isa<ConstantInt>(PHBI->getCondition()) ||
|
|
|
|
PHBI->getSuccessor(cast<ConstantInt>(PHBI->getCondition())->isZero())
|
|
|
|
!= NewHeader) {
|
|
|
|
// The conditional branch can't be folded, handle the general case.
|
|
|
|
// Update DominatorTree to reflect the CFG change we just made. Then split
|
|
|
|
// edges as necessary to preserve LoopSimplify form.
|
|
|
|
if (DominatorTree *DT = getAnalysisIfAvailable<DominatorTree>()) {
|
2012-08-30 23:39:42 +08:00
|
|
|
// Everything that was dominated by the old loop header is now dominated
|
|
|
|
// by the original loop preheader. Conceptually the header was merged
|
|
|
|
// into the preheader, even though we reuse the actual block as a new
|
|
|
|
// loop latch.
|
|
|
|
DomTreeNode *OrigHeaderNode = DT->getNode(OrigHeader);
|
|
|
|
SmallVector<DomTreeNode *, 8> HeaderChildren(OrigHeaderNode->begin(),
|
|
|
|
OrigHeaderNode->end());
|
|
|
|
DomTreeNode *OrigPreheaderNode = DT->getNode(OrigPreheader);
|
|
|
|
for (unsigned I = 0, E = HeaderChildren.size(); I != E; ++I)
|
|
|
|
DT->changeImmediateDominator(HeaderChildren[I], OrigPreheaderNode);
|
2012-02-14 08:00:19 +08:00
|
|
|
|
2012-09-01 20:04:51 +08:00
|
|
|
assert(DT->getNode(Exit)->getIDom() == OrigPreheaderNode);
|
|
|
|
assert(DT->getNode(NewHeader)->getIDom() == OrigPreheaderNode);
|
|
|
|
|
When loop rotation happens, it is *very* common for the duplicated condbr
to be foldable into an uncond branch. When this happens, we can make a
much simpler CFG for the loop, which is important for nested loop cases
where we want the outer loop to be aggressively optimized.
Handle this case more aggressively. For example, previously on
phi-duplicate.ll we would get this:
define void @test(i32 %N, double* %G) nounwind ssp {
entry:
%cmp1 = icmp slt i64 1, 1000
br i1 %cmp1, label %bb.nph, label %for.end
bb.nph: ; preds = %entry
br label %for.body
for.body: ; preds = %bb.nph, %for.cond
%j.02 = phi i64 [ 1, %bb.nph ], [ %inc, %for.cond ]
%arrayidx = getelementptr inbounds double* %G, i64 %j.02
%tmp3 = load double* %arrayidx
%sub = sub i64 %j.02, 1
%arrayidx6 = getelementptr inbounds double* %G, i64 %sub
%tmp7 = load double* %arrayidx6
%add = fadd double %tmp3, %tmp7
%arrayidx10 = getelementptr inbounds double* %G, i64 %j.02
store double %add, double* %arrayidx10
%inc = add nsw i64 %j.02, 1
br label %for.cond
for.cond: ; preds = %for.body
%cmp = icmp slt i64 %inc, 1000
br i1 %cmp, label %for.body, label %for.cond.for.end_crit_edge
for.cond.for.end_crit_edge: ; preds = %for.cond
br label %for.end
for.end: ; preds = %for.cond.for.end_crit_edge, %entry
ret void
}
Now we get the much nicer:
define void @test(i32 %N, double* %G) nounwind ssp {
entry:
br label %for.body
for.body: ; preds = %entry, %for.body
%j.01 = phi i64 [ 1, %entry ], [ %inc, %for.body ]
%arrayidx = getelementptr inbounds double* %G, i64 %j.01
%tmp3 = load double* %arrayidx
%sub = sub i64 %j.01, 1
%arrayidx6 = getelementptr inbounds double* %G, i64 %sub
%tmp7 = load double* %arrayidx6
%add = fadd double %tmp3, %tmp7
%arrayidx10 = getelementptr inbounds double* %G, i64 %j.01
store double %add, double* %arrayidx10
%inc = add nsw i64 %j.01, 1
%cmp = icmp slt i64 %inc, 1000
br i1 %cmp, label %for.body, label %for.end
for.end: ; preds = %for.body
ret void
}
With all of these recent changes, we are now able to compile:
void foo(char *X) {
for (int i = 0; i != 100; ++i)
for (int j = 0; j != 100; ++j)
X[j+i*100] = 0;
}
into a single memset of 10000 bytes. This series of changes
should also be helpful for other nested loop scenarios as well.
llvm-svn: 123079
2011-01-09 03:59:06 +08:00
|
|
|
// Update OrigHeader to be dominated by the new header block.
|
|
|
|
DT->changeImmediateDominator(OrigHeader, OrigLatch);
|
|
|
|
}
|
2012-02-14 08:00:19 +08:00
|
|
|
|
When loop rotation happens, it is *very* common for the duplicated condbr
to be foldable into an uncond branch. When this happens, we can make a
much simpler CFG for the loop, which is important for nested loop cases
where we want the outer loop to be aggressively optimized.
Handle this case more aggressively. For example, previously on
phi-duplicate.ll we would get this:
define void @test(i32 %N, double* %G) nounwind ssp {
entry:
%cmp1 = icmp slt i64 1, 1000
br i1 %cmp1, label %bb.nph, label %for.end
bb.nph: ; preds = %entry
br label %for.body
for.body: ; preds = %bb.nph, %for.cond
%j.02 = phi i64 [ 1, %bb.nph ], [ %inc, %for.cond ]
%arrayidx = getelementptr inbounds double* %G, i64 %j.02
%tmp3 = load double* %arrayidx
%sub = sub i64 %j.02, 1
%arrayidx6 = getelementptr inbounds double* %G, i64 %sub
%tmp7 = load double* %arrayidx6
%add = fadd double %tmp3, %tmp7
%arrayidx10 = getelementptr inbounds double* %G, i64 %j.02
store double %add, double* %arrayidx10
%inc = add nsw i64 %j.02, 1
br label %for.cond
for.cond: ; preds = %for.body
%cmp = icmp slt i64 %inc, 1000
br i1 %cmp, label %for.body, label %for.cond.for.end_crit_edge
for.cond.for.end_crit_edge: ; preds = %for.cond
br label %for.end
for.end: ; preds = %for.cond.for.end_crit_edge, %entry
ret void
}
Now we get the much nicer:
define void @test(i32 %N, double* %G) nounwind ssp {
entry:
br label %for.body
for.body: ; preds = %entry, %for.body
%j.01 = phi i64 [ 1, %entry ], [ %inc, %for.body ]
%arrayidx = getelementptr inbounds double* %G, i64 %j.01
%tmp3 = load double* %arrayidx
%sub = sub i64 %j.01, 1
%arrayidx6 = getelementptr inbounds double* %G, i64 %sub
%tmp7 = load double* %arrayidx6
%add = fadd double %tmp3, %tmp7
%arrayidx10 = getelementptr inbounds double* %G, i64 %j.01
store double %add, double* %arrayidx10
%inc = add nsw i64 %j.01, 1
%cmp = icmp slt i64 %inc, 1000
br i1 %cmp, label %for.body, label %for.end
for.end: ; preds = %for.body
ret void
}
With all of these recent changes, we are now able to compile:
void foo(char *X) {
for (int i = 0; i != 100; ++i)
for (int j = 0; j != 100; ++j)
X[j+i*100] = 0;
}
into a single memset of 10000 bytes. This series of changes
should also be helpful for other nested loop scenarios as well.
llvm-svn: 123079
2011-01-09 03:59:06 +08:00
|
|
|
// Right now OrigPreHeader has two successors, NewHeader and ExitBlock, and
|
2012-07-24 18:51:42 +08:00
|
|
|
// thus is not a preheader anymore.
|
|
|
|
// Split the edge to form a real preheader.
|
When loop rotation happens, it is *very* common for the duplicated condbr
to be foldable into an uncond branch. When this happens, we can make a
much simpler CFG for the loop, which is important for nested loop cases
where we want the outer loop to be aggressively optimized.
Handle this case more aggressively. For example, previously on
phi-duplicate.ll we would get this:
define void @test(i32 %N, double* %G) nounwind ssp {
entry:
%cmp1 = icmp slt i64 1, 1000
br i1 %cmp1, label %bb.nph, label %for.end
bb.nph: ; preds = %entry
br label %for.body
for.body: ; preds = %bb.nph, %for.cond
%j.02 = phi i64 [ 1, %bb.nph ], [ %inc, %for.cond ]
%arrayidx = getelementptr inbounds double* %G, i64 %j.02
%tmp3 = load double* %arrayidx
%sub = sub i64 %j.02, 1
%arrayidx6 = getelementptr inbounds double* %G, i64 %sub
%tmp7 = load double* %arrayidx6
%add = fadd double %tmp3, %tmp7
%arrayidx10 = getelementptr inbounds double* %G, i64 %j.02
store double %add, double* %arrayidx10
%inc = add nsw i64 %j.02, 1
br label %for.cond
for.cond: ; preds = %for.body
%cmp = icmp slt i64 %inc, 1000
br i1 %cmp, label %for.body, label %for.cond.for.end_crit_edge
for.cond.for.end_crit_edge: ; preds = %for.cond
br label %for.end
for.end: ; preds = %for.cond.for.end_crit_edge, %entry
ret void
}
Now we get the much nicer:
define void @test(i32 %N, double* %G) nounwind ssp {
entry:
br label %for.body
for.body: ; preds = %entry, %for.body
%j.01 = phi i64 [ 1, %entry ], [ %inc, %for.body ]
%arrayidx = getelementptr inbounds double* %G, i64 %j.01
%tmp3 = load double* %arrayidx
%sub = sub i64 %j.01, 1
%arrayidx6 = getelementptr inbounds double* %G, i64 %sub
%tmp7 = load double* %arrayidx6
%add = fadd double %tmp3, %tmp7
%arrayidx10 = getelementptr inbounds double* %G, i64 %j.01
store double %add, double* %arrayidx10
%inc = add nsw i64 %j.01, 1
%cmp = icmp slt i64 %inc, 1000
br i1 %cmp, label %for.body, label %for.end
for.end: ; preds = %for.body
ret void
}
With all of these recent changes, we are now able to compile:
void foo(char *X) {
for (int i = 0; i != 100; ++i)
for (int j = 0; j != 100; ++j)
X[j+i*100] = 0;
}
into a single memset of 10000 bytes. This series of changes
should also be helpful for other nested loop scenarios as well.
llvm-svn: 123079
2011-01-09 03:59:06 +08:00
|
|
|
BasicBlock *NewPH = SplitCriticalEdge(OrigPreheader, NewHeader, this);
|
|
|
|
NewPH->setName(NewHeader->getName() + ".lr.ph");
|
2012-02-14 08:00:19 +08:00
|
|
|
|
2012-07-24 18:51:42 +08:00
|
|
|
// Preserve canonical loop form, which means that 'Exit' should have only
|
|
|
|
// one predecessor.
|
When loop rotation happens, it is *very* common for the duplicated condbr
to be foldable into an uncond branch. When this happens, we can make a
much simpler CFG for the loop, which is important for nested loop cases
where we want the outer loop to be aggressively optimized.
Handle this case more aggressively. For example, previously on
phi-duplicate.ll we would get this:
define void @test(i32 %N, double* %G) nounwind ssp {
entry:
%cmp1 = icmp slt i64 1, 1000
br i1 %cmp1, label %bb.nph, label %for.end
bb.nph: ; preds = %entry
br label %for.body
for.body: ; preds = %bb.nph, %for.cond
%j.02 = phi i64 [ 1, %bb.nph ], [ %inc, %for.cond ]
%arrayidx = getelementptr inbounds double* %G, i64 %j.02
%tmp3 = load double* %arrayidx
%sub = sub i64 %j.02, 1
%arrayidx6 = getelementptr inbounds double* %G, i64 %sub
%tmp7 = load double* %arrayidx6
%add = fadd double %tmp3, %tmp7
%arrayidx10 = getelementptr inbounds double* %G, i64 %j.02
store double %add, double* %arrayidx10
%inc = add nsw i64 %j.02, 1
br label %for.cond
for.cond: ; preds = %for.body
%cmp = icmp slt i64 %inc, 1000
br i1 %cmp, label %for.body, label %for.cond.for.end_crit_edge
for.cond.for.end_crit_edge: ; preds = %for.cond
br label %for.end
for.end: ; preds = %for.cond.for.end_crit_edge, %entry
ret void
}
Now we get the much nicer:
define void @test(i32 %N, double* %G) nounwind ssp {
entry:
br label %for.body
for.body: ; preds = %entry, %for.body
%j.01 = phi i64 [ 1, %entry ], [ %inc, %for.body ]
%arrayidx = getelementptr inbounds double* %G, i64 %j.01
%tmp3 = load double* %arrayidx
%sub = sub i64 %j.01, 1
%arrayidx6 = getelementptr inbounds double* %G, i64 %sub
%tmp7 = load double* %arrayidx6
%add = fadd double %tmp3, %tmp7
%arrayidx10 = getelementptr inbounds double* %G, i64 %j.01
store double %add, double* %arrayidx10
%inc = add nsw i64 %j.01, 1
%cmp = icmp slt i64 %inc, 1000
br i1 %cmp, label %for.body, label %for.end
for.end: ; preds = %for.body
ret void
}
With all of these recent changes, we are now able to compile:
void foo(char *X) {
for (int i = 0; i != 100; ++i)
for (int j = 0; j != 100; ++j)
X[j+i*100] = 0;
}
into a single memset of 10000 bytes. This series of changes
should also be helpful for other nested loop scenarios as well.
llvm-svn: 123079
2011-01-09 03:59:06 +08:00
|
|
|
BasicBlock *ExitSplit = SplitCriticalEdge(L->getLoopLatch(), Exit, this);
|
|
|
|
ExitSplit->moveBefore(Exit);
|
|
|
|
} else {
|
|
|
|
// We can fold the conditional branch in the preheader, this makes things
|
|
|
|
// simpler. The first step is to remove the extra edge to the Exit block.
|
|
|
|
Exit->removePredecessor(OrigPreheader, true /*preserve LCSSA*/);
|
2011-04-30 04:38:55 +08:00
|
|
|
BranchInst *NewBI = BranchInst::Create(NewHeader, PHBI);
|
|
|
|
NewBI->setDebugLoc(PHBI->getDebugLoc());
|
When loop rotation happens, it is *very* common for the duplicated condbr
to be foldable into an uncond branch. When this happens, we can make a
much simpler CFG for the loop, which is important for nested loop cases
where we want the outer loop to be aggressively optimized.
Handle this case more aggressively. For example, previously on
phi-duplicate.ll we would get this:
define void @test(i32 %N, double* %G) nounwind ssp {
entry:
%cmp1 = icmp slt i64 1, 1000
br i1 %cmp1, label %bb.nph, label %for.end
bb.nph: ; preds = %entry
br label %for.body
for.body: ; preds = %bb.nph, %for.cond
%j.02 = phi i64 [ 1, %bb.nph ], [ %inc, %for.cond ]
%arrayidx = getelementptr inbounds double* %G, i64 %j.02
%tmp3 = load double* %arrayidx
%sub = sub i64 %j.02, 1
%arrayidx6 = getelementptr inbounds double* %G, i64 %sub
%tmp7 = load double* %arrayidx6
%add = fadd double %tmp3, %tmp7
%arrayidx10 = getelementptr inbounds double* %G, i64 %j.02
store double %add, double* %arrayidx10
%inc = add nsw i64 %j.02, 1
br label %for.cond
for.cond: ; preds = %for.body
%cmp = icmp slt i64 %inc, 1000
br i1 %cmp, label %for.body, label %for.cond.for.end_crit_edge
for.cond.for.end_crit_edge: ; preds = %for.cond
br label %for.end
for.end: ; preds = %for.cond.for.end_crit_edge, %entry
ret void
}
Now we get the much nicer:
define void @test(i32 %N, double* %G) nounwind ssp {
entry:
br label %for.body
for.body: ; preds = %entry, %for.body
%j.01 = phi i64 [ 1, %entry ], [ %inc, %for.body ]
%arrayidx = getelementptr inbounds double* %G, i64 %j.01
%tmp3 = load double* %arrayidx
%sub = sub i64 %j.01, 1
%arrayidx6 = getelementptr inbounds double* %G, i64 %sub
%tmp7 = load double* %arrayidx6
%add = fadd double %tmp3, %tmp7
%arrayidx10 = getelementptr inbounds double* %G, i64 %j.01
store double %add, double* %arrayidx10
%inc = add nsw i64 %j.01, 1
%cmp = icmp slt i64 %inc, 1000
br i1 %cmp, label %for.body, label %for.end
for.end: ; preds = %for.body
ret void
}
With all of these recent changes, we are now able to compile:
void foo(char *X) {
for (int i = 0; i != 100; ++i)
for (int j = 0; j != 100; ++j)
X[j+i*100] = 0;
}
into a single memset of 10000 bytes. This series of changes
should also be helpful for other nested loop scenarios as well.
llvm-svn: 123079
2011-01-09 03:59:06 +08:00
|
|
|
PHBI->eraseFromParent();
|
2012-02-14 08:00:19 +08:00
|
|
|
|
When loop rotation happens, it is *very* common for the duplicated condbr
to be foldable into an uncond branch. When this happens, we can make a
much simpler CFG for the loop, which is important for nested loop cases
where we want the outer loop to be aggressively optimized.
Handle this case more aggressively. For example, previously on
phi-duplicate.ll we would get this:
define void @test(i32 %N, double* %G) nounwind ssp {
entry:
%cmp1 = icmp slt i64 1, 1000
br i1 %cmp1, label %bb.nph, label %for.end
bb.nph: ; preds = %entry
br label %for.body
for.body: ; preds = %bb.nph, %for.cond
%j.02 = phi i64 [ 1, %bb.nph ], [ %inc, %for.cond ]
%arrayidx = getelementptr inbounds double* %G, i64 %j.02
%tmp3 = load double* %arrayidx
%sub = sub i64 %j.02, 1
%arrayidx6 = getelementptr inbounds double* %G, i64 %sub
%tmp7 = load double* %arrayidx6
%add = fadd double %tmp3, %tmp7
%arrayidx10 = getelementptr inbounds double* %G, i64 %j.02
store double %add, double* %arrayidx10
%inc = add nsw i64 %j.02, 1
br label %for.cond
for.cond: ; preds = %for.body
%cmp = icmp slt i64 %inc, 1000
br i1 %cmp, label %for.body, label %for.cond.for.end_crit_edge
for.cond.for.end_crit_edge: ; preds = %for.cond
br label %for.end
for.end: ; preds = %for.cond.for.end_crit_edge, %entry
ret void
}
Now we get the much nicer:
define void @test(i32 %N, double* %G) nounwind ssp {
entry:
br label %for.body
for.body: ; preds = %entry, %for.body
%j.01 = phi i64 [ 1, %entry ], [ %inc, %for.body ]
%arrayidx = getelementptr inbounds double* %G, i64 %j.01
%tmp3 = load double* %arrayidx
%sub = sub i64 %j.01, 1
%arrayidx6 = getelementptr inbounds double* %G, i64 %sub
%tmp7 = load double* %arrayidx6
%add = fadd double %tmp3, %tmp7
%arrayidx10 = getelementptr inbounds double* %G, i64 %j.01
store double %add, double* %arrayidx10
%inc = add nsw i64 %j.01, 1
%cmp = icmp slt i64 %inc, 1000
br i1 %cmp, label %for.body, label %for.end
for.end: ; preds = %for.body
ret void
}
With all of these recent changes, we are now able to compile:
void foo(char *X) {
for (int i = 0; i != 100; ++i)
for (int j = 0; j != 100; ++j)
X[j+i*100] = 0;
}
into a single memset of 10000 bytes. This series of changes
should also be helpful for other nested loop scenarios as well.
llvm-svn: 123079
2011-01-09 03:59:06 +08:00
|
|
|
// With our CFG finalized, update DomTree if it is available.
|
|
|
|
if (DominatorTree *DT = getAnalysisIfAvailable<DominatorTree>()) {
|
|
|
|
// Update OrigHeader to be dominated by the new header block.
|
|
|
|
DT->changeImmediateDominator(NewHeader, OrigPreheader);
|
|
|
|
DT->changeImmediateDominator(OrigHeader, OrigLatch);
|
2012-08-30 23:39:42 +08:00
|
|
|
|
|
|
|
// Brute force incremental dominator tree update. Call
|
|
|
|
// findNearestCommonDominator on all CFG predecessors of each child of the
|
|
|
|
// original header.
|
|
|
|
DomTreeNode *OrigHeaderNode = DT->getNode(OrigHeader);
|
2012-09-02 19:57:22 +08:00
|
|
|
SmallVector<DomTreeNode *, 8> HeaderChildren(OrigHeaderNode->begin(),
|
|
|
|
OrigHeaderNode->end());
|
|
|
|
bool Changed;
|
|
|
|
do {
|
|
|
|
Changed = false;
|
|
|
|
for (unsigned I = 0, E = HeaderChildren.size(); I != E; ++I) {
|
|
|
|
DomTreeNode *Node = HeaderChildren[I];
|
|
|
|
BasicBlock *BB = Node->getBlock();
|
|
|
|
|
|
|
|
pred_iterator PI = pred_begin(BB);
|
|
|
|
BasicBlock *NearestDom = *PI;
|
|
|
|
for (pred_iterator PE = pred_end(BB); PI != PE; ++PI)
|
|
|
|
NearestDom = DT->findNearestCommonDominator(NearestDom, *PI);
|
|
|
|
|
|
|
|
// Remember if this changes the DomTree.
|
|
|
|
if (Node->getIDom()->getBlock() != NearestDom) {
|
|
|
|
DT->changeImmediateDominator(BB, NearestDom);
|
|
|
|
Changed = true;
|
2012-08-30 23:39:42 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-09-02 19:57:22 +08:00
|
|
|
// If the dominator changed, this may have an effect on other
|
|
|
|
// predecessors, continue until we reach a fixpoint.
|
|
|
|
} while (Changed);
|
When loop rotation happens, it is *very* common for the duplicated condbr
to be foldable into an uncond branch. When this happens, we can make a
much simpler CFG for the loop, which is important for nested loop cases
where we want the outer loop to be aggressively optimized.
Handle this case more aggressively. For example, previously on
phi-duplicate.ll we would get this:
define void @test(i32 %N, double* %G) nounwind ssp {
entry:
%cmp1 = icmp slt i64 1, 1000
br i1 %cmp1, label %bb.nph, label %for.end
bb.nph: ; preds = %entry
br label %for.body
for.body: ; preds = %bb.nph, %for.cond
%j.02 = phi i64 [ 1, %bb.nph ], [ %inc, %for.cond ]
%arrayidx = getelementptr inbounds double* %G, i64 %j.02
%tmp3 = load double* %arrayidx
%sub = sub i64 %j.02, 1
%arrayidx6 = getelementptr inbounds double* %G, i64 %sub
%tmp7 = load double* %arrayidx6
%add = fadd double %tmp3, %tmp7
%arrayidx10 = getelementptr inbounds double* %G, i64 %j.02
store double %add, double* %arrayidx10
%inc = add nsw i64 %j.02, 1
br label %for.cond
for.cond: ; preds = %for.body
%cmp = icmp slt i64 %inc, 1000
br i1 %cmp, label %for.body, label %for.cond.for.end_crit_edge
for.cond.for.end_crit_edge: ; preds = %for.cond
br label %for.end
for.end: ; preds = %for.cond.for.end_crit_edge, %entry
ret void
}
Now we get the much nicer:
define void @test(i32 %N, double* %G) nounwind ssp {
entry:
br label %for.body
for.body: ; preds = %entry, %for.body
%j.01 = phi i64 [ 1, %entry ], [ %inc, %for.body ]
%arrayidx = getelementptr inbounds double* %G, i64 %j.01
%tmp3 = load double* %arrayidx
%sub = sub i64 %j.01, 1
%arrayidx6 = getelementptr inbounds double* %G, i64 %sub
%tmp7 = load double* %arrayidx6
%add = fadd double %tmp3, %tmp7
%arrayidx10 = getelementptr inbounds double* %G, i64 %j.01
store double %add, double* %arrayidx10
%inc = add nsw i64 %j.01, 1
%cmp = icmp slt i64 %inc, 1000
br i1 %cmp, label %for.body, label %for.end
for.end: ; preds = %for.body
ret void
}
With all of these recent changes, we are now able to compile:
void foo(char *X) {
for (int i = 0; i != 100; ++i)
for (int j = 0; j != 100; ++j)
X[j+i*100] = 0;
}
into a single memset of 10000 bytes. This series of changes
should also be helpful for other nested loop scenarios as well.
llvm-svn: 123079
2011-01-09 03:59:06 +08:00
|
|
|
}
|
2007-07-12 07:47:28 +08:00
|
|
|
}
|
2012-02-14 08:00:19 +08:00
|
|
|
|
When loop rotation happens, it is *very* common for the duplicated condbr
to be foldable into an uncond branch. When this happens, we can make a
much simpler CFG for the loop, which is important for nested loop cases
where we want the outer loop to be aggressively optimized.
Handle this case more aggressively. For example, previously on
phi-duplicate.ll we would get this:
define void @test(i32 %N, double* %G) nounwind ssp {
entry:
%cmp1 = icmp slt i64 1, 1000
br i1 %cmp1, label %bb.nph, label %for.end
bb.nph: ; preds = %entry
br label %for.body
for.body: ; preds = %bb.nph, %for.cond
%j.02 = phi i64 [ 1, %bb.nph ], [ %inc, %for.cond ]
%arrayidx = getelementptr inbounds double* %G, i64 %j.02
%tmp3 = load double* %arrayidx
%sub = sub i64 %j.02, 1
%arrayidx6 = getelementptr inbounds double* %G, i64 %sub
%tmp7 = load double* %arrayidx6
%add = fadd double %tmp3, %tmp7
%arrayidx10 = getelementptr inbounds double* %G, i64 %j.02
store double %add, double* %arrayidx10
%inc = add nsw i64 %j.02, 1
br label %for.cond
for.cond: ; preds = %for.body
%cmp = icmp slt i64 %inc, 1000
br i1 %cmp, label %for.body, label %for.cond.for.end_crit_edge
for.cond.for.end_crit_edge: ; preds = %for.cond
br label %for.end
for.end: ; preds = %for.cond.for.end_crit_edge, %entry
ret void
}
Now we get the much nicer:
define void @test(i32 %N, double* %G) nounwind ssp {
entry:
br label %for.body
for.body: ; preds = %entry, %for.body
%j.01 = phi i64 [ 1, %entry ], [ %inc, %for.body ]
%arrayidx = getelementptr inbounds double* %G, i64 %j.01
%tmp3 = load double* %arrayidx
%sub = sub i64 %j.01, 1
%arrayidx6 = getelementptr inbounds double* %G, i64 %sub
%tmp7 = load double* %arrayidx6
%add = fadd double %tmp3, %tmp7
%arrayidx10 = getelementptr inbounds double* %G, i64 %j.01
store double %add, double* %arrayidx10
%inc = add nsw i64 %j.01, 1
%cmp = icmp slt i64 %inc, 1000
br i1 %cmp, label %for.body, label %for.end
for.end: ; preds = %for.body
ret void
}
With all of these recent changes, we are now able to compile:
void foo(char *X) {
for (int i = 0; i != 100; ++i)
for (int j = 0; j != 100; ++j)
X[j+i*100] = 0;
}
into a single memset of 10000 bytes. This series of changes
should also be helpful for other nested loop scenarios as well.
llvm-svn: 123079
2011-01-09 03:59:06 +08:00
|
|
|
assert(L->getLoopPreheader() && "Invalid loop preheader after loop rotation");
|
2011-01-09 02:52:51 +08:00
|
|
|
assert(L->getLoopLatch() && "Invalid loop latch after loop rotation");
|
2011-01-09 02:55:50 +08:00
|
|
|
|
2011-01-11 15:47:59 +08:00
|
|
|
// Now that the CFG and DomTree are in a consistent state again, try to merge
|
|
|
|
// the OrigHeader block into OrigLatch. This will succeed if they are
|
|
|
|
// connected by an unconditional branch. This is just a cleanup so the
|
|
|
|
// emitted code isn't too gross in this common case.
|
|
|
|
MergeBlockIntoPredecessor(OrigHeader, this);
|
2012-02-14 08:00:19 +08:00
|
|
|
|
2012-08-30 23:39:42 +08:00
|
|
|
DEBUG(dbgs() << "LoopRotation: into "; L->dump());
|
|
|
|
|
2011-01-09 02:55:50 +08:00
|
|
|
++NumRotated;
|
|
|
|
return true;
|
2007-04-10 04:19:46 +08:00
|
|
|
}
|