misched: Allow subtargets to enable misched and dependent options.

This allows me to begin enabling (or backing out) misched by default
for one subtarget at a time. To run misched we typically want to:
- Disable SelectionDAG scheduling (use the source order scheduler)
- Enable more aggressive coalescing (until we decide to always run the coalescer this way)
- Enable MachineScheduler pass itself.

Disabling PostRA sched may follow for some subtargets.

llvm-svn: 167826
This commit is contained in:
Andrew Trick 2012-11-13 08:47:29 +00:00
parent 40534fe9a5
commit 108c88c5b7
6 changed files with 54 additions and 14 deletions

View File

@@ -54,6 +54,13 @@ public:
return 0;
}
/// \brief True if the subtarget should run MachineScheduler after aggressive
/// coalescing.
///
/// This currently replaces the SelectionDAG scheduler with the "source" order
/// scheduler. It does not yet disable the postRA scheduler.
virtual bool enableMachineScheduler() const;
// enablePostRAScheduler - If the target can benefit from post-regalloc
// scheduling and the specified optimization level meets the requirement
// return true to enable post-register-allocation scheduling. In

View File

@@ -60,11 +60,11 @@ static cl::opt<unsigned> ILPWindow("ilp-window", cl::Hidden,
// Experimental heuristics
static cl::opt<bool> EnableLoadCluster("misched-cluster", cl::Hidden,
cl::desc("Enable load clustering."));
cl::desc("Enable load clustering."), cl::init(true));
// Experimental heuristics
static cl::opt<bool> EnableMacroFusion("misched-fusion", cl::Hidden,
cl::desc("Enable scheduling for macro fusion."));
cl::desc("Enable scheduling for macro fusion."), cl::init(true));
//===----------------------------------------------------------------------===//
// Machine Instruction Scheduling Pass and Registry

View File

@@ -22,6 +22,7 @@
#include "llvm/CodeGen/RegAllocRegistry.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/Assembly/PrintModulePass.h"
#include "llvm/Support/CommandLine.h"
@@ -241,7 +242,9 @@ TargetPassConfig::TargetPassConfig(TargetMachine *tm, PassManagerBase &pm)
disablePass(&EarlyIfConverterID);
// Temporarily disable experimental passes.
substitutePass(&MachineSchedulerID, 0);
const TargetSubtargetInfo &ST = TM->getSubtarget<TargetSubtargetInfo>();
if (!ST.enableMachineScheduler())
disablePass(&MachineSchedulerID);
}
/// Insert InsertedPassID pass after TargetPassID.

View File

@@ -45,6 +45,7 @@
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <algorithm>
#include <cmath>
using namespace llvm;
@@ -64,16 +65,16 @@ EnableJoining("join-liveintervals",
cl::init(true));
// Temporary flag to test critical edge unsplitting.
static cl::opt<bool>
static cl::opt<cl::boolOrDefault>
EnableJoinSplits("join-splitedges",
cl::desc("Coalesce copies on split edges (default=false)"),
cl::init(false), cl::Hidden);
cl::desc("Coalesce copies on split edges (default=subtarget)"),
cl::init(cl::BOU_UNSET), cl::Hidden);
// Temporary flag to test global copy optimization.
static cl::opt<bool>
static cl::opt<cl::boolOrDefault>
EnableGlobalCopies("join-globalcopies",
cl::desc("Coalesce copies that don't locally define an lrg"),
cl::init(false));
cl::desc("Coalesce copies that span blocks (default=subtarget)"),
cl::init(cl::BOU_UNSET), cl::Hidden);
static cl::opt<bool>
VerifyCoalescing("verify-coalescing",
@@ -94,6 +95,14 @@ namespace {
AliasAnalysis *AA;
RegisterClassInfo RegClassInfo;
/// \brief True if the coalescer should aggressively coalesce global copies
/// in favor of keeping local copies.
bool JoinGlobalCopies;
/// \brief True if the coalescer should aggressively coalesce fall-thru
/// blocks exclusively containing copies.
bool JoinSplitEdges;
/// WorkList - Copy instructions yet to be coalesced.
SmallVector<MachineInstr*, 8> WorkList;
SmallVector<MachineInstr*, 8> LocalWorkList;
@@ -1943,6 +1952,10 @@ namespace {
//
// EnableGlobalCopies assumes that the primary sort key is loop depth.
struct MBBPriorityCompare {
bool JoinSplitEdges;
MBBPriorityCompare(bool joinsplits): JoinSplitEdges(joinsplits) {}
bool operator()(const MBBPriorityInfo &LHS,
const MBBPriorityInfo &RHS) const {
// Deeper loops first
@@ -1950,7 +1963,7 @@ namespace {
return LHS.Depth > RHS.Depth;
// Try to unsplit critical edges next.
if (EnableJoinSplits && LHS.IsSplit != RHS.IsSplit)
if (JoinSplitEdges && LHS.IsSplit != RHS.IsSplit)
return LHS.IsSplit;
// Prefer blocks that are more connected in the CFG. This takes care of
@@ -2011,7 +2024,7 @@ RegisterCoalescer::copyCoalesceInMBB(MachineBasicBlock *MBB) {
// Collect all copy-like instructions in MBB. Don't start coalescing anything
// yet, it might invalidate the iterator.
const unsigned PrevSize = WorkList.size();
if (EnableGlobalCopies) {
if (JoinGlobalCopies) {
// Coalesce copies bottom-up to coalesce local defs before local uses. They
// are not inherently easier to resolve, but slightly preferable until we
// have local live range splitting. In particular this is required by
@@ -2061,13 +2074,13 @@ void RegisterCoalescer::joinAllIntervals() {
MBBs.push_back(MBBPriorityInfo(MBB, Loops->getLoopDepth(MBB),
isSplitEdge(MBB)));
}
std::sort(MBBs.begin(), MBBs.end(), MBBPriorityCompare());
std::sort(MBBs.begin(), MBBs.end(), MBBPriorityCompare(JoinSplitEdges));
// Coalesce intervals in MBB priority order.
unsigned CurrDepth = UINT_MAX;
for (unsigned i = 0, e = MBBs.size(); i != e; ++i) {
// Try coalescing the collected local copies for deeper loops.
if (EnableGlobalCopies && MBBs[i].Depth < CurrDepth)
if (JoinGlobalCopies && MBBs[i].Depth < CurrDepth)
coalesceLocals();
copyCoalesceInMBB(MBBs[i].MBB);
}
@@ -2097,6 +2110,17 @@ bool RegisterCoalescer::runOnMachineFunction(MachineFunction &fn) {
AA = &getAnalysis<AliasAnalysis>();
Loops = &getAnalysis<MachineLoopInfo>();
const TargetSubtargetInfo &ST = TM->getSubtarget<TargetSubtargetInfo>();
if (EnableGlobalCopies == cl::BOU_UNSET)
JoinGlobalCopies = ST.enableMachineScheduler();
else
JoinGlobalCopies = (EnableGlobalCopies == cl::BOU_TRUE);
if (EnableJoinSplits == cl::BOU_UNSET)
JoinSplitEdges = ST.enableMachineScheduler();
else
JoinSplitEdges = (EnableJoinSplits == cl::BOU_TRUE);
DEBUG(dbgs() << "********** SIMPLE REGISTER COALESCING **********\n"
<< "********** Function: " << MF->getName() << '\n');

View File

@@ -45,6 +45,7 @@
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
@@ -216,8 +217,9 @@ namespace llvm {
ScheduleDAGSDNodes* createDefaultScheduler(SelectionDAGISel *IS,
CodeGenOpt::Level OptLevel) {
const TargetLowering &TLI = IS->getTargetLowering();
const TargetSubtargetInfo &ST = IS->TM.getSubtarget<TargetSubtargetInfo>();
if (OptLevel == CodeGenOpt::None ||
if (OptLevel == CodeGenOpt::None || ST.enableMachineScheduler() ||
TLI.getSchedulingPreference() == Sched::Source)
return createSourceListDAGScheduler(IS, OptLevel);
if (TLI.getSchedulingPreference() == Sched::RegPressure)

View File

@@ -22,6 +22,10 @@ TargetSubtargetInfo::TargetSubtargetInfo() {}
// Empty virtual destructor, defined out-of-line in this .cpp (presumably to
// anchor the class's vtable to this translation unit -- common LLVM practice;
// NOTE(review): confirm against the header's declaration).
TargetSubtargetInfo::~TargetSubtargetInfo() {}
/// Default implementation of the subtarget hook introduced by this commit.
/// Returning false keeps the legacy pipeline: TargetPassConfig leaves the
/// MachineScheduler pass disabled, RegisterCoalescer keeps JoinGlobalCopies /
/// JoinSplitEdges off (unless forced via -join-globalcopies/-join-splitedges),
/// and SelectionDAGISel keeps its normal (non-source-order) scheduler.
/// Subtargets override this to return true to opt in to misched by default.
bool TargetSubtargetInfo::enableMachineScheduler() const {
return false;
}
bool TargetSubtargetInfo::enablePostRAScheduler(
CodeGenOpt::Level OptLevel,
AntiDepBreakMode& Mode,