misched preparation: rename core scheduler methods for consistency.

We had half the API with one convention, half with another. Now was a
good time to clean it up.

llvm-svn: 152255
This commit is contained in:
Andrew Trick 2012-03-07 23:00:49 +00:00
parent 22842f89e4
commit 52226d409b
15 changed files with 112 additions and 112 deletions

View File

@ -85,11 +85,11 @@ namespace llvm {
virtual void dump(ScheduleDAG* DAG) const; virtual void dump(ScheduleDAG* DAG) const;
// ScheduledNode - As nodes are scheduled, we look to see if there are any // scheduledNode - As nodes are scheduled, we look to see if there are any
// successor nodes that have a single unscheduled predecessor. If so, that // successor nodes that have a single unscheduled predecessor. If so, that
// single predecessor has a higher priority, since scheduling it will make // single predecessor has a higher priority, since scheduling it will make
// the node available. // the node available.
void ScheduledNode(SUnit *Node); void scheduledNode(SUnit *Node);
private: private:
void AdjustPriorityOfUnscheduledPreds(SUnit *SU); void AdjustPriorityOfUnscheduledPreds(SUnit *SU);

View File

@ -126,8 +126,8 @@ namespace llvm {
virtual void dump(ScheduleDAG* DAG) const; virtual void dump(ScheduleDAG* DAG) const;
/// ScheduledNode - Main resource tracking point. /// scheduledNode - Main resource tracking point.
void ScheduledNode(SUnit *Node); void scheduledNode(SUnit *Node);
bool isResourceAvailable(SUnit *SU); bool isResourceAvailable(SUnit *SU);
void reserveResources(SUnit *SU); void reserveResources(SUnit *SU);

View File

@ -467,13 +467,13 @@ namespace llvm {
virtual void dump(ScheduleDAG *) const {} virtual void dump(ScheduleDAG *) const {}
/// ScheduledNode - As each node is scheduled, this method is invoked. This /// scheduledNode - As each node is scheduled, this method is invoked. This
/// allows the priority function to adjust the priority of related /// allows the priority function to adjust the priority of related
/// unscheduled nodes, for example. /// unscheduled nodes, for example.
/// ///
virtual void ScheduledNode(SUnit *) {} virtual void scheduledNode(SUnit *) {}
virtual void UnscheduledNode(SUnit *) {} virtual void unscheduledNode(SUnit *) {}
void setCurCycle(unsigned Cycle) { void setCurCycle(unsigned Cycle) {
CurCycle = Cycle; CurCycle = Cycle;
@ -543,18 +543,18 @@ namespace llvm {
protected: protected:
/// ComputeLatency - Compute node latency. /// ComputeLatency - Compute node latency.
/// ///
virtual void ComputeLatency(SUnit *SU) = 0; virtual void computeLatency(SUnit *SU) = 0;
/// ComputeOperandLatency - Override dependence edge latency using /// ComputeOperandLatency - Override dependence edge latency using
/// operand use/def information /// operand use/def information
/// ///
virtual void ComputeOperandLatency(SUnit *, SUnit *, virtual void computeOperandLatency(SUnit *, SUnit *,
SDep&) const { } SDep&) const { }
/// ForceUnitLatencies - Return true if all scheduling edges should be given /// ForceUnitLatencies - Return true if all scheduling edges should be given
/// a latency value of one. The default is to return false; schedulers may /// a latency value of one. The default is to return false; schedulers may
/// override this as needed. /// override this as needed.
virtual bool ForceUnitLatencies() const { return false; } virtual bool forceUnitLatencies() const { return false; }
private: private:
// Return the MCInstrDesc of this SDNode or NULL. // Return the MCInstrDesc of this SDNode or NULL.

View File

@ -111,7 +111,7 @@ public:
DefaultVLIWScheduler(MachineFunction &MF, MachineLoopInfo &MLI, DefaultVLIWScheduler(MachineFunction &MF, MachineLoopInfo &MLI,
MachineDominatorTree &MDT, bool IsPostRA); MachineDominatorTree &MDT, bool IsPostRA);
// Schedule - Actual scheduling work. // Schedule - Actual scheduling work.
void Schedule(); void schedule();
}; };
} // end anonymous namespace } // end anonymous namespace
@ -121,9 +121,9 @@ DefaultVLIWScheduler::DefaultVLIWScheduler(
ScheduleDAGInstrs(MF, MLI, MDT, IsPostRA) { ScheduleDAGInstrs(MF, MLI, MDT, IsPostRA) {
} }
void DefaultVLIWScheduler::Schedule() { void DefaultVLIWScheduler::schedule() {
// Build the scheduling graph. // Build the scheduling graph.
BuildSchedGraph(0); buildSchedGraph(0);
} }
// VLIWPacketizerList Ctor // VLIWPacketizerList Ctor
@ -186,7 +186,7 @@ void VLIWPacketizerList::PacketizeMIs(MachineBasicBlock *MBB,
MachineBasicBlock::iterator EndItr) { MachineBasicBlock::iterator EndItr) {
DefaultVLIWScheduler *Scheduler = (DefaultVLIWScheduler *)SchedulerImpl; DefaultVLIWScheduler *Scheduler = (DefaultVLIWScheduler *)SchedulerImpl;
Scheduler->enterRegion(MBB, BeginItr, EndItr, MBB->size()); Scheduler->enterRegion(MBB, BeginItr, EndItr, MBB->size());
Scheduler->Schedule(); Scheduler->schedule();
Scheduler->exitRegion(); Scheduler->exitRegion();
// Remember scheduling units. // Remember scheduling units.

View File

@ -84,11 +84,11 @@ void LatencyPriorityQueue::push(SUnit *SU) {
} }
// ScheduledNode - As nodes are scheduled, we look to see if there are any // scheduledNode - As nodes are scheduled, we look to see if there are any
// successor nodes that have a single unscheduled predecessor. If so, that // successor nodes that have a single unscheduled predecessor. If so, that
// single predecessor has a higher priority, since scheduling it will make // single predecessor has a higher priority, since scheduling it will make
// the node available. // the node available.
void LatencyPriorityQueue::ScheduledNode(SUnit *SU) { void LatencyPriorityQueue::scheduledNode(SUnit *SU) {
for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end(); for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
I != E; ++I) { I != E; ++I) {
AdjustPriorityOfUnscheduledPreds(I->getSUnit()); AdjustPriorityOfUnscheduledPreds(I->getSUnit());

View File

@ -160,7 +160,7 @@ public:
Pass(P) {} Pass(P) {}
/// ScheduleDAGInstrs callback. /// ScheduleDAGInstrs callback.
void Schedule(); void schedule();
/// Interface implemented by the selected top-down liveinterval scheduler. /// Interface implemented by the selected top-down liveinterval scheduler.
/// ///
@ -203,10 +203,10 @@ void ScheduleTopDownLive::releaseSuccessors(SUnit *SU) {
} }
} }
/// Schedule - This is called back from ScheduleDAGInstrs::Run() when it's /// schedule - This is called back from ScheduleDAGInstrs::Run() when it's
/// time to do some work. /// time to do some work.
void ScheduleTopDownLive::Schedule() { void ScheduleTopDownLive::schedule() {
BuildSchedGraph(&Pass->getAnalysis<AliasAnalysis>()); buildSchedGraph(&Pass->getAnalysis<AliasAnalysis>());
DEBUG(dbgs() << "********** MI Scheduling **********\n"); DEBUG(dbgs() << "********** MI Scheduling **********\n");
DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su) DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
@ -273,7 +273,7 @@ bool MachineScheduler::runOnMachineFunction(MachineFunction &mf) {
unsigned RemainingCount = MBB->size(); unsigned RemainingCount = MBB->size();
for(MachineBasicBlock::iterator RegionEnd = MBB->end(); for(MachineBasicBlock::iterator RegionEnd = MBB->end();
RegionEnd != MBB->begin();) { RegionEnd != MBB->begin();) {
Scheduler->StartBlock(MBB); Scheduler->startBlock(MBB);
// The next region starts above the previous region. Look backward in the // The next region starts above the previous region. Look backward in the
// instruction stream until we find the nearest boundary. // instruction stream until we find the nearest boundary.
MachineBasicBlock::iterator I = RegionEnd; MachineBasicBlock::iterator I = RegionEnd;
@ -301,8 +301,8 @@ bool MachineScheduler::runOnMachineFunction(MachineFunction &mf) {
dbgs() << " Remaining: " << RemainingCount << "\n"); dbgs() << " Remaining: " << RemainingCount << "\n");
// Inform ScheduleDAGInstrs of the region being scheduled. It calls back // Inform ScheduleDAGInstrs of the region being scheduled. It calls back
// to our Schedule() method. // to our schedule() method.
Scheduler->Schedule(); Scheduler->schedule();
Scheduler->exitRegion(); Scheduler->exitRegion();
// Scheduling has invalidated the current iterator 'I'. Ask the // Scheduling has invalidated the current iterator 'I'. Ask the
@ -310,7 +310,7 @@ bool MachineScheduler::runOnMachineFunction(MachineFunction &mf) {
RegionEnd = Scheduler->begin(); RegionEnd = Scheduler->begin();
} }
assert(RemainingCount == 0 && "Instruction count mismatch!"); assert(RemainingCount == 0 && "Instruction count mismatch!");
Scheduler->FinishBlock(); Scheduler->finishBlock();
} }
return true; return true;
} }
@ -331,9 +331,9 @@ public:
ScheduleDAGInstrs(*P->MF, *P->MLI, *P->MDT, /*IsPostRA=*/false, P->LIS), ScheduleDAGInstrs(*P->MF, *P->MLI, *P->MDT, /*IsPostRA=*/false, P->LIS),
Pass(P) {} Pass(P) {}
/// Schedule - This is called back from ScheduleDAGInstrs::Run() when it's /// schedule - This is called back from ScheduleDAGInstrs::Run() when it's
/// time to do some work. /// time to do some work.
void Schedule(); void schedule();
}; };
} // namespace } // namespace
@ -348,8 +348,8 @@ SchedDefaultRegistry("default", "Activate the scheduler pass, "
/// Schedule - This is called back from ScheduleDAGInstrs::Run() when it's /// Schedule - This is called back from ScheduleDAGInstrs::Run() when it's
/// time to do some work. /// time to do some work.
void DefaultMachineScheduler::Schedule() { void DefaultMachineScheduler::schedule() {
BuildSchedGraph(&Pass->getAnalysis<AliasAnalysis>()); buildSchedGraph(&Pass->getAnalysis<AliasAnalysis>());
DEBUG(dbgs() << "********** MI Scheduling **********\n"); DEBUG(dbgs() << "********** MI Scheduling **********\n");
DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su) DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)

View File

@ -139,10 +139,10 @@ namespace {
~SchedulePostRATDList(); ~SchedulePostRATDList();
/// StartBlock - Initialize register live-range state for scheduling in /// startBlock - Initialize register live-range state for scheduling in
/// this block. /// this block.
/// ///
void StartBlock(MachineBasicBlock *BB); void startBlock(MachineBasicBlock *BB);
/// Initialize the scheduler state for the next scheduling region. /// Initialize the scheduler state for the next scheduling region.
virtual void enterRegion(MachineBasicBlock *bb, virtual void enterRegion(MachineBasicBlock *bb,
@ -155,7 +155,7 @@ namespace {
/// Schedule - Schedule the instruction range using list scheduling. /// Schedule - Schedule the instruction range using list scheduling.
/// ///
void Schedule(); void schedule();
void EmitSchedule(); void EmitSchedule();
@ -164,9 +164,9 @@ namespace {
/// ///
void Observe(MachineInstr *MI, unsigned Count); void Observe(MachineInstr *MI, unsigned Count);
/// FinishBlock - Clean up register live-range state. /// finishBlock - Clean up register live-range state.
/// ///
void FinishBlock(); void finishBlock();
/// FixupKills - Fix register kill flags that have been made /// FixupKills - Fix register kill flags that have been made
/// invalid due to scheduling /// invalid due to scheduling
@ -301,7 +301,7 @@ bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) {
#endif #endif
// Initialize register live-range state for scheduling in this block. // Initialize register live-range state for scheduling in this block.
Scheduler.StartBlock(MBB); Scheduler.startBlock(MBB);
// Schedule each sequence of instructions not interrupted by a label // Schedule each sequence of instructions not interrupted by a label
// or anything else that effectively needs to shut down scheduling. // or anything else that effectively needs to shut down scheduling.
@ -314,7 +314,7 @@ bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) {
// don't need to worry about register pressure. // don't need to worry about register pressure.
if (MI->isCall() || TII->isSchedulingBoundary(MI, MBB, Fn)) { if (MI->isCall() || TII->isSchedulingBoundary(MI, MBB, Fn)) {
Scheduler.enterRegion(MBB, I, Current, CurrentCount); Scheduler.enterRegion(MBB, I, Current, CurrentCount);
Scheduler.Schedule(); Scheduler.schedule();
Scheduler.exitRegion(); Scheduler.exitRegion();
Scheduler.EmitSchedule(); Scheduler.EmitSchedule();
Current = MI; Current = MI;
@ -330,12 +330,12 @@ bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) {
assert((MBB->begin() == Current || CurrentCount != 0) && assert((MBB->begin() == Current || CurrentCount != 0) &&
"Instruction count mismatch!"); "Instruction count mismatch!");
Scheduler.enterRegion(MBB, MBB->begin(), Current, CurrentCount); Scheduler.enterRegion(MBB, MBB->begin(), Current, CurrentCount);
Scheduler.Schedule(); Scheduler.schedule();
Scheduler.exitRegion(); Scheduler.exitRegion();
Scheduler.EmitSchedule(); Scheduler.EmitSchedule();
// Clean up register live-range state. // Clean up register live-range state.
Scheduler.FinishBlock(); Scheduler.finishBlock();
// Update register kills // Update register kills
Scheduler.FixupKills(MBB); Scheduler.FixupKills(MBB);
@ -347,9 +347,9 @@ bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) {
/// StartBlock - Initialize register live-range state for scheduling in /// StartBlock - Initialize register live-range state for scheduling in
/// this block. /// this block.
/// ///
void SchedulePostRATDList::StartBlock(MachineBasicBlock *BB) { void SchedulePostRATDList::startBlock(MachineBasicBlock *BB) {
// Call the superclass. // Call the superclass.
ScheduleDAGInstrs::StartBlock(BB); ScheduleDAGInstrs::startBlock(BB);
// Reset the hazard recognizer and anti-dep breaker. // Reset the hazard recognizer and anti-dep breaker.
HazardRec->Reset(); HazardRec->Reset();
@ -359,9 +359,9 @@ void SchedulePostRATDList::StartBlock(MachineBasicBlock *BB) {
/// Schedule - Schedule the instruction range using list scheduling. /// Schedule - Schedule the instruction range using list scheduling.
/// ///
void SchedulePostRATDList::Schedule() { void SchedulePostRATDList::schedule() {
// Build the scheduling graph. // Build the scheduling graph.
BuildSchedGraph(AA); buildSchedGraph(AA);
if (AntiDepBreak != NULL) { if (AntiDepBreak != NULL) {
unsigned Broken = unsigned Broken =
@ -376,7 +376,7 @@ void SchedulePostRATDList::Schedule() {
// that register, and add new anti-dependence and output-dependence // that register, and add new anti-dependence and output-dependence
// edges based on the next live range of the register. // edges based on the next live range of the register.
ScheduleDAG::clearDAG(); ScheduleDAG::clearDAG();
BuildSchedGraph(AA); buildSchedGraph(AA);
NumFixedAnti += Broken; NumFixedAnti += Broken;
} }
@ -401,12 +401,12 @@ void SchedulePostRATDList::Observe(MachineInstr *MI, unsigned Count) {
/// FinishBlock - Clean up register live-range state. /// FinishBlock - Clean up register live-range state.
/// ///
void SchedulePostRATDList::FinishBlock() { void SchedulePostRATDList::finishBlock() {
if (AntiDepBreak != NULL) if (AntiDepBreak != NULL)
AntiDepBreak->FinishBlock(); AntiDepBreak->FinishBlock();
// Call the superclass. // Call the superclass.
ScheduleDAGInstrs::FinishBlock(); ScheduleDAGInstrs::finishBlock();
} }
/// StartBlockForKills - Initialize register live-range state for updating kills /// StartBlockForKills - Initialize register live-range state for updating kills
@ -635,7 +635,7 @@ void SchedulePostRATDList::ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle) {
ReleaseSuccessors(SU); ReleaseSuccessors(SU);
SU->isScheduled = true; SU->isScheduled = true;
AvailableQueue.ScheduledNode(SU); AvailableQueue.scheduledNode(SU);
} }
/// ListScheduleTopDown - The main loop of list scheduling for top-down /// ListScheduleTopDown - The main loop of list scheduling for top-down

View File

@ -125,14 +125,14 @@ static const Value *getUnderlyingObjectForInstr(const MachineInstr *MI,
return 0; return 0;
} }
void ScheduleDAGInstrs::StartBlock(MachineBasicBlock *BB) { void ScheduleDAGInstrs::startBlock(MachineBasicBlock *BB) {
LoopRegs.Deps.clear(); LoopRegs.Deps.clear();
if (MachineLoop *ML = MLI.getLoopFor(BB)) if (MachineLoop *ML = MLI.getLoopFor(BB))
if (BB == ML->getLoopLatch()) if (BB == ML->getLoopLatch())
LoopRegs.VisitLoop(ML); LoopRegs.VisitLoop(ML);
} }
void ScheduleDAGInstrs::FinishBlock() { void ScheduleDAGInstrs::finishBlock() {
// Nothing to do. // Nothing to do.
} }
@ -164,7 +164,7 @@ void ScheduleDAGInstrs::enterRegion(MachineBasicBlock *bb,
InsertPosIndex = endcount; InsertPosIndex = endcount;
// Check to see if the scheduler cares about latencies. // Check to see if the scheduler cares about latencies.
UnitLatencies = ForceUnitLatencies(); UnitLatencies = forceUnitLatencies();
ScheduleDAG::clearDAG(); ScheduleDAG::clearDAG();
} }
@ -175,7 +175,7 @@ void ScheduleDAGInstrs::exitRegion() {
// Nothing to do. // Nothing to do.
} }
/// AddSchedBarrierDeps - Add dependencies from instructions in the current /// addSchedBarrierDeps - Add dependencies from instructions in the current
/// list of instructions being scheduled to the scheduling barrier by adding /// list of instructions being scheduled to the scheduling barrier by adding
/// the exit SU to the register defs and use list. This is because we want to /// the exit SU to the register defs and use list. This is because we want to
/// make sure instructions which define registers that are either used by /// make sure instructions which define registers that are either used by
@ -183,7 +183,7 @@ void ScheduleDAGInstrs::exitRegion() {
/// especially important when the definition latency of the return value(s) /// especially important when the definition latency of the return value(s)
/// are too high to be hidden by the branch or when the liveout registers /// are too high to be hidden by the branch or when the liveout registers
/// are used by instructions in the fallthrough block. /// are used by instructions in the fallthrough block.
void ScheduleDAGInstrs::AddSchedBarrierDeps() { void ScheduleDAGInstrs::addSchedBarrierDeps() {
MachineInstr *ExitMI = InsertPos != BB->end() ? &*InsertPos : 0; MachineInstr *ExitMI = InsertPos != BB->end() ? &*InsertPos : 0;
ExitSU.setInstr(ExitMI); ExitSU.setInstr(ExitMI);
bool AllDepKnown = ExitMI && bool AllDepKnown = ExitMI &&
@ -259,7 +259,7 @@ void ScheduleDAGInstrs::addPhysRegDataDeps(SUnit *SU,
// perform its own adjustments. // perform its own adjustments.
const SDep& dep = SDep(SU, SDep::Data, LDataLatency, *Alias); const SDep& dep = SDep(SU, SDep::Data, LDataLatency, *Alias);
if (!UnitLatencies) { if (!UnitLatencies) {
ComputeOperandLatency(SU, UseSU, const_cast<SDep &>(dep)); computeOperandLatency(SU, UseSU, const_cast<SDep &>(dep));
ST.adjustSchedDependency(SU, UseSU, const_cast<SDep &>(dep)); ST.adjustSchedDependency(SU, UseSU, const_cast<SDep &>(dep));
} }
UseSU->addPred(dep); UseSU->addPred(dep);
@ -449,7 +449,7 @@ void ScheduleDAGInstrs::addVRegUseDeps(SUnit *SU, unsigned OperIdx) {
if (!UnitLatencies) { if (!UnitLatencies) {
// Adjust the dependence latency using operand def/use information, then // Adjust the dependence latency using operand def/use information, then
// allow the target to perform its own adjustments. // allow the target to perform its own adjustments.
ComputeOperandLatency(DefSU, SU, const_cast<SDep &>(dep)); computeOperandLatency(DefSU, SU, const_cast<SDep &>(dep));
const TargetSubtargetInfo &ST = TM.getSubtarget<TargetSubtargetInfo>(); const TargetSubtargetInfo &ST = TM.getSubtarget<TargetSubtargetInfo>();
ST.adjustSchedDependency(DefSU, SU, const_cast<SDep &>(dep)); ST.adjustSchedDependency(DefSU, SU, const_cast<SDep &>(dep));
} }
@ -481,7 +481,7 @@ void ScheduleDAGInstrs::initSUnits() {
if (MI->isDebugValue()) if (MI->isDebugValue())
continue; continue;
SUnit *SU = NewSUnit(MI); SUnit *SU = newSUnit(MI);
MISUnitMap[MI] = SU; MISUnitMap[MI] = SU;
SU->isCall = MI->isCall(); SU->isCall = MI->isCall();
@ -491,11 +491,11 @@ void ScheduleDAGInstrs::initSUnits() {
if (UnitLatencies) if (UnitLatencies)
SU->Latency = 1; SU->Latency = 1;
else else
ComputeLatency(SU); computeLatency(SU);
} }
} }
void ScheduleDAGInstrs::BuildSchedGraph(AliasAnalysis *AA) { void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA) {
// Create an SUnit for each real instruction. // Create an SUnit for each real instruction.
initSUnits(); initSUnits();
@ -530,7 +530,7 @@ void ScheduleDAGInstrs::BuildSchedGraph(AliasAnalysis *AA) {
// Model data dependencies between instructions being scheduled and the // Model data dependencies between instructions being scheduled and the
// ExitSU. // ExitSU.
AddSchedBarrierDeps(); addSchedBarrierDeps();
// Walk the list of instructions, from bottom moving up. // Walk the list of instructions, from bottom moving up.
MachineInstr *PrevMI = NULL; MachineInstr *PrevMI = NULL;
@ -728,7 +728,7 @@ void ScheduleDAGInstrs::BuildSchedGraph(AliasAnalysis *AA) {
MISUnitMap.clear(); MISUnitMap.clear();
} }
void ScheduleDAGInstrs::ComputeLatency(SUnit *SU) { void ScheduleDAGInstrs::computeLatency(SUnit *SU) {
// Compute the latency for the node. // Compute the latency for the node.
if (!InstrItins || InstrItins->isEmpty()) { if (!InstrItins || InstrItins->isEmpty()) {
SU->Latency = 1; SU->Latency = 1;
@ -742,7 +742,7 @@ void ScheduleDAGInstrs::ComputeLatency(SUnit *SU) {
} }
} }
void ScheduleDAGInstrs::ComputeOperandLatency(SUnit *Def, SUnit *Use, void ScheduleDAGInstrs::computeOperandLatency(SUnit *Def, SUnit *Use,
SDep& dep) const { SDep& dep) const {
if (!InstrItins || InstrItins->isEmpty()) if (!InstrItins || InstrItins->isEmpty())
return; return;

View File

@ -243,7 +243,7 @@ namespace llvm {
/// NewSUnit - Creates a new SUnit and return a ptr to it. /// NewSUnit - Creates a new SUnit and return a ptr to it.
/// ///
SUnit *NewSUnit(MachineInstr *MI) { SUnit *newSUnit(MachineInstr *MI) {
#ifndef NDEBUG #ifndef NDEBUG
const SUnit *Addr = SUnits.empty() ? 0 : &SUnits[0]; const SUnit *Addr = SUnits.empty() ? 0 : &SUnits[0];
#endif #endif
@ -254,13 +254,13 @@ namespace llvm {
return &SUnits.back(); return &SUnits.back();
} }
/// StartBlock - Prepare to perform scheduling in the given block. /// startBlock - Prepare to perform scheduling in the given block.
/// ///
virtual void StartBlock(MachineBasicBlock *BB); virtual void startBlock(MachineBasicBlock *BB);
/// FinishBlock - Clean up after scheduling in the given block. /// finishBlock - Clean up after scheduling in the given block.
/// ///
virtual void FinishBlock(); virtual void finishBlock();
/// Initialize the scheduler state for the next scheduling region. /// Initialize the scheduler state for the next scheduling region.
virtual void enterRegion(MachineBasicBlock *bb, virtual void enterRegion(MachineBasicBlock *bb,
@ -271,35 +271,35 @@ namespace llvm {
/// Notify that the scheduler has finished scheduling the current region. /// Notify that the scheduler has finished scheduling the current region.
virtual void exitRegion(); virtual void exitRegion();
/// BuildSchedGraph - Build SUnits from the MachineBasicBlock that we are /// buildSchedGraph - Build SUnits from the MachineBasicBlock that we are
/// input. /// input.
void BuildSchedGraph(AliasAnalysis *AA); void buildSchedGraph(AliasAnalysis *AA);
/// AddSchedBarrierDeps - Add dependencies from instructions in the current /// addSchedBarrierDeps - Add dependencies from instructions in the current
/// list of instructions being scheduled to the scheduling barrier. We want to /// list of instructions being scheduled to the scheduling barrier. We want to
/// make sure instructions which define registers that are either used by /// make sure instructions which define registers that are either used by
/// the terminator or are live-out are properly scheduled. This is /// the terminator or are live-out are properly scheduled. This is
/// especially important when the definition latency of the return value(s) /// especially important when the definition latency of the return value(s)
/// are too high to be hidden by the branch or when the liveout registers /// are too high to be hidden by the branch or when the liveout registers
/// are used by instructions in the fallthrough block. /// are used by instructions in the fallthrough block.
void AddSchedBarrierDeps(); void addSchedBarrierDeps();
/// ComputeLatency - Compute node latency. /// computeLatency - Compute node latency.
/// ///
virtual void ComputeLatency(SUnit *SU); virtual void computeLatency(SUnit *SU);
/// ComputeOperandLatency - Override dependence edge latency using /// computeOperandLatency - Override dependence edge latency using
/// operand use/def information /// operand use/def information
/// ///
virtual void ComputeOperandLatency(SUnit *Def, SUnit *Use, virtual void computeOperandLatency(SUnit *Def, SUnit *Use,
SDep& dep) const; SDep& dep) const;
/// Schedule - Order nodes according to selected style, filling /// schedule - Order nodes according to selected style, filling
/// in the Sequence member. /// in the Sequence member.
/// ///
/// Typically, a scheduling algorithm will implement Schedule() without /// Typically, a scheduling algorithm will implement schedule() without
/// overriding enterRegion() or exitRegion(). /// overriding enterRegion() or exitRegion().
virtual void Schedule() = 0; virtual void schedule() = 0;
virtual void dumpNode(const SUnit *SU) const; virtual void dumpNode(const SUnit *SU) const;

View File

@ -470,7 +470,7 @@ signed ResourcePriorityQueue::SUSchedulingCost(SUnit *SU) {
/// Main resource tracking point. /// Main resource tracking point.
void ResourcePriorityQueue::ScheduledNode(SUnit *SU) { void ResourcePriorityQueue::scheduledNode(SUnit *SU) {
// Use NULL entry as an event marker to reset // Use NULL entry as an event marker to reset
// the DFA state. // the DFA state.
if (!SU) { if (!SU) {

View File

@ -101,8 +101,8 @@ private:
bool DelayForLiveRegsBottomUp(SUnit*, SmallVector<unsigned, 4>&); bool DelayForLiveRegsBottomUp(SUnit*, SmallVector<unsigned, 4>&);
void ListScheduleBottomUp(); void ListScheduleBottomUp();
/// ForceUnitLatencies - The fast scheduler doesn't care about real latencies. /// forceUnitLatencies - The fast scheduler doesn't care about real latencies.
bool ForceUnitLatencies() const { return true; } bool forceUnitLatencies() const { return true; }
}; };
} // end anonymous namespace } // end anonymous namespace
@ -245,7 +245,7 @@ SUnit *ScheduleDAGFast::CopyAndMoveSuccessors(SUnit *SU) {
DAG->ReplaceAllUsesOfValueWith(SDValue(SU->getNode(), OldNumVals-1), DAG->ReplaceAllUsesOfValueWith(SDValue(SU->getNode(), OldNumVals-1),
SDValue(LoadNode, 1)); SDValue(LoadNode, 1));
SUnit *NewSU = NewSUnit(N); SUnit *NewSU = newSUnit(N);
assert(N->getNodeId() == -1 && "Node already inserted!"); assert(N->getNodeId() == -1 && "Node already inserted!");
N->setNodeId(NewSU->NodeNum); N->setNodeId(NewSU->NodeNum);
@ -268,7 +268,7 @@ SUnit *ScheduleDAGFast::CopyAndMoveSuccessors(SUnit *SU) {
LoadSU = &SUnits[LoadNode->getNodeId()]; LoadSU = &SUnits[LoadNode->getNodeId()];
isNewLoad = false; isNewLoad = false;
} else { } else {
LoadSU = NewSUnit(LoadNode); LoadSU = newSUnit(LoadNode);
LoadNode->setNodeId(LoadSU->NodeNum); LoadNode->setNodeId(LoadSU->NodeNum);
} }
@ -381,11 +381,11 @@ void ScheduleDAGFast::InsertCopiesAndMoveSuccs(SUnit *SU, unsigned Reg,
const TargetRegisterClass *DestRC, const TargetRegisterClass *DestRC,
const TargetRegisterClass *SrcRC, const TargetRegisterClass *SrcRC,
SmallVector<SUnit*, 2> &Copies) { SmallVector<SUnit*, 2> &Copies) {
SUnit *CopyFromSU = NewSUnit(static_cast<SDNode *>(NULL)); SUnit *CopyFromSU = newSUnit(static_cast<SDNode *>(NULL));
CopyFromSU->CopySrcRC = SrcRC; CopyFromSU->CopySrcRC = SrcRC;
CopyFromSU->CopyDstRC = DestRC; CopyFromSU->CopyDstRC = DestRC;
SUnit *CopyToSU = NewSUnit(static_cast<SDNode *>(NULL)); SUnit *CopyToSU = newSUnit(static_cast<SDNode *>(NULL));
CopyToSU->CopySrcRC = DestRC; CopyToSU->CopySrcRC = DestRC;
CopyToSU->CopyDstRC = SrcRC; CopyToSU->CopyDstRC = SrcRC;

View File

@ -232,7 +232,7 @@ private:
/// Updates the topological ordering if required. /// Updates the topological ordering if required.
SUnit *CreateNewSUnit(SDNode *N) { SUnit *CreateNewSUnit(SDNode *N) {
unsigned NumSUnits = SUnits.size(); unsigned NumSUnits = SUnits.size();
SUnit *NewNode = NewSUnit(N); SUnit *NewNode = newSUnit(N);
// Update the topological ordering. // Update the topological ordering.
if (NewNode->NodeNum >= NumSUnits) if (NewNode->NodeNum >= NumSUnits)
Topo.InitDAGTopologicalSorting(); Topo.InitDAGTopologicalSorting();
@ -250,9 +250,9 @@ private:
return NewNode; return NewNode;
} }
/// ForceUnitLatencies - Register-pressure-reducing scheduling doesn't /// forceUnitLatencies - Register-pressure-reducing scheduling doesn't
/// need actual latency information but the hybrid scheduler does. /// need actual latency information but the hybrid scheduler does.
bool ForceUnitLatencies() const { bool forceUnitLatencies() const {
return !NeedLatency; return !NeedLatency;
} }
}; };
@ -354,7 +354,7 @@ void ScheduleDAGRRList::ReleasePred(SUnit *SU, const SDep *PredEdge) {
#endif #endif
--PredSU->NumSuccsLeft; --PredSU->NumSuccsLeft;
if (!ForceUnitLatencies()) { if (!forceUnitLatencies()) {
// Updating predecessor's height. This is now the cycle when the // Updating predecessor's height. This is now the cycle when the
// predecessor can be scheduled without causing a pipeline stall. // predecessor can be scheduled without causing a pipeline stall.
PredSU->setHeightToAtLeast(SU->getHeight() + PredEdge->getLatency()); PredSU->setHeightToAtLeast(SU->getHeight() + PredEdge->getLatency());
@ -701,7 +701,7 @@ void ScheduleDAGRRList::ScheduleNodeBottomUp(SUnit *SU) {
Sequence.push_back(SU); Sequence.push_back(SU);
AvailableQueue->ScheduledNode(SU); AvailableQueue->scheduledNode(SU);
// If HazardRec is disabled, and each inst counts as one cycle, then // If HazardRec is disabled, and each inst counts as one cycle, then
// advance CurCycle before ReleasePredecessors to avoid useless pushes to // advance CurCycle before ReleasePredecessors to avoid useless pushes to
@ -848,7 +848,7 @@ void ScheduleDAGRRList::UnscheduleNodeBottomUp(SUnit *SU) {
else { else {
AvailableQueue->push(SU); AvailableQueue->push(SU);
} }
AvailableQueue->UnscheduledNode(SU); AvailableQueue->unscheduledNode(SU);
} }
/// After backtracking, the hazard checker needs to be restored to a state /// After backtracking, the hazard checker needs to be restored to a state
@ -969,7 +969,7 @@ SUnit *ScheduleDAGRRList::CopyAndMoveSuccessors(SUnit *SU) {
LoadNode->setNodeId(LoadSU->NodeNum); LoadNode->setNodeId(LoadSU->NodeNum);
InitNumRegDefsLeft(LoadSU); InitNumRegDefsLeft(LoadSU);
ComputeLatency(LoadSU); computeLatency(LoadSU);
} }
SUnit *NewSU = CreateNewSUnit(N); SUnit *NewSU = CreateNewSUnit(N);
@ -987,7 +987,7 @@ SUnit *ScheduleDAGRRList::CopyAndMoveSuccessors(SUnit *SU) {
NewSU->isCommutable = true; NewSU->isCommutable = true;
InitNumRegDefsLeft(NewSU); InitNumRegDefsLeft(NewSU);
ComputeLatency(NewSU); computeLatency(NewSU);
// Record all the edges to and from the old SU, by category. // Record all the edges to and from the old SU, by category.
SmallVector<SDep, 4> ChainPreds; SmallVector<SDep, 4> ChainPreds;
@ -1687,9 +1687,9 @@ public:
int RegPressureDiff(SUnit *SU, unsigned &LiveUses) const; int RegPressureDiff(SUnit *SU, unsigned &LiveUses) const;
void ScheduledNode(SUnit *SU); void scheduledNode(SUnit *SU);
void UnscheduledNode(SUnit *SU); void unscheduledNode(SUnit *SU);
protected: protected:
bool canClobber(const SUnit *SU, const SUnit *Op); bool canClobber(const SUnit *SU, const SUnit *Op);
@ -1990,7 +1990,7 @@ int RegReductionPQBase::RegPressureDiff(SUnit *SU, unsigned &LiveUses) const {
return PDiff; return PDiff;
} }
void RegReductionPQBase::ScheduledNode(SUnit *SU) { void RegReductionPQBase::scheduledNode(SUnit *SU) {
if (!TracksRegPressure) if (!TracksRegPressure)
return; return;
@ -2059,7 +2059,7 @@ void RegReductionPQBase::ScheduledNode(SUnit *SU) {
dumpRegPressure(); dumpRegPressure();
} }
void RegReductionPQBase::UnscheduledNode(SUnit *SU) { void RegReductionPQBase::unscheduledNode(SUnit *SU) {
if (!TracksRegPressure) if (!TracksRegPressure)
return; return;

View File

@ -65,7 +65,7 @@ void ScheduleDAGSDNodes::Run(SelectionDAG *dag, MachineBasicBlock *bb) {
/// NewSUnit - Creates a new SUnit and return a ptr to it. /// NewSUnit - Creates a new SUnit and return a ptr to it.
/// ///
SUnit *ScheduleDAGSDNodes::NewSUnit(SDNode *N) { SUnit *ScheduleDAGSDNodes::newSUnit(SDNode *N) {
#ifndef NDEBUG #ifndef NDEBUG
const SUnit *Addr = 0; const SUnit *Addr = 0;
if (!SUnits.empty()) if (!SUnits.empty())
@ -87,7 +87,7 @@ SUnit *ScheduleDAGSDNodes::NewSUnit(SDNode *N) {
} }
SUnit *ScheduleDAGSDNodes::Clone(SUnit *Old) { SUnit *ScheduleDAGSDNodes::Clone(SUnit *Old) {
SUnit *SU = NewSUnit(Old->getNode()); SUnit *SU = newSUnit(Old->getNode());
SU->OrigNode = Old->OrigNode; SU->OrigNode = Old->OrigNode;
SU->Latency = Old->Latency; SU->Latency = Old->Latency;
SU->isVRegCycle = Old->isVRegCycle; SU->isVRegCycle = Old->isVRegCycle;
@ -310,7 +310,7 @@ void ScheduleDAGSDNodes::BuildSchedUnits() {
// If this node has already been processed, stop now. // If this node has already been processed, stop now.
if (NI->getNodeId() != -1) continue; if (NI->getNodeId() != -1) continue;
SUnit *NodeSUnit = NewSUnit(NI); SUnit *NodeSUnit = newSUnit(NI);
// See if anything is glued to this node, if so, add them to glued // See if anything is glued to this node, if so, add them to glued
// nodes. Nodes can have at most one glue input and one glue output. Glue // nodes. Nodes can have at most one glue input and one glue output. Glue
@ -368,7 +368,7 @@ void ScheduleDAGSDNodes::BuildSchedUnits() {
InitNumRegDefsLeft(NodeSUnit); InitNumRegDefsLeft(NodeSUnit);
// Assign the Latency field of NodeSUnit using target-provided information. // Assign the Latency field of NodeSUnit using target-provided information.
ComputeLatency(NodeSUnit); computeLatency(NodeSUnit);
} }
// Find all call operands. // Find all call operands.
@ -390,7 +390,7 @@ void ScheduleDAGSDNodes::AddSchedEdges() {
const TargetSubtargetInfo &ST = TM.getSubtarget<TargetSubtargetInfo>(); const TargetSubtargetInfo &ST = TM.getSubtarget<TargetSubtargetInfo>();
// Check to see if the scheduler cares about latencies. // Check to see if the scheduler cares about latencies.
bool UnitLatencies = ForceUnitLatencies(); bool UnitLatencies = forceUnitLatencies();
// Pass 2: add the preds, succs, etc. // Pass 2: add the preds, succs, etc.
for (unsigned su = 0, e = SUnits.size(); su != e; ++su) { for (unsigned su = 0, e = SUnits.size(); su != e; ++su) {
@ -456,7 +456,7 @@ void ScheduleDAGSDNodes::AddSchedEdges() {
const SDep &dep = SDep(OpSU, isChain ? SDep::Order : SDep::Data, const SDep &dep = SDep(OpSU, isChain ? SDep::Order : SDep::Data,
OpLatency, PhysReg); OpLatency, PhysReg);
if (!isChain && !UnitLatencies) { if (!isChain && !UnitLatencies) {
ComputeOperandLatency(OpN, N, i, const_cast<SDep &>(dep)); computeOperandLatency(OpN, N, i, const_cast<SDep &>(dep));
ST.adjustSchedDependency(OpSU, SU, const_cast<SDep &>(dep)); ST.adjustSchedDependency(OpSU, SU, const_cast<SDep &>(dep));
} }
@ -549,7 +549,7 @@ void ScheduleDAGSDNodes::InitNumRegDefsLeft(SUnit *SU) {
} }
} }
void ScheduleDAGSDNodes::ComputeLatency(SUnit *SU) { void ScheduleDAGSDNodes::computeLatency(SUnit *SU) {
SDNode *N = SU->getNode(); SDNode *N = SU->getNode();
// TokenFactor operands are considered zero latency, and some schedulers // TokenFactor operands are considered zero latency, and some schedulers
@ -561,7 +561,7 @@ void ScheduleDAGSDNodes::ComputeLatency(SUnit *SU) {
} }
// Check to see if the scheduler cares about latencies. // Check to see if the scheduler cares about latencies.
if (ForceUnitLatencies()) { if (forceUnitLatencies()) {
SU->Latency = 1; SU->Latency = 1;
return; return;
} }
@ -583,10 +583,10 @@ void ScheduleDAGSDNodes::ComputeLatency(SUnit *SU) {
SU->Latency += TII->getInstrLatency(InstrItins, N); SU->Latency += TII->getInstrLatency(InstrItins, N);
} }
void ScheduleDAGSDNodes::ComputeOperandLatency(SDNode *Def, SDNode *Use, void ScheduleDAGSDNodes::computeOperandLatency(SDNode *Def, SDNode *Use,
unsigned OpIdx, SDep& dep) const{ unsigned OpIdx, SDep& dep) const{
// Check to see if the scheduler cares about latencies. // Check to see if the scheduler cares about latencies.
if (ForceUnitLatencies()) if (forceUnitLatencies())
return; return;
if (dep.getKind() != SDep::Data) if (dep.getKind() != SDep::Data)

View File

@ -71,7 +71,7 @@ namespace llvm {
/// NewSUnit - Creates a new SUnit and return a ptr to it. /// NewSUnit - Creates a new SUnit and return a ptr to it.
/// ///
SUnit *NewSUnit(SDNode *N); SUnit *newSUnit(SDNode *N);
/// Clone - Creates a clone of the specified SUnit. It does not copy the /// Clone - Creates a clone of the specified SUnit. It does not copy the
/// predecessors / successors info nor the temporary scheduling states. /// predecessors / successors info nor the temporary scheduling states.
@ -94,17 +94,17 @@ namespace llvm {
/// ///
void InitNumRegDefsLeft(SUnit *SU); void InitNumRegDefsLeft(SUnit *SU);
/// ComputeLatency - Compute node latency. /// computeLatency - Compute node latency.
/// ///
virtual void ComputeLatency(SUnit *SU); virtual void computeLatency(SUnit *SU);
/// ComputeOperandLatency - Override dependence edge latency using /// computeOperandLatency - Override dependence edge latency using
/// operand use/def information /// operand use/def information
/// ///
virtual void ComputeOperandLatency(SUnit *Def, SUnit *Use, virtual void computeOperandLatency(SUnit *Def, SUnit *Use,
SDep& dep) const { } SDep& dep) const { }
virtual void ComputeOperandLatency(SDNode *Def, SDNode *Use, virtual void computeOperandLatency(SDNode *Def, SDNode *Use,
unsigned OpIdx, SDep& dep) const; unsigned OpIdx, SDep& dep) const;
/// Schedule - Order nodes according to selected style, filling /// Schedule - Order nodes according to selected style, filling

View File

@ -158,7 +158,7 @@ void ScheduleDAGVLIW::scheduleNodeTopDown(SUnit *SU, unsigned CurCycle) {
releaseSuccessors(SU); releaseSuccessors(SU);
SU->isScheduled = true; SU->isScheduled = true;
AvailableQueue->ScheduledNode(SU); AvailableQueue->scheduledNode(SU);
} }
/// listScheduleTopDown - The main loop of list scheduling for top-down /// listScheduleTopDown - The main loop of list scheduling for top-down
@ -202,7 +202,7 @@ void ScheduleDAGVLIW::listScheduleTopDown() {
// don't advance the hazard recognizer. // don't advance the hazard recognizer.
if (AvailableQueue->empty()) { if (AvailableQueue->empty()) {
// Reset DFA state. // Reset DFA state.
AvailableQueue->ScheduledNode(0); AvailableQueue->scheduledNode(0);
++CurCycle; ++CurCycle;
continue; continue;
} }