//===- ScheduleDAG.cpp - Implement the ScheduleDAG class ------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file Implements the ScheduleDAG class, which is a base class used by
/// scheduling implementation classes.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <iterator>
#include <limits>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "pre-RA-sched"

STATISTIC(NumNewPredsAdded, "Number of times a single predecessor was added");
STATISTIC(NumTopoInits,
          "Number of times the topological order has been recomputed");

#ifndef NDEBUG
static cl::opt<bool> StressSchedOpt(
  "stress-sched", cl::Hidden, cl::init(false),
  cl::desc("Stress test instruction scheduling"));
#endif

void SchedulingPriorityQueue::anchor() {}

ScheduleDAG::ScheduleDAG(MachineFunction &mf)
    : TM(mf.getTarget()), TII(mf.getSubtarget().getInstrInfo()),
      TRI(mf.getSubtarget().getRegisterInfo()), MF(mf),
      MRI(mf.getRegInfo()) {
#ifndef NDEBUG
  StressSched = StressSchedOpt;
#endif
}

ScheduleDAG::~ScheduleDAG() = default;

void ScheduleDAG::clearDAG() {
  SUnits.clear();
  EntrySU = SUnit();
  ExitSU = SUnit();
}

const MCInstrDesc *ScheduleDAG::getNodeDesc(const SDNode *Node) const {
  if (!Node || !Node->isMachineOpcode()) return nullptr;
  return &TII->get(Node->getMachineOpcode());
}

LLVM_DUMP_METHOD void SDep::dump(const TargetRegisterInfo *TRI) const {
  switch (getKind()) {
  case Data:   dbgs() << "Data"; break;
  case Anti:   dbgs() << "Anti"; break;
  case Output: dbgs() << "Out "; break;
  case Order:  dbgs() << "Ord "; break;
  }

  switch (getKind()) {
  case Data:
    dbgs() << " Latency=" << getLatency();
    if (TRI && isAssignedRegDep())
      dbgs() << " Reg=" << printReg(getReg(), TRI);
    break;
  case Anti:
  case Output:
    dbgs() << " Latency=" << getLatency();
    break;
  case Order:
    dbgs() << " Latency=" << getLatency();
    switch (Contents.OrdKind) {
    case Barrier:      dbgs() << " Barrier"; break;
    case MayAliasMem:
    case MustAliasMem: dbgs() << " Memory"; break;
    case Artificial:   dbgs() << " Artificial"; break;
    case Weak:         dbgs() << " Weak"; break;
    case Cluster:      dbgs() << " Cluster"; break;
    }
    break;
  }
}
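
// Add the specified edge as a predecessor of this node, and this node as a
// successor of the edge's node; a redundant or overlapping edge is merged
// into the existing one instead. Returns true only if a new edge was added.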
bool SUnit::addPred(const SDep &D, bool Required) {
  // If this node already has this dependence, don't add a redundant one.
  for (SDep &PredDep : Preds) {
    // Zero-latency weak edges may be added purely for heuristic ordering. Don't
    // add them if another kind of edge already exists.
    if (!Required && PredDep.getSUnit() == D.getSUnit())
      return false;
    if (PredDep.overlaps(D)) {
      // Extend the latency if needed. Equivalent to
      // removePred(PredDep) + addPred(D).
      if (PredDep.getLatency() < D.getLatency()) {
        SUnit *PredSU = PredDep.getSUnit();
        // Find the corresponding successor in N.
        SDep ForwardD = PredDep;
        ForwardD.setSUnit(this);
        for (SDep &SuccDep : PredSU->Succs) {
          if (SuccDep == ForwardD) {
            SuccDep.setLatency(D.getLatency());
            break;
          }
        }
        PredDep.setLatency(D.getLatency());
      }
      return false;
    }
  }
  // Now add a corresponding succ to N.
  SDep P = D;
  P.setSUnit(this);
  SUnit *N = D.getSUnit();
  // Update the bookkeeping.
  if (D.getKind() == SDep::Data) {
    assert(NumPreds < std::numeric_limits<unsigned>::max() &&
           "NumPreds will overflow!");
    assert(N->NumSuccs < std::numeric_limits<unsigned>::max() &&
           "NumSuccs will overflow!");
    ++NumPreds;
    ++N->NumSuccs;
  }
  if (!N->isScheduled) {
    if (D.isWeak()) {
      ++WeakPredsLeft;
    }
    else {
      assert(NumPredsLeft < std::numeric_limits<unsigned>::max() &&
             "NumPredsLeft will overflow!");
      ++NumPredsLeft;
    }
  }
  if (!isScheduled) {
    if (D.isWeak()) {
      ++N->WeakSuccsLeft;
    }
    else {
      assert(N->NumSuccsLeft < std::numeric_limits<unsigned>::max() &&
             "NumSuccsLeft will overflow!");
      ++N->NumSuccsLeft;
    }
  }
  Preds.push_back(D);
  N->Succs.push_back(P);
  if (P.getLatency() != 0) {
    this->setDepthDirty();
    N->setHeightDirty();
  }
  return true;
}

void SUnit::removePred(const SDep &D) {
  // Find the matching predecessor.
  SmallVectorImpl<SDep>::iterator I = llvm::find(Preds, D);
  if (I == Preds.end())
    return;
  // Find the corresponding successor in N.
  SDep P = D;
  P.setSUnit(this);
  SUnit *N = D.getSUnit();
  SmallVectorImpl<SDep>::iterator Succ = llvm::find(N->Succs, P);
  assert(Succ != N->Succs.end() && "Mismatching preds / succs lists!");
  N->Succs.erase(Succ);
  Preds.erase(I);
  // Update the bookkeeping.
  if (P.getKind() == SDep::Data) {
    assert(NumPreds > 0 && "NumPreds will underflow!");
    assert(N->NumSuccs > 0 && "NumSuccs will underflow!");
    --NumPreds;
    --N->NumSuccs;
  }
  if (!N->isScheduled) {
    if (D.isWeak())
      --WeakPredsLeft;
    else {
      assert(NumPredsLeft > 0 && "NumPredsLeft will underflow!");
      --NumPredsLeft;
    }
  }
  if (!isScheduled) {
    if (D.isWeak())
      --N->WeakSuccsLeft;
    else {
      assert(N->NumSuccsLeft > 0 && "NumSuccsLeft will underflow!");
      --N->NumSuccsLeft;
    }
  }
  if (P.getLatency() != 0) {
    this->setDepthDirty();
    N->setHeightDirty();
  }
}
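
// Invalidate this node's cached depth and, transitively, the cached depth of
// every successor that still believes its depth is current. setHeightDirty
// below is the mirror image along predecessor edges.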
void SUnit::setDepthDirty() {
  if (!isDepthCurrent) return;
  SmallVector<SUnit*, 8> WorkList;
  WorkList.push_back(this);
  do {
    SUnit *SU = WorkList.pop_back_val();
    SU->isDepthCurrent = false;
    for (SDep &SuccDep : SU->Succs) {
      SUnit *SuccSU = SuccDep.getSUnit();
      if (SuccSU->isDepthCurrent)
        WorkList.push_back(SuccSU);
    }
  } while (!WorkList.empty());
}

void SUnit::setHeightDirty() {
  if (!isHeightCurrent) return;
  SmallVector<SUnit*, 8> WorkList;
  WorkList.push_back(this);
  do {
    SUnit *SU = WorkList.pop_back_val();
    SU->isHeightCurrent = false;
    for (SDep &PredDep : SU->Preds) {
      SUnit *PredSU = PredDep.getSUnit();
      if (PredSU->isHeightCurrent)
        WorkList.push_back(PredSU);
    }
  } while (!WorkList.empty());
}

void SUnit::setDepthToAtLeast(unsigned NewDepth) {
  if (NewDepth <= getDepth())
    return;
  setDepthDirty();
  Depth = NewDepth;
  isDepthCurrent = true;
}

void SUnit::setHeightToAtLeast(unsigned NewHeight) {
  if (NewHeight <= getHeight())
    return;
  setHeightDirty();
  Height = NewHeight;
  isHeightCurrent = true;
}

/// Calculates the maximal path from the entry to the node.
void SUnit::ComputeDepth() {
  SmallVector<SUnit*, 8> WorkList;
  WorkList.push_back(this);
  do {
    SUnit *Cur = WorkList.back();

    bool Done = true;
    unsigned MaxPredDepth = 0;
    for (const SDep &PredDep : Cur->Preds) {
      SUnit *PredSU = PredDep.getSUnit();
      if (PredSU->isDepthCurrent)
        MaxPredDepth = std::max(MaxPredDepth,
                                PredSU->Depth + PredDep.getLatency());
      else {
        Done = false;
        WorkList.push_back(PredSU);
      }
    }

    if (Done) {
      WorkList.pop_back();
      if (MaxPredDepth != Cur->Depth) {
        Cur->setDepthDirty();
        Cur->Depth = MaxPredDepth;
      }
      Cur->isDepthCurrent = true;
    }
  } while (!WorkList.empty());
}

/// Calculates the maximal path from the node to the exit.
void SUnit::ComputeHeight() {
  SmallVector<SUnit*, 8> WorkList;
  WorkList.push_back(this);
  do {
    SUnit *Cur = WorkList.back();

    bool Done = true;
    unsigned MaxSuccHeight = 0;
    for (const SDep &SuccDep : Cur->Succs) {
      SUnit *SuccSU = SuccDep.getSUnit();
      if (SuccSU->isHeightCurrent)
        MaxSuccHeight = std::max(MaxSuccHeight,
                                 SuccSU->Height + SuccDep.getLatency());
      else {
        Done = false;
        WorkList.push_back(SuccSU);
      }
    }

    if (Done) {
      WorkList.pop_back();
      if (MaxSuccHeight != Cur->Height) {
        Cur->setHeightDirty();
        Cur->Height = MaxSuccHeight;
      }
      Cur->isHeightCurrent = true;
    }
  } while (!WorkList.empty());
}
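
// Try to order this node's predecessor edges so that the critical path edge
// (a data predecessor of maximal depth) comes first in the Preds list.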
void SUnit::biasCriticalPath() {
  if (NumPreds < 2)
    return;

  SUnit::pred_iterator BestI = Preds.begin();
  unsigned MaxDepth = BestI->getSUnit()->getDepth();
  for (SUnit::pred_iterator I = std::next(BestI), E = Preds.end(); I != E;
       ++I) {
    if (I->getKind() == SDep::Data && I->getSUnit()->getDepth() > MaxDepth)
      BestI = I;
  }
  if (BestI != Preds.begin())
    std::swap(*Preds.begin(), *BestI);
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void SUnit::dumpAttributes() const {
  dbgs() << "  # preds left       : " << NumPredsLeft << "\n";
  dbgs() << "  # succs left       : " << NumSuccsLeft << "\n";
  if (WeakPredsLeft)
    dbgs() << "  # weak preds left  : " << WeakPredsLeft << "\n";
  if (WeakSuccsLeft)
    dbgs() << "  # weak succs left  : " << WeakSuccsLeft << "\n";
  dbgs() << "  # rdefs left       : " << NumRegDefsLeft << "\n";
  dbgs() << "  Latency            : " << Latency << "\n";
  dbgs() << "  Depth              : " << getDepth() << "\n";
  dbgs() << "  Height             : " << getHeight() << "\n";
}

LLVM_DUMP_METHOD void ScheduleDAG::dumpNodeName(const SUnit &SU) const {
  if (&SU == &EntrySU)
    dbgs() << "EntrySU";
  else if (&SU == &ExitSU)
    dbgs() << "ExitSU";
  else
    dbgs() << "SU(" << SU.NodeNum << ")";
}

LLVM_DUMP_METHOD void ScheduleDAG::dumpNodeAll(const SUnit &SU) const {
  dumpNode(SU);
  SU.dumpAttributes();
  if (SU.Preds.size() > 0) {
    dbgs() << "  Predecessors:\n";
    for (const SDep &Dep : SU.Preds) {
      dbgs() << "    ";
      dumpNodeName(*Dep.getSUnit());
      dbgs() << ": ";
      Dep.dump(TRI);
      dbgs() << '\n';
    }
  }
  if (SU.Succs.size() > 0) {
    dbgs() << "  Successors:\n";
    for (const SDep &Dep : SU.Succs) {
      dbgs() << "    ";
      dumpNodeName(*Dep.getSUnit());
      dbgs() << ": ";
      Dep.dump(TRI);
      dbgs() << '\n';
    }
  }
}
#endif

#ifndef NDEBUG
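// Verify that every SUnit was scheduled and that, for bottom-up (top-down)
// scheduling, no successors (predecessors) are left unhandled. Returns the
// number of scheduled nodes, excluding isolated dead nodes.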
unsigned ScheduleDAG::VerifyScheduledDAG(bool isBottomUp) {
  bool AnyNotSched = false;
  unsigned DeadNodes = 0;
  for (const SUnit &SUnit : SUnits) {
    if (!SUnit.isScheduled) {
      if (SUnit.NumPreds == 0 && SUnit.NumSuccs == 0) {
        ++DeadNodes;
        continue;
      }
      if (!AnyNotSched)
        dbgs() << "*** Scheduling failed! ***\n";
      dumpNode(SUnit);
      dbgs() << "has not been scheduled!\n";
      AnyNotSched = true;
    }
    if (SUnit.isScheduled &&
        (isBottomUp ? SUnit.getHeight() : SUnit.getDepth()) >
          unsigned(std::numeric_limits<int>::max())) {
      if (!AnyNotSched)
        dbgs() << "*** Scheduling failed! ***\n";
      dumpNode(SUnit);
      dbgs() << "has an unexpected "
             << (isBottomUp ? "Height" : "Depth") << " value!\n";
      AnyNotSched = true;
    }
    if (isBottomUp) {
      if (SUnit.NumSuccsLeft != 0) {
        if (!AnyNotSched)
          dbgs() << "*** Scheduling failed! ***\n";
        dumpNode(SUnit);
        dbgs() << "has successors left!\n";
        AnyNotSched = true;
      }
    } else {
      if (SUnit.NumPredsLeft != 0) {
        if (!AnyNotSched)
          dbgs() << "*** Scheduling failed! ***\n";
        dumpNode(SUnit);
        dbgs() << "has predecessors left!\n";
        AnyNotSched = true;
      }
    }
  }
  assert(!AnyNotSched);
  return SUnits.size() - DeadNodes;
}
#endif

void ScheduleDAGTopologicalSort::InitDAGTopologicalSorting() {
  // The idea of the algorithm is taken from
  // "Online algorithms for managing the topological order of
  // a directed acyclic graph" by David J. Pearce and Paul H.J. Kelly.
  // This is the MNR algorithm, which was first introduced by
  // A. Marchetti-Spaccamela, U. Nanni and H. Rohnert in
  // "Maintaining a topological order under edge insertions".
  //
  // Short description of the algorithm:
  //
  // Topological ordering, ord, of a DAG maps each node to a topological
  // index so that for all edges X->Y it is the case that ord(X) < ord(Y).
  //
  // This means that if there is a path from the node X to the node Z,
  // then ord(X) < ord(Z).
  //
  // This property can be used to check for reachability of nodes:
  // if Z is reachable from X, then an insertion of the edge Z->X would
  // create a cycle.
  //
  // The algorithm first computes a topological ordering for the DAG by
  // initializing the Index2Node and Node2Index arrays and then tries to keep
  // the ordering up-to-date after edge insertions by reordering the DAG.
  //
  // On insertion of the edge X->Y, the algorithm first marks by calling DFS
  // the nodes reachable from Y, and then shifts them using Shift to lie
  // immediately after X in Index2Node.
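  //
  // For example, given the order A < B < C < D with only the edges A->B and
  // C->D, inserting the edge D->B marks the nodes reachable from B (here
  // just B itself) and shifts them to sit immediately after D, giving the
  // new order A < C < D < B.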

  // Cancel pending updates, mark as valid.
  Dirty = false;
  Updates.clear();

  unsigned DAGSize = SUnits.size();
  std::vector<SUnit*> WorkList;
  WorkList.reserve(DAGSize);

  Index2Node.resize(DAGSize);
  Node2Index.resize(DAGSize);

  // Initialize the data structures.
  if (ExitSU)
    WorkList.push_back(ExitSU);
  for (SUnit &SU : SUnits) {
    int NodeNum = SU.NodeNum;
    unsigned Degree = SU.Succs.size();
    // Temporarily use the Node2Index array as scratch space for degree counts.
    Node2Index[NodeNum] = Degree;

    // Is it a node without dependencies?
    if (Degree == 0) {
      assert(SU.Succs.empty() && "SUnit should have no successors");
      // Collect leaf nodes.
      WorkList.push_back(&SU);
    }
  }

  int Id = DAGSize;
  while (!WorkList.empty()) {
    SUnit *SU = WorkList.back();
    WorkList.pop_back();
    if (SU->NodeNum < DAGSize)
      Allocate(SU->NodeNum, --Id);
    for (const SDep &PredDep : SU->Preds) {
      SUnit *PredSU = PredDep.getSUnit();
      if (PredSU->NodeNum < DAGSize && !--Node2Index[PredSU->NodeNum])
        // If all dependencies of the node are processed already,
        // then the node can be computed now.
        WorkList.push_back(PredSU);
    }
  }

  Visited.resize(DAGSize);
  NumTopoInits++;

#ifndef NDEBUG
  // Check correctness of the ordering
  for (SUnit &SU : SUnits) {
    for (const SDep &PD : SU.Preds) {
      assert(Node2Index[SU.NodeNum] > Node2Index[PD.getSUnit()->NodeNum] &&
             "Wrong topological sorting");
    }
  }
#endif
}

void ScheduleDAGTopologicalSort::FixOrder() {
  // Recompute from scratch after new nodes have been added.
  if (Dirty) {
    InitDAGTopologicalSorting();
    return;
  }

  // Otherwise apply updates one-by-one.
  for (auto &U : Updates)
    AddPred(U.first, U.second);
  Updates.clear();
}
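
// Queue the new edge X->Y instead of applying it immediately; FixOrder()
// later either replays the queued updates one at a time or, once the queue
// grows past the cut-off, rebuilds the whole order from scratch.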
void ScheduleDAGTopologicalSort::AddPredQueued(SUnit *Y, SUnit *X) {
  // Recomputing the order from scratch is likely more efficient than applying
  // updates one-by-one for too many updates. The current cut-off is arbitrarily
  // chosen.
  Dirty = Dirty || Updates.size() > 10;

  if (Dirty)
    return;

  Updates.emplace_back(Y, X);
}

void ScheduleDAGTopologicalSort::AddPred(SUnit *Y, SUnit *X) {
  int UpperBound, LowerBound;
  LowerBound = Node2Index[Y->NodeNum];
  UpperBound = Node2Index[X->NodeNum];
  bool HasLoop = false;
  // Is Ord(Y) < Ord(X)? If so, the new edge X->Y violates the current order
  // and the affected region [LowerBound, UpperBound] must be reordered.
  if (LowerBound < UpperBound) {
    // Update the topological order.
    Visited.reset();
    DFS(Y, UpperBound, HasLoop);
    assert(!HasLoop && "Inserted edge creates a loop!");
    // Recompute topological indexes.
    Shift(Visited, LowerBound, UpperBound);
  }

  NumNewPredsAdded++;
}

void ScheduleDAGTopologicalSort::RemovePred(SUnit *M, SUnit *N) {
  // Deliberately a no-op: removing an edge from a DAG can never invalidate
  // an existing topological order, so no recomputation is needed.
  // InitDAGTopologicalSorting();
}
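
// Mark, in Visited, every node reachable from SU along successor edges whose
// topological index is below UpperBound. If a node with index equal to
// UpperBound is reached, set HasLoop and return immediately.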
void ScheduleDAGTopologicalSort::DFS(const SUnit *SU, int UpperBound,
                                     bool &HasLoop) {
  std::vector<const SUnit*> WorkList;
  WorkList.reserve(SUnits.size());

  WorkList.push_back(SU);
  do {
    SU = WorkList.back();
    WorkList.pop_back();
    Visited.set(SU->NodeNum);
    for (const SDep &SuccDep
         : make_range(SU->Succs.rbegin(), SU->Succs.rend())) {
      unsigned s = SuccDep.getSUnit()->NodeNum;
      // Edges to non-SUnits are allowed but ignored (e.g. ExitSU).
      if (s >= Node2Index.size())
        continue;
      if (Node2Index[s] == UpperBound) {
        HasLoop = true;
        return;
      }
      // Visit successors if not already and in affected region.
      if (!Visited.test(s) && Node2Index[s] < UpperBound) {
        WorkList.push_back(SuccDep.getSUnit());
      }
    }
  } while (!WorkList.empty());
}
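
// Return the indices of the nodes lying on some path from StartSU to
// TargetSU: the intersection of forward reachability from StartSU and
// backward reachability from TargetSU within the index window. Success is
// set to false if no such path exists.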
std::vector<int> ScheduleDAGTopologicalSort::GetSubGraph(const SUnit &StartSU,
                                                         const SUnit &TargetSU,
                                                         bool &Success) {
  std::vector<const SUnit*> WorkList;
  int LowerBound = Node2Index[StartSU.NodeNum];
  int UpperBound = Node2Index[TargetSU.NodeNum];
  bool Found = false;
  BitVector VisitedBack;
  std::vector<int> Nodes;

  if (LowerBound > UpperBound) {
    Success = false;
    return Nodes;
  }

  WorkList.reserve(SUnits.size());
  Visited.reset();

  // Starting from StartSU, visit all successors up
  // to UpperBound.
  WorkList.push_back(&StartSU);
  do {
    const SUnit *SU = WorkList.back();
    WorkList.pop_back();
    for (int I = SU->Succs.size()-1; I >= 0; --I) {
      const SUnit *Succ = SU->Succs[I].getSUnit();
      unsigned s = Succ->NodeNum;
      // Edges to non-SUnits are allowed but ignored (e.g. ExitSU).
      if (Succ->isBoundaryNode())
        continue;
      if (Node2Index[s] == UpperBound) {
        Found = true;
        continue;
      }
      // Visit successors if not already and in affected region.
      if (!Visited.test(s) && Node2Index[s] < UpperBound) {
        Visited.set(s);
        WorkList.push_back(Succ);
      }
    }
  } while (!WorkList.empty());

  if (!Found) {
    Success = false;
    return Nodes;
  }

  WorkList.clear();
  VisitedBack.resize(SUnits.size());
  Found = false;

  // Starting from TargetSU, visit all predecessors up
  // to LowerBound. SUs that are visited by the two
  // passes are added to Nodes.
  WorkList.push_back(&TargetSU);
  do {
    const SUnit *SU = WorkList.back();
    WorkList.pop_back();
    for (int I = SU->Preds.size()-1; I >= 0; --I) {
      const SUnit *Pred = SU->Preds[I].getSUnit();
      unsigned s = Pred->NodeNum;
      // Edges to non-SUnits are allowed but ignored (e.g. EntrySU).
      if (Pred->isBoundaryNode())
        continue;
      if (Node2Index[s] == LowerBound) {
        Found = true;
        continue;
      }
      if (!VisitedBack.test(s) && Visited.test(s)) {
        VisitedBack.set(s);
        WorkList.push_back(Pred);
        Nodes.push_back(s);
      }
    }
  } while (!WorkList.empty());

  assert(Found && "Error in SUnit Graph!");
  Success = true;
  return Nodes;
}
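
// Renumber the window [LowerBound, UpperBound] of the topological order:
// unvisited nodes are compacted toward the bottom of the window and visited
// nodes are moved to the top, each group keeping its old relative order.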
void ScheduleDAGTopologicalSort::Shift(BitVector& Visited, int LowerBound,
                                       int UpperBound) {
  std::vector<int> L;
  int shift = 0;
  int i;

  for (i = LowerBound; i <= UpperBound; ++i) {
    // w is node at topological index i.
    int w = Index2Node[i];
    if (Visited.test(w)) {
      // Unmark.
      Visited.reset(w);
      L.push_back(w);
      shift = shift + 1;
    } else {
      Allocate(w, i - shift);
    }
  }

  for (unsigned LI : L) {
    Allocate(LI, i - shift);
    i = i + 1;
  }
}

bool ScheduleDAGTopologicalSort::WillCreateCycle(SUnit *TargetSU, SUnit *SU) {
  FixOrder();
  // Is SU reachable from TargetSU via successor edges?
  if (IsReachable(SU, TargetSU))
    return true;
  for (const SDep &PredDep : TargetSU->Preds)
    if (PredDep.isAssignedRegDep() &&
        IsReachable(SU, PredDep.getSUnit()))
      return true;
  return false;
}

bool ScheduleDAGTopologicalSort::IsReachable(const SUnit *SU,
                                             const SUnit *TargetSU) {
  FixOrder();
  // If insertion of the edge SU->TargetSU would create a cycle
  // then there is a path from TargetSU to SU.
  int UpperBound, LowerBound;
  LowerBound = Node2Index[TargetSU->NodeNum];
  UpperBound = Node2Index[SU->NodeNum];
  bool HasLoop = false;
  // Is Ord(TargetSU) < Ord(SU) ?
  if (LowerBound < UpperBound) {
    Visited.reset();
    // There may be a path from TargetSU to SU. Check for it.
    DFS(TargetSU, UpperBound, HasLoop);
  }
  return HasLoop;
}

void ScheduleDAGTopologicalSort::Allocate(int n, int index) {
  Node2Index[n] = index;
  Index2Node[index] = n;
}

ScheduleDAGTopologicalSort::
ScheduleDAGTopologicalSort(std::vector<SUnit> &sunits, SUnit *exitsu)
  : SUnits(sunits), ExitSU(exitsu) {}

ScheduleHazardRecognizer::~ScheduleHazardRecognizer() = default;