//===---- ScheduleDAGList.cpp - Implement a list scheduler for isel DAG ---===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by Evan Cheng and is distributed under the
// University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements bottom-up and top-down list schedulers, using standard
// algorithms. The basic approach uses a priority queue of available nodes to
// schedule. One at a time, nodes are taken from the priority queue (thus in
// priority order), checked for legality to schedule, and emitted if legal.
//
// Nodes may not be legal to schedule either due to structural hazards (e.g.
// pipeline or resource constraints) or because an input to the instruction has
// not completed execution.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "sched"
#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/CodeGen/SSARegMap.h"
#include "llvm/Target/MRegisterInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/ADT/Statistic.h"
#include <climits>
#include <iostream>
#include <queue>
#include <set>
#include <vector>
#include "llvm/Support/CommandLine.h"
using namespace llvm;

namespace {
  cl::opt<bool> SchedVertically("sched-vertically", cl::Hidden);
  cl::opt<bool> SchedLowerDefNUse("sched-lower-defnuse", cl::Hidden);
}
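
// The two hidden flags above gate experimental bottom-up heuristics:
// -sched-vertically enables the vertical (def&use chain) scheduling mode
// implemented in ScheduleVertically below; -sched-lower-defnuse presumably
// adjusts how def&use operands are prioritized (it is not referenced in the
// code shown here). Both default to off.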

namespace {
  Statistic<> NumNoops ("scheduler", "Number of noops inserted");
  Statistic<> NumStalls("scheduler", "Number of pipeline stalls");

  /// SUnit - Scheduling unit. It's a wrapper around either a single SDNode or
  /// a group of nodes flagged together.
  struct SUnit {
    SDNode *Node;                       // Representative node.
    std::vector<SDNode*> FlaggedNodes;  // All nodes flagged to Node.

    // Preds/Succs - The SUnits before/after us in the graph. The boolean value
    // is true if the edge is a token chain edge, false if it is a value edge.
    std::set<std::pair<SUnit*,bool> > Preds;  // All sunit predecessors.
    std::set<std::pair<SUnit*,bool> > Succs;  // All sunit successors.

    short NumPredsLeft;        // # of preds not scheduled.
    short NumSuccsLeft;        // # of succs not scheduled.
    short NumChainPredsLeft;   // # of chain preds not scheduled.
    short NumChainSuccsLeft;   // # of chain succs not scheduled.
    bool isTwoAddress     : 1; // Is a two-address instruction.
    bool isDefNUseOperand : 1; // Is a def&use operand.
    bool isPending        : 1; // True once pending.
    bool isAvailable      : 1; // True once available.
    bool isScheduled      : 1; // True once scheduled.
    unsigned short Latency;    // Node latency.
    unsigned CycleBound;       // Upper/lower cycle to be scheduled at.
    unsigned Cycle;            // Once scheduled, the cycle of the op.
    unsigned NodeNum;          // Entry # of node in the node vector.

    SUnit(SDNode *node, unsigned nodenum)
      : Node(node), NumPredsLeft(0), NumSuccsLeft(0),
        NumChainPredsLeft(0), NumChainSuccsLeft(0),
        isTwoAddress(false), isDefNUseOperand(false),
        isPending(false), isAvailable(false), isScheduled(false),
        Latency(0), CycleBound(0), Cycle(0), NodeNum(nodenum) {}

    void dump(const SelectionDAG *G) const;
    void dumpAll(const SelectionDAG *G) const;
  };
}

void SUnit::dump(const SelectionDAG *G) const {
  std::cerr << "SU(" << NodeNum << "): ";
  Node->dump(G);
  std::cerr << "\n";
  if (FlaggedNodes.size() != 0) {
    for (unsigned i = 0, e = FlaggedNodes.size(); i != e; i++) {
      std::cerr << " ";
      FlaggedNodes[i]->dump(G);
      std::cerr << "\n";
    }
  }
}

void SUnit::dumpAll(const SelectionDAG *G) const {
  dump(G);

  std::cerr << " # preds left : " << NumPredsLeft << "\n";
  std::cerr << " # succs left : " << NumSuccsLeft << "\n";
  std::cerr << " # chain preds left : " << NumChainPredsLeft << "\n";
  std::cerr << " # chain succs left : " << NumChainSuccsLeft << "\n";
  std::cerr << " Latency : " << Latency << "\n";

  if (Preds.size() != 0) {
    std::cerr << " Predecessors:\n";
    for (std::set<std::pair<SUnit*,bool> >::const_iterator I = Preds.begin(),
           E = Preds.end(); I != E; ++I) {
      if (I->second)
        std::cerr << " ch ";
      else
        std::cerr << " val ";
      I->first->dump(G);
    }
  }
  if (Succs.size() != 0) {
    std::cerr << " Successors:\n";
    for (std::set<std::pair<SUnit*, bool> >::const_iterator I = Succs.begin(),
           E = Succs.end(); I != E; ++I) {
      if (I->second)
        std::cerr << " ch ";
      else
        std::cerr << " val ";
      I->first->dump(G);
    }
  }
  std::cerr << "\n";
}

//===----------------------------------------------------------------------===//
/// SchedulingPriorityQueue - This interface is used to plug different
/// priority computation algorithms into the list scheduler. It implements the
/// interface of a standard priority queue, where nodes are inserted in
/// arbitrary order and returned in priority order. The computation of the
/// priority and the representation of the queue are totally up to the
/// implementation to decide.
///
namespace {
class SchedulingPriorityQueue {
public:
  virtual ~SchedulingPriorityQueue() {}

  virtual void initNodes(const std::vector<SUnit> &SUnits) = 0;
  virtual void releaseState() = 0;

  virtual bool empty() const = 0;
  virtual void push(SUnit *U) = 0;

  virtual void push_all(const std::vector<SUnit *> &Nodes) = 0;
  virtual SUnit *pop() = 0;

  virtual void RemoveFromPriorityQueue(SUnit *SU) = 0;

  /// ScheduledNode - As each node is scheduled, this method is invoked. This
  /// allows the priority function to adjust the priority of nodes that have
  /// already been emitted.
  virtual void ScheduledNode(SUnit *Node) {}
};
}
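
// A concrete priority queue (for example one that orders nodes by latency or
// register pressure) subclasses the interface above and is handed to the
// scheduler through the ScheduleDAGList constructor below; the scheduling
// loops in this file depend only on this interface.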

namespace {
//===----------------------------------------------------------------------===//
/// ScheduleDAGList - The actual list scheduler implementation. This supports
/// both top-down and bottom-up scheduling.
///
class ScheduleDAGList : public ScheduleDAG {
private:
  // SDNode to SUnit mapping (many to one).
  std::map<SDNode*, SUnit*> SUnitMap;

  // The schedule. Null SUnit*'s represent noop instructions.
  std::vector<SUnit*> Sequence;

  // The scheduling units.
  std::vector<SUnit> SUnits;

  /// isBottomUp - This is true if the scheduling problem is bottom-up, false if
  /// it is top-down.
  bool isBottomUp;

  /// AvailableQueue - The priority queue to use for the available SUnits.
  ///
  SchedulingPriorityQueue *AvailableQueue;

  /// PendingQueue - This contains all of the instructions whose operands have
  /// been issued, but their results are not ready yet (due to the latency of
  /// the operation). Once the operands become available, the instruction is
  /// added to the AvailableQueue. This keeps track of each SUnit and the
  /// number of cycles left to execute before the operation is available.
  std::vector<std::pair<unsigned, SUnit*> > PendingQueue;

  /// HazardRec - The hazard recognizer to use.
  HazardRecognizer *HazardRec;

  /// OpenNodes - Nodes with open live ranges, i.e. predecessors or successors
  /// of scheduled nodes which are not themselves scheduled.
  std::map<const TargetRegisterClass*, std::set<SUnit*> > OpenNodes;

  /// RegPressureLimits - Keep track of the upper limit of register pressure
  /// for each register class that allows the scheduler to go into vertical
  /// mode.
  std::map<const TargetRegisterClass*, unsigned> RegPressureLimits;

public:
  ScheduleDAGList(SelectionDAG &dag, MachineBasicBlock *bb,
                  const TargetMachine &tm, bool isbottomup,
                  SchedulingPriorityQueue *availqueue,
                  HazardRecognizer *HR)
    : ScheduleDAG(dag, bb, tm), isBottomUp(isbottomup),
      AvailableQueue(availqueue), HazardRec(HR) {
  }

  ~ScheduleDAGList() {
    delete HazardRec;
    delete AvailableQueue;
  }

  void Schedule();

  void dumpSchedule() const;

private:
  SUnit *NewSUnit(SDNode *N);
  void ReleasePred(SUnit *PredSU, bool isChain, unsigned CurCycle);
  void ReleaseSucc(SUnit *SuccSU, bool isChain);
  void ScheduleNodeBottomUp(SUnit *SU, unsigned &CurCycle, bool Vertical = true);
  void ScheduleVertically(SUnit *SU, unsigned &CurCycle);
  void ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle);
  void ListScheduleTopDown();
  void ListScheduleBottomUp();
  void BuildSchedUnits();
  void EmitSchedule();
};
} // end anonymous namespace
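
// Note that ScheduleDAGList owns both the priority queue and the hazard
// recognizer passed to its constructor; the destructor above deletes them.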

HazardRecognizer::~HazardRecognizer() {}


/// NewSUnit - Creates a new SUnit and returns a pointer to it.
SUnit *ScheduleDAGList::NewSUnit(SDNode *N) {
  SUnits.push_back(SUnit(N, SUnits.size()));
  return &SUnits.back();
}
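
// NewSUnit relies on BuildSchedUnits having reserved SUnits up front; if
// push_back ever reallocated the vector, previously returned SUnit pointers
// (and the entries in SUnitMap) would be invalidated.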

/// BuildSchedUnits - Build SUnits from the selection dag that we are given as
/// input. This SUnit graph is similar to the SelectionDAG, but represents
/// flagged-together nodes with a single SUnit.
void ScheduleDAGList::BuildSchedUnits() {
  // Reserve entries in the vector for each of the SUnits we are creating. This
  // ensures that reallocation of the vector won't happen, so SUnit*'s won't get
  // invalidated.
  SUnits.reserve(std::distance(DAG.allnodes_begin(), DAG.allnodes_end()));

  const InstrItineraryData &InstrItins = TM.getInstrItineraryData();

  for (SelectionDAG::allnodes_iterator NI = DAG.allnodes_begin(),
       E = DAG.allnodes_end(); NI != E; ++NI) {
    if (isPassiveNode(NI))  // Leaf node, e.g. a TargetImmediate.
      continue;

    // If this node has already been processed, stop now.
    if (SUnitMap[NI]) continue;

    SUnit *NodeSUnit = NewSUnit(NI);

    // See if anything is flagged to this node, if so, add them to flagged
    // nodes. Nodes can have at most one flag input and one flag output. Flags
    // are required to be the last operand and result of a node.

    // Scan up, adding flagged preds to FlaggedNodes.
    SDNode *N = NI;
    while (N->getNumOperands() &&
           N->getOperand(N->getNumOperands()-1).getValueType() == MVT::Flag) {
      N = N->getOperand(N->getNumOperands()-1).Val;
      NodeSUnit->FlaggedNodes.push_back(N);
      SUnitMap[N] = NodeSUnit;
    }

    // Scan down, adding this node and any flagged succs to FlaggedNodes if they
    // have a user of the flag operand.
    N = NI;
    while (N->getValueType(N->getNumValues()-1) == MVT::Flag) {
      SDOperand FlagVal(N, N->getNumValues()-1);

      // There are either zero or one users of the Flag result.
      bool HasFlagUse = false;
      for (SDNode::use_iterator UI = N->use_begin(), E = N->use_end();
           UI != E; ++UI)
        if (FlagVal.isOperand(*UI)) {
          HasFlagUse = true;
          NodeSUnit->FlaggedNodes.push_back(N);
          SUnitMap[N] = NodeSUnit;
          N = *UI;
          break;
        }
      if (!HasFlagUse) break;
    }

    // Now all flagged nodes are in FlaggedNodes and N is the bottom-most node.
    // Update the SUnit.
    NodeSUnit->Node = N;
    SUnitMap[N] = NodeSUnit;

    // Compute the latency for the node. We use the sum of the latencies for
    // all nodes flagged together into this SUnit.
    if (InstrItins.isEmpty()) {
      // No latency information.
      NodeSUnit->Latency = 1;
    } else {
      NodeSUnit->Latency = 0;
      if (N->isTargetOpcode()) {
        unsigned SchedClass = TII->getSchedClass(N->getTargetOpcode());
        InstrStage *S = InstrItins.begin(SchedClass);
        InstrStage *E = InstrItins.end(SchedClass);
        for (; S != E; ++S)
          NodeSUnit->Latency += S->Cycles;
      }
      for (unsigned i = 0, e = NodeSUnit->FlaggedNodes.size(); i != e; ++i) {
        SDNode *FNode = NodeSUnit->FlaggedNodes[i];
        if (FNode->isTargetOpcode()) {
          unsigned SchedClass = TII->getSchedClass(FNode->getTargetOpcode());
          InstrStage *S = InstrItins.begin(SchedClass);
          InstrStage *E = InstrItins.end(SchedClass);
          for (; S != E; ++S)
            NodeSUnit->Latency += S->Cycles;
        }
      }
    }
  }
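
  // Pass 1 is now complete: every node that needs scheduling maps to a single
  // SUnit, and each SUnit's latency is the sum of the itinerary stage cycles
  // of the nodes flagged into it (or 1 when no itinerary data is available).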

  // Pass 2: add the preds, succs, etc.
  for (unsigned su = 0, e = SUnits.size(); su != e; ++su) {
    SUnit *SU = &SUnits[su];
    SDNode *MainNode = SU->Node;

    if (MainNode->isTargetOpcode()) {
      unsigned Opc = MainNode->getTargetOpcode();
      if (TII->isTwoAddrInstr(Opc)) {
        SU->isTwoAddress = true;
        SDNode *OpN = MainNode->getOperand(0).Val;
        SUnit *OpSU = SUnitMap[OpN];
        if (OpSU)
          OpSU->isDefNUseOperand = true;
      }
    }

    // Find all predecessors and successors of the group.
    // Temporarily add N to make code simpler.
    SU->FlaggedNodes.push_back(MainNode);

    for (unsigned n = 0, e = SU->FlaggedNodes.size(); n != e; ++n) {
      SDNode *N = SU->FlaggedNodes[n];

      for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
        SDNode *OpN = N->getOperand(i).Val;
        if (isPassiveNode(OpN)) continue;  // Not scheduled.
        SUnit *OpSU = SUnitMap[OpN];
        assert(OpSU && "Node has no SUnit!");
        if (OpSU == SU) continue;  // In the same group.

        MVT::ValueType OpVT = N->getOperand(i).getValueType();
        assert(OpVT != MVT::Flag && "Flagged nodes should be in same sunit!");
        bool isChain = OpVT == MVT::Other;

        if (SU->Preds.insert(std::make_pair(OpSU, isChain)).second) {
          if (!isChain) {
            SU->NumPredsLeft++;
          } else {
            SU->NumChainPredsLeft++;
          }
        }
        if (OpSU->Succs.insert(std::make_pair(SU, isChain)).second) {
          if (!isChain) {
            OpSU->NumSuccsLeft++;
          } else {
            OpSU->NumChainSuccsLeft++;
          }
        }
      }
    }

    // Remove MainNode from FlaggedNodes again.
    SU->FlaggedNodes.pop_back();
  }

  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(&DAG));
  return;
}
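
// Example of the edge classification in pass 2 above: an ADD that consumes the
// result of a LOAD records the LOAD's SUnit as a value predecessor
// (isChain == false) and bumps NumPredsLeft, while two memory operations
// ordered only through a token chain (an MVT::Other operand) record each other
// as chain predecessor/successor instead.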

/// EmitSchedule - Emit the machine code in scheduled order.
void ScheduleDAGList::EmitSchedule() {
  std::map<SDNode*, unsigned> VRBaseMap;
  for (unsigned i = 0, e = Sequence.size(); i != e; i++) {
    if (SUnit *SU = Sequence[i]) {
      for (unsigned j = 0, ee = SU->FlaggedNodes.size(); j != ee; j++)
        EmitNode(SU->FlaggedNodes[j], VRBaseMap);
      EmitNode(SU->Node, VRBaseMap);
    } else {
      // Null SUnit* is a noop.
      EmitNoop();
    }
  }
}

/// dumpSchedule - dump the schedule.
void ScheduleDAGList::dumpSchedule() const {
  for (unsigned i = 0, e = Sequence.size(); i != e; i++) {
    if (SUnit *SU = Sequence[i])
      SU->dump(&DAG);
    else
      std::cerr << "**** NOOP ****\n";
  }
}

/// Schedule - Schedule the DAG using list scheduling.
void ScheduleDAGList::Schedule() {
  DEBUG(std::cerr << "********** List Scheduling **********\n");

  // Build scheduling units.
  BuildSchedUnits();

  AvailableQueue->initNodes(SUnits);

  // Execute the actual scheduling loop Top-Down or Bottom-Up as appropriate.
  if (isBottomUp)
    ListScheduleBottomUp();
  else
    ListScheduleTopDown();

  AvailableQueue->releaseState();

  DEBUG(std::cerr << "*** Final schedule ***\n");
  DEBUG(dumpSchedule());
  DEBUG(std::cerr << "\n");

  // Emit in scheduled order.
  EmitSchedule();
}

//===----------------------------------------------------------------------===//
//  Bottom-Up Scheduling
//===----------------------------------------------------------------------===//

static const TargetRegisterClass *getRegClass(SUnit *SU,
                                              const TargetInstrInfo *TII,
                                              const MRegisterInfo *MRI,
                                              SSARegMap *RegMap) {
  if (SU->Node->isTargetOpcode()) {
    unsigned Opc = SU->Node->getTargetOpcode();
    const TargetInstrDescriptor &II = TII->get(Opc);
    return II.OpInfo->RegClass;
  } else {
    assert(SU->Node->getOpcode() == ISD::CopyFromReg);
    unsigned SrcReg = cast<RegisterSDNode>(SU->Node->getOperand(1))->getReg();
    if (MRegisterInfo::isVirtualRegister(SrcReg))
      return RegMap->getRegClass(SrcReg);
    else {
      for (MRegisterInfo::regclass_iterator I = MRI->regclass_begin(),
           E = MRI->regclass_end(); I != E; ++I)
        if ((*I)->hasType(SU->Node->getValueType(0)) &&
            (*I)->contains(SrcReg))
          return *I;
      assert(false && "Couldn't find register class for reg copy!");
    }
    return NULL;
  }
}
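
// getRegClass only handles the two kinds of SUnits the bottom-up heuristics
// ask about: target instructions, whose class is taken from the register class
// recorded in the instruction descriptor's operand info, and CopyFromReg
// nodes, which use the class of the virtual register or a class containing the
// physical register being copied.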

static unsigned getNumResults(SUnit *SU) {
  unsigned NumResults = 0;
  for (unsigned i = 0, e = SU->Node->getNumValues(); i != e; ++i) {
    MVT::ValueType VT = SU->Node->getValueType(i);
    if (VT != MVT::Other && VT != MVT::Flag)
      NumResults++;
  }
  return NumResults;
}
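
// A "result" here is any value the node produces other than the token chain
// (MVT::Other) and the flag value (MVT::Flag), i.e. a value that will live in
// a register.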

/// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. Add it to
/// the Available queue if the count reaches zero. Also update its cycle bound.
void ScheduleDAGList::ReleasePred(SUnit *PredSU, bool isChain,
                                  unsigned CurCycle) {
  // FIXME: the distance between two nodes is not always == the predecessor's
  // latency. For example, the reader can very well read the register written
  // by the predecessor later than the issue cycle. It also depends on the
  // interrupt model (drain vs. freeze).
  PredSU->CycleBound = std::max(PredSU->CycleBound, CurCycle + PredSU->Latency);

  if (!isChain)
    PredSU->NumSuccsLeft--;
  else
    PredSU->NumChainSuccsLeft--;

#ifndef NDEBUG
  if (PredSU->NumSuccsLeft < 0 || PredSU->NumChainSuccsLeft < 0) {
    std::cerr << "*** List scheduling failed! ***\n";
    PredSU->dump(&DAG);
    std::cerr << " has been released too many times!\n";
    assert(0);
  }
#endif

  if ((PredSU->NumSuccsLeft + PredSU->NumChainSuccsLeft) == 0) {
    // EntryToken has to go last! Special case it here.
    if (PredSU->Node->getOpcode() != ISD::EntryToken) {
      PredSU->isAvailable = true;
      AvailableQueue->push(PredSU);
    }
  }

  if (getNumResults(PredSU) > 0) {
    const TargetRegisterClass *RegClass = getRegClass(PredSU, TII, MRI, RegMap);
    OpenNodes[RegClass].insert(PredSU);
  }
}
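
// Releasing a predecessor that defines a register value opens its live range:
// in bottom-up order the use has just been scheduled but the def has not, so
// the node is recorded in OpenNodes to track register pressure per class.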

/// SharesOperandWithTwoAddr - Check if there is an unscheduled two-address node
/// with which SU shares an operand. If so, returns the node.
static SUnit *SharesOperandWithTwoAddr(SUnit *SU) {
  assert(!SU->isTwoAddress && "Node cannot be two-address op");
  for (std::set<std::pair<SUnit*, bool> >::iterator I = SU->Preds.begin(),
       E = SU->Preds.end(); I != E; ++I) {
    if (I->second) continue;
    SUnit *PredSU = I->first;
    for (std::set<std::pair<SUnit*, bool> >::iterator II =
           PredSU->Succs.begin(), EE = PredSU->Succs.end(); II != EE; ++II) {
      if (II->second) continue;
      SUnit *SSU = II->first;
      if (SSU->isTwoAddress && !SSU->isScheduled) {
        return SSU;
      }
    }
  }
  return NULL;
}
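
// The helpers below classify "floaters": nodes with no remaining non-chain
// predecessors (e.g. an immediate materialization such as MOV32ri), excluding
// plain CopyFromReg nodes. The bottom-up heuristics favor scheduling such
// nodes aggressively (see ScheduleVertically).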

static bool isFloater(const SUnit *SU) {
  unsigned Opc = SU->Node->getOpcode();
  return (Opc != ISD::CopyFromReg && SU->NumPredsLeft == 0);
}

static bool isSimpleFloaterUse(const SUnit *SU) {
  unsigned NumOps = 0;
  for (std::set<std::pair<SUnit*, bool> >::const_iterator I = SU->Preds.begin(),
       E = SU->Preds.end(); I != E; ++I) {
    if (I->second) continue;
    if (++NumOps > 1)
      return false;
    if (!isFloater(I->first))
      return false;
  }
  return true;
}

/// ScheduleVertically - Schedule vertically. That is, follow the def&use chain
/// (of two-address code) and schedule floaters aggressively.
void ScheduleDAGList::ScheduleVertically(SUnit *SU, unsigned &CurCycle) {
  // Try scheduling the Def&Use operand if register pressure is low.
  const TargetRegisterClass *RegClass = getRegClass(SU, TII, MRI, RegMap);
  unsigned Pressure = OpenNodes[RegClass].size();
  unsigned Limit = RegPressureLimits[RegClass];

  // See if we can schedule any predecessor that takes no registers.
  for (std::set<std::pair<SUnit*, bool> >::iterator I = SU->Preds.begin(),
       E = SU->Preds.end(); I != E; ++I) {
    if (I->second) continue;

    SUnit *PredSU = I->first;
    if (!PredSU->isAvailable || PredSU->isScheduled)
      continue;

    if (isFloater(PredSU)) {
      DEBUG(std::cerr << "*** Scheduling floater\n");
      AvailableQueue->RemoveFromPriorityQueue(PredSU);
      ScheduleNodeBottomUp(PredSU, CurCycle, false);
    }
  }

  SUnit *DUSU = NULL;
  if (SU->isTwoAddress && Pressure < Limit) {
    DUSU = SUnitMap[SU->Node->getOperand(0).Val];
    if (!DUSU->isAvailable || DUSU->isScheduled)
      DUSU = NULL;
    else if (!DUSU->isTwoAddress) {
      SUnit *SSU = SharesOperandWithTwoAddr(DUSU);
      if (SSU && SSU->isAvailable) {
        AvailableQueue->RemoveFromPriorityQueue(SSU);
        ScheduleNodeBottomUp(SSU, CurCycle, false);
        Pressure = OpenNodes[RegClass].size();
        if (Pressure >= Limit)
          DUSU = NULL;
      }
    }
  }

  if (DUSU) {
    DEBUG(std::cerr << "*** Low register pressure: scheduling D&U operand\n");
    AvailableQueue->RemoveFromPriorityQueue(DUSU);
    ScheduleNodeBottomUp(DUSU, CurCycle, false);
    Pressure = OpenNodes[RegClass].size();
    ScheduleVertically(DUSU, CurCycle);
  }
}
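
// Vertical scheduling (enabled by -sched-vertically) walks down the def&use
// chain of two-address code as long as register pressure in the relevant class
// stays below its limit, greedily scheduling "floater" operands along the way.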

/// ScheduleNodeBottomUp - Add the node to the schedule. Decrement the pending
/// count of its predecessors. If a predecessor pending count is zero, add it to
/// the Available queue.
void ScheduleDAGList::ScheduleNodeBottomUp(SUnit *SU, unsigned &CurCycle,
                                           bool Vertical) {
  DEBUG(std::cerr << "*** Scheduling [" << CurCycle << "]: ");
  DEBUG(SU->dump(&DAG));
  SU->Cycle = CurCycle;

  AvailableQueue->ScheduledNode(SU);
  Sequence.push_back(SU);

  // Bottom up: release predecessors.
  for (std::set<std::pair<SUnit*, bool> >::iterator I = SU->Preds.begin(),
       E = SU->Preds.end(); I != E; ++I)
    ReleasePred(I->first, I->second, CurCycle);
  SU->isScheduled = true;
  CurCycle++;

  if (getNumResults(SU) != 0) {
    const TargetRegisterClass *RegClass = getRegClass(SU, TII, MRI, RegMap);
    OpenNodes[RegClass].erase(SU);

    if (SchedVertically && Vertical)
      ScheduleVertically(SU, CurCycle);
  }
}

/// isReady - True if the node's lower cycle bound is less than or equal to the
/// current scheduling cycle. Always true if all nodes have uniform latency 1.
static inline bool isReady(SUnit *SU, unsigned CurCycle) {
  return SU->CycleBound <= CurCycle;
}

/// ListScheduleBottomUp - The main loop of list scheduling for bottom-up
/// schedulers.
void ScheduleDAGList::ListScheduleBottomUp() {
  // Determine rough register pressure limit.
  for (MRegisterInfo::regclass_iterator RCI = MRI->regclass_begin(),
       E = MRI->regclass_end(); RCI != E; ++RCI) {
    const TargetRegisterClass *RC = *RCI;
    unsigned Limit = RC->getNumRegs();
    Limit = (Limit > 2) ? Limit - 2 : 0;
    std::map<const TargetRegisterClass*, unsigned>::iterator RPI =
      RegPressureLimits.find(RC);
    if (RPI == RegPressureLimits.end())
      RegPressureLimits[RC] = Limit;
    else {
      unsigned &OldLimit = RegPressureLimits[RC];
      if (Limit < OldLimit)
        OldLimit = Limit;
    }
  }
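
  // The limit for a class is the number of physical registers minus two;
  // keeping two registers of slack is a heuristic, presumably to leave room
  // for values that must stay live across the region being scheduled.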

  unsigned CurCycle = 0;
  // Add root to Available queue.
  AvailableQueue->push(SUnitMap[DAG.getRoot().Val]);

  // While Available queue is not empty, grab the node with the highest
  // priority. If it is not ready put it back. Schedule the node.
  std::vector<SUnit*> NotReady;
  while (!AvailableQueue->empty()) {
    SUnit *CurNode = AvailableQueue->pop();
    while (!isReady(CurNode, CurCycle)) {
      NotReady.push_back(CurNode);
      CurNode = AvailableQueue->pop();
    }

    // Add the nodes that aren't ready back onto the available list.
    AvailableQueue->push_all(NotReady);
    NotReady.clear();

    ScheduleNodeBottomUp(CurNode, CurCycle);
  }

  // Add entry node last.
  if (DAG.getEntryNode().Val != DAG.getRoot().Val) {
    SUnit *Entry = SUnitMap[DAG.getEntryNode().Val];
    Sequence.push_back(Entry);
  }

  // Reverse the order since it is bottom up.
  std::reverse(Sequence.begin(), Sequence.end());

#ifndef NDEBUG
  // Verify that all SUnits were scheduled.
  bool AnyNotSched = false;
  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
    if (SUnits[i].NumSuccsLeft != 0 || SUnits[i].NumChainSuccsLeft != 0) {
      if (!AnyNotSched)
        std::cerr << "*** List scheduling failed! ***\n";
      SUnits[i].dump(&DAG);
      std::cerr << "has not been scheduled!\n";
      AnyNotSched = true;
    }
  }
  assert(!AnyNotSched);
#endif
}

//===----------------------------------------------------------------------===//
//  Top-Down Scheduling
//===----------------------------------------------------------------------===//

/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. Add it to
/// the PendingQueue if the count reaches zero.
void ScheduleDAGList::ReleaseSucc(SUnit *SuccSU, bool isChain) {
  if (!isChain)
    SuccSU->NumPredsLeft--;
  else
    SuccSU->NumChainPredsLeft--;

  assert(SuccSU->NumPredsLeft >= 0 && SuccSU->NumChainPredsLeft >= 0 &&
         "List scheduling internal error");

  if ((SuccSU->NumPredsLeft + SuccSU->NumChainPredsLeft) == 0) {
    // Compute how many cycles it will be before this actually becomes
    // available. This is the max of the start time of all predecessors plus
    // their latencies.
    unsigned AvailableCycle = 0;
    for (std::set<std::pair<SUnit*, bool> >::iterator I = SuccSU->Preds.begin(),
         E = SuccSU->Preds.end(); I != E; ++I) {
      // If this is a token edge, we don't need to wait for the latency of the
      // preceding instruction (e.g. a long-latency load) unless there is also
      // some other data dependence.
      unsigned PredDoneCycle = I->first->Cycle;
      if (!I->second)
        PredDoneCycle += I->first->Latency;
      else if (I->first->Latency)
        PredDoneCycle += 1;

      AvailableCycle = std::max(AvailableCycle, PredDoneCycle);
    }

    PendingQueue.push_back(std::make_pair(AvailableCycle, SuccSU));
  }
}
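
// Note that a chain-only predecessor contributes at most one cycle beyond its
// issue cycle, so a successor ordered only by a token chain does not have to
// wait out a long-latency producer's full latency.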

/// ScheduleNodeTopDown - Add the node to the schedule. Decrement the pending
/// count of its successors. If a successor pending count is zero, add it to
/// the Available queue.
void ScheduleDAGList::ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle) {
  DEBUG(std::cerr << "*** Scheduling [" << CurCycle << "]: ");
  DEBUG(SU->dump(&DAG));

  Sequence.push_back(SU);
  SU->Cycle = CurCycle;

  // Top down: release successors.
  for (std::set<std::pair<SUnit*, bool> >::iterator I = SU->Succs.begin(),
       E = SU->Succs.end(); I != E; ++I)
    ReleaseSucc(I->first, I->second);
}

/// ListScheduleTopDown - The main loop of list scheduling for top-down
/// schedulers.
void ScheduleDAGList::ListScheduleTopDown() {
  unsigned CurCycle = 0;
  SUnit *Entry = SUnitMap[DAG.getEntryNode().Val];

  // Add all leaves to the Available queue.
  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
    // It is available if it has no predecessors.
    if (SUnits[i].Preds.size() == 0 && &SUnits[i] != Entry) {
      AvailableQueue->push(&SUnits[i]);
      SUnits[i].isAvailable = SUnits[i].isPending = true;
    }
  }

  // Emit the entry node first.
  ScheduleNodeTopDown(Entry, CurCycle);
  HazardRec->EmitInstruction(Entry->Node);

  // While Available queue is not empty, grab the node with the highest
  // priority. If it is not ready put it back. Schedule the node.
  std::vector<SUnit*> NotReady;
  while (!AvailableQueue->empty() || !PendingQueue.empty()) {
    // Check to see if any of the pending instructions are ready to issue. If
    // so, add them to the available queue.
    for (unsigned i = 0, e = PendingQueue.size(); i != e; ++i) {
      if (PendingQueue[i].first == CurCycle) {
        AvailableQueue->push(PendingQueue[i].second);
        PendingQueue[i].second->isAvailable = true;
        PendingQueue[i] = PendingQueue.back();
        PendingQueue.pop_back();
        --i; --e;
      } else {
        assert(PendingQueue[i].first > CurCycle && "Negative latency?");
      }
    }
|
As a pending queue data structure to keep track of instructions whose
operands have all issued, but whose results are not yet available. This
allows us to compile:
int G;
int test(int A, int B, int* P) {
return (G+A)*(B+1);
}
to:
_test:
lis r2, ha16(L_G$non_lazy_ptr)
addi r4, r4, 1
lwz r2, lo16(L_G$non_lazy_ptr)(r2)
lwz r2, 0(r2)
add r2, r2, r3
mullw r3, r2, r4
blr
instead of this, which has a stall between the lis/lwz:
_test:
lis r2, ha16(L_G$non_lazy_ptr)
lwz r2, lo16(L_G$non_lazy_ptr)(r2)
addi r4, r4, 1
lwz r2, 0(r2)
add r2, r2, r3
mullw r3, r2, r4
blr
llvm-svn: 26716
2006-03-12 08:38:57 +08:00
|
|
|
|
2006-03-12 17:01:41 +08:00
|
|
|
// If there are no instructions available, don't try to issue anything, and
|
|
|
|
// don't advance the hazard recognizer.
|
|
|
|
if (AvailableQueue->empty()) {
|
|
|
|
++CurCycle;
|
|
|
|
continue;
|
|
|
|
}

    SUnit *FoundSUnit = 0;
    SDNode *FoundNode = 0;

    bool HasNoopHazards = false;
    while (!AvailableQueue->empty()) {
      SUnit *CurSUnit = AvailableQueue->pop();

      // Get the node represented by this SUnit.
      FoundNode = CurSUnit->Node;

      // If this is a pseudo op, like copyfromreg, look to see if there is a
      // real target node flagged to it.  If so, use the target node.
      for (unsigned i = 0, e = CurSUnit->FlaggedNodes.size();
           FoundNode->getOpcode() < ISD::BUILTIN_OP_END && i != e; ++i)
        FoundNode = CurSUnit->FlaggedNodes[i];

      HazardRecognizer::HazardType HT = HazardRec->getHazardType(FoundNode);
      if (HT == HazardRecognizer::NoHazard) {
        FoundSUnit = CurSUnit;
        break;
      }

      // Remember if this is a noop hazard.
      HasNoopHazards |= HT == HazardRecognizer::NoopHazard;

      NotReady.push_back(CurSUnit);
    }

    // Add the nodes that aren't ready back onto the available list.
    if (!NotReady.empty()) {
      AvailableQueue->push_all(NotReady);
      NotReady.clear();
    }
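
    // At this point exactly one of three things happens: a hazard-free node
    // was found and is scheduled, or only pipeline hazards remain and the
    // cycle is simply advanced (a stall), or a noop must be emitted so a
    // processor without interlocks does not execute an instruction too early.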

    // If we found a node to schedule, do it now.
    if (FoundSUnit) {
      ScheduleNodeTopDown(FoundSUnit, CurCycle);
      HazardRec->EmitInstruction(FoundNode);
      FoundSUnit->isScheduled = true;
      AvailableQueue->ScheduledNode(FoundSUnit);

      // If this is a pseudo-op node, we don't want to increment the current
      // cycle.
      if (FoundSUnit->Latency)  // Don't increment CurCycle for pseudo-ops!
        ++CurCycle;
    } else if (!HasNoopHazards) {
      // Otherwise, we have a pipeline stall, but no other problem; just
      // advance the current cycle and try again.
      DEBUG(std::cerr << "*** Advancing cycle, no work to do\n");
      HazardRec->AdvanceCycle();
      ++NumStalls;
      ++CurCycle;
    } else {
      // Otherwise, we have no instructions to issue and we have instructions
      // that will fault if we don't do this right.  This is the case for
      // processors without pipeline interlocks and other cases.
      DEBUG(std::cerr << "*** Emitting noop\n");
      HazardRec->EmitNoop();
      Sequence.push_back(0);   // NULL SUnit* -> noop
      ++NumNoops;
      ++CurCycle;
    }
  }

#ifndef NDEBUG
  // Verify that all SUnits were scheduled.
  bool AnyNotSched = false;
  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
    if (SUnits[i].NumPredsLeft != 0 || SUnits[i].NumChainPredsLeft != 0) {
      if (!AnyNotSched)
        std::cerr << "*** List scheduling failed! ***\n";
      SUnits[i].dump(&DAG);
      std::cerr << "has not been scheduled!\n";
      AnyNotSched = true;
    }
  }
  assert(!AnyNotSched);
#endif
}


//===----------------------------------------------------------------------===//
//  RegReductionPriorityQueue Implementation
//===----------------------------------------------------------------------===//
//
// This is a SchedulingPriorityQueue that schedules using Sethi Ullman numbers
// to reduce register pressure.
//
namespace {
  template<class SF>
  class RegReductionPriorityQueue;

  /// Sorting functions for the Available queue.
  struct ls_rr_sort : public std::binary_function<SUnit*, SUnit*, bool> {
    RegReductionPriorityQueue<ls_rr_sort> *SPQ;
    ls_rr_sort(RegReductionPriorityQueue<ls_rr_sort> *spq) : SPQ(spq) {}
    ls_rr_sort(const ls_rr_sort &RHS) : SPQ(RHS.SPQ) {}

    bool operator()(const SUnit* left, const SUnit* right) const;
  };
}  // end anonymous namespace

namespace {
  template<class SF>
  class RegReductionPriorityQueue : public SchedulingPriorityQueue {
    // SUnits - The SUnits for the current graph.
    const std::vector<SUnit> *SUnits;

    // SethiUllmanNumbers - The SethiUllman number for each node.
    std::vector<int> SethiUllmanNumbers;

    std::priority_queue<SUnit*, std::vector<SUnit*>, SF> Queue;
  public:
    RegReductionPriorityQueue() :
      Queue(ls_rr_sort(this)) {}

    void initNodes(const std::vector<SUnit> &sunits) {
      SUnits = &sunits;
      // Add pseudo dependency edges for two-address nodes.
      if (SchedLowerDefNUse)
        AddPseudoTwoAddrDeps();
      // Calculate node priorities.
      CalculatePriorities();
    }
    void releaseState() {
      SUnits = 0;
      SethiUllmanNumbers.clear();
    }

    int getSethiUllmanNumber(unsigned NodeNum) const {
      assert(NodeNum < SethiUllmanNumbers.size());
      return SethiUllmanNumbers[NodeNum];
    }

    bool empty() const { return Queue.empty(); }

    void push(SUnit *U) {
      Queue.push(U);
    }
    void push_all(const std::vector<SUnit *> &Nodes) {
      for (unsigned i = 0, e = Nodes.size(); i != e; ++i)
        Queue.push(Nodes[i]);
    }
    SUnit *pop() {
      SUnit *V = Queue.top();
      Queue.pop();
      return V;
    }

    /// RemoveFromPriorityQueue - This is a really inefficient way to remove a
    /// node from a priority queue.  We should roll our own heap to make this
    /// better or something.
    void RemoveFromPriorityQueue(SUnit *SU) {
      std::vector<SUnit*> Temp;

      assert(!Queue.empty() && "Not in queue!");
      while (Queue.top() != SU) {
        Temp.push_back(Queue.top());
        Queue.pop();
        assert(!Queue.empty() && "Not in queue!");
      }

      // Remove the node from the PQ.
      Queue.pop();

      // Add all the other nodes back.
      for (unsigned i = 0, e = Temp.size(); i != e; ++i)
        Queue.push(Temp[i]);
    }
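
    // Note: because std::priority_queue has no erase operation, the removal
    // above pops every element ahead of SU and pushes each one back afterwards,
    // so a single removal can cost O(N log N) in the worst case.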

  private:
    void AddPseudoTwoAddrDeps();
    void CalculatePriorities();
    int CalcNodePriority(const SUnit *SU);
  };
}

bool ls_rr_sort::operator()(const SUnit *left, const SUnit *right) const {
  unsigned LeftNum  = left->NodeNum;
  unsigned RightNum = right->NodeNum;
  bool LIsTarget = left->Node->isTargetOpcode();
  bool RIsTarget = right->Node->isTargetOpcode();
  int LPriority = SPQ->getSethiUllmanNumber(LeftNum);
  int RPriority = SPQ->getSethiUllmanNumber(RightNum);
  bool LIsFloater = LIsTarget && (LPriority == 1 || LPriority == 0);
  bool RIsFloater = RIsTarget && (RPriority == 1 || RPriority == 0);
  int LBonus = 0;
  int RBonus = 0;

  // Schedule floaters (e.g. load from some constant address) and those nodes
  // with a single predecessor each first. They maintain / reduce register
  // pressure.
  if (LIsFloater)
    LBonus += 2;
  if (RIsFloater)
    RBonus += 2;

  if (!SchedLowerDefNUse) {
    // Special tie breaker: if two nodes share an operand, the one that uses it
    // as a def&use operand is preferred.
    if (LIsTarget && RIsTarget) {
      if (left->isTwoAddress && !right->isTwoAddress) {
        SDNode *DUNode = left->Node->getOperand(0).Val;
        if (DUNode->isOperand(right->Node))
          LBonus += 2;
      }
      if (!left->isTwoAddress && right->isTwoAddress) {
        SDNode *DUNode = right->Node->getOperand(0).Val;
        if (DUNode->isOperand(left->Node))
          RBonus += 2;
      }
    }
  }

  if (LPriority+LBonus < RPriority+RBonus)
    return true;
  else if (LPriority+LBonus == RPriority+RBonus)
    if (left->NumPredsLeft > right->NumPredsLeft)
      return true;
    else if (left->NumPredsLeft+LBonus == right->NumPredsLeft+RBonus)
      if (left->CycleBound > right->CycleBound)
        return true;
  return false;
}
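
// Note: this comparator orders SUnits for the std::priority_queue inside
// RegReductionPriorityQueue.  It compares the Sethi-Ullman numbers with the
// floater and def&use bonuses folded in, then breaks ties on NumPredsLeft and
// finally on CycleBound.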

static inline bool isCopyFromLiveIn(const SUnit *SU) {
  SDNode *N = SU->Node;
  return N->getOpcode() == ISD::CopyFromReg &&
    N->getOperand(N->getNumOperands()-1).getValueType() != MVT::Flag;
}

// FIXME: This is probably too slow!
static void isReachable(SUnit *SU, SUnit *TargetSU,
                        std::set<SUnit *> &Visited, bool &Reached) {
  if (Reached) return;
  if (SU == TargetSU) {
    Reached = true;
    return;
  }
  if (!Visited.insert(SU).second) return;

  for (std::set<std::pair<SUnit*, bool> >::iterator I = SU->Preds.begin(),
         E = SU->Preds.end(); I != E; ++I)
    isReachable(I->first, TargetSU, Visited, Reached);
}

static bool isReachable(SUnit *SU, SUnit *TargetSU) {
  std::set<SUnit *> Visited;
  bool Reached = false;
  isReachable(SU, TargetSU, Visited, Reached);
  return Reached;
}
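
// Note: isReachable(SU, TargetSU) is a depth-first walk of SU's transitive
// Preds sets, so it answers whether TargetSU is a (possibly indirect)
// predecessor of SU.  AddPseudoTwoAddrDeps uses it below to make sure a new
// pseudo edge will not close a cycle.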

static SUnit *getDefUsePredecessor(SUnit *SU) {
  SDNode *DU = SU->Node->getOperand(0).Val;
  for (std::set<std::pair<SUnit*, bool> >::iterator
         I = SU->Preds.begin(), E = SU->Preds.end(); I != E; ++I) {
    if (I->second) continue;  // ignore chain preds
    SUnit *PredSU = I->first;
    if (PredSU->Node == DU)
      return PredSU;
  }

  // Must be flagged.
  return NULL;
}
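
// Note: getDefUsePredecessor scans only the non-chain predecessors for the
// SUnit producing operand 0 of SU->Node, i.e. the operand a two-address node
// both reads and overwrites.  A NULL result means that producer is flagged
// into the same scheduling unit rather than being a separate SUnit.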

static bool canClobber(SUnit *SU, SUnit *Op) {
  if (SU->isTwoAddress)
    return Op == getDefUsePredecessor(SU);
  return false;
}

/// AddPseudoTwoAddrDeps - If two nodes share an operand and one of them uses
/// it as a def&use operand, add a pseudo control edge from it to the other
/// node (if it won't create a cycle) so the two-address one will be scheduled
/// first (lower in the schedule).
template<class SF>
void RegReductionPriorityQueue<SF>::AddPseudoTwoAddrDeps() {
  for (unsigned i = 0, e = SUnits->size(); i != e; ++i) {
    SUnit *SU = (SUnit *)&((*SUnits)[i]);
    SDNode *Node = SU->Node;
    if (!Node->isTargetOpcode())
      continue;

    if (SU->isTwoAddress) {
      unsigned Depth = SU->Node->getNodeDepth();
      SUnit *DUSU = getDefUsePredecessor(SU);
      if (!DUSU) continue;

      for (std::set<std::pair<SUnit*, bool> >::iterator I = DUSU->Succs.begin(),
             E = DUSU->Succs.end(); I != E; ++I) {
        SUnit *SuccSU = I->first;
        if (SuccSU != SU && !canClobber(SuccSU, DUSU)) {
          if (SuccSU->Node->getNodeDepth() <= Depth+2 &&
              !isReachable(SuccSU, SU)) {
            DEBUG(std::cerr << "Adding an edge from SU # " << SU->NodeNum
                  << " to SU #" << SuccSU->NodeNum << "\n");
            if (SU->Preds.insert(std::make_pair(SuccSU, true)).second)
              SU->NumChainPredsLeft++;
            if (SuccSU->Succs.insert(std::make_pair(SU, true)).second)
              SuccSU->NumChainSuccsLeft++;
          }
        }
      }
    }
  }
}
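
// Hypothetical example: let A be a two-address node whose def&use operand is
// produced by B, and let C be another (non-clobbering) user of B's value.  The
// loop above records C as a chain predecessor of A, so C is ordered before A
// in the final schedule and reads B's value before A overwrites it.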

/// CalcNodePriority - Priority is the Sethi Ullman number.
/// Smaller number is the higher priority.
template<class SF>
int RegReductionPriorityQueue<SF>::CalcNodePriority(const SUnit *SU) {
  int &SethiUllmanNumber = SethiUllmanNumbers[SU->NodeNum];
  if (SethiUllmanNumber != 0)
    return SethiUllmanNumber;

  unsigned Opc = SU->Node->getOpcode();
  if (Opc == ISD::TokenFactor || Opc == ISD::CopyToReg)
    SethiUllmanNumber = INT_MAX - 10;
  else if (SU->NumSuccsLeft == 0)
    // If SU does not have a use, i.e. it doesn't produce a value that would
    // be consumed (e.g. store), then it terminates a chain of computation.
    // Give it a small SethiUllman number so that it will be scheduled right
    // before its predecessors and does not lengthen their live ranges.
    SethiUllmanNumber = INT_MIN + 10;
  else if (SU->NumPredsLeft == 0 &&
           (Opc != ISD::CopyFromReg || isCopyFromLiveIn(SU)))
    SethiUllmanNumber = 1;
  else {
    int Extra = 0;
    for (std::set<std::pair<SUnit*, bool> >::const_iterator
           I = SU->Preds.begin(), E = SU->Preds.end(); I != E; ++I) {
      if (I->second) continue;   // ignore chain preds
      SUnit *PredSU = I->first;
      int PredSethiUllman = CalcNodePriority(PredSU);
      if (PredSethiUllman > SethiUllmanNumber) {
        SethiUllmanNumber = PredSethiUllman;
        Extra = 0;
      } else if (PredSethiUllman == SethiUllmanNumber && !I->second)
        Extra++;
    }

    SethiUllmanNumber += Extra;
  }

  return SethiUllmanNumber;
}
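
// For example, a node with no data predecessors gets number 1; a node whose
// two non-chain operands both compute to 1 keeps the maximum (1) and adds one
// Extra for the tie, giving 2.  This matches the classic Sethi-Ullman labeling.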

/// CalculatePriorities - Calculate priorities of all scheduling units.
template<class SF>
void RegReductionPriorityQueue<SF>::CalculatePriorities() {
  SethiUllmanNumbers.assign(SUnits->size(), 0);

  for (unsigned i = 0, e = SUnits->size(); i != e; ++i)
    CalcNodePriority(&(*SUnits)[i]);
}


//===----------------------------------------------------------------------===//
//  LatencyPriorityQueue Implementation
//===----------------------------------------------------------------------===//
//
// This is a SchedulingPriorityQueue that schedules using latency information to
// reduce the length of the critical path through the basic block.
//
namespace {
  class LatencyPriorityQueue;

  /// Sorting functions for the Available queue.
  struct latency_sort : public std::binary_function<SUnit*, SUnit*, bool> {
    LatencyPriorityQueue *PQ;
    latency_sort(LatencyPriorityQueue *pq) : PQ(pq) {}
    latency_sort(const latency_sort &RHS) : PQ(RHS.PQ) {}

    bool operator()(const SUnit* left, const SUnit* right) const;
  };
}  // end anonymous namespace

namespace {
  class LatencyPriorityQueue : public SchedulingPriorityQueue {
    // SUnits - The SUnits for the current graph.
    const std::vector<SUnit> *SUnits;

    // Latencies - The latency (max of latency from this node to the bb exit)
    // for each node.
    std::vector<int> Latencies;

    /// NumNodesSolelyBlocking - This vector contains, for every node in the
    /// Queue, the number of nodes that the node is the sole unscheduled
    /// predecessor for.  This is used as a tie-breaker heuristic for better
    /// mobility.
    std::vector<unsigned> NumNodesSolelyBlocking;

    std::priority_queue<SUnit*, std::vector<SUnit*>, latency_sort> Queue;
  public:
    LatencyPriorityQueue() : Queue(latency_sort(this)) {
    }

    void initNodes(const std::vector<SUnit> &sunits) {
      SUnits = &sunits;
      // Calculate node priorities.
      CalculatePriorities();
    }
    void releaseState() {
      SUnits = 0;
      Latencies.clear();
    }

    unsigned getLatency(unsigned NodeNum) const {
      assert(NodeNum < Latencies.size());
      return Latencies[NodeNum];
    }

    unsigned getNumSolelyBlockNodes(unsigned NodeNum) const {
      assert(NodeNum < NumNodesSolelyBlocking.size());
      return NumNodesSolelyBlocking[NodeNum];
    }

    bool empty() const { return Queue.empty(); }

    virtual void push(SUnit *U) {
      push_impl(U);
    }
    void push_impl(SUnit *U);

    void push_all(const std::vector<SUnit *> &Nodes) {
      for (unsigned i = 0, e = Nodes.size(); i != e; ++i)
        push_impl(Nodes[i]);
    }
    SUnit *pop() {
      SUnit *V = Queue.top();
      Queue.pop();
      return V;
    }

    /// RemoveFromPriorityQueue - This is a really inefficient way to remove a
    /// node from a priority queue.  We should roll our own heap to make this
    /// better or something.
    void RemoveFromPriorityQueue(SUnit *SU) {
      std::vector<SUnit*> Temp;

      assert(!Queue.empty() && "Not in queue!");
      while (Queue.top() != SU) {
        Temp.push_back(Queue.top());
        Queue.pop();
        assert(!Queue.empty() && "Not in queue!");
      }

      // Remove the node from the PQ.
      Queue.pop();

      // Add all the other nodes back.
      for (unsigned i = 0, e = Temp.size(); i != e; ++i)
        Queue.push(Temp[i]);
    }

    // ScheduledNode - As nodes are scheduled, we look to see if there are any
    // successor nodes that have a single unscheduled predecessor.  If so, that
    // single predecessor has a higher priority, since scheduling it will make
    // the node available.
    void ScheduledNode(SUnit *Node);

  private:
    void CalculatePriorities();
    int CalcLatency(const SUnit &SU);
    void AdjustPriorityOfUnscheduledPreds(SUnit *SU);
  };
}

bool latency_sort::operator()(const SUnit *LHS, const SUnit *RHS) const {
  unsigned LHSNum = LHS->NodeNum;
  unsigned RHSNum = RHS->NodeNum;

  // The most important heuristic is scheduling the critical path.
  unsigned LHSLatency = PQ->getLatency(LHSNum);
  unsigned RHSLatency = PQ->getLatency(RHSNum);
  if (LHSLatency < RHSLatency) return true;
  if (LHSLatency > RHSLatency) return false;

  // After that, if two nodes have identical latencies, look to see if one will
  // unblock more other nodes than the other.
  unsigned LHSBlocked = PQ->getNumSolelyBlockNodes(LHSNum);
  unsigned RHSBlocked = PQ->getNumSolelyBlockNodes(RHSNum);
  if (LHSBlocked < RHSBlocked) return true;
  if (LHSBlocked > RHSBlocked) return false;

  // Finally, just to provide a stable ordering, use the node number as a
  // deciding factor.
  return LHSNum < RHSNum;
}
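
// Note: std::priority_queue pops the element that compares greatest, so
// returning true here means LHS has lower priority than RHS.  The effect is
// that the node with the longest latency to the block exit (the critical
// path) is issued first.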

/// CalcLatency - Calculate the maximal path from the node to the exit.
///
int LatencyPriorityQueue::CalcLatency(const SUnit &SU) {
  int &Latency = Latencies[SU.NodeNum];
  if (Latency != -1)
    return Latency;

  int MaxSuccLatency = 0;
  for (std::set<std::pair<SUnit*, bool> >::const_iterator I = SU.Succs.begin(),
         E = SU.Succs.end(); I != E; ++I)
    MaxSuccLatency = std::max(MaxSuccLatency, CalcLatency(*I->first));

  return Latency = MaxSuccLatency + SU.Latency;
}
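
// Note: CalcLatency memoizes into Latencies (initialized to -1) and computes
// each node's value as its own Latency plus the maximum over its successors,
// i.e. the length of the longest path from the node to the end of the block.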

/// CalculatePriorities - Calculate priorities of all scheduling units.
void LatencyPriorityQueue::CalculatePriorities() {
  Latencies.assign(SUnits->size(), -1);
  NumNodesSolelyBlocking.assign(SUnits->size(), 0);

  for (unsigned i = 0, e = SUnits->size(); i != e; ++i)
    CalcLatency((*SUnits)[i]);
}

/// getSingleUnscheduledPred - If there is exactly one unscheduled predecessor
/// of SU, return it, otherwise return null.
static SUnit *getSingleUnscheduledPred(SUnit *SU) {
  SUnit *OnlyAvailablePred = 0;
  for (std::set<std::pair<SUnit*, bool> >::const_iterator I = SU->Preds.begin(),
         E = SU->Preds.end(); I != E; ++I)
    if (!I->first->isScheduled) {
      // We found an available, but not scheduled, predecessor.  If it's the
      // only one we have found, keep track of it... otherwise give up.
      if (OnlyAvailablePred && OnlyAvailablePred != I->first)
        return 0;
      OnlyAvailablePred = I->first;
    }

  return OnlyAvailablePred;
}

void LatencyPriorityQueue::push_impl(SUnit *SU) {
  // Look at all of the successors of this node.  Count the number of nodes that
  // this node is the sole unscheduled node for.
  unsigned NumNodesBlocking = 0;
  for (std::set<std::pair<SUnit*, bool> >::const_iterator I = SU->Succs.begin(),
         E = SU->Succs.end(); I != E; ++I)
    if (getSingleUnscheduledPred(I->first) == SU)
      ++NumNodesBlocking;
  NumNodesSolelyBlocking[SU->NodeNum] = NumNodesBlocking;

  Queue.push(SU);
}
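
// Note: NumNodesSolelyBlocking is only recomputed when a node is (re)pushed
// through push_impl, which is why AdjustPriorityOfUnscheduledPreds below
// removes the affected predecessor from the queue and pushes it again instead
// of updating the entry in place.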

// ScheduledNode - As nodes are scheduled, we look to see if there are any
// successor nodes that have a single unscheduled predecessor.  If so, that
// single predecessor has a higher priority, since scheduling it will make
// the node available.
void LatencyPriorityQueue::ScheduledNode(SUnit *SU) {
  for (std::set<std::pair<SUnit*, bool> >::const_iterator I = SU->Succs.begin(),
         E = SU->Succs.end(); I != E; ++I)
    AdjustPriorityOfUnscheduledPreds(I->first);
}

/// AdjustPriorityOfUnscheduledPreds - One of the predecessors of SU was just
/// scheduled.  If SU is not itself available, then there is at least one
/// predecessor node that has not been scheduled yet.  If SU has exactly ONE
/// unscheduled predecessor, we want to increase its priority: it getting
/// scheduled will make this node available, so it is better than some other
/// node of the same priority that will not make a node available.
void LatencyPriorityQueue::AdjustPriorityOfUnscheduledPreds(SUnit *SU) {
  if (SU->isPending) return;  // All preds scheduled.

  SUnit *OnlyAvailablePred = getSingleUnscheduledPred(SU);
  if (OnlyAvailablePred == 0 || !OnlyAvailablePred->isAvailable) return;

  // Okay, we found a single predecessor that is available, but not scheduled.
  // Since it is available, it must be in the priority queue.  First remove it.
  RemoveFromPriorityQueue(OnlyAvailablePred);

  // Reinsert the node into the priority queue, which recomputes its
  // NumNodesSolelyBlocking value.
  push(OnlyAvailablePred);
}


//===----------------------------------------------------------------------===//
//  Public Constructor Functions
//===----------------------------------------------------------------------===//

llvm::ScheduleDAG* llvm::createBURRListDAGScheduler(SelectionDAG &DAG,
                                                    MachineBasicBlock *BB) {
  return new ScheduleDAGList(DAG, BB, DAG.getTarget(), true,
                             new RegReductionPriorityQueue<ls_rr_sort>(),
                             new HazardRecognizer());
}
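
/// Note: the fourth ScheduleDAGList constructor argument selects the
/// scheduling direction; this function passes true for the bottom-up
/// register-reduction scheduler, while createTDListDAGScheduler below passes
/// false for the top-down variant (the parameter name is not visible here, so
/// this reading is inferred from the two call sites).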

/// createTDListDAGScheduler - This creates a top-down list scheduler with the
/// specified hazard recognizer.
ScheduleDAG* llvm::createTDListDAGScheduler(SelectionDAG &DAG,
                                            MachineBasicBlock *BB,
                                            HazardRecognizer *HR) {
  return new ScheduleDAGList(DAG, BB, DAG.getTarget(), false,
                             new LatencyPriorityQueue(),
                             HR);
}