I'm introducing a new machine model to simultaneously allow simple
subtarget CPU descriptions and support new features of MachineScheduler.

MachineModel has three categories of data:
1) Basic properties for coarse grained instruction cost model.
2) Scheduler Read/Write resources for simple per-opcode and operand cost model (TBD).
3) Instruction itineraries for detailed per-cycle reservation tables.

These will all live side-by-side. Any subtarget can use any
combination of them. Instruction itineraries will not change in the
near term. In the long run, I expect them to only be relevant for
in-order VLIW machines that have complex constraints and require a
precise scheduling/bundling model. Once itineraries are only actively
used by VLIW-ish targets, they could be replaced by something more
appropriate for those targets.

This tablegen backend rewrite sets things up for introducing
MachineModel type #2: per opcode/operand cost model.

llvm-svn: 159891
Author: Andrew Trick
Date:   2012-07-07 04:00:00 +00:00
commit 87255e340e (parent 91118a6155)
27 changed files with 905 additions and 503 deletions
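As a rough illustration of the three categories above: a subtarget can set the
coarse-grained properties (category 1) on a SchedMachineModel and still hang its
existing itineraries (category 3) off the same model. The CPU, itinerary, and
feature names below are hypothetical; the real per-target definitions are in the
diff that follows.

    // Hypothetical subtarget: basic properties plus reused itineraries.
    def MyCPUModel : SchedMachineModel {
      let IssueWidth  = 2;                // category 1: per-cycle group size
      let MinLatency  = 0;                // category 1: OOO-style grouping
      let LoadLatency = 3;                // category 1: expected load latency
      let Itineraries = MyCPUItineraries; // category 3: existing reservation tables
    }

    def : ProcessorModel<"mycpu", MyCPUModel, [FeatureMyCPU]>;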

View File

@ -16,6 +16,7 @@
#ifndef LLVM_MC_MCINSTRITINERARIES_H
#define LLVM_MC_MCINSTRITINERARIES_H
+#include "llvm/MC/MCSchedule.h"
#include <algorithm>
namespace llvm {
@ -103,82 +104,13 @@ struct InstrItinerary {
};
-//===----------------------------------------------------------------------===//
-/// Instruction itinerary properties - These properties provide general
-/// information about the microarchitecture to the scheduler.
-///
-struct InstrItineraryProps {
-  // IssueWidth is the maximum number of instructions that may be scheduled in
-  // the same per-cycle group.
-  unsigned IssueWidth;
-  static const unsigned DefaultIssueWidth = 1;
-  // MinLatency is the minimum latency between a register write
-  // followed by a data dependent read. This determines which
-  // instructions may be scheduled in the same per-cycle group. This
-  // is distinct from *expected* latency, which determines the likely
-  // critical path but does not guarantee a pipeline
-  // hazard. MinLatency can always be overridden by the number of
-  // InstrStage cycles.
-  //
-  // (-1) Standard in-order processor.
-  //      Use InstrItinerary OperandCycles as MinLatency.
-  //      If no OperandCycles exist, then use the cycle of the last InstrStage.
-  //
-  // (0) Out-of-order processor, or in-order with bundled dependencies.
-  //     RAW dependencies may be dispatched in the same cycle.
-  //     Optional InstrItinerary OperandCycles provides expected latency.
-  //
-  // (>0) In-order processor with variable latencies.
-  //      Use the greater of this value or the cycle of the last InstrStage.
-  //      Optional InstrItinerary OperandCycles provides expected latency.
-  //      TODO: can't yet specify both min and expected latency per operand.
-  int MinLatency;
-  static const unsigned DefaultMinLatency = -1;
-  // LoadLatency is the expected latency of load instructions.
-  //
-  // If MinLatency >= 0, this may be overriden for individual load opcodes by
-  // InstrItinerary OperandCycles.
-  unsigned LoadLatency;
-  static const unsigned DefaultLoadLatency = 4;
-  // HighLatency is the expected latency of "very high latency" operations.
-  // See TargetInstrInfo::isHighLatencyDef().
-  // By default, this is set to an arbitrarily high number of cycles
-  // likely to have some impact on scheduling heuristics.
-  // If MinLatency >= 0, this may be overriden by InstrItinData OperandCycles.
-  unsigned HighLatency;
-  static const unsigned DefaultHighLatency = 10;
-  // Default's must be specified as static const literals so that tablegenerated
-  // target code can use it in static initializers. The defaults need to be
-  // initialized in this default ctor because some clients directly instantiate
-  // InstrItineraryData instead of using a generated itinerary.
-  InstrItineraryProps(): IssueWidth(DefaultMinLatency),
-                         MinLatency(DefaultMinLatency),
-                         LoadLatency(DefaultLoadLatency),
-                         HighLatency(DefaultHighLatency) {}
-  InstrItineraryProps(unsigned iw, int ml, unsigned ll, unsigned hl):
-    IssueWidth(iw), MinLatency(ml), LoadLatency(ll), HighLatency(hl) {}
-};
-//===----------------------------------------------------------------------===//
-/// Encapsulate all subtarget specific information for scheduling for use with
-/// SubtargetInfoKV.
-struct InstrItinerarySubtargetValue {
-  const InstrItineraryProps *Props;
-  const InstrItinerary *Itineraries;
-};
//===----------------------------------------------------------------------===//
/// Instruction itinerary Data - Itinerary data supplied by a subtarget to be
/// used by a target.
///
class InstrItineraryData {
public:
-  InstrItineraryProps Props;
+  const MCSchedModel *SchedModel;   ///< Basic machine properties.
  const InstrStage *Stages;         ///< Array of stages selected
  const unsigned *OperandCycles;    ///< Array of operand cycles selected
  const unsigned *Forwardings;      ///< Array of pipeline forwarding pathes
@ -186,13 +118,14 @@ public:
  /// Ctors.
  ///
-  InstrItineraryData() : Stages(0), OperandCycles(0), Forwardings(0),
-                         Itineraries(0) {}
+  InstrItineraryData() : SchedModel(&MCSchedModel::DefaultSchedModel),
+                         Stages(0), OperandCycles(0),
+                         Forwardings(0), Itineraries(0) {}

-  InstrItineraryData(const InstrItineraryProps *P, const InstrStage *S,
-                     const unsigned *OS, const unsigned *F,
-                     const InstrItinerary *I)
-    : Props(*P), Stages(S), OperandCycles(OS), Forwardings(F), Itineraries(I) {}
+  InstrItineraryData(const MCSchedModel *SM, const InstrStage *S,
+                     const unsigned *OS, const unsigned *F)
+    : SchedModel(SM), Stages(S), OperandCycles(OS), Forwardings(F),
+      Itineraries(SchedModel->InstrItineraries) {}

  /// isEmpty - Returns true if there are no itineraries.
  ///
@ -232,13 +165,9 @@ public:
  /// then it defaults to one cycle.
  unsigned getStageLatency(unsigned ItinClassIndx) const {
    // If the target doesn't provide itinerary information, use a simple
-    // non-zero default value for all instructions. Some target's provide a
-    // dummy (Generic) itinerary which should be handled as if it's itinerary is
-    // empty. We identify this by looking for a reference to stage zero (invalid
-    // stage). This is different from beginStage == endStage != 0, which could
-    // be used for zero-latency pseudo ops.
-    if (isEmpty() || Itineraries[ItinClassIndx].FirstStage == 0)
-      return (Props.MinLatency < 0) ? 1 : Props.MinLatency;
+    // non-zero default value for all instructions.
+    if (isEmpty())
+      return SchedModel->MinLatency < 0 ? 1 : SchedModel->MinLatency;

    // Calculate the maximum completion time for any stage.
    unsigned Latency = 0, StartCycle = 0;

View File

@ -0,0 +1,108 @@
//===-- llvm/MC/MCSchedule.h - Scheduling -----------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the classes used to describe a subtarget's machine model
// for scheduling and other instruction cost heuristics.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_MC_MCSCHEDMODEL_H
#define LLVM_MC_MCSCHEDMODEL_H
#include "llvm/Support/DataTypes.h"
namespace llvm {
struct InstrItinerary;
/// Machine model for scheduling, bundling, and heuristics.
///
/// The machine model directly provides basic information about the
/// microarchitecture to the scheduler in the form of properties. It also
/// optionally refers to scheduler resources tables and itinerary
/// tables. Scheduler resources tables model the latency and cost for each
/// instruction type. Itinerary tables are an independent mechanism that
/// provides a detailed reservation table describing each cycle of instruction
/// execution. Subtargets may define any or all of the above categories of data
/// depending on the type of CPU and selected scheduler.
class MCSchedModel {
public:
static MCSchedModel DefaultSchedModel; // For unknown processors.
// IssueWidth is the maximum number of instructions that may be scheduled in
// the same per-cycle group.
unsigned IssueWidth;
static const unsigned DefaultIssueWidth = 1;
// MinLatency is the minimum latency between a register write
// followed by a data dependent read. This determines which
// instructions may be scheduled in the same per-cycle group. This
// is distinct from *expected* latency, which determines the likely
// critical path but does not guarantee a pipeline
// hazard. MinLatency can always be overridden by the number of
// InstrStage cycles.
//
// (-1) Standard in-order processor.
// Use InstrItinerary OperandCycles as MinLatency.
// If no OperandCycles exist, then use the cycle of the last InstrStage.
//
// (0) Out-of-order processor, or in-order with bundled dependencies.
// RAW dependencies may be dispatched in the same cycle.
// Optional InstrItinerary OperandCycles provides expected latency.
//
// (>0) In-order processor with variable latencies.
// Use the greater of this value or the cycle of the last InstrStage.
// Optional InstrItinerary OperandCycles provides expected latency.
// TODO: can't yet specify both min and expected latency per operand.
int MinLatency;
static const unsigned DefaultMinLatency = -1;
// LoadLatency is the expected latency of load instructions.
//
// If MinLatency >= 0, this may be overridden for individual load opcodes by
// InstrItinerary OperandCycles.
unsigned LoadLatency;
static const unsigned DefaultLoadLatency = 4;
// HighLatency is the expected latency of "very high latency" operations.
// See TargetInstrInfo::isHighLatencyDef().
// By default, this is set to an arbitrarily high number of cycles
// likely to have some impact on scheduling heuristics.
// If MinLatency >= 0, this may be overridden by InstrItinData OperandCycles.
unsigned HighLatency;
static const unsigned DefaultHighLatency = 10;
private:
// TODO: Add a reference to proc resource types and sched resource tables.
// Instruction itinerary tables used by InstrItineraryData.
friend class InstrItineraryData;
const InstrItinerary *InstrItineraries;
public:
// Defaults must be specified as static const literals so that tablegenerated
// target code can use it in static initializers. The defaults need to be
// initialized in this default ctor because some clients directly instantiate
// MCSchedModel instead of using a generated itinerary.
MCSchedModel(): IssueWidth(DefaultMinLatency),
MinLatency(DefaultMinLatency),
LoadLatency(DefaultLoadLatency),
HighLatency(DefaultHighLatency),
InstrItineraries(0) {}
// Table-gen driven ctor.
MCSchedModel(unsigned iw, int ml, unsigned ll, unsigned hl,
const InstrItinerary *ii):
IssueWidth(iw), MinLatency(ml), LoadLatency(ll), HighLatency(hl),
InstrItineraries(ii){}
};
} // End llvm namespace
#endif
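The three MinLatency conventions documented above map directly onto the TableGen
SchedMachineModel introduced later in this patch. A sketch with hypothetical
model names (the real examples are the Cortex-A8, Cortex-A9, and Atom models
below):

    // -1: classic in-order model; OperandCycles (or the last InstrStage cycle)
    //     are interpreted as the minimum latency.
    def HypoInOrderModel : SchedMachineModel { let MinLatency = -1; }

    // 0: out-of-order core; dependent instructions may be dispatched in the
    //    same cycle, and OperandCycles only give expected latency.
    def HypoOutOfOrderModel : SchedMachineModel { let MinLatency = 0; }

    // >0: in-order core with variable latencies; the scheduler uses the greater
    //     of this value and the last InstrStage cycle.
    def HypoVarLatencyModel : SchedMachineModel { let MinLatency = 1; }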

View File

@ -30,9 +30,9 @@ class MCSubtargetInfo {
std::string TargetTriple;                // Target triple
const SubtargetFeatureKV *ProcFeatures;  // Processor feature list
const SubtargetFeatureKV *ProcDesc;      // Processor descriptions
-const SubtargetInfoKV *ProcItins;        // Scheduling itineraries
-const InstrStage *Stages;                // Instruction stages
-const unsigned *OperandCycles;           // Operand cycles
+const SubtargetInfoKV *ProcSchedModel;   // Scheduler machine model
+const InstrStage *Stages;                // Instruction itinerary stages
+const unsigned *OperandCycles;           // Itinerary operand cycles
const unsigned *ForwardingPaths;         // Forwarding paths
unsigned NumFeatures;                    // Number of processor features
unsigned NumProcs;                       // Number of processors
@ -42,7 +42,8 @@ public:
void InitMCSubtargetInfo(StringRef TT, StringRef CPU, StringRef FS,
                         const SubtargetFeatureKV *PF,
                         const SubtargetFeatureKV *PD,
-                        const SubtargetInfoKV *PI, const InstrStage *IS,
+                        const SubtargetInfoKV *ProcSched,
+                        const InstrStage *IS,
                         const unsigned *OC, const unsigned *FP,
                         unsigned NF, unsigned NP);
@ -69,6 +70,10 @@ public:
/// bits. This version will also change all implied bits.
uint64_t ToggleFeature(StringRef FS);

+/// getSchedModelForCPU - Get the machine model of a CPU.
+///
+MCSchedModel *getSchedModelForCPU(StringRef CPU) const;

/// getInstrItineraryForCPU - Get scheduling itinerary of a CPU.
///
InstrItineraryData getInstrItineraryForCPU(StringRef CPU) const;

View File

@ -933,6 +933,10 @@ class Processor<string n, ProcessorItineraries pi, list<SubtargetFeature> f> {
//
string Name = n;

+// SchedModel - The machine model for scheduling and instruction cost.
+//
+SchedMachineModel SchedModel = NoSchedModel;

// ProcItin - The scheduling information for the target processor.
//
ProcessorItineraries ProcItin = pi;
@ -941,6 +945,14 @@ class Processor<string n, ProcessorItineraries pi, list<SubtargetFeature> f> {
list<SubtargetFeature> Features = f;
}

+// ProcessorModel allows subtargets to specify the more general
+// SchedMachineModel instead of a ProcessorItinerary. Subtargets will
+// gradually move to this newer form.
+class ProcessorModel<string n, SchedMachineModel m, list<SubtargetFeature> f>
+  : Processor<n, NoItineraries, f> {
+  let SchedModel = m;
+}

//===----------------------------------------------------------------------===//
// Pull in the common support for calling conventions.
//

View File

@ -0,0 +1,136 @@
//===- TargetItinerary.td - Target Itinerary Description ---*- tablegen -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the target-independent scheduling interfaces
// which should be implemented by each target that uses instruction
// itineraries for scheduling. Itineraries are detailed reservation
// tables for each instruction class. They are most appropriate for
// in-order machines with complicated scheduling or bundling constraints.
//
//===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//
// Processor functional unit - These values represent the function units
// available across all chip sets for the target. Eg., IntUnit, FPUnit, ...
// These may be independent values for each chip set or may be shared across
// all chip sets of the target. Each functional unit is treated as a resource
// during scheduling and affects instruction order based on availability
// during a time interval.
//
class FuncUnit;
//===----------------------------------------------------------------------===//
// Pipeline bypass / forwarding - These values specify the symbolic names of
// pipeline bypasses which can be used to forward results of instructions
// that are forwarded to uses.
class Bypass;
def NoBypass : Bypass;
class ReservationKind<bits<1> val> {
int Value = val;
}
def Required : ReservationKind<0>;
def Reserved : ReservationKind<1>;
//===----------------------------------------------------------------------===//
// Instruction stage - These values represent a non-pipelined step in
// the execution of an instruction. Cycles represents the number of
// discrete time slots needed to complete the stage. Units represent
// the choice of functional units that can be used to complete the
// stage. Eg. IntUnit1, IntUnit2. NextCycles indicates how many
// cycles should elapse from the start of this stage to the start of
// the next stage in the itinerary. For example:
//
// A stage is specified in one of two ways:
//
// InstrStage<1, [FU_x, FU_y]> - TimeInc defaults to Cycles
// InstrStage<1, [FU_x, FU_y], 0> - TimeInc explicit
//
class InstrStage<int cycles, list<FuncUnit> units,
int timeinc = -1,
ReservationKind kind = Required> {
int Cycles = cycles; // length of stage in machine cycles
list<FuncUnit> Units = units; // choice of functional units
int TimeInc = timeinc; // cycles till start of next stage
int Kind = kind.Value; // kind of FU reservation
}
//===----------------------------------------------------------------------===//
// Instruction itinerary - An itinerary represents a sequential series of steps
// required to complete an instruction. Itineraries are represented as lists of
// instruction stages.
//
//===----------------------------------------------------------------------===//
// Instruction itinerary classes - These values represent 'named' instruction
// itinerary. Using named itineraries simplifies managing groups of
// instructions across chip sets. An instruction uses the same itinerary class
// across all chip sets. Thus a new chip set can be added without modifying
// instruction information.
//
class InstrItinClass;
def NoItinerary : InstrItinClass;
//===----------------------------------------------------------------------===//
// Instruction itinerary data - These values provide a runtime map of an
// instruction itinerary class (name) to its itinerary data.
//
// NumMicroOps represents the number of micro-operations that each instruction
// in the class is decoded to. If the number is zero, then it means the
// instruction can decode into a variable number of micro-ops and it must be
// determined dynamically. This directly relates to the itineraries
// global IssueWidth property, which constrains the number of microops
// that can issue per cycle.
//
// OperandCycles are optional "cycle counts". They specify the cycle after
// instruction issue the values which correspond to specific operand indices
// are defined or read. Bypasses are optional "pipeline forwarding paths", if
// a def by an instruction is available on a specific bypass and the use can
// read from the same bypass, then the operand use latency is reduced by one.
//
// InstrItinData<IIC_iLoad_i , [InstrStage<1, [A9_Pipe1]>,
// InstrStage<1, [A9_AGU]>],
// [3, 1], [A9_LdBypass]>,
// InstrItinData<IIC_iMVNr , [InstrStage<1, [A9_Pipe0, A9_Pipe1]>],
// [1, 1], [NoBypass, A9_LdBypass]>,
//
// In this example, the instruction of IIC_iLoadi reads its input on cycle 1
// (after issue) and the result of the load is available on cycle 3. The result
// is available via forwarding path A9_LdBypass. If it's used by the first
// source operand of instructions of IIC_iMVNr class, then the operand latency
// is reduced by 1.
class InstrItinData<InstrItinClass Class, list<InstrStage> stages,
list<int> operandcycles = [],
list<Bypass> bypasses = [], int uops = 1> {
InstrItinClass TheClass = Class;
int NumMicroOps = uops;
list<InstrStage> Stages = stages;
list<int> OperandCycles = operandcycles;
list<Bypass> Bypasses = bypasses;
}
//===----------------------------------------------------------------------===//
// Processor itineraries - These values represent the set of all itinerary
// classes for a given chip set.
//
// Set property values to -1 to use the default.
// See InstrItineraryProps for comments and defaults.
class ProcessorItineraries<list<FuncUnit> fu, list<Bypass> bp,
list<InstrItinData> iid> {
list<FuncUnit> FU = fu;
list<Bypass> BP = bp;
list<InstrItinData> IID = iid;
}
// NoItineraries - A marker that can be used by processors without schedule
// info. Subtargets using NoItineraries can bypass the scheduler's
// expensive HazardRecognizer because no reservation table is needed.
def NoItineraries : ProcessorItineraries<[], [], []>;
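To see how these classes compose, here is a deliberately tiny itinerary sketch;
the functional units and itinerary class are hypothetical, and real tables
appear in the ARM, Hexagon, and X86 changes below.

    def HypoALU : FuncUnit;            // hypothetical integer unit
    def HypoLS  : FuncUnit;            // hypothetical load/store unit
    def IIC_HypoLoad : InstrItinClass; // hypothetical itinerary class

    def HypoItineraries : ProcessorItineraries<[HypoALU, HypoLS], [], [
      // One cycle on the load/store unit; the result (operand 0) is defined
      // 3 cycles after issue and the address (operand 1) is read after 1 cycle.
      InstrItinData<IIC_HypoLoad, [InstrStage<1, [HypoLS]>], [3, 1]>
    ]>;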

View File

@ -12,141 +12,29 @@
//
//===----------------------------------------------------------------------===//
-//===----------------------------------------------------------------------===//
-// Processor functional unit - These values represent the function units
-// available across all chip sets for the target. Eg., IntUnit, FPUnit, ...
-// These may be independent values for each chip set or may be shared across
-// all chip sets of the target. Each functional unit is treated as a resource
-// during scheduling and has an affect instruction order based on availability
-// during a time interval.
-//
-class FuncUnit;
-//===----------------------------------------------------------------------===//
-// Pipeline bypass / forwarding - These values specifies the symbolic names of
-// pipeline bypasses which can be used to forward results of instructions
-// that are forwarded to uses.
-class Bypass;
-def NoBypass : Bypass;
-class ReservationKind<bits<1> val> {
-  int Value = val;
-}
-def Required : ReservationKind<0>;
-def Reserved : ReservationKind<1>;
-//===----------------------------------------------------------------------===//
-// Instruction stage - These values represent a non-pipelined step in
-// the execution of an instruction. Cycles represents the number of
-// discrete time slots needed to complete the stage. Units represent
-// the choice of functional units that can be used to complete the
-// stage. Eg. IntUnit1, IntUnit2. NextCycles indicates how many
-// cycles should elapse from the start of this stage to the start of
-// the next stage in the itinerary. For example:
-//
-// A stage is specified in one of two ways:
-//
-//   InstrStage<1, [FU_x, FU_y]>     - TimeInc defaults to Cycles
-//   InstrStage<1, [FU_x, FU_y], 0>  - TimeInc explicit
-//
-class InstrStage<int cycles, list<FuncUnit> units,
-                 int timeinc = -1,
-                 ReservationKind kind = Required> {
-  int Cycles = cycles;          // length of stage in machine cycles
-  list<FuncUnit> Units = units; // choice of functional units
-  int TimeInc = timeinc;        // cycles till start of next stage
-  int Kind = kind.Value;        // kind of FU reservation
-}
-//===----------------------------------------------------------------------===//
-// Instruction itinerary - An itinerary represents a sequential series of steps
-// required to complete an instruction. Itineraries are represented as lists of
-// instruction stages.
-//
-//===----------------------------------------------------------------------===//
-// Instruction itinerary classes - These values represent 'named' instruction
-// itinerary. Using named itineraries simplifies managing groups of
-// instructions across chip sets. An instruction uses the same itinerary class
-// across all chip sets. Thus a new chip set can be added without modifying
-// instruction information.
-//
-class InstrItinClass;
-def NoItinerary : InstrItinClass;
-//===----------------------------------------------------------------------===//
-// Instruction itinerary data - These values provide a runtime map of an
-// instruction itinerary class (name) to its itinerary data.
-//
-// NumMicroOps represents the number of micro-operations that each instruction
-// in the class are decoded to. If the number is zero, then it means the
-// instruction can decode into variable number of micro-ops and it must be
-// determined dynamically. This directly relates to the itineraries
-// global IssueWidth property, which constrains the number of microops
-// that can issue per cycle.
-//
-// OperandCycles are optional "cycle counts". They specify the cycle after
-// instruction issue the values which correspond to specific operand indices
-// are defined or read. Bypasses are optional "pipeline forwarding pathes", if
-// a def by an instruction is available on a specific bypass and the use can
-// read from the same bypass, then the operand use latency is reduced by one.
-//
-//  InstrItinData<IIC_iLoad_i , [InstrStage<1, [A9_Pipe1]>,
-//                               InstrStage<1, [A9_AGU]>],
-//                              [3, 1], [A9_LdBypass]>,
-//  InstrItinData<IIC_iMVNr , [InstrStage<1, [A9_Pipe0, A9_Pipe1]>],
-//                            [1, 1], [NoBypass, A9_LdBypass]>,
-//
-// In this example, the instruction of IIC_iLoadi reads its input on cycle 1
-// (after issue) and the result of the load is available on cycle 3. The result
-// is available via forwarding path A9_LdBypass. If it's used by the first
-// source operand of instructions of IIC_iMVNr class, then the operand latency
-// is reduced by 1.
-class InstrItinData<InstrItinClass Class, list<InstrStage> stages,
-                    list<int> operandcycles = [],
-                    list<Bypass> bypasses = [], int uops = 1> {
-  InstrItinClass TheClass = Class;
-  int NumMicroOps = uops;
-  list<InstrStage> Stages = stages;
-  list<int> OperandCycles = operandcycles;
-  list<Bypass> Bypasses = bypasses;
-}
-//===----------------------------------------------------------------------===//
-// Processor itineraries - These values represent the set of all itinerary
-// classes for a given chip set.
-//
-// Set property values to -1 to use the default.
-// See InstrItineraryProps for comments and defaults.
-class ProcessorItineraries<list<FuncUnit> fu, list<Bypass> bp,
-                           list<InstrItinData> iid> {
+include "llvm/Target/TargetItinerary.td"
+
+// The SchedMachineModel is defined by subtargets for three categories of data:
+// 1) Basic properties for coarse grained instruction cost model.
+// 2) Scheduler Read/Write resources for simple per-opcode cost model.
+// 3) Instruction itineraries for detailed reservation tables.
+//
+// Default values for basic properties are defined in MCSchedModel. "-1"
+// indicates that the property is not overridden by the target description.
+class SchedMachineModel {
  int IssueWidth = -1; // Max instructions that may be scheduled per cycle.
  int MinLatency = -1; // Determines which instructions are allowed in a group.
                       // (-1) inorder (0) ooo, (1): inorder +var latencies.
  int LoadLatency = -1; // Cycles for loads to access the cache.
  int HighLatency = -1; // Approximation of cycles for "high latency" ops.
-  list<FuncUnit> FU = fu;
-  list<Bypass> BP = bp;
-  list<InstrItinData> IID = iid;
+
+  ProcessorItineraries Itineraries = NoItineraries;
+
+  bit NoModel = 0; // Special tag to indicate missing machine model.
}
-// NoItineraries - A marker that can be used by processors without schedule
-// info. Subtargets using NoItineraries can bypass the scheduler's
-// expensive HazardRecognizer because no reservation table is needed.
-def NoItineraries : ProcessorItineraries<[], [], []>;
-// Processor itineraries with non-unit issue width. This allows issue
-// width to be explicity specified at the beginning of the itinerary.
-class MultiIssueItineraries<int issuewidth, int minlatency,
-                            int loadlatency, int highlatency,
-                            list<FuncUnit> fu, list<Bypass> bp,
-                            list<InstrItinData> iid>
-  : ProcessorItineraries<fu, bp, iid> {
-  let IssueWidth = issuewidth;
-  let MinLatency = minlatency;
-  let LoadLatency = loadlatency;
-  let HighLatency = highlatency;
-}
+
+def NoSchedModel : SchedMachineModel {
+  let NoModel = 1;
+}
+
+// TODO: Define classes for processor and scheduler resources.

View File

@ -403,7 +403,8 @@ public:
/// getIssueWidth - Return the max instructions per scheduling group.
unsigned getIssueWidth() const {
-  return InstrItins ? InstrItins->Props.IssueWidth : 1;
+  return (InstrItins && InstrItins->SchedModel)
+    ? InstrItins->SchedModel->IssueWidth : 1;
}

/// getNumMicroOps - Return the number of issue slots required for this MI.

View File

@ -72,10 +72,12 @@ ScoreboardHazardRecognizer(const InstrItineraryData *II,
ReservedScoreboard.reset(ScoreboardDepth);
RequiredScoreboard.reset(ScoreboardDepth);

+// If MaxLookAhead is not set above, then we are not enabled.
if (!isEnabled())
  DEBUG(dbgs() << "Disabled scoreboard hazard recognizer\n");
else {
-  IssueWidth = ItinData->Props.IssueWidth;
+  // A nonempty itinerary must have a SchedModel.
+  IssueWidth = ItinData->SchedModel->IssueWidth;
  DEBUG(dbgs() << "Using scoreboard hazard recognizer: Depth = "
        << ScoreboardDepth << '\n');
}

View File

@ -318,7 +318,7 @@ void ResourcePriorityQueue::reserveResources(SUnit *SU) {
// If packet is now full, reset the state so in the next cycle
// we start fresh.
-if (Packet.size() >= InstrItins->Props.IssueWidth) {
+if (Packet.size() >= InstrItins->SchedModel->IssueWidth) {
  ResourcesModel->clearResources();
  Packet.clear();
}

View File

@ -573,9 +573,9 @@ TargetInstrInfoImpl::getNumMicroOps(const InstrItineraryData *ItinData,
unsigned TargetInstrInfo::defaultDefLatency(const InstrItineraryData *ItinData,
                                            const MachineInstr *DefMI) const {
  if (DefMI->mayLoad())
-    return ItinData->Props.LoadLatency;
+    return ItinData->SchedModel->LoadLatency;
  if (isHighLatencyDef(DefMI->getOpcode()))
-    return ItinData->Props.HighLatency;
+    return ItinData->SchedModel->HighLatency;
  return 1;
}
@ -629,7 +629,7 @@ static int computeDefOperandLatency(
if (FindMin) {
  // If MinLatency is valid, call getInstrLatency. This uses Stage latency if
  // it exists before defaulting to MinLatency.
-  if (ItinData->Props.MinLatency >= 0)
+  if (ItinData->SchedModel->MinLatency >= 0)
    return TII->getInstrLatency(ItinData, DefMI);

  // If MinLatency is invalid, OperandLatency is interpreted as MinLatency.

View File

@ -17,11 +17,13 @@
using namespace llvm;

+MCSchedModel MCSchedModel::DefaultSchedModel; // For unknown processors.

void
MCSubtargetInfo::InitMCSubtargetInfo(StringRef TT, StringRef CPU, StringRef FS,
                                     const SubtargetFeatureKV *PF,
                                     const SubtargetFeatureKV *PD,
-                                    const SubtargetInfoKV *PI,
+                                    const SubtargetInfoKV *ProcSched,
                                     const InstrStage *IS,
                                     const unsigned *OC,
                                     const unsigned *FP,
@ -29,7 +31,7 @@ MCSubtargetInfo::InitMCSubtargetInfo(StringRef TT, StringRef CPU, StringRef FS,
  TargetTriple = TT;
  ProcFeatures = PF;
  ProcDesc = PD;
-  ProcItins = PI;
+  ProcSchedModel = ProcSched;
  Stages = IS;
  OperandCycles = OC;
  ForwardingPaths = FP;
@ -68,14 +70,14 @@ uint64_t MCSubtargetInfo::ToggleFeature(StringRef FS) {
}

-InstrItineraryData
-MCSubtargetInfo::getInstrItineraryForCPU(StringRef CPU) const {
-  assert(ProcItins && "Instruction itineraries information not available!");
+MCSchedModel *
+MCSubtargetInfo::getSchedModelForCPU(StringRef CPU) const {
+  assert(ProcSchedModel && "Processor machine model not available!");

#ifndef NDEBUG
  for (size_t i = 1; i < NumProcs; i++) {
-    assert(strcmp(ProcItins[i - 1].Key, ProcItins[i].Key) < 0 &&
-           "Itineraries table is not sorted");
+    assert(strcmp(ProcSchedModel[i - 1].Key, ProcSchedModel[i].Key) < 0 &&
+           "Processor machine model table is not sorted");
  }
#endif
@ -83,16 +85,19 @@ MCSubtargetInfo::getInstrItineraryForCPU(StringRef CPU) const {
  SubtargetInfoKV KV;
  KV.Key = CPU.data();
  const SubtargetInfoKV *Found =
-    std::lower_bound(ProcItins, ProcItins+NumProcs, KV);
-  if (Found == ProcItins+NumProcs || StringRef(Found->Key) != CPU) {
+    std::lower_bound(ProcSchedModel, ProcSchedModel+NumProcs, KV);
+  if (Found == ProcSchedModel+NumProcs || StringRef(Found->Key) != CPU) {
    errs() << "'" << CPU
           << "' is not a recognized processor for this target"
           << " (ignoring processor)\n";
-    return InstrItineraryData();
+    return &MCSchedModel::DefaultSchedModel;
  }
-
-  InstrItinerarySubtargetValue *V =
-    (InstrItinerarySubtargetValue *)Found->Value;
-  return InstrItineraryData(V->Props, Stages, OperandCycles, ForwardingPaths,
-                            V->Itineraries);
+  assert(Found->Value && "Missing processor SchedModel value");
+  return (MCSchedModel *)Found->Value;
+}
+
+InstrItineraryData
+MCSubtargetInfo::getInstrItineraryForCPU(StringRef CPU) const {
+  MCSchedModel *SchedModel = getSchedModelForCPU(CPU);
+  return InstrItineraryData(SchedModel, Stages, OperandCycles, ForwardingPaths);
}

View File

@ -204,13 +204,13 @@ def : Processor<"arm1156t2f-s", ARMV6Itineraries, [HasV6T2Ops, FeatureVFP2,
                     FeatureDSPThumb2]>;

// V7a Processors.
-def : Processor<"cortex-a8", CortexA8Itineraries,
+def : ProcessorModel<"cortex-a8", CortexA8Model,
                     [ProcA8, HasV7Ops, FeatureNEON, FeatureDB,
                      FeatureDSPThumb2, FeatureHasRAS]>;
-def : Processor<"cortex-a9", CortexA9Itineraries,
+def : ProcessorModel<"cortex-a9", CortexA9Model,
                     [ProcA9, HasV7Ops, FeatureNEON, FeatureDB,
                      FeatureDSPThumb2, FeatureHasRAS]>;
-def : Processor<"cortex-a9-mp", CortexA9Itineraries,
+def : ProcessorModel<"cortex-a9-mp", CortexA9Model,
                     [ProcA9, HasV7Ops, FeatureNEON, FeatureDB,
                      FeatureDSPThumb2, FeatureMP,
                      FeatureHasRAS]>;

View File

@ -22,11 +22,7 @@ def A8_NLSPipe : FuncUnit; // NEON LS pipe
//
// Dual issue pipeline represented by A8_Pipe0 | A8_Pipe1
//
-def CortexA8Itineraries : MultiIssueItineraries<
-  2,  // IssueWidth
-  -1, // MinLatency - OperandCycles are interpreted as MinLatency.
-  2,  // LoadLatency - overriden by OperandCycles.
-  10, // HighLatency - currently unused.
+def CortexA8Itineraries : ProcessorItineraries<
  [A8_Pipe0, A8_Pipe1, A8_LSPipe, A8_NPipe, A8_NLSPipe],
  [], [
  // Two fully-pipelined integer ALU pipelines
@ -1061,3 +1057,18 @@ def CortexA8Itineraries : MultiIssueItineraries<
  InstrStage<1, [A8_NPipe], 0>,
  InstrStage<2, [A8_NLSPipe]>], [4, 1, 2, 2, 3, 3, 1]>
]>;
// ===---------------------------------------------------------------------===//
// The following definitions describe the simple machine model which
// will replace itineraries.
// Cortex-A8 machine model for scheduling and other instruction cost heuristics.
def CortexA8Model : SchedMachineModel {
let IssueWidth = 2; // 2 micro-ops are dispatched per cycle.
let MinLatency = -1; // OperandCycles are interpreted as MinLatency.
let LoadLatency = 2; // Optimistic load latency assuming bypass.
// This is overridden by OperandCycles if the
// Itineraries are queried instead.
let Itineraries = CortexA8Itineraries;
}

View File

@ -11,6 +11,10 @@
//
//===----------------------------------------------------------------------===//

+// ===---------------------------------------------------------------------===//
+// This section contains legacy support for itineraries. This is
+// required until SD and PostRA schedulers are replaced by MachineScheduler.
//
// Ad-hoc scheduling information derived from pretty vague "Cortex-A9 Technical
// Reference Manual".
@ -31,11 +35,7 @@ def A9_DRegsN : FuncUnit; // FP register set, NEON side
// Bypasses
def A9_LdBypass : Bypass;

-def CortexA9Itineraries : MultiIssueItineraries<
-  2,  // IssueWidth - FIXME: A9_Issue0, A9_Issue1 are now redundant.
-  0,  // MinLatency - FIXME: for misched, remove InstrStage for OOO operations.
-  2,  // LoadLatency - optimistic, assumes bypass, overriden by OperandCycles.
-  10, // HighLatency - currently unused.
+def CortexA9Itineraries : ProcessorItineraries<
  [A9_Issue0, A9_Issue1, A9_Branch, A9_ALU0, A9_ALU1, A9_AGU, A9_NPipe, A9_MUX0,
   A9_LSUnit, A9_DRegsVFP, A9_DRegsN],
  [A9_LdBypass], [
@ -1874,3 +1874,21 @@ def CortexA9Itineraries : MultiIssueItineraries<
  InstrStage<2, [A9_NPipe]>],
  [4, 1, 2, 2, 3, 3, 1]>
]>;
// ===---------------------------------------------------------------------===//
// The following definitions describe the simple machine model which
// will replace itineraries.
// Cortex-A9 machine model for scheduling and other instruction cost heuristics.
def CortexA9Model : SchedMachineModel {
let IssueWidth = 2; // 2 micro-ops are dispatched per cycle.
let MinLatency = 0; // Data dependencies are allowed within dispatch groups.
let LoadLatency = 2; // Optimistic load latency assuming bypass.
// This is overridden by OperandCycles if the
// Itineraries are queried instead.
let Itineraries = CortexA9Itineraries;
}
// TODO: Add Cortex-A9 processor and scheduler resources.

View File

@ -47,14 +47,14 @@ def HexagonInstrInfo : InstrInfo;
// Hexagon processors supported.
//===----------------------------------------------------------------------===//

-class Proc<string Name, ProcessorItineraries Itin,
+class Proc<string Name, SchedMachineModel Model,
           list<SubtargetFeature> Features>
-  : Processor<Name, Itin, Features>;
+  : ProcessorModel<Name, Model, Features>;

-def : Proc<"hexagonv2", HexagonItineraries,   [ArchV2]>;
-def : Proc<"hexagonv3", HexagonItineraries,   [ArchV2, ArchV3]>;
-def : Proc<"hexagonv4", HexagonItinerariesV4, [ArchV2, ArchV3, ArchV4]>;
-def : Proc<"hexagonv5", HexagonItinerariesV4, [ArchV2, ArchV3, ArchV4, ArchV5]>;
+def : Proc<"hexagonv2", HexagonModel,   [ArchV2]>;
+def : Proc<"hexagonv3", HexagonModel,   [ArchV2, ArchV3]>;
+def : Proc<"hexagonv4", HexagonModelV4, [ArchV2, ArchV3, ArchV4]>;
+def : Proc<"hexagonv5", HexagonModelV4, [ArchV2, ArchV3, ArchV4, ArchV5]>;

// Hexagon Uses the MC printer for assembler output, so make sure the TableGen

View File

@ -41,9 +41,12 @@ def HexagonItineraries :
  InstrItinData<SYS     , [InstrStage<1, [LSUNIT]>]>,
  InstrItinData<MARKER  , [InstrStage<1, [LUNIT, LSUNIT, MUNIT, SUNIT]>]>,
  InstrItinData<PSEUDO  , [InstrStage<1, [LUNIT, LSUNIT, MUNIT, SUNIT]>]>
-]> {
+]>;
+
+def HexagonModel : SchedMachineModel {
  // Max issue per cycle == bundle width.
  let IssueWidth = 4;
+  let Itineraries = HexagonItineraries;
}

//===----------------------------------------------------------------------===//

View File

@ -52,12 +52,14 @@ def HexagonItinerariesV4 :
  InstrItinData<MARKER  , [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
  InstrItinData<PREFIX  , [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
  InstrItinData<PSEUDO  , [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>
-]> {
+]>;
+
+def HexagonModelV4 : SchedMachineModel {
  // Max issue per cycle == bundle width.
  let IssueWidth = 4;
+  let Itineraries = HexagonItinerariesV4;
}

//===----------------------------------------------------------------------===//
// Hexagon V4 Resource Definitions -
//===----------------------------------------------------------------------===//

View File

@ -131,10 +131,10 @@ def ProcIntelAtom : SubtargetFeature<"atom", "X86ProcFamily", "IntelAtom",
"Intel Atom processors">; "Intel Atom processors">;
class Proc<string Name, list<SubtargetFeature> Features> class Proc<string Name, list<SubtargetFeature> Features>
: Processor<Name, GenericItineraries, Features>; : ProcessorModel<Name, GenericModel, Features>;
class AtomProc<string Name, list<SubtargetFeature> Features> class AtomProc<string Name, list<SubtargetFeature> Features>
: Processor<Name, AtomItineraries, Features>; : ProcessorModel<Name, AtomModel, Features>;
def : Proc<"generic", []>; def : Proc<"generic", []>;
def : Proc<"i386", []>; def : Proc<"i386", []>;

View File

@ -470,14 +470,12 @@ def IIC_NOP : InstrItinClass;
// latencies. Since these latencies are not used for pipeline hazards,
// they do not need to be exact.
//
-// This set of instruction itineraries should contain no reference to
-// InstrStages. When an iterary has no stages, the scheduler can
-// bypass the logic needed for checking pipeline stage hazards.
-def GenericItineraries : MultiIssueItineraries<
-  4,  // IssueWidth
-  0,  // MinLatency
-  4,  // LoadLatency (expected, may be overriden by OperandCycles)
-  10, // HighLatency (expected, may be overriden by OperandCycles)
-  [], [], []>; // no FuncUnits, Bypasses, or InstrItinData.
+// The GenericModel contains no instruction itineraries.
+def GenericModel : SchedMachineModel {
+  let IssueWidth = 4;
+  let MinLatency = 0;
+  let LoadLatency = 4;
+  let HighLatency = 10;
+}

include "X86ScheduleAtom.td"

View File

@ -22,12 +22,7 @@ def Port0 : FuncUnit; // ALU: ALU0, shift/rotate, load/store
def Port1 : FuncUnit; // ALU: ALU1, bit processing, jump, and LEA
                      // SIMD/FP: SIMD ALU, FP Adder

-def AtomItineraries : MultiIssueItineraries<
-  2, // IssueWidth=2 allows 2 instructions per scheduling group.
-  1, // MinLatency=1. InstrStage cycles overrides MinLatency.
-     // OperandCycles may be used for expected latency.
-  3, // LoadLatency (expected, may be overriden by OperandCycles)
-  30,// HighLatency (expected, may be overriden by OperandCycles)
+def AtomItineraries : ProcessorItineraries<
  [ Port0, Port1 ],
  [], [
  // P0 only
@ -523,3 +518,13 @@ def AtomItineraries : MultiIssueItineraries<
  InstrItinData<IIC_NOP, [InstrStage<1, [Port0, Port1]>] >
]>;
// Atom machine model.
def AtomModel : SchedMachineModel {
let IssueWidth = 2; // Allows 2 instructions per scheduling group.
let MinLatency = 1; // InstrStage cycles overrides MinLatency.
// OperandCycles may be used for expected latency.
let LoadLatency = 3;  // Expected cycles, may be overridden by OperandCycles.
let HighLatency = 30; // Expected, may be overridden by OperandCycles.
let Itineraries = AtomItineraries;
}

View File

@ -11,6 +11,7 @@ add_tablegen(llvm-tblgen LLVM
  CodeGenDAGPatterns.cpp
  CodeGenInstruction.cpp
  CodeGenRegisters.cpp
+  CodeGenSchedule.cpp
  CodeGenTarget.cpp
  DAGISelEmitter.cpp
  DAGISelMatcherEmitter.cpp

View File

@ -0,0 +1,151 @@
//===- CodeGenSchedule.cpp - Scheduling MachineModels ---------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines structures to encapsulate the machine model as described in
// the target description.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "subtarget-emitter"
#include "CodeGenSchedule.h"
#include "CodeGenTarget.h"
#include "llvm/Support/Debug.h"
using namespace llvm;
// CodeGenSchedModels ctor interprets machine model records and populates maps.
CodeGenSchedModels::CodeGenSchedModels(RecordKeeper &RK,
const CodeGenTarget &TGT):
Records(RK), Target(TGT), NumItineraryClasses(0), HasProcItineraries(false) {
// Populate SchedClassIdxMap and set NumItineraryClasses.
CollectSchedClasses();
// Populate ProcModelMap.
CollectProcModels();
}
// Visit all the instruction definitions for this target to gather and enumerate
// the itinerary classes. These are the explicitly specified SchedClasses. More
// SchedClasses may be inferred.
void CodeGenSchedModels::CollectSchedClasses() {
// NoItinerary is always the first class at Index=0
SchedClasses.resize(1);
SchedClasses.back().Name = "NoItinerary";
SchedClassIdxMap[SchedClasses.back().Name] = 0;
// Gather and sort all itinerary classes used by instruction descriptions.
std::vector<Record*> ItinClassList;
for (CodeGenTarget::inst_iterator I = Target.inst_begin(),
E = Target.inst_end(); I != E; ++I) {
Record *SchedDef = (*I)->TheDef->getValueAsDef("Itinerary");
// Map a new SchedClass with no index.
if (!SchedClassIdxMap.count(SchedDef->getName())) {
SchedClassIdxMap[SchedDef->getName()] = 0;
ItinClassList.push_back(SchedDef);
}
}
// Assign each itinerary class a unique number, skipping NoItinerary==0
NumItineraryClasses = ItinClassList.size();
std::sort(ItinClassList.begin(), ItinClassList.end(), LessRecord());
for (unsigned i = 0, N = NumItineraryClasses; i < N; i++) {
Record *ItinDef = ItinClassList[i];
SchedClassIdxMap[ItinDef->getName()] = SchedClasses.size();
SchedClasses.push_back(CodeGenSchedClass(ItinDef));
}
// TODO: Infer classes from non-itinerary scheduler resources.
}
// Gather all processor models.
void CodeGenSchedModels::CollectProcModels() {
std::vector<Record*> ProcRecords =
Records.getAllDerivedDefinitions("Processor");
std::sort(ProcRecords.begin(), ProcRecords.end(), LessRecordFieldName());
// Reserve space because we can. Reallocation would be ok.
ProcModels.reserve(ProcRecords.size());
// For each processor, find a unique machine model.
for (unsigned i = 0, N = ProcRecords.size(); i < N; ++i)
addProcModel(ProcRecords[i]);
}
// Get a unique processor model based on the defined MachineModel and
// ProcessorItineraries.
void CodeGenSchedModels::addProcModel(Record *ProcDef) {
unsigned Idx = getProcModelIdx(ProcDef);
if (Idx < ProcModels.size())
return;
Record *ModelDef = ProcDef->getValueAsDef("SchedModel");
Record *ItinsDef = ProcDef->getValueAsDef("ProcItin");
std::string ModelName = ModelDef->getName();
const std::string &ItinName = ItinsDef->getName();
bool NoModel = ModelDef->getValueAsBit("NoModel");
bool hasTopLevelItin = !ItinsDef->getValueAsListOfDefs("IID").empty();
if (NoModel) {
// If an itinerary is defined without a machine model, infer a new model.
if (NoModel && hasTopLevelItin) {
ModelName = ItinName + "Model";
ModelDef = NULL;
}
}
else {
// If a machine model is defined, the itinerary must be defined within it
// rather than in the Processor definition itself.
assert(!hasTopLevelItin && "Itinerary must be defined in SchedModel");
ItinsDef = ModelDef->getValueAsDef("Itineraries");
}
ProcModelMap[getProcModelKey(ProcDef)]= ProcModels.size();
ProcModels.push_back(CodeGenProcModel(ModelName, ModelDef, ItinsDef));
std::vector<Record*> ItinRecords = ItinsDef->getValueAsListOfDefs("IID");
CollectProcItin(ProcModels.back(), ItinRecords);
}
// Gather the processor itineraries.
void CodeGenSchedModels::CollectProcItin(CodeGenProcModel &ProcModel,
std::vector<Record*> ItinRecords) {
// Skip empty itinerary.
if (ItinRecords.empty())
return;
HasProcItineraries = true;
ProcModel.ItinDefList.resize(NumItineraryClasses+1);
// Insert each itinerary data record in the correct position within
// the processor model's ItinDefList.
for (unsigned i = 0, N = ItinRecords.size(); i < N; i++) {
Record *ItinData = ItinRecords[i];
Record *ItinDef = ItinData->getValueAsDef("TheClass");
if (!SchedClassIdxMap.count(ItinDef->getName())) {
DEBUG(dbgs() << ProcModel.ItinsDef->getName()
<< " has unused itinerary class " << ItinDef->getName() << '\n');
continue;
}
ProcModel.ItinDefList[getItinClassIdx(ItinDef)] = ItinData;
}
#ifndef NDEBUG
// Check for missing itinerary entries.
assert(!ProcModel.ItinDefList[0] && "NoItinerary class can't have rec");
for (unsigned i = 1, N = ProcModel.ItinDefList.size(); i < N; ++i) {
if (!ProcModel.ItinDefList[i])
DEBUG(dbgs() << ProcModel.ItinsDef->getName()
<< " missing itinerary for class " << SchedClasses[i].Name << '\n');
}
#endif
}

View File

@ -0,0 +1,172 @@
//===- CodeGenSchedule.h - Scheduling Machine Models ------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines structures to encapsulate the machine model as described in
// the target description.
//
//===----------------------------------------------------------------------===//
#ifndef CODEGEN_SCHEDULE_H
#define CODEGEN_SCHEDULE_H
#include "llvm/TableGen/Record.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/StringMap.h"
namespace llvm {
class CodeGenTarget;
// Scheduling class.
//
// Each instruction description will be mapped to a scheduling class. It may be
// an explicitly defined itinerary class, or an inferred class in which case
// ItinClassDef == NULL.
struct CodeGenSchedClass {
std::string Name;
unsigned Index;
Record *ItinClassDef;
CodeGenSchedClass(): Index(0), ItinClassDef(0) {}
CodeGenSchedClass(Record *rec): Index(0), ItinClassDef(rec) {
Name = rec->getName();
}
};
// Processor model.
//
// ModelName is a unique name used to name an instantiation of MCSchedModel.
//
// ModelDef is NULL for inferred Models. This happens when a processor defines
// an itinerary but no machine model. If the processor defines neither a machine
// model nor itinerary, then ModelDef remains pointing to NoModel. NoModel has
// the special "NoModel" field set to true.
//
// ItinsDef always points to a valid record definition, but may point to the
// default NoItineraries. NoItineraries has an empty list of InstrItinData
// records.
//
// ItinDefList orders this processor's InstrItinData records by SchedClass idx.
struct CodeGenProcModel {
std::string ModelName;
Record *ModelDef;
Record *ItinsDef;
// Array of InstrItinData records indexed by CodeGenSchedClass::Index.
// The list is empty if the subtarget has no itineraries.
std::vector<Record *> ItinDefList;
CodeGenProcModel(const std::string &Name, Record *MDef, Record *IDef):
ModelName(Name), ModelDef(MDef), ItinsDef(IDef) {}
};
// Top level container for machine model data.
class CodeGenSchedModels {
RecordKeeper &Records;
const CodeGenTarget &Target;
// List of unique SchedClasses.
std::vector<CodeGenSchedClass> SchedClasses;
// Map SchedClass name to itinerary index.
// These are either explicit itinerary classes or inferred classes.
StringMap<unsigned> SchedClassIdxMap;
// SchedClass indices 1 up to and including NumItineraryClasses identify
// itinerary classes that are explicitly used for this target's instruction
// definitions. NoItinerary always has index 0 regardless of whether it is
// explicitly referenced.
//
// Any inferred SchedClass has an index greater than NumItineraryClasses.
unsigned NumItineraryClasses;
// List of unique processor models.
std::vector<CodeGenProcModel> ProcModels;
// Map Processor's MachineModel + ProcItin fields to a CodeGenProcModel index.
typedef DenseMap<std::pair<Record*, Record*>, unsigned> ProcModelMapTy;
ProcModelMapTy ProcModelMap;
// True if any processors have nonempty itineraries.
bool HasProcItineraries;
public:
CodeGenSchedModels(RecordKeeper& RK, const CodeGenTarget &TGT);
// Check if any instructions are assigned to an explicit itinerary class other
// than NoItinerary.
bool hasItineraryClasses() const { return NumItineraryClasses > 0; }
// Return the number of itinerary classes in use by this target's instruction
// descriptions, not including "NoItinerary".
unsigned numItineraryClasses() const {
return NumItineraryClasses;
}
// Get a SchedClass from its index.
const CodeGenSchedClass &getSchedClass(unsigned Idx) {
assert(Idx < SchedClasses.size() && "bad SchedClass index");
return SchedClasses[Idx];
}
// Get an itinerary class's index. Valid indices are '0' for NoItinerary up to
// and including numItineraryClasses().
unsigned getItinClassIdx(Record *ItinDef) const {
assert(SchedClassIdxMap.count(ItinDef->getName()) && "missing ItinClass");
unsigned Idx = SchedClassIdxMap.lookup(ItinDef->getName());
assert(Idx <= NumItineraryClasses && "bad ItinClass index");
return Idx;
}
bool hasProcessorItineraries() const {
return HasProcItineraries;
}
// Get an existing machine model for a processor definition.
const CodeGenProcModel &getProcModel(Record *ProcDef) const {
unsigned idx = getProcModelIdx(ProcDef);
assert(idx < ProcModels.size() && "missing machine model");
return ProcModels[idx];
}
// Iterate over the unique processor models.
typedef std::vector<CodeGenProcModel>::const_iterator ProcIter;
ProcIter procModelBegin() const { return ProcModels.begin(); }
ProcIter procModelEnd() const { return ProcModels.end(); }
private:
// Get a key that can uniquely identify a machine model.
ProcModelMapTy::key_type getProcModelKey(Record *ProcDef) const {
Record *ModelDef = ProcDef->getValueAsDef("SchedModel");
Record *ItinsDef = ProcDef->getValueAsDef("ProcItin");
return std::make_pair(ModelDef, ItinsDef);
}
// Get the unique index of a machine model.
unsigned getProcModelIdx(Record *ProcDef) const {
ProcModelMapTy::const_iterator I =
ProcModelMap.find(getProcModelKey(ProcDef));
if (I == ProcModelMap.end())
return ProcModels.size();
return I->second;
}
// Initialize a new processor model if it is unique.
void addProcModel(Record *ProcDef);
void CollectSchedClasses();
void CollectProcModels();
void CollectProcItin(CodeGenProcModel &ProcModel,
std::vector<Record*> ItinRecords);
};
} // namespace llvm
#endif
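The two flavors of Processor records this backend has to reconcile look roughly
like the sketch below (hypothetical names). For the legacy form, addProcModel()
infers a model named "HypoItinerariesModel" and leaves ModelDef NULL; for the
new form, the itineraries are taken from the model itself.

    // Legacy form: itineraries only; the machine model is inferred.
    def : Processor<"hypo-legacy", HypoItineraries, [FeatureHypo]>;

    // New form: an explicit SchedMachineModel that may own the itineraries.
    def : ProcessorModel<"hypo-new", HypoModel, [FeatureHypo]>;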

View File

@ -16,6 +16,7 @@
#include "CodeGenTarget.h" #include "CodeGenTarget.h"
#include "CodeGenIntrinsics.h" #include "CodeGenIntrinsics.h"
#include "CodeGenSchedule.h"
#include "llvm/TableGen/Record.h" #include "llvm/TableGen/Record.h"
#include "llvm/ADT/StringExtras.h" #include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/STLExtras.h" #include "llvm/ADT/STLExtras.h"
@ -112,7 +113,7 @@ std::string llvm::getQualifiedName(const Record *R) {
/// getTarget - Return the current instance of the Target class. /// getTarget - Return the current instance of the Target class.
/// ///
CodeGenTarget::CodeGenTarget(RecordKeeper &records) CodeGenTarget::CodeGenTarget(RecordKeeper &records)
: Records(records), RegBank(0) { : Records(records), RegBank(0), SchedModels(0) {
std::vector<Record*> Targets = Records.getAllDerivedDefinitions("Target"); std::vector<Record*> Targets = Records.getAllDerivedDefinitions("Target");
if (Targets.size() == 0) if (Targets.size() == 0)
throw std::string("ERROR: No 'Target' subclasses defined!"); throw std::string("ERROR: No 'Target' subclasses defined!");
@ -121,6 +122,10 @@ CodeGenTarget::CodeGenTarget(RecordKeeper &records)
TargetRec = Targets[0]; TargetRec = Targets[0];
} }
CodeGenTarget::~CodeGenTarget() {
delete RegBank;
delete SchedModels;
}
const std::string &CodeGenTarget::getName() const { const std::string &CodeGenTarget::getName() const {
return TargetRec->getName(); return TargetRec->getName();
@ -235,6 +240,11 @@ void CodeGenTarget::ReadLegalValueTypes() const {
LegalValueTypes.end()); LegalValueTypes.end());
} }
CodeGenSchedModels &CodeGenTarget::getSchedModels() const {
if (!SchedModels)
SchedModels = new CodeGenSchedModels(Records, *this);
return *SchedModels;
}
void CodeGenTarget::ReadInstructions() const { void CodeGenTarget::ReadInstructions() const {
std::vector<Record*> Insts = Records.getAllDerivedDefinitions("Instruction"); std::vector<Record*> Insts = Records.getAllDerivedDefinitions("Instruction");

View File

@@ -26,6 +26,7 @@
 namespace llvm {
 
 struct CodeGenRegister;
+class CodeGenSchedModels;
 class CodeGenTarget;
 
 // SelectionDAG node properties.
@@ -72,9 +73,12 @@ class CodeGenTarget {
   void ReadInstructions() const;
   void ReadLegalValueTypes() const;
 
+  mutable CodeGenSchedModels *SchedModels;
+
   mutable std::vector<const CodeGenInstruction*> InstrsByEnum;
 public:
   CodeGenTarget(RecordKeeper &Records);
+  ~CodeGenTarget();
 
   Record *getTargetRecord() const { return TargetRec; }
   const std::string &getName() const;
@@ -139,6 +143,8 @@ public:
     return false;
   }
 
+  CodeGenSchedModels &getSchedModels() const;
+
 private:
   DenseMap<const Record*, CodeGenInstruction*> &getInstructions() const {
     if (Instructions.empty()) ReadInstructions();

View File

@@ -14,6 +14,7 @@
 #include "CodeGenDAGPatterns.h"
+#include "CodeGenSchedule.h"
 #include "CodeGenTarget.h"
 #include "SequenceToOffsetTable.h"
 #include "llvm/ADT/StringExtras.h"
@@ -29,10 +30,11 @@ namespace {
 class InstrInfoEmitter {
   RecordKeeper &Records;
   CodeGenDAGPatterns CDP;
-  std::map<std::string, unsigned> ItinClassMap;
+  const CodeGenSchedModels &SchedModels;
 
 public:
-  InstrInfoEmitter(RecordKeeper &R) : Records(R), CDP(R) { }
+  InstrInfoEmitter(RecordKeeper &R):
+    Records(R), CDP(R), SchedModels(CDP.getTargetInfo().getSchedModels()) {}
 
   // run - Output the instruction set description.
   void run(raw_ostream &OS);
@@ -47,10 +49,6 @@ private:
                         const OperandInfoMapTy &OpInfo,
                         raw_ostream &OS);
 
-  // Itinerary information.
-  void GatherItinClasses();
-  unsigned getItinClassNumber(const Record *InstRec);
-
   // Operand information.
   void EmitOperandInfo(raw_ostream &OS, OperandInfoMapTy &OperandInfoIDs);
   std::vector<std::string> GetOperandInfo(const CodeGenInstruction &Inst);
@@ -65,23 +63,6 @@ static void PrintDefList(const std::vector<Record*> &Uses,
   OS << "0 };\n";
 }
 
-//===----------------------------------------------------------------------===//
-// Instruction Itinerary Information.
-//===----------------------------------------------------------------------===//
-
-void InstrInfoEmitter::GatherItinClasses() {
-  std::vector<Record*> DefList =
-    Records.getAllDerivedDefinitions("InstrItinClass");
-  std::sort(DefList.begin(), DefList.end(), LessRecord());
-
-  for (unsigned i = 0, N = DefList.size(); i < N; i++)
-    ItinClassMap[DefList[i]->getName()] = i;
-}
-
-unsigned InstrInfoEmitter::getItinClassNumber(const Record *InstRec) {
-  return ItinClassMap[InstRec->getValueAsDef("Itinerary")->getName()];
-}
-
 //===----------------------------------------------------------------------===//
 // Operand Info Emission.
 //===----------------------------------------------------------------------===//
@@ -202,8 +183,6 @@ void InstrInfoEmitter::run(raw_ostream &OS) {
   emitSourceFileHeader("Target Instruction Enum Values", OS);
   emitEnums(OS);
 
-  GatherItinClasses();
-
   emitSourceFileHeader("Target Instruction Descriptors", OS);
 
   OS << "\n#ifdef GET_INSTRINFO_MC_DESC\n";
@@ -325,10 +304,11 @@ void InstrInfoEmitter::emitRecord(const CodeGenInstruction &Inst, unsigned Num,
     MinOperands = Inst.Operands.back().MIOperandNo +
                   Inst.Operands.back().MINumOperands;
 
+  Record *ItinDef = Inst.TheDef->getValueAsDef("Itinerary");
+
   OS << " { ";
   OS << Num << ",\t" << MinOperands << ",\t"
      << Inst.Operands.NumDefs << ",\t"
-     << getItinClassNumber(Inst.TheDef) << ",\t"
+     << SchedModels.getItinClassIdx(ItinDef) << ",\t"
     << Inst.TheDef->getValueAsInt("Size") << ",\t0";
 
   // Emit all of the target indepedent flags...

View File

@@ -12,6 +12,7 @@
 //===----------------------------------------------------------------------===//
 
 #include "CodeGenTarget.h"
+#include "CodeGenSchedule.h"
 #include "llvm/ADT/StringExtras.h"
 #include "llvm/MC/MCInstrItineraries.h"
 #include "llvm/Support/Debug.h"
@@ -27,15 +28,12 @@ namespace {
 class SubtargetEmitter {
 
   RecordKeeper &Records;
+  CodeGenSchedModels &SchedModels;
   std::string Target;
-  bool HasItineraries;
 
   void Enumeration(raw_ostream &OS, const char *ClassName, bool isBits);
   unsigned FeatureKeyValues(raw_ostream &OS);
   unsigned CPUKeyValues(raw_ostream &OS);
-  unsigned CollectAllItinClasses(raw_ostream &OS,
-                                 std::map<std::string,unsigned> &ItinClassesMap,
-                                 std::vector<Record*> &ItinClassList);
   void FormItineraryStageString(const std::string &Names,
                                 Record *ItinData, std::string &ItinString,
                                 unsigned &NStages);
@@ -44,22 +42,23 @@ class SubtargetEmitter {
   void FormItineraryBypassString(const std::string &Names,
                                  Record *ItinData,
                                  std::string &ItinString, unsigned NOperandCycles);
-  void EmitStageAndOperandCycleData(raw_ostream &OS, unsigned NItinClasses,
-                     std::map<std::string, unsigned> &ItinClassesMap,
-                     std::vector<Record*> &ItinClassList,
-                     std::vector<std::vector<InstrItinerary> > &ProcList);
-  void EmitItineraryProp(raw_ostream &OS, const Record *R, const char *Name,
+  void EmitStageAndOperandCycleData(raw_ostream &OS,
+                                    std::vector<std::vector<InstrItinerary> >
+                                      &ProcItinLists);
+  void EmitItineraries(raw_ostream &OS,
+                       std::vector<std::vector<InstrItinerary> >
+                         &ProcItinLists);
+  void EmitProcessorProp(raw_ostream &OS, const Record *R, const char *Name,
                          char Separator);
-  void EmitProcessorData(raw_ostream &OS,
-                         std::vector<Record*> &ItinClassList,
-                         std::vector<std::vector<InstrItinerary> > &ProcList);
+  void EmitProcessorModels(raw_ostream &OS);
   void EmitProcessorLookup(raw_ostream &OS);
-  void EmitData(raw_ostream &OS);
+  void EmitSchedModel(raw_ostream &OS);
   void ParseFeaturesFunction(raw_ostream &OS, unsigned NumFeatures,
                              unsigned NumProcs);
 
 public:
-  SubtargetEmitter(RecordKeeper &R) : Records(R), HasItineraries(false) {}
+  SubtargetEmitter(RecordKeeper &R, CodeGenTarget &TGT):
+    Records(R), SchedModels(TGT.getSchedModels()), Target(TGT.getName()) {}
 
   void run(raw_ostream &o);
 
@@ -242,28 +241,6 @@ unsigned SubtargetEmitter::CPUKeyValues(raw_ostream &OS) {
   return ProcessorList.size();
 }
 
-//
-// CollectAllItinClasses - Gathers and enumerates all the itinerary classes.
-// Returns itinerary class count.
-//
-unsigned SubtargetEmitter::
-CollectAllItinClasses(raw_ostream &OS,
-                      std::map<std::string, unsigned> &ItinClassesMap,
-                      std::vector<Record*> &ItinClassList) {
-  // For each itinerary class
-  unsigned N = ItinClassList.size();
-  for (unsigned i = 0; i < N; i++) {
-    // Next itinerary class
-    const Record *ItinClass = ItinClassList[i];
-    // Get name of itinerary class
-    // Assign itinerary class a unique number
-    ItinClassesMap[ItinClass->getName()] = i;
-  }
-  // Return itinerary class count
-  return N;
-}
-
 //
 // FormItineraryStageString - Compose a string containing the stage
 // data initialization for the specified itinerary. N is the number
@@ -350,32 +327,25 @@ void SubtargetEmitter::FormItineraryBypassString(const std::string &Name,
 }
 
 //
-// EmitStageAndOperandCycleData - Generate unique itinerary stages and
-// operand cycle tables. Record itineraries for processors.
+// EmitStageAndOperandCycleData - Generate unique itinerary stages and operand
+// cycle tables. Create a list of InstrItinerary objects (ProcItinLists) indexed
+// by CodeGenSchedClass::Index.
 //
-void SubtargetEmitter::EmitStageAndOperandCycleData(raw_ostream &OS,
-                                                    unsigned NItinClasses,
-                     std::map<std::string, unsigned> &ItinClassesMap,
-                     std::vector<Record*> &ItinClassList,
-                     std::vector<std::vector<InstrItinerary> > &ProcList) {
-  // Gather processor iteraries
-  std::vector<Record*> ProcItinList =
-    Records.getAllDerivedDefinitions("ProcessorItineraries");
-
-  // If just no itinerary then don't bother
-  if (ProcItinList.size() < 2) return;
+void SubtargetEmitter::
+EmitStageAndOperandCycleData(raw_ostream &OS,
+                             std::vector<std::vector<InstrItinerary> >
+                               &ProcItinLists) {
 
   // Emit functional units for all the itineraries.
-  for (unsigned i = 0, N = ProcItinList.size(); i < N; ++i) {
-    // Next record
-    Record *Proc = ProcItinList[i];
-    std::vector<Record*> FUs = Proc->getValueAsListOfDefs("FU");
+  for (CodeGenSchedModels::ProcIter PI = SchedModels.procModelBegin(),
+         PE = SchedModels.procModelEnd(); PI != PE; ++PI) {
+    std::vector<Record*> FUs = PI->ItinsDef->getValueAsListOfDefs("FU");
     if (FUs.empty())
       continue;
 
-    const std::string &Name = Proc->getName();
-    OS << "\n// Functional units for itineraries \"" << Name << "\"\n"
+    const std::string &Name = PI->ItinsDef->getName();
+    OS << "\n// Functional units for \"" << Name << "\"\n"
        << "namespace " << Name << "FU {\n";
 
     for (unsigned j = 0, FUN = FUs.size(); j < FUN; ++j)
@@ -384,7 +354,7 @@ void SubtargetEmitter::EmitStageAndOperandCycleData(raw_ostream &OS,
 
     OS << "}\n";
 
-    std::vector<Record*> BPs = Proc->getValueAsListOfDefs("BP");
+    std::vector<Record*> BPs = PI->ItinsDef->getValueAsListOfDefs("BP");
     if (BPs.size()) {
       OS << "\n// Pipeline forwarding pathes for itineraries \"" << Name
         << "\"\n" << "namespace " << Name << "Bypass {\n";
@@ -413,47 +383,54 @@ void SubtargetEmitter::EmitStageAndOperandCycleData(raw_ostream &OS,
     "ForwardingPaths[] = {\n";
   BypassTable += " 0, // No itinerary\n";
 
+  // For each Itinerary across all processors, add a unique entry to the stages,
+  // operand cycles, and pipepine bypess tables. Then add the new Itinerary
+  // object with computed offsets to the ProcItinLists result.
   unsigned StageCount = 1, OperandCycleCount = 1;
   std::map<std::string, unsigned> ItinStageMap, ItinOperandMap;
-  for (unsigned i = 0, N = ProcItinList.size(); i < N; i++) {
-    // Next record
-    Record *Proc = ProcItinList[i];
-
-    // Get processor itinerary name
-    const std::string &Name = Proc->getName();
-
-    // Get itinerary data list
-    std::vector<Record*> ItinDataList = Proc->getValueAsListOfDefs("IID");
-    std::vector<InstrItinerary> ItinList;
-
-    // Add an empty itinerary.
-    if (ItinDataList.empty()) {
-      ProcList.push_back(ItinList);
+  for (CodeGenSchedModels::ProcIter PI = SchedModels.procModelBegin(),
+         PE = SchedModels.procModelEnd(); PI != PE; ++PI) {
+    const CodeGenProcModel &ProcModel = *PI;
+
+    // Add process itinerary to the list.
+    ProcItinLists.resize(ProcItinLists.size()+1);
+
+    // If this processor defines no itineraries, then leave the itinerary list
+    // empty.
+    std::vector<InstrItinerary> &ItinList = ProcItinLists.back();
+    if (ProcModel.ItinDefList.empty())
       continue;
-    }
 
-    // Expand processor itinerary to cover all itinerary classes
-    ItinList.resize(NItinClasses);
+    // Reserve index==0 for NoItinerary.
+    ItinList.resize(SchedModels.numItineraryClasses()+1);
+
+    const std::string &Name = ProcModel.ItinsDef->getName();
 
     // For each itinerary data
-    for (unsigned j = 0, M = ItinDataList.size(); j < M; j++) {
+    for (unsigned SchedClassIdx = 0,
+           SchedClassEnd = ProcModel.ItinDefList.size();
+         SchedClassIdx < SchedClassEnd; ++SchedClassIdx) {
+
       // Next itinerary data
-      Record *ItinData = ItinDataList[j];
+      Record *ItinData = ProcModel.ItinDefList[SchedClassIdx];
 
       // Get string and stage count
      std::string ItinStageString;
-      unsigned NStages;
-      FormItineraryStageString(Name, ItinData, ItinStageString, NStages);
+      unsigned NStages = 0;
+      if (ItinData)
+        FormItineraryStageString(Name, ItinData, ItinStageString, NStages);
 
       // Get string and operand cycle count
       std::string ItinOperandCycleString;
-      unsigned NOperandCycles;
+      unsigned NOperandCycles = 0;
+      std::string ItinBypassString;
+      if (ItinData) {
        FormItineraryOperandCycleString(ItinData, ItinOperandCycleString,
                                        NOperandCycles);
-      std::string ItinBypassString;
        FormItineraryBypassString(Name, ItinData, ItinBypassString,
                                  NOperandCycles);
+      }
 
       // Check to see if stage already exists and create if it doesn't
       unsigned FindStage = 0;
@@ -493,33 +470,26 @@ void SubtargetEmitter::EmitStageAndOperandCycleData(raw_ostream &OS,
         }
       }
 
-      // Locate where to inject into processor itinerary table
-      const std::string &Name = ItinData->getValueAsDef("TheClass")->getName();
-      unsigned Find = ItinClassesMap[Name];
-
       // Set up itinerary as location and location + stage count
-      int NumUOps = ItinData->getValueAsInt("NumMicroOps");
+      int NumUOps = ItinData ? ItinData->getValueAsInt("NumMicroOps") : 0;
       InstrItinerary Intinerary = { NumUOps, FindStage, FindStage + NStages,
                                     FindOperandCycle,
                                     FindOperandCycle + NOperandCycles};
 
      // Inject - empty slots will be 0, 0
-      ItinList[Find] = Intinerary;
+      ItinList[SchedClassIdx] = Intinerary;
     }
-
-    // Add process itinerary to list
-    ProcList.push_back(ItinList);
   }
 
   // Closing stage
-  StageTable += " { 0, 0, 0, llvm::InstrStage::Required } // End itinerary\n";
+  StageTable += " { 0, 0, 0, llvm::InstrStage::Required } // End stages\n";
   StageTable += "};\n";
 
   // Closing operand cycles
-  OperandCycleTable += " 0 // End itinerary\n";
+  OperandCycleTable += " 0 // End operand cycles\n";
   OperandCycleTable += "};\n";
 
-  BypassTable += " 0 // End itinerary\n";
+  BypassTable += " 0 // End bypass tables\n";
   BypassTable += "};\n";
 
   // Emit tables.
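
The table construction above relies on a simple uniquing trick: each stage or operand-cycle sequence is rendered to a string, the string is memoized together with its table offset, identical sequences from different processors are emitted only once, and every InstrItinerary then stores just offsets into the shared tables. A stripped-down sketch of that pattern, with illustrative names that are not from this patch:

    #include <map>
    #include <string>

    // Returns the offset of 'Rows' (a pre-rendered block of NRows table rows),
    // appending it to 'Table' only the first time the block is seen.
    // Offset 0 is reserved to mean "no entry", matching the emitter above.
    static unsigned findOrAddRows(std::map<std::string, unsigned> &SeenMap,
                                  const std::string &Rows, unsigned NRows,
                                  std::string &Table, unsigned &NextIndex) {
      if (NRows == 0)
        return 0;
      unsigned &Offset = SeenMap[Rows]; // value-initialized to 0 when new
      if (Offset == 0) {                // first occurrence: append and record
        Offset = NextIndex;
        NextIndex += NRows;
        Table += Rows;
      }
      return Offset;
    }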
@@ -528,89 +498,91 @@ void SubtargetEmitter::EmitStageAndOperandCycleData(raw_ostream &OS,
   OS << BypassTable;
 }
 
-void SubtargetEmitter::EmitItineraryProp(raw_ostream &OS, const Record *R,
-                                         const char *Name, char Separator) {
-  OS << " ";
-  int V = R->getValueAsInt(Name);
-  if (V >= 0)
-    OS << V << Separator << " // " << Name;
-  else
-    OS << "InstrItineraryProps::Default" << Name << Separator;
-  OS << '\n';
-}
-
 //
-// EmitProcessorData - Generate data for processor itineraries.
+// EmitProcessorData - Generate data for processor itineraries that were
+// computed during EmitStageAndOperandCycleData(). ProcItinLists lists all
+// Itineraries for each processor. The Itinerary lists are indexed on
+// CodeGenSchedClass::Index.
 //
 void SubtargetEmitter::
-EmitProcessorData(raw_ostream &OS,
-                  std::vector<Record*> &ItinClassList,
-                  std::vector<std::vector<InstrItinerary> > &ProcList) {
-
-  // Get an iterator for processor itinerary stages
+EmitItineraries(raw_ostream &OS,
+                std::vector<std::vector<InstrItinerary> > &ProcItinLists) {
+
+  // For each processor's machine model
   std::vector<std::vector<InstrItinerary> >::iterator
-      ProcListIter = ProcList.begin();
-
-  // For each processor itinerary
-  std::vector<Record*> Itins =
-    Records.getAllDerivedDefinitions("ProcessorItineraries");
-  for (unsigned i = 0, N = Itins.size(); i < N; i++) {
-    // Next record
-    Record *Itin = Itins[i];
+      ProcItinListsIter = ProcItinLists.begin();
+  for (CodeGenSchedModels::ProcIter PI = SchedModels.procModelBegin(),
+         PE = SchedModels.procModelEnd(); PI != PE; ++PI) {
+    Record *ItinsDef = PI->ItinsDef;
 
     // Get processor itinerary name
-    const std::string &Name = Itin->getName();
-
-    // Skip default
-    // Begin processor itinerary properties
+    const std::string &Name = ItinsDef->getName();
+
+    // Get the itinerary list for the processor.
+    assert(ProcItinListsIter != ProcItinLists.end() && "bad iterator");
+    std::vector<InstrItinerary> &ItinList = *ProcItinListsIter++;
+
     OS << "\n";
-    OS << "static const llvm::InstrItineraryProps " << Name << "Props(\n";
-    EmitItineraryProp(OS, Itin, "IssueWidth", ',');
-    EmitItineraryProp(OS, Itin, "MinLatency", ',');
-    EmitItineraryProp(OS, Itin, "LoadLatency", ',');
-    EmitItineraryProp(OS, Itin, "HighLatency", ' ');
-    OS << ");\n";
-
-    // For each itinerary class
-    std::vector<InstrItinerary> &ItinList = *ProcListIter++;
-    if (!ItinList.empty()) {
-      assert(ItinList.size() == ItinClassList.size() && "bad itinerary");
-
-      // Begin processor itinerary table
-      OS << "\n";
-      OS << "static const llvm::InstrItinerary " << Name << "Entries"
-         << "[] = {\n";
-
-      for (unsigned j = 0, M = ItinList.size(); j < M; ++j) {
-        InstrItinerary &Intinerary = ItinList[j];
-
-        // Emit in the form of
-        // { firstStage, lastStage, firstCycle, lastCycle } // index
-        if (Intinerary.FirstStage == 0) {
-          OS << " { 1, 0, 0, 0, 0 }";
-        } else {
-          OS << " { " <<
-            Intinerary.NumMicroOps << ", " <<
-            Intinerary.FirstStage << ", " <<
-            Intinerary.LastStage << ", " <<
-            Intinerary.FirstOperandCycle << ", " <<
-            Intinerary.LastOperandCycle << " }";
-        }
-        OS << ", // " << j << " " << ItinClassList[j]->getName() << "\n";
-      }
-
-      // End processor itinerary table
-      OS << " { 1, ~0U, ~0U, ~0U, ~0U } // end marker\n";
-      OS << "};\n";
-    }
-
-    OS << '\n';
-    OS << "static const llvm::InstrItinerarySubtargetValue "
-       << Name << " = {\n";
-    OS << " &" << Name << "Props,\n";
-    if (ItinList.empty())
-      OS << " 0\n";
-    else
-      OS << " " << Name << "Entries\n";
-    OS << "};\n";
+    OS << "static const llvm::InstrItinerary ";
+    if (ItinList.empty()) {
+      OS << '*' << Name << " = 0;\n";
+      continue;
+    }
+
+    // Begin processor itinerary table
+    OS << Name << "[] = {\n";
+
+    // For each itinerary class in CodeGenSchedClass::Index order.
+    for (unsigned j = 0, M = ItinList.size(); j < M; ++j) {
+      InstrItinerary &Intinerary = ItinList[j];
+
+      // Emit Itinerary in the form of
+      // { firstStage, lastStage, firstCycle, lastCycle } // index
+      OS << " { " <<
+        Intinerary.NumMicroOps << ", " <<
+        Intinerary.FirstStage << ", " <<
+        Intinerary.LastStage << ", " <<
+        Intinerary.FirstOperandCycle << ", " <<
+        Intinerary.LastOperandCycle << " }" <<
+        ", // " << j << " " << SchedModels.getSchedClass(j).Name << "\n";
+    }
+
+    // End processor itinerary table
+    OS << " { 0, ~0U, ~0U, ~0U, ~0U } // end marker\n";
+    OS << "};\n";
   }
 }
+
+// Emit either the the value defined in the TableGen Record, or the default
+// value defined in the C++ header. The Record is null if the processor does not
+// define a model.
+void SubtargetEmitter::EmitProcessorProp(raw_ostream &OS, const Record *R,
+                                         const char *Name, char Separator) {
+  OS << " ";
+  int V = R ? R->getValueAsInt(Name) : -1;
+  if (V >= 0)
+    OS << V << Separator << " // " << Name;
+  else
+    OS << "MCSchedModel::Default" << Name << Separator;
+  OS << '\n';
+}
+
+void SubtargetEmitter::EmitProcessorModels(raw_ostream &OS) {
+  // For each processor model.
+  for (CodeGenSchedModels::ProcIter PI = SchedModels.procModelBegin(),
+         PE = SchedModels.procModelEnd(); PI != PE; ++PI) {
+    // Skip default
+    // Begin processor itinerary properties
+    OS << "\n";
+    OS << "static const llvm::MCSchedModel " << PI->ModelName << "(\n";
+    EmitProcessorProp(OS, PI->ModelDef, "IssueWidth", ',');
+    EmitProcessorProp(OS, PI->ModelDef, "MinLatency", ',');
+    EmitProcessorProp(OS, PI->ModelDef, "LoadLatency", ',');
+    EmitProcessorProp(OS, PI->ModelDef, "HighLatency", ',');
+    if (SchedModels.hasItineraryClasses())
+      OS << " " << PI->ItinsDef->getName();
+    else
+      OS << " 0";
+    OS << ");\n";
+  }
+}
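
For a processor that defines itineraries, the two routines above plausibly emit output of the following shape into the generated subtarget file (the record, model, and class names here are invented; properties left unset in the TableGen model fall back to the MCSchedModel defaults):

    // Hypothetical generated output for one processor model.
    static const llvm::InstrItinerary G5Itineraries[] = {
      { 0, 0, 0, 0, 0 }, // 0 NoItinerary
      { 1, 1, 2, 1, 3 }, // 1 IIC_IntSimple
      { 0, ~0U, ~0U, ~0U, ~0U } // end marker
    };

    static const llvm::MCSchedModel G5Model(
      2, // IssueWidth
      MCSchedModel::DefaultMinLatency,
      MCSchedModel::DefaultLoadLatency,
      MCSchedModel::DefaultHighLatency,
      G5Itineraries);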
@@ -627,7 +599,7 @@ void SubtargetEmitter::EmitProcessorLookup(raw_ostream &OS) {
   OS << "\n";
   OS << "// Sorted (by key) array of itineraries for CPU subtype.\n"
      << "extern const llvm::SubtargetInfoKV "
-     << Target << "ProcItinKV[] = {\n";
+     << Target << "ProcSchedKV[] = {\n";
 
   // For each processor
   for (unsigned i = 0, N = ProcessorList.size(); i < N;) {
@@ -635,13 +607,13 @@ void SubtargetEmitter::EmitProcessorLookup(raw_ostream &OS) {
     Record *Processor = ProcessorList[i];
 
     const std::string &Name = Processor->getValueAsString("Name");
-    const std::string &ProcItin =
-      Processor->getValueAsDef("ProcItin")->getName();
+    const std::string &ProcModelName =
+      SchedModels.getProcModel(Processor).ModelName;
 
     // Emit as { "cpu", procinit },
     OS << " { "
        << "\"" << Name << "\", "
-       << "(void *)&" << ProcItin;
+       << "(void *)&" << ProcModelName;
 
     OS << " }";
@@ -656,32 +628,20 @@ void SubtargetEmitter::EmitProcessorLookup(raw_ostream &OS) {
 }
 
 //
-// EmitData - Emits all stages and itineries, folding common patterns.
+// EmitSchedModel - Emits all scheduling model tables, folding common patterns.
 //
-void SubtargetEmitter::EmitData(raw_ostream &OS) {
-  std::map<std::string, unsigned> ItinClassesMap;
-  // Gather and sort all itinerary classes
-  std::vector<Record*> ItinClassList =
-    Records.getAllDerivedDefinitions("InstrItinClass");
-  std::sort(ItinClassList.begin(), ItinClassList.end(), LessRecord());
-
-  // Enumerate all the itinerary classes
-  unsigned NItinClasses = CollectAllItinClasses(OS, ItinClassesMap,
-                                                ItinClassList);
-  // Make sure the rest is worth the effort
-  HasItineraries = NItinClasses != 1; // Ignore NoItinerary.
-
-  if (HasItineraries) {
-    std::vector<std::vector<InstrItinerary> > ProcList;
-
-    // Emit the stage data
-    EmitStageAndOperandCycleData(OS, NItinClasses, ItinClassesMap,
-                                 ItinClassList, ProcList);
-
-    // Emit the processor itinerary data
-    EmitProcessorData(OS, ItinClassList, ProcList);
-
-    // Emit the processor lookup data
-    EmitProcessorLookup(OS);
-  }
-}
+void SubtargetEmitter::EmitSchedModel(raw_ostream &OS) {
+  if (SchedModels.hasItineraryClasses()) {
+    std::vector<std::vector<InstrItinerary> > ProcItinLists;
+    // Emit the stage data
+    EmitStageAndOperandCycleData(OS, ProcItinLists);
+    EmitItineraries(OS, ProcItinLists);
+  }
+  // Emit the processor machine model
+  EmitProcessorModels(OS);
+  // Emit the processor lookup data
+  EmitProcessorLookup(OS);
+}
 
 //
 // ParseFeaturesFunction - Produces a subtarget specific function for parsing
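
With the renaming above, the lookup table emitted by EmitProcessorLookup binds each CPU name to its machine model rather than to a bare itinerary record, and run() below passes this ProcSchedKV table to the generated subtarget constructor. A sketch of its generated shape, with placeholder target, CPU, and model names:

    // Hypothetical generated lookup table; each entry pairs a CPU string
    // with a pointer to the corresponding scheduling model.
    extern const llvm::SubtargetInfoKV MyTargetProcSchedKV[] = {
      { "cortex-a8", (void *)&CortexA8Model },
      { "cortex-a9", (void *)&CortexA9Model }
    };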
@@ -734,8 +694,6 @@ void SubtargetEmitter::ParseFeaturesFunction(raw_ostream &OS,
 // SubtargetEmitter::run - Main subtarget enumeration emitter.
 //
 void SubtargetEmitter::run(raw_ostream &OS) {
-  Target = CodeGenTarget(Records).getName();
-
   emitSourceFileHeader("Subtarget Enumeration Source Fragment", OS);
 
   OS << "\n#ifdef GET_SUBTARGETINFO_ENUM\n";
@@ -757,7 +715,7 @@ void SubtargetEmitter::run(raw_ostream &OS) {
   OS << "\n";
   unsigned NumProcs = CPUKeyValues(OS);
   OS << "\n";
-  EmitData(OS);
+  EmitSchedModel(OS);
   OS << "\n";
 #if 0
   OS << "}\n";
@@ -776,8 +734,8 @@ void SubtargetEmitter::run(raw_ostream &OS) {
     OS << Target << "SubTypeKV, ";
   else
     OS << "0, ";
-  if (HasItineraries) {
-    OS << Target << "ProcItinKV, "
+  if (SchedModels.hasItineraryClasses()) {
+    OS << Target << "ProcSchedKV, "
        << Target << "Stages, "
        << Target << "OperandCycles, "
        << Target << "ForwardingPaths, ";
@@ -822,8 +780,8 @@ void SubtargetEmitter::run(raw_ostream &OS) {
   OS << "namespace llvm {\n";
   OS << "extern const llvm::SubtargetFeatureKV " << Target << "FeatureKV[];\n";
   OS << "extern const llvm::SubtargetFeatureKV " << Target << "SubTypeKV[];\n";
-  if (HasItineraries) {
-    OS << "extern const llvm::SubtargetInfoKV " << Target << "ProcItinKV[];\n";
+  if (SchedModels.hasItineraryClasses()) {
+    OS << "extern const llvm::SubtargetInfoKV " << Target << "ProcSchedKV[];\n";
     OS << "extern const llvm::InstrStage " << Target << "Stages[];\n";
     OS << "extern const unsigned " << Target << "OperandCycles[];\n";
     OS << "extern const unsigned " << Target << "ForwardingPaths[];\n";
@@ -841,8 +799,8 @@ void SubtargetEmitter::run(raw_ostream &OS) {
     OS << Target << "SubTypeKV, ";
   else
     OS << "0, ";
-  if (HasItineraries) {
-    OS << Target << "ProcItinKV, "
+  if (SchedModels.hasItineraryClasses()) {
+    OS << Target << "ProcSchedKV, "
        << Target << "Stages, "
       << Target << "OperandCycles, "
       << Target << "ForwardingPaths, ";
@@ -857,7 +815,8 @@ void SubtargetEmitter::run(raw_ostream &OS) {
 namespace llvm {
 
 void EmitSubtarget(RecordKeeper &RK, raw_ostream &OS) {
-  SubtargetEmitter(RK).run(OS);
+  CodeGenTarget CGTarget(RK);
+  SubtargetEmitter(RK, CGTarget).run(OS);
 }
 
 } // End llvm namespace