[llvm-mca] Introduce the ExecuteStage (was originally the Scheduler class).
Summary: This patch transforms the Scheduler class into the ExecuteStage. Most of the logic remains.

Reviewers: andreadb, RKSimon, courbet

Reviewed By: andreadb

Subscribers: mgorny, javed.absar, tschuett, gbedwell, llvm-commits

Differential Revision: https://reviews.llvm.org/D47246

llvm-svn: 334679
parent 239452ca3e
commit 488ac4cb39
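The shape of the change is easiest to see in Backend::runCycle (first hunk below): the per-cycle work that used to go through Scheduler::cycleEvent is now driven through an ExecuteStage that is walked like the other stages. The following is a minimal standalone sketch of that stage-driven cycle, not the actual llvm-mca code; the Stage interface and stage names mirror the diff, while the toy stage bodies and main() are invented purely for illustration.

#include <iostream>

// Minimal stand-ins for mca::InstRef and mca::Stage; the real interfaces live
// in llvm-mca and carry much more state.
struct InstRef {
  bool Valid = false;
  bool isValid() const { return Valid; }
};

struct Stage {
  virtual ~Stage() = default;
  virtual void preExecute(const InstRef &) {}
  virtual void postExecute(const InstRef &) {}
  // Returns false when this stage cannot accept more work this cycle.
  virtual bool execute(InstRef &) { return true; }
};

// Toy stages, only to make the sketch runnable.
struct FetchStage : Stage {
  unsigned Remaining = 3; // pretend three instructions are left to fetch
  bool execute(InstRef &IR) override {
    IR.Valid = Remaining > 0;
    if (Remaining)
      --Remaining;
    return IR.isValid();
  }
};
struct DispatchStage : Stage {};
struct ExecuteStage : Stage {
  void preExecute(const InstRef &) override {
    // In llvm-mca this reclaims scheduler resources, updates the wait/ready/
    // issued queues, and issues whatever is ready (see ExecuteStage.cpp below).
    std::cout << "execute-stage maintenance\n";
  }
};
struct RetireStage : Stage {};

// Mirrors the control flow of Backend::runCycle after this patch.
void runCycle(RetireStage &Retire, DispatchStage &Dispatch,
              ExecuteStage &Execute, FetchStage &Fetch) {
  InstRef IR;
  Retire.preExecute(IR);
  Dispatch.preExecute(IR);
  Execute.preExecute(IR); // was: HWS->cycleEvent()

  while (Fetch.execute(IR)) {
    if (!Dispatch.execute(IR))
      break;
    Execute.execute(IR);
    Fetch.postExecute(IR);
  }
}

int main() {
  RetireStage Retire;
  DispatchStage Dispatch;
  ExecuteStage Execute;
  FetchStage Fetch;
  runCycle(Retire, Dispatch, Execute, Fetch); // one simulated cycle
}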
@@ -41,14 +41,13 @@ void Backend::runCycle(unsigned Cycle) {
  InstRef IR;
  Retire->preExecute(IR);
  Dispatch->preExecute(IR);

  // This will execute scheduled instructions.
  HWS->cycleEvent(); // TODO: This will eventually be stage-ified.
  Execute->preExecute(IR);

  // Fetch instructions and dispatch them to the hardware.
  while (Fetch->execute(IR)) {
    if (!Dispatch->execute(IR))
      break;
    Execute->execute(IR);
    Fetch->postExecute(IR);
  }

@@ -16,6 +16,7 @@
#define LLVM_TOOLS_LLVM_MCA_BACKEND_H

#include "DispatchStage.h"
#include "ExecuteStage.h"
#include "FetchStage.h"
#include "InstrBuilder.h"
#include "RegisterFile.h"

@@ -57,12 +58,13 @@ class Backend {
  // The following are the simulated hardware components of the backend.
  RetireControlUnit RCU;
  RegisterFile PRF;
  Scheduler HWS;

  /// TODO: Eventually this will become a list of unique Stage* that this
  /// backend pipeline executes.
  std::unique_ptr<FetchStage> Fetch;
  std::unique_ptr<Scheduler> HWS;
  std::unique_ptr<DispatchStage> Dispatch;
  std::unique_ptr<ExecuteStage> Execute;
  std::unique_ptr<RetireStage> Retire;

  std::set<HWEventListener *> Listeners;

@@ -78,13 +80,13 @@ public:
          unsigned StoreQueueSize = 0, bool AssumeNoAlias = false)
      : RCU(Subtarget.getSchedModel()),
        PRF(Subtarget.getSchedModel(), MRI, RegisterFileSize),
        HWS(Subtarget.getSchedModel(), LoadQueueSize, StoreQueueSize,
            AssumeNoAlias),
        Fetch(std::move(InitialStage)),
        HWS(llvm::make_unique<Scheduler>(this, Subtarget.getSchedModel(), RCU,
                                         LoadQueueSize, StoreQueueSize,
                                         AssumeNoAlias)),
        Dispatch(llvm::make_unique<DispatchStage>(
            this, Subtarget, MRI, RegisterFileSize, DispatchWidth, RCU, PRF,
            HWS.get())),
            HWS)),
        Execute(llvm::make_unique<ExecuteStage>(this, RCU, HWS)),
        Retire(llvm::make_unique<RetireStage>(this, RCU, PRF)), Cycles(0) {}

  void run();

@@ -15,6 +15,7 @@ add_llvm_tool(llvm-mca
  CodeRegion.cpp
  DispatchStage.cpp
  DispatchStatistics.cpp
  ExecuteStage.cpp
  FetchStage.cpp
  HWEventListener.cpp
  InstrBuilder.cpp

@@ -60,7 +60,11 @@ bool DispatchStage::checkRCU(const InstRef &IR) {
}

bool DispatchStage::checkScheduler(const InstRef &IR) {
  return SC->canBeDispatched(IR);
  HWStallEvent::GenericEventType Event;
  const bool Ready = SC.canBeDispatched(IR, Event);
  if (!Ready)
    Owner->notifyStallEvent(HWStallEvent(Event, IR));
  return Ready;
}

void DispatchStage::updateRAWDependencies(ReadState &RS,

@@ -129,11 +133,6 @@ void DispatchStage::dispatch(InstRef IR) {

  // Notify listeners of the "instruction dispatched" event.
  notifyInstructionDispatched(IR, RegisterFiles);

  // Now move the instruction into the scheduler's queue.
  // The scheduler is responsible for checking if this is a zero-latency
  // instruction that doesn't consume pipeline/scheduler resources.
  SC->scheduleInstruction(IR);
}

void DispatchStage::preExecute(const InstRef &IR) {

@@ -59,11 +59,11 @@ class DispatchStage : public Stage {
  unsigned DispatchWidth;
  unsigned AvailableEntries;
  unsigned CarryOver;
  Scheduler *SC;
  Backend *Owner;
  const llvm::MCSubtargetInfo &STI;
  RetireControlUnit &RCU;
  RegisterFile &PRF;
  Scheduler &SC;

  bool checkRCU(const InstRef &IR);
  bool checkPRF(const InstRef &IR);

@@ -93,9 +93,9 @@ public:
  DispatchStage(Backend *B, const llvm::MCSubtargetInfo &Subtarget,
                const llvm::MCRegisterInfo &MRI, unsigned RegisterFileSize,
                unsigned MaxDispatchWidth, RetireControlUnit &R,
                RegisterFile &F, Scheduler *Sched)
                RegisterFile &F, Scheduler &Sched)
      : DispatchWidth(MaxDispatchWidth), AvailableEntries(MaxDispatchWidth),
        CarryOver(0U), SC(Sched), Owner(B), STI(Subtarget), RCU(R), PRF(F) {}
        CarryOver(0U), Owner(B), STI(Subtarget), RCU(R), PRF(F), SC(Sched) {}

  virtual bool isReady() const override final { return isRCUEmpty(); }
  virtual void preExecute(const InstRef &IR) override final;

@@ -0,0 +1,205 @@
//===---------------------- ExecuteStage.cpp --------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
///
/// This file defines the execution stage of an instruction pipeline.
///
/// The ExecuteStage is responsible for managing the hardware scheduler
/// and issuing notifications that an instruction has been executed.
///
//===----------------------------------------------------------------------===//

#include "ExecuteStage.h"
#include "Backend.h"
#include "Scheduler.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Debug.h"

#define DEBUG_TYPE "llvm-mca"

namespace mca {

using namespace llvm;

// Reclaim the simulated resources used by the scheduler.
void ExecuteStage::reclaimSchedulerResources() {
  SmallVector<ResourceRef, 8> ResourcesFreed;
  HWS.reclaimSimulatedResources(ResourcesFreed);
  for (const ResourceRef &RR : ResourcesFreed)
    notifyResourceAvailable(RR);
}

// Update the scheduler's instruction queues.
void ExecuteStage::updateSchedulerQueues() {
  SmallVector<InstRef, 4> InstructionIDs;
  HWS.updateIssuedQueue(InstructionIDs);
  for (const InstRef &IR : InstructionIDs)
    notifyInstructionExecuted(IR);
  InstructionIDs.clear();

  HWS.updatePendingQueue(InstructionIDs);
  for (const InstRef &IR : InstructionIDs)
    notifyInstructionReady(IR);
}

// Issue instructions that are waiting in the scheduler's ready queue.
void ExecuteStage::issueReadyInstructions() {
  SmallVector<InstRef, 4> InstructionIDs;
  InstRef IR = HWS.select();
  while (IR.isValid()) {
    SmallVector<std::pair<ResourceRef, double>, 4> Used;
    HWS.issueInstruction(IR, Used);

    // Reclaim instruction resources and perform notifications.
    const InstrDesc &Desc = IR.getInstruction()->getDesc();
    notifyReleasedBuffers(Desc.Buffers);
    notifyInstructionIssued(IR, Used);
    if (IR.getInstruction()->isExecuted())
      notifyInstructionExecuted(IR);

    // Instructions that have been issued during this cycle might have unblocked
    // other dependent instructions. Dependent instructions may be issued during
    // this same cycle if operands have ReadAdvance entries. Promote those
    // instructions to the ReadyQueue and tell to the caller that we need
    // another round of 'issue()'.
    HWS.promoteToReadyQueue(InstructionIDs);
    for (const InstRef &I : InstructionIDs)
      notifyInstructionReady(I);
    InstructionIDs.clear();

    // Select the next instruction to issue.
    IR = HWS.select();
  }
}

// The following routine is the maintenance routine of the ExecuteStage.
// It is responsible for updating the hardware scheduler (HWS), including
// reclaiming the HWS's simulated hardware resources, as well as updating the
// HWS's queues.
//
// This routine also processes the instructions that are ready for issuance.
// These instructions are managed by the HWS's ready queue and can be accessed
// via the Scheduler::select() routine.
//
// Notifications are issued to this stage's listeners when instructions are
// moved between the HWS's queues. In particular, when an instruction becomes
// ready or executed.
void ExecuteStage::preExecute(const InstRef &Unused) {
  reclaimSchedulerResources();
  updateSchedulerQueues();
  issueReadyInstructions();
}

// Schedule the instruction for execution on the hardware.
bool ExecuteStage::execute(InstRef &IR) {
#ifndef NDEBUG
  // Ensure that the HWS has not stored this instruction in its queues.
  HWS.sanityCheck(IR);
#endif
  // Reserve a slot in each buffered resource. Also, mark units with
  // BufferSize=0 as reserved. Resources with a buffer size of zero will only
  // be released after MCIS is issued, and all the ResourceCycles for those
  // units have been consumed.
  const InstrDesc &Desc = IR.getInstruction()->getDesc();
  HWS.reserveBuffers(Desc.Buffers);
  notifyReservedBuffers(Desc.Buffers);

  // Obtain a slot in the LSU.
  if (!HWS.reserveResources(IR))
    return false;

  // If we did not return early, then the scheduler is ready for execution.
  notifyInstructionReady(IR);

  // Don't add a zero-latency instruction to the Wait or Ready queue.
  // A zero-latency instruction doesn't consume any scheduler resources. That is
  // because it doesn't need to be executed, and it is often removed at register
  // renaming stage. For example, register-register moves are often optimized at
  // register renaming stage by simply updating register aliases. On some
  // targets, zero-idiom instructions (for example: a xor that clears the value
  // of a register) are treated specially, and are often eliminated at register
  // renaming stage.
  //
  // Instructions that use an in-order dispatch/issue processor resource must be
  // issued immediately to the pipeline(s). Any other in-order buffered
  // resources (i.e. BufferSize=1) is consumed.
  //
  // If we cannot issue immediately, the HWS will add IR to its ready queue for
  // execution later, so we must return early here.
  if (!HWS.issueImmediately(IR))
    return true;

  LLVM_DEBUG(dbgs() << "[SCHEDULER] Instruction " << IR
                    << " issued immediately\n");

  // Issue IR. The resources for this issuance will be placed in 'Used.'
  SmallVector<std::pair<ResourceRef, double>, 4> Used;
  HWS.issueInstruction(IR, Used);

  // Perform notifications.
  notifyReleasedBuffers(Desc.Buffers);
  notifyInstructionIssued(IR, Used);
  if (IR.getInstruction()->isExecuted())
    notifyInstructionExecuted(IR);

  return true;
}

void ExecuteStage::notifyInstructionExecuted(const InstRef &IR) {
  HWS.onInstructionExecuted(IR);
  LLVM_DEBUG(dbgs() << "[E] Instruction Executed: " << IR << '\n');
  Owner->notifyInstructionEvent(
      HWInstructionEvent(HWInstructionEvent::Executed, IR));
  RCU.onInstructionExecuted(IR.getInstruction()->getRCUTokenID());
}

void ExecuteStage::notifyInstructionReady(const InstRef &IR) {
  LLVM_DEBUG(dbgs() << "[E] Instruction Ready: " << IR << '\n');
  Owner->notifyInstructionEvent(
      HWInstructionEvent(HWInstructionEvent::Ready, IR));
}

void ExecuteStage::notifyResourceAvailable(const ResourceRef &RR) {
  Owner->notifyResourceAvailable(RR);
}

void ExecuteStage::notifyInstructionIssued(
    const InstRef &IR, ArrayRef<std::pair<ResourceRef, double>> Used) {
  LLVM_DEBUG({
    dbgs() << "[E] Instruction Issued: " << IR << '\n';
    for (const std::pair<ResourceRef, unsigned> &Resource : Used) {
      dbgs() << "[E] Resource Used: [" << Resource.first.first << '.'
             << Resource.first.second << "]\n";
      dbgs() << " cycles: " << Resource.second << '\n';
    }
  });
  Owner->notifyInstructionEvent(HWInstructionIssuedEvent(IR, Used));
}

void ExecuteStage::notifyReservedBuffers(ArrayRef<uint64_t> Buffers) {
  if (Buffers.empty())
    return;

  SmallVector<unsigned, 4> BufferIDs(Buffers.begin(), Buffers.end());
  std::transform(Buffers.begin(), Buffers.end(), BufferIDs.begin(),
                 [&](uint64_t Op) { return HWS.getResourceID(Op); });
  Owner->notifyReservedBuffers(BufferIDs);
}

void ExecuteStage::notifyReleasedBuffers(ArrayRef<uint64_t> Buffers) {
  if (Buffers.empty())
    return;

  SmallVector<unsigned, 4> BufferIDs(Buffers.begin(), Buffers.end());
  std::transform(Buffers.begin(), Buffers.end(), BufferIDs.begin(),
                 [&](uint64_t Op) { return HWS.getResourceID(Op); });
  Owner->notifyReleasedBuffers(BufferIDs);
}

} // namespace mca

@@ -0,0 +1,67 @@
//===---------------------- ExecuteStage.h ----------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
///
/// This file defines the execution stage of an instruction pipeline.
///
/// The ExecuteStage is responsible for managing the hardware scheduler
/// and issuing notifications that an instruction has been executed.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_TOOLS_LLVM_MCA_EXECUTE_STAGE_H
#define LLVM_TOOLS_LLVM_MCA_EXECUTE_STAGE_H

#include "Instruction.h"
#include "RetireControlUnit.h"
#include "Scheduler.h"
#include "Stage.h"
#include "llvm/ADT/ArrayRef.h"

namespace mca {

class Backend;

class ExecuteStage : public Stage {
  // Owner will go away when we move listeners/eventing to the stages.
  Backend *Owner;
  RetireControlUnit &RCU;
  Scheduler &HWS;

  // The following routines are used to maintain the HWS.
  void reclaimSchedulerResources();
  void updateSchedulerQueues();
  void issueReadyInstructions();

public:
  ExecuteStage(Backend *B, RetireControlUnit &R, Scheduler &S)
      : Stage(), Owner(B), RCU(R), HWS(S) {}
  ExecuteStage(const ExecuteStage &Other) = delete;
  ExecuteStage &operator=(const ExecuteStage &Other) = delete;

  virtual void preExecute(const InstRef &IR) override final;
  virtual bool execute(InstRef &IR) override final;

  void
  notifyInstructionIssued(const InstRef &IR,
                          llvm::ArrayRef<std::pair<ResourceRef, double>> Used);
  void notifyInstructionExecuted(const InstRef &IR);
  void notifyInstructionReady(const InstRef &IR);
  void notifyResourceAvailable(const ResourceRef &RR);

  // Notify listeners that buffered resources were consumed.
  void notifyReservedBuffers(llvm::ArrayRef<uint64_t> Buffers);

  // Notify listeners that buffered resources were freed.
  void notifyReleasedBuffers(llvm::ArrayRef<uint64_t> Buffers);
};

} // namespace mca

#endif // LLVM_TOOLS_LLVM_MCA_EXECUTE_STAGE_H

@@ -15,11 +15,8 @@
#include "Backend.h"
#include "HWEventListener.h"
#include "Support.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

#define DEBUG_TYPE "llvm-mca"

namespace mca {

using namespace llvm;

@@ -226,93 +223,6 @@ void ResourceManager::cycleEvent(SmallVectorImpl<ResourceRef> &ResourcesFreed) {
    BusyResources.erase(RF);
}

void Scheduler::scheduleInstruction(InstRef &IR) {
  const unsigned Idx = IR.getSourceIndex();
  assert(WaitQueue.find(Idx) == WaitQueue.end());
  assert(ReadyQueue.find(Idx) == ReadyQueue.end());
  assert(IssuedQueue.find(Idx) == IssuedQueue.end());

  // Reserve a slot in each buffered resource. Also, mark units with
  // BufferSize=0 as reserved. Resources with a buffer size of zero will only
  // be released after MCIS is issued, and all the ResourceCycles for those
  // units have been consumed.
  const InstrDesc &Desc = IR.getInstruction()->getDesc();
  reserveBuffers(Desc.Buffers);
  notifyReservedBuffers(Desc.Buffers);

  // If necessary, reserve queue entries in the load-store unit (LSU).
  bool Reserved = LSU->reserve(IR);
  if (!IR.getInstruction()->isReady() || (Reserved && !LSU->isReady(IR))) {
    LLVM_DEBUG(dbgs() << "[SCHEDULER] Adding " << Idx
                      << " to the Wait Queue\n");
    WaitQueue[Idx] = IR.getInstruction();
    return;
  }
  notifyInstructionReady(IR);

  // Don't add a zero-latency instruction to the Wait or Ready queue.
  // A zero-latency instruction doesn't consume any scheduler resources. That is
  // because it doesn't need to be executed, and it is often removed at register
  // renaming stage. For example, register-register moves are often optimized at
  // register renaming stage by simply updating register aliases. On some
  // targets, zero-idiom instructions (for example: a xor that clears the value
  // of a register) are treated speacially, and are often eliminated at register
  // renaming stage.

  // Instructions that use an in-order dispatch/issue processor resource must be
  // issued immediately to the pipeline(s). Any other in-order buffered
  // resources (i.e. BufferSize=1) is consumed.

  if (!Desc.isZeroLatency() && !Resources->mustIssueImmediately(Desc)) {
    LLVM_DEBUG(dbgs() << "[SCHEDULER] Adding " << IR
                      << " to the Ready Queue\n");
    ReadyQueue[IR.getSourceIndex()] = IR.getInstruction();
    return;
  }

  LLVM_DEBUG(dbgs() << "[SCHEDULER] Instruction " << IR
                    << " issued immediately\n");
  // Release buffered resources and issue MCIS to the underlying pipelines.
  issueInstruction(IR);
}

void Scheduler::cycleEvent() {
  SmallVector<ResourceRef, 8> ResourcesFreed;
  Resources->cycleEvent(ResourcesFreed);

  for (const ResourceRef &RR : ResourcesFreed)
    notifyResourceAvailable(RR);

  SmallVector<InstRef, 4> InstructionIDs;
  updateIssuedQueue(InstructionIDs);
  for (const InstRef &IR : InstructionIDs)
    notifyInstructionExecuted(IR);
  InstructionIDs.clear();

  updatePendingQueue(InstructionIDs);
  for (const InstRef &IR : InstructionIDs)
    notifyInstructionReady(IR);
  InstructionIDs.clear();

  InstRef IR = select();
  while (IR.isValid()) {
    issueInstruction(IR);

    // Instructions that have been issued during this cycle might have unblocked
    // other dependent instructions. Dependent instructions may be issued during
    // this same cycle if operands have ReadAdvance entries. Promote those
    // instructions to the ReadyQueue and tell to the caller that we need
    // another round of 'issue()'.
    promoteToReadyQueue(InstructionIDs);
    for (const InstRef &I : InstructionIDs)
      notifyInstructionReady(I);
    InstructionIDs.clear();

    // Select the next instruction to issue.
    IR = select();
  }
}

#ifndef NDEBUG
void Scheduler::dump() const {
  dbgs() << "[SCHEDULER]: WaitQueue size is: " << WaitQueue.size() << '\n';

@@ -322,27 +232,27 @@ void Scheduler::dump() const {
}
#endif

bool Scheduler::canBeDispatched(const InstRef &IR) const {
  HWStallEvent::GenericEventType Type = HWStallEvent::Invalid;
bool Scheduler::canBeDispatched(const InstRef &IR,
                                HWStallEvent::GenericEventType &Event) const {
  Event = HWStallEvent::Invalid;
  const InstrDesc &Desc = IR.getInstruction()->getDesc();

  if (Desc.MayLoad && LSU->isLQFull())
    Type = HWStallEvent::LoadQueueFull;
    Event = HWStallEvent::LoadQueueFull;
  else if (Desc.MayStore && LSU->isSQFull())
    Type = HWStallEvent::StoreQueueFull;
    Event = HWStallEvent::StoreQueueFull;
  else {
    switch (Resources->canBeDispatched(Desc.Buffers)) {
    default:
      return true;
    case ResourceStateEvent::RS_BUFFER_UNAVAILABLE:
      Type = HWStallEvent::SchedulerQueueFull;
      Event = HWStallEvent::SchedulerQueueFull;
      break;
    case ResourceStateEvent::RS_RESERVED:
      Type = HWStallEvent::DispatchGroupStall;
      Event = HWStallEvent::DispatchGroupStall;
    }
  }

  Owner->notifyStallEvent(HWStallEvent(Type, IR));
  return false;
}

@@ -364,18 +274,13 @@ void Scheduler::issueInstructionImpl(
  IssuedQueue[IR.getSourceIndex()] = IS;
}

void Scheduler::issueInstruction(InstRef &IR) {
  // Release buffered resources.
// Release the buffered resources and issue the instruction.
void Scheduler::issueInstruction(
    InstRef &IR,
    SmallVectorImpl<std::pair<ResourceRef, double>> &UsedResources) {
  const InstrDesc &Desc = IR.getInstruction()->getDesc();
  releaseBuffers(Desc.Buffers);
  notifyReleasedBuffers(Desc.Buffers);

  // Issue IS to the underlying pipelines and notify listeners.
  SmallVector<std::pair<ResourceRef, double>, 4> Pipes;
  issueInstructionImpl(IR, Pipes);
  notifyInstructionIssued(IR, Pipes);
  if (IR.getInstruction()->isExecuted())
    notifyInstructionExecuted(IR);
  issueInstructionImpl(IR, UsedResources);
}

void Scheduler::promoteToReadyQueue(SmallVectorImpl<InstRef> &Ready) {

@@ -448,56 +353,34 @@ void Scheduler::updateIssuedQueue(SmallVectorImpl<InstRef> &Executed) {
  }
}

void Scheduler::notifyInstructionIssued(
    const InstRef &IR, ArrayRef<std::pair<ResourceRef, double>> Used) {
  LLVM_DEBUG({
    dbgs() << "[E] Instruction Issued: " << IR << '\n';
    for (const std::pair<ResourceRef, unsigned> &Resource : Used) {
      dbgs() << "[E] Resource Used: [" << Resource.first.first << '.'
             << Resource.first.second << "]\n";
      dbgs() << " cycles: " << Resource.second << '\n';
    }
  });
  Owner->notifyInstructionEvent(HWInstructionIssuedEvent(IR, Used));
}

void Scheduler::notifyInstructionExecuted(const InstRef &IR) {
void Scheduler::onInstructionExecuted(const InstRef &IR) {
  LSU->onInstructionExecuted(IR);
  LLVM_DEBUG(dbgs() << "[E] Instruction Executed: " << IR << '\n');
  Owner->notifyInstructionEvent(
      HWInstructionEvent(HWInstructionEvent::Executed, IR));
  RCU.onInstructionExecuted(IR.getInstruction()->getRCUTokenID());
}

void Scheduler::notifyInstructionReady(const InstRef &IR) {
  LLVM_DEBUG(dbgs() << "[E] Instruction Ready: " << IR << '\n');
  Owner->notifyInstructionEvent(
      HWInstructionEvent(HWInstructionEvent::Ready, IR));
void Scheduler::reclaimSimulatedResources(SmallVectorImpl<ResourceRef> &Freed) {
  Resources->cycleEvent(Freed);
}

void Scheduler::notifyResourceAvailable(const ResourceRef &RR) {
  Owner->notifyResourceAvailable(RR);
bool Scheduler::reserveResources(InstRef &IR) {
  // If necessary, reserve queue entries in the load-store unit (LSU).
  const bool Reserved = LSU->reserve(IR);
  if (!IR.getInstruction()->isReady() || (Reserved && !LSU->isReady(IR))) {
    LLVM_DEBUG(dbgs() << "[SCHEDULER] Adding " << IR << " to the Wait Queue\n");
    WaitQueue[IR.getSourceIndex()] = IR.getInstruction();
    return false;
  }
  return true;
}

void Scheduler::notifyReservedBuffers(ArrayRef<uint64_t> Buffers) {
  if (Buffers.empty())
    return;

  SmallVector<unsigned, 4> BufferIDs(Buffers.begin(), Buffers.end());
  std::transform(
      Buffers.begin(), Buffers.end(), BufferIDs.begin(),
      [&](uint64_t Op) { return Resources->resolveResourceMask(Op); });
  Owner->notifyReservedBuffers(BufferIDs);
bool Scheduler::issueImmediately(InstRef &IR) {
  const InstrDesc &Desc = IR.getInstruction()->getDesc();
  if (!Desc.isZeroLatency() && !Resources->mustIssueImmediately(Desc)) {
    LLVM_DEBUG(dbgs() << "[SCHEDULER] Adding " << IR
                      << " to the Ready Queue\n");
    ReadyQueue[IR.getSourceIndex()] = IR.getInstruction();
    return false;
  }
  return true;
}

void Scheduler::notifyReleasedBuffers(ArrayRef<uint64_t> Buffers) {
  if (Buffers.empty())
    return;

  SmallVector<unsigned, 4> BufferIDs(Buffers.begin(), Buffers.end());
  std::transform(
      Buffers.begin(), Buffers.end(), BufferIDs.begin(),
      [&](uint64_t Op) { return Resources->resolveResourceMask(Op); });
  Owner->notifyReleasedBuffers(BufferIDs);
}
} // namespace mca

@@ -15,17 +15,18 @@
#ifndef LLVM_TOOLS_LLVM_MCA_SCHEDULER_H
#define LLVM_TOOLS_LLVM_MCA_SCHEDULER_H

#include "HWEventListener.h"
#include "Instruction.h"
#include "LSUnit.h"
#include "RetireControlUnit.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include <map>

namespace mca {

class Backend;

/// Used to notify the internal state of a processor resource.
///
/// A processor resource is available if it is not reserved, and there are

@@ -402,68 +403,52 @@ public:
/// An Instruction leaves the IssuedQueue when it reaches the write-back stage.
class Scheduler {
  const llvm::MCSchedModel &SM;
  RetireControlUnit &RCU;

  // Hardware resources that are managed by this scheduler.
  std::unique_ptr<ResourceManager> Resources;
  std::unique_ptr<LSUnit> LSU;

  // The Backend gets notified when instructions are ready/issued/executed.
  Backend *const Owner;

  using QueueEntryTy = std::pair<unsigned, Instruction *>;
  std::map<unsigned, Instruction *> WaitQueue;
  std::map<unsigned, Instruction *> ReadyQueue;
  std::map<unsigned, Instruction *> IssuedQueue;

  void
  notifyInstructionIssued(const InstRef &IR,
                          llvm::ArrayRef<std::pair<ResourceRef, double>> Used);
  void notifyInstructionExecuted(const InstRef &IR);
  void notifyInstructionReady(const InstRef &IR);
  void notifyResourceAvailable(const ResourceRef &RR);

  // Notify the Backend that buffered resources were consumed.
  void notifyReservedBuffers(llvm::ArrayRef<uint64_t> Buffers);
  // Notify the Backend that buffered resources were freed.
  void notifyReleasedBuffers(llvm::ArrayRef<uint64_t> Buffers);

  /// Select the next instruction to issue from the ReadyQueue.
  /// This method gives priority to older instructions.
  InstRef select();

  /// Move instructions from the WaitQueue to the ReadyQueue if input operands
  /// are all available.
  void promoteToReadyQueue(llvm::SmallVectorImpl<InstRef> &Ready);

  /// Issue an instruction without updating the ready queue.
  void issueInstructionImpl(
      InstRef &IR,
      llvm::SmallVectorImpl<std::pair<ResourceRef, double>> &Pipes);

  void updatePendingQueue(llvm::SmallVectorImpl<InstRef> &Ready);
  void updateIssuedQueue(llvm::SmallVectorImpl<InstRef> &Executed);

public:
  Scheduler(Backend *B, const llvm::MCSchedModel &Model, RetireControlUnit &R,
            unsigned LoadQueueSize, unsigned StoreQueueSize, bool AssumeNoAlias)
      : SM(Model), RCU(R), Resources(llvm::make_unique<ResourceManager>(SM)),
  Scheduler(const llvm::MCSchedModel &Model, unsigned LoadQueueSize,
            unsigned StoreQueueSize, bool AssumeNoAlias)
      : SM(Model), Resources(llvm::make_unique<ResourceManager>(SM)),
        LSU(llvm::make_unique<LSUnit>(LoadQueueSize, StoreQueueSize,
                                      AssumeNoAlias)),
        Owner(B) {}
        AssumeNoAlias)) {}

  /// Check if the instruction in 'IR' can be dispatched.
  ///
  /// The DispatchStage is responsible for querying the Scheduler before
  /// dispatching new instructions. Queries are performed through method
  /// `Scheduler::canBeDispatched`. If scheduling resources are available,
  /// and the instruction can be dispatched, then this method returns true.
  /// Otherwise, a generic HWStallEvent is notified to the listeners.
  bool canBeDispatched(const InstRef &IR) const;
  void scheduleInstruction(InstRef &IR);
  /// dispatching new instructions. This routine is used for performing such
  /// a query. If the instruction 'IR' can be dispatched, then true is
  /// returned, otherwise false is returned with Event set to the stall type.
  bool canBeDispatched(const InstRef &IR,
                       HWStallEvent::GenericEventType &Event) const;

  /// Issue an instruction.
  void issueInstruction(InstRef &IR);
  /// Returns true if there is availibility for IR in the LSU.
  bool isReady(const InstRef &IR) const { return LSU->isReady(IR); }

  /// Issue an instruction. The Used container is populated with
  /// the resource objects consumed on behalf of issuing this instruction.
  void
  issueInstruction(InstRef &IR,
                   llvm::SmallVectorImpl<std::pair<ResourceRef, double>> &Used);

  /// This routine will attempt to issue an instruction immediately (for
  /// zero-latency instructions).
  ///
  /// Returns true if the instruction is issued immediately. If this does not
  /// occur, then the instruction will be added to the Scheduler's ReadyQueue.
  bool issueImmediately(InstRef &IR);

  /// Reserve one entry in each buffered resource.
  void reserveBuffers(llvm::ArrayRef<uint64_t> Buffers) {

@@ -475,12 +460,55 @@ public:
    Resources->releaseBuffers(Buffers);
  }

  void cycleEvent();
  /// Update the resources managed by the scheduler.
  /// This routine is to be called at the start of a new cycle, and is
  /// responsible for updating scheduler resources. Resources are released
  /// once they have been fully consumed.
  void reclaimSimulatedResources(llvm::SmallVectorImpl<ResourceRef> &Freed);

  /// Move instructions from the WaitQueue to the ReadyQueue if input operands
  /// are all available.
  void promoteToReadyQueue(llvm::SmallVectorImpl<InstRef> &Ready);

  /// Update the ready queue.
  void updatePendingQueue(llvm::SmallVectorImpl<InstRef> &Ready);

  /// Update the issued queue.
  void updateIssuedQueue(llvm::SmallVectorImpl<InstRef> &Executed);

  /// Updates the Scheduler's resources to reflect that an instruction has just
  /// been executed.
  void onInstructionExecuted(const InstRef &IR);

  /// Obtain the processor's resource identifier for the given
  /// resource mask.
  unsigned getResourceID(uint64_t Mask) {
    return Resources->resolveResourceMask(Mask);
  }

  /// Reserve resources necessary to issue the instruction.
  /// Returns true if the resources are ready and the (LSU) can
  /// execute the given instruction immediately.
  bool reserveResources(InstRef &IR);

  /// Select the next instruction to issue from the ReadyQueue.
  /// This method gives priority to older instructions.
  InstRef select();

#ifndef NDEBUG
  // Update the ready queues.
  void dump() const;
#endif
};
} // Namespace mca

#endif
  // This routine performs a sanity check. This routine should only be called
  // when we know that 'IR' is not in the scheduler's instruction queues.
  void sanityCheck(const InstRef &IR) const {
    const unsigned Idx = IR.getSourceIndex();
    assert(WaitQueue.find(Idx) == WaitQueue.end());
    assert(ReadyQueue.find(Idx) == ReadyQueue.end());
    assert(IssuedQueue.find(Idx) == IssuedQueue.end());
  }
#endif // !NDEBUG
};
} // namespace mca

#endif // LLVM_TOOLS_LLVM_MCA_SCHEDULER_H
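The other half of the change is that the Scheduler no longer notifies the Backend directly: canBeDispatched now reports the stall reason through an out-parameter, and the caller (DispatchStage::checkScheduler) decides whether to raise the event. The following is a minimal standalone sketch of that out-parameter pattern only; the type names echo the diff, but the members, the ToyScheduler class, and main() are simplified stand-ins, not the real llvm-mca API.

#include <iostream>

// Simplified stand-in for HWStallEvent::GenericEventType.
enum class StallKind { Invalid, LoadQueueFull, StoreQueueFull, SchedulerQueueFull };

// Simplified stand-in for an instruction reference.
struct InstRef {
  bool MayLoad = false;
};

struct ToyScheduler {
  bool LoadQueueFull = true; // pretend the load queue is already full

  // Mirrors the new Scheduler::canBeDispatched(IR, Event): return whether the
  // instruction can be dispatched; on failure, say why via the out-parameter.
  bool canBeDispatched(const InstRef &IR, StallKind &Event) const {
    Event = StallKind::Invalid;
    if (IR.MayLoad && LoadQueueFull) {
      Event = StallKind::LoadQueueFull;
      return false;
    }
    return true;
  }
};

// Mirrors DispatchStage::checkScheduler: the caller, not the scheduler,
// turns the stall reason into a notification.
bool checkScheduler(const ToyScheduler &SC, const InstRef &IR) {
  StallKind Event;
  const bool Ready = SC.canBeDispatched(IR, Event);
  if (!Ready)
    std::cout << "stall event " << static_cast<int>(Event) << '\n';
  return Ready;
}

int main() {
  ToyScheduler SC;
  InstRef Load;
  Load.MayLoad = true;
  checkScheduler(SC, Load); // reports a stall, since the toy load queue is full
}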