//===--------------------- Instruction.cpp ----------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines abstractions used by the Pipeline to model register reads,
// register writes and instructions.
//
//===----------------------------------------------------------------------===//

#include "llvm/MCA/Instruction.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

namespace llvm {
namespace mca {

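// Added note: this is invoked by the write this one depends on, once that
// write's latency is known. The notifying write is recorded as this write's
// critical register dependency (CRD), the DependentWriteCyclesLeft countdown
// starts, and the DependentWrite link is cleared since it is no longer needed.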
void WriteState::writeStartEvent(unsigned IID, MCPhysReg RegID,
                                 unsigned Cycles) {
  CRD.IID = IID;
  CRD.RegID = RegID;
  CRD.Cycles = Cycles;
  DependentWriteCyclesLeft = Cycles;
  DependentWrite = nullptr;
}

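// Added note: notifies this read that one of the writes it depends on has
// been issued. Cycles is the number of cycles left until that write's result
// becomes available to this read.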
void ReadState::writeStartEvent(unsigned IID, MCPhysReg RegID,
                                unsigned Cycles) {
  assert(DependentWrites);
  assert(CyclesLeft == UNKNOWN_CYCLES);

  // This read may be dependent on more than one write. This typically occurs
  // when a definition is the result of multiple writes where at least one
  // write does a partial register update.
  // The HW is forced to do some extra bookkeeping to keep track of all the
  // dependent writes, and implement a merging scheme for the partial writes.
  --DependentWrites;
  if (TotalCycles < Cycles) {
    CRD.IID = IID;
    CRD.RegID = RegID;
    CRD.Cycles = Cycles;
    TotalCycles = Cycles;
  }

  if (!DependentWrites) {
    CyclesLeft = TotalCycles;
    IsReady = !CyclesLeft;
  }
}

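// Added note: called when the owning instruction is issued. At this point the
// latency is known, so it is propagated to every registered reader and to any
// write holding a false dependency on this one.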
void WriteState::onInstructionIssued(unsigned IID) {
  assert(CyclesLeft == UNKNOWN_CYCLES);
  // Update the number of cycles left based on the WriteDescriptor info.
  CyclesLeft = getLatency();

  // Now that the time left before write-back is known, notify
  // all the users.
  for (const std::pair<ReadState *, int> &User : Users) {
    ReadState *RS = User.first;
    unsigned ReadCycles = std::max(0, CyclesLeft - User.second);
    RS->writeStartEvent(IID, RegisterID, ReadCycles);
  }

  // Notify any writes that are in a false dependency with this write.
  if (PartialWrite)
    PartialWrite->writeStartEvent(IID, RegisterID, CyclesLeft);
}

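// Added note: registers a read as a user of this write. ReadAdvance shortens
// the wait (or lengthens it when negative). Worked example with made-up
// numbers, not taken from any scheduling model: if CyclesLeft == 3 and
// ReadAdvance == 2, the reader observes the result after max(0, 3 - 2) == 1
// cycle; with ReadAdvance == -1 it waits max(0, 3 - (-1)) == 4 cycles.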
void WriteState::addUser(unsigned IID, ReadState *User, int ReadAdvance) {
  // If CyclesLeft is different than -1, then we don't need to
  // update the list of users. We can just notify the user with
  // the actual number of cycles left (which may be zero).
  if (CyclesLeft != UNKNOWN_CYCLES) {
    unsigned ReadCycles = std::max(0, CyclesLeft - ReadAdvance);
    User->writeStartEvent(IID, RegisterID, ReadCycles);
    return;
  }

  Users.emplace_back(User, ReadAdvance);
}

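// Added note: registers another write that has a false (partial-update)
// dependency on this write. If the latency is already known, the dependent
// write is notified immediately; otherwise it is remembered in PartialWrite
// and linked back via setDependentWrite().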
void WriteState::addUser(unsigned IID, WriteState *User) {
  if (CyclesLeft != UNKNOWN_CYCLES) {
    User->writeStartEvent(IID, RegisterID, std::max(0, CyclesLeft));
    return;
  }

  assert(!PartialWrite && "PartialWrite already set!");
  PartialWrite = User;
  User->setDependentWrite(this);
}

void WriteState::cycleEvent() {
  // Note: CyclesLeft can be a negative number. It is an error to
  // make it an unsigned quantity because users of this write may
  // specify a negative ReadAdvance.
  if (CyclesLeft != UNKNOWN_CYCLES)
    CyclesLeft--;

  if (DependentWriteCyclesLeft)
    DependentWriteCyclesLeft--;
}

void ReadState::cycleEvent() {
  // Update the total number of cycles.
  if (DependentWrites && TotalCycles) {
    --TotalCycles;
    return;
  }

  // Bail out immediately if we don't know how many cycles are left.
  if (CyclesLeft == UNKNOWN_CYCLES)
    return;

  if (CyclesLeft) {
    --CyclesLeft;
    IsReady = !CyclesLeft;
  }
}

#ifndef NDEBUG
void WriteState::dump() const {
  dbgs() << "{ OpIdx=" << WD->OpIndex << ", Lat=" << getLatency() << ", RegID "
         << getRegisterID() << ", Cycles Left=" << getCyclesLeft() << " }";
}
#endif

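// Added note: scans the register defs and uses of this instruction and caches
// the dependency with the largest cycle count in CriticalRegDep; subsequent
// calls return the memoized value.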
const CriticalDependency &Instruction::computeCriticalRegDep() {
  if (CriticalRegDep.Cycles)
    return CriticalRegDep;

  unsigned MaxLatency = 0;
  for (const WriteState &WS : getDefs()) {
    const CriticalDependency &WriteCRD = WS.getCriticalRegDep();
    if (WriteCRD.Cycles > MaxLatency) {
      MaxLatency = WriteCRD.Cycles;
      CriticalRegDep = WriteCRD;
    }
  }

  for (const ReadState &RS : getUses()) {
    const CriticalDependency &ReadCRD = RS.getCriticalRegDep();
    if (ReadCRD.Cycles > MaxLatency) {
      MaxLatency = ReadCRD.Cycles;
      CriticalRegDep = ReadCRD;
    }
  }

  return CriticalRegDep;
}

void Instruction::reset() {
  // Note that this won't clear read/write descriptors
  // or other non-trivial fields.
  Stage = IS_INVALID;
  CyclesLeft = UNKNOWN_CYCLES;
  clearOptimizableMove();
  RCUTokenID = 0;
  LSUTokenID = 0;
  CriticalResourceMask = 0;
  IsEliminated = false;
}

void Instruction::dispatch(unsigned RCUToken) {
  assert(Stage == IS_INVALID);
  Stage = IS_DISPATCHED;
  RCUTokenID = RCUToken;

  // Check if input operands are already available.
  if (updateDispatched())
    updatePending();
}

void Instruction::execute(unsigned IID) {
  assert(Stage == IS_READY);
  Stage = IS_EXECUTING;

  // Set the cycles left before the write-back stage.
  CyclesLeft = getLatency();

  for (WriteState &WS : getDefs())
    WS.onInstructionIssued(IID);

  // Transition to the "executed" stage if this is a zero-latency instruction.
  if (!CyclesLeft)
    Stage = IS_EXECUTED;
}

void Instruction::forceExecuted() {
  assert(Stage == IS_READY && "Invalid internal state!");
  CyclesLeft = 0;
  Stage = IS_EXECUTED;
}

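// Added note: the two helpers below drive the operand-based stage
// transitions. updateDispatched() moves IS_DISPATCHED -> IS_PENDING once
// every read is at least pending and no def is still blocked behind a
// dependent write; updatePending() moves IS_PENDING -> IS_READY once all
// reads and defs are ready.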
bool Instruction::updatePending() {
  assert(isPending() && "Unexpected instruction stage found!");

  if (!all_of(getUses(), [](const ReadState &Use) { return Use.isReady(); }))
    return false;

  // A partial register write cannot complete before a dependent write.
  if (!all_of(getDefs(), [](const WriteState &Def) { return Def.isReady(); }))
    return false;

  Stage = IS_READY;
  return true;
}

bool Instruction::updateDispatched() {
  assert(isDispatched() && "Unexpected instruction stage found!");

  if (!all_of(getUses(), [](const ReadState &Use) {
        return Use.isPending() || Use.isReady();
      }))
    return false;

  // A partial register write cannot complete before a dependent write.
  if (!all_of(getDefs(),
              [](const WriteState &Def) { return !Def.getDependentWrite(); }))
    return false;

  Stage = IS_PENDING;
  return true;
}

void Instruction::update() {
  if (isDispatched())
    updateDispatched();
  if (isPending())
    updatePending();
}

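// Added note: advances the instruction by one simulated cycle. While waiting
// for operands it ticks every read and write and re-evaluates the stage;
// while executing it ticks the defs and counts down CyclesLeft, switching to
// IS_EXECUTED when the count reaches zero.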
void Instruction::cycleEvent() {
  if (isReady())
    return;

  if (isDispatched() || isPending()) {
    for (ReadState &Use : getUses())
      Use.cycleEvent();

    for (WriteState &Def : getDefs())
      Def.cycleEvent();

    update();
    return;
  }

  assert(isExecuting() && "Instruction not in-flight?");
  assert(CyclesLeft && "Instruction already executed?");
  for (WriteState &Def : getDefs())
    Def.cycleEvent();
  CyclesLeft--;
  if (!CyclesLeft)
    Stage = IS_EXECUTED;
}

} // namespace mca
} // namespace llvm