//===- SIInsertWaits.cpp - Insert S_WAITCNT instructions ------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief Insert wait instructions for memory reads and writes.
///
/// Memory reads and writes are issued asynchronously, so we need to insert
/// S_WAITCNT instructions when we want to access any of their results or
/// overwrite any register that's used asynchronously.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIDefines.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "SIRegisterInfo.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstring>
#include <utility>

#define DEBUG_TYPE "si-insert-waits"

using namespace llvm;

namespace {

/// \brief One variable for each of the hardware counters
using Counters = union {
  struct {
    unsigned VM;
    unsigned EXP;
    unsigned LGKM;
  } Named;
  unsigned Array[3];
};

using InstType = enum {
  OTHER,
  SMEM,
  VMEM
};

using RegCounters = Counters[512];
using RegInterval = std::pair<unsigned, unsigned>;
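
// Note: the union lets the pass address the three hardware counters either by
// name (Named.VM/EXP/LGKM) or uniformly by index (Array[0..2]); both views
// alias the same storage. RegCounters is indexed by the hardware register
// encoding from TRI->getEncodingValue(), so the 512 slots are assumed to be
// enough to cover the SGPR and VGPR encoding spaces.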

class SIInsertWaits : public MachineFunctionPass {
private:
  const SISubtarget *ST = nullptr;
  const SIInstrInfo *TII = nullptr;
  const SIRegisterInfo *TRI = nullptr;
  const MachineRegisterInfo *MRI;
  AMDGPU::IsaInfo::IsaVersion ISA;

  /// \brief Constant zero value
  static const Counters ZeroCounts;

  /// \brief Hardware limits
  Counters HardwareLimits;

  /// \brief Counter values we have already waited on.
  Counters WaitedOn;

  /// \brief Counter values that we must wait on before the next counter
  /// increase.
  Counters DelayedWaitOn;

  /// \brief Counter values for last instruction issued.
  Counters LastIssued;

  /// \brief Registers used by async instructions.
  RegCounters UsedRegs;

  /// \brief Registers defined by async instructions.
  RegCounters DefinedRegs;

  /// \brief Different export instruction types seen since last wait.
  unsigned ExpInstrTypesSeen = 0;

  /// \brief Type of the last opcode.
  InstType LastOpcodeType;

  bool LastInstWritesM0;

  /// Whether or not we have flat operations outstanding.
  bool IsFlatOutstanding;

  /// \brief Whether the machine function returns void
  bool ReturnsVoid;

  /// Whether the VCCZ bit is possibly corrupt
  bool VCCZCorrupt = false;

  /// \brief Get increment/decrement amount for this instruction.
  Counters getHwCounts(MachineInstr &MI);

  /// \brief Is operand relevant for async execution?
  bool isOpRelevant(MachineOperand &Op);

  /// \brief Get register interval an operand affects.
  RegInterval getRegInterval(const TargetRegisterClass *RC,
                             const MachineOperand &Reg) const;

  /// \brief Handle an instruction's async components.
  void pushInstruction(MachineBasicBlock &MBB,
                       MachineBasicBlock::iterator I,
                       const Counters &Increment);

  /// \brief Insert the actual wait instruction
  bool insertWait(MachineBasicBlock &MBB,
                  MachineBasicBlock::iterator I,
                  const Counters &Counts);

  /// \brief Handle existing wait instructions (from intrinsics)
  void handleExistingWait(MachineBasicBlock::iterator I);

  /// \brief Do we need def2def checks?
  bool unorderedDefines(MachineInstr &MI);

  /// \brief Resolve all operand dependencies to counter requirements
  Counters handleOperands(MachineInstr &MI);

  /// \brief Insert S_NOP between an instruction writing M0 and S_SENDMSG.
  void handleSendMsg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I);

  /// Return true if there are LGKM instructions that haven't been waited on
  /// yet.
  bool hasOutstandingLGKM() const;

public:
  static char ID;

  SIInsertWaits() : MachineFunctionPass(ID) {}

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override {
    return "SI insert wait instructions";
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // end anonymous namespace

INITIALIZE_PASS_BEGIN(SIInsertWaits, DEBUG_TYPE,
                      "SI Insert Waits", false, false)
INITIALIZE_PASS_END(SIInsertWaits, DEBUG_TYPE,
                    "SI Insert Waits", false, false)

char SIInsertWaits::ID = 0;

char &llvm::SIInsertWaitsID = SIInsertWaits::ID;

FunctionPass *llvm::createSIInsertWaitsPass() {
  return new SIInsertWaits();
}

const Counters SIInsertWaits::ZeroCounts = { { 0, 0, 0 } };

static bool readsVCCZ(const MachineInstr &MI) {
  unsigned Opc = MI.getOpcode();
  return (Opc == AMDGPU::S_CBRANCH_VCCNZ || Opc == AMDGPU::S_CBRANCH_VCCZ) &&
         !MI.getOperand(1).isUndef();
}
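
// Note: operand 1 of S_CBRANCH_VCCNZ/S_CBRANCH_VCCZ is the branch's implicit
// use of VCC; if that use is marked undef, the branch does not really depend
// on a live vccz value, so it is not treated as a vccz read here.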

bool SIInsertWaits::hasOutstandingLGKM() const {
  return WaitedOn.Named.LGKM != LastIssued.Named.LGKM;
}
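
// In the model used below, e.g., a buffer_load_dword increments VM_CNT by
// one, an s_load_dwordx2 (SMRD with a 64-bit result) increments LGKM_CNT by
// two, a DS operation increments LGKM_CNT by one, and only stores and exports
// count towards EXP_CNT. getHwCounts() returns that per-instruction
// increment.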

Counters SIInsertWaits::getHwCounts(MachineInstr &MI) {
  uint64_t TSFlags = MI.getDesc().TSFlags;
  Counters Result = { { 0, 0, 0 } };

  Result.Named.VM = !!(TSFlags & SIInstrFlags::VM_CNT);

  // Only consider stores or EXP for EXP_CNT
  Result.Named.EXP = !!(TSFlags & SIInstrFlags::EXP_CNT) && MI.mayStore();

  // LGKM may use larger values
  if (TSFlags & SIInstrFlags::LGKM_CNT) {

    if (TII->isSMRD(MI)) {

      if (MI.getNumOperands() != 0) {
        assert(MI.getOperand(0).isReg() &&
               "First LGKM operand must be a register!");

        // XXX - What if this is a write into a super register?
        const TargetRegisterClass *RC = TII->getOpRegClass(MI, 0);
        unsigned Size = TRI->getRegSizeInBits(*RC);
        Result.Named.LGKM = Size > 32 ? 2 : 1;
      } else {
        // s_dcache_inv etc. do not have a destination register. Assume we
        // want a wait on these.
        // XXX - What is the right value?
        Result.Named.LGKM = 1;
      }
    } else {
      // DS
      Result.Named.LGKM = 1;
    }

  } else {
    Result.Named.LGKM = 0;
  }

  return Result;
}

bool SIInsertWaits::isOpRelevant(MachineOperand &Op) {
  // Constants are always irrelevant
  if (!Op.isReg() || !TRI->isInAllocatableClass(Op.getReg()))
    return false;

  // Defines are always relevant
  if (Op.isDef())
    return true;

  // For exports all registers are relevant.
  // TODO: Skip undef/disabled registers.
  MachineInstr &MI = *Op.getParent();
  if (TII->isEXP(MI))
    return true;

  // For stores the stored value is also relevant
  if (!MI.getDesc().mayStore())
    return false;

  // Check if this operand is the value being stored.
  // Special case for DS/FLAT instructions, since the address
  // operand comes before the value operand and it may have
  // multiple data operands.

  if (TII->isDS(MI)) {
    MachineOperand *Data0 = TII->getNamedOperand(MI, AMDGPU::OpName::data0);
    if (Data0 && Op.isIdenticalTo(*Data0))
      return true;

    MachineOperand *Data1 = TII->getNamedOperand(MI, AMDGPU::OpName::data1);
    return Data1 && Op.isIdenticalTo(*Data1);
  }

  if (TII->isFLAT(MI)) {
    MachineOperand *Data = TII->getNamedOperand(MI, AMDGPU::OpName::vdata);
    if (Data && Op.isIdenticalTo(*Data))
      return true;
  }

  // NOTE: This assumes that the value operand is before the
  // address operand, and that there is only one value operand.
  for (MachineInstr::mop_iterator I = MI.operands_begin(),
       E = MI.operands_end(); I != E; ++I) {

    if (I->isReg() && I->isUse())
      return Op.isIdenticalTo(*I);
  }

  return false;
}

RegInterval SIInsertWaits::getRegInterval(const TargetRegisterClass *RC,
                                          const MachineOperand &Reg) const {
  unsigned Size = TRI->getRegSizeInBits(*RC);
  assert(Size >= 32);

  RegInterval Result;
  Result.first = TRI->getEncodingValue(Reg.getReg());
  Result.second = Result.first + Size / 32;

  return Result;
}
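
// For example, a 128-bit register tuple such as s[44:47] (base encoding 44)
// maps to the half-open interval [44, 48): four consecutive 32-bit slots in
// UsedRegs/DefinedRegs.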

void SIInsertWaits::pushInstruction(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator I,
                                    const Counters &Increment) {
  // Get the hardware counter increments and sum them up
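  // Per counter, Limit records the wait target a dependent instruction will
  // need: the just-issued count for each counter this instruction actually
  // increments, and zero for the others, so unrelated counters never force
  // waits (this is the point of the per-register counter snapshots).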
  Counters Limit = ZeroCounts;
  unsigned Sum = 0;

  if (TII->mayAccessFlatAddressSpace(*I))
    IsFlatOutstanding = true;

  for (unsigned i = 0; i < 3; ++i) {
    LastIssued.Array[i] += Increment.Array[i];
    if (Increment.Array[i])
      Limit.Array[i] = LastIssued.Array[i];
    Sum += Increment.Array[i];
  }

  // If we don't increase anything then that's it
  if (Sum == 0) {
    LastOpcodeType = OTHER;
    return;
  }

  if (ST->getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) {
    // Any occurrence of consecutive VMEM or SMEM instructions forms a VMEM
    // or SMEM clause, respectively.
    //
    // The temporary workaround is to break the clauses with S_NOP.
    //
    // The proper solution would be to allocate registers such that all source
    // and destination registers don't overlap, e.g. this is illegal:
    //   r0 = load r2
    //   r2 = load r0
    if (LastOpcodeType == VMEM && Increment.Named.VM) {
      // Insert a NOP to break the clause.
      BuildMI(MBB, I, DebugLoc(), TII->get(AMDGPU::S_NOP))
        .addImm(0);
      LastInstWritesM0 = false;
    }

    if (TII->isSMRD(*I))
      LastOpcodeType = SMEM;
    else if (Increment.Named.VM)
      LastOpcodeType = VMEM;
  }

  // Remember which export instructions we have seen
  if (Increment.Named.EXP) {
    ExpInstrTypesSeen |= TII->isEXP(*I) ? 1 : 2;
  }

  for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
    MachineOperand &Op = I->getOperand(i);
    if (!isOpRelevant(Op))
      continue;

    const TargetRegisterClass *RC = TII->getOpRegClass(*I, i);
    RegInterval Interval = getRegInterval(RC, Op);
    for (unsigned j = Interval.first; j < Interval.second; ++j) {

      // Remember which registers we define
      if (Op.isDef())
        DefinedRegs[j] = Limit;

      // and which one we are using
      if (Op.isUse())
        UsedRegs[j] = Limit;
    }
  }
}

bool SIInsertWaits::insertWait(MachineBasicBlock &MBB,
                               MachineBasicBlock::iterator I,
                               const Counters &Required) {
  // End of program? No need to wait on anything
  // A function not returning void needs to wait, because other bytecode will
  // be appended after it and we don't know what it will be.
  if (I != MBB.end() && I->getOpcode() == AMDGPU::S_ENDPGM && ReturnsVoid)
    return false;

  // Figure out if the async instructions execute in order
  bool Ordered[3];

  // VM_CNT is always ordered except when there are flat instructions, which
  // can return out of order.
  Ordered[0] = !IsFlatOutstanding;

  // EXP_CNT is unordered if we have both EXP & VM-writes
  Ordered[1] = ExpInstrTypesSeen == 3;

  // LGKM_CNT is handled as always unordered. TODO: Handle LDS and GDS
  Ordered[2] = false;

  // The values we are going to put into the S_WAITCNT instruction
  Counters Counts = HardwareLimits;

  // Do we really need to wait?
  bool NeedWait = false;

  for (unsigned i = 0; i < 3; ++i) {
    if (Required.Array[i] <= WaitedOn.Array[i])
      continue;

    NeedWait = true;

    if (Ordered[i]) {
      unsigned Value = LastIssued.Array[i] - Required.Array[i];

      // Adjust the value to the real hardware possibilities.
      Counts.Array[i] = std::min(Value, HardwareLimits.Array[i]);
    } else
      Counts.Array[i] = 0;

    // Remember what we have waited on.
    WaitedOn.Array[i] = LastIssued.Array[i] - Counts.Array[i];
  }

  if (!NeedWait)
    return false;

  // Reset EXP_CNT instruction types
  if (Counts.Named.EXP == 0)
    ExpInstrTypesSeen = 0;

  // Build the wait instruction
  BuildMI(MBB, I, DebugLoc(), TII->get(AMDGPU::S_WAITCNT))
    .addImm(AMDGPU::encodeWaitcnt(ISA,
                                  Counts.Named.VM,
                                  Counts.Named.EXP,
                                  Counts.Named.LGKM));

  LastOpcodeType = OTHER;
  LastInstWritesM0 = false;
  IsFlatOutstanding = false;
  return true;
}
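
// Worked example for insertWait(): if four VMEM loads have been issued
// (LastIssued.VM == 4) and the operand we depend on was produced by the
// second one (Required.VM == 2), then on an in-order counter it is enough to
// emit s_waitcnt vmcnt(2), i.e. "at most 4 - 2 = 2 operations still
// outstanding". An unordered counter has to be drained to zero instead.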

/// \brief Helper function for handleOperands().
static void increaseCounters(Counters &Dst, const Counters &Src) {
  for (unsigned i = 0; i < 3; ++i)
    Dst.Array[i] = std::max(Dst.Array[i], Src.Array[i]);
}

/// \brief Check whether any of the counters is non-zero.
static bool countersNonZero(const Counters &Counter) {
  for (unsigned i = 0; i < 3; ++i)
    if (Counter.Array[i])
      return true;
  return false;
}

void SIInsertWaits::handleExistingWait(MachineBasicBlock::iterator I) {
  assert(I->getOpcode() == AMDGPU::S_WAITCNT);

  unsigned Imm = I->getOperand(0).getImm();
  Counters Counts, WaitOn;

  Counts.Named.VM = AMDGPU::decodeVmcnt(ISA, Imm);
  Counts.Named.EXP = AMDGPU::decodeExpcnt(ISA, Imm);
  Counts.Named.LGKM = AMDGPU::decodeLgkmcnt(ISA, Imm);

  for (unsigned i = 0; i < 3; ++i) {
    if (Counts.Array[i] <= LastIssued.Array[i])
      WaitOn.Array[i] = LastIssued.Array[i] - Counts.Array[i];
    else
      WaitOn.Array[i] = 0;
  }

  increaseCounters(DelayedWaitOn, WaitOn);
}
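
// Note: the bit layout of the S_WAITCNT immediate differs between ISA
// versions (field positions and widths vary between generations), which is
// why encoding and decoding go through the AMDGPU::encodeWaitcnt and
// AMDGPU::decode*cnt helpers with an explicit IsaVersion instead of
// open-coding shifts and masks here.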

Counters SIInsertWaits::handleOperands(MachineInstr &MI) {
  Counters Result = ZeroCounts;

  // For each register affected by this instruction increase the result
  // sequence.
  //
  // TODO: We could probably just look at explicit operands if we removed VCC /
  // EXEC from SMRD dest reg classes.
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    MachineOperand &Op = MI.getOperand(i);
    if (!Op.isReg() || !TRI->isInAllocatableClass(Op.getReg()))
      continue;

    const TargetRegisterClass *RC = TII->getOpRegClass(MI, i);
    RegInterval Interval = getRegInterval(RC, Op);
    for (unsigned j = Interval.first; j < Interval.second; ++j) {
      if (Op.isDef()) {
        increaseCounters(Result, UsedRegs[j]);
        increaseCounters(Result, DefinedRegs[j]);
      }

      if (Op.isUse())
        increaseCounters(Result, DefinedRegs[j]);
    }
  }

  return Result;
}
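
// In dependency terms, handleOperands() makes a def wait on earlier reads
// (WAR) and earlier writes (WAW) of the same hardware registers, while a use
// only has to wait on earlier writes (RAW); that is exactly the asymmetry
// between the isDef() and isUse() cases above.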

void SIInsertWaits::handleSendMsg(MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator I) {
  if (ST->getGeneration() < SISubtarget::VOLCANIC_ISLANDS)
    return;

  // There must be "S_NOP 0" between an instruction writing M0 and S_SENDMSG.
  if (LastInstWritesM0 && (I->getOpcode() == AMDGPU::S_SENDMSG ||
                           I->getOpcode() == AMDGPU::S_SENDMSGHALT)) {
    BuildMI(MBB, I, DebugLoc(), TII->get(AMDGPU::S_NOP)).addImm(0);
    LastInstWritesM0 = false;
    return;
  }

  // Set whether this instruction sets M0
  LastInstWritesM0 = false;

  unsigned NumOperands = I->getNumOperands();
  for (unsigned i = 0; i < NumOperands; i++) {
    const MachineOperand &Op = I->getOperand(i);

    if (Op.isReg() && Op.isDef() && Op.getReg() == AMDGPU::M0)
      LastInstWritesM0 = true;
  }
}

/// Return true if \p MBB has one successor immediately following, and is its
/// only predecessor
static bool hasTrivialSuccessor(const MachineBasicBlock &MBB) {
  if (MBB.succ_size() != 1)
    return false;

  const MachineBasicBlock *Succ = *MBB.succ_begin();
  return (Succ->pred_size() == 1) && MBB.isLayoutSuccessor(Succ);
}

// FIXME: Insert waits listed in Table 4.2 "Required User-Inserted Wait States"
// around other non-memory instructions.
bool SIInsertWaits::runOnMachineFunction(MachineFunction &MF) {
  bool Changes = false;

  ST = &MF.getSubtarget<SISubtarget>();
  TII = ST->getInstrInfo();
  TRI = &TII->getRegisterInfo();
  MRI = &MF.getRegInfo();
  ISA = AMDGPU::IsaInfo::getIsaVersion(ST->getFeatureBits());
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  HardwareLimits.Named.VM = AMDGPU::getVmcntBitMask(ISA);
  HardwareLimits.Named.EXP = AMDGPU::getExpcntBitMask(ISA);
  HardwareLimits.Named.LGKM = AMDGPU::getLgkmcntBitMask(ISA);

  WaitedOn = ZeroCounts;
  DelayedWaitOn = ZeroCounts;
  LastIssued = ZeroCounts;
  LastOpcodeType = OTHER;
  LastInstWritesM0 = false;
  IsFlatOutstanding = false;
  ReturnsVoid = MFI->returnsVoid();

  memset(&UsedRegs, 0, sizeof(UsedRegs));
  memset(&DefinedRegs, 0, sizeof(DefinedRegs));

  SmallVector<MachineInstr *, 4> RemoveMI;
  SmallVector<MachineBasicBlock *, 4> EndPgmBlocks;

  bool HaveScalarStores = false;
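
  // Note: each per-counter bit mask doubles as that counter's hardware
  // maximum; an s_waitcnt field written as all-ones means "do not wait on
  // this counter", which is why insertWait() starts Counts from
  // HardwareLimits and only lowers the fields that actually need a wait.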

  for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
       BI != BE; ++BI) {
    MachineBasicBlock &MBB = *BI;

    for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
         I != E; ++I) {
      if (!HaveScalarStores && TII->isScalarStore(*I))
        HaveScalarStores = true;

      if (ST->getGeneration() <= SISubtarget::SEA_ISLANDS) {
        // There is a hardware bug on SI/CI where SMRD instructions may corrupt
        // the vccz bit, so when we detect that an instruction may read from a
        // corrupt vccz bit, we need to:
        // 1. Insert s_waitcnt lgkmcnt(0) to wait for all outstanding SMRD
        //    operations to complete.
        // 2. Restore the correct value of vccz by writing the current value
        //    of vcc back to vcc.

        if (TII->isSMRD(I->getOpcode())) {
          VCCZCorrupt = true;
        } else if (!hasOutstandingLGKM() &&
                   I->modifiesRegister(AMDGPU::VCC, TRI)) {
          // FIXME: We only care about SMRD instructions here, not LDS or GDS.
          // Whenever we store a value in vcc, the correct value of vccz is
          // restored.
          VCCZCorrupt = false;
        }

        // Check if we need to apply the bug work-around
        if (VCCZCorrupt && readsVCCZ(*I)) {
          DEBUG(dbgs() << "Inserting vccz bug work-around before: " << *I
                       << '\n');

          // Wait on everything, not just LGKM. vccz reads usually come from
          // terminators, and we always wait on everything at the end of the
          // block, so if we only wait on LGKM here, we might end up with
          // another s_waitcnt inserted right after this if there are non-LGKM
          // instructions still outstanding.
          insertWait(MBB, I, LastIssued);

          // Restore the vccz bit. Any time a value is written to vcc, the vcc
          // bit is updated, so we can restore the bit by reading the value of
          // vcc and then writing it back to the register.
          BuildMI(MBB, I, I->getDebugLoc(), TII->get(AMDGPU::S_MOV_B64),
                  AMDGPU::VCC)
            .addReg(AMDGPU::VCC);
        }
      }

      // Record pre-existing, explicitly requested waits
      if (I->getOpcode() == AMDGPU::S_WAITCNT) {
        handleExistingWait(*I);
        RemoveMI.push_back(&*I);
        continue;
      }
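
      // For each remaining instruction: compute what it must wait for
      // (Required), fold in explicitly requested waits that were deferred
      // (DelayedWaitOn), emit an s_waitcnt if needed, and only then account
      // for the instruction's own counter increments (pushInstruction).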

      Counters Required;

      // Wait for everything before a barrier.
      //
      // S_SENDMSG implicitly waits for all outstanding LGKM transfers to
      // finish, but we also want to wait for any other outstanding transfers
      // before signalling other hardware blocks
      if ((I->getOpcode() == AMDGPU::S_BARRIER &&
           !ST->hasAutoWaitcntBeforeBarrier()) ||
          I->getOpcode() == AMDGPU::S_SENDMSG ||
          I->getOpcode() == AMDGPU::S_SENDMSGHALT)
        Required = LastIssued;
      else
        Required = handleOperands(*I);

      Counters Increment = getHwCounts(*I);

      if (countersNonZero(Required) || countersNonZero(Increment))
        increaseCounters(Required, DelayedWaitOn);

      Changes |= insertWait(MBB, I, Required);

      pushInstruction(MBB, I, Increment);
      handleSendMsg(MBB, I);

      if (I->getOpcode() == AMDGPU::S_ENDPGM ||
          I->getOpcode() == AMDGPU::SI_RETURN_TO_EPILOG)
        EndPgmBlocks.push_back(&MBB);
    }

    // Wait for everything at the end of the MBB. If there is only one
    // successor, we can defer this until the uses there.
    if (!hasTrivialSuccessor(MBB))
      Changes |= insertWait(MBB, MBB.getFirstTerminator(), LastIssued);
  }

  if (HaveScalarStores) {
    // If scalar writes are used, the cache must be flushed or else the next
    // wave to reuse the same scratch memory can be clobbered.
    //
    // Insert s_dcache_wb at wave termination points if there were any scalar
    // stores, and only if the cache hasn't already been flushed. This could be
    // improved by looking across blocks for flushes in postdominating blocks
    // from the stores but an explicitly requested flush is probably very rare.
    for (MachineBasicBlock *MBB : EndPgmBlocks) {
      bool SeenDCacheWB = false;

      for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
           I != E; ++I) {
        if (I->getOpcode() == AMDGPU::S_DCACHE_WB)
          SeenDCacheWB = true;
        else if (TII->isScalarStore(*I))
          SeenDCacheWB = false;

        // FIXME: It would be better to insert this before a waitcnt if any.
        if ((I->getOpcode() == AMDGPU::S_ENDPGM ||
             I->getOpcode() == AMDGPU::SI_RETURN_TO_EPILOG) && !SeenDCacheWB) {
          Changes = true;
          BuildMI(*MBB, I, I->getDebugLoc(), TII->get(AMDGPU::S_DCACHE_WB));
        }
      }
    }
  }

  for (MachineInstr *I : RemoveMI)
    I->eraseFromParent();

  if (!MFI->isEntryFunction()) {
    // Wait for any outstanding memory operations that the input registers may
    // depend on. We can't track them and it's better to do the wait after the
    // costly call sequence.

    // TODO: Could insert earlier and schedule more liberally with operations
    // that only use caller preserved registers.
    MachineBasicBlock &EntryBB = MF.front();
    BuildMI(EntryBB, EntryBB.getFirstNonPHI(), DebugLoc(),
            TII->get(AMDGPU::S_WAITCNT))
      .addImm(0);

    Changes = true;
  }

  return Changes;
}