//===- SIFixSGPRCopies.cpp - Remove potential VGPR => SGPR copies ---------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// Copies from VGPR to SGPR registers are illegal and the register coalescer
/// will sometimes generate these illegal copies in situations like this:
///
///  Register Class <vsrc> is the union of <vgpr> and <sgpr>
///
/// BB0:
///   %0 <sgpr> = SCALAR_INST
///   %1 <vsrc> = COPY %0 <sgpr>
///   ...
///   BRANCH %cond BB1, BB2
/// BB1:
///   %2 <vgpr> = VECTOR_INST
///   %3 <vsrc> = COPY %2 <vgpr>
/// BB2:
///   %4 <vsrc> = PHI %1 <vsrc>, <%bb.0>, %3 <vsrc>, <%bb.1>
///   %5 <vgpr> = VECTOR_INST %4 <vsrc>
///
/// The coalescer will begin at BB0 and eliminate its copy, then the resulting
/// code will look like this:
///
/// BB0:
///   %0 <sgpr> = SCALAR_INST
///   ...
///   BRANCH %cond BB1, BB2
/// BB1:
///   %2 <vgpr> = VECTOR_INST
///   %3 <vsrc> = COPY %2 <vgpr>
/// BB2:
///   %4 <sgpr> = PHI %0 <sgpr>, <%bb.0>, %3 <vsrc>, <%bb.1>
///   %5 <vgpr> = VECTOR_INST %4 <sgpr>
///
/// Now that the result of the PHI instruction is an SGPR, the register
/// allocator is forced to constrain the register class of %3 to
/// <sgpr> so we end up with final code like this:
///
/// BB0:
///   %0 <sgpr> = SCALAR_INST
///   ...
///   BRANCH %cond BB1, BB2
/// BB1:
///   %2 <vgpr> = VECTOR_INST
///   %3 <sgpr> = COPY %2 <vgpr>
/// BB2:
///   %4 <sgpr> = PHI %0 <sgpr>, <%bb.0>, %3 <sgpr>, <%bb.1>
///   %5 <vgpr> = VECTOR_INST %4 <sgpr>
///
/// Now this code contains an illegal copy from a VGPR to an SGPR.
///
/// In order to avoid this problem, this pass searches for PHI instructions
/// which define a <vsrc> register and constrains its definition class to
/// <vgpr> if the user of the PHI's definition register is a vector instruction.
/// If the PHI's definition class is constrained to <vgpr> then the coalescer
/// will be unable to perform the COPY removal from the above example which
/// ultimately led to the creation of an illegal COPY.
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "SIRegisterInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
|
2017-06-06 19:49:48 +08:00
|
|
|
#include "llvm/ADT/DenseSet.h"
|
2017-08-08 08:47:13 +08:00
|
|
|
#include "llvm/ADT/STLExtras.h"
|
|
|
|
#include "llvm/ADT/SmallSet.h"
|
|
|
|
#include "llvm/ADT/SmallVector.h"
|
|
|
|
#include "llvm/CodeGen/MachineBasicBlock.h"
|
2016-11-29 08:46:46 +08:00
|
|
|
#include "llvm/CodeGen/MachineDominators.h"
|
2017-08-08 08:47:13 +08:00
|
|
|
#include "llvm/CodeGen/MachineFunction.h"
|
2013-08-07 07:08:28 +08:00
|
|
|
#include "llvm/CodeGen/MachineFunctionPass.h"
|
2017-08-08 08:47:13 +08:00
|
|
|
#include "llvm/CodeGen/MachineInstr.h"
|
2013-11-14 07:36:37 +08:00
|
|
|
#include "llvm/CodeGen/MachineInstrBuilder.h"
|
2017-08-08 08:47:13 +08:00
|
|
|
#include "llvm/CodeGen/MachineOperand.h"
|
2013-08-07 07:08:28 +08:00
|
|
|
#include "llvm/CodeGen/MachineRegisterInfo.h"
|
2017-11-17 09:07:10 +08:00
|
|
|
#include "llvm/CodeGen/TargetRegisterInfo.h"
|
2017-08-08 08:47:13 +08:00
|
|
|
#include "llvm/Pass.h"
|
|
|
|
#include "llvm/Support/CodeGen.h"
|
|
|
|
#include "llvm/Support/CommandLine.h"
|
2013-11-14 07:36:37 +08:00
|
|
|
#include "llvm/Support/Debug.h"
|
2013-11-15 07:24:09 +08:00
|
|
|
#include "llvm/Support/raw_ostream.h"
|
2013-08-07 07:08:28 +08:00
|
|
|
#include "llvm/Target/TargetMachine.h"
|
2017-08-08 08:47:13 +08:00
|
|
|
#include <cassert>
|
|
|
|
#include <cstdint>
|
|
|
|
#include <iterator>
|
|
|
|
#include <list>
|
|
|
|
#include <map>
|
|
|
|
#include <tuple>
|
|
|
|
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "si-fix-sgpr-copies"

static cl::opt<bool> EnableM0Merge(
  "amdgpu-enable-merge-m0",
  cl::desc("Merge and hoist M0 initializations"),
  cl::init(false));
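
// EnableM0Merge is off by default; tools that parse LLVM's command-line
// options (for example, llc) can turn it on with -amdgpu-enable-merge-m0.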

namespace {

class SIFixSGPRCopies : public MachineFunctionPass {
  MachineDominatorTree *MDT;

public:
  static char ID;

  SIFixSGPRCopies() : MachineFunctionPass(ID) {}

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override { return "SI Fix SGPR copies"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<MachineDominatorTree>();
    AU.addPreserved<MachineDominatorTree>();
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // end anonymous namespace

INITIALIZE_PASS_BEGIN(SIFixSGPRCopies, DEBUG_TYPE,
                      "SI Fix SGPR copies", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_END(SIFixSGPRCopies, DEBUG_TYPE,
                    "SI Fix SGPR copies", false, false)

char SIFixSGPRCopies::ID = 0;

char &llvm::SIFixSGPRCopiesID = SIFixSGPRCopies::ID;

FunctionPass *llvm::createSIFixSGPRCopiesPass() {
  return new SIFixSGPRCopies();
}
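
// Returns true if any register operand of MI is a virtual register whose
// register class contains VGPRs.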
static bool hasVGPROperands(const MachineInstr &MI, const SIRegisterInfo *TRI) {
  const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    if (!MI.getOperand(i).isReg() ||
        !TargetRegisterInfo::isVirtualRegister(MI.getOperand(i).getReg()))
      continue;

    if (TRI->hasVGPRs(MRI.getRegClass(MI.getOperand(i).getReg())))
      return true;
  }
  return false;
}
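
// Returns the {source, destination} register classes of Copy, falling back to
// the physical register class when an operand is not a virtual register.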
static std::pair<const TargetRegisterClass *, const TargetRegisterClass *>
getCopyRegClasses(const MachineInstr &Copy,
                  const SIRegisterInfo &TRI,
                  const MachineRegisterInfo &MRI) {
  unsigned DstReg = Copy.getOperand(0).getReg();
  unsigned SrcReg = Copy.getOperand(1).getReg();

  const TargetRegisterClass *SrcRC =
    TargetRegisterInfo::isVirtualRegister(SrcReg) ?
    MRI.getRegClass(SrcReg) :
    TRI.getPhysRegClass(SrcReg);

  // We don't really care about the subregister here.
  // SrcRC = TRI.getSubRegClass(SrcRC, Copy.getOperand(1).getSubReg());

  const TargetRegisterClass *DstRC =
    TargetRegisterInfo::isVirtualRegister(DstReg) ?
    MRI.getRegClass(DstReg) :
    TRI.getPhysRegClass(DstReg);

  return std::make_pair(SrcRC, DstRC);
}
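
// A copy is VGPR -> SGPR if its source class contains VGPRs and its
// destination class is an SGPR class. VReg_1 (used for i1 values) sources are
// excluded; those copies are lowered separately (see SILowerI1Copies).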
static bool isVGPRToSGPRCopy(const TargetRegisterClass *SrcRC,
                             const TargetRegisterClass *DstRC,
                             const SIRegisterInfo &TRI) {
  return SrcRC != &AMDGPU::VReg_1RegClass && TRI.isSGPRClass(DstRC) &&
         TRI.hasVGPRs(SrcRC);
}
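
// A copy is SGPR -> VGPR if its source class is an SGPR class and its
// destination class contains VGPRs. VReg_1 destinations are excluded.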
static bool isSGPRToVGPRCopy(const TargetRegisterClass *SrcRC,
                             const TargetRegisterClass *DstRC,
                             const SIRegisterInfo &TRI) {
  return DstRC != &AMDGPU::VReg_1RegClass && TRI.isSGPRClass(SrcRC) &&
         TRI.hasVGPRs(DstRC);
}
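
// For an SGPR -> VGPR copy, try instead to keep the destination in an SGPR
// class: this succeeds only when every non-debug use of the destination
// (other than the copy itself) is in the same block, is not a
// target-independent opcode, and remains legal with an SGPR in that operand.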
static bool tryChangeVGPRtoSGPRinCopy(MachineInstr &MI,
                                       const SIRegisterInfo *TRI,
                                       const SIInstrInfo *TII) {
  MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
  auto &Src = MI.getOperand(1);
  unsigned DstReg = MI.getOperand(0).getReg();
  unsigned SrcReg = Src.getReg();
  if (!TargetRegisterInfo::isVirtualRegister(SrcReg) ||
      !TargetRegisterInfo::isVirtualRegister(DstReg))
    return false;

  for (const auto &MO : MRI.reg_nodbg_operands(DstReg)) {
    const auto *UseMI = MO.getParent();
    if (UseMI == &MI)
      continue;
    if (MO.isDef() || UseMI->getParent() != MI.getParent() ||
        UseMI->getOpcode() <= TargetOpcode::GENERIC_OP_END ||
        !TII->isOperandLegal(*UseMI, UseMI->getOperandNo(&MO), &Src))
      return false;
  }
  // Change VGPR to SGPR destination.
  MRI.setRegClass(DstReg, TRI->getEquivalentSGPRClass(MRI.getRegClass(DstReg)));
  return true;
}

// Distribute an SGPR->VGPR copy of a REG_SEQUENCE into a VGPR REG_SEQUENCE.
//
//  SGPRx = ...
//  SGPRy = REG_SEQUENCE SGPRx, sub0 ...
//  VGPRz = COPY SGPRy
//
// ==>
//
//  VGPRx = COPY SGPRx
//  VGPRz = REG_SEQUENCE VGPRx, sub0
//
// This exposes immediate folding opportunities when materializing 64-bit
// immediates.
static bool foldVGPRCopyIntoRegSequence(MachineInstr &MI,
                                        const SIRegisterInfo *TRI,
                                        const SIInstrInfo *TII,
                                        MachineRegisterInfo &MRI) {
  assert(MI.isRegSequence());

  unsigned DstReg = MI.getOperand(0).getReg();
  if (!TRI->isSGPRClass(MRI.getRegClass(DstReg)))
    return false;

  if (!MRI.hasOneUse(DstReg))
    return false;

  MachineInstr &CopyUse = *MRI.use_instr_begin(DstReg);
  if (!CopyUse.isCopy())
    return false;

  // It is illegal to have vreg inputs to a physreg defining reg_sequence.
  if (TargetRegisterInfo::isPhysicalRegister(CopyUse.getOperand(0).getReg()))
    return false;

  const TargetRegisterClass *SrcRC, *DstRC;
  std::tie(SrcRC, DstRC) = getCopyRegClasses(CopyUse, *TRI, MRI);

  if (!isSGPRToVGPRCopy(SrcRC, DstRC, *TRI))
    return false;

  if (tryChangeVGPRtoSGPRinCopy(CopyUse, TRI, TII))
    return true;

  // TODO: Could have multiple extracts?
  unsigned SubReg = CopyUse.getOperand(1).getSubReg();
  if (SubReg != AMDGPU::NoSubRegister)
    return false;

  MRI.setRegClass(DstReg, DstRC);

  // SGPRx = ...
  // SGPRy = REG_SEQUENCE SGPRx, sub0 ...
  // VGPRz = COPY SGPRy

  // =>
  // VGPRx = COPY SGPRx
  // VGPRz = REG_SEQUENCE VGPRx, sub0

  MI.getOperand(0).setReg(CopyUse.getOperand(0).getReg());

  for (unsigned I = 1, N = MI.getNumOperands(); I != N; I += 2) {
    unsigned SrcReg = MI.getOperand(I).getReg();
    unsigned SrcSubReg = MI.getOperand(I).getSubReg();

    const TargetRegisterClass *SrcRC = MRI.getRegClass(SrcReg);
    assert(TRI->isSGPRClass(SrcRC) &&
           "Expected SGPR REG_SEQUENCE to only have SGPR inputs");

    SrcRC = TRI->getSubRegClass(SrcRC, SrcSubReg);
    const TargetRegisterClass *NewSrcRC = TRI->getEquivalentVGPRClass(SrcRC);

    unsigned TmpReg = MRI.createVirtualRegister(NewSrcRC);

    BuildMI(*MI.getParent(), &MI, MI.getDebugLoc(), TII->get(AMDGPU::COPY),
            TmpReg)
        .add(MI.getOperand(I));

    MI.getOperand(I).setReg(TmpReg);
  }

  CopyUse.eraseFromParent();
  return true;
}
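
// Returns true if any incoming value of PHI lives in a register class that
// contains VGPRs.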
static bool phiHasVGPROperands(const MachineInstr &PHI,
                               const MachineRegisterInfo &MRI,
                               const SIRegisterInfo *TRI,
                               const SIInstrInfo *TII) {
  for (unsigned i = 1; i < PHI.getNumOperands(); i += 2) {
    unsigned Reg = PHI.getOperand(i).getReg();
    if (TRI->hasVGPRs(MRI.getRegClass(Reg)))
      return true;
  }
  return false;
}
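
// Returns true if any incoming value of PHI is defined, possibly through a
// chain of other PHIs, by an SI_IF_BREAK instruction. Visited guards against
// cycles in the PHI graph.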
static bool phiHasBreakDef(const MachineInstr &PHI,
                           const MachineRegisterInfo &MRI,
                           SmallSet<unsigned, 8> &Visited) {
  for (unsigned i = 1; i < PHI.getNumOperands(); i += 2) {
    unsigned Reg = PHI.getOperand(i).getReg();
    if (Visited.count(Reg))
      continue;

    Visited.insert(Reg);

    MachineInstr *DefInstr = MRI.getVRegDef(Reg);
    switch (DefInstr->getOpcode()) {
    default:
      break;
    case AMDGPU::SI_IF_BREAK:
      return true;
    case AMDGPU::PHI:
      if (phiHasBreakDef(*DefInstr, MRI, Visited))
        return true;
    }
  }
  return false;
}
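
// Returns true if any terminator of MBB modifies EXEC; the callers use this
// as a proxy for a divergent branch.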
static bool hasTerminatorThatModifiesExec(const MachineBasicBlock &MBB,
                                          const TargetRegisterInfo &TRI) {
  for (MachineBasicBlock::const_iterator I = MBB.getFirstTerminator(),
       E = MBB.end(); I != E; ++I) {
    if (I->modifiesRegister(AMDGPU::EXEC, &TRI))
      return true;
  }
  return false;
}
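
// Returns true if Copy is a plain COPY of the immediate materialized by
// MoveImm, and reports the scalar move opcode (SMovOp) and immediate value
// (Imm) to use when rewriting the copy as a scalar move.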
static bool isSafeToFoldImmIntoCopy(const MachineInstr *Copy,
                                    const MachineInstr *MoveImm,
                                    const SIInstrInfo *TII,
                                    unsigned &SMovOp,
                                    int64_t &Imm) {
  if (Copy->getOpcode() != AMDGPU::COPY)
    return false;

  if (!MoveImm->isMoveImmediate())
    return false;

  const MachineOperand *ImmOp =
      TII->getNamedOperand(*MoveImm, AMDGPU::OpName::src0);
  if (!ImmOp->isImm())
    return false;

  // FIXME: Handle copies with sub-regs.
  if (Copy->getOperand(0).getSubReg())
    return false;

  switch (MoveImm->getOpcode()) {
  default:
    return false;
  case AMDGPU::V_MOV_B32_e32:
    SMovOp = AMDGPU::S_MOV_B32;
    break;
  case AMDGPU::V_MOV_B64_PSEUDO:
    SMovOp = AMDGPU::S_MOV_B64;
    break;
  }
  Imm = ImmOp->getImm();
  return true;
}
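
// Worklist search over the predecessors of MBB, stopping at CutOff. Returns
// true if Predicate holds for any visited block.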
template <class UnaryPredicate>
bool searchPredecessors(const MachineBasicBlock *MBB,
                        const MachineBasicBlock *CutOff,
                        UnaryPredicate Predicate) {
  if (MBB == CutOff)
    return false;

  DenseSet<const MachineBasicBlock *> Visited;
  SmallVector<MachineBasicBlock *, 4> Worklist(MBB->pred_begin(),
                                               MBB->pred_end());

  while (!Worklist.empty()) {
    MachineBasicBlock *MBB = Worklist.pop_back_val();

    if (!Visited.insert(MBB).second)
      continue;
    if (MBB == CutOff)
      continue;
    if (Predicate(MBB))
      return true;

    Worklist.append(MBB->pred_begin(), MBB->pred_end());
  }

  return false;
}
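
// Returns true if any (transitive) predecessor of MBB has a terminator that
// writes EXEC, which the pass treats as a divergent branch into MBB.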
static bool predsHasDivergentTerminator(MachineBasicBlock *MBB,
                                        const TargetRegisterInfo *TRI) {
  return searchPredecessors(MBB, nullptr, [TRI](MachineBasicBlock *MBB) {
           return hasTerminatorThatModifiesExec(*MBB, *TRI); });
}

// Checks if there is a potential path from the instruction From to the
// instruction To. If CutOff is specified and it sits on that path, we ignore
// the portion of the path above CutOff and report the destination as not
// reachable.
static bool isReachable(const MachineInstr *From,
                        const MachineInstr *To,
                        const MachineBasicBlock *CutOff,
                        MachineDominatorTree &MDT) {
  // If either From block dominates To block or instructions are in the same
  // block and From is higher.
  if (MDT.dominates(From, To))
    return true;

  const MachineBasicBlock *MBBFrom = From->getParent();
  const MachineBasicBlock *MBBTo = To->getParent();
  if (MBBFrom == MBBTo)
    return false;

  // Instructions are in different blocks, do predecessor search.
  // We should almost never get here since we do not usually produce M0 stores
  // other than -1.
  return searchPredecessors(MBBTo, CutOff, [MBBFrom]
         (const MachineBasicBlock *MBB) { return MBB == MBBFrom; });
}

// Hoist and merge identical SGPR initializations into a common predecessor.
// This is intended to combine M0 initializations, but can work with any
// SGPR. A VGPR cannot be processed since we cannot guarantee vector
// execution.
static bool hoistAndMergeSGPRInits(unsigned Reg,
                                   const MachineRegisterInfo &MRI,
                                   MachineDominatorTree &MDT) {
  // List of inits by immediate value.
  using InitListMap = std::map<unsigned, std::list<MachineInstr *>>;
  InitListMap Inits;
  // List of clobbering instructions.
  SmallVector<MachineInstr*, 8> Clobbers;
  bool Changed = false;

  for (auto &MI : MRI.def_instructions(Reg)) {
    MachineOperand *Imm = nullptr;
    for (auto &MO: MI.operands()) {
      if ((MO.isReg() && ((MO.isDef() && MO.getReg() != Reg) || !MO.isDef())) ||
          (!MO.isImm() && !MO.isReg()) || (MO.isImm() && Imm)) {
        Imm = nullptr;
        break;
      } else if (MO.isImm())
        Imm = &MO;
    }
    if (Imm)
      Inits[Imm->getImm()].push_front(&MI);
    else
      Clobbers.push_back(&MI);
  }
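
  // For each set of identical initializations, visit every pair: if one
  // dominates the other and no clobber or differing init can interfere
  // between them, erase the dominated copy; otherwise try to hoist one copy
  // to the nearest common dominator and erase the other.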
  for (auto &Init : Inits) {
    auto &Defs = Init.second;

    for (auto I1 = Defs.begin(), E = Defs.end(); I1 != E; ) {
      MachineInstr *MI1 = *I1;

      for (auto I2 = std::next(I1); I2 != E; ) {
        MachineInstr *MI2 = *I2;

        // Check any possible interference
        auto intereferes = [&](MachineBasicBlock::iterator From,
                               MachineBasicBlock::iterator To) -> bool {

          assert(MDT.dominates(&*To, &*From));

          auto interferes = [&MDT, From, To](MachineInstr* &Clobber) -> bool {
            const MachineBasicBlock *MBBFrom = From->getParent();
            const MachineBasicBlock *MBBTo = To->getParent();
            bool MayClobberFrom = isReachable(Clobber, &*From, MBBTo, MDT);
            bool MayClobberTo = isReachable(Clobber, &*To, MBBTo, MDT);
            if (!MayClobberFrom && !MayClobberTo)
              return false;
            if ((MayClobberFrom && !MayClobberTo) ||
                (!MayClobberFrom && MayClobberTo))
              return true;
            // Both can clobber, this is not an interference only if both are
            // dominated by Clobber and belong to the same block or if Clobber
            // properly dominates To, given that To >> From, so it dominates
            // both and is located in a common dominator.
            return !((MBBFrom == MBBTo &&
                      MDT.dominates(Clobber, &*From) &&
                      MDT.dominates(Clobber, &*To)) ||
                     MDT.properlyDominates(Clobber->getParent(), MBBTo));
          };

          return (llvm::any_of(Clobbers, interferes)) ||
                 (llvm::any_of(Inits, [&](InitListMap::value_type &C) {
                    return C.first != Init.first &&
                           llvm::any_of(C.second, interferes);
                  }));
        };

        if (MDT.dominates(MI1, MI2)) {
          if (!intereferes(MI2, MI1)) {
            LLVM_DEBUG(dbgs()
                       << "Erasing from "
                       << printMBBReference(*MI2->getParent()) << " " << *MI2);
            MI2->eraseFromParent();
            Defs.erase(I2++);
            Changed = true;
            continue;
          }
        } else if (MDT.dominates(MI2, MI1)) {
          if (!intereferes(MI1, MI2)) {
            LLVM_DEBUG(dbgs()
                       << "Erasing from "
                       << printMBBReference(*MI1->getParent()) << " " << *MI1);
            MI1->eraseFromParent();
            Defs.erase(I1++);
            Changed = true;
            break;
          }
        } else {
          auto *MBB = MDT.findNearestCommonDominator(MI1->getParent(),
                                                     MI2->getParent());
          if (!MBB) {
            ++I2;
            continue;
          }

          MachineBasicBlock::iterator I = MBB->getFirstNonPHI();
          if (!intereferes(MI1, I) && !intereferes(MI2, I)) {
            LLVM_DEBUG(dbgs()
                       << "Erasing from "
                       << printMBBReference(*MI1->getParent()) << " " << *MI1
                       << "and moving from "
                       << printMBBReference(*MI2->getParent()) << " to "
                       << printMBBReference(*I->getParent()) << " " << *MI2);
            I->getParent()->splice(I, MI2->getParent(), MI2);
            MI1->eraseFromParent();
            Defs.erase(I1++);
            Changed = true;
            break;
          }
        }
        ++I2;
      }
      ++I1;
    }
  }

  if (Changed)
    MRI.clearKillFlags(Reg);

  return Changed;
}
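
// Pass entry point. Walks every instruction in the function and legalizes
// SGPR-producing COPY, WQM, WWM, PHI, REG_SEQUENCE and INSERT_SUBREG
// instructions as described in the file comment, then optionally hoists and
// merges M0 initializations.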
bool SIFixSGPRCopies::runOnMachineFunction(MachineFunction &MF) {
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const SIRegisterInfo *TRI = ST.getRegisterInfo();
  const SIInstrInfo *TII = ST.getInstrInfo();
  MDT = &getAnalysis<MachineDominatorTree>();

  SmallVector<MachineInstr *, 16> Worklist;

  for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
                                                  BI != BE; ++BI) {
    MachineBasicBlock &MBB = *BI;
    for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
         I != E; ++I) {
      MachineInstr &MI = *I;

      switch (MI.getOpcode()) {
      default:
        continue;
      case AMDGPU::COPY:
      case AMDGPU::WQM:
      case AMDGPU::WWM: {
        // If the destination register is a physical register there isn't really
        // much we can do to fix this.
        if (!TargetRegisterInfo::isVirtualRegister(MI.getOperand(0).getReg()))
          continue;

        const TargetRegisterClass *SrcRC, *DstRC;
        std::tie(SrcRC, DstRC) = getCopyRegClasses(MI, *TRI, MRI);
        if (isVGPRToSGPRCopy(SrcRC, DstRC, *TRI)) {
          unsigned SrcReg = MI.getOperand(1).getReg();
          if (!TargetRegisterInfo::isVirtualRegister(SrcReg)) {
            TII->moveToVALU(MI, MDT);
            break;
          }

          MachineInstr *DefMI = MRI.getVRegDef(SrcReg);
          unsigned SMovOp;
          int64_t Imm;
          // If we are just copying an immediate, we can replace the copy with
          // s_mov_b32.
          if (isSafeToFoldImmIntoCopy(&MI, DefMI, TII, SMovOp, Imm)) {
            MI.getOperand(1).ChangeToImmediate(Imm);
            MI.addImplicitDefUseOperands(MF);
            MI.setDesc(TII->get(SMovOp));
            break;
          }
          TII->moveToVALU(MI, MDT);
        } else if (isSGPRToVGPRCopy(SrcRC, DstRC, *TRI)) {
          tryChangeVGPRtoSGPRinCopy(MI, TRI, TII);
        }

        break;
      }
      case AMDGPU::PHI: {
        unsigned Reg = MI.getOperand(0).getReg();
        if (!TRI->isSGPRClass(MRI.getRegClass(Reg)))
          break;

        // We don't need to fix the PHI if the common dominator of the
        // two incoming blocks terminates with a uniform branch.
        bool HasVGPROperand = phiHasVGPROperands(MI, MRI, TRI, TII);
        if (MI.getNumExplicitOperands() == 5 && !HasVGPROperand) {
          MachineBasicBlock *MBB0 = MI.getOperand(2).getMBB();
          MachineBasicBlock *MBB1 = MI.getOperand(4).getMBB();

          if (!predsHasDivergentTerminator(MBB0, TRI) &&
              !predsHasDivergentTerminator(MBB1, TRI)) {
            LLVM_DEBUG(dbgs()
                       << "Not fixing PHI for uniform branch: " << MI << '\n');
            break;
          }
        }

        // If a PHI node defines an SGPR and any of its operands are VGPRs,
        // then we need to move it to the VALU.
        //
        // Also, if a PHI node defines an SGPR and has all SGPR operands
        // we must move it to the VALU, because the SGPR operands will
        // all end up being assigned the same register, which means
        // there is a potential for a conflict if different threads take
        // different control flow paths.
        //
        // For Example:
        //
        // sgpr0 = def;
        // ...
        // sgpr1 = def;
        // ...
        // sgpr2 = PHI sgpr0, sgpr1
        // use sgpr2;
        //
        // Will Become:
        //
        // sgpr2 = def;
        // ...
        // sgpr2 = def;
        // ...
        // use sgpr2
        //
        // The one exception to this rule is when one of the operands
        // is defined by a SI_BREAK, SI_IF_BREAK, or SI_ELSE_BREAK
        // instruction. In this case, we know the program will
        // never enter the second block (the loop) without entering
        // the first block (where the condition is computed), so there
        // is no chance for values to be over-written.

        SmallSet<unsigned, 8> Visited;
        if (HasVGPROperand || !phiHasBreakDef(MI, MRI, Visited)) {
          LLVM_DEBUG(dbgs() << "Fixing PHI: " << MI);
          TII->moveToVALU(MI, MDT);
        }
        break;
      }
      case AMDGPU::REG_SEQUENCE:
        if (TRI->hasVGPRs(TII->getOpRegClass(MI, 0)) ||
            !hasVGPROperands(MI, TRI)) {
          foldVGPRCopyIntoRegSequence(MI, TRI, TII, MRI);
          continue;
        }

        LLVM_DEBUG(dbgs() << "Fixing REG_SEQUENCE: " << MI);

        TII->moveToVALU(MI, MDT);
        break;
      case AMDGPU::INSERT_SUBREG: {
        const TargetRegisterClass *DstRC, *Src0RC, *Src1RC;
        DstRC = MRI.getRegClass(MI.getOperand(0).getReg());
        Src0RC = MRI.getRegClass(MI.getOperand(1).getReg());
        Src1RC = MRI.getRegClass(MI.getOperand(2).getReg());
        if (TRI->isSGPRClass(DstRC) &&
            (TRI->hasVGPRs(Src0RC) || TRI->hasVGPRs(Src1RC))) {
          LLVM_DEBUG(dbgs() << " Fixing INSERT_SUBREG: " << MI);
          TII->moveToVALU(MI, MDT);
        }
        break;
      }
      }
    }
  }

  if (MF.getTarget().getOptLevel() > CodeGenOpt::None && EnableM0Merge)
    hoistAndMergeSGPRInits(AMDGPU::M0, MRI, *MDT);

  return true;
}
|