//===-- SIOptimizeExecMaskingPreRA.cpp ------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This pass removes redundant S_OR_B64 instructions enabling lanes in
/// the exec mask. If two SI_END_CF (lowered as S_OR_B64) come together without
/// any vector instructions between them, we can keep only the outer SI_END_CF:
/// the CFG is structured, so the exec bits of the outer end-cf statement are
/// always a superset of the exec bits of the inner one.
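///
/// A minimal illustrative sketch (not taken from a real test case):
///
///   $exec = S_OR_B64 $exec, %inner_saved_exec   (inner end-cf, removable)
///   $exec = S_OR_B64 $exec, %outer_saved_exec   (outer end-cf, kept)
///
/// Here %outer_saved_exec is a full copy of $exec taken outside the inner
/// region, so keeping only the outer S_OR_B64 re-enables at least every lane
/// the inner one would have re-enabled.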
///
/// This needs to be done before RA to eliminate the saved exec mask registers,
/// but after the register coalescer, so that there are no vector register
/// copies in between the different end-cf statements.
///
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/MachineFunctionPass.h"

using namespace llvm;

#define DEBUG_TYPE "si-optimize-exec-masking-pre-ra"

namespace {

class SIOptimizeExecMaskingPreRA : public MachineFunctionPass {
private:
  const SIRegisterInfo *TRI;
  const SIInstrInfo *TII;
  MachineRegisterInfo *MRI;

public:
  MachineBasicBlock::iterator skipIgnoreExecInsts(
    MachineBasicBlock::iterator I, MachineBasicBlock::iterator E) const;

  MachineBasicBlock::iterator skipIgnoreExecInstsTrivialSucc(
    MachineBasicBlock *&MBB,
    MachineBasicBlock::iterator It) const;

public:
  static char ID;

  SIOptimizeExecMaskingPreRA() : MachineFunctionPass(ID) {
    initializeSIOptimizeExecMaskingPreRAPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override {
    return "SI optimize exec mask operations pre-RA";
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<LiveIntervals>();
    AU.setPreservesAll();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // End anonymous namespace.

INITIALIZE_PASS_BEGIN(SIOptimizeExecMaskingPreRA, DEBUG_TYPE,
                      "SI optimize exec mask operations pre-RA", false, false)
INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
INITIALIZE_PASS_END(SIOptimizeExecMaskingPreRA, DEBUG_TYPE,
                    "SI optimize exec mask operations pre-RA", false, false)

char SIOptimizeExecMaskingPreRA::ID = 0;

char &llvm::SIOptimizeExecMaskingPreRAID = SIOptimizeExecMaskingPreRA::ID;

FunctionPass *llvm::createSIOptimizeExecMaskingPreRAPass() {
  return new SIOptimizeExecMaskingPreRA();
}

static bool isEndCF(const MachineInstr &MI, const SIRegisterInfo *TRI,
                    const GCNSubtarget &ST) {
  if (ST.isWave32()) {
    return MI.getOpcode() == AMDGPU::S_OR_B32 &&
           MI.modifiesRegister(AMDGPU::EXEC_LO, TRI);
  }

  return MI.getOpcode() == AMDGPU::S_OR_B64 &&
         MI.modifiesRegister(AMDGPU::EXEC, TRI);
}

static bool isFullExecCopy(const MachineInstr& MI, const GCNSubtarget& ST) {
  unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;

  if (MI.isCopy() && MI.getOperand(1).getReg() == Exec) {
    assert(MI.isFullCopy());
    return true;
  }

  return false;
}

static unsigned getOrNonExecReg(const MachineInstr &MI,
                                const SIInstrInfo &TII,
                                const GCNSubtarget& ST) {
  unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
  auto Op = TII.getNamedOperand(MI, AMDGPU::OpName::src1);
  if (Op->isReg() && Op->getReg() != Exec)
    return Op->getReg();
  Op = TII.getNamedOperand(MI, AMDGPU::OpName::src0);
  if (Op->isReg() && Op->getReg() != Exec)
    return Op->getReg();
  return AMDGPU::NoRegister;
}

static MachineInstr* getOrExecSource(const MachineInstr &MI,
                                     const SIInstrInfo &TII,
                                     const MachineRegisterInfo &MRI,
                                     const GCNSubtarget& ST) {
  auto SavedExec = getOrNonExecReg(MI, TII, ST);
  if (SavedExec == AMDGPU::NoRegister)
    return nullptr;
  auto SaveExecInst = MRI.getUniqueVRegDef(SavedExec);
  if (!SaveExecInst || !isFullExecCopy(*SaveExecInst, ST))
    return nullptr;
  return SaveExecInst;
}

/// Skip over instructions that don't care about the exec mask.
MachineBasicBlock::iterator SIOptimizeExecMaskingPreRA::skipIgnoreExecInsts(
  MachineBasicBlock::iterator I, MachineBasicBlock::iterator E) const {
  for ( ; I != E; ++I) {
    if (TII->mayReadEXEC(*MRI, *I))
      break;
  }

  return I;
}

// Skip to the next instruction, ignoring debug instructions, and trivial block
// boundaries (blocks that have one (typically fallthrough) successor, and the
// successor has one predecessor).
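// For example (illustrative): if the scan reaches the end of %bb.1 and %bb.1
// falls through to its single successor %bb.2, which in turn has %bb.1 as its
// only predecessor, the scan resumes at the start of %bb.2 and MBB is updated
// to point at it.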
MachineBasicBlock::iterator
SIOptimizeExecMaskingPreRA::skipIgnoreExecInstsTrivialSucc(
  MachineBasicBlock *&MBB,
  MachineBasicBlock::iterator It) const {

  do {
    It = skipIgnoreExecInsts(It, MBB->end());
    if (It != MBB->end() || MBB->succ_size() != 1)
      break;

    // If there is one trivial successor, advance to the next block.
    MachineBasicBlock *Succ = *MBB->succ_begin();

    // TODO: Is this really necessary?
    if (!MBB->isLayoutSuccessor(Succ))
      break;

    It = Succ->begin();
    MBB = Succ;
  } while (true);

  return It;
}

// Optimize sequence
//    %sel = V_CNDMASK_B32_e64 0, 1, %cc
//    %cmp = V_CMP_NE_U32 1, %sel
//    $vcc = S_AND_B64 $exec, %cmp
//    S_CBRANCH_VCC[N]Z
// =>
//    $vcc = S_ANDN2_B64 $exec, %cc
//    S_CBRANCH_VCC[N]Z
//
// This is the negation pattern inserted by DAGCombiner::visitBRCOND() in
// rebuildSetCC(). We start the search from the S_CBRANCH to avoid an
// exhaustive search, but only the first 3 instructions are really needed.
// The S_AND_B64 with exec is a required part of the pattern since
// V_CNDMASK_B32 writes zeroes for inactive lanes.
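//
// Put differently (an illustrative reading): the whole sequence computes
// $vcc = $exec & ~%cc, which is exactly what the single S_ANDN2 produces.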
//
// Returns the %cc register on success.
static unsigned optimizeVcndVcmpPair(MachineBasicBlock &MBB,
                                     const GCNSubtarget &ST,
                                     MachineRegisterInfo &MRI,
                                     LiveIntervals *LIS) {
  const SIRegisterInfo *TRI = ST.getRegisterInfo();
  const SIInstrInfo *TII = ST.getInstrInfo();
  bool Wave32 = ST.isWave32();
  const unsigned AndOpc = Wave32 ? AMDGPU::S_AND_B32 : AMDGPU::S_AND_B64;
  const unsigned Andn2Opc = Wave32 ? AMDGPU::S_ANDN2_B32 : AMDGPU::S_ANDN2_B64;
  const unsigned CondReg = Wave32 ? AMDGPU::VCC_LO : AMDGPU::VCC;
  const unsigned ExecReg = Wave32 ? AMDGPU::EXEC_LO : AMDGPU::EXEC;

  auto I = llvm::find_if(MBB.terminators(), [](const MachineInstr &MI) {
                           unsigned Opc = MI.getOpcode();
                           return Opc == AMDGPU::S_CBRANCH_VCCZ ||
                                  Opc == AMDGPU::S_CBRANCH_VCCNZ; });
  if (I == MBB.terminators().end())
    return AMDGPU::NoRegister;

  auto *And = TRI->findReachingDef(CondReg, AMDGPU::NoSubRegister,
                                   *I, MRI, LIS);
  if (!And || And->getOpcode() != AndOpc ||
      !And->getOperand(1).isReg() || !And->getOperand(2).isReg())
    return AMDGPU::NoRegister;

  MachineOperand *AndCC = &And->getOperand(1);
  unsigned CmpReg = AndCC->getReg();
  unsigned CmpSubReg = AndCC->getSubReg();
  if (CmpReg == ExecReg) {
    AndCC = &And->getOperand(2);
    CmpReg = AndCC->getReg();
    CmpSubReg = AndCC->getSubReg();
  } else if (And->getOperand(2).getReg() != ExecReg) {
    return AMDGPU::NoRegister;
  }

  auto *Cmp = TRI->findReachingDef(CmpReg, CmpSubReg, *And, MRI, LIS);
  if (!Cmp || !(Cmp->getOpcode() == AMDGPU::V_CMP_NE_U32_e32 ||
                Cmp->getOpcode() == AMDGPU::V_CMP_NE_U32_e64) ||
      Cmp->getParent() != And->getParent())
    return AMDGPU::NoRegister;

  MachineOperand *Op1 = TII->getNamedOperand(*Cmp, AMDGPU::OpName::src0);
  MachineOperand *Op2 = TII->getNamedOperand(*Cmp, AMDGPU::OpName::src1);
  if (Op1->isImm() && Op2->isReg())
    std::swap(Op1, Op2);
  if (!Op1->isReg() || !Op2->isImm() || Op2->getImm() != 1)
    return AMDGPU::NoRegister;

  unsigned SelReg = Op1->getReg();
  auto *Sel = TRI->findReachingDef(SelReg, Op1->getSubReg(), *Cmp, MRI, LIS);
  if (!Sel || Sel->getOpcode() != AMDGPU::V_CNDMASK_B32_e64)
    return AMDGPU::NoRegister;

  if (TII->hasModifiersSet(*Sel, AMDGPU::OpName::src0_modifiers) ||
      TII->hasModifiersSet(*Sel, AMDGPU::OpName::src1_modifiers))
    return AMDGPU::NoRegister;

  Op1 = TII->getNamedOperand(*Sel, AMDGPU::OpName::src0);
  Op2 = TII->getNamedOperand(*Sel, AMDGPU::OpName::src1);
  MachineOperand *CC = TII->getNamedOperand(*Sel, AMDGPU::OpName::src2);
  if (!Op1->isImm() || !Op2->isImm() || !CC->isReg() ||
      Op1->getImm() != 0 || Op2->getImm() != 1)
    return AMDGPU::NoRegister;

  LLVM_DEBUG(dbgs() << "Folding sequence:\n\t" << *Sel << '\t'
                    << *Cmp << '\t' << *And);

  unsigned CCReg = CC->getReg();
  LIS->RemoveMachineInstrFromMaps(*And);
  MachineInstr *Andn2 = BuildMI(MBB, *And, And->getDebugLoc(),
                                TII->get(Andn2Opc), And->getOperand(0).getReg())
                            .addReg(ExecReg)
                            .addReg(CCReg, 0, CC->getSubReg());
  And->eraseFromParent();
  LIS->InsertMachineInstrInMaps(*Andn2);

  LLVM_DEBUG(dbgs() << "=>\n\t" << *Andn2 << '\n');

  // Try to remove the compare. The cmp value should not be used in between
  // the cmp and the s_and_b64 if it is VCC, or should simply be unused if it
  // is any other register.
  if ((TargetRegisterInfo::isVirtualRegister(CmpReg) &&
       MRI.use_nodbg_empty(CmpReg)) ||
      (CmpReg == CondReg &&
       std::none_of(std::next(Cmp->getIterator()), Andn2->getIterator(),
                    [&](const MachineInstr &MI) {
                      return MI.readsRegister(CondReg, TRI); }))) {
    LLVM_DEBUG(dbgs() << "Erasing: " << *Cmp << '\n');

    LIS->RemoveMachineInstrFromMaps(*Cmp);
    Cmp->eraseFromParent();

    // Try to remove v_cndmask_b32.
    if (TargetRegisterInfo::isVirtualRegister(SelReg) &&
        MRI.use_nodbg_empty(SelReg)) {
      LLVM_DEBUG(dbgs() << "Erasing: " << *Sel << '\n');

      LIS->RemoveMachineInstrFromMaps(*Sel);
      Sel->eraseFromParent();
    }
  }

  return CCReg;
}

bool SIOptimizeExecMaskingPreRA::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(MF.getFunction()))
    return false;

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  TRI = ST.getRegisterInfo();
  TII = ST.getInstrInfo();
  MRI = &MF.getRegInfo();

  MachineRegisterInfo &MRI = MF.getRegInfo();
  LiveIntervals *LIS = &getAnalysis<LiveIntervals>();
  DenseSet<unsigned> RecalcRegs({AMDGPU::EXEC_LO, AMDGPU::EXEC_HI});
  unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
  bool Changed = false;

  for (MachineBasicBlock &MBB : MF) {

    if (unsigned Reg = optimizeVcndVcmpPair(MBB, ST, MRI, LIS)) {
      RecalcRegs.insert(Reg);
      RecalcRegs.insert(AMDGPU::VCC_LO);
      RecalcRegs.insert(AMDGPU::VCC_HI);
      RecalcRegs.insert(AMDGPU::SCC);
      Changed = true;
    }

    // Try to remove unneeded instructions before s_endpgm.
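    // For example (illustrative), a trailing exec-mask restore or a dead VALU
    // result that nothing can observe before the program ends can be erased
    // here, walking backwards from the S_ENDPGM.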
    if (MBB.succ_empty()) {
      if (MBB.empty())
        continue;

      // Skip this if the endpgm has any implicit uses, otherwise we would need
      // to be careful to update / remove them.
      // S_ENDPGM always has a single imm operand that is not used other than
      // to end up in the encoding.
      MachineInstr &Term = MBB.back();
      if (Term.getOpcode() != AMDGPU::S_ENDPGM || Term.getNumOperands() != 1)
        continue;

      SmallVector<MachineBasicBlock*, 4> Blocks({&MBB});

      while (!Blocks.empty()) {
        auto CurBB = Blocks.pop_back_val();
        auto I = CurBB->rbegin(), E = CurBB->rend();
        if (I != E) {
          if (I->isUnconditionalBranch() || I->getOpcode() == AMDGPU::S_ENDPGM)
            ++I;
          else if (I->isBranch())
            continue;
        }

        while (I != E) {
          if (I->isDebugInstr()) {
            I = std::next(I);
            continue;
          }

          if (I->mayStore() || I->isBarrier() || I->isCall() ||
              I->hasUnmodeledSideEffects() || I->hasOrderedMemoryRef())
            break;

          LLVM_DEBUG(dbgs()
                     << "Removing no effect instruction: " << *I << '\n');

          for (auto &Op : I->operands()) {
            if (Op.isReg())
              RecalcRegs.insert(Op.getReg());
          }

          auto Next = std::next(I);
          LIS->RemoveMachineInstrFromMaps(*I);
          I->eraseFromParent();
          I = Next;

          Changed = true;
        }

        if (I != E)
          continue;

        // Try to ascend predecessors.
        for (auto *Pred : CurBB->predecessors()) {
          if (Pred->succ_size() == 1)
            Blocks.push_back(Pred);
        }
      }
      continue;
    }

    // Try to collapse adjacent endifs.
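    // Illustrative shape of what is matched below (not from a real test):
    //   Lead:      $exec = S_OR_B64 $exec, %inner_saved_exec   (head of MBB)
    //   NextLead:  $exec = S_OR_B64 $exec, %outer_saved_exec   (reached by
    //              skipping exec-independent instructions / trivial blocks)
    // If NextLead's non-exec source is a full copy of $exec, Lead is
    // redundant and is removed.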
    auto E = MBB.end();
    auto Lead = skipDebugInstructionsForward(MBB.begin(), E);
    if (MBB.succ_size() != 1 || Lead == E || !isEndCF(*Lead, TRI, ST))
      continue;

    MachineBasicBlock *TmpMBB = &MBB;
    auto NextLead = skipIgnoreExecInstsTrivialSucc(TmpMBB, std::next(Lead));
    if (NextLead == TmpMBB->end() || !isEndCF(*NextLead, TRI, ST) ||
        !getOrExecSource(*NextLead, *TII, MRI, ST))
      continue;

    LLVM_DEBUG(dbgs() << "Redundant EXEC = S_OR_B64 found: " << *Lead << '\n');

    auto SaveExec = getOrExecSource(*Lead, *TII, MRI, ST);
    unsigned SaveExecReg = getOrNonExecReg(*Lead, *TII, ST);
    for (auto &Op : Lead->operands()) {
      if (Op.isReg())
        RecalcRegs.insert(Op.getReg());
    }

    LIS->RemoveMachineInstrFromMaps(*Lead);
    Lead->eraseFromParent();
    if (SaveExecReg) {
      LIS->removeInterval(SaveExecReg);
      LIS->createAndComputeVirtRegInterval(SaveExecReg);
    }

    Changed = true;

    // If the only use of the saved exec in the removed instruction is
    // S_AND_B64, fold the copy now.
    if (!SaveExec || !SaveExec->isFullCopy())
      continue;

    unsigned SavedExec = SaveExec->getOperand(0).getReg();
    bool SafeToReplace = true;
    for (auto& U : MRI.use_nodbg_instructions(SavedExec)) {
      if (U.getParent() != SaveExec->getParent()) {
        SafeToReplace = false;
        break;
      }

      LLVM_DEBUG(dbgs() << "Redundant EXEC COPY: " << *SaveExec << '\n');
    }

    if (SafeToReplace) {
      LIS->RemoveMachineInstrFromMaps(*SaveExec);
      SaveExec->eraseFromParent();
      MRI.replaceRegWith(SavedExec, Exec);
      LIS->removeInterval(SavedExec);
    }
  }

  if (Changed) {
    for (auto Reg : RecalcRegs) {
      if (TargetRegisterInfo::isVirtualRegister(Reg)) {
        LIS->removeInterval(Reg);
        if (!MRI.reg_empty(Reg))
          LIS->createAndComputeVirtRegInterval(Reg);
      } else {
        LIS->removeAllRegUnitsForPhysReg(Reg);
      }
    }
  }

  return Changed;
}