//===-- SIInsertSkips.cpp - Use predicates for control flow ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This pass inserts branches on the 0 exec mask over divergent branches
/// when it's expected that jumping over the untaken control flow will
/// be cheaper than having every workitem no-op through it.
//
//===----------------------------------------------------------------------===//
|
#include "AMDGPU.h"
|
|
|
|
#include "AMDGPUSubtarget.h"
|
|
|
|
#include "SIInstrInfo.h"
|
|
|
|
#include "SIMachineFunctionInfo.h"
|
AMDGPU: Remove #include "MCTargetDesc/AMDGPUMCTargetDesc.h" from common headers
Summary:
MCTargetDesc/AMDGPUMCTargetDesc.h contains enums for all the instuction
and register defintions, which are huge so we only want to include
them where needed.
This will also make it easier if we want to split the R600 and GCN
definitions into separate tablegenerated files.
I was unable to remove AMDGPUMCTargetDesc.h from SIMachineFunctionInfo.h
because it uses some enums from the header to initialize default values
for the SIMachineFunction class, so I ended up having to remove includes of
SIMachineFunctionInfo.h from headers too.
Reviewers: arsenm, nhaehnle
Reviewed By: nhaehnle
Subscribers: MatzeB, kzhuravl, wdng, yaxunl, dstuttard, tpr, t-tye, javed.absar, llvm-commits
Differential Revision: https://reviews.llvm.org/D46272
llvm-svn: 332930
2018-05-22 10:03:23 +08:00
|
|
|
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
|
2017-01-21 08:53:49 +08:00
|
|
|
#include "llvm/ADT/SmallVector.h"
|
|
|
|
#include "llvm/ADT/StringRef.h"
|
|
|
|
#include "llvm/CodeGen/MachineBasicBlock.h"
|
2016-08-23 03:33:16 +08:00
|
|
|
#include "llvm/CodeGen/MachineFunction.h"
|
|
|
|
#include "llvm/CodeGen/MachineFunctionPass.h"
|
2017-01-21 08:53:49 +08:00
|
|
|
#include "llvm/CodeGen/MachineInstr.h"
|
2016-08-23 03:33:16 +08:00
|
|
|
#include "llvm/CodeGen/MachineInstrBuilder.h"
|
2017-01-21 08:53:49 +08:00
|
|
|
#include "llvm/CodeGen/MachineOperand.h"
|
|
|
|
#include "llvm/IR/CallingConv.h"
|
|
|
|
#include "llvm/IR/DebugLoc.h"
|
2016-08-23 03:33:16 +08:00
|
|
|
#include "llvm/MC/MCAsmInfo.h"
|
2017-01-21 08:53:49 +08:00
|
|
|
#include "llvm/Pass.h"
|
|
|
|
#include "llvm/Support/CommandLine.h"
|
|
|
|
#include "llvm/Target/TargetMachine.h"
|
|
|
|
#include <cassert>
|
|
|
|
#include <cstdint>
|
|
|
|
#include <iterator>
|
2016-08-23 03:33:16 +08:00
|
|
|
|
|
|
|
using namespace llvm;

#define DEBUG_TYPE "si-insert-skips"

// Tunable cutoff: a divergent region shorter than this many instructions is
// cheaper to predicate through than to branch around, so no skip is inserted.
static cl::opt<unsigned> SkipThresholdFlag(
  "amdgpu-skip-threshold",
  cl::desc("Number of instructions before jumping over divergent control flow"),
  cl::init(12), cl::Hidden);
|
|
|
|
|
2017-01-21 08:53:49 +08:00
|
|
|
namespace {

/// Machine-function pass that inserts S_CBRANCH_EXECZ branches over divergent
/// regions whose cost exceeds SkipThreshold, and lowers SI_KILL_* terminators.
class SIInsertSkips : public MachineFunctionPass {
private:
  // Cached subtarget objects, set up in runOnMachineFunction.
  const SIRegisterInfo *TRI = nullptr;
  const SIInstrInfo *TII = nullptr;
  // Instruction-count cutoff copied from the SkipThresholdFlag cl::opt.
  unsigned SkipThreshold = 0;

  /// Return true if branching from \p From over the region ending at \p To
  /// is profitable (or required for correctness).
  bool shouldSkip(const MachineBasicBlock &From,
                  const MachineBasicBlock &To) const;

  /// Insert an early-exit (null export + endpgm) block after a kill when the
  /// whole wave may be dead. Returns true if a block was inserted.
  bool skipIfDead(MachineInstr &MI, MachineBasicBlock &NextBB);

  /// Lower a SI_KILL_*_TERMINATOR pseudo into real EXEC-mask updates.
  void kill(MachineInstr &MI);

  /// Create and insert a new empty block immediately after \p MBB.
  MachineBasicBlock *insertSkipBlock(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator I) const;

  /// Insert an S_CBRANCH_EXECZ after a SI_MASK_BRANCH when profitable.
  /// Returns true if a branch was inserted.
  bool skipMaskBranch(MachineInstr &MI, MachineBasicBlock &MBB);

  /// Peephole: rewrite S_CBRANCH_VCC[N]Z fed by "VCC = S_AND_B64 exec, -1"
  /// into S_CBRANCH_EXEC[N]Z.
  bool optimizeVccBranch(MachineInstr &MI) const;

public:
  static char ID;

  SIInsertSkips() : MachineFunctionPass(ID) {}

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override {
    return "SI insert s_cbranch_execz instructions";
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // end anonymous namespace
|
2016-08-23 03:33:16 +08:00
|
|
|
|
|
|
|
char SIInsertSkips::ID = 0;

INITIALIZE_PASS(SIInsertSkips, DEBUG_TYPE,
                "SI insert s_cbranch_execz instructions", false, false)

// Exported handle so the target pass pipeline can reference this pass by ID.
char &llvm::SIInsertSkipsPassID = SIInsertSkips::ID;
|
|
|
|
|
|
|
|
static bool opcodeEmitsNoInsts(unsigned Opc) {
|
|
|
|
switch (Opc) {
|
|
|
|
case TargetOpcode::IMPLICIT_DEF:
|
|
|
|
case TargetOpcode::KILL:
|
|
|
|
case TargetOpcode::BUNDLE:
|
|
|
|
case TargetOpcode::CFI_INSTRUCTION:
|
|
|
|
case TargetOpcode::EH_LABEL:
|
|
|
|
case TargetOpcode::GC_LABEL:
|
|
|
|
case TargetOpcode::DBG_VALUE:
|
|
|
|
return true;
|
|
|
|
default:
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Decide whether to insert a skip branch from \p From over the layout range
/// ending at \p To. Returns true when the region is expensive enough (>=
/// SkipThreshold countable instructions) or contains instructions that must
/// not execute with EXEC = 0.
bool SIInsertSkips::shouldSkip(const MachineBasicBlock &From,
                               const MachineBasicBlock &To) const {
  // A block with no successors (e.g. function exit) never needs a skip.
  if (From.succ_empty())
    return false;

  unsigned NumInstr = 0;
  const MachineFunction *MF = From.getParent();

  // Walk blocks in layout order from From up to (but not including) To.
  for (MachineFunction::const_iterator MBBI(&From), ToI(&To), End = MF->end();
       MBBI != End && MBBI != ToI; ++MBBI) {
    const MachineBasicBlock &MBB = *MBBI;

    // NOTE: the NumInstr < SkipThreshold loop condition matters when
    // SkipThreshold == 0 — it prevents the forced-return below from firing.
    for (MachineBasicBlock::const_iterator I = MBB.begin(), E = MBB.end();
         NumInstr < SkipThreshold && I != E; ++I) {
      // Meta-instructions emit no code; don't count them.
      if (opcodeEmitsNoInsts(I->getOpcode()))
        continue;

      // FIXME: Since this is required for correctness, this should be inserted
      // during SILowerControlFlow.

      // When a uniform loop is inside non-uniform control flow, the branch
      // leaving the loop might be an S_CBRANCH_VCCNZ, which is never taken
      // when EXEC = 0. We should skip the loop lest it becomes infinite.
      if (I->getOpcode() == AMDGPU::S_CBRANCH_VCCNZ ||
          I->getOpcode() == AMDGPU::S_CBRANCH_VCCZ)
        return true;

      // Instructions with scalar side effects (e.g. s_sendmsg, exports) must
      // be skipped when all lanes are inactive — force the skip branch.
      if (TII->hasUnwantedEffectsWhenEXECEmpty(*I))
        return true;

      ++NumInstr;
      if (NumInstr >= SkipThreshold)
        return true;
    }
  }

  return false;
}
|
|
|
|
|
|
|
|
/// After a kill in a pixel shader, insert an early-exit block that performs a
/// null export and terminates the wavefront, reached when EXEC == 0.
/// Returns true if the skip block was inserted.
bool SIInsertSkips::skipIfDead(MachineInstr &MI, MachineBasicBlock &NextBB) {
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction *MF = MBB.getParent();

  // Only worthwhile for pixel shaders, and only when the remaining code is
  // long enough that the early exit pays off.
  if (MF->getFunction().getCallingConv() != CallingConv::AMDGPU_PS ||
      !shouldSkip(MBB, MBB.getParent()->back()))
    return false;

  MachineBasicBlock *SkipBB = insertSkipBlock(MBB, MI.getIterator());

  const DebugLoc &DL = MI.getDebugLoc();

  // If the exec mask is non-zero, skip the next two instructions
  BuildMI(&MBB, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
    .addMBB(&NextBB);

  MachineBasicBlock::iterator Insert = SkipBB->begin();

  // Exec mask is zero: Export to NULL target...
  BuildMI(*SkipBB, Insert, DL, TII->get(AMDGPU::EXP_DONE))
    .addImm(0x09) // V_008DFC_SQ_EXP_NULL
    .addReg(AMDGPU::VGPR0, RegState::Undef)
    .addReg(AMDGPU::VGPR0, RegState::Undef)
    .addReg(AMDGPU::VGPR0, RegState::Undef)
    .addReg(AMDGPU::VGPR0, RegState::Undef)
    .addImm(1)  // vm
    .addImm(0)  // compr
    .addImm(0); // en

  // ... and terminate wavefront.
  BuildMI(*SkipBB, Insert, DL, TII->get(AMDGPU::S_ENDPGM)).addImm(0);

  return true;
}
|
|
|
|
|
|
|
|
/// Lower a SI_KILL_*_TERMINATOR pseudo into real instructions that clear the
/// killed lanes from EXEC. The pseudo itself is erased by the caller.
void SIInsertSkips::kill(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  switch (MI.getOpcode()) {
  case AMDGPU::SI_KILL_F32_COND_IMM_TERMINATOR: {
    unsigned Opcode = 0;

    // The opcodes are inverted because the inline immediate has to be
    // the first operand, e.g. from "x < imm" to "imm > x"
    switch (MI.getOperand(2).getImm()) {
    case ISD::SETOEQ:
    case ISD::SETEQ:
      Opcode = AMDGPU::V_CMPX_EQ_F32_e64;
      break;
    case ISD::SETOGT:
    case ISD::SETGT:
      Opcode = AMDGPU::V_CMPX_LT_F32_e64;
      break;
    case ISD::SETOGE:
    case ISD::SETGE:
      Opcode = AMDGPU::V_CMPX_LE_F32_e64;
      break;
    case ISD::SETOLT:
    case ISD::SETLT:
      Opcode = AMDGPU::V_CMPX_GT_F32_e64;
      break;
    case ISD::SETOLE:
    case ISD::SETLE:
      Opcode = AMDGPU::V_CMPX_GE_F32_e64;
      break;
    case ISD::SETONE:
    case ISD::SETNE:
      Opcode = AMDGPU::V_CMPX_LG_F32_e64;
      break;
    case ISD::SETO:
      Opcode = AMDGPU::V_CMPX_O_F32_e64;
      break;
    case ISD::SETUO:
      Opcode = AMDGPU::V_CMPX_U_F32_e64;
      break;
    case ISD::SETUEQ:
      Opcode = AMDGPU::V_CMPX_NLG_F32_e64;
      break;
    case ISD::SETUGT:
      Opcode = AMDGPU::V_CMPX_NGE_F32_e64;
      break;
    case ISD::SETUGE:
      Opcode = AMDGPU::V_CMPX_NGT_F32_e64;
      break;
    case ISD::SETULT:
      Opcode = AMDGPU::V_CMPX_NLE_F32_e64;
      break;
    case ISD::SETULE:
      Opcode = AMDGPU::V_CMPX_NLT_F32_e64;
      break;
    case ISD::SETUNE:
      Opcode = AMDGPU::V_CMPX_NEQ_F32_e64;
      break;
    default:
      llvm_unreachable("invalid ISD:SET cond code");
    }

    assert(MI.getOperand(0).isReg());

    // Use the shorter VOP32 encoding when the compared operand is a VGPR;
    // otherwise fall back to the e64 form with explicit modifier operands.
    if (TRI->isVGPR(MBB.getParent()->getRegInfo(),
                    MI.getOperand(0).getReg())) {
      Opcode = AMDGPU::getVOPe32(Opcode);
      BuildMI(MBB, &MI, DL, TII->get(Opcode))
          .add(MI.getOperand(1))
          .add(MI.getOperand(0));
    } else {
      BuildMI(MBB, &MI, DL, TII->get(Opcode))
          .addReg(AMDGPU::VCC, RegState::Define)
          .addImm(0)  // src0 modifiers
          .add(MI.getOperand(1))
          .addImm(0)  // src1 modifiers
          .add(MI.getOperand(0))
          .addImm(0); // omod
    }
    break;
  }
  case AMDGPU::SI_KILL_I1_TERMINATOR: {
    const MachineOperand &Op = MI.getOperand(0);
    int64_t KillVal = MI.getOperand(1).getImm();
    assert(KillVal == 0 || KillVal == -1);

    // Kill all threads if Op0 is an immediate and equal to the Kill value.
    if (Op.isImm()) {
      int64_t Imm = Op.getImm();
      assert(Imm == 0 || Imm == -1);

      if (Imm == KillVal)
        BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_MOV_B64), AMDGPU::EXEC)
          .addImm(0);
      break;
    }

    // Otherwise clear the lanes selected by Op: ANDN2 when killing where the
    // predicate is true, AND when killing where it is false.
    unsigned Opcode = KillVal ? AMDGPU::S_ANDN2_B64 : AMDGPU::S_AND_B64;
    BuildMI(MBB, &MI, DL, TII->get(Opcode), AMDGPU::EXEC)
        .addReg(AMDGPU::EXEC)
        .add(Op);
    break;
  }
  default:
    llvm_unreachable("invalid opcode, expected SI_KILL_*_TERMINATOR");
  }
}
|
|
|
|
|
|
|
|
/// Create a fresh basic block, link it in immediately after \p MBB in layout
/// order, and record it as a CFG successor of \p MBB.
MachineBasicBlock *SIInsertSkips::insertSkipBlock(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator I) const {
  MachineFunction *MF = MBB.getParent();

  MachineBasicBlock *SkipBB = MF->CreateMachineBasicBlock();

  // Place the new block right behind MBB.
  MF->insert(std::next(MachineFunction::iterator(MBB)), SkipBB);
  MBB.addSuccessor(SkipBB);

  return SkipBB;
}
|
|
|
|
|
|
|
|
// Returns true if a branch over the block was inserted.
|
|
|
|
bool SIInsertSkips::skipMaskBranch(MachineInstr &MI,
|
|
|
|
MachineBasicBlock &SrcMBB) {
|
|
|
|
MachineBasicBlock *DestBB = MI.getOperand(0).getMBB();
|
|
|
|
|
|
|
|
if (!shouldSkip(**SrcMBB.succ_begin(), *DestBB))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
const DebugLoc &DL = MI.getDebugLoc();
|
|
|
|
MachineBasicBlock::iterator InsPt = std::next(MI.getIterator());
|
|
|
|
|
|
|
|
BuildMI(SrcMBB, InsPt, DL, TII->get(AMDGPU::S_CBRANCH_EXECZ))
|
|
|
|
.addMBB(DestBB);
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2018-11-13 02:48:17 +08:00
|
|
|
/// Peephole on S_CBRANCH_VCC[N]Z: when VCC was produced by masking a known
/// all-ones value with EXEC, the branch really tests EXEC, so rewrite it to
/// S_CBRANCH_EXEC[N]Z (or remove/unconditionalize it when it tests EXEC & EXEC).
/// Returns true if anything was changed.
bool SIInsertSkips::optimizeVccBranch(MachineInstr &MI) const {
  // Match:
  // sreg = -1
  // vcc = S_AND_B64 exec, sreg
  // S_CBRANCH_VCC[N]Z
  // =>
  // S_CBRANCH_EXEC[N]Z
  bool Changed = false;
  MachineBasicBlock &MBB = *MI.getParent();
  const unsigned CondReg = AMDGPU::VCC;
  const unsigned ExecReg = AMDGPU::EXEC;
  const unsigned And = AMDGPU::S_AND_B64;

  // Scan backwards (bounded by Threshold) for the instruction defining VCC.
  MachineBasicBlock::reverse_iterator A = MI.getReverseIterator(),
                                      E = MBB.rend();
  bool ReadsCond = false;
  unsigned Threshold = 5;
  for (++A ; A != E ; ++A) {
    if (!--Threshold)
      return false;
    // Give up if EXEC changes between the AND and the branch.
    if (A->modifiesRegister(ExecReg, TRI))
      return false;
    if (A->modifiesRegister(CondReg, TRI)) {
      // Must be a full S_AND_B64 definition of VCC, not a partial clobber.
      if (!A->definesRegister(CondReg, TRI) || A->getOpcode() != And)
        return false;
      break;
    }
    ReadsCond |= A->readsRegister(CondReg, TRI);
  }
  if (A == E)
    return false;

  // Canonicalize so EXEC is operand 1 of the AND, commuting if necessary.
  MachineOperand &Op1 = A->getOperand(1);
  MachineOperand &Op2 = A->getOperand(2);
  if (Op1.getReg() != ExecReg && Op2.isReg() && Op2.getReg() == ExecReg) {
    TII->commuteInstruction(*A);
    Changed = true;
  }
  if (Op1.getReg() != ExecReg)
    return Changed;
  // The other operand must be all-ones (immediate -1, or a reg proven -1).
  if (Op2.isImm() && Op2.getImm() != -1)
    return Changed;

  unsigned SReg = AMDGPU::NoRegister;
  if (Op2.isReg()) {
    SReg = Op2.getReg();
    // Walk further back to find SReg's def; it must be "SReg = -1" with no
    // intervening clobbers.
    auto M = std::next(A);
    bool ReadsSreg = false;
    for ( ; M != E ; ++M) {
      if (M->definesRegister(SReg, TRI))
        break;
      if (M->modifiesRegister(SReg, TRI))
        return Changed;
      ReadsSreg |= M->readsRegister(SReg, TRI);
    }
    if (M == E ||
        !M->isMoveImmediate() ||
        !M->getOperand(1).isImm() ||
        M->getOperand(1).getImm() != -1)
      return Changed;
    // First if sreg is only used in and instruction fold the immediate
    // into that and.
    if (!ReadsSreg && Op2.isKill()) {
      A->getOperand(2).ChangeToImmediate(-1);
      M->eraseFromParent();
    }
  }

  // The AND itself becomes dead if nothing else reads VCC or SCC.
  if (!ReadsCond && A->registerDefIsDead(AMDGPU::SCC) &&
      MI.killsRegister(CondReg, TRI))
    A->eraseFromParent();

  bool IsVCCZ = MI.getOpcode() == AMDGPU::S_CBRANCH_VCCZ;
  if (SReg == ExecReg) {
    // VCC == EXEC & EXEC == EXEC: VCCZ branch is never taken (delete it);
    // VCCNZ branch is always taken (make it unconditional).
    if (IsVCCZ) {
      MI.eraseFromParent();
      return true;
    }
    MI.setDesc(TII->get(AMDGPU::S_BRANCH));
  } else {
    MI.setDesc(TII->get(IsVCCZ ? AMDGPU::S_CBRANCH_EXECZ
                               : AMDGPU::S_CBRANCH_EXECNZ));
  }

  // Drop the stale implicit VCC use and rebuild implicit operands for the
  // new opcode.
  MI.RemoveOperand(MI.findRegisterUseOperandIdx(CondReg, false /*Kill*/, TRI));
  MI.addImplicitDefUseOperands(*MBB.getParent());

  return true;
}
|
|
|
|
|
2016-08-23 03:33:16 +08:00
|
|
|
/// Main pass entry point: walk all blocks, lowering SI_MASK_BRANCH and
/// SI_KILL_* pseudos, cleaning up redundant S_BRANCHes, and running the
/// VCC-branch peephole. Returns true if the function was modified.
bool SIInsertSkips::runOnMachineFunction(MachineFunction &MF) {
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  TII = ST.getInstrInfo();
  TRI = &TII->getRegisterInfo();
  SkipThreshold = SkipThresholdFlag;

  bool HaveKill = false;
  bool MadeChange = false;

  // Track depth of exec mask, divergent branches.
  SmallVector<MachineBasicBlock *, 16> ExecBranchStack;

  // Iterate via NextBB so newly inserted blocks are visited too.
  MachineFunction::iterator NextBB;

  // Shared trailing empty block used to redirect non-terminal epilog returns.
  MachineBasicBlock *EmptyMBBAtEnd = nullptr;

  for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
       BI != BE; BI = NextBB) {
    NextBB = std::next(BI);
    MachineBasicBlock &MBB = *BI;
    bool HaveSkipBlock = false;

    if (!ExecBranchStack.empty() && ExecBranchStack.back() == &MBB) {
      // Reached convergence point for last divergent branch.
      ExecBranchStack.pop_back();
    }

    if (HaveKill && ExecBranchStack.empty()) {
      HaveKill = false;

      // TODO: Insert skip if exec is 0?
    }

    MachineBasicBlock::iterator I, Next;
    for (I = MBB.begin(); I != MBB.end(); I = Next) {
      // Precompute Next: the switch below may erase MI.
      Next = std::next(I);

      MachineInstr &MI = *I;

      switch (MI.getOpcode()) {
      case AMDGPU::SI_MASK_BRANCH:
        ExecBranchStack.push_back(MI.getOperand(0).getMBB());
        MadeChange |= skipMaskBranch(MI, MBB);
        break;

      case AMDGPU::S_BRANCH:
        // Optimize out branches to the next block.
        // FIXME: Shouldn't this be handled by BranchFolding?
        if (MBB.isLayoutSuccessor(MI.getOperand(0).getMBB())) {
          MI.eraseFromParent();
        } else if (HaveSkipBlock) {
          // Remove the given unconditional branch when a skip block has been
          // inserted after the current one and let skip the two instructions
          // performing the kill if the exec mask is non-zero.
          MI.eraseFromParent();
        }
        break;

      case AMDGPU::SI_KILL_F32_COND_IMM_TERMINATOR:
      case AMDGPU::SI_KILL_I1_TERMINATOR:
        MadeChange = true;
        kill(MI);

        if (ExecBranchStack.empty()) {
          if (NextBB != BE && skipIfDead(MI, *NextBB)) {
            HaveSkipBlock = true;
            // A block was inserted after BI: recompute NextBB and BE so the
            // outer loop visits it and the end iterator stays valid.
            NextBB = std::next(BI);
            BE = MF.end();
          }
        } else {
          // Inside divergent control flow: defer the dead-wave check.
          HaveKill = true;
        }

        // The kill pseudo has been fully lowered; remove it.
        MI.eraseFromParent();
        break;

      case AMDGPU::SI_RETURN_TO_EPILOG:
        // FIXME: Should move somewhere else
        assert(!MF.getInfo<SIMachineFunctionInfo>()->returnsVoid());

        // Graphics shaders returning non-void shouldn't contain S_ENDPGM,
        // because external bytecode will be appended at the end.
        if (BI != --MF.end() || I != MBB.getFirstTerminator()) {
          // SI_RETURN_TO_EPILOG is not the last instruction. Add an empty block at
          // the end and jump there.
          if (!EmptyMBBAtEnd) {
            EmptyMBBAtEnd = MF.CreateMachineBasicBlock();
            MF.insert(MF.end(), EmptyMBBAtEnd);
          }

          MBB.addSuccessor(EmptyMBBAtEnd);
          BuildMI(*BI, I, MI.getDebugLoc(), TII->get(AMDGPU::S_BRANCH))
            .addMBB(EmptyMBBAtEnd);
          I->eraseFromParent();
        }
        break;

      case AMDGPU::S_CBRANCH_VCCZ:
      case AMDGPU::S_CBRANCH_VCCNZ:
        MadeChange |= optimizeVccBranch(MI);
        break;

      default:
        break;
      }
    }
  }

  return MadeChange;
}
|