2017-08-08 08:47:13 +08:00
|
|
|
//===- SIFixSGPRCopies.cpp - Remove potential VGPR => SGPR copies ---------===//
|
2013-08-07 07:08:28 +08:00
|
|
|
//
|
2019-01-19 16:50:56 +08:00
|
|
|
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
|
|
|
|
// See https://llvm.org/LICENSE.txt for license information.
|
|
|
|
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
|
2013-08-07 07:08:28 +08:00
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
//
|
|
|
|
/// \file
|
|
|
|
/// Copies from VGPR to SGPR registers are illegal and the register coalescer
|
|
|
|
/// will sometimes generate these illegal copies in situations like this:
|
|
|
|
///
|
|
|
|
/// Register Class <vsrc> is the union of <vgpr> and <sgpr>
|
|
|
|
///
|
|
|
|
/// BB0:
|
2017-11-30 20:12:19 +08:00
|
|
|
/// %0 <sgpr> = SCALAR_INST
|
|
|
|
/// %1 <vsrc> = COPY %0 <sgpr>
|
2013-08-07 07:08:28 +08:00
|
|
|
/// ...
|
|
|
|
/// BRANCH %cond BB1, BB2
|
|
|
|
/// BB1:
|
2017-11-30 20:12:19 +08:00
|
|
|
/// %2 <vgpr> = VECTOR_INST
|
|
|
|
/// %3 <vsrc> = COPY %2 <vgpr>
|
2013-08-07 07:08:28 +08:00
|
|
|
/// BB2:
|
2017-12-05 01:18:51 +08:00
|
|
|
/// %4 <vsrc> = PHI %1 <vsrc>, <%bb.0>, %3 <vsrc>, <%bb.1>
|
2017-11-30 20:12:19 +08:00
|
|
|
/// %5 <vgpr> = VECTOR_INST %4 <vsrc>
|
2013-11-14 12:05:22 +08:00
|
|
|
///
|
2013-08-07 07:08:28 +08:00
|
|
|
///
|
|
|
|
/// The coalescer will begin at BB0 and eliminate its copy, then the resulting
|
|
|
|
/// code will look like this:
|
|
|
|
///
|
|
|
|
/// BB0:
|
2017-11-30 20:12:19 +08:00
|
|
|
/// %0 <sgpr> = SCALAR_INST
|
2013-08-07 07:08:28 +08:00
|
|
|
/// ...
|
|
|
|
/// BRANCH %cond BB1, BB2
|
|
|
|
/// BB1:
|
2017-11-30 20:12:19 +08:00
|
|
|
/// %2 <vgpr> = VECTOR_INST
|
|
|
|
/// %3 <vsrc> = COPY %2 <vgpr>
|
2013-08-07 07:08:28 +08:00
|
|
|
/// BB2:
|
2017-12-05 01:18:51 +08:00
|
|
|
/// %4 <sgpr> = PHI %0 <sgpr>, <%bb.0>, %3 <vsrc>, <%bb.1>
|
2017-11-30 20:12:19 +08:00
|
|
|
/// %5 <vgpr> = VECTOR_INST %4 <sgpr>
|
2013-08-07 07:08:28 +08:00
|
|
|
///
|
|
|
|
/// Now that the result of the PHI instruction is an SGPR, the register
|
2017-11-30 20:12:19 +08:00
|
|
|
/// allocator is now forced to constrain the register class of %3 to
|
2013-08-07 07:08:28 +08:00
|
|
|
/// <sgpr> so we end up with final code like this:
|
2013-11-14 12:05:22 +08:00
|
|
|
///
|
2013-08-07 07:08:28 +08:00
|
|
|
/// BB0:
|
2017-11-30 20:12:19 +08:00
|
|
|
/// %0 <sgpr> = SCALAR_INST
|
2013-08-07 07:08:28 +08:00
|
|
|
/// ...
|
|
|
|
/// BRANCH %cond BB1, BB2
|
|
|
|
/// BB1:
|
2017-11-30 20:12:19 +08:00
|
|
|
/// %2 <vgpr> = VECTOR_INST
|
|
|
|
/// %3 <sgpr> = COPY %2 <vgpr>
|
2013-08-07 07:08:28 +08:00
|
|
|
/// BB2:
|
2017-12-05 01:18:51 +08:00
|
|
|
/// %4 <sgpr> = PHI %0 <sgpr>, <%bb.0>, %3 <sgpr>, <%bb.1>
|
2017-11-30 20:12:19 +08:00
|
|
|
/// %5 <vgpr> = VECTOR_INST %4 <sgpr>
|
2013-08-07 07:08:28 +08:00
|
|
|
///
|
2013-11-14 12:05:22 +08:00
|
|
|
/// Now this code contains an illegal copy from a VGPR to an SGPR.
|
2013-08-07 07:08:28 +08:00
|
|
|
///
|
|
|
|
/// In order to avoid this problem, this pass searches for PHI instructions
|
|
|
|
/// which define a <vsrc> register and constrains its definition class to
|
|
|
|
/// <vgpr> if the user of the PHI's definition register is a vector instruction.
|
|
|
|
/// If the PHI's definition class is constrained to <vgpr> then the coalescer
|
|
|
|
/// will be unable to perform the COPY removal from the above example which
|
|
|
|
/// ultimately led to the creation of an illegal COPY.
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
#include "AMDGPU.h"
|
2014-08-05 05:25:23 +08:00
|
|
|
#include "AMDGPUSubtarget.h"
|
Sink all InitializePasses.h includes
This file lists every pass in LLVM, and is included by Pass.h, which is
very popular. Every time we add, remove, or rename a pass in LLVM, it
caused lots of recompilation.
I found this fact by looking at this table, which is sorted by the
number of times a file was changed over the last 100,000 git commits
multiplied by the number of object files that depend on it in the
current checkout:
recompiles touches affected_files header
342380 95 3604 llvm/include/llvm/ADT/STLExtras.h
314730 234 1345 llvm/include/llvm/InitializePasses.h
307036 118 2602 llvm/include/llvm/ADT/APInt.h
213049 59 3611 llvm/include/llvm/Support/MathExtras.h
170422 47 3626 llvm/include/llvm/Support/Compiler.h
162225 45 3605 llvm/include/llvm/ADT/Optional.h
158319 63 2513 llvm/include/llvm/ADT/Triple.h
140322 39 3598 llvm/include/llvm/ADT/StringRef.h
137647 59 2333 llvm/include/llvm/Support/Error.h
131619 73 1803 llvm/include/llvm/Support/FileSystem.h
Before this change, touching InitializePasses.h would cause 1345 files
to recompile. After this change, touching it only causes 550 compiles in
an incremental rebuild.
Reviewers: bkramer, asbirlea, bollu, jdoerfert
Differential Revision: https://reviews.llvm.org/D70211
2019-11-14 05:15:01 +08:00
|
|
|
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
|
2013-08-07 07:08:28 +08:00
|
|
|
#include "SIInstrInfo.h"
|
2017-08-08 08:47:13 +08:00
|
|
|
#include "SIRegisterInfo.h"
|
2017-06-06 19:49:48 +08:00
|
|
|
#include "llvm/ADT/DenseSet.h"
|
2017-08-08 08:47:13 +08:00
|
|
|
#include "llvm/ADT/STLExtras.h"
|
|
|
|
#include "llvm/ADT/SmallSet.h"
|
|
|
|
#include "llvm/ADT/SmallVector.h"
|
|
|
|
#include "llvm/CodeGen/MachineBasicBlock.h"
|
2016-11-29 08:46:46 +08:00
|
|
|
#include "llvm/CodeGen/MachineDominators.h"
|
2017-08-08 08:47:13 +08:00
|
|
|
#include "llvm/CodeGen/MachineFunction.h"
|
2013-08-07 07:08:28 +08:00
|
|
|
#include "llvm/CodeGen/MachineFunctionPass.h"
|
2017-08-08 08:47:13 +08:00
|
|
|
#include "llvm/CodeGen/MachineInstr.h"
|
2013-11-14 07:36:37 +08:00
|
|
|
#include "llvm/CodeGen/MachineInstrBuilder.h"
|
2017-08-08 08:47:13 +08:00
|
|
|
#include "llvm/CodeGen/MachineOperand.h"
|
2013-08-07 07:08:28 +08:00
|
|
|
#include "llvm/CodeGen/MachineRegisterInfo.h"
|
2017-11-17 09:07:10 +08:00
|
|
|
#include "llvm/CodeGen/TargetRegisterInfo.h"
|
Sink all InitializePasses.h includes
This file lists every pass in LLVM, and is included by Pass.h, which is
very popular. Every time we add, remove, or rename a pass in LLVM, it
caused lots of recompilation.
I found this fact by looking at this table, which is sorted by the
number of times a file was changed over the last 100,000 git commits
multiplied by the number of object files that depend on it in the
current checkout:
recompiles touches affected_files header
342380 95 3604 llvm/include/llvm/ADT/STLExtras.h
314730 234 1345 llvm/include/llvm/InitializePasses.h
307036 118 2602 llvm/include/llvm/ADT/APInt.h
213049 59 3611 llvm/include/llvm/Support/MathExtras.h
170422 47 3626 llvm/include/llvm/Support/Compiler.h
162225 45 3605 llvm/include/llvm/ADT/Optional.h
158319 63 2513 llvm/include/llvm/ADT/Triple.h
140322 39 3598 llvm/include/llvm/ADT/StringRef.h
137647 59 2333 llvm/include/llvm/Support/Error.h
131619 73 1803 llvm/include/llvm/Support/FileSystem.h
Before this change, touching InitializePasses.h would cause 1345 files
to recompile. After this change, touching it only causes 550 compiles in
an incremental rebuild.
Reviewers: bkramer, asbirlea, bollu, jdoerfert
Differential Revision: https://reviews.llvm.org/D70211
2019-11-14 05:15:01 +08:00
|
|
|
#include "llvm/InitializePasses.h"
|
2017-08-08 08:47:13 +08:00
|
|
|
#include "llvm/Pass.h"
|
|
|
|
#include "llvm/Support/CodeGen.h"
|
|
|
|
#include "llvm/Support/CommandLine.h"
|
2013-11-14 07:36:37 +08:00
|
|
|
#include "llvm/Support/Debug.h"
|
2013-11-15 07:24:09 +08:00
|
|
|
#include "llvm/Support/raw_ostream.h"
|
2013-08-07 07:08:28 +08:00
|
|
|
#include "llvm/Target/TargetMachine.h"
|
2017-08-08 08:47:13 +08:00
|
|
|
#include <cassert>
|
|
|
|
#include <cstdint>
|
|
|
|
#include <iterator>
|
|
|
|
#include <list>
|
|
|
|
#include <map>
|
|
|
|
#include <tuple>
|
|
|
|
#include <utility>
|
2013-08-07 07:08:28 +08:00
|
|
|
|
|
|
|
using namespace llvm;
|
|
|
|
|
2016-04-22 02:21:54 +08:00
|
|
|
#define DEBUG_TYPE "si-fix-sgpr-copies"
|
2014-04-22 10:41:26 +08:00
|
|
|
|
2017-04-25 03:37:54 +08:00
|
|
|
static cl::opt<bool> EnableM0Merge(
|
|
|
|
"amdgpu-enable-merge-m0",
|
|
|
|
cl::desc("Merge and hoist M0 initializations"),
|
[AMDGPU] Enable merging m0 initializations.
Summary:
Enable hoisting and merging m0 defs that are initialized with the same
immediate value. Fixes bug where removed instructions are not considered
to interfere with other inits, and make sure to not hoist inits before block
prologues.
Reviewers: rampitec, arsenm
Reviewed By: rampitec
Subscribers: kzhuravl, jvesely, wdng, nhaehnle, yaxunl, dstuttard, tpr, t-tye, hiraditya, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D64766
llvm-svn: 366135
2019-07-16 06:07:05 +08:00
|
|
|
cl::init(true));
|
2017-04-25 03:37:54 +08:00
|
|
|
|
2013-08-07 07:08:28 +08:00
|
|
|
namespace {
|
|
|
|
|
|
|
|
/// Machine function pass that eliminates or legalizes VGPR -> SGPR copies
/// (see the \file comment above for the motivating PHI/coalescer example).
class SIFixSGPRCopies : public MachineFunctionPass {
  // Dominator tree required/preserved by this pass (see getAnalysisUsage).
  MachineDominatorTree *MDT;

public:
  static char ID;

  // Per-function cached state; presumably initialized at the top of
  // runOnMachineFunction (body not visible here) — TODO confirm.
  MachineRegisterInfo *MRI;
  const SIRegisterInfo *TRI;
  const SIInstrInfo *TII;

  SIFixSGPRCopies() : MachineFunctionPass(ID) {}

  bool runOnMachineFunction(MachineFunction &MF) override;

  /// Handle a single PHI: decide whether its result can stay in SGPRs or
  /// must be moved to VGPRs (implementation outside this view).
  void processPHINode(MachineInstr &MI);

  StringRef getPassName() const override { return "SI Fix SGPR copies"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<MachineDominatorTree>();
    AU.addPreserved<MachineDominatorTree>();
    // Only copies/register classes are rewritten; block structure is kept.
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};
|
|
|
|
|
2017-08-08 08:47:13 +08:00
|
|
|
} // end anonymous namespace
|
2013-08-07 07:08:28 +08:00
|
|
|
|
2016-11-29 08:46:46 +08:00
|
|
|
INITIALIZE_PASS_BEGIN(SIFixSGPRCopies, DEBUG_TYPE,
|
|
|
|
"SI Fix SGPR copies", false, false)
|
2017-04-25 03:37:54 +08:00
|
|
|
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
|
2016-11-29 08:46:46 +08:00
|
|
|
INITIALIZE_PASS_END(SIFixSGPRCopies, DEBUG_TYPE,
|
|
|
|
"SI Fix SGPR copies", false, false)
|
|
|
|
|
2013-08-07 07:08:28 +08:00
|
|
|
char SIFixSGPRCopies::ID = 0;
|
|
|
|
|
2015-11-04 06:30:13 +08:00
|
|
|
char &llvm::SIFixSGPRCopiesID = SIFixSGPRCopies::ID;
|
|
|
|
|
|
|
|
/// Factory used by the AMDGPU target pass pipeline setup.
FunctionPass *llvm::createSIFixSGPRCopiesPass() {
  return new SIFixSGPRCopies();
}
|
|
|
|
|
2019-07-12 05:19:33 +08:00
|
|
|
static bool hasVectorOperands(const MachineInstr &MI,
|
|
|
|
const SIRegisterInfo *TRI) {
|
2013-11-14 07:36:37 +08:00
|
|
|
const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
|
|
|
|
for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
|
|
|
|
if (!MI.getOperand(i).isReg() ||
|
2019-08-02 07:27:28 +08:00
|
|
|
!Register::isVirtualRegister(MI.getOperand(i).getReg()))
|
2013-11-14 07:36:37 +08:00
|
|
|
continue;
|
|
|
|
|
2019-07-12 05:19:33 +08:00
|
|
|
if (TRI->hasVectorRegisters(MRI.getRegClass(MI.getOperand(i).getReg())))
|
2013-11-14 07:36:37 +08:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2015-11-03 07:15:42 +08:00
|
|
|
/// Compute the (source, destination) register classes of a COPY.
/// Virtual registers look up their class in \p MRI; physical registers are
/// classified via \p TRI. The copy's subregister index is intentionally
/// ignored here.
static std::pair<const TargetRegisterClass *, const TargetRegisterClass *>
getCopyRegClasses(const MachineInstr &Copy,
                  const SIRegisterInfo &TRI,
                  const MachineRegisterInfo &MRI) {
  Register DstReg = Copy.getOperand(0).getReg();
  Register SrcReg = Copy.getOperand(1).getReg();

  // Shared classifier for both operands.
  auto ClassOf = [&](Register Reg) -> const TargetRegisterClass * {
    if (Register::isVirtualRegister(Reg))
      return MRI.getRegClass(Reg);
    return TRI.getPhysRegClass(Reg);
  };

  // We don't really care about the subregister here.
  // SrcRC = TRI.getSubRegClass(SrcRC, Copy.getOperand(1).getSubReg());
  return std::make_pair(ClassOf(SrcReg), ClassOf(DstReg));
}
|
|
|
|
|
2015-11-03 07:15:42 +08:00
|
|
|
static bool isVGPRToSGPRCopy(const TargetRegisterClass *SrcRC,
|
|
|
|
const TargetRegisterClass *DstRC,
|
|
|
|
const SIRegisterInfo &TRI) {
|
AMDGPU: Rewrite SILowerI1Copies to always stay on SALU
Summary:
Instead of writing boolean values temporarily into 32-bit VGPRs
if they are involved in PHIs or are observed from outside a loop,
we use bitwise masking operations to combine lane masks in a way
that is consistent with wave control flow.
Move SIFixSGPRCopies to before this pass, since that pass
incorrectly attempts to move SGPR phis to VGPRs.
This should recover most of the code quality that was lost with
the bug fix in "AMDGPU: Remove PHI loop condition optimization".
There are still some relevant cases where code quality could be
improved, in particular:
- We often introduce redundant masks with EXEC. Ideally, we'd
have a generic computeKnownBits-like analysis to determine
whether masks are already masked by EXEC, so we can avoid this
masking both here and when lowering uniform control flow.
- The criterion we use to determine whether a def is observed
from outside a loop is conservative: it doesn't check whether
(loop) branch conditions are uniform.
Change-Id: Ibabdb373a7510e426b90deef00f5e16c5d56e64b
Reviewers: arsenm, rampitec, tpr
Subscribers: kzhuravl, jvesely, wdng, mgorny, yaxunl, dstuttard, t-tye, eraman, llvm-commits
Differential Revision: https://reviews.llvm.org/D53496
llvm-svn: 345719
2018-10-31 21:27:08 +08:00
|
|
|
return SrcRC != &AMDGPU::VReg_1RegClass && TRI.isSGPRClass(DstRC) &&
|
2019-07-12 05:19:33 +08:00
|
|
|
TRI.hasVectorRegisters(SrcRC);
|
2015-10-13 08:07:54 +08:00
|
|
|
}
|
2013-11-14 07:36:37 +08:00
|
|
|
|
2015-11-03 07:15:42 +08:00
|
|
|
static bool isSGPRToVGPRCopy(const TargetRegisterClass *SrcRC,
|
|
|
|
const TargetRegisterClass *DstRC,
|
|
|
|
const SIRegisterInfo &TRI) {
|
AMDGPU: Rewrite SILowerI1Copies to always stay on SALU
Summary:
Instead of writing boolean values temporarily into 32-bit VGPRs
if they are involved in PHIs or are observed from outside a loop,
we use bitwise masking operations to combine lane masks in a way
that is consistent with wave control flow.
Move SIFixSGPRCopies to before this pass, since that pass
incorrectly attempts to move SGPR phis to VGPRs.
This should recover most of the code quality that was lost with
the bug fix in "AMDGPU: Remove PHI loop condition optimization".
There are still some relevant cases where code quality could be
improved, in particular:
- We often introduce redundant masks with EXEC. Ideally, we'd
have a generic computeKnownBits-like analysis to determine
whether masks are already masked by EXEC, so we can avoid this
masking both here and when lowering uniform control flow.
- The criterion we use to determine whether a def is observed
from outside a loop is conservative: it doesn't check whether
(loop) branch conditions are uniform.
Change-Id: Ibabdb373a7510e426b90deef00f5e16c5d56e64b
Reviewers: arsenm, rampitec, tpr
Subscribers: kzhuravl, jvesely, wdng, mgorny, yaxunl, dstuttard, t-tye, eraman, llvm-commits
Differential Revision: https://reviews.llvm.org/D53496
llvm-svn: 345719
2018-10-31 21:27:08 +08:00
|
|
|
return DstRC != &AMDGPU::VReg_1RegClass && TRI.isSGPRClass(SrcRC) &&
|
2019-07-12 05:19:33 +08:00
|
|
|
TRI.hasVectorRegisters(DstRC);
|
2013-11-14 07:36:37 +08:00
|
|
|
}
|
|
|
|
|
2017-06-21 02:32:42 +08:00
|
|
|
/// Try to turn a VGPR-defining COPY into an SGPR-defining one by
/// reclassifying its destination register. Succeeds only when every
/// non-debug use of the destination is a same-block, non-generic
/// instruction for which the copy's source operand is already legal.
/// Returns true (and mutates the destination's register class) on success.
static bool tryChangeVGPRtoSGPRinCopy(MachineInstr &MI,
                                      const SIRegisterInfo *TRI,
                                      const SIInstrInfo *TII) {
  MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
  auto &Src = MI.getOperand(1);
  Register DstReg = MI.getOperand(0).getReg();
  Register SrcReg = Src.getReg();
  // Physical registers have fixed classes; only virtual regs can be
  // reclassified.
  if (!Register::isVirtualRegister(SrcReg) ||
      !Register::isVirtualRegister(DstReg))
    return false;

  // Verify every user could take the SGPR source directly.
  for (const auto &MO : MRI.reg_nodbg_operands(DstReg)) {
    const auto *UseMI = MO.getParent();
    if (UseMI == &MI)
      continue;
    // Bail on: redefinitions, cross-block uses, generic (pre-isel) opcodes,
    // and users where the source operand would be illegal in MO's slot.
    if (MO.isDef() || UseMI->getParent() != MI.getParent() ||
        UseMI->getOpcode() <= TargetOpcode::GENERIC_OP_END ||
        !TII->isOperandLegal(*UseMI, UseMI->getOperandNo(&MO), &Src))
      return false;
  }
  // Change VGPR to SGPR destination.
  MRI.setRegClass(DstReg, TRI->getEquivalentSGPRClass(MRI.getRegClass(DstReg)));
  return true;
}
|
|
|
|
|
2015-11-03 07:15:42 +08:00
|
|
|
// Distribute an SGPR->VGPR copy of a REG_SEQUENCE into a VGPR REG_SEQUENCE.
|
|
|
|
//
|
|
|
|
// SGPRx = ...
|
|
|
|
// SGPRy = REG_SEQUENCE SGPRx, sub0 ...
|
|
|
|
// VGPRz = COPY SGPRy
|
|
|
|
//
|
|
|
|
// ==>
|
|
|
|
//
|
|
|
|
// VGPRx = COPY SGPRx
|
|
|
|
// VGPRz = REG_SEQUENCE VGPRx, sub0
|
|
|
|
//
|
|
|
|
// This exposes immediate folding opportunities when materializing 64-bit
|
|
|
|
// immediates.
|
|
|
|
/// Distribute an SGPR -> VGPR copy of an SGPR REG_SEQUENCE into per-element
/// copies, turning the REG_SEQUENCE itself into a VGPR (or AGPR) one. See
/// the comment block above this function for the before/after shape.
/// Returns true if the copy was eliminated or legalized.
static bool foldVGPRCopyIntoRegSequence(MachineInstr &MI,
                                        const SIRegisterInfo *TRI,
                                        const SIInstrInfo *TII,
                                        MachineRegisterInfo &MRI) {
  assert(MI.isRegSequence());

  // Only SGPR-class REG_SEQUENCE results are candidates.
  Register DstReg = MI.getOperand(0).getReg();
  if (!TRI->isSGPRClass(MRI.getRegClass(DstReg)))
    return false;

  // A single use keeps the rewrite local: that use must be the copy.
  if (!MRI.hasOneUse(DstReg))
    return false;

  MachineInstr &CopyUse = *MRI.use_instr_begin(DstReg);
  if (!CopyUse.isCopy())
    return false;

  // It is illegal to have vreg inputs to a physreg defining reg_sequence.
  if (Register::isPhysicalRegister(CopyUse.getOperand(0).getReg()))
    return false;

  const TargetRegisterClass *SrcRC, *DstRC;
  std::tie(SrcRC, DstRC) = getCopyRegClasses(CopyUse, *TRI, MRI);

  if (!isSGPRToVGPRCopy(SrcRC, DstRC, *TRI))
    return false;

  // Cheaper fix first: if the copy's users accept an SGPR directly, just
  // reclassify the copy's destination and keep the REG_SEQUENCE scalar.
  if (tryChangeVGPRtoSGPRinCopy(CopyUse, TRI, TII))
    return true;

  // TODO: Could have multiple extracts?
  unsigned SubReg = CopyUse.getOperand(1).getSubReg();
  if (SubReg != AMDGPU::NoSubRegister)
    return false;

  MRI.setRegClass(DstReg, DstRC);

  // SGPRx = ...
  // SGPRy = REG_SEQUENCE SGPRx, sub0 ...
  // VGPRz = COPY SGPRy

  // =>

  // VGPRx = COPY SGPRx
  // VGPRz = REG_SEQUENCE VGPRx, sub0

  // Reuse the copy's destination as the REG_SEQUENCE result, then rewrite
  // each (reg, subidx) input pair (operands 1,3,5,...) through a new copy.
  MI.getOperand(0).setReg(CopyUse.getOperand(0).getReg());
  bool IsAGPR = TRI->hasAGPRs(DstRC);

  for (unsigned I = 1, N = MI.getNumOperands(); I != N; I += 2) {
    Register SrcReg = MI.getOperand(I).getReg();
    unsigned SrcSubReg = MI.getOperand(I).getSubReg();

    const TargetRegisterClass *SrcRC = MRI.getRegClass(SrcReg);
    assert(TRI->isSGPRClass(SrcRC) &&
           "Expected SGPR REG_SEQUENCE to only have SGPR inputs");

    SrcRC = TRI->getSubRegClass(SrcRC, SrcSubReg);
    const TargetRegisterClass *NewSrcRC = TRI->getEquivalentVGPRClass(SrcRC);

    // SGPR input -> fresh VGPR via a plain COPY inserted before MI.
    Register TmpReg = MRI.createVirtualRegister(NewSrcRC);

    BuildMI(*MI.getParent(), &MI, MI.getDebugLoc(), TII->get(AMDGPU::COPY),
            TmpReg)
        .add(MI.getOperand(I));

    if (IsAGPR) {
      // AGPR destinations need a second hop: VGPR -> AGPR. 32-bit elements
      // use V_ACCVGPR_WRITE_B32; wider ones use COPY.
      const TargetRegisterClass *NewSrcRC = TRI->getEquivalentAGPRClass(SrcRC);
      Register TmpAReg = MRI.createVirtualRegister(NewSrcRC);
      unsigned Opc = NewSrcRC == &AMDGPU::AGPR_32RegClass ?
        AMDGPU::V_ACCVGPR_WRITE_B32 : AMDGPU::COPY;
      BuildMI(*MI.getParent(), &MI, MI.getDebugLoc(), TII->get(Opc),
              TmpAReg)
          .addReg(TmpReg, RegState::Kill);
      TmpReg = TmpAReg;
    }

    MI.getOperand(I).setReg(TmpReg);
  }

  // The original scalar->vector copy is now redundant.
  CopyUse.eraseFromParent();
  return true;
}
|
|
|
|
|
2016-12-07 05:13:30 +08:00
|
|
|
static bool isSafeToFoldImmIntoCopy(const MachineInstr *Copy,
|
|
|
|
const MachineInstr *MoveImm,
|
|
|
|
const SIInstrInfo *TII,
|
|
|
|
unsigned &SMovOp,
|
|
|
|
int64_t &Imm) {
|
[AMDGPU] Add an llvm.amdgcn.wqm intrinsic for WQM
Summary:
Previously, we assumed that certain types of instructions needed WQM in
pixel shaders, particularly DS instructions and image sampling
instructions. This was ok because with OpenGL, the assumption was
correct. But we want to start using DPP instructions for derivatives as
well as other things, so the assumption that we can infer whether to use
WQM based on the instruction won't continue to hold. This intrinsic lets
frontends like Mesa indicate what things need WQM based on their
knowledge of the API, rather than second-guessing them in the backend.
We need to keep around the old method of enabling WQM, but eventually we
should remove it once Mesa catches up. For now, this will let us use DPP
instructions for computing derivatives correctly.
Reviewers: arsenm, tpr, nhaehnle
Subscribers: kzhuravl, wdng, yaxunl, dstuttard, llvm-commits, t-tye
Differential Revision: https://reviews.llvm.org/D35167
llvm-svn: 310085
2017-08-05 02:36:49 +08:00
|
|
|
if (Copy->getOpcode() != AMDGPU::COPY)
|
|
|
|
return false;
|
|
|
|
|
2016-12-07 05:13:30 +08:00
|
|
|
if (!MoveImm->isMoveImmediate())
|
|
|
|
return false;
|
|
|
|
|
|
|
|
const MachineOperand *ImmOp =
|
|
|
|
TII->getNamedOperand(*MoveImm, AMDGPU::OpName::src0);
|
|
|
|
if (!ImmOp->isImm())
|
|
|
|
return false;
|
|
|
|
|
|
|
|
// FIXME: Handle copies with sub-regs.
|
|
|
|
if (Copy->getOperand(0).getSubReg())
|
|
|
|
return false;
|
|
|
|
|
|
|
|
switch (MoveImm->getOpcode()) {
|
|
|
|
default:
|
|
|
|
return false;
|
|
|
|
case AMDGPU::V_MOV_B32_e32:
|
|
|
|
SMovOp = AMDGPU::S_MOV_B32;
|
|
|
|
break;
|
|
|
|
case AMDGPU::V_MOV_B64_PSEUDO:
|
|
|
|
SMovOp = AMDGPU::S_MOV_B64;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
Imm = ImmOp->getImm();
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2017-04-25 03:37:54 +08:00
|
|
|
template <class UnaryPredicate>
|
|
|
|
bool searchPredecessors(const MachineBasicBlock *MBB,
|
|
|
|
const MachineBasicBlock *CutOff,
|
|
|
|
UnaryPredicate Predicate) {
|
|
|
|
if (MBB == CutOff)
|
|
|
|
return false;
|
|
|
|
|
2017-08-08 08:47:13 +08:00
|
|
|
DenseSet<const MachineBasicBlock *> Visited;
|
|
|
|
SmallVector<MachineBasicBlock *, 4> Worklist(MBB->pred_begin(),
|
|
|
|
MBB->pred_end());
|
2017-04-13 07:51:47 +08:00
|
|
|
|
|
|
|
while (!Worklist.empty()) {
|
2017-04-25 03:37:54 +08:00
|
|
|
MachineBasicBlock *MBB = Worklist.pop_back_val();
|
2017-04-13 07:51:47 +08:00
|
|
|
|
2017-04-25 03:37:54 +08:00
|
|
|
if (!Visited.insert(MBB).second)
|
2017-04-13 07:51:47 +08:00
|
|
|
continue;
|
2017-04-25 03:37:54 +08:00
|
|
|
if (MBB == CutOff)
|
|
|
|
continue;
|
|
|
|
if (Predicate(MBB))
|
2017-04-13 07:51:47 +08:00
|
|
|
return true;
|
|
|
|
|
2017-04-25 03:37:54 +08:00
|
|
|
Worklist.append(MBB->pred_begin(), MBB->pred_end());
|
2017-04-13 07:51:47 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2017-04-25 03:37:54 +08:00
|
|
|
// Checks if there is potential path From instruction To instruction.
|
|
|
|
// If CutOff is specified and it sits in between of that path we ignore
|
|
|
|
// a higher portion of the path and report it is not reachable.
|
|
|
|
static bool isReachable(const MachineInstr *From,
|
|
|
|
const MachineInstr *To,
|
|
|
|
const MachineBasicBlock *CutOff,
|
|
|
|
MachineDominatorTree &MDT) {
|
|
|
|
// If either From block dominates To block or instructions are in the same
|
|
|
|
// block and From is higher.
|
|
|
|
if (MDT.dominates(From, To))
|
|
|
|
return true;
|
|
|
|
|
|
|
|
const MachineBasicBlock *MBBFrom = From->getParent();
|
|
|
|
const MachineBasicBlock *MBBTo = To->getParent();
|
|
|
|
if (MBBFrom == MBBTo)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
// Instructions are in different blocks, do predecessor search.
|
|
|
|
// We should almost never get here since we do not usually produce M0 stores
|
|
|
|
// other than -1.
|
|
|
|
return searchPredecessors(MBBTo, CutOff, [MBBFrom]
|
|
|
|
(const MachineBasicBlock *MBB) { return MBB == MBBFrom; });
|
|
|
|
}
|
|
|
|
|
[AMDGPU] Enable merging m0 initializations.
Summary:
Enable hoisting and merging m0 defs that are initialized with the same
immediate value. Fixes bug where removed instructions are not considered
to interfere with other inits, and make sure to not hoist inits before block
prologues.
Reviewers: rampitec, arsenm
Reviewed By: rampitec
Subscribers: kzhuravl, jvesely, wdng, nhaehnle, yaxunl, dstuttard, tpr, t-tye, hiraditya, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D64766
llvm-svn: 366135
2019-07-16 06:07:05 +08:00
|
|
|
// Return the first non-prologue instruction in the block.
|
|
|
|
static MachineBasicBlock::iterator
|
|
|
|
getFirstNonPrologue(MachineBasicBlock *MBB, const TargetInstrInfo *TII) {
|
|
|
|
MachineBasicBlock::iterator I = MBB->getFirstNonPHI();
|
|
|
|
while (I != MBB->end() && TII->isBasicBlockPrologue(*I))
|
|
|
|
++I;
|
|
|
|
|
|
|
|
return I;
|
|
|
|
}
|
|
|
|
|
2017-04-25 03:37:54 +08:00
|
|
|
// Hoist and merge identical SGPR initializations into a common predecessor.
|
|
|
|
// This is intended to combine M0 initializations, but can work with any
|
|
|
|
// SGPR. A VGPR cannot be processed since we cannot guarantee vector
|
|
|
|
// executioon.
|
|
|
|
static bool hoistAndMergeSGPRInits(unsigned Reg,
|
|
|
|
const MachineRegisterInfo &MRI,
|
AMDGPU: Move m0 initializations earlier
Summary:
After hoisting and merging m0 initializations schedule them as early as
possible in the MBB. This helps the scheduler avoid hazards in some
cases.
Reviewers: rampitec, arsenm
Subscribers: kzhuravl, jvesely, wdng, nhaehnle, yaxunl, dstuttard, tpr, t-tye, hiraditya, arphaman, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D67450
llvm-svn: 371671
2019-09-12 05:28:41 +08:00
|
|
|
const TargetRegisterInfo *TRI,
|
[AMDGPU] Enable merging m0 initializations.
Summary:
Enable hoisting and merging m0 defs that are initialized with the same
immediate value. Fixes bug where removed instructions are not considered
to interfere with other inits, and make sure to not hoist inits before block
prologues.
Reviewers: rampitec, arsenm
Reviewed By: rampitec
Subscribers: kzhuravl, jvesely, wdng, nhaehnle, yaxunl, dstuttard, tpr, t-tye, hiraditya, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D64766
llvm-svn: 366135
2019-07-16 06:07:05 +08:00
|
|
|
MachineDominatorTree &MDT,
|
|
|
|
const TargetInstrInfo *TII) {
|
2017-04-25 03:37:54 +08:00
|
|
|
// List of inits by immediate value.
|
2017-08-08 08:47:13 +08:00
|
|
|
using InitListMap = std::map<unsigned, std::list<MachineInstr *>>;
|
2017-04-25 03:37:54 +08:00
|
|
|
InitListMap Inits;
|
|
|
|
// List of clobbering instructions.
|
|
|
|
SmallVector<MachineInstr*, 8> Clobbers;
|
[AMDGPU] Enable merging m0 initializations.
Summary:
Enable hoisting and merging m0 defs that are initialized with the same
immediate value. Fixes bug where removed instructions are not considered
to interfere with other inits, and make sure to not hoist inits before block
prologues.
Reviewers: rampitec, arsenm
Reviewed By: rampitec
Subscribers: kzhuravl, jvesely, wdng, nhaehnle, yaxunl, dstuttard, tpr, t-tye, hiraditya, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D64766
llvm-svn: 366135
2019-07-16 06:07:05 +08:00
|
|
|
// List of instructions marked for deletion.
|
|
|
|
SmallSet<MachineInstr*, 8> MergedInstrs;
|
|
|
|
|
2017-04-25 03:37:54 +08:00
|
|
|
bool Changed = false;
|
|
|
|
|
|
|
|
for (auto &MI : MRI.def_instructions(Reg)) {
|
|
|
|
MachineOperand *Imm = nullptr;
|
AMDGPU: Move m0 initializations earlier
Summary:
After hoisting and merging m0 initializations schedule them as early as
possible in the MBB. This helps the scheduler avoid hazards in some
cases.
Reviewers: rampitec, arsenm
Subscribers: kzhuravl, jvesely, wdng, nhaehnle, yaxunl, dstuttard, tpr, t-tye, hiraditya, arphaman, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D67450
llvm-svn: 371671
2019-09-12 05:28:41 +08:00
|
|
|
for (auto &MO : MI.operands()) {
|
2017-04-25 03:37:54 +08:00
|
|
|
if ((MO.isReg() && ((MO.isDef() && MO.getReg() != Reg) || !MO.isDef())) ||
|
|
|
|
(!MO.isImm() && !MO.isReg()) || (MO.isImm() && Imm)) {
|
|
|
|
Imm = nullptr;
|
|
|
|
break;
|
|
|
|
} else if (MO.isImm())
|
|
|
|
Imm = &MO;
|
|
|
|
}
|
|
|
|
if (Imm)
|
|
|
|
Inits[Imm->getImm()].push_front(&MI);
|
|
|
|
else
|
|
|
|
Clobbers.push_back(&MI);
|
|
|
|
}
|
|
|
|
|
|
|
|
for (auto &Init : Inits) {
|
|
|
|
auto &Defs = Init.second;
|
|
|
|
|
|
|
|
for (auto I1 = Defs.begin(), E = Defs.end(); I1 != E; ) {
|
|
|
|
MachineInstr *MI1 = *I1;
|
|
|
|
|
|
|
|
for (auto I2 = std::next(I1); I2 != E; ) {
|
|
|
|
MachineInstr *MI2 = *I2;
|
|
|
|
|
|
|
|
// Check any possible interference
|
[AMDGPU] Enable merging m0 initializations.
Summary:
Enable hoisting and merging m0 defs that are initialized with the same
immediate value. Fixes bug where removed instructions are not considered
to interfere with other inits, and make sure to not hoist inits before block
prologues.
Reviewers: rampitec, arsenm
Reviewed By: rampitec
Subscribers: kzhuravl, jvesely, wdng, nhaehnle, yaxunl, dstuttard, tpr, t-tye, hiraditya, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D64766
llvm-svn: 366135
2019-07-16 06:07:05 +08:00
|
|
|
auto interferes = [&](MachineBasicBlock::iterator From,
|
|
|
|
MachineBasicBlock::iterator To) -> bool {
|
2017-04-25 03:37:54 +08:00
|
|
|
|
|
|
|
assert(MDT.dominates(&*To, &*From));
|
|
|
|
|
|
|
|
auto interferes = [&MDT, From, To](MachineInstr* &Clobber) -> bool {
|
|
|
|
const MachineBasicBlock *MBBFrom = From->getParent();
|
|
|
|
const MachineBasicBlock *MBBTo = To->getParent();
|
|
|
|
bool MayClobberFrom = isReachable(Clobber, &*From, MBBTo, MDT);
|
|
|
|
bool MayClobberTo = isReachable(Clobber, &*To, MBBTo, MDT);
|
|
|
|
if (!MayClobberFrom && !MayClobberTo)
|
|
|
|
return false;
|
|
|
|
if ((MayClobberFrom && !MayClobberTo) ||
|
|
|
|
(!MayClobberFrom && MayClobberTo))
|
|
|
|
return true;
|
|
|
|
// Both can clobber, this is not an interference only if both are
|
|
|
|
// dominated by Clobber and belong to the same block or if Clobber
|
|
|
|
// properly dominates To, given that To >> From, so it dominates
|
|
|
|
// both and located in a common dominator.
|
|
|
|
return !((MBBFrom == MBBTo &&
|
|
|
|
MDT.dominates(Clobber, &*From) &&
|
|
|
|
MDT.dominates(Clobber, &*To)) ||
|
|
|
|
MDT.properlyDominates(Clobber->getParent(), MBBTo));
|
|
|
|
};
|
|
|
|
|
2017-08-08 08:47:13 +08:00
|
|
|
return (llvm::any_of(Clobbers, interferes)) ||
|
|
|
|
(llvm::any_of(Inits, [&](InitListMap::value_type &C) {
|
|
|
|
return C.first != Init.first &&
|
|
|
|
llvm::any_of(C.second, interferes);
|
2017-04-25 03:37:54 +08:00
|
|
|
}));
|
|
|
|
};
|
|
|
|
|
|
|
|
if (MDT.dominates(MI1, MI2)) {
|
[AMDGPU] Enable merging m0 initializations.
Summary:
Enable hoisting and merging m0 defs that are initialized with the same
immediate value. Fixes bug where removed instructions are not considered
to interfere with other inits, and make sure to not hoist inits before block
prologues.
Reviewers: rampitec, arsenm
Reviewed By: rampitec
Subscribers: kzhuravl, jvesely, wdng, nhaehnle, yaxunl, dstuttard, tpr, t-tye, hiraditya, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D64766
llvm-svn: 366135
2019-07-16 06:07:05 +08:00
|
|
|
if (!interferes(MI2, MI1)) {
|
2018-05-14 20:53:11 +08:00
|
|
|
LLVM_DEBUG(dbgs()
|
|
|
|
<< "Erasing from "
|
|
|
|
<< printMBBReference(*MI2->getParent()) << " " << *MI2);
|
[AMDGPU] Enable merging m0 initializations.
Summary:
Enable hoisting and merging m0 defs that are initialized with the same
immediate value. Fixes bug where removed instructions are not considered
to interfere with other inits, and make sure to not hoist inits before block
prologues.
Reviewers: rampitec, arsenm
Reviewed By: rampitec
Subscribers: kzhuravl, jvesely, wdng, nhaehnle, yaxunl, dstuttard, tpr, t-tye, hiraditya, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D64766
llvm-svn: 366135
2019-07-16 06:07:05 +08:00
|
|
|
MergedInstrs.insert(MI2);
|
2017-04-25 03:37:54 +08:00
|
|
|
Changed = true;
|
[AMDGPU] Enable merging m0 initializations.
Summary:
Enable hoisting and merging m0 defs that are initialized with the same
immediate value. Fixes bug where removed instructions are not considered
to interfere with other inits, and make sure to not hoist inits before block
prologues.
Reviewers: rampitec, arsenm
Reviewed By: rampitec
Subscribers: kzhuravl, jvesely, wdng, nhaehnle, yaxunl, dstuttard, tpr, t-tye, hiraditya, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D64766
llvm-svn: 366135
2019-07-16 06:07:05 +08:00
|
|
|
++I2;
|
2017-04-25 03:37:54 +08:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
} else if (MDT.dominates(MI2, MI1)) {
|
[AMDGPU] Enable merging m0 initializations.
Summary:
Enable hoisting and merging m0 defs that are initialized with the same
immediate value. Fixes bug where removed instructions are not considered
to interfere with other inits, and make sure to not hoist inits before block
prologues.
Reviewers: rampitec, arsenm
Reviewed By: rampitec
Subscribers: kzhuravl, jvesely, wdng, nhaehnle, yaxunl, dstuttard, tpr, t-tye, hiraditya, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D64766
llvm-svn: 366135
2019-07-16 06:07:05 +08:00
|
|
|
if (!interferes(MI1, MI2)) {
|
2018-05-14 20:53:11 +08:00
|
|
|
LLVM_DEBUG(dbgs()
|
|
|
|
<< "Erasing from "
|
|
|
|
<< printMBBReference(*MI1->getParent()) << " " << *MI1);
|
[AMDGPU] Enable merging m0 initializations.
Summary:
Enable hoisting and merging m0 defs that are initialized with the same
immediate value. Fixes bug where removed instructions are not considered
to interfere with other inits, and make sure to not hoist inits before block
prologues.
Reviewers: rampitec, arsenm
Reviewed By: rampitec
Subscribers: kzhuravl, jvesely, wdng, nhaehnle, yaxunl, dstuttard, tpr, t-tye, hiraditya, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D64766
llvm-svn: 366135
2019-07-16 06:07:05 +08:00
|
|
|
MergedInstrs.insert(MI1);
|
2017-04-25 03:37:54 +08:00
|
|
|
Changed = true;
|
[AMDGPU] Enable merging m0 initializations.
Summary:
Enable hoisting and merging m0 defs that are initialized with the same
immediate value. Fixes bug where removed instructions are not considered
to interfere with other inits, and make sure to not hoist inits before block
prologues.
Reviewers: rampitec, arsenm
Reviewed By: rampitec
Subscribers: kzhuravl, jvesely, wdng, nhaehnle, yaxunl, dstuttard, tpr, t-tye, hiraditya, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D64766
llvm-svn: 366135
2019-07-16 06:07:05 +08:00
|
|
|
++I1;
|
2017-04-25 03:37:54 +08:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
auto *MBB = MDT.findNearestCommonDominator(MI1->getParent(),
|
|
|
|
MI2->getParent());
|
|
|
|
if (!MBB) {
|
|
|
|
++I2;
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
[AMDGPU] Enable merging m0 initializations.
Summary:
Enable hoisting and merging m0 defs that are initialized with the same
immediate value. Fixes bug where removed instructions are not considered
to interfere with other inits, and make sure to not hoist inits before block
prologues.
Reviewers: rampitec, arsenm
Reviewed By: rampitec
Subscribers: kzhuravl, jvesely, wdng, nhaehnle, yaxunl, dstuttard, tpr, t-tye, hiraditya, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D64766
llvm-svn: 366135
2019-07-16 06:07:05 +08:00
|
|
|
MachineBasicBlock::iterator I = getFirstNonPrologue(MBB, TII);
|
|
|
|
if (!interferes(MI1, I) && !interferes(MI2, I)) {
|
2018-05-14 20:53:11 +08:00
|
|
|
LLVM_DEBUG(dbgs()
|
|
|
|
<< "Erasing from "
|
|
|
|
<< printMBBReference(*MI1->getParent()) << " " << *MI1
|
|
|
|
<< "and moving from "
|
|
|
|
<< printMBBReference(*MI2->getParent()) << " to "
|
|
|
|
<< printMBBReference(*I->getParent()) << " " << *MI2);
|
2017-04-25 03:37:54 +08:00
|
|
|
I->getParent()->splice(I, MI2->getParent(), MI2);
|
[AMDGPU] Enable merging m0 initializations.
Summary:
Enable hoisting and merging m0 defs that are initialized with the same
immediate value. Fixes bug where removed instructions are not considered
to interfere with other inits, and make sure to not hoist inits before block
prologues.
Reviewers: rampitec, arsenm
Reviewed By: rampitec
Subscribers: kzhuravl, jvesely, wdng, nhaehnle, yaxunl, dstuttard, tpr, t-tye, hiraditya, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D64766
llvm-svn: 366135
2019-07-16 06:07:05 +08:00
|
|
|
MergedInstrs.insert(MI1);
|
2017-04-25 03:37:54 +08:00
|
|
|
Changed = true;
|
[AMDGPU] Enable merging m0 initializations.
Summary:
Enable hoisting and merging m0 defs that are initialized with the same
immediate value. Fixes bug where removed instructions are not considered
to interfere with other inits, and make sure to not hoist inits before block
prologues.
Reviewers: rampitec, arsenm
Reviewed By: rampitec
Subscribers: kzhuravl, jvesely, wdng, nhaehnle, yaxunl, dstuttard, tpr, t-tye, hiraditya, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D64766
llvm-svn: 366135
2019-07-16 06:07:05 +08:00
|
|
|
++I1;
|
2017-04-25 03:37:54 +08:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
++I2;
|
|
|
|
}
|
|
|
|
++I1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
AMDGPU: Move m0 initializations earlier
Summary:
After hoisting and merging m0 initializations schedule them as early as
possible in the MBB. This helps the scheduler avoid hazards in some
cases.
Reviewers: rampitec, arsenm
Subscribers: kzhuravl, jvesely, wdng, nhaehnle, yaxunl, dstuttard, tpr, t-tye, hiraditya, arphaman, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D67450
llvm-svn: 371671
2019-09-12 05:28:41 +08:00
|
|
|
// Remove initializations that were merged into another.
|
|
|
|
for (auto &Init : Inits) {
|
|
|
|
auto &Defs = Init.second;
|
2019-09-13 03:12:21 +08:00
|
|
|
auto I = Defs.begin();
|
|
|
|
while (I != Defs.end()) {
|
AMDGPU: Move m0 initializations earlier
Summary:
After hoisting and merging m0 initializations schedule them as early as
possible in the MBB. This helps the scheduler avoid hazards in some
cases.
Reviewers: rampitec, arsenm
Subscribers: kzhuravl, jvesely, wdng, nhaehnle, yaxunl, dstuttard, tpr, t-tye, hiraditya, arphaman, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D67450
llvm-svn: 371671
2019-09-12 05:28:41 +08:00
|
|
|
if (MergedInstrs.count(*I)) {
|
|
|
|
(*I)->eraseFromParent();
|
|
|
|
I = Defs.erase(I);
|
2019-09-13 03:12:21 +08:00
|
|
|
} else
|
|
|
|
++I;
|
|
|
|
}
|
AMDGPU: Move m0 initializations earlier
Summary:
After hoisting and merging m0 initializations schedule them as early as
possible in the MBB. This helps the scheduler avoid hazards in some
cases.
Reviewers: rampitec, arsenm
Subscribers: kzhuravl, jvesely, wdng, nhaehnle, yaxunl, dstuttard, tpr, t-tye, hiraditya, arphaman, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D67450
llvm-svn: 371671
2019-09-12 05:28:41 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
// Try to schedule SGPR initializations as early as possible in the MBB.
|
|
|
|
for (auto &Init : Inits) {
|
|
|
|
auto &Defs = Init.second;
|
|
|
|
for (auto MI : Defs) {
|
|
|
|
auto MBB = MI->getParent();
|
|
|
|
MachineInstr &BoundaryMI = *getFirstNonPrologue(MBB, TII);
|
|
|
|
MachineBasicBlock::reverse_iterator B(BoundaryMI);
|
2019-10-10 06:44:47 +08:00
|
|
|
// Check if B should actually be a boundary. If not set the previous
|
AMDGPU: Move m0 initializations earlier
Summary:
After hoisting and merging m0 initializations schedule them as early as
possible in the MBB. This helps the scheduler avoid hazards in some
cases.
Reviewers: rampitec, arsenm
Subscribers: kzhuravl, jvesely, wdng, nhaehnle, yaxunl, dstuttard, tpr, t-tye, hiraditya, arphaman, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D67450
llvm-svn: 371671
2019-09-12 05:28:41 +08:00
|
|
|
// instruction as the boundary instead.
|
|
|
|
if (!TII->isBasicBlockPrologue(*B))
|
|
|
|
B++;
|
|
|
|
|
|
|
|
auto R = std::next(MI->getReverseIterator());
|
|
|
|
const unsigned Threshold = 50;
|
2019-10-10 06:44:47 +08:00
|
|
|
// Search until B or Threshold for a place to insert the initialization.
|
AMDGPU: Move m0 initializations earlier
Summary:
After hoisting and merging m0 initializations schedule them as early as
possible in the MBB. This helps the scheduler avoid hazards in some
cases.
Reviewers: rampitec, arsenm
Subscribers: kzhuravl, jvesely, wdng, nhaehnle, yaxunl, dstuttard, tpr, t-tye, hiraditya, arphaman, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D67450
llvm-svn: 371671
2019-09-12 05:28:41 +08:00
|
|
|
for (unsigned I = 0; R != B && I < Threshold; ++R, ++I)
|
|
|
|
if (R->readsRegister(Reg, TRI) || R->definesRegister(Reg, TRI) ||
|
|
|
|
TII->isSchedulingBoundary(*R, MBB, *MBB->getParent()))
|
|
|
|
break;
|
|
|
|
|
|
|
|
// Move to directly after R.
|
|
|
|
if (&*--R != MI)
|
|
|
|
MBB->splice(*R, MBB, MI);
|
|
|
|
}
|
|
|
|
}
|
[AMDGPU] Enable merging m0 initializations.
Summary:
Enable hoisting and merging m0 defs that are initialized with the same
immediate value. Fixes bug where removed instructions are not considered
to interfere with other inits, and make sure to not hoist inits before block
prologues.
Reviewers: rampitec, arsenm
Reviewed By: rampitec
Subscribers: kzhuravl, jvesely, wdng, nhaehnle, yaxunl, dstuttard, tpr, t-tye, hiraditya, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D64766
llvm-svn: 366135
2019-07-16 06:07:05 +08:00
|
|
|
|
2017-04-25 03:37:54 +08:00
|
|
|
if (Changed)
|
|
|
|
MRI.clearKillFlags(Reg);
|
|
|
|
|
|
|
|
return Changed;
|
|
|
|
}
|
|
|
|
|
2013-08-07 07:08:28 +08:00
|
|
|
bool SIFixSGPRCopies::runOnMachineFunction(MachineFunction &MF) {
|
2019-12-24 06:34:59 +08:00
|
|
|
// Only need to run this in SelectionDAG path.
|
|
|
|
if (MF.getProperties().hasProperty(
|
|
|
|
MachineFunctionProperties::Property::Selected))
|
|
|
|
return false;
|
|
|
|
|
2018-07-12 04:59:01 +08:00
|
|
|
const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
|
2019-10-14 20:01:10 +08:00
|
|
|
MRI = &MF.getRegInfo();
|
|
|
|
TRI = ST.getRegisterInfo();
|
|
|
|
TII = ST.getInstrInfo();
|
2016-11-29 08:46:46 +08:00
|
|
|
MDT = &getAnalysis<MachineDominatorTree>();
|
2015-11-03 07:30:48 +08:00
|
|
|
|
|
|
|
SmallVector<MachineInstr *, 16> Worklist;
|
|
|
|
|
2013-08-07 07:08:28 +08:00
|
|
|
for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
|
|
|
|
BI != BE; ++BI) {
|
|
|
|
MachineBasicBlock &MBB = *BI;
|
|
|
|
for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
|
2015-11-03 07:30:48 +08:00
|
|
|
I != E; ++I) {
|
2013-08-07 07:08:28 +08:00
|
|
|
MachineInstr &MI = *I;
|
2013-11-14 07:36:37 +08:00
|
|
|
|
|
|
|
switch (MI.getOpcode()) {
|
2015-09-22 00:27:22 +08:00
|
|
|
default:
|
|
|
|
continue;
|
[AMDGPU] Add an llvm.amdgcn.wqm intrinsic for WQM
Summary:
Previously, we assumed that certain types of instructions needed WQM in
pixel shaders, particularly DS instructions and image sampling
instructions. This was ok because with OpenGL, the assumption was
correct. But we want to start using DPP instructions for derivatives as
well as other things, so the assumption that we can infer whether to use
WQM based on the instruction won't continue to hold. This intrinsic lets
frontends like Mesa indicate what things need WQM based on their
knowledge of the API, rather than second-guessing them in the backend.
We need to keep around the old method of enabling WQM, but eventually we
should remove it once Mesa catches up. For now, this will let us use DPP
instructions for computing derivatives correctly.
Reviewers: arsenm, tpr, nhaehnle
Subscribers: kzhuravl, wdng, yaxunl, dstuttard, llvm-commits, t-tye
Differential Revision: https://reviews.llvm.org/D35167
llvm-svn: 310085
2017-08-05 02:36:49 +08:00
|
|
|
case AMDGPU::COPY:
|
[AMDGPU] Add support for Whole Wavefront Mode
Summary:
Whole Wavefront Wode (WWM) is similar to WQM, except that all of the
lanes are always enabled, regardless of control flow. This is required
for implementing wavefront reductions in non-uniform control flow, where
we need to use the inactive lanes to propagate intermediate results, so
they need to be enabled. We need to propagate WWM to uses (unless
they're explicitly marked as exact) so that they also propagate
intermediate results correctly. We do the analysis and exec mask munging
during the WQM pass, since there are interactions with WQM for things
that require both WQM and WWM. For simplicity, WWM is entirely
block-local -- blocks are never WWM on entry or exit of a block, and WWM
is not propagated to the block level. This means that computations
involving WWM cannot involve control flow, but we only ever plan to use
WWM for a few limited purposes (none of which involve control flow)
anyways.
Shaders can ask for WWM using the @llvm.amdgcn.wwm intrinsic. There
isn't yet a way to turn WWM off -- that will be added in a future
change.
Finally, it turns out that turning on inactive lanes causes a number of
problems with register allocation. While the best long-term solution
seems like teaching LLVM's register allocator about predication, for now
we need to add some hacks to prevent ourselves from getting into trouble
due to constraints that aren't currently expressed in LLVM. For the gory
details, see the comments at the top of SIFixWWMLiveness.cpp.
Reviewers: arsenm, nhaehnle, tpr
Subscribers: kzhuravl, wdng, mgorny, yaxunl, dstuttard, t-tye, llvm-commits
Differential Revision: https://reviews.llvm.org/D35524
llvm-svn: 310087
2017-08-05 02:36:52 +08:00
|
|
|
case AMDGPU::WQM:
|
2019-07-26 17:54:12 +08:00
|
|
|
case AMDGPU::SOFT_WQM:
|
[AMDGPU] Add support for Whole Wavefront Mode
Summary:
Whole Wavefront Wode (WWM) is similar to WQM, except that all of the
lanes are always enabled, regardless of control flow. This is required
for implementing wavefront reductions in non-uniform control flow, where
we need to use the inactive lanes to propagate intermediate results, so
they need to be enabled. We need to propagate WWM to uses (unless
they're explicitly marked as exact) so that they also propagate
intermediate results correctly. We do the analysis and exec mask munging
during the WQM pass, since there are interactions with WQM for things
that require both WQM and WWM. For simplicity, WWM is entirely
block-local -- blocks are never WWM on entry or exit of a block, and WWM
is not propagated to the block level. This means that computations
involving WWM cannot involve control flow, but we only ever plan to use
WWM for a few limited purposes (none of which involve control flow)
anyways.
Shaders can ask for WWM using the @llvm.amdgcn.wwm intrinsic. There
isn't yet a way to turn WWM off -- that will be added in a future
change.
Finally, it turns out that turning on inactive lanes causes a number of
problems with register allocation. While the best long-term solution
seems like teaching LLVM's register allocator about predication, for now
we need to add some hacks to prevent ourselves from getting into trouble
due to constraints that aren't currently expressed in LLVM. For the gory
details, see the comments at the top of SIFixWWMLiveness.cpp.
Reviewers: arsenm, nhaehnle, tpr
Subscribers: kzhuravl, wdng, mgorny, yaxunl, dstuttard, t-tye, llvm-commits
Differential Revision: https://reviews.llvm.org/D35524
llvm-svn: 310087
2017-08-05 02:36:52 +08:00
|
|
|
case AMDGPU::WWM: {
|
2019-08-02 02:27:11 +08:00
|
|
|
Register DstReg = MI.getOperand(0).getReg();
|
2015-10-13 08:07:54 +08:00
|
|
|
|
|
|
|
const TargetRegisterClass *SrcRC, *DstRC;
|
2019-10-14 20:01:10 +08:00
|
|
|
std::tie(SrcRC, DstRC) = getCopyRegClasses(MI, *TRI, *MRI);
|
2019-08-02 02:27:11 +08:00
|
|
|
|
2019-08-02 07:27:28 +08:00
|
|
|
if (!Register::isVirtualRegister(DstReg)) {
|
2019-08-02 02:27:11 +08:00
|
|
|
// If the destination register is a physical register there isn't
|
|
|
|
// really much we can do to fix this.
|
|
|
|
// Some special instructions use M0 as an input. Some even only use
|
|
|
|
// the first lane. Insert a readfirstlane and hope for the best.
|
|
|
|
if (DstReg == AMDGPU::M0 && TRI->hasVectorRegisters(SrcRC)) {
|
|
|
|
Register TmpReg
|
2019-10-14 20:01:10 +08:00
|
|
|
= MRI->createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
|
2019-08-02 02:27:11 +08:00
|
|
|
|
|
|
|
BuildMI(MBB, MI, MI.getDebugLoc(),
|
|
|
|
TII->get(AMDGPU::V_READFIRSTLANE_B32), TmpReg)
|
|
|
|
.add(MI.getOperand(1));
|
|
|
|
MI.getOperand(1).setReg(TmpReg);
|
|
|
|
}
|
|
|
|
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2015-10-13 08:07:54 +08:00
|
|
|
if (isVGPRToSGPRCopy(SrcRC, DstRC, *TRI)) {
|
Apply llvm-prefer-register-over-unsigned from clang-tidy to LLVM
Summary:
This clang-tidy check is looking for unsigned integer variables whose initializer
starts with an implicit cast from llvm::Register and changes the type of the
variable to llvm::Register (dropping the llvm:: where possible).
Partial reverts in:
X86FrameLowering.cpp - Some functions return unsigned and arguably should be MCRegister
X86FixupLEAs.cpp - Some functions return unsigned and arguably should be MCRegister
X86FrameLowering.cpp - Some functions return unsigned and arguably should be MCRegister
HexagonBitSimplify.cpp - Function takes BitTracker::RegisterRef which appears to be unsigned&
MachineVerifier.cpp - Ambiguous operator==() given MCRegister and const Register
PPCFastISel.cpp - No Register::operator-=()
PeepholeOptimizer.cpp - TargetInstrInfo::optimizeLoadInstr() takes an unsigned&
MachineTraceMetrics.cpp - MachineTraceMetrics lacks a suitable constructor
Manual fixups in:
ARMFastISel.cpp - ARMEmitLoad() now takes a Register& instead of unsigned&
HexagonSplitDouble.cpp - Ternary operator was ambiguous between unsigned/Register
HexagonConstExtenders.cpp - Has a local class named Register, used llvm::Register instead of Register.
PPCFastISel.cpp - PPCEmitLoad() now takes a Register& instead of unsigned&
Depends on D65919
Reviewers: arsenm, bogner, craig.topper, RKSimon
Reviewed By: arsenm
Subscribers: RKSimon, craig.topper, lenary, aemerson, wuzish, jholewinski, MatzeB, qcolombet, dschuff, jyknight, dylanmckay, sdardis, nemanjai, jvesely, wdng, nhaehnle, sbc100, jgravelle-google, kristof.beyls, hiraditya, aheejin, kbarton, fedor.sergeev, javed.absar, asb, rbar, johnrusso, simoncook, apazos, sabuasal, niosHD, jrtc27, MaskRay, zzheng, edward-jones, atanasyan, rogfer01, MartinMosbeck, brucehoult, the_o, tpr, PkmX, jocewei, jsji, Petar.Avramovic, asbirlea, Jim, s.egerton, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D65962
llvm-svn: 369041
2019-08-16 03:22:08 +08:00
|
|
|
Register SrcReg = MI.getOperand(1).getReg();
|
2019-08-02 07:27:28 +08:00
|
|
|
if (!Register::isVirtualRegister(SrcReg)) {
|
2018-10-09 02:47:01 +08:00
|
|
|
TII->moveToVALU(MI, MDT);
|
2017-04-29 09:26:34 +08:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2019-10-14 20:01:10 +08:00
|
|
|
MachineInstr *DefMI = MRI->getVRegDef(SrcReg);
|
2016-12-07 05:13:30 +08:00
|
|
|
unsigned SMovOp;
|
|
|
|
int64_t Imm;
|
|
|
|
// If we are just copying an immediate, we can replace the copy with
|
|
|
|
// s_mov_b32.
|
|
|
|
if (isSafeToFoldImmIntoCopy(&MI, DefMI, TII, SMovOp, Imm)) {
|
|
|
|
MI.getOperand(1).ChangeToImmediate(Imm);
|
|
|
|
MI.addImplicitDefUseOperands(MF);
|
|
|
|
MI.setDesc(TII->get(SMovOp));
|
|
|
|
break;
|
|
|
|
}
|
2018-10-09 02:47:01 +08:00
|
|
|
TII->moveToVALU(MI, MDT);
|
2017-06-21 02:32:42 +08:00
|
|
|
} else if (isSGPRToVGPRCopy(SrcRC, DstRC, *TRI)) {
|
|
|
|
tryChangeVGPRtoSGPRinCopy(MI, TRI, TII);
|
2015-09-22 00:27:22 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
break;
|
|
|
|
}
|
2013-11-14 07:36:37 +08:00
|
|
|
case AMDGPU::PHI: {
|
2019-10-14 20:01:10 +08:00
|
|
|
processPHINode(MI);
|
2013-11-14 07:36:37 +08:00
|
|
|
break;
|
|
|
|
}
|
2017-08-08 08:47:13 +08:00
|
|
|
case AMDGPU::REG_SEQUENCE:
|
2019-07-12 05:19:33 +08:00
|
|
|
if (TRI->hasVectorRegisters(TII->getOpRegClass(MI, 0)) ||
|
|
|
|
!hasVectorOperands(MI, TRI)) {
|
2019-10-14 20:01:10 +08:00
|
|
|
foldVGPRCopyIntoRegSequence(MI, TRI, TII, *MRI);
|
2013-11-14 07:36:37 +08:00
|
|
|
continue;
|
2015-11-03 07:15:42 +08:00
|
|
|
}
|
2013-11-14 07:36:37 +08:00
|
|
|
|
2018-05-14 20:53:11 +08:00
|
|
|
LLVM_DEBUG(dbgs() << "Fixing REG_SEQUENCE: " << MI);
|
2013-11-14 07:36:37 +08:00
|
|
|
|
2018-10-09 02:47:01 +08:00
|
|
|
TII->moveToVALU(MI, MDT);
|
2013-11-14 07:36:37 +08:00
|
|
|
break;
|
2014-04-08 03:45:45 +08:00
|
|
|
case AMDGPU::INSERT_SUBREG: {
|
2014-05-15 22:41:55 +08:00
|
|
|
const TargetRegisterClass *DstRC, *Src0RC, *Src1RC;
|
2019-10-14 20:01:10 +08:00
|
|
|
DstRC = MRI->getRegClass(MI.getOperand(0).getReg());
|
|
|
|
Src0RC = MRI->getRegClass(MI.getOperand(1).getReg());
|
|
|
|
Src1RC = MRI->getRegClass(MI.getOperand(2).getReg());
|
2014-05-15 22:41:55 +08:00
|
|
|
if (TRI->isSGPRClass(DstRC) &&
|
2019-07-12 05:19:33 +08:00
|
|
|
(TRI->hasVectorRegisters(Src0RC) ||
|
|
|
|
TRI->hasVectorRegisters(Src1RC))) {
|
2018-05-14 20:53:11 +08:00
|
|
|
LLVM_DEBUG(dbgs() << " Fixing INSERT_SUBREG: " << MI);
|
2018-10-09 02:47:01 +08:00
|
|
|
TII->moveToVALU(MI, MDT);
|
2014-05-15 22:41:55 +08:00
|
|
|
}
|
|
|
|
break;
|
2014-04-08 03:45:45 +08:00
|
|
|
}
|
[AMDGPU] Fix-up cases where writelane has 2 SGPR operands
Summary:
Even though writelane doesn't have the same constraints as other VALU
instructions, it still can't use more than one SGPR operand.
Due to later register propagation (e.g. fixing up vgpr operands via
readfirstlane) changing writelane to only have a single SGPR is tricky.
This implementation puts a new check after SIFixSGPRCopies that prevents
multiple SGPRs being used in any writelane instructions.
The algorithm used is to check for trivial copy prop of suitable constants into
one of the SGPR operands and perform that if possible. If this isn't possible
put an explicit copy of Src1 SGPR into M0 and use that instead (this is
allowable for writelane as the constraint is for SGPR read-port and not
constant-bus access).
Reviewers: rampitec, tpr, arsenm, nhaehnle
Reviewed By: rampitec, arsenm, nhaehnle
Subscribers: arsenm, kzhuravl, jvesely, wdng, nhaehnle, mgorny, yaxunl, tpr, t-tye, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D51932
Change-Id: Ic7553fa57440f208d4dbc4794fc24345d7e0e9ea
llvm-svn: 375004
2019-10-16 22:37:39 +08:00
|
|
|
case AMDGPU::V_WRITELANE_B32: {
|
|
|
|
// Some architectures allow more than one constant bus access without
|
|
|
|
// SGPR restriction
|
|
|
|
if (ST.getConstantBusLimit(MI.getOpcode()) != 1)
|
|
|
|
break;
|
|
|
|
|
|
|
|
// Writelane is special in that it can use SGPR and M0 (which would
|
|
|
|
// normally count as using the constant bus twice - but in this case it
|
|
|
|
// is allowed since the lane selector doesn't count as a use of the
|
|
|
|
// constant bus). However, it is still required to abide by the 1 SGPR
|
|
|
|
// rule. Apply a fix here as we might have multiple SGPRs after
|
|
|
|
// legalizing VGPRs to SGPRs
|
|
|
|
int Src0Idx =
|
|
|
|
AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src0);
|
|
|
|
int Src1Idx =
|
|
|
|
AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src1);
|
|
|
|
MachineOperand &Src0 = MI.getOperand(Src0Idx);
|
|
|
|
MachineOperand &Src1 = MI.getOperand(Src1Idx);
|
|
|
|
|
|
|
|
// Check to see if the instruction violates the 1 SGPR rule
|
|
|
|
if ((Src0.isReg() && TRI->isSGPRReg(*MRI, Src0.getReg()) &&
|
|
|
|
Src0.getReg() != AMDGPU::M0) &&
|
|
|
|
(Src1.isReg() && TRI->isSGPRReg(*MRI, Src1.getReg()) &&
|
|
|
|
Src1.getReg() != AMDGPU::M0)) {
|
|
|
|
|
|
|
|
// Check for trivially easy constant prop into one of the operands
|
|
|
|
// If this is the case then perform the operation now to resolve SGPR
|
|
|
|
// issue. If we don't do that here we will always insert a mov to m0
|
|
|
|
// that can't be resolved in later operand folding pass
|
|
|
|
bool Resolved = false;
|
|
|
|
for (MachineOperand *MO : {&Src0, &Src1}) {
|
|
|
|
if (Register::isVirtualRegister(MO->getReg())) {
|
|
|
|
MachineInstr *DefMI = MRI->getVRegDef(MO->getReg());
|
|
|
|
if (DefMI && TII->isFoldableCopy(*DefMI)) {
|
|
|
|
const MachineOperand &Def = DefMI->getOperand(0);
|
|
|
|
if (Def.isReg() &&
|
|
|
|
MO->getReg() == Def.getReg() &&
|
|
|
|
MO->getSubReg() == Def.getSubReg()) {
|
|
|
|
const MachineOperand &Copied = DefMI->getOperand(1);
|
|
|
|
if (Copied.isImm() &&
|
|
|
|
TII->isInlineConstant(APInt(64, Copied.getImm(), true))) {
|
|
|
|
MO->ChangeToImmediate(Copied.getImm());
|
|
|
|
Resolved = true;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!Resolved) {
|
|
|
|
// Haven't managed to resolve by replacing an SGPR with an immediate
|
|
|
|
// Move src1 to be in M0
|
|
|
|
BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
|
|
|
|
TII->get(AMDGPU::COPY), AMDGPU::M0)
|
|
|
|
.add(Src1);
|
|
|
|
Src1.ChangeToRegister(AMDGPU::M0, false);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
}
|
2013-08-07 07:08:28 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2014-11-18 05:11:34 +08:00
|
|
|
|
2017-04-25 03:37:54 +08:00
|
|
|
if (MF.getTarget().getOptLevel() > CodeGenOpt::None && EnableM0Merge)
|
2019-10-14 20:01:10 +08:00
|
|
|
hoistAndMergeSGPRInits(AMDGPU::M0, *MRI, TRI, *MDT, TII);
|
2017-04-25 03:37:54 +08:00
|
|
|
|
2014-11-18 05:11:34 +08:00
|
|
|
return true;
|
2013-08-07 07:08:28 +08:00
|
|
|
}
|
2019-10-14 20:01:10 +08:00
|
|
|
|
|
|
|
// Decide how to legalize a PHI node whose operands may mix SGPR, VGPR and
// AGPR values: move the PHI's result class to AGPR, move the whole PHI to
// VALU, or legalize its operands in place.
void SIFixSGPRCopies::processPHINode(MachineInstr &MI) {
  // Count of uses (reached transitively through COPY/REG_SEQUENCE chains)
  // that require a vector register.
  unsigned numVGPRUses = 0;
  // Remains true only while every use seen is an AGPR, or a copy whose
  // destination is an AGPR.
  bool AllAGPRUses = true;
  // Instructions whose result register's uses still need to be scanned.
  SetVector<const MachineInstr *> worklist;
  // Guards against revisiting an instruction. Without it, REG_SEQUENCE uses
  // and phys<->virt copy cycles can make this search loop forever (see the
  // infinite-search fix referenced in the surrounding history).
  SmallSet<const MachineInstr *, 4> Visited;
  worklist.insert(&MI);
  Visited.insert(&MI);
  // Transitively walk all uses of the PHI result, following through COPY and
  // REG_SEQUENCE, classifying each terminal use as SGPR-, VGPR- or
  // AGPR-compatible.
  while (!worklist.empty()) {
    const MachineInstr *Instr = worklist.pop_back_val();
    unsigned Reg = Instr->getOperand(0).getReg();
    for (const auto &Use : MRI->use_operands(Reg)) {
      const MachineInstr *UseMI = Use.getParent();
      // A use keeps AllAGPRUses alive if it is itself an AGPR operand, or a
      // copy whose destination register is an AGPR.
      AllAGPRUses &= (UseMI->isCopy() &&
                      TRI->isAGPR(*MRI, UseMI->getOperand(0).getReg())) ||
                     TRI->isAGPR(*MRI, Use.getReg());
      if (UseMI->isCopy() || UseMI->isRegSequence()) {
        // A copy into a physical non-SGPR register is counted as a VGPR use;
        // copies/reg_sequences are otherwise transparent and queued so their
        // results' uses get scanned too.
        if (UseMI->isCopy() &&
            UseMI->getOperand(0).getReg().isPhysical() &&
            !TRI->isSGPRReg(*MRI, UseMI->getOperand(0).getReg())) {
          numVGPRUses++;
        }
        if (Visited.insert(UseMI).second)
          worklist.insert(UseMI);

        continue;
      }

      if (UseMI->isPHI()) {
        // A feeding PHI counts as a VGPR use unless the operand is already an
        // SGPR or a VReg_1 (i1 lowered in a vector reg is handled elsewhere).
        const TargetRegisterClass *UseRC = MRI->getRegClass(Use.getReg());
        if (!TRI->isSGPRReg(*MRI, Use.getReg()) &&
            UseRC != &AMDGPU::VReg_1RegClass)
          numVGPRUses++;
        continue;
      }

      // For any other instruction, classify by the register class the
      // instruction requires for this operand. VS_32/VS_64 accept either
      // bank, so they do not force a VGPR.
      const TargetRegisterClass *OpRC =
          TII->getOpRegClass(*UseMI, UseMI->getOperandNo(&Use));
      if (!TRI->isSGPRClass(OpRC) && OpRC != &AMDGPU::VS_32RegClass &&
          OpRC != &AMDGPU::VS_64RegClass) {
        numVGPRUses++;
      }
    }
  }

  // If every use is AGPR-compatible but the PHI result class is not an AGPR
  // class yet, retype the result to the equivalent AGPR class.
  Register PHIRes = MI.getOperand(0).getReg();
  const TargetRegisterClass *RC0 = MRI->getRegClass(PHIRes);
  if (AllAGPRUses && numVGPRUses && !TRI->hasAGPRs(RC0)) {
    LLVM_DEBUG(dbgs() << "Moving PHI to AGPR: " << MI);
    MRI->setRegClass(PHIRes, TRI->getEquivalentAGPRClass(RC0));
  }

  // Scan the PHI's incoming values (operands come in value/block pairs, hence
  // the step of 2 starting at operand 1) for any genuine VGPR input.
  bool hasVGPRInput = false;
  for (unsigned i = 1; i < MI.getNumOperands(); i += 2) {
    unsigned InputReg = MI.getOperand(i).getReg();
    MachineInstr *Def = MRI->getVRegDef(InputReg);
    if (TRI->isVectorRegister(*MRI, InputReg)) {
      if (Def->isCopy()) {
        // A vector input that is just a copy of an SGPR does not force the
        // PHI to the VALU.
        unsigned SrcReg = Def->getOperand(1).getReg();
        const TargetRegisterClass *RC =
            TRI->getRegClassForReg(*MRI, SrcReg);
        if (TRI->isSGPRClass(RC))
          continue;
      }
      hasVGPRInput = true;
      break;
    }
    else if (Def->isCopy() &&
             TRI->isVectorRegister(*MRI, Def->getOperand(1).getReg())) {
      // Scalar-classed input that is a copy FROM a vector register still
      // counts as a VGPR input.
      hasVGPRInput = true;
      break;
    }
  }

  // A scalar-result PHI (other than VReg_1) with a VGPR input, or with more
  // than one VGPR use, must be moved to the VALU; otherwise just legalize the
  // operands of the PHI as-is.
  if ((!TRI->isVectorRegister(*MRI, PHIRes) &&
       RC0 != &AMDGPU::VReg_1RegClass) &&
      (hasVGPRInput || numVGPRUses > 1)) {
    LLVM_DEBUG(dbgs() << "Fixing PHI: " << MI);
    TII->moveToVALU(MI);
  }
  else {
    LLVM_DEBUG(dbgs() << "Legalizing PHI: " << MI);
    TII->legalizeOperands(MI, MDT);
  }

}
|