2019-07-04 07:32:29 +08:00
|
|
|
//===-- SILowerSGPRSPills.cpp ---------------------------------------------===//
|
|
|
|
//
|
|
|
|
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
|
|
|
|
// See https://llvm.org/LICENSE.txt for license information.
|
|
|
|
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
|
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
//
|
|
|
|
// Handle SGPR spills. This pass takes the place of PrologEpilogInserter for all
|
|
|
|
// SGPR spills, so must insert CSR SGPR spills as well as expand them.
|
|
|
|
//
|
|
|
|
// This pass must never create new SGPR virtual registers.
|
|
|
|
//
|
|
|
|
// FIXME: Must stop RegScavenger spills in later passes.
|
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
#include "AMDGPU.h"
|
|
|
|
#include "AMDGPUSubtarget.h"
|
|
|
|
#include "SIInstrInfo.h"
|
|
|
|
#include "SIMachineFunctionInfo.h"
|
|
|
|
#include "llvm/CodeGen/LiveIntervals.h"
|
|
|
|
#include "llvm/CodeGen/MachineBasicBlock.h"
|
|
|
|
#include "llvm/CodeGen/MachineFunction.h"
|
|
|
|
#include "llvm/CodeGen/MachineFunctionPass.h"
|
|
|
|
#include "llvm/CodeGen/MachineInstr.h"
|
|
|
|
#include "llvm/CodeGen/MachineInstrBuilder.h"
|
|
|
|
#include "llvm/CodeGen/MachineOperand.h"
|
|
|
|
#include "llvm/CodeGen/VirtRegMap.h"
|
Sink all InitializePasses.h includes
This file lists every pass in LLVM, and is included by Pass.h, which is
very popular. Every time we add, remove, or rename a pass in LLVM, it
caused lots of recompilation.
I found this fact by looking at this table, which is sorted by the
number of times a file was changed over the last 100,000 git commits
multiplied by the number of object files that depend on it in the
current checkout:
recompiles touches affected_files header
342380 95 3604 llvm/include/llvm/ADT/STLExtras.h
314730 234 1345 llvm/include/llvm/InitializePasses.h
307036 118 2602 llvm/include/llvm/ADT/APInt.h
213049 59 3611 llvm/include/llvm/Support/MathExtras.h
170422 47 3626 llvm/include/llvm/Support/Compiler.h
162225 45 3605 llvm/include/llvm/ADT/Optional.h
158319 63 2513 llvm/include/llvm/ADT/Triple.h
140322 39 3598 llvm/include/llvm/ADT/StringRef.h
137647 59 2333 llvm/include/llvm/Support/Error.h
131619 73 1803 llvm/include/llvm/Support/FileSystem.h
Before this change, touching InitializePasses.h would cause 1345 files
to recompile. After this change, touching it only causes 550 compiles in
an incremental rebuild.
Reviewers: bkramer, asbirlea, bollu, jdoerfert
Differential Revision: https://reviews.llvm.org/D70211
2019-11-14 05:15:01 +08:00
|
|
|
#include "llvm/InitializePasses.h"
|
2019-07-04 07:32:29 +08:00
|
|
|
#include "llvm/Target/TargetMachine.h"
|
|
|
|
|
|
|
|
using namespace llvm;

#define DEBUG_TYPE "si-lower-sgpr-spills"

// Small list of prolog/epilog placement blocks; one save block is typical,
// so four inline elements is plenty.
using MBBVector = SmallVector<MachineBasicBlock *, 4>;
|
|
|
|
|
|
|
|
namespace {

// On subtargets with MAI instructions, VGPR spills can be redirected into
// AGPRs instead of memory; this flag gates that behavior (on by default).
static cl::opt<bool> EnableSpillVGPRToAGPR(
    "amdgpu-spill-vgpr-to-agpr",
    cl::desc("Enable spilling VGPRs to AGPRs"),
    cl::ReallyHidden,
    cl::init(true));

/// Lowers SGPR spill pseudo instructions. Takes the place of
/// PrologEpilogInserter for SGPR spills: inserts CSR SGPR saves/restores and
/// expands SGPR spill pseudos, preferring VGPR lanes over stack slots.
class SILowerSGPRSpills : public MachineFunctionPass {
private:
  const SIRegisterInfo *TRI = nullptr;
  const SIInstrInfo *TII = nullptr;
  VirtRegMap *VRM = nullptr;       // Optional; fetched if available.
  LiveIntervals *LIS = nullptr;    // Optional; kept up to date when present.

  // Save and Restore blocks of the current function. Typically there is a
  // single save block, unless Windows EH funclets are involved.
  MBBVector SaveBlocks;
  MBBVector RestoreBlocks;

public:
  static char ID;

  SILowerSGPRSpills() : MachineFunctionPass(ID) {}

  /// Populate SaveBlocks/RestoreBlocks for \p MF from shrink-wrapping points
  /// if set, otherwise from the entry and return blocks.
  void calculateSaveRestoreBlocks(MachineFunction &MF);

  /// Insert save/restore code for callee-saved SGPRs. Returns true if any
  /// CSR spills were inserted.
  bool spillCalleeSavedRegs(MachineFunction &MF);

  bool runOnMachineFunction(MachineFunction &MF) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesAll();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // end anonymous namespace
|
|
|
|
|
|
|
|
char SILowerSGPRSpills::ID = 0;

INITIALIZE_PASS_BEGIN(SILowerSGPRSpills, DEBUG_TYPE,
                      "SI lower SGPR spill instructions", false, false)
INITIALIZE_PASS_DEPENDENCY(VirtRegMap)
INITIALIZE_PASS_END(SILowerSGPRSpills, DEBUG_TYPE,
                    "SI lower SGPR spill instructions", false, false)

// Exported handle so the AMDGPU pass pipeline can reference this pass by ID.
char &llvm::SILowerSGPRSpillsID = SILowerSGPRSpills::ID;
|
|
|
|
|
|
|
|
/// Insert spill code for the callee-saved registers used in the function.
static void insertCSRSaves(MachineBasicBlock &SaveBlock,
                           ArrayRef<CalleeSavedInfo> CSI,
                           LiveIntervals *LIS) {
  MachineFunction &MF = *SaveBlock.getParent();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();

  MachineBasicBlock::iterator I = SaveBlock.begin();
  // Let target frame lowering spill all CSRs itself if it can; otherwise
  // fall back to one generic store per callee-saved register.
  if (!TFI->spillCalleeSavedRegisters(SaveBlock, I, CSI, TRI)) {
    for (const CalleeSavedInfo &CS : CSI) {
      // Insert the spill to the stack frame.
      unsigned Reg = CS.getReg();

      // Span tracks the instructions inserted before I by the store below.
      MachineInstrSpan MIS(I, &SaveBlock);
      // MVT::i32 selects a minimal 32-bit-element class for the phys reg.
      const TargetRegisterClass *RC =
          TRI->getMinimalPhysRegClass(Reg, MVT::i32);

      TII.storeRegToStackSlot(SaveBlock, I, Reg, true, CS.getFrameIdx(), RC,
                              TRI);

      if (LIS) {
        // Exactly one instruction is expected from storeRegToStackSlot here.
        assert(std::distance(MIS.begin(), I) == 1);
        MachineInstr &Inst = *std::prev(I);

        // Index the new save and drop stale liveness for the saved register.
        LIS->InsertMachineInstrInMaps(Inst);
        LIS->removeAllRegUnitsForPhysReg(Reg);
      }
    }
  }
}
|
|
|
|
|
|
|
|
/// Insert restore code for the callee-saved registers used in the function.
static void insertCSRRestores(MachineBasicBlock &RestoreBlock,
                              MutableArrayRef<CalleeSavedInfo> CSI,
                              LiveIntervals *LIS) {
  MachineFunction &MF = *RestoreBlock.getParent();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();

  // Restore all registers immediately before the return and any
  // terminators that precede it.
  MachineBasicBlock::iterator I = RestoreBlock.getFirstTerminator();

  // FIXME: Just emit the readlane/writelane directly
  if (!TFI->restoreCalleeSavedRegisters(RestoreBlock, I, CSI, TRI)) {
    for (const CalleeSavedInfo &CI : reverse(CSI)) {
      unsigned Reg = CI.getReg();
      const TargetRegisterClass *RC =
          TRI->getMinimalPhysRegClass(Reg, MVT::i32);

      TII.loadRegFromStackSlot(RestoreBlock, I, Reg, CI.getFrameIdx(), RC, TRI);
      assert(I != RestoreBlock.begin() &&
             "loadRegFromStackSlot didn't insert any code!");
      // Insert in reverse order. loadRegFromStackSlot can insert
      // multiple instructions.

      if (LIS) {
        // NOTE(review): only the instruction immediately before I is added
        // to the LIS maps; if loadRegFromStackSlot expanded to more than one
        // instruction the earlier ones would go unindexed — confirm the
        // single-instruction assumption holds for these register classes.
        MachineInstr &Inst = *std::prev(I);
        LIS->InsertMachineInstrInMaps(Inst);
        LIS->removeAllRegUnitsForPhysReg(Reg);
      }
    }
  }
}
|
|
|
|
|
|
|
|
/// Compute the sets of entry and return blocks for saving and restoring
|
|
|
|
/// callee-saved registers, and placing prolog and epilog code.
|
|
|
|
void SILowerSGPRSpills::calculateSaveRestoreBlocks(MachineFunction &MF) {
|
|
|
|
const MachineFrameInfo &MFI = MF.getFrameInfo();
|
|
|
|
|
|
|
|
// Even when we do not change any CSR, we still want to insert the
|
|
|
|
// prologue and epilogue of the function.
|
|
|
|
// So set the save points for those.
|
|
|
|
|
|
|
|
// Use the points found by shrink-wrapping, if any.
|
|
|
|
if (MFI.getSavePoint()) {
|
|
|
|
SaveBlocks.push_back(MFI.getSavePoint());
|
|
|
|
assert(MFI.getRestorePoint() && "Both restore and save must be set");
|
|
|
|
MachineBasicBlock *RestoreBlock = MFI.getRestorePoint();
|
|
|
|
// If RestoreBlock does not have any successor and is not a return block
|
|
|
|
// then the end point is unreachable and we do not need to insert any
|
|
|
|
// epilogue.
|
|
|
|
if (!RestoreBlock->succ_empty() || RestoreBlock->isReturnBlock())
|
|
|
|
RestoreBlocks.push_back(RestoreBlock);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Save refs to entry and return blocks.
|
|
|
|
SaveBlocks.push_back(&MF.front());
|
|
|
|
for (MachineBasicBlock &MBB : MF) {
|
|
|
|
if (MBB.isEHFuncletEntry())
|
|
|
|
SaveBlocks.push_back(&MBB);
|
|
|
|
if (MBB.isReturnBlock())
|
|
|
|
RestoreBlocks.push_back(&MBB);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Insert save/restore code for all callee-saved SGPRs that
/// determineCalleeSavesSGPR marks as clobbered. Returns true if any CSR
/// spills were inserted.
bool SILowerSGPRSpills::spillCalleeSavedRegs(MachineFunction &MF) {
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const Function &F = MF.getFunction();
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIFrameLowering *TFI = ST.getFrameLowering();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  RegScavenger *RS = nullptr;

  // Determine which of the registers in the callee save list should be saved.
  BitVector SavedRegs;
  TFI->determineCalleeSavesSGPR(MF, SavedRegs, RS);

  // Add the code to save and restore the callee saved registers.
  if (!F.hasFnAttribute(Attribute::Naked)) {
    // FIXME: This is a lie. The CalleeSavedInfo is incomplete, but this is
    // necessary for verifier liveness checks.
    MFI.setCalleeSavedInfoValid(true);

    std::vector<CalleeSavedInfo> CSI;
    const MCPhysReg *CSRegs = MRI.getCalleeSavedRegs();

    // CSRegs is a null-terminated list; collect each entry flagged as
    // needing a save.
    for (unsigned I = 0; CSRegs[I]; ++I) {
      unsigned Reg = CSRegs[I];
      if (SavedRegs.test(Reg)) {
        const TargetRegisterClass *RC =
            TRI->getMinimalPhysRegClass(Reg, MVT::i32);
        // "Junk" slot: presumably unused if the spill later lands in VGPR
        // lanes rather than memory — TODO confirm against the FIXME above.
        int JunkFI = MFI.CreateStackObject(TRI->getSpillSize(*RC),
                                           TRI->getSpillAlignment(*RC),
                                           true);

        CSI.push_back(CalleeSavedInfo(Reg, JunkFI));
      }
    }

    if (!CSI.empty()) {
      // Emit saves at every save block and restores at every restore block
      // computed by calculateSaveRestoreBlocks.
      for (MachineBasicBlock *SaveBlock : SaveBlocks)
        insertCSRSaves(*SaveBlock, CSI, LIS);

      for (MachineBasicBlock *RestoreBlock : RestoreBlocks)
        insertCSRRestores(*RestoreBlock, CSI, LIS);
      return true;
    }
  }

  return false;
}
|
|
|
|
|
[AMDGPU] Reserving VGPR for future SGPR Spill
Summary: One VGPR register is allocated to handle a future spill of SGPR if "--amdgpu-reserve-vgpr-for-sgpr-spill" option is used
Reviewers: arsenm, rampitec, msearles, cdevadas
Reviewed By: arsenm
Subscribers: madhur13490, qcolombet, kerbowa, kzhuravl, jvesely, wdng, nhaehnle, yaxunl, dstuttard, tpr, t-tye, hiraditya, llvm-commits
Tags: #amdgpu, #llvm
Differential Revision: https://reviews.llvm.org/D70379
2020-04-10 15:55:11 +08:00
|
|
|
// Find lowest available VGPR and use it as VGPR reserved for SGPR spills.
|
|
|
|
static bool lowerShiftReservedVGPR(MachineFunction &MF,
|
|
|
|
const GCNSubtarget &ST) {
|
|
|
|
MachineRegisterInfo &MRI = MF.getRegInfo();
|
|
|
|
MachineFrameInfo &FrameInfo = MF.getFrameInfo();
|
|
|
|
SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
|
|
|
|
Register LowestAvailableVGPR, ReservedVGPR;
|
[NFC] Move getAll{S,V}GPR{32,128} methods to SIFrameLowering
Summary:
Future patch needs some of these in multiple places.
The definitions of these can't be in the header and be eligible for
inlining without making the full declaration of GCNSubtarget visible.
I'm not sure what the right trade-off is, but I opted to not bloat
SIRegisterInfo.h
Reviewers: arsenm, cdevadas
Reviewed By: arsenm
Subscribers: RamNalamothu, qcolombet, jvesely, wdng, nhaehnle, hiraditya, kerbowa, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D79878
2020-06-18 00:08:09 +08:00
|
|
|
ArrayRef<MCPhysReg> AllVGPR32s = ST.getRegisterInfo()->getAllVGPR32(MF);
|
[AMDGPU] Reserving VGPR for future SGPR Spill
Summary: One VGPR register is allocated to handle a future spill of SGPR if "--amdgpu-reserve-vgpr-for-sgpr-spill" option is used
Reviewers: arsenm, rampitec, msearles, cdevadas
Reviewed By: arsenm
Subscribers: madhur13490, qcolombet, kerbowa, kzhuravl, jvesely, wdng, nhaehnle, yaxunl, dstuttard, tpr, t-tye, hiraditya, llvm-commits
Tags: #amdgpu, #llvm
Differential Revision: https://reviews.llvm.org/D70379
2020-04-10 15:55:11 +08:00
|
|
|
for (MCPhysReg Reg : AllVGPR32s) {
|
|
|
|
if (MRI.isAllocatable(Reg) && !MRI.isPhysRegUsed(Reg)) {
|
|
|
|
LowestAvailableVGPR = Reg;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!LowestAvailableVGPR)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
ReservedVGPR = FuncInfo->VGPRReservedForSGPRSpill;
|
|
|
|
const MCPhysReg *CSRegs = MF.getRegInfo().getCalleeSavedRegs();
|
|
|
|
int i = 0;
|
|
|
|
|
|
|
|
for (MachineBasicBlock &MBB : MF) {
|
|
|
|
for (auto Reg : FuncInfo->getSGPRSpillVGPRs()) {
|
|
|
|
if (Reg.VGPR == ReservedVGPR) {
|
|
|
|
MBB.removeLiveIn(ReservedVGPR);
|
|
|
|
MBB.addLiveIn(LowestAvailableVGPR);
|
|
|
|
Optional<int> FI;
|
|
|
|
if (FuncInfo->isCalleeSavedReg(CSRegs, LowestAvailableVGPR))
|
|
|
|
FI = FrameInfo.CreateSpillStackObject(4, Align(4));
|
|
|
|
|
|
|
|
FuncInfo->setSGPRSpillVGPRs(LowestAvailableVGPR, FI, i);
|
|
|
|
}
|
|
|
|
++i;
|
|
|
|
}
|
|
|
|
MBB.sortUniqueLiveIns();
|
|
|
|
}
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2019-07-04 07:32:29 +08:00
|
|
|
/// Entry point: insert CSR SGPR save/restore code, then rewrite SGPR spill
/// pseudos into VGPR lanes (and, optionally, VGPR spills into AGPRs),
/// eliminating their frame indices where possible.
bool SILowerSGPRSpills::runOnMachineFunction(MachineFunction &MF) {
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  TII = ST.getInstrInfo();
  TRI = &TII->getRegisterInfo();

  // VirtRegMap is optional at this point in the pipeline.
  VRM = getAnalysisIfAvailable<VirtRegMap>();

  assert(SaveBlocks.empty() && RestoreBlocks.empty());

  // First, expose any CSR SGPR spills. This is mostly the same as what PEI
  // does, but somewhat simpler.
  calculateSaveRestoreBlocks(MF);
  bool HasCSRs = spillCalleeSavedRegs(MF);

  MachineFrameInfo &MFI = MF.getFrameInfo();
  // Nothing to lower: no stack objects and no CSR spills were inserted.
  if (!MFI.hasStackObjects() && !HasCSRs) {
    SaveBlocks.clear();
    RestoreBlocks.clear();
    return false;
  }

  MachineRegisterInfo &MRI = MF.getRegInfo();
  SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
  const bool SpillVGPRToAGPR = ST.hasMAIInsts() && FuncInfo->hasSpilledVGPRs()
    && EnableSpillVGPRToAGPR;

  bool MadeChange = false;

  // NOTE(review): unlike SpillVGPRToAGPR above, this does not require
  // hasSpilledVGPRs() — the per-instruction isVGPRSpill check below covers
  // that; confirm the two flags are intentionally different.
  const bool SpillToAGPR = EnableSpillVGPRToAGPR && ST.hasMAIInsts();

  // TODO: CSR VGPRs will never be spilled to AGPRs. These can probably be
  // handled as SpilledToReg in regular PrologEpilogInserter.
  if ((TRI->spillSGPRToVGPR() && (HasCSRs || FuncInfo->hasSpilledSGPRs())) ||
      SpillVGPRToAGPR) {
    // Process all SGPR spills before frame offsets are finalized. Ideally SGPRs
    // are spilled to VGPRs, in which case we can eliminate the stack usage.
    //
    // This operates under the assumption that only other SGPR spills are users
    // of the frame index.

    // Shift the VGPR reserved for SGPR spills to the lowest available one.
    lowerShiftReservedVGPR(MF, ST);

    for (MachineBasicBlock &MBB : MF) {
      MachineBasicBlock::iterator Next;
      // Next is captured before processing MI, since frame index elimination
      // below may replace or remove MI.
      for (auto I = MBB.begin(), E = MBB.end(); I != E; I = Next) {
        MachineInstr &MI = *I;
        Next = std::next(I);

        if (SpillToAGPR && TII->isVGPRSpill(MI)) {
          // Try to eliminate stack used by VGPR spills before frame
          // finalization.
          unsigned FIOp = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                                     AMDGPU::OpName::vaddr);
          int FI = MI.getOperand(FIOp).getIndex();
          Register VReg =
            TII->getNamedOperand(MI, AMDGPU::OpName::vdata)->getReg();
          if (FuncInfo->allocateVGPRSpillToAGPR(MF, FI,
                                                TRI->isAGPR(MRI, VReg))) {
            TRI->eliminateFrameIndex(MI, 0, FIOp, nullptr);
            continue;
          }
        }

        if (!TII->isSGPRSpill(MI))
          continue;

        int FI = TII->getNamedOperand(MI, AMDGPU::OpName::addr)->getIndex();
        assert(MFI.getStackID(FI) == TargetStackID::SGPRSpill);
        if (FuncInfo->allocateSGPRSpillToVGPR(MF, FI)) {
          bool Spilled = TRI->eliminateSGPRToVGPRSpillFrameIndex(MI, FI, nullptr);
          (void)Spilled;
          assert(Spilled && "failed to spill SGPR to VGPR when allocated");
        }
      }
    }

    // Make every spill VGPR/AGPR live-in in all blocks so the register
    // machine verifier sees them as defined wherever lanes are accessed.
    for (MachineBasicBlock &MBB : MF) {
      for (auto SSpill : FuncInfo->getSGPRSpillVGPRs())
        MBB.addLiveIn(SSpill.VGPR);

      for (MCPhysReg Reg : FuncInfo->getVGPRSpillAGPRs())
        MBB.addLiveIn(Reg);

      for (MCPhysReg Reg : FuncInfo->getAGPRSpillVGPRs())
        MBB.addLiveIn(Reg);

      MBB.sortUniqueLiveIns();
    }

    MadeChange = true;
  } else if (FuncInfo->VGPRReservedForSGPRSpill) {
    // No SGPR spills to lower: release the speculatively reserved VGPR.
    // NOTE(review): MadeChange remains false on this path even though the
    // reservation is dropped — confirm that is intended.
    FuncInfo->removeVGPRForSGPRSpill(FuncInfo->VGPRReservedForSGPRSpill, MF);
  }

  SaveBlocks.clear();
  RestoreBlocks.clear();

  return MadeChange;
}
|