//===-- SystemZFrameLowering.cpp - Frame lowering for SystemZ -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "SystemZFrameLowering.h"
|
|
|
|
#include "SystemZCallingConv.h"
|
|
|
|
#include "SystemZInstrBuilder.h"
|
2014-07-02 04:18:59 +08:00
|
|
|
#include "SystemZInstrInfo.h"
|
2013-05-07 00:15:19 +08:00
|
|
|
#include "SystemZMachineFunctionInfo.h"
|
2014-07-02 04:18:59 +08:00
|
|
|
#include "SystemZRegisterInfo.h"
|
2014-08-05 05:25:23 +08:00
|
|
|
#include "SystemZSubtarget.h"
|
2013-05-07 00:15:19 +08:00
|
|
|
#include "llvm/CodeGen/MachineModuleInfo.h"
|
|
|
|
#include "llvm/CodeGen/MachineRegisterInfo.h"
|
2013-07-05 20:55:00 +08:00
|
|
|
#include "llvm/CodeGen/RegisterScavenging.h"
|
2013-05-07 00:15:19 +08:00
|
|
|
#include "llvm/IR/Function.h"
|
|
|
|
|
|
|
|
using namespace llvm;
|
|
|
|
|
2013-07-03 17:11:00 +08:00
|
|
|
namespace {
// The ABI-defined register save slots, relative to the CFA (i.e.
// incoming stack pointer + SystemZMC::CallFrameSize).
static const TargetFrameLowering::SpillSlot SpillOffsetTable[] = {
  { SystemZ::R2D,  0x10 },
  { SystemZ::R3D,  0x18 },
  { SystemZ::R4D,  0x20 },
  { SystemZ::R5D,  0x28 },
  { SystemZ::R6D,  0x30 },
  { SystemZ::R7D,  0x38 },
  { SystemZ::R8D,  0x40 },
  { SystemZ::R9D,  0x48 },
  { SystemZ::R10D, 0x50 },
  { SystemZ::R11D, 0x58 },
  { SystemZ::R12D, 0x60 },
  { SystemZ::R13D, 0x68 },
  { SystemZ::R14D, 0x70 },
  { SystemZ::R15D, 0x78 },
  { SystemZ::F0D,  0x80 },
  { SystemZ::F2D,  0x88 },
  { SystemZ::F4D,  0x90 },
  { SystemZ::F6D,  0x98 }
};
} // end anonymous namespace

SystemZFrameLowering::SystemZFrameLowering()
    : TargetFrameLowering(TargetFrameLowering::StackGrowsDown, Align(8),
                          0, Align(8), false /* StackRealignable */),
      RegSpillOffsets(0) {
  // Due to the SystemZ ABI, the DWARF CFA (Canonical Frame Address) is not
  // equal to the incoming stack pointer, but to incoming stack pointer plus
  // 160.  Instead of using a Local Area Offset, the Register save area will
  // be occupied by fixed frame objects, and all offsets are actually
  // relative to CFA.

  // Create a mapping from register number to save slot offset.
  // These offsets are relative to the start of the register save area.
  RegSpillOffsets.grow(SystemZ::NUM_TARGET_REGS);
  for (unsigned I = 0, E = array_lengthof(SpillOffsetTable); I != E; ++I)
    RegSpillOffsets[SpillOffsetTable[I].Reg] = SpillOffsetTable[I].Offset;
}

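// Return true if the packed stack layout can be used: it requires the
// "packed-stack" function attribute and is only selected when the function
// has no varargs, does not use the GHC calling convention, does not store a
// backchain and does not take the frame address.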
static bool usePackedStack(MachineFunction &MF) {
  bool HasPackedStackAttr = MF.getFunction().hasFnAttribute("packed-stack");
  bool IsVarArg = MF.getFunction().isVarArg();
  bool CallConv = MF.getFunction().getCallingConv() != CallingConv::GHC;
  bool BackChain = MF.getFunction().hasFnAttribute("backchain");
  bool FrameAddressTaken = MF.getFrameInfo().isFrameAddressTaken();
  return HasPackedStackAttr && !IsVarArg && CallConv && !BackChain &&
         !FrameAddressTaken;
}

bool SystemZFrameLowering::
assignCalleeSavedSpillSlots(MachineFunction &MF,
                            const TargetRegisterInfo *TRI,
                            std::vector<CalleeSavedInfo> &CSI) const {
  SystemZMachineFunctionInfo *ZFI = MF.getInfo<SystemZMachineFunctionInfo>();
  MachineFrameInfo &MFFrame = MF.getFrameInfo();
  bool IsVarArg = MF.getFunction().isVarArg();
  if (CSI.empty())
    return true; // Early exit if no callee saved registers are modified!

  unsigned LowGPR = 0;
  unsigned HighGPR = SystemZ::R15D;
  int StartSPOffset = SystemZMC::CallFrameSize;
  int CurrOffset;
  if (!usePackedStack(MF)) {
    for (auto &CS : CSI) {
      unsigned Reg = CS.getReg();
      int Offset = RegSpillOffsets[Reg];
      if (Offset) {
        if (SystemZ::GR64BitRegClass.contains(Reg) && StartSPOffset > Offset) {
          LowGPR = Reg;
          StartSPOffset = Offset;
        }
        Offset -= SystemZMC::CallFrameSize;
        int FrameIdx = MFFrame.CreateFixedSpillStackObject(8, Offset);
        CS.setFrameIdx(FrameIdx);
      } else
        CS.setFrameIdx(INT32_MAX);
    }

    // Save the range of call-saved registers, for use by the
    // prologue/epilogue inserters.
    ZFI->setRestoreGPRRegs(LowGPR, HighGPR, StartSPOffset);
    if (IsVarArg) {
      // Also save the GPR varargs, if any.  R6D is call-saved, so would
      // already be included, but we also need to handle the call-clobbered
      // argument registers.
      unsigned FirstGPR = ZFI->getVarArgsFirstGPR();
      if (FirstGPR < SystemZ::NumArgGPRs) {
        unsigned Reg = SystemZ::ArgGPRs[FirstGPR];
        int Offset = RegSpillOffsets[Reg];
        if (StartSPOffset > Offset) {
          LowGPR = Reg; StartSPOffset = Offset;
        }
      }
    }
    ZFI->setSpillGPRRegs(LowGPR, HighGPR, StartSPOffset);

    CurrOffset = -SystemZMC::CallFrameSize;
  } else {
    // Packed stack: put all the GPRs at the top of the Register save area.
    uint32_t LowGR64Num = UINT32_MAX;
    for (auto &CS : CSI) {
      unsigned Reg = CS.getReg();
      if (SystemZ::GR64BitRegClass.contains(Reg)) {
        unsigned GR64Num = SystemZMC::getFirstReg(Reg);
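        // In the packed layout GPR %rN is placed 8 * (16 - N) bytes below
        // the CFA, so %r15 occupies the 8 bytes directly below the CFA.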
        int Offset = -8 * (15 - GR64Num + 1);
        if (LowGR64Num > GR64Num) {
          LowGR64Num = GR64Num;
          StartSPOffset = SystemZMC::CallFrameSize + Offset;
        }
        int FrameIdx = MFFrame.CreateFixedSpillStackObject(8, Offset);
        CS.setFrameIdx(FrameIdx);
      } else
        CS.setFrameIdx(INT32_MAX);
    }
    if (LowGR64Num < UINT32_MAX)
      LowGPR = SystemZMC::GR64Regs[LowGR64Num];

    // Save the range of call-saved registers, for use by the
    // prologue/epilogue inserters.
    ZFI->setRestoreGPRRegs(LowGPR, HighGPR, StartSPOffset);
    ZFI->setSpillGPRRegs(LowGPR, HighGPR, StartSPOffset);

    CurrOffset = LowGPR ? -(SystemZMC::CallFrameSize - StartSPOffset) : 0;
  }

  // Create fixed stack objects for the remaining registers.
  for (auto &CS : CSI) {
    if (CS.getFrameIdx() != INT32_MAX)
      continue;
    unsigned Reg = CS.getReg();
    const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
    unsigned Size = TRI->getSpillSize(*RC);
    CurrOffset -= Size;
    assert(CurrOffset % 8 == 0 &&
           "8-byte alignment required for all register save slots");
    int FrameIdx = MFFrame.CreateFixedSpillStackObject(Size, CurrOffset);
    CS.setFrameIdx(FrameIdx);
  }

  return true;
}

void SystemZFrameLowering::determineCalleeSaves(MachineFunction &MF,
                                                BitVector &SavedRegs,
                                                RegScavenger *RS) const {
  TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS);

  MachineFrameInfo &MFFrame = MF.getFrameInfo();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  bool HasFP = hasFP(MF);
  SystemZMachineFunctionInfo *MFI = MF.getInfo<SystemZMachineFunctionInfo>();
  bool IsVarArg = MF.getFunction().isVarArg();

  // va_start stores incoming FPR varargs in the normal way, but delegates
  // the saving of incoming GPR varargs to spillCalleeSavedRegisters().
  // Record these pending uses, which typically include the call-saved
  // argument register R6D.
  if (IsVarArg)
    for (unsigned I = MFI->getVarArgsFirstGPR(); I < SystemZ::NumArgGPRs; ++I)
      SavedRegs.set(SystemZ::ArgGPRs[I]);

  // If there are any landing pads, entering them will modify r6/r7.
  if (!MF.getLandingPads().empty()) {
    SavedRegs.set(SystemZ::R6D);
    SavedRegs.set(SystemZ::R7D);
  }

  // If the function requires a frame pointer, record that the hard
  // frame pointer will be clobbered.
  if (HasFP)
    SavedRegs.set(SystemZ::R11D);

  // If the function calls other functions, record that the return
  // address register will be clobbered.
  if (MFFrame.hasCalls())
    SavedRegs.set(SystemZ::R14D);

  // If we are saving GPRs other than the stack pointer, we might as well
  // save and restore the stack pointer at the same time, via STMG and LMG.
  // This allows the deallocation to be done by the LMG, rather than needing
  // a separate %r15 addition.
  const MCPhysReg *CSRegs = TRI->getCalleeSavedRegs(&MF);
  for (unsigned I = 0; CSRegs[I]; ++I) {
    unsigned Reg = CSRegs[I];
    if (SystemZ::GR64BitRegClass.contains(Reg) && SavedRegs.test(Reg)) {
      SavedRegs.set(SystemZ::R15D);
      break;
    }
  }
}

// Add GPR64 to the save instruction being built by MIB, which is in basic
// block MBB.  IsImplicit says whether this is an explicit operand to the
// instruction, or an implicit one that comes between the explicit start
// and end registers.
static void addSavedGPR(MachineBasicBlock &MBB, MachineInstrBuilder &MIB,
                        unsigned GPR64, bool IsImplicit) {
  const TargetRegisterInfo *RI =
      MBB.getParent()->getSubtarget().getRegisterInfo();
  Register GPR32 = RI->getSubReg(GPR64, SystemZ::subreg_l32);
  bool IsLive = MBB.isLiveIn(GPR64) || MBB.isLiveIn(GPR32);
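  // An implicit operand for a register that is already live can be omitted.
  // Otherwise add the operand, killing the register unless it is live, and
  // mark it as a live-in of the block if needed.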
  if (!IsLive || !IsImplicit) {
    MIB.addReg(GPR64, getImplRegState(IsImplicit) | getKillRegState(!IsLive));
    if (!IsLive)
      MBB.addLiveIn(GPR64);
  }
}

bool SystemZFrameLowering::
spillCalleeSavedRegisters(MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator MBBI,
                          const std::vector<CalleeSavedInfo> &CSI,
                          const TargetRegisterInfo *TRI) const {
  if (CSI.empty())
    return false;

  MachineFunction &MF = *MBB.getParent();
  const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
  SystemZMachineFunctionInfo *ZFI = MF.getInfo<SystemZMachineFunctionInfo>();
  bool IsVarArg = MF.getFunction().isVarArg();
  DebugLoc DL;

  // Save GPRs
  SystemZ::GPRRegs SpillGPRs = ZFI->getSpillGPRRegs();
  if (SpillGPRs.LowGPR) {
    assert(SpillGPRs.LowGPR != SpillGPRs.HighGPR &&
           "Should be saving %r15 and something else");

    // Build an STMG instruction.
    MachineInstrBuilder MIB = BuildMI(MBB, MBBI, DL, TII->get(SystemZ::STMG));

    // Add the explicit register operands.
    addSavedGPR(MBB, MIB, SpillGPRs.LowGPR, false);
    addSavedGPR(MBB, MIB, SpillGPRs.HighGPR, false);

    // Add the address.
    MIB.addReg(SystemZ::R15D).addImm(SpillGPRs.GPROffset);

    // Make sure all call-saved GPRs are included as operands and are
    // marked as live on entry.
    for (unsigned I = 0, E = CSI.size(); I != E; ++I) {
      unsigned Reg = CSI[I].getReg();
      if (SystemZ::GR64BitRegClass.contains(Reg))
        addSavedGPR(MBB, MIB, Reg, true);
    }

    // ...likewise GPR varargs.
    if (IsVarArg)
      for (unsigned I = ZFI->getVarArgsFirstGPR(); I < SystemZ::NumArgGPRs; ++I)
        addSavedGPR(MBB, MIB, SystemZ::ArgGPRs[I], true);
  }

  // Save FPRs/VRs in the normal TargetInstrInfo way.
  for (unsigned I = 0, E = CSI.size(); I != E; ++I) {
    unsigned Reg = CSI[I].getReg();
    if (SystemZ::FP64BitRegClass.contains(Reg)) {
      MBB.addLiveIn(Reg);
      TII->storeRegToStackSlot(MBB, MBBI, Reg, true, CSI[I].getFrameIdx(),
                               &SystemZ::FP64BitRegClass, TRI);
    }
    if (SystemZ::VR128BitRegClass.contains(Reg)) {
      MBB.addLiveIn(Reg);
      TII->storeRegToStackSlot(MBB, MBBI, Reg, true, CSI[I].getFrameIdx(),
                               &SystemZ::VR128BitRegClass, TRI);
    }
  }

  return true;
}

bool SystemZFrameLowering::
restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator MBBI,
                            std::vector<CalleeSavedInfo> &CSI,
                            const TargetRegisterInfo *TRI) const {
  if (CSI.empty())
    return false;

  MachineFunction &MF = *MBB.getParent();
  const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
  SystemZMachineFunctionInfo *ZFI = MF.getInfo<SystemZMachineFunctionInfo>();
  bool HasFP = hasFP(MF);
  DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();

  // Restore FPRs/VRs in the normal TargetInstrInfo way.
  for (unsigned I = 0, E = CSI.size(); I != E; ++I) {
    unsigned Reg = CSI[I].getReg();
    if (SystemZ::FP64BitRegClass.contains(Reg))
      TII->loadRegFromStackSlot(MBB, MBBI, Reg, CSI[I].getFrameIdx(),
                                &SystemZ::FP64BitRegClass, TRI);
    if (SystemZ::VR128BitRegClass.contains(Reg))
      TII->loadRegFromStackSlot(MBB, MBBI, Reg, CSI[I].getFrameIdx(),
                                &SystemZ::VR128BitRegClass, TRI);
  }

  // Restore call-saved GPRs (but not call-clobbered varargs, which at
  // this point might hold return values).
  SystemZ::GPRRegs RestoreGPRs = ZFI->getRestoreGPRRegs();
  if (RestoreGPRs.LowGPR) {
    // If we saved any of %r2-%r5 as varargs, we should also be saving
    // and restoring %r6.  If we're saving %r6 or above, we should be
    // restoring it too.
    assert(RestoreGPRs.LowGPR != RestoreGPRs.HighGPR &&
           "Should be loading %r15 and something else");

    // Build an LMG instruction.
    MachineInstrBuilder MIB = BuildMI(MBB, MBBI, DL, TII->get(SystemZ::LMG));

    // Add the explicit register operands.
    MIB.addReg(RestoreGPRs.LowGPR, RegState::Define);
    MIB.addReg(RestoreGPRs.HighGPR, RegState::Define);

    // Add the address.
    MIB.addReg(HasFP ? SystemZ::R11D : SystemZ::R15D);
    MIB.addImm(RestoreGPRs.GPROffset);

    // Do a second scan, adding the other call-saved GPRs as implicit
    // definitions of the instruction.
    for (unsigned I = 0, E = CSI.size(); I != E; ++I) {
      unsigned Reg = CSI[I].getReg();
      if (Reg != RestoreGPRs.LowGPR && Reg != RestoreGPRs.HighGPR &&
          SystemZ::GR64BitRegClass.contains(Reg))
        MIB.addReg(Reg, RegState::ImplicitDefine);
    }
  }

  return true;
}

void SystemZFrameLowering::
processFunctionBeforeFrameFinalized(MachineFunction &MF,
                                    RegScavenger *RS) const {
  MachineFrameInfo &MFFrame = MF.getFrameInfo();

  if (!usePackedStack(MF))
    // Always create the full incoming register save area.
    getOrCreateFramePointerSaveIndex(MF);

  // Get the size of our stack frame to be allocated ...
  uint64_t StackSize = (MFFrame.estimateStackSize(MF) +
                        SystemZMC::CallFrameSize);
  // ... and the maximum offset we may need to reach into the
  // caller's frame to access the save area or stack arguments.
  int64_t MaxArgOffset = 0;
  for (int I = MFFrame.getObjectIndexBegin(); I != 0; ++I)
    if (MFFrame.getObjectOffset(I) >= 0) {
      int64_t ArgOffset = MFFrame.getObjectOffset(I) +
                          MFFrame.getObjectSize(I);
      MaxArgOffset = std::max(MaxArgOffset, ArgOffset);
    }

  uint64_t MaxReach = StackSize + MaxArgOffset;
  if (!isUInt<12>(MaxReach)) {
    // We may need register scavenging slots if some parts of the frame
    // are outside the reach of an unsigned 12-bit displacement.
    // Create 2 for the case where both addresses in an MVC are
    // out of range.
    RS->addScavengingFrameIndex(MFFrame.CreateStackObject(8, 8, false));
    RS->addScavengingFrameIndex(MFFrame.CreateStackObject(8, 8, false));
  }
}

// Emit instructions before MBBI (in MBB) to add NumBytes to Reg.
static void emitIncrement(MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator &MBBI,
                          const DebugLoc &DL,
                          unsigned Reg, int64_t NumBytes,
                          const TargetInstrInfo *TII) {
  while (NumBytes) {
    unsigned Opcode;
    int64_t ThisVal = NumBytes;
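    // Prefer AGHI while the value fits in a signed 16-bit immediate;
    // otherwise use AGFI with a clamped 32-bit immediate and keep looping
    // until the full amount has been added.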
    if (isInt<16>(NumBytes))
      Opcode = SystemZ::AGHI;
    else {
      Opcode = SystemZ::AGFI;
      // Make sure we maintain 8-byte stack alignment.
      int64_t MinVal = -uint64_t(1) << 31;
      int64_t MaxVal = (int64_t(1) << 31) - 8;
      if (ThisVal < MinVal)
        ThisVal = MinVal;
      else if (ThisVal > MaxVal)
        ThisVal = MaxVal;
    }
    MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII->get(Opcode), Reg)
      .addReg(Reg).addImm(ThisVal);
    // The CC implicit def is dead.
    MI->getOperand(3).setIsDead();
    NumBytes -= ThisVal;
  }
}

void SystemZFrameLowering::emitPrologue(MachineFunction &MF,
                                        MachineBasicBlock &MBB) const {
  assert(&MF.front() == &MBB && "Shrink-wrapping not yet supported");
  MachineFrameInfo &MFFrame = MF.getFrameInfo();
  auto *ZII =
      static_cast<const SystemZInstrInfo *>(MF.getSubtarget().getInstrInfo());
  SystemZMachineFunctionInfo *ZFI = MF.getInfo<SystemZMachineFunctionInfo>();
  MachineBasicBlock::iterator MBBI = MBB.begin();
  MachineModuleInfo &MMI = MF.getMMI();
  const MCRegisterInfo *MRI = MMI.getContext().getRegisterInfo();
  const std::vector<CalleeSavedInfo> &CSI = MFFrame.getCalleeSavedInfo();
  bool HasFP = hasFP(MF);

  // In the GHC calling convention, C stack space, including the ABI-defined
  // 160-byte base area, is (de)allocated by GHC itself.  This stack space may
  // be used by LLVM as spill slots for the tail recursive GHC functions.
  // Thus do not allocate stack space here either.
  if (MF.getFunction().getCallingConv() == CallingConv::GHC) {
    if (MFFrame.getStackSize() > 2048 * sizeof(long)) {
      report_fatal_error(
          "Pre allocated stack space for GHC function is too small");
    }
    if (HasFP) {
      report_fatal_error(
          "In GHC calling convention a frame pointer is not supported");
    }
    MFFrame.setStackSize(MFFrame.getStackSize() + SystemZMC::CallFrameSize);
    return;
  }

  // Debug location must be unknown since the first debug location is used
  // to determine the end of the prologue.
  DebugLoc DL;

  // The current offset of the stack pointer from the CFA.
  int64_t SPOffsetFromCFA = -SystemZMC::CFAOffsetFromInitialSP;

  if (ZFI->getSpillGPRRegs().LowGPR) {
    // Skip over the GPR saves.
    if (MBBI != MBB.end() && MBBI->getOpcode() == SystemZ::STMG)
      ++MBBI;
    else
      llvm_unreachable("Couldn't skip over GPR saves");

    // Add CFI for the GPR saves.
    for (auto &Save : CSI) {
      unsigned Reg = Save.getReg();
      if (SystemZ::GR64BitRegClass.contains(Reg)) {
        int FI = Save.getFrameIdx();
        int64_t Offset = MFFrame.getObjectOffset(FI);
        unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::createOffset(
            nullptr, MRI->getDwarfRegNum(Reg, true), Offset));
        BuildMI(MBB, MBBI, DL, ZII->get(TargetOpcode::CFI_INSTRUCTION))
            .addCFIIndex(CFIIndex);
      }
    }
  }

  uint64_t StackSize = MFFrame.getStackSize();
  // We need to allocate the ABI-defined 160-byte base area whenever
  // we allocate stack space for our own use and whenever we call another
  // function.
  bool HasStackObject = false;
  for (unsigned i = 0, e = MFFrame.getObjectIndexEnd(); i != e; ++i)
    if (!MFFrame.isDeadObjectIndex(i)) {
      HasStackObject = true;
      break;
    }
  if (HasStackObject || MFFrame.hasCalls())
    StackSize += SystemZMC::CallFrameSize;
  // Don't allocate the incoming reg save area.
  StackSize = StackSize > SystemZMC::CallFrameSize
                  ? StackSize - SystemZMC::CallFrameSize
                  : 0;
  MFFrame.setStackSize(StackSize);

  if (StackSize) {
    // Determine if we want to store a backchain.
    bool StoreBackchain = MF.getFunction().hasFnAttribute("backchain");

    // If we need backchain, save current stack pointer.  R1 is free at this
    // point.
    if (StoreBackchain)
      BuildMI(MBB, MBBI, DL, ZII->get(SystemZ::LGR))
        .addReg(SystemZ::R1D, RegState::Define).addReg(SystemZ::R15D);

    // Allocate StackSize bytes.
    int64_t Delta = -int64_t(StackSize);
    emitIncrement(MBB, MBBI, DL, SystemZ::R15D, Delta, ZII);

    // Add CFI for the allocation.
    unsigned CFIIndex = MF.addFrameInst(
        MCCFIInstruction::createDefCfaOffset(nullptr, SPOffsetFromCFA + Delta));
    BuildMI(MBB, MBBI, DL, ZII->get(TargetOpcode::CFI_INSTRUCTION))
        .addCFIIndex(CFIIndex);
    SPOffsetFromCFA += Delta;

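    // Store the incoming stack pointer (still held in %r1) into the
    // backchain slot at the bottom of the newly allocated frame.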
    if (StoreBackchain)
      BuildMI(MBB, MBBI, DL, ZII->get(SystemZ::STG))
        .addReg(SystemZ::R1D, RegState::Kill).addReg(SystemZ::R15D).addImm(0)
        .addReg(0);
  }

  if (HasFP) {
    // Copy the base of the frame to R11.
    BuildMI(MBB, MBBI, DL, ZII->get(SystemZ::LGR), SystemZ::R11D)
      .addReg(SystemZ::R15D);

    // Add CFI for the new frame location.
    unsigned HardFP = MRI->getDwarfRegNum(SystemZ::R11D, true);
    unsigned CFIIndex = MF.addFrameInst(
        MCCFIInstruction::createDefCfaRegister(nullptr, HardFP));
    BuildMI(MBB, MBBI, DL, ZII->get(TargetOpcode::CFI_INSTRUCTION))
        .addCFIIndex(CFIIndex);

    // Mark the FramePtr as live at the beginning of every block except
    // the entry block.  (We'll have marked R11 as live on entry when
    // saving the GPRs.)
    for (auto I = std::next(MF.begin()), E = MF.end(); I != E; ++I)
      I->addLiveIn(SystemZ::R11D);
  }

  // Skip over the FPR/VR saves.
  SmallVector<unsigned, 8> CFIIndexes;
  for (auto &Save : CSI) {
    unsigned Reg = Save.getReg();
    if (SystemZ::FP64BitRegClass.contains(Reg)) {
      if (MBBI != MBB.end() &&
          (MBBI->getOpcode() == SystemZ::STD ||
           MBBI->getOpcode() == SystemZ::STDY))
        ++MBBI;
      else
        llvm_unreachable("Couldn't skip over FPR save");
    } else if (SystemZ::VR128BitRegClass.contains(Reg)) {
      if (MBBI != MBB.end() &&
          MBBI->getOpcode() == SystemZ::VST)
        ++MBBI;
      else
        llvm_unreachable("Couldn't skip over VR save");
    } else
      continue;

    // Add CFI for this save.
    unsigned DwarfReg = MRI->getDwarfRegNum(Reg, true);
    unsigned IgnoredFrameReg;
    int64_t Offset =
        getFrameIndexReference(MF, Save.getFrameIdx(), IgnoredFrameReg);

    unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::createOffset(
        nullptr, DwarfReg, SPOffsetFromCFA + Offset));
    CFIIndexes.push_back(CFIIndex);
  }
  // Complete the CFI for the FPR/VR saves, modelling them as taking effect
  // after the last save.
  for (auto CFIIndex : CFIIndexes) {
    BuildMI(MBB, MBBI, DL, ZII->get(TargetOpcode::CFI_INSTRUCTION))
        .addCFIIndex(CFIIndex);
  }
}

void SystemZFrameLowering::emitEpilogue(MachineFunction &MF,
                                        MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
  auto *ZII =
      static_cast<const SystemZInstrInfo *>(MF.getSubtarget().getInstrInfo());
  SystemZMachineFunctionInfo *ZFI = MF.getInfo<SystemZMachineFunctionInfo>();
  MachineFrameInfo &MFFrame = MF.getFrameInfo();

  // See SystemZFrameLowering::emitPrologue
  if (MF.getFunction().getCallingConv() == CallingConv::GHC)
    return;

  // Skip the return instruction.
  assert(MBBI->isReturn() && "Can only insert epilogue into returning blocks");

  uint64_t StackSize = MFFrame.getStackSize();
  if (ZFI->getRestoreGPRRegs().LowGPR) {
    --MBBI;
    unsigned Opcode = MBBI->getOpcode();
    if (Opcode != SystemZ::LMG)
      llvm_unreachable("Expected to see callee-save register restore code");

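    // Operands 2 and 3 of the LMG hold the base address register and the
    // displacement; fold the frame deallocation into that displacement.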
    unsigned AddrOpNo = 2;
    DebugLoc DL = MBBI->getDebugLoc();
    uint64_t Offset = StackSize + MBBI->getOperand(AddrOpNo + 1).getImm();
    unsigned NewOpcode = ZII->getOpcodeForOffset(Opcode, Offset);

    // If the offset is too large, use the largest stack-aligned offset
    // and add the rest to the base register (the stack or frame pointer).
    if (!NewOpcode) {
      uint64_t NumBytes = Offset - 0x7fff8;
      emitIncrement(MBB, MBBI, DL, MBBI->getOperand(AddrOpNo).getReg(),
                    NumBytes, ZII);
      Offset -= NumBytes;
      NewOpcode = ZII->getOpcodeForOffset(Opcode, Offset);
      assert(NewOpcode && "No restore instruction available");
    }

    MBBI->setDesc(ZII->get(NewOpcode));
    MBBI->getOperand(AddrOpNo + 1).ChangeToImmediate(Offset);
  } else if (StackSize) {
    DebugLoc DL = MBBI->getDebugLoc();
    emitIncrement(MBB, MBBI, DL, SystemZ::R15D, StackSize, ZII);
  }
}

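// A frame pointer is needed if frame pointer elimination is disabled, if
// the frame contains variable-sized objects, or if the function manipulates
// the stack pointer directly.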
bool SystemZFrameLowering::hasFP(const MachineFunction &MF) const {
  return (MF.getTarget().Options.DisableFramePointerElim(MF) ||
          MF.getFrameInfo().hasVarSizedObjects() ||
          MF.getInfo<SystemZMachineFunctionInfo>()->getManipulatesSP());
}

bool
SystemZFrameLowering::hasReservedCallFrame(const MachineFunction &MF) const {
  // The ABI requires us to allocate 160 bytes of stack space for the callee,
  // with any outgoing stack arguments being placed above that.  It seems
  // better to make that area a permanent feature of the frame even if
  // we're using a frame pointer.
  return true;
}

int SystemZFrameLowering::getFrameIndexReference(const MachineFunction &MF,
                                                 int FI,
                                                 unsigned &FrameReg) const {
  // Our incoming SP is actually SystemZMC::CallFrameSize below the CFA, so
  // add that difference here.
  int64_t Offset =
      TargetFrameLowering::getFrameIndexReference(MF, FI, FrameReg);
  return Offset + SystemZMC::CallFrameSize;
}

MachineBasicBlock::iterator SystemZFrameLowering::
eliminateCallFramePseudoInstr(MachineFunction &MF,
                              MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator MI) const {
  switch (MI->getOpcode()) {
  case SystemZ::ADJCALLSTACKDOWN:
  case SystemZ::ADJCALLSTACKUP:
    assert(hasReservedCallFrame(MF) &&
           "ADJSTACKDOWN and ADJSTACKUP should be no-ops");
    return MBB.erase(MI);
    break;

  default:
    llvm_unreachable("Unexpected call frame instruction");
  }
}

int SystemZFrameLowering::
getOrCreateFramePointerSaveIndex(MachineFunction &MF) const {
  SystemZMachineFunctionInfo *ZFI = MF.getInfo<SystemZMachineFunctionInfo>();
  int FI = ZFI->getFramePointerSaveIndex();
  if (!FI) {
    MachineFrameInfo &MFFrame = MF.getFrameInfo();
    FI = MFFrame.CreateFixedObject(8, -SystemZMC::CallFrameSize, false);
    ZFI->setFramePointerSaveIndex(FI);
  }
  return FI;
}