[AArch64] NFC: Add generic StackOffset to describe scalable offsets.

To support spilling/filling of scalable vectors we need a more generic
representation of a stack offset than simply 'int'.

For this we introduce the StackOffset struct, which comprises multiple
offsets sized by their respective MVTs. A byte-offset is thus a simple
tuple such as { offset, MVT::i8 }, and adding two byte-offsets results in
the byte-offset { offsetA + offsetB, MVT::i8 }. When two offsets have
different types, we can canonicalise them to use the same MVT, as long as
their runtime sizes are guaranteed to preserve the same ratio as their
compile-time sizes.
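
As a rough illustration (sketch only, not part of the patch): with only
fixed-size types in play, all arithmetic canonicalises to a plain byte
count. Note that in this patch the StackOffset constructor still asserts
on scalable MVTs, so the sketch sticks to fixed-size types:

  // Illustration only; mirrors the class added in AArch64StackOffset.h below.
  #include "AArch64StackOffset.h"
  #include <cassert>
  using namespace llvm;

  void example() {
    StackOffset A(2, MVT::i32); // canonicalised to { 8, MVT::i8 }
    StackOffset B(1, MVT::i64); // canonicalised to { 8, MVT::i8 }
    StackOffset C = A + B;      // { 16, MVT::i8 }
    assert(C.getBytes() == 16); // non-scalable part, in bytes
  }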

When we have both scalable- and fixed-size objects on the stack, we can 
create an offset that is: 

  ({ offset_fixed, MVT::i8 } + { offset_scalable, MVT::nxv1i8 })

The struct also provides a getForFrameOffset() method that is specific to
AArch64; it decomposes the frame offset so it can be used directly in
instructions that operate on, or index into, the stack.
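
For a purely fixed-size offset the decomposition is trivially its byte
count; below is a minimal sketch of how a caller consumes it, in the same
way emitFrameOffset() does in this patch (the helper name
materializeBytes is hypothetical):

  #include "AArch64StackOffset.h"
  using namespace llvm;

  // Hypothetical helper: extract the byte-sized component the way
  // emitFrameOffset() does; for now getForFrameOffset() only yields bytes.
  int64_t materializeBytes(const StackOffset &SOffset) {
    int64_t Offset;
    SOffset.getForFrameOffset(Offset);
    return Offset;
  }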

Note: This patch adds StackOffset as an AArch64-only concept, but we would
like to make it a generic concept/struct supported by all interfaces that
take or return stack offsets (currently as 'int'). Since that is a bigger
change, currently pending on D32530 landing, it makes sense to first prove
the concept in the AArch64 target before proposing to roll it out further.

Reviewers: thegameg, rovka, t.p.northover, efriedma, greened

Reviewed By: rovka, greened

Differential Revision: https://reviews.llvm.org/D61435

llvm-svn: 368024
Sander de Smalen 2019-08-06 13:06:40 +00:00
parent 2fbf58c6e6
commit 612b038966
9 changed files with 261 additions and 75 deletions

llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp

@@ -659,11 +659,12 @@ bool AArch64ExpandPseudo::expandMI(MachineBasicBlock &MBB,
// instruction sequence.
int BaseOffset = -AFI->getTaggedBasePointerOffset();
unsigned FrameReg;
int FrameRegOffset = TFI->resolveFrameOffsetReference(
MF, BaseOffset, false /*isFixed*/, FrameReg, /*PreferFP=*/false,
StackOffset FrameRegOffset = TFI->resolveFrameOffsetReference(
MF, BaseOffset, false /*isFixed*/, FrameReg,
/*PreferFP=*/false,
/*ForSimm=*/true);
Register SrcReg = FrameReg;
if (FrameRegOffset != 0) {
if (FrameRegOffset) {
// Use output register as temporary.
SrcReg = MI.getOperand(0).getReg();
emitFrameOffset(MBB, &MI, MI.getDebugLoc(), SrcReg, FrameReg,

llvm/lib/Target/AArch64/AArch64FrameLowering.cpp

@@ -94,6 +94,7 @@
#include "AArch64InstrInfo.h"
#include "AArch64MachineFunctionInfo.h"
#include "AArch64RegisterInfo.h"
#include "AArch64StackOffset.h"
#include "AArch64Subtarget.h"
#include "AArch64TargetMachine.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
@@ -173,7 +174,7 @@ static unsigned estimateRSStackSizeLimit(MachineFunction &MF) {
if (!MO.isFI())
continue;
int Offset = 0;
StackOffset Offset;
if (isAArch64FrameOffsetLegal(MI, Offset, nullptr, nullptr, nullptr) ==
AArch64FrameOffsetCannotUpdate)
return 0;
@@ -273,14 +274,15 @@ MachineBasicBlock::iterator AArch64FrameLowering::eliminateCallFramePseudoInstr(
// Most call frames will be allocated at the start of a function so
// this is OK, but it is a limitation that needs dealing with.
assert(Amount > -0xffffff && Amount < 0xffffff && "call frame too large");
emitFrameOffset(MBB, I, DL, AArch64::SP, AArch64::SP, Amount, TII);
emitFrameOffset(MBB, I, DL, AArch64::SP, AArch64::SP, {Amount, MVT::i8},
TII);
}
} else if (CalleePopAmount != 0) {
// If the calling convention demands that the callee pops arguments from the
// stack, we want to add it back if we have a reserved call frame.
assert(CalleePopAmount < 0xffffff && "call frame too large");
emitFrameOffset(MBB, I, DL, AArch64::SP, AArch64::SP, -CalleePopAmount,
TII);
emitFrameOffset(MBB, I, DL, AArch64::SP, AArch64::SP,
{-(int64_t)CalleePopAmount, MVT::i8}, TII);
}
return MBB.erase(I);
}
@@ -866,8 +868,9 @@ void AArch64FrameLowering::emitPrologue(MachineFunction &MF,
AFI->setHasRedZone(true);
++NumRedZoneFunctions;
} else {
emitFrameOffset(MBB, MBBI, DL, AArch64::SP, AArch64::SP, -NumBytes, TII,
MachineInstr::FrameSetup, false, NeedsWinCFI, &HasWinCFI);
emitFrameOffset(MBB, MBBI, DL, AArch64::SP, AArch64::SP,
{-NumBytes, MVT::i8}, TII, MachineInstr::FrameSetup,
false, NeedsWinCFI, &HasWinCFI);
if (!NeedsWinCFI) {
// Label used to tie together the PROLOG_LABEL and the MachineMoves.
MCSymbol *FrameLabel = MMI.getContext().createTempSymbol();
@@ -901,8 +904,9 @@ void AArch64FrameLowering::emitPrologue(MachineFunction &MF,
AFI->setLocalStackSize(NumBytes - PrologueSaveSize);
bool CombineSPBump = shouldCombineCSRLocalStackBump(MF, NumBytes);
if (CombineSPBump) {
emitFrameOffset(MBB, MBBI, DL, AArch64::SP, AArch64::SP, -NumBytes, TII,
MachineInstr::FrameSetup, false, NeedsWinCFI, &HasWinCFI);
emitFrameOffset(MBB, MBBI, DL, AArch64::SP, AArch64::SP,
{-NumBytes, MVT::i8}, TII, MachineInstr::FrameSetup, false,
NeedsWinCFI, &HasWinCFI);
NumBytes = 0;
} else if (PrologueSaveSize != 0) {
MBBI = convertCalleeSaveRestoreToSPPrePostIncDec(
@@ -958,8 +962,9 @@ void AArch64FrameLowering::emitPrologue(MachineFunction &MF,
// mov fp,sp when FPOffset is zero.
// Note: All stores of callee-saved registers are marked as "FrameSetup".
// This code marks the instruction(s) that set the FP also.
emitFrameOffset(MBB, MBBI, DL, AArch64::FP, AArch64::SP, FPOffset, TII,
MachineInstr::FrameSetup, false, NeedsWinCFI, &HasWinCFI);
emitFrameOffset(MBB, MBBI, DL, AArch64::FP, AArch64::SP,
{FPOffset, MVT::i8}, TII, MachineInstr::FrameSetup, false,
NeedsWinCFI, &HasWinCFI);
}
if (windowsRequiresStackProbe(MF, NumBytes)) {
@@ -1071,8 +1076,9 @@ void AArch64FrameLowering::emitPrologue(MachineFunction &MF,
// FIXME: in the case of dynamic re-alignment, NumBytes doesn't have
// the correct value here, as NumBytes also includes padding bytes,
// which shouldn't be counted here.
emitFrameOffset(MBB, MBBI, DL, scratchSPReg, AArch64::SP, -NumBytes, TII,
MachineInstr::FrameSetup, false, NeedsWinCFI, &HasWinCFI);
emitFrameOffset(MBB, MBBI, DL, scratchSPReg, AArch64::SP,
{-NumBytes, MVT::i8}, TII, MachineInstr::FrameSetup,
false, NeedsWinCFI, &HasWinCFI);
if (NeedsRealignment) {
const unsigned Alignment = MFI.getMaxAlignment();
@@ -1404,8 +1410,8 @@ void AArch64FrameLowering::emitEpilogue(MachineFunction &MF,
// If there is a single SP update, insert it before the ret and we're done.
if (CombineSPBump) {
emitFrameOffset(MBB, MBB.getFirstTerminator(), DL, AArch64::SP, AArch64::SP,
NumBytes + AfterCSRPopSize, TII, MachineInstr::FrameDestroy,
false, NeedsWinCFI, &HasWinCFI);
{NumBytes + (int64_t)AfterCSRPopSize, MVT::i8}, TII,
MachineInstr::FrameDestroy, false, NeedsWinCFI, &HasWinCFI);
if (NeedsWinCFI && HasWinCFI)
BuildMI(MBB, MBB.getFirstTerminator(), DL,
TII->get(AArch64::SEH_EpilogEnd))
@@ -1437,8 +1443,8 @@ void AArch64FrameLowering::emitEpilogue(MachineFunction &MF,
adaptForLdStOpt(MBB, MBB.getFirstTerminator(), LastPopI);
emitFrameOffset(MBB, LastPopI, DL, AArch64::SP, AArch64::SP,
StackRestoreBytes, TII, MachineInstr::FrameDestroy, false,
NeedsWinCFI, &HasWinCFI);
{StackRestoreBytes, MVT::i8}, TII,
MachineInstr::FrameDestroy, false, NeedsWinCFI, &HasWinCFI);
if (Done) {
if (NeedsWinCFI) {
HasWinCFI = true;
@@ -1458,11 +1464,12 @@ void AArch64FrameLowering::emitEpilogue(MachineFunction &MF,
// be able to save any instructions.
if (!IsFunclet && (MFI.hasVarSizedObjects() || AFI->isStackRealigned()))
emitFrameOffset(MBB, LastPopI, DL, AArch64::SP, AArch64::FP,
-AFI->getCalleeSavedStackSize() + 16, TII,
MachineInstr::FrameDestroy, false, NeedsWinCFI);
{-(int64_t)AFI->getCalleeSavedStackSize() + 16, MVT::i8},
TII, MachineInstr::FrameDestroy, false, NeedsWinCFI);
else if (NumBytes)
emitFrameOffset(MBB, LastPopI, DL, AArch64::SP, AArch64::SP, NumBytes, TII,
MachineInstr::FrameDestroy, false, NeedsWinCFI);
emitFrameOffset(MBB, LastPopI, DL, AArch64::SP, AArch64::SP,
{NumBytes, MVT::i8}, TII, MachineInstr::FrameDestroy, false,
NeedsWinCFI);
// This must be placed after the callee-save restore code because that code
// assumes the SP is at the same location as it was after the callee-save save
@@ -1483,8 +1490,8 @@ void AArch64FrameLowering::emitEpilogue(MachineFunction &MF,
adaptForLdStOpt(MBB, FirstSPPopI, LastPopI);
emitFrameOffset(MBB, FirstSPPopI, DL, AArch64::SP, AArch64::SP,
AfterCSRPopSize, TII, MachineInstr::FrameDestroy, false,
NeedsWinCFI, &HasWinCFI);
{(int64_t)AfterCSRPopSize, MVT::i8}, TII,
MachineInstr::FrameDestroy, false, NeedsWinCFI, &HasWinCFI);
}
if (NeedsWinCFI && HasWinCFI)
BuildMI(MBB, MBB.getFirstTerminator(), DL, TII->get(AArch64::SEH_EpilogEnd))
@@ -1501,10 +1508,11 @@ int AArch64FrameLowering::getFrameIndexReference(const MachineFunction &MF,
int FI,
unsigned &FrameReg) const {
return resolveFrameIndexReference(
MF, FI, FrameReg,
/*PreferFP=*/
MF.getFunction().hasFnAttribute(Attribute::SanitizeHWAddress),
/*ForSimm=*/false);
MF, FI, FrameReg,
/*PreferFP=*/
MF.getFunction().hasFnAttribute(Attribute::SanitizeHWAddress),
/*ForSimm=*/false)
.getBytes();
}
int AArch64FrameLowering::getNonLocalFrameIndexReference(
@@ -1512,18 +1520,18 @@ int AArch64FrameLowering::getNonLocalFrameIndexReference(
return getSEHFrameIndexOffset(MF, FI);
}
static int getFPOffset(const MachineFunction &MF, int ObjectOffset) {
static StackOffset getFPOffset(const MachineFunction &MF, int ObjectOffset) {
const auto *AFI = MF.getInfo<AArch64FunctionInfo>();
const auto &Subtarget = MF.getSubtarget<AArch64Subtarget>();
bool IsWin64 =
Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv());
unsigned FixedObject = IsWin64 ? alignTo(AFI->getVarArgsGPRSize(), 16) : 0;
return ObjectOffset + FixedObject + 16;
return {ObjectOffset + FixedObject + 16, MVT::i8};
}
static int getStackOffset(const MachineFunction &MF, int ObjectOffset) {
static StackOffset getStackOffset(const MachineFunction &MF, int ObjectOffset) {
const auto &MFI = MF.getFrameInfo();
return ObjectOffset + MFI.getStackSize();
return {ObjectOffset + (int)MFI.getStackSize(), MVT::i8};
}
int AArch64FrameLowering::getSEHFrameIndexOffset(const MachineFunction &MF,
@@ -1532,14 +1540,13 @@ int AArch64FrameLowering::getSEHFrameIndexOffset(const MachineFunction &MF,
MF.getSubtarget().getRegisterInfo());
int ObjectOffset = MF.getFrameInfo().getObjectOffset(FI);
return RegInfo->getLocalAddressRegister(MF) == AArch64::FP
? getFPOffset(MF, ObjectOffset)
: getStackOffset(MF, ObjectOffset);
? getFPOffset(MF, ObjectOffset).getBytes()
: getStackOffset(MF, ObjectOffset).getBytes();
}
int AArch64FrameLowering::resolveFrameIndexReference(const MachineFunction &MF,
int FI, unsigned &FrameReg,
bool PreferFP,
bool ForSimm) const {
StackOffset AArch64FrameLowering::resolveFrameIndexReference(
const MachineFunction &MF, int FI, unsigned &FrameReg, bool PreferFP,
bool ForSimm) const {
const auto &MFI = MF.getFrameInfo();
int ObjectOffset = MFI.getObjectOffset(FI);
bool isFixed = MFI.isFixedObjectIndex(FI);
@@ -1547,7 +1554,7 @@ int AArch64FrameLowering::resolveFrameIndexReference(const MachineFunction &MF,
PreferFP, ForSimm);
}
int AArch64FrameLowering::resolveFrameOffsetReference(
StackOffset AArch64FrameLowering::resolveFrameOffsetReference(
const MachineFunction &MF, int ObjectOffset, bool isFixed,
unsigned &FrameReg, bool PreferFP, bool ForSimm) const {
const auto &MFI = MF.getFrameInfo();
@@ -1556,8 +1563,8 @@ int AArch64FrameLowering::resolveFrameOffsetReference(
const auto *AFI = MF.getInfo<AArch64FunctionInfo>();
const auto &Subtarget = MF.getSubtarget<AArch64Subtarget>();
int FPOffset = getFPOffset(MF, ObjectOffset);
int Offset = getStackOffset(MF, ObjectOffset);
int FPOffset = getFPOffset(MF, ObjectOffset).getBytes();
int Offset = getStackOffset(MF, ObjectOffset).getBytes();
bool isCSR =
!isFixed && ObjectOffset >= -((int)AFI->getCalleeSavedStackSize());
@@ -1627,7 +1634,7 @@ int AArch64FrameLowering::resolveFrameOffsetReference(
if (UseFP) {
FrameReg = RegInfo->getFrameRegister(MF);
return FPOffset;
return StackOffset(FPOffset, MVT::i8);
}
// Use the base pointer if we have one.
@@ -1644,7 +1651,7 @@ int AArch64FrameLowering::resolveFrameOffsetReference(
Offset -= AFI->getLocalStackSize();
}
return Offset;
return StackOffset(Offset, MVT::i8);
}
static unsigned getPrologueDeath(MachineFunction &MF, unsigned Reg) {

llvm/lib/Target/AArch64/AArch64FrameLowering.h

@@ -13,6 +13,7 @@
#ifndef LLVM_LIB_TARGET_AARCH64_AARCH64FRAMELOWERING_H
#define LLVM_LIB_TARGET_AARCH64_AARCH64FRAMELOWERING_H
#include "AArch64StackOffset.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
namespace llvm {
@@ -39,12 +40,13 @@ public:
int getFrameIndexReference(const MachineFunction &MF, int FI,
unsigned &FrameReg) const override;
int resolveFrameIndexReference(const MachineFunction &MF, int FI,
unsigned &FrameReg, bool PreferFP,
bool ForSimm) const;
int resolveFrameOffsetReference(const MachineFunction &MF, int ObjectOffset,
bool isFixed, unsigned &FrameReg,
bool PreferFP, bool ForSimm) const;
StackOffset resolveFrameIndexReference(const MachineFunction &MF, int FI,
unsigned &FrameReg, bool PreferFP,
bool ForSimm) const;
StackOffset resolveFrameOffsetReference(const MachineFunction &MF,
int ObjectOffset, bool isFixed,
unsigned &FrameReg, bool PreferFP,
bool ForSimm) const;
bool spillCalleeSavedRegisters(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
const std::vector<CalleeSavedInfo> &CSI,

llvm/lib/Target/AArch64/AArch64InstrInfo.cpp

@@ -2974,10 +2974,12 @@ void AArch64InstrInfo::loadRegFromStackSlot(
void llvm::emitFrameOffset(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI, const DebugLoc &DL,
unsigned DestReg, unsigned SrcReg, int Offset,
const TargetInstrInfo *TII,
unsigned DestReg, unsigned SrcReg,
StackOffset SOffset, const TargetInstrInfo *TII,
MachineInstr::MIFlag Flag, bool SetNZCV,
bool NeedsWinCFI, bool *HasWinCFI) {
int64_t Offset;
SOffset.getForFrameOffset(Offset);
if (DestReg == SrcReg && Offset == 0)
return;
@@ -3239,7 +3241,8 @@ MachineInstr *AArch64InstrInfo::foldMemoryOperandImpl(
return nullptr;
}
int llvm::isAArch64FrameOffsetLegal(const MachineInstr &MI, int &Offset,
int llvm::isAArch64FrameOffsetLegal(const MachineInstr &MI,
StackOffset &SOffset,
bool *OutUseUnscaledOp,
unsigned *OutUnscaledOp,
int *EmittableOffset) {
@@ -3283,7 +3286,7 @@ int llvm::isAArch64FrameOffsetLegal(const MachineInstr &MI, int &Offset,
// Construct the complete offset.
const MachineOperand &ImmOpnd =
MI.getOperand(AArch64InstrInfo::getLoadStoreImmIdx(MI.getOpcode()));
Offset += ImmOpnd.getImm() * Scale;
int Offset = SOffset.getBytes() + ImmOpnd.getImm() * Scale;
// If the offset doesn't match the scale, we rewrite the instruction to
// use the unscaled instruction instead. Likewise, if we have a negative
@@ -3315,23 +3318,24 @@ int llvm::isAArch64FrameOffsetLegal(const MachineInstr &MI, int &Offset,
if (OutUnscaledOp && UnscaledOp)
*OutUnscaledOp = *UnscaledOp;
SOffset = StackOffset(Offset, MVT::i8);
return AArch64FrameOffsetCanUpdate |
(Offset == 0 ? AArch64FrameOffsetIsLegal : 0);
}
bool llvm::rewriteAArch64FrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
unsigned FrameReg, int &Offset,
unsigned FrameReg, StackOffset &Offset,
const AArch64InstrInfo *TII) {
unsigned Opcode = MI.getOpcode();
unsigned ImmIdx = FrameRegIdx + 1;
if (Opcode == AArch64::ADDSXri || Opcode == AArch64::ADDXri) {
Offset += MI.getOperand(ImmIdx).getImm();
Offset += StackOffset(MI.getOperand(ImmIdx).getImm(), MVT::i8);
emitFrameOffset(*MI.getParent(), MI, MI.getDebugLoc(),
MI.getOperand(0).getReg(), FrameReg, Offset, TII,
MachineInstr::NoFlags, (Opcode == AArch64::ADDSXri));
MI.eraseFromParent();
Offset = 0;
Offset = StackOffset();
return true;
}
@@ -3348,7 +3352,7 @@ bool llvm::rewriteAArch64FrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
MI.setDesc(TII->get(UnscaledOp));
MI.getOperand(ImmIdx).ChangeToImmediate(NewOffset);
return Offset == 0;
return !Offset;
}
return false;

llvm/lib/Target/AArch64/AArch64InstrInfo.h

@@ -15,6 +15,7 @@
#include "AArch64.h"
#include "AArch64RegisterInfo.h"
#include "AArch64StackOffset.h"
#include "llvm/ADT/Optional.h"
#include "llvm/CodeGen/MachineCombinerPattern.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
@@ -299,7 +300,7 @@ private:
/// if necessary, to be replaced by the scavenger at the end of PEI.
void emitFrameOffset(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
const DebugLoc &DL, unsigned DestReg, unsigned SrcReg,
int Offset, const TargetInstrInfo *TII,
StackOffset Offset, const TargetInstrInfo *TII,
MachineInstr::MIFlag = MachineInstr::NoFlags,
bool SetNZCV = false, bool NeedsWinCFI = false,
bool *HasWinCFI = nullptr);
@@ -308,7 +309,7 @@ void emitFrameOffset(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
/// FP. Return false if the offset could not be handled directly in MI, and
/// return the left-over portion by reference.
bool rewriteAArch64FrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
unsigned FrameReg, int &Offset,
unsigned FrameReg, StackOffset &Offset,
const AArch64InstrInfo *TII);
/// Use to report the frame offset status in isAArch64FrameOffsetLegal.
@@ -332,7 +333,7 @@ enum AArch64FrameOffsetStatus {
/// If set, @p EmittableOffset contains the amount that can be set in @p MI
/// (possibly with @p OutUnscaledOp if OutUseUnscaledOp is true) and that
/// is a legal offset.
int isAArch64FrameOffsetLegal(const MachineInstr &MI, int &Offset,
int isAArch64FrameOffsetLegal(const MachineInstr &MI, StackOffset &Offset,
bool *OutUseUnscaledOp = nullptr,
unsigned *OutUnscaledOp = nullptr,
int *EmittableOffset = nullptr);

llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp

@@ -15,6 +15,7 @@
#include "AArch64FrameLowering.h"
#include "AArch64InstrInfo.h"
#include "AArch64MachineFunctionInfo.h"
#include "AArch64StackOffset.h"
#include "AArch64Subtarget.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "llvm/ADT/BitVector.h"
@@ -23,10 +24,10 @@
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetOptions.h"
using namespace llvm;
@@ -390,7 +391,7 @@ bool AArch64RegisterInfo::isFrameOffsetLegal(const MachineInstr *MI,
int64_t Offset) const {
assert(Offset <= INT_MAX && "Offset too big to fit in int.");
assert(MI && "Unable to get the legal offset for nil instruction.");
int SaveOffset = Offset;
StackOffset SaveOffset(Offset, MVT::i8);
return isAArch64FrameOffsetLegal(*MI, SaveOffset) & AArch64FrameOffsetIsLegal;
}
@@ -420,7 +421,9 @@ void AArch64RegisterInfo::materializeFrameBaseRegister(MachineBasicBlock *MBB,
void AArch64RegisterInfo::resolveFrameIndex(MachineInstr &MI, unsigned BaseReg,
int64_t Offset) const {
int Off = Offset; // ARM doesn't need the general 64-bit offsets
// ARM doesn't need the general 64-bit offsets
StackOffset Off(Offset, MVT::i8);
unsigned i = 0;
while (!MI.getOperand(i).isFI()) {
@@ -449,34 +452,36 @@ void AArch64RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
unsigned FrameReg;
int Offset;
// Special handling of dbg_value, stackmap and patchpoint instructions.
if (MI.isDebugValue() || MI.getOpcode() == TargetOpcode::STACKMAP ||
MI.getOpcode() == TargetOpcode::PATCHPOINT) {
Offset = TFI->resolveFrameIndexReference(MF, FrameIndex, FrameReg,
/*PreferFP=*/true,
/*ForSimm=*/false);
Offset += MI.getOperand(FIOperandNum + 1).getImm();
StackOffset Offset =
TFI->resolveFrameIndexReference(MF, FrameIndex, FrameReg,
/*PreferFP=*/true,
/*ForSimm=*/false);
Offset += StackOffset(MI.getOperand(FIOperandNum + 1).getImm(), MVT::i8);
MI.getOperand(FIOperandNum).ChangeToRegister(FrameReg, false /*isDef*/);
MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset.getBytes());
return;
}
if (MI.getOpcode() == TargetOpcode::LOCAL_ESCAPE) {
MachineOperand &FI = MI.getOperand(FIOperandNum);
Offset = TFI->getNonLocalFrameIndexReference(MF, FrameIndex);
int Offset = TFI->getNonLocalFrameIndexReference(MF, FrameIndex);
FI.ChangeToImmediate(Offset);
return;
}
StackOffset Offset;
if (MI.getOpcode() == AArch64::TAGPstack) {
// TAGPstack must use the virtual frame register in its 3rd operand.
const MachineFrameInfo &MFI = MF.getFrameInfo();
const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
FrameReg = MI.getOperand(3).getReg();
Offset =
MFI.getObjectOffset(FrameIndex) + AFI->getTaggedBasePointerOffset();
Offset = {MFI.getObjectOffset(FrameIndex) +
AFI->getTaggedBasePointerOffset(),
MVT::i8};
} else {
Offset = TFI->resolveFrameIndexReference(
MF, FrameIndex, FrameReg, /*PreferFP=*/false, /*ForSimm=*/true);

llvm/lib/Target/AArch64/AArch64StackOffset.h

@@ -0,0 +1,105 @@
//==--AArch64StackOffset.h ---------------------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the declaration of the StackOffset class, which is used to
// describe scalable and non-scalable offsets during frame lowering.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_LIB_TARGET_AARCH64_AARCH64STACKOFFSET_H
#define LLVM_LIB_TARGET_AARCH64_AARCH64STACKOFFSET_H
#include "llvm/Support/MachineValueType.h"
namespace llvm {
/// StackOffset is a wrapper around scalable and non-scalable offsets and is
/// used in several functions such as 'isAArch64FrameOffsetLegal' and
/// 'emitFrameOffset()'. StackOffsets are described by MVTs, e.g.
//
/// StackOffset(1, MVT::nxv16i8)
//
/// would describe an offset as being the size of a single SVE vector.
///
/// The class also implements simple arithmetic (addition/subtraction) on these
/// offsets, e.g.
//
/// StackOffset(1, MVT::nxv16i8) + StackOffset(1, MVT::i64)
//
/// describes an offset that spans the combined storage required for an SVE
/// vector and a 64bit GPR.
class StackOffset {
int64_t Bytes;
explicit operator int() const;
public:
using Part = std::pair<int64_t, MVT>;
StackOffset() : Bytes(0) {}
StackOffset(int64_t Offset, MVT::SimpleValueType T) : StackOffset() {
assert(!MVT(T).isScalableVector() && "Scalable types not supported");
*this += Part(Offset, T);
}
StackOffset(const StackOffset &Other) : Bytes(Other.Bytes) {}
StackOffset &operator=(const StackOffset &) = default;
StackOffset &operator+=(const StackOffset::Part &Other) {
assert(Other.second.getSizeInBits() % 8 == 0 &&
"Offset type is not a multiple of bytes");
Bytes += Other.first * (Other.second.getSizeInBits() / 8);
return *this;
}
StackOffset &operator+=(const StackOffset &Other) {
Bytes += Other.Bytes;
return *this;
}
StackOffset operator+(const StackOffset &Other) const {
StackOffset Res(*this);
Res += Other;
return Res;
}
StackOffset &operator-=(const StackOffset &Other) {
Bytes -= Other.Bytes;
return *this;
}
StackOffset operator-(const StackOffset &Other) const {
StackOffset Res(*this);
Res -= Other;
return Res;
}
StackOffset operator-() const {
StackOffset Res = {};
const StackOffset Other(*this);
Res -= Other;
return Res;
}
/// Returns the non-scalable part of the offset in bytes.
int64_t getBytes() const { return Bytes; }
/// Returns the offset in parts to which this frame offset can be
/// decomposed for the purpose of describing a frame offset.
/// For non-scalable offsets this is simply its byte size.
void getForFrameOffset(int64_t &ByteSized) const { ByteSized = Bytes; }
/// Returns whether the offset is known zero.
explicit operator bool() const { return Bytes; }
};
} // end namespace llvm
#endif

llvm/unittests/Target/AArch64/CMakeLists.txt

@@ -19,4 +19,5 @@ set(LLVM_LINK_COMPONENTS
add_llvm_unittest(AArch64Tests
InstSizes.cpp
TestStackOffset.cpp
)

llvm/unittests/Target/AArch64/TestStackOffset.cpp

@@ -0,0 +1,60 @@
//===- TestStackOffset.cpp - StackOffset unit tests------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "AArch64StackOffset.h"
#include "gtest/gtest.h"
using namespace llvm;
TEST(StackOffset, MixedSize) {
StackOffset A(1, MVT::i8);
EXPECT_EQ(1, A.getBytes());
StackOffset B(2, MVT::i32);
EXPECT_EQ(8, B.getBytes());
StackOffset C(2, MVT::v4i64);
EXPECT_EQ(64, C.getBytes());
}
TEST(StackOffset, Add) {
StackOffset A(1, MVT::i64);
StackOffset B(1, MVT::i32);
StackOffset C = A + B;
EXPECT_EQ(12, C.getBytes());
StackOffset D(1, MVT::i32);
D += A;
EXPECT_EQ(12, D.getBytes());
}
TEST(StackOffset, Sub) {
StackOffset A(1, MVT::i64);
StackOffset B(1, MVT::i32);
StackOffset C = A - B;
EXPECT_EQ(4, C.getBytes());
StackOffset D(1, MVT::i64);
D -= A;
EXPECT_EQ(0, D.getBytes());
}
TEST(StackOffset, isZero) {
StackOffset A(0, MVT::i64);
StackOffset B(0, MVT::i32);
EXPECT_TRUE(!A);
EXPECT_TRUE(!(A + B));
}
TEST(StackOffset, getForFrameOffset) {
StackOffset A(1, MVT::i64);
StackOffset B(1, MVT::i32);
int64_t ByteSized;
(A + B).getForFrameOffset(ByteSized);
EXPECT_EQ(12, ByteSized);
}