AArch64: Fix emergency spillslot being out of reach for large callframes
Re-commit of r322200: the testcase shouldn't hit machine verifier errors anymore with r322917 in place.

Large callframes (calls with several hundred or thousand parameters) could lead to situations in which the emergency spillslot is out of range to be addressed relative to the stack pointer. This commit forces the use of a frame pointer in the presence of large callframes.

This commit does several things:
- Compute the max callframe size at the end of instruction selection.
- Add a mirFileLoaded target callback. Use it to compute the max callframe size after loading a .mir file when the size wasn't specified in the file.
- Let TargetFrameLowering::hasFP() return true if there exists a callframe > 255 bytes.
- Always place the emergency spillslot close to FP if we have a frame pointer.
- Note that `useFPForScavengingIndex()` would previously return false when a base pointer was available, leading to the emergency spillslot getting allocated late (that's the whole effect of this callback). This made no sense to me, so I took that case out: even though the emergency spillslot is technically not referenced by FP in this case, we still want it allocated early.

Differential Revision: https://reviews.llvm.org/D40876

llvm-svn: 322919
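For readers skimming the diff below, here is a minimal standalone sketch of the large-callframe rule described above. It is not the verbatim code from the patch; the helper name needsFPForLargeCallFrame and the local SafeSPDisplacement constant are invented for illustration, while the MachineFrameInfo calls are the real LLVM API the patch relies on.

#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"

// Largest SP-relative offset most AArch64 load/store instructions can encode
// without needing a scratch register (mirrors DefaultSafeSPDisplacement below).
static const unsigned SafeSPDisplacement = 255;

// Hypothetical helper: if the maximum call frame size is unknown or too large
// to address relative to SP, force a frame pointer so the register scavenger's
// emergency spill slot stays reachable.
static bool needsFPForLargeCallFrame(const llvm::MachineFunction &MF) {
  const llvm::MachineFrameInfo &MFI = MF.getFrameInfo();
  if (!MFI.isMaxCallFrameSizeComputed())
    return true; // too early to know; conservatively require a frame pointer
  return MFI.getMaxCallFrameSize() > SafeSPDisplacement;
}

The actual change folds this check directly into AArch64FrameLowering::hasFP(), using the same 255-byte threshold (DefaultSafeSPDisplacement), as shown in the hunks below.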
@@ -248,6 +248,9 @@ public:
   /// Returns string representation of scheduler comment
   std::string getSchedInfoStr(const MachineInstr &MI) const override;
   std::string getSchedInfoStr(MCInst const &MCI) const override;
+
+  /// This is called after a .mir file was loaded.
+  virtual void mirFileLoaded(MachineFunction &MF) const;
 };
 
 } // end namespace llvm

@@ -417,6 +417,8 @@ MIRParserImpl::initializeMachineFunction(const yaml::MachineFunction &YamlMF,
 
   computeFunctionProperties(MF);
 
+  MF.getSubtarget().mirFileLoaded(MF);
+
   MF.verify();
   return false;
 }

@@ -111,3 +111,6 @@ std::string TargetSubtargetInfo::getSchedInfoStr(MCInst const &MCI) const {
       TSchedModel.computeInstrRThroughput(MCI.getOpcode());
   return createSchedInfoStr(Latency, RThroughput);
 }
+
+void TargetSubtargetInfo::mirFileLoaded(MachineFunction &MF) const {
+}

@@ -142,6 +142,12 @@ static cl::opt<bool> EnableRedZone("aarch64-redzone",
 
 STATISTIC(NumRedZoneFunctions, "Number of functions using red zone");
 
+/// This is the biggest offset to the stack pointer we can encode in aarch64
+/// instructions (without using a separate calculation and a temp register).
+/// Note that the exceptions here are vector stores/loads which cannot encode any
+/// displacements (see estimateRSStackSizeLimit(), isAArch64FrameOffsetLegal()).
+static const unsigned DefaultSafeSPDisplacement = 255;
+
 /// Look at each instruction that references stack frames and return the stack
 /// size limit beyond which some of these instructions will require a scratch
 /// register during their expansion later.
@@ -167,7 +173,7 @@ static unsigned estimateRSStackSizeLimit(MachineFunction &MF) {
       }
     }
   }
-  return 255;
+  return DefaultSafeSPDisplacement;
 }
 
 bool AArch64FrameLowering::canUseRedZone(const MachineFunction &MF) const {

@@ -191,11 +197,25 @@ bool AArch64FrameLowering::hasFP(const MachineFunction &MF) const {
   const MachineFrameInfo &MFI = MF.getFrameInfo();
   const TargetRegisterInfo *RegInfo = MF.getSubtarget().getRegisterInfo();
   // Retain behavior of always omitting the FP for leaf functions when possible.
-  return (MFI.hasCalls() &&
-          MF.getTarget().Options.DisableFramePointerElim(MF)) ||
-         MFI.hasVarSizedObjects() || MFI.isFrameAddressTaken() ||
-         MFI.hasStackMap() || MFI.hasPatchPoint() ||
-         RegInfo->needsStackRealignment(MF);
+  if (MFI.hasCalls() && MF.getTarget().Options.DisableFramePointerElim(MF))
+    return true;
+  if (MFI.hasVarSizedObjects() || MFI.isFrameAddressTaken() ||
+      MFI.hasStackMap() || MFI.hasPatchPoint() ||
+      RegInfo->needsStackRealignment(MF))
+    return true;
+  // With large callframes around we may need to use FP to access the scavenging
+  // emergency spillslot.
+  //
+  // Unfortunately some calls to hasFP() like machine verifier ->
+  // getReservedReg() -> hasFP in the middle of global isel are too early
+  // to know the max call frame size. Hopefully conservatively returning "true"
+  // in those cases is fine.
+  // DefaultSafeSPDisplacement is fine as we only emergency spill GP regs.
+  if (!MFI.isMaxCallFrameSizeComputed() ||
+      MFI.getMaxCallFrameSize() > DefaultSafeSPDisplacement)
+    return true;
+
+  return false;
 }
 
 /// hasReservedCallFrame - Under normal circumstances, when a frame pointer is

@@ -10982,3 +10982,8 @@ AArch64TargetLowering::getVaListSizeInBits(const DataLayout &DL) const {
 
   return 3 * getPointerTy(DL).getSizeInBits() + 2 * 32;
 }
+
+void AArch64TargetLowering::finalizeLowering(MachineFunction &MF) const {
+  MF.getFrameInfo().computeMaxCallFrameSize(MF);
+  TargetLoweringBase::finalizeLowering(MF);
+}

@@ -648,6 +648,8 @@ private:
                           SelectionDAG &DAG) const override;
 
   bool shouldNormalizeToSelectSequence(LLVMContext &, EVT) const override;
+
+  void finalizeLowering(MachineFunction &MF) const override;
 };
 
 namespace AArch64 {

@@ -225,11 +225,13 @@ bool AArch64RegisterInfo::requiresVirtualBaseRegisters(
 
 bool
 AArch64RegisterInfo::useFPForScavengingIndex(const MachineFunction &MF) const {
-  const MachineFrameInfo &MFI = MF.getFrameInfo();
-  // AArch64FrameLowering::resolveFrameIndexReference() can always fall back
-  // to the stack pointer, so only put the emergency spill slot next to the
-  // FP when there's no better way to access it (SP or base pointer).
-  return MFI.hasVarSizedObjects() && !hasBasePointer(MF);
+  // This function indicates whether the emergency spillslot should be placed
+  // close to the beginning of the stackframe (closer to FP) or the end
+  // (closer to SP).
+  //
+  // The beginning works most reliably if we have a frame pointer.
+  const AArch64FrameLowering &TFI = *getFrameLowering(MF);
+  return TFI.hasFP(MF);
 }
 
 bool AArch64RegisterInfo::requiresFrameIndexScavenging(

@@ -250,3 +250,13 @@ std::unique_ptr<PBQPRAConstraint>
 AArch64Subtarget::getCustomPBQPConstraints() const {
   return balanceFPOps() ? llvm::make_unique<A57ChainingConstraint>() : nullptr;
 }
+
+void AArch64Subtarget::mirFileLoaded(MachineFunction &MF) const {
+  // We usually compute max call frame size after ISel. Do the computation now
+  // if the .mir file didn't specify it. Note that this will probably give you
+  // bogus values after PEI has eliminated the callframe setup/destroy pseudo
+  // instructions; specify it explicitly if you need it to be correct.
+  MachineFrameInfo &MFI = MF.getFrameInfo();
+  if (!MFI.isMaxCallFrameSizeComputed())
+    MFI.computeMaxCallFrameSize(MF);
+}

@@ -326,6 +326,8 @@ public:
       return false;
     }
   }
+
+  void mirFileLoaded(MachineFunction &MF) const override;
 };
 } // End llvm namespace
 

@@ -0,0 +1,15 @@
+; RUN: llc -o - %s -verify-machineinstrs | FileCheck %s
+; Make sure we use a frame pointer and fp relative addressing for the emergency
+; spillslot when we have gigantic callframes.
+; CHECK-LABEL: func:
+; CHECK: stur {{.*}}, [x29, #{{.*}}] // 8-byte Folded Spill
+; CHECK: ldur {{.*}}, [x29, #{{.*}}] // 8-byte Folded Reload
+target triple = "aarch64--"
+declare void @extfunc([4096 x i64]* byval %p)
+define void @func([4096 x i64]* %z) {
+  %lvar = alloca [31 x i8]
+  %v = load volatile [31 x i8], [31 x i8]* %lvar
+  store volatile [31 x i8] %v, [31 x i8]* %lvar
+  call void @extfunc([4096 x i64]* byval %z)
+  ret void
+}