hasFP() is now a virtual method of MRegisterInfo.

llvm-svn: 33455
Evan Cheng 2007-01-23 00:57:47 +00:00
parent d39e38848b
commit 16e58be1bc
15 changed files with 47 additions and 18 deletions

View File

@@ -367,6 +367,11 @@ public:
     return 0;
   }
 
+  /// hasFP - Return true if the specified function should have a dedicated frame
+  /// pointer register. For most targets this is true only if the function has
+  /// variable sized allocas or if frame pointer elimination is disabled.
+  virtual bool hasFP(const MachineFunction &MF) const = 0;
+
   /// getCallFrameSetup/DestroyOpcode - These methods return the opcode of the
   /// frame setup/destroy instructions if they exist (-1 otherwise). Some
   /// targets use pseudo instructions in order to abstract away the difference
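
For orientation, here is a minimal sketch of what the new pure virtual asks of a backend. DemoRegisterInfo is a hypothetical class name, the fragment omits the rest of the MRegisterInfo interface a real target must still implement, and the body simply mirrors the simplest policy used by the target overrides later in this commit.

// Sketch only: DemoRegisterInfo is hypothetical and omits the other
// MRegisterInfo members (register descriptors, callee-saved lists, etc.)
// that a real backend has to provide.
#include "llvm/Target/MRegisterInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFrameInfo.h"

class DemoRegisterInfo : public MRegisterInfo {
public:
  // The new hook: each target now answers the frame-pointer question itself
  // instead of relying on a file-local static helper.
  virtual bool hasFP(const MachineFunction &MF) const {
    // Simplest policy seen in this commit (cf. the Alpha override below):
    // dedicate a frame pointer only when the function has variable sized
    // objects on its frame.
    return MF.getFrameInfo()->hasVarSizedObjects();
  }
};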

View File

@@ -277,7 +277,7 @@ ARMRegisterInfo::getCalleeSavedRegClasses() const {
 /// pointer register. This is true if the function has variable sized allocas
 /// or if frame pointer elimination is disabled.
 ///
-static bool hasFP(const MachineFunction &MF) {
+bool ARMRegisterInfo::hasFP(const MachineFunction &MF) const {
   return NoFramePointerElim || MF.getFrameInfo()->hasVarSizedObjects();
 }

View File

@@ -68,6 +68,8 @@ public:
   const TargetRegisterClass* const* getCalleeSavedRegClasses() const;
 
+  bool hasFP(const MachineFunction &MF) const;
+
   void eliminateCallFramePseudoInstr(MachineFunction &MF,
                                      MachineBasicBlock &MBB,
                                      MachineBasicBlock::iterator I) const;

View File

@@ -150,6 +150,7 @@ def GPR : RegisterClass<"ARM", [i32], 32, [R0, R1, R2, R3, R4, R5, R6,
     GPRClass::iterator
     GPRClass::allocation_order_end(const MachineFunction &MF) const {
       const TargetMachine &TM = MF.getTarget();
+      const MRegisterInfo *RI = TM.getRegisterInfo();
       const ARMSubtarget &Subtarget = TM.getSubtarget<ARMSubtarget>();
       GPRClass::iterator I;
       if (Subtarget.isThumb())
@@ -167,7 +168,7 @@ def GPR : RegisterClass<"ARM", [i32], 32, [R0, R1, R2, R3, R4, R5, R6,
       }
       // Mac OS X requires FP not to be clobbered for backtracing purpose.
-      return (Subtarget.isTargetDarwin() || hasFP(MF)) ? I-1 : I;
+      return (Subtarget.isTargetDarwin() || RI->hasFP(MF)) ? I-1 : I;
     }
   }];
}
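
The pattern these TableGen'd allocation-order bodies follow after the change (fetch the MRegisterInfo from the TargetMachine, then dispatch through the virtual hasFP()) can be restated as an ordinary function. The wrapper name below is illustrative only, not something added by this commit.

// Caller-side pattern used by the register class MethodBodies after this
// change; the free-function wrapper is purely illustrative.
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/MRegisterInfo.h"
#include "llvm/CodeGen/MachineFunction.h"

static bool functionHasFP(const MachineFunction &MF) {
  const TargetMachine &TM = MF.getTarget();
  const MRegisterInfo *RI = TM.getRegisterInfo();
  return RI->hasFP(MF);  // virtual dispatch into the target's override
}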

View File

@@ -186,7 +186,7 @@ AlphaRegisterInfo::getCalleeSavedRegClasses() const {
 // pointer register. This is true if the function has variable sized allocas or
 // if frame pointer elimination is disabled.
 //
-static bool hasFP(const MachineFunction &MF) {
+bool AlphaRegisterInfo::hasFP(const MachineFunction &MF) const {
   MachineFrameInfo *MFI = MF.getFrameInfo();
   return MFI->hasVarSizedObjects();
 }

View File

@@ -49,6 +49,8 @@ struct AlphaRegisterInfo : public AlphaGenRegisterInfo {
   const TargetRegisterClass* const* getCalleeSavedRegClasses() const;
 
+  bool hasFP(const MachineFunction &MF) const;
+
   void eliminateCallFramePseudoInstr(MachineFunction &MF,
                                      MachineBasicBlock &MBB,
                                      MachineBasicBlock::iterator I) const;

View File

@@ -114,7 +114,7 @@ IA64RegisterInfo::getCalleeSavedRegClasses() const {
 // pointer register. This is true if the function has variable sized allocas or
 // if frame pointer elimination is disabled.
 //
-static bool hasFP(const MachineFunction &MF) {
+bool IA64RegisterInfo::hasFP(const MachineFunction &MF) const {
   return NoFramePointerElim || MF.getFrameInfo()->hasVarSizedObjects();
 }

View File

@@ -48,6 +48,8 @@ struct IA64RegisterInfo : public IA64GenRegisterInfo {
   const TargetRegisterClass* const* getCalleeSavedRegClasses() const;
 
+  bool hasFP(const MachineFunction &MF) const;
+
   void eliminateCallFramePseudoInstr(MachineFunction &MF,
                                      MachineBasicBlock &MBB,
                                      MachineBasicBlock::iterator MI) const;

View File

@@ -410,7 +410,7 @@ static bool needsFP(const MachineFunction &MF) {
 // hasFP - Return true if the specified function actually has a dedicated frame
 // pointer register. This is true if the function needs a frame pointer and has
 // a non-zero stack size.
-static bool hasFP(const MachineFunction &MF) {
+bool PPCRegisterInfo::hasFP(const MachineFunction &MF) const {
   const MachineFrameInfo *MFI = MF.getFrameInfo();
   return MFI->getStackSize() && needsFP(MF);
 }

View File

@@ -58,6 +58,8 @@ public:
   const TargetRegisterClass* const* getCalleeSavedRegClasses() const;
 
+  bool hasFP(const MachineFunction &MF) const;
+
   void eliminateCallFramePseudoInstr(MachineFunction &MF,
                                      MachineBasicBlock &MBB,
                                      MachineBasicBlock::iterator I) const;

View File

@@ -122,6 +122,9 @@ SparcRegisterInfo::getCalleeSavedRegClasses() const {
   return CalleeSavedRegClasses;
 }
 
+bool SparcRegisterInfo::hasFP(const MachineFunction &MF) const {
+  return false;
+}
 
 void SparcRegisterInfo::
 eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,

View File

@@ -52,6 +52,8 @@ struct SparcRegisterInfo : public SparcGenRegisterInfo {
   const TargetRegisterClass* const* getCalleeSavedRegClasses() const;
 
+  bool hasFP(const MachineFunction &MF) const;
+
   void eliminateCallFramePseudoInstr(MachineFunction &MF,
                                      MachineBasicBlock &MBB,
                                      MachineBasicBlock::iterator I) const;

View File

@@ -891,7 +891,7 @@ X86RegisterInfo::getCalleeSavedRegClasses() const {
 // pointer register. This is true if the function has variable sized allocas or
 // if frame pointer elimination is disabled.
 //
-static bool hasFP(const MachineFunction &MF) {
+bool X86RegisterInfo::hasFP(const MachineFunction &MF) const {
   return (NoFramePointerElim ||
           MF.getFrameInfo()->hasVarSizedObjects() ||
           MF.getInfo<X86FunctionInfo>()->getForceFramePointer());
@@ -998,7 +998,7 @@ void X86RegisterInfo::emitPrologue(MachineFunction &MF) const {
   // Get the number of bytes to allocate from the FrameInfo
   unsigned NumBytes = MFI->getStackSize();
 
-  if (MFI->hasCalls() || MF.getFrameInfo()->hasVarSizedObjects()) {
+  if (MFI->hasCalls() || MFI->hasVarSizedObjects()) {
     // When we have no frame pointer, we reserve argument space for call sites
     // in the function immediately on entry to the current function. This
     // eliminates the need for add/sub ESP brackets around call sites.

View File

@ -78,6 +78,8 @@ public:
/// length of this list match the getCalleeSavedRegs() list.
const TargetRegisterClass* const* getCalleeSavedRegClasses() const;
bool hasFP(const MachineFunction &MF) const;
void eliminateCallFramePseudoInstr(MachineFunction &MF,
MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI) const;

View File

@@ -197,10 +197,11 @@ def GR8 : RegisterClass<"X86", [i8], 8,
     GR8Class::iterator
     GR8Class::allocation_order_begin(const MachineFunction &MF) const {
       const TargetMachine &TM = MF.getTarget();
+      const MRegisterInfo *RI = TM.getRegisterInfo();
       const X86Subtarget &Subtarget = TM.getSubtarget<X86Subtarget>();
       if (!Subtarget.is64Bit())
         return X86_GR8_AO_32;
-      else if (hasFP(MF))
+      else if (RI->hasFP(MF))
         return X86_GR8_AO_64_fp;
       else
         return X86_GR8_AO_64;
@@ -209,10 +210,11 @@ def GR8 : RegisterClass<"X86", [i8], 8,
     GR8Class::iterator
     GR8Class::allocation_order_end(const MachineFunction &MF) const {
       const TargetMachine &TM = MF.getTarget();
+      const MRegisterInfo *RI = TM.getRegisterInfo();
       const X86Subtarget &Subtarget = TM.getSubtarget<X86Subtarget>();
       if (!Subtarget.is64Bit())
         return X86_GR8_AO_32 + (sizeof(X86_GR8_AO_32) / sizeof(unsigned));
-      else if (hasFP(MF))
+      else if (RI->hasFP(MF))
         return X86_GR8_AO_64_fp + (sizeof(X86_GR8_AO_64_fp) / sizeof(unsigned));
       else
         return X86_GR8_AO_64 + (sizeof(X86_GR8_AO_64) / sizeof(unsigned));
@@ -248,14 +250,15 @@ def GR16 : RegisterClass<"X86", [i16], 16,
     GR16Class::iterator
     GR16Class::allocation_order_begin(const MachineFunction &MF) const {
       const TargetMachine &TM = MF.getTarget();
+      const MRegisterInfo *RI = TM.getRegisterInfo();
       const X86Subtarget &Subtarget = TM.getSubtarget<X86Subtarget>();
       if (Subtarget.is64Bit()) {
-        if (hasFP(MF))
+        if (RI->hasFP(MF))
           return X86_GR16_AO_64_fp;
         else
           return X86_GR16_AO_64;
       } else {
-        if (hasFP(MF))
+        if (RI->hasFP(MF))
           return X86_GR16_AO_32_fp;
         else
           return X86_GR16_AO_32;
@@ -265,14 +268,15 @@ def GR16 : RegisterClass<"X86", [i16], 16,
     GR16Class::iterator
     GR16Class::allocation_order_end(const MachineFunction &MF) const {
       const TargetMachine &TM = MF.getTarget();
+      const MRegisterInfo *RI = TM.getRegisterInfo();
       const X86Subtarget &Subtarget = TM.getSubtarget<X86Subtarget>();
       if (Subtarget.is64Bit()) {
-        if (hasFP(MF))
+        if (RI->hasFP(MF))
           return X86_GR16_AO_64_fp+(sizeof(X86_GR16_AO_64_fp)/sizeof(unsigned));
         else
           return X86_GR16_AO_64 + (sizeof(X86_GR16_AO_64) / sizeof(unsigned));
       } else {
-        if (hasFP(MF))
+        if (RI->hasFP(MF))
           return X86_GR16_AO_32_fp+(sizeof(X86_GR16_AO_32_fp)/sizeof(unsigned));
         else
           return X86_GR16_AO_32 + (sizeof(X86_GR16_AO_32) / sizeof(unsigned));
@@ -309,14 +313,15 @@ def GR32 : RegisterClass<"X86", [i32], 32,
     GR32Class::iterator
     GR32Class::allocation_order_begin(const MachineFunction &MF) const {
      const TargetMachine &TM = MF.getTarget();
+      const MRegisterInfo *RI = TM.getRegisterInfo();
       const X86Subtarget &Subtarget = TM.getSubtarget<X86Subtarget>();
       if (Subtarget.is64Bit()) {
-        if (hasFP(MF))
+        if (RI->hasFP(MF))
           return X86_GR32_AO_64_fp;
         else
           return X86_GR32_AO_64;
       } else {
-        if (hasFP(MF))
+        if (RI->hasFP(MF))
           return X86_GR32_AO_32_fp;
         else
           return X86_GR32_AO_32;
@@ -326,14 +331,15 @@ def GR32 : RegisterClass<"X86", [i32], 32,
     GR32Class::iterator
     GR32Class::allocation_order_end(const MachineFunction &MF) const {
       const TargetMachine &TM = MF.getTarget();
+      const MRegisterInfo *RI = TM.getRegisterInfo();
       const X86Subtarget &Subtarget = TM.getSubtarget<X86Subtarget>();
       if (Subtarget.is64Bit()) {
-        if (hasFP(MF))
+        if (RI->hasFP(MF))
           return X86_GR32_AO_64_fp+(sizeof(X86_GR32_AO_64_fp)/sizeof(unsigned));
         else
           return X86_GR32_AO_64 + (sizeof(X86_GR32_AO_64) / sizeof(unsigned));
       } else {
-        if (hasFP(MF))
+        if (RI->hasFP(MF))
           return X86_GR32_AO_32_fp+(sizeof(X86_GR32_AO_32_fp)/sizeof(unsigned));
         else
           return X86_GR32_AO_32 + (sizeof(X86_GR32_AO_32) / sizeof(unsigned));
@@ -352,7 +358,9 @@ def GR64 : RegisterClass<"X86", [i64], 64,
   let MethodBodies = [{
     GR64Class::iterator
     GR64Class::allocation_order_end(const MachineFunction &MF) const {
-      if (hasFP(MF)) // Does the function dedicate RBP to being a frame ptr?
+      const TargetMachine &TM = MF.getTarget();
+      const MRegisterInfo *RI = TM.getRegisterInfo();
+      if (RI->hasFP(MF)) // Does the function dedicate RBP to being a frame ptr?
         return end()-2;  // If so, don't allocate RSP or RBP
       else
         return end()-1;  // If not, just don't allocate RSP