Code refactoring, no functionality change.

llvm-svn: 94570
Evan Cheng 2010-01-26 19:04:47 +00:00
parent 9c71bb03f3
commit 8703c412f4
3 changed files with 27 additions and 28 deletions
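
The change hoists the condition PerformTailCallOpt && CC == CallingConv::Fast, previously repeated in X86FastISel.cpp and X86ISelLowering.cpp, into a single predicate, X86::IsEligibleForTailCallOpt. The sketch below is a minimal standalone illustration of that pattern, not LLVM code; the enum, flag, and function names are simplified stand-ins.

// illustration.cpp - minimal standalone sketch of the refactoring pattern:
// a boolean condition repeated at several call sites is hoisted into one
// named predicate. All names here are simplified stand-ins, not LLVM's.
#include <cassert>

namespace sketch {

enum class CallingConv { C, Fast, Cold };

// Stand-in for the -tailcallopt command line flag (llvm::PerformTailCallOpt).
bool PerformTailCallOpt = false;

// Mirrors the shape of the new X86::IsEligibleForTailCallOpt: guaranteed tail
// call lowering is only attempted for fastcc when -tailcallopt is enabled.
bool IsEligibleForTailCallOpt(CallingConv CC) {
  return PerformTailCallOpt && CC == CallingConv::Fast;
}

// A call site: before the refactor it would spell out the raw condition
// (CC == CallingConv::Fast && PerformTailCallOpt); now it calls the predicate.
bool shouldDeferToSelectionDAG(CallingConv CC) {
  return IsEligibleForTailCallOpt(CC); // fast-isel bails out on these calls
}

} // namespace sketch

int main() {
  using namespace sketch;
  assert(!shouldDeferToSelectionDAG(CallingConv::Fast)); // flag off: not eligible
  PerformTailCallOpt = true;
  assert(shouldDeferToSelectionDAG(CallingConv::Fast));  // fastcc + flag: eligible
  assert(!shouldDeferToSelectionDAG(CallingConv::C));    // other conventions: never
  return 0;
}

Compiled on its own with assertions enabled, the checks pass; the point is simply that every call site now changes behavior together when the one predicate changes.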

lib/Target/X86/X86FastISel.cpp

@@ -31,7 +31,6 @@
 #include "llvm/Support/CallSite.h"
 #include "llvm/Support/ErrorHandling.h"
 #include "llvm/Support/GetElementPtrTypeIterator.h"
-#include "llvm/Target/TargetOptions.h"
 using namespace llvm;
 
 namespace {
@@ -1246,7 +1245,7 @@ bool X86FastISel::X86SelectCall(Instruction *I) {
   // fastcc with -tailcallopt is intended to provide a guaranteed
   // tail call optimization. Fastisel doesn't know how to do that.
-  if (CC == CallingConv::Fast && PerformTailCallOpt)
+  if (X86::IsEligibleForTailCallOpt(CC))
    return false;
 
   // Let SDISel handle vararg functions.

lib/Target/X86/X86ISelLowering.cpp

@@ -37,7 +37,6 @@
 #include "llvm/MC/MCContext.h"
 #include "llvm/MC/MCExpr.h"
 #include "llvm/MC/MCSymbol.h"
-#include "llvm/Target/TargetOptions.h"
 #include "llvm/ADT/BitVector.h"
 #include "llvm/ADT/SmallSet.h"
 #include "llvm/ADT/StringExtras.h"
@@ -1448,7 +1447,7 @@ X86TargetLowering::LowerMemArgument(SDValue Chain,
   // Create the nodes corresponding to a load from this parameter slot.
   ISD::ArgFlagsTy Flags = Ins[i].Flags;
-  bool AlwaysUseMutable = (CallConv==CallingConv::Fast) && PerformTailCallOpt;
+  bool AlwaysUseMutable = X86::IsEligibleForTailCallOpt(CallConv);
   bool isImmutable = !AlwaysUseMutable && !Flags.isByVal();
   EVT ValVT;
@@ -1586,7 +1585,7 @@ X86TargetLowering::LowerFormalArguments(SDValue Chain,
   unsigned StackSize = CCInfo.getNextStackOffset();
   // align stack specially for tail calls
-  if (PerformTailCallOpt && CallConv == CallingConv::Fast)
+  if (X86::IsEligibleForTailCallOpt(CallConv))
     StackSize = GetAlignedArgumentStackSize(StackSize, DAG);
 
   // If the function takes variable number of arguments, make a frame index for
@@ -1738,12 +1737,9 @@ X86TargetLowering::LowerMemOpCallTo(SDValue Chain,
 /// optimization is performed and it is required.
 SDValue
 X86TargetLowering::EmitTailCallLoadRetAddr(SelectionDAG &DAG,
-                                           SDValue &OutRetAddr,
-                                           SDValue Chain,
-                                           bool IsTailCall,
-                                           bool Is64Bit,
-                                           int FPDiff,
-                                           DebugLoc dl) {
+                                           SDValue &OutRetAddr, SDValue Chain,
+                                           bool IsTailCall, bool Is64Bit,
+                                           int FPDiff, DebugLoc dl) {
   if (!IsTailCall || FPDiff==0) return Chain;
 
   // Adjust the Return address stack slot.
@@ -1766,8 +1762,7 @@ EmitTailCallStoreRetAddr(SelectionDAG & DAG, MachineFunction &MF,
   // Calculate the new stack slot for the return address.
   int SlotSize = Is64Bit ? 8 : 4;
   int NewReturnAddrFI =
-    MF.getFrameInfo()->CreateFixedObject(SlotSize, FPDiff-SlotSize,
-                                         true, false);
+    MF.getFrameInfo()->CreateFixedObject(SlotSize, FPDiff-SlotSize, true,false);
   EVT VT = Is64Bit ? MVT::i64 : MVT::i32;
   SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewReturnAddrFI, VT);
   Chain = DAG.getStore(Chain, dl, RetAddrFrIdx, NewRetAddrFrIdx,
@@ -1788,9 +1783,8 @@ X86TargetLowering::LowerCall(SDValue Chain, SDValue Callee,
   bool Is64Bit = Subtarget->is64Bit();
   bool IsStructRet = CallIsStructReturn(Outs);
 
-  assert((!isTailCall ||
-          (CallConv == CallingConv::Fast && PerformTailCallOpt)) &&
-         "IsEligibleForTailCallOptimization missed a case!");
+  assert((!isTailCall || X86::IsEligibleForTailCallOpt(CallConv)) &&
+         "Call is not eligible for tail call optimization!");
   assert(!(isVarArg && CallConv == CallingConv::Fast) &&
          "Var args not supported with calling convention fastcc");
@@ -1802,7 +1796,7 @@ X86TargetLowering::LowerCall(SDValue Chain, SDValue Callee,
   // Get a count of how many bytes are to be pushed on the stack.
   unsigned NumBytes = CCInfo.getNextStackOffset();
-  if (PerformTailCallOpt && CallConv == CallingConv::Fast)
+  if (X86::IsEligibleForTailCallOpt(CallConv))
     NumBytes = GetAlignedArgumentStackSize(NumBytes, DAG);
 
   int FPDiff = 0;
@@ -2240,21 +2234,18 @@ X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
                                                      bool isVarArg,
                                       const SmallVectorImpl<ISD::InputArg> &Ins,
                                                      SelectionDAG& DAG) const {
-  MachineFunction &MF = DAG.getMachineFunction();
-  CallingConv::ID CallerCC = MF.getFunction()->getCallingConv();
-  return CalleeCC == CallingConv::Fast && CallerCC == CalleeCC;
+  return X86::IsEligibleForTailCallOpt(CalleeCC) &&
+         DAG.getMachineFunction().getFunction()->getCallingConv() == CalleeCC;
 }
 
 FastISel *
-X86TargetLowering::createFastISel(MachineFunction &mf,
-                                  MachineModuleInfo *mmo,
-                                  DwarfWriter *dw,
-                                  DenseMap<const Value *, unsigned> &vm,
-                                  DenseMap<const BasicBlock *,
-                                           MachineBasicBlock *> &bm,
-                                  DenseMap<const AllocaInst *, int> &am
+X86TargetLowering::createFastISel(MachineFunction &mf, MachineModuleInfo *mmo,
+                                  DwarfWriter *dw,
+                                  DenseMap<const Value *, unsigned> &vm,
+                                  DenseMap<const BasicBlock*, MachineBasicBlock*> &bm,
+                                  DenseMap<const AllocaInst *, int> &am
 #ifndef NDEBUG
                                   , SmallSet<Instruction*, 8> &cil
 #endif
                                   ) {
   return X86::createFastISel(mf, mmo, dw, vm, bm, am
@@ -2317,6 +2308,10 @@ bool X86::isOffsetSuitableForCodeModel(int64_t Offset, CodeModel::Model M,
   return false;
 }
 
+bool X86::IsEligibleForTailCallOpt(CallingConv::ID CC) {
+  return PerformTailCallOpt && CC == CallingConv::Fast;
+}
+
 /// TranslateX86CC - do a one to one translation of a ISD::CondCode to the X86
 /// specific condition code, returning the condition code and the LHS/RHS of the
 /// comparison to make.
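
The rewritten IsEligibleForTailCallOptimization above reduces to the new predicate plus a check that caller and callee use the same calling convention. Below is a standalone sketch of that composition, again with simplified stand-in types rather than LLVM's:

// eligibility_sketch.cpp - approximates the simplified SDISel eligibility
// check from this commit. Stand-in types only; not LLVM code.
#include <cassert>

namespace sketch {

enum class CallingConv { C, Fast, Cold };

bool PerformTailCallOpt = true; // as if -tailcallopt were passed

// Same shape as X86::IsEligibleForTailCallOpt.
bool IsEligibleForTailCallOpt(CallingConv CC) {
  return PerformTailCallOpt && CC == CallingConv::Fast;
}

// Same shape as the rewritten IsEligibleForTailCallOptimization: the callee
// convention must pass the predicate and the caller must use the same one.
bool IsEligibleForTailCallOptimization(CallingConv CallerCC, CallingConv CalleeCC) {
  return IsEligibleForTailCallOpt(CalleeCC) && CallerCC == CalleeCC;
}

} // namespace sketch

int main() {
  using namespace sketch;
  assert(IsEligibleForTailCallOptimization(CallingConv::Fast, CallingConv::Fast));
  assert(!IsEligibleForTailCallOptimization(CallingConv::C, CallingConv::Fast));
  assert(!IsEligibleForTailCallOptimization(CallingConv::Fast, CallingConv::C));
  return 0;
}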

lib/Target/X86/X86ISelLowering.h

@@ -19,6 +19,7 @@
 #include "X86RegisterInfo.h"
 #include "X86MachineFunctionInfo.h"
 #include "llvm/Target/TargetLowering.h"
+#include "llvm/Target/TargetOptions.h"
 #include "llvm/CodeGen/FastISel.h"
 #include "llvm/CodeGen/SelectionDAG.h"
 #include "llvm/CodeGen/CallingConvLower.h"
@@ -361,6 +362,10 @@ namespace llvm {
     /// fit into displacement field of the instruction.
     bool isOffsetSuitableForCodeModel(int64_t Offset, CodeModel::Model M,
                                       bool hasSymbolicDisplacement = true);
+
+    /// IsEligibleForTailCallOpt - Return true if it's legal to perform tail call
+    /// optimization for the given calling convention.
+    bool IsEligibleForTailCallOpt(CallingConv::ID CC);
   }
 
   //===--------------------------------------------------------------------===//