Major calling convention code refactoring.

Instead of awkwardly encoding calling-convention information with ISD::CALL,
ISD::FORMAL_ARGUMENTS, ISD::RET, and ISD::ARG_FLAGS nodes, TargetLowering
provides three virtual functions for targets to override:
LowerFormalArguments, LowerCall, and LowerReturn, which replace the custom
lowering done on the special nodes. They provide the same information, but
in a more immediately usable format.

This also reworks much of the target-independent tail call logic. The
decision of whether or not to perform a tail call is now cleanly split
between the target-independent portion and the target-dependent portion
in IsEligibleForTailCallOptimization.

This also synchronizes all in-tree targets, to help enable future
refactoring and feature work.

llvm-svn: 78142
This commit is contained in:
Dan Gohman 2009-08-05 01:29:28 +00:00
parent cdb125ce66
commit f9bbcd1afd
47 changed files with 2080 additions and 2197 deletions

View File

@ -172,17 +172,20 @@ public:
return UsedRegs[Reg/32] & (1 << (Reg&31)); return UsedRegs[Reg/32] & (1 << (Reg&31));
} }
/// AnalyzeFormalArguments - Analyze an ISD::FORMAL_ARGUMENTS node, /// AnalyzeFormalArguments - Analyze an array of argument values,
/// incorporating info about the formals into this state. /// incorporating info about the formals into this state.
void AnalyzeFormalArguments(SDNode *TheArgs, CCAssignFn Fn); void AnalyzeFormalArguments(const SmallVectorImpl<ISD::InputArg> &Ins,
CCAssignFn Fn);
/// AnalyzeReturn - Analyze the returned values of an ISD::RET node, /// AnalyzeReturn - Analyze the returned values of a return,
/// incorporating info about the result values into this state. /// incorporating info about the result values into this state.
void AnalyzeReturn(SDNode *TheRet, CCAssignFn Fn); void AnalyzeReturn(const SmallVectorImpl<ISD::OutputArg> &Outs,
CCAssignFn Fn);
/// AnalyzeCallOperands - Analyze an ISD::CALL node, incorporating info /// AnalyzeCallOperands - Analyze the outgoing arguments to a call,
/// about the passed values into this state. /// incorporating info about the passed values into this state.
void AnalyzeCallOperands(CallSDNode *TheCall, CCAssignFn Fn); void AnalyzeCallOperands(const SmallVectorImpl<ISD::OutputArg> &Outs,
CCAssignFn Fn);
/// AnalyzeCallOperands - Same as above except it takes vectors of types /// AnalyzeCallOperands - Same as above except it takes vectors of types
/// and argument flags. /// and argument flags.
@ -190,9 +193,10 @@ public:
SmallVectorImpl<ISD::ArgFlagsTy> &Flags, SmallVectorImpl<ISD::ArgFlagsTy> &Flags,
CCAssignFn Fn); CCAssignFn Fn);
/// AnalyzeCallResult - Analyze the return values of an ISD::CALL node, /// AnalyzeCallResult - Analyze the return values of a call,
/// incorporating info about the passed values into this state. /// incorporating info about the passed values into this state.
void AnalyzeCallResult(CallSDNode *TheCall, CCAssignFn Fn); void AnalyzeCallResult(const SmallVectorImpl<ISD::InputArg> &Ins,
CCAssignFn Fn);
/// AnalyzeCallResult - Same as above except it's specialized for calls which /// AnalyzeCallResult - Same as above except it's specialized for calls which
/// produce a single value. /// produce a single value.

View File

@ -319,7 +319,6 @@ public:
SDValue getExternalSymbol(const char *Sym, DebugLoc dl, MVT VT); SDValue getExternalSymbol(const char *Sym, DebugLoc dl, MVT VT);
SDValue getTargetExternalSymbol(const char *Sym, MVT VT, SDValue getTargetExternalSymbol(const char *Sym, MVT VT,
unsigned char TargetFlags = 0); unsigned char TargetFlags = 0);
SDValue getArgFlags(ISD::ArgFlagsTy Flags);
SDValue getValueType(MVT); SDValue getValueType(MVT);
SDValue getRegister(unsigned Reg, MVT VT); SDValue getRegister(unsigned Reg, MVT VT);
SDValue getDbgStopPoint(DebugLoc DL, SDValue Root, SDValue getDbgStopPoint(DebugLoc DL, SDValue Root,
@ -460,6 +459,12 @@ public:
SDValue N1, SDValue N2, SDValue N3, SDValue N4, SDValue N1, SDValue N2, SDValue N3, SDValue N4,
SDValue N5); SDValue N5);
/// getStackArgumentTokenFactor - Compute a TokenFactor to force all
/// the incoming stack arguments to be loaded from the stack. This is
/// used in tail call lowering to protect stack arguments from being
/// clobbered.
SDValue getStackArgumentTokenFactor(SDValue Chain);
SDValue getMemcpy(SDValue Chain, DebugLoc dl, SDValue Dst, SDValue Src, SDValue getMemcpy(SDValue Chain, DebugLoc dl, SDValue Dst, SDValue Src,
SDValue Size, unsigned Align, bool AlwaysInline, SDValue Size, unsigned Align, bool AlwaysInline,
const Value *DstSV, uint64_t DstSVOff, const Value *DstSV, uint64_t DstSVOff,
@ -534,13 +539,6 @@ public:
/// getMergeValues - Create a MERGE_VALUES node from the given operands. /// getMergeValues - Create a MERGE_VALUES node from the given operands.
SDValue getMergeValues(const SDValue *Ops, unsigned NumOps, DebugLoc dl); SDValue getMergeValues(const SDValue *Ops, unsigned NumOps, DebugLoc dl);
/// getCall - Create a CALL node from the given information.
///
SDValue getCall(unsigned CallingConv, DebugLoc dl, bool IsVarArgs,
bool IsTailCall, bool isInreg, SDVTList VTs,
const SDValue *Operands, unsigned NumOperands,
unsigned NumFixedArgs);
/// getLoad - Loads are not normal binary operators: their result type is not /// getLoad - Loads are not normal binary operators: their result type is not
/// determined by their operands, and they produce a value AND a token chain. /// determined by their operands, and they produce a value AND a token chain.
/// ///

View File

@ -97,7 +97,7 @@ namespace ISD {
AssertSext, AssertZext, AssertSext, AssertZext,
// Various leaf nodes. // Various leaf nodes.
BasicBlock, VALUETYPE, ARG_FLAGS, CONDCODE, Register, BasicBlock, VALUETYPE, CONDCODE, Register,
Constant, ConstantFP, Constant, ConstantFP,
GlobalAddress, GlobalTLSAddress, FrameIndex, GlobalAddress, GlobalTLSAddress, FrameIndex,
JumpTable, ConstantPool, ExternalSymbol, JumpTable, ConstantPool, ExternalSymbol,
@ -180,38 +180,6 @@ namespace ISD {
// UNDEF - An undefined node // UNDEF - An undefined node
UNDEF, UNDEF,
/// FORMAL_ARGUMENTS(CHAIN, CC#, ISVARARG, FLAG0, ..., FLAGn) - This node
/// represents the formal arguments for a function. CC# is a Constant value
/// indicating the calling convention of the function, and ISVARARG is a
/// flag that indicates whether the function is varargs or not. This node
/// has one result value for each incoming argument, plus one for the output
/// chain. It must be custom legalized. See description of CALL node for
/// FLAG argument contents explanation.
///
FORMAL_ARGUMENTS,
/// RV1, RV2...RVn, CHAIN = CALL(CHAIN, CALLEE,
/// ARG0, FLAG0, ARG1, FLAG1, ... ARGn, FLAGn)
/// This node represents a fully general function call, before the legalizer
/// runs. This has one result value for each argument / flag pair, plus
/// a chain result. It must be custom legalized. Flag argument indicates
/// misc. argument attributes. Currently:
/// Bit 0 - signness
/// Bit 1 - 'inreg' attribute
/// Bit 2 - 'sret' attribute
/// Bit 4 - 'byval' attribute
/// Bit 5 - 'nest' attribute
/// Bit 6-9 - alignment of byval structures
/// Bit 10-26 - size of byval structures
/// Bits 31:27 - argument ABI alignment in the first argument piece and
/// alignment '1' in other argument pieces.
///
/// CALL nodes use the CallSDNode subclass of SDNode, which
/// additionally carries information about the calling convention,
/// whether the call is varargs, and if it's marked as a tail call.
///
CALL,
// EXTRACT_ELEMENT - This is used to get the lower or upper (determined by // EXTRACT_ELEMENT - This is used to get the lower or upper (determined by
// a Constant, which is required to be operand #1) half of the integer or // a Constant, which is required to be operand #1) half of the integer or
// float value specified as operand #0. This is only for use before // float value specified as operand #0. This is only for use before
@ -515,12 +483,6 @@ namespace ISD {
// chain, cc, lhs, rhs, block to branch to if condition is true. // chain, cc, lhs, rhs, block to branch to if condition is true.
BR_CC, BR_CC,
// RET - Return from function. The first operand is the chain,
// and any subsequent operands are pairs of return value and return value
// attributes (see CALL for description of attributes) for the function.
// This operation can have variable number of operands.
RET,
// INLINEASM - Represents an inline asm block. This node always has two // INLINEASM - Represents an inline asm block. This node always has two
// return values: a chain and a flag result. The inputs are as follows: // return values: a chain and a flag result. The inputs are as follows:
// Operand #0 : Input chain. // Operand #0 : Input chain.
@ -2234,81 +2196,42 @@ namespace ISD {
/// getRawBits - Represent the flags as a bunch of bits. /// getRawBits - Represent the flags as a bunch of bits.
uint64_t getRawBits() const { return Flags; } uint64_t getRawBits() const { return Flags; }
}; };
/// InputArg - This struct carries flags and type information about a
/// single incoming (formal) argument or incoming (from the perspective
/// of the caller) return value virtual register.
///
struct InputArg {
ArgFlagsTy Flags;
MVT VT;
bool Used;
InputArg() : VT(MVT::Other), Used(false) {}
InputArg(ISD::ArgFlagsTy flags, MVT vt, bool used)
: Flags(flags), VT(vt), Used(used) {
assert(VT.isSimple() &&
"InputArg value type must be Simple!");
}
};
/// OutputArg - This struct carries flags and a value for a
/// single outgoing (actual) argument or outgoing (from the perspective
/// of the caller) return value virtual register.
///
struct OutputArg {
ArgFlagsTy Flags;
SDValue Val;
bool IsFixed;
OutputArg() : IsFixed(false) {}
OutputArg(ISD::ArgFlagsTy flags, SDValue val, bool isfixed)
: Flags(flags), Val(val), IsFixed(isfixed) {
assert(Val.getValueType().isSimple() &&
"OutputArg value type must be Simple!");
}
};
} }
/// ARG_FLAGSSDNode - Leaf node holding parameter flags.
class ARG_FLAGSSDNode : public SDNode {
ISD::ArgFlagsTy TheFlags;
friend class SelectionDAG;
explicit ARG_FLAGSSDNode(ISD::ArgFlagsTy Flags)
: SDNode(ISD::ARG_FLAGS, DebugLoc::getUnknownLoc(),
getSDVTList(MVT::Other)), TheFlags(Flags) {
}
public:
ISD::ArgFlagsTy getArgFlags() const { return TheFlags; }
static bool classof(const ARG_FLAGSSDNode *) { return true; }
static bool classof(const SDNode *N) {
return N->getOpcode() == ISD::ARG_FLAGS;
}
};
/// CallSDNode - Node for calls -- ISD::CALL.
class CallSDNode : public SDNode {
unsigned CallingConv;
bool IsVarArg;
bool IsTailCall;
unsigned NumFixedArgs;
// We might eventually want a full-blown Attributes for the result; that
// will expand the size of the representation. At the moment we only
// need Inreg.
bool Inreg;
friend class SelectionDAG;
CallSDNode(unsigned cc, DebugLoc dl, bool isvararg, bool istailcall,
bool isinreg, SDVTList VTs, const SDValue *Operands,
unsigned numOperands, unsigned numFixedArgs)
: SDNode(ISD::CALL, dl, VTs, Operands, numOperands),
CallingConv(cc), IsVarArg(isvararg), IsTailCall(istailcall),
NumFixedArgs(numFixedArgs), Inreg(isinreg) {}
public:
unsigned getCallingConv() const { return CallingConv; }
unsigned isVarArg() const { return IsVarArg; }
unsigned isTailCall() const { return IsTailCall; }
unsigned isInreg() const { return Inreg; }
/// Set this call to not be marked as a tail call. Normally setter
/// methods in SDNodes are unsafe because it breaks the CSE map,
/// but we don't include the tail call flag for calls so it's ok
/// in this case.
void setNotTailCall() { IsTailCall = false; }
SDValue getChain() const { return getOperand(0); }
SDValue getCallee() const { return getOperand(1); }
unsigned getNumArgs() const { return (getNumOperands() - 2) / 2; }
unsigned getNumFixedArgs() const {
if (isVarArg())
return NumFixedArgs;
else
return getNumArgs();
}
SDValue getArg(unsigned i) const { return getOperand(2+2*i); }
SDValue getArgFlagsVal(unsigned i) const {
return getOperand(3+2*i);
}
ISD::ArgFlagsTy getArgFlags(unsigned i) const {
return cast<ARG_FLAGSSDNode>(getArgFlagsVal(i).getNode())->getArgFlags();
}
unsigned getNumRetVals() const { return getNumValues() - 1; }
MVT getRetValType(unsigned i) const { return getValueType(i); }
static bool classof(const CallSDNode *) { return true; }
static bool classof(const SDNode *N) {
return N->getOpcode() == ISD::CALL;
}
};
/// VTSDNode - This class is used to represent MVT's, which are used /// VTSDNode - This class is used to represent MVT's, which are used
/// to parameterize some operations. /// to parameterize some operations.
class VTSDNode : public SDNode { class VTSDNode : public SDNode {
@ -2491,7 +2414,7 @@ typedef LoadSDNode LargestSDNode;
/// MostAlignedSDNode - The SDNode class with the greatest alignment /// MostAlignedSDNode - The SDNode class with the greatest alignment
/// requirement. /// requirement.
/// ///
typedef ARG_FLAGSSDNode MostAlignedSDNode; typedef GlobalAddressSDNode MostAlignedSDNode;
namespace ISD { namespace ISD {
/// isNormalLoad - Returns true if the specified node is a non-extending /// isNormalLoad - Returns true if the specified node is a non-extending

View File

@ -1091,16 +1091,27 @@ public:
// the SelectionDAGLowering code knows how to lower these. // the SelectionDAGLowering code knows how to lower these.
// //
/// LowerArguments - This hook must be implemented to indicate how we should /// LowerFormalArguments - This hook must be implemented to lower the
/// lower the arguments for the specified function, into the specified DAG. /// incoming (formal) arguments, described by the Ins array, into the
virtual void /// specified DAG. The implementation should fill in the InVals array
LowerArguments(Function &F, SelectionDAG &DAG, /// with legal-type argument values, and return the resulting token
SmallVectorImpl<SDValue>& ArgValues, DebugLoc dl); /// chain value.
///
virtual SDValue
LowerFormalArguments(SDValue Chain,
unsigned CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) {
assert(0 && "Not Implemented");
return SDValue(); // this is here to silence compiler errors
}
/// LowerCallTo - This hook lowers an abstract call to a function into an /// LowerCallTo - This function lowers an abstract call to a function into an
/// actual call. This returns a pair of operands. The first element is the /// actual call. This returns a pair of operands. The first element is the
/// return value for the function (if RetTy is not VoidTy). The second /// return value for the function (if RetTy is not VoidTy). The second
/// element is the outgoing token chain. /// element is the outgoing token chain. It calls LowerCall to do the actual
/// lowering.
struct ArgListEntry { struct ArgListEntry {
SDValue Node; SDValue Node;
const Type* Ty; const Type* Ty;
@ -1116,11 +1127,47 @@ public:
isSRet(false), isNest(false), isByVal(false), Alignment(0) { } isSRet(false), isNest(false), isByVal(false), Alignment(0) { }
}; };
typedef std::vector<ArgListEntry> ArgListTy; typedef std::vector<ArgListEntry> ArgListTy;
virtual std::pair<SDValue, SDValue> std::pair<SDValue, SDValue>
LowerCallTo(SDValue Chain, const Type *RetTy, bool RetSExt, bool RetZExt, LowerCallTo(SDValue Chain, const Type *RetTy, bool RetSExt, bool RetZExt,
bool isVarArg, bool isInreg, unsigned NumFixedArgs, bool isVarArg, bool isInreg, unsigned NumFixedArgs,
unsigned CallingConv, bool isTailCall, SDValue Callee, unsigned CallConv, bool isTailCall, bool isReturnValueUsed,
ArgListTy &Args, SelectionDAG &DAG, DebugLoc dl); SDValue Callee, ArgListTy &Args, SelectionDAG &DAG, DebugLoc dl);
/// LowerCall - This hook must be implemented to lower calls into the
/// the specified DAG. The outgoing arguments to the call are described
/// by the Outs array, and the values to be returned by the call are
/// described by the Ins array. The implementation should fill in the
/// InVals array with legal-type return values from the call, and return
/// the resulting token chain value.
///
/// The isTailCall flag here is normative. If it is true, the
/// implementation must emit a tail call. The
/// IsEligibleForTailCallOptimization hook should be used to catch
/// cases that cannot be handled.
///
virtual SDValue
LowerCall(SDValue Chain, SDValue Callee,
unsigned CallConv, bool isVarArg, bool isTailCall,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) {
assert(0 && "Not Implemented");
return SDValue(); // this is here to silence compiler errors
}
/// LowerReturn - This hook must be implemented to lower outgoing
/// return values, described by the Outs array, into the specified
/// DAG. The implementation should return the resulting token chain
/// value.
///
virtual SDValue
LowerReturn(SDValue Chain, unsigned CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
DebugLoc dl, SelectionDAG &DAG) {
assert(0 && "Not Implemented");
return SDValue(); // this is here to silence compiler errors
}
/// EmitTargetCodeForMemcpy - Emit target-specific code that performs a /// EmitTargetCodeForMemcpy - Emit target-specific code that performs a
/// memcpy. This can be used by targets to provide code sequences for cases /// memcpy. This can be used by targets to provide code sequences for cases
@ -1216,19 +1263,17 @@ public:
/// IsEligibleForTailCallOptimization - Check whether the call is eligible for /// IsEligibleForTailCallOptimization - Check whether the call is eligible for
/// tail call optimization. Targets which want to do tail call optimization /// tail call optimization. Targets which want to do tail call optimization
/// should override this function. /// should override this function.
virtual bool IsEligibleForTailCallOptimization(CallSDNode *Call, virtual bool
SDValue Ret, IsEligibleForTailCallOptimization(SDValue Callee,
SelectionDAG &DAG) const { unsigned CalleeCC,
bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
SelectionDAG& DAG) const {
// Conservative default: no calls are eligible.
return false; return false;
} }
/// CheckTailCallReturnConstraints - Check whether CALL node immediatly
/// preceeds the RET node and whether the return uses the result of the node
/// or is a void return. This function can be used by the target to determine
/// eligiblity of tail call optimization.
static bool CheckTailCallReturnConstraints(CallSDNode *TheCall, SDValue Ret);
/// GetPossiblePreceedingTailCall - Get preceeding TailCallNodeOpCode node if /// GetPossiblePreceedingTailCall - Get preceeding TailCallNodeOpCode node if
/// it exists. Skip a possible ISD::TokenFactor. /// it exists. Skip a possible ISD::TokenFactor.
static SDValue GetPossiblePreceedingTailCall(SDValue Chain, static SDValue GetPossiblePreceedingTailCall(SDValue Chain,

View File

@ -345,7 +345,6 @@ def vsetcc : SDNode<"ISD::VSETCC" , SDTSetCC>;
def brcond : SDNode<"ISD::BRCOND" , SDTBrcond, [SDNPHasChain]>; def brcond : SDNode<"ISD::BRCOND" , SDTBrcond, [SDNPHasChain]>;
def brind : SDNode<"ISD::BRIND" , SDTBrind, [SDNPHasChain]>; def brind : SDNode<"ISD::BRIND" , SDTBrind, [SDNPHasChain]>;
def br : SDNode<"ISD::BR" , SDTBr, [SDNPHasChain]>; def br : SDNode<"ISD::BR" , SDTBr, [SDNPHasChain]>;
def ret : SDNode<"ISD::RET" , SDTNone, [SDNPHasChain]>;
def trap : SDNode<"ISD::TRAP" , SDTNone, def trap : SDNode<"ISD::TRAP" , SDTNone,
[SDNPHasChain, SDNPSideEffect]>; [SDNPHasChain, SDNPSideEffect]>;

View File

@ -57,15 +57,16 @@ void CCState::MarkAllocated(unsigned Reg) {
UsedRegs[Reg/32] |= 1 << (Reg&31); UsedRegs[Reg/32] |= 1 << (Reg&31);
} }
/// AnalyzeFormalArguments - Analyze an ISD::FORMAL_ARGUMENTS node, /// AnalyzeFormalArguments - Analyze an array of argument values,
/// incorporating info about the formals into this state. /// incorporating info about the formals into this state.
void CCState::AnalyzeFormalArguments(SDNode *TheArgs, CCAssignFn Fn) { void
unsigned NumArgs = TheArgs->getNumValues()-1; CCState::AnalyzeFormalArguments(const SmallVectorImpl<ISD::InputArg> &Ins,
CCAssignFn Fn) {
unsigned NumArgs = Ins.size();
for (unsigned i = 0; i != NumArgs; ++i) { for (unsigned i = 0; i != NumArgs; ++i) {
MVT ArgVT = TheArgs->getValueType(i); MVT ArgVT = Ins[i].VT;
ISD::ArgFlagsTy ArgFlags = ISD::ArgFlagsTy ArgFlags = Ins[i].Flags;
cast<ARG_FLAGSSDNode>(TheArgs->getOperand(3+i))->getArgFlags();
if (Fn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, *this)) { if (Fn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, *this)) {
#ifndef NDEBUG #ifndef NDEBUG
cerr << "Formal argument #" << i << " has unhandled type " cerr << "Formal argument #" << i << " has unhandled type "
@ -76,14 +77,14 @@ void CCState::AnalyzeFormalArguments(SDNode *TheArgs, CCAssignFn Fn) {
} }
} }
/// AnalyzeReturn - Analyze the returned values of an ISD::RET node, /// AnalyzeReturn - Analyze the returned values of a return,
/// incorporating info about the result values into this state. /// incorporating info about the result values into this state.
void CCState::AnalyzeReturn(SDNode *TheRet, CCAssignFn Fn) { void CCState::AnalyzeReturn(const SmallVectorImpl<ISD::OutputArg> &Outs,
CCAssignFn Fn) {
// Determine which register each value should be copied into. // Determine which register each value should be copied into.
for (unsigned i = 0, e = TheRet->getNumOperands() / 2; i != e; ++i) { for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
MVT VT = TheRet->getOperand(i*2+1).getValueType(); MVT VT = Outs[i].Val.getValueType();
ISD::ArgFlagsTy ArgFlags = ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
cast<ARG_FLAGSSDNode>(TheRet->getOperand(i*2+2))->getArgFlags();
if (Fn(i, VT, VT, CCValAssign::Full, ArgFlags, *this)) { if (Fn(i, VT, VT, CCValAssign::Full, ArgFlags, *this)) {
#ifndef NDEBUG #ifndef NDEBUG
cerr << "Return operand #" << i << " has unhandled type " cerr << "Return operand #" << i << " has unhandled type "
@ -95,13 +96,14 @@ void CCState::AnalyzeReturn(SDNode *TheRet, CCAssignFn Fn) {
} }
/// AnalyzeCallOperands - Analyze an ISD::CALL node, incorporating info /// AnalyzeCallOperands - Analyze the outgoing arguments to a call,
/// about the passed values into this state. /// incorporating info about the passed values into this state.
void CCState::AnalyzeCallOperands(CallSDNode *TheCall, CCAssignFn Fn) { void CCState::AnalyzeCallOperands(const SmallVectorImpl<ISD::OutputArg> &Outs,
unsigned NumOps = TheCall->getNumArgs(); CCAssignFn Fn) {
unsigned NumOps = Outs.size();
for (unsigned i = 0; i != NumOps; ++i) { for (unsigned i = 0; i != NumOps; ++i) {
MVT ArgVT = TheCall->getArg(i).getValueType(); MVT ArgVT = Outs[i].Val.getValueType();
ISD::ArgFlagsTy ArgFlags = TheCall->getArgFlags(i); ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
if (Fn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, *this)) { if (Fn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, *this)) {
#ifndef NDEBUG #ifndef NDEBUG
cerr << "Call operand #" << i << " has unhandled type " cerr << "Call operand #" << i << " has unhandled type "
@ -131,14 +133,13 @@ void CCState::AnalyzeCallOperands(SmallVectorImpl<MVT> &ArgVTs,
} }
} }
/// AnalyzeCallResult - Analyze the return values of an ISD::CALL node, /// AnalyzeCallResult - Analyze the return values of a call,
/// incorporating info about the passed values into this state. /// incorporating info about the passed values into this state.
void CCState::AnalyzeCallResult(CallSDNode *TheCall, CCAssignFn Fn) { void CCState::AnalyzeCallResult(const SmallVectorImpl<ISD::InputArg> &Ins,
for (unsigned i = 0, e = TheCall->getNumRetVals(); i != e; ++i) { CCAssignFn Fn) {
MVT VT = TheCall->getRetValType(i); for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy(); MVT VT = Ins[i].VT;
if (TheCall->isInreg()) ISD::ArgFlagsTy Flags = Ins[i].Flags;
Flags.setInReg();
if (Fn(i, VT, VT, CCValAssign::Full, Flags, *this)) { if (Fn(i, VT, VT, CCValAssign::Full, Flags, *this)) {
#ifndef NDEBUG #ifndef NDEBUG
cerr << "Call result #" << i << " has unhandled type " cerr << "Call result #" << i << " has unhandled type "

View File

@ -823,11 +823,6 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
// special case should be done as part of making LegalizeDAG non-recursive. // special case should be done as part of making LegalizeDAG non-recursive.
SimpleFinishLegalizing = false; SimpleFinishLegalizing = false;
break; break;
case ISD::CALL:
// FIXME: Legalization for calls requires custom-lowering the call before
// legalizing the operands! (I haven't looked into precisely why.)
SimpleFinishLegalizing = false;
break;
case ISD::EXTRACT_ELEMENT: case ISD::EXTRACT_ELEMENT:
case ISD::FLT_ROUNDS_: case ISD::FLT_ROUNDS_:
case ISD::SADDO: case ISD::SADDO:
@ -849,7 +844,6 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
case ISD::TRAMPOLINE: case ISD::TRAMPOLINE:
case ISD::FRAMEADDR: case ISD::FRAMEADDR:
case ISD::RETURNADDR: case ISD::RETURNADDR:
case ISD::FORMAL_ARGUMENTS:
// These operations lie about being legal: when they claim to be legal, // These operations lie about being legal: when they claim to be legal,
// they should actually be custom-lowered. // they should actually be custom-lowered.
Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0)); Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0));
@ -887,7 +881,6 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
case ISD::BR_JT: case ISD::BR_JT:
case ISD::BR_CC: case ISD::BR_CC:
case ISD::BRCOND: case ISD::BRCOND:
case ISD::RET:
// Branches tweak the chain to include LastCALLSEQ_END // Branches tweak the chain to include LastCALLSEQ_END
Ops[0] = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Ops[0], Ops[0] = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Ops[0],
LastCALLSEQ_END); LastCALLSEQ_END);
@ -951,37 +944,7 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
cerr << "NODE: "; Node->dump(&DAG); cerr << "\n"; cerr << "NODE: "; Node->dump(&DAG); cerr << "\n";
#endif #endif
llvm_unreachable("Do not know how to legalize this operator!"); llvm_unreachable("Do not know how to legalize this operator!");
case ISD::CALL:
// The only option for this is to custom lower it.
Tmp3 = TLI.LowerOperation(Result.getValue(0), DAG);
assert(Tmp3.getNode() && "Target didn't custom lower this node!");
// A call within a calling sequence must be legalized to something
// other than the normal CALLSEQ_END. Violating this gets Legalize
// into an infinite loop.
assert ((!IsLegalizingCall ||
Node->getOpcode() != ISD::CALL ||
Tmp3.getNode()->getOpcode() != ISD::CALLSEQ_END) &&
"Nested CALLSEQ_START..CALLSEQ_END not supported.");
// The number of incoming and outgoing values should match; unless the final
// outgoing value is a flag.
assert((Tmp3.getNode()->getNumValues() == Result.getNode()->getNumValues() ||
(Tmp3.getNode()->getNumValues() == Result.getNode()->getNumValues() + 1 &&
Tmp3.getNode()->getValueType(Tmp3.getNode()->getNumValues() - 1) ==
MVT::Flag)) &&
"Lowering call/formal_arguments produced unexpected # results!");
// Since CALL/FORMAL_ARGUMENTS nodes produce multiple values, make sure to
// remember that we legalized all of them, so it doesn't get relegalized.
for (unsigned i = 0, e = Tmp3.getNode()->getNumValues(); i != e; ++i) {
if (Tmp3.getNode()->getValueType(i) == MVT::Flag)
continue;
Tmp1 = LegalizeOp(Tmp3.getValue(i));
if (Op.getResNo() == i)
Tmp2 = Tmp1;
AddLegalizedOperand(SDValue(Node, i), Tmp1);
}
return Tmp2;
case ISD::BUILD_VECTOR: case ISD::BUILD_VECTOR:
switch (TLI.getOperationAction(ISD::BUILD_VECTOR, Node->getValueType(0))) { switch (TLI.getOperationAction(ISD::BUILD_VECTOR, Node->getValueType(0))) {
default: llvm_unreachable("This action is not supported yet!"); default: llvm_unreachable("This action is not supported yet!");
@ -1905,7 +1868,9 @@ SDValue SelectionDAGLegalize::ExpandLibCall(RTLIB::Libcall LC, SDNode *Node,
const Type *RetTy = Node->getValueType(0).getTypeForMVT(); const Type *RetTy = Node->getValueType(0).getTypeForMVT();
std::pair<SDValue, SDValue> CallInfo = std::pair<SDValue, SDValue> CallInfo =
TLI.LowerCallTo(InChain, RetTy, isSigned, !isSigned, false, false, TLI.LowerCallTo(InChain, RetTy, isSigned, !isSigned, false, false,
0, CallingConv::C, false, Callee, Args, DAG, 0, CallingConv::C, false,
/*isReturnValueUsed=*/true,
Callee, Args, DAG,
Node->getDebugLoc()); Node->getDebugLoc());
// Legalize the call sequence, starting with the chain. This will advance // Legalize the call sequence, starting with the chain. This will advance
@ -2311,6 +2276,7 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node,
std::pair<SDValue, SDValue> CallResult = std::pair<SDValue, SDValue> CallResult =
TLI.LowerCallTo(Node->getOperand(0), Type::VoidTy, TLI.LowerCallTo(Node->getOperand(0), Type::VoidTy,
false, false, false, false, 0, CallingConv::C, false, false, false, false, false, 0, CallingConv::C, false,
/*isReturnValueUsed=*/true,
DAG.getExternalSymbol("abort", TLI.getPointerTy()), DAG.getExternalSymbol("abort", TLI.getPointerTy()),
Args, DAG, dl); Args, DAG, dl);
Results.push_back(CallResult.second); Results.push_back(CallResult.second);

View File

@ -1019,7 +1019,9 @@ SDValue DAGTypeLegalizer::MakeLibCall(RTLIB::Libcall LC, MVT RetVT,
const Type *RetTy = RetVT.getTypeForMVT(); const Type *RetTy = RetVT.getTypeForMVT();
std::pair<SDValue,SDValue> CallInfo = std::pair<SDValue,SDValue> CallInfo =
TLI.LowerCallTo(DAG.getEntryNode(), RetTy, isSigned, !isSigned, false, TLI.LowerCallTo(DAG.getEntryNode(), RetTy, isSigned, !isSigned, false,
false, 0, CallingConv::C, false, Callee, Args, DAG, dl); false, 0, CallingConv::C, false,
/*isReturnValueUsed=*/true,
Callee, Args, DAG, dl);
return CallInfo.first; return CallInfo.first;
} }

View File

@ -20,8 +20,8 @@
// type i8 which must be promoted. // type i8 which must be promoted.
// //
// This does not legalize vector manipulations like ISD::BUILD_VECTOR, // This does not legalize vector manipulations like ISD::BUILD_VECTOR,
// or operations that happen to take a vector which are custom-lowered like // or operations that happen to take a vector which are custom-lowered;
// ISD::CALL; the legalization for such operations never produces nodes // the legalization for such operations never produces nodes
// with illegal types, so it's okay to put off legalizing them until // with illegal types, so it's okay to put off legalizing them until
// SelectionDAG::Legalize runs. // SelectionDAG::Legalize runs.
// //

View File

@ -366,9 +366,6 @@ static void AddNodeIDCustom(FoldingSetNodeID &ID, const SDNode *N) {
case ISD::ExternalSymbol: case ISD::ExternalSymbol:
llvm_unreachable("Should only be used on nodes with operands"); llvm_unreachable("Should only be used on nodes with operands");
default: break; // Normal nodes don't need extra info. default: break; // Normal nodes don't need extra info.
case ISD::ARG_FLAGS:
ID.AddInteger(cast<ARG_FLAGSSDNode>(N)->getArgFlags().getRawBits());
break;
case ISD::TargetConstant: case ISD::TargetConstant:
case ISD::Constant: case ISD::Constant:
ID.AddPointer(cast<ConstantSDNode>(N)->getConstantIntValue()); ID.AddPointer(cast<ConstantSDNode>(N)->getConstantIntValue());
@ -430,12 +427,6 @@ static void AddNodeIDCustom(FoldingSetNodeID &ID, const SDNode *N) {
ID.AddInteger(CP->getTargetFlags()); ID.AddInteger(CP->getTargetFlags());
break; break;
} }
case ISD::CALL: {
const CallSDNode *Call = cast<CallSDNode>(N);
ID.AddInteger(Call->getCallingConv());
ID.AddInteger(Call->isVarArg());
break;
}
case ISD::LOAD: { case ISD::LOAD: {
const LoadSDNode *LD = cast<LoadSDNode>(N); const LoadSDNode *LD = cast<LoadSDNode>(N);
ID.AddInteger(LD->getMemoryVT().getRawBits()); ID.AddInteger(LD->getMemoryVT().getRawBits());
@ -1103,20 +1094,6 @@ SDValue SelectionDAG::getBasicBlock(MachineBasicBlock *MBB) {
return SDValue(N, 0); return SDValue(N, 0);
} }
SDValue SelectionDAG::getArgFlags(ISD::ArgFlagsTy Flags) {
FoldingSetNodeID ID;
AddNodeIDNode(ID, ISD::ARG_FLAGS, getVTList(MVT::Other), 0, 0);
ID.AddInteger(Flags.getRawBits());
void *IP = 0;
if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
return SDValue(E, 0);
SDNode *N = NodeAllocator.Allocate<ARG_FLAGSSDNode>();
new (N) ARG_FLAGSSDNode(Flags);
CSEMap.InsertNode(N, IP);
AllNodes.push_back(N);
return SDValue(N, 0);
}
SDValue SelectionDAG::getValueType(MVT VT) { SDValue SelectionDAG::getValueType(MVT VT) {
if (VT.isSimple() && (unsigned)VT.getSimpleVT() >= ValueTypeNodes.size()) if (VT.isSimple() && (unsigned)VT.getSimpleVT() >= ValueTypeNodes.size())
ValueTypeNodes.resize(VT.getSimpleVT()+1); ValueTypeNodes.resize(VT.getSimpleVT()+1);
@ -2995,6 +2972,29 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, MVT VT,
return getNode(Opcode, DL, VT, Ops, 5); return getNode(Opcode, DL, VT, Ops, 5);
} }
/// getStackArgumentTokenFactor - Compute a TokenFactor to force all
/// the incoming stack arguments to be loaded from the stack.
SDValue SelectionDAG::getStackArgumentTokenFactor(SDValue Chain) {
SmallVector<SDValue, 8> ArgChains;
// Include the original chain at the beginning of the list. When this is
// used by target LowerCall hooks, this helps legalize find the
// CALLSEQ_BEGIN node.
ArgChains.push_back(Chain);
// Add a chain value for each stack argument.
for (SDNode::use_iterator U = getEntryNode().getNode()->use_begin(),
UE = getEntryNode().getNode()->use_end(); U != UE; ++U)
if (LoadSDNode *L = dyn_cast<LoadSDNode>(*U))
if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr()))
if (FI->getIndex() < 0)
ArgChains.push_back(SDValue(L, 1));
// Build a tokenfactor for all the chains.
return getNode(ISD::TokenFactor, Chain.getDebugLoc(), MVT::Other,
&ArgChains[0], ArgChains.size());
}
/// getMemsetValue - Vectorized representation of the memset value /// getMemsetValue - Vectorized representation of the memset value
/// operand. /// operand.
static SDValue getMemsetValue(SDValue Value, MVT VT, SelectionDAG &DAG, static SDValue getMemsetValue(SDValue Value, MVT VT, SelectionDAG &DAG,
@ -3386,6 +3386,7 @@ SDValue SelectionDAG::getMemcpy(SDValue Chain, DebugLoc dl, SDValue Dst,
std::pair<SDValue,SDValue> CallResult = std::pair<SDValue,SDValue> CallResult =
TLI.LowerCallTo(Chain, Type::VoidTy, TLI.LowerCallTo(Chain, Type::VoidTy,
false, false, false, false, 0, CallingConv::C, false, false, false, false, false, 0, CallingConv::C, false,
/*isReturnValueUsed=*/false,
getExternalSymbol(TLI.getLibcallName(RTLIB::MEMCPY), getExternalSymbol(TLI.getLibcallName(RTLIB::MEMCPY),
TLI.getPointerTy()), TLI.getPointerTy()),
Args, *this, dl); Args, *this, dl);
@ -3433,6 +3434,7 @@ SDValue SelectionDAG::getMemmove(SDValue Chain, DebugLoc dl, SDValue Dst,
std::pair<SDValue,SDValue> CallResult = std::pair<SDValue,SDValue> CallResult =
TLI.LowerCallTo(Chain, Type::VoidTy, TLI.LowerCallTo(Chain, Type::VoidTy,
false, false, false, false, 0, CallingConv::C, false, false, false, false, false, 0, CallingConv::C, false,
/*isReturnValueUsed=*/false,
getExternalSymbol(TLI.getLibcallName(RTLIB::MEMMOVE), getExternalSymbol(TLI.getLibcallName(RTLIB::MEMMOVE),
TLI.getPointerTy()), TLI.getPointerTy()),
Args, *this, dl); Args, *this, dl);
@ -3486,6 +3488,7 @@ SDValue SelectionDAG::getMemset(SDValue Chain, DebugLoc dl, SDValue Dst,
std::pair<SDValue,SDValue> CallResult = std::pair<SDValue,SDValue> CallResult =
TLI.LowerCallTo(Chain, Type::VoidTy, TLI.LowerCallTo(Chain, Type::VoidTy,
false, false, false, false, 0, CallingConv::C, false, false, false, false, false, 0, CallingConv::C, false,
/*isReturnValueUsed=*/false,
getExternalSymbol(TLI.getLibcallName(RTLIB::MEMSET), getExternalSymbol(TLI.getLibcallName(RTLIB::MEMSET),
TLI.getPointerTy()), TLI.getPointerTy()),
Args, *this, dl); Args, *this, dl);
@ -3615,32 +3618,6 @@ SelectionDAG::getMemIntrinsicNode(unsigned Opcode, DebugLoc dl, SDVTList VTList,
return SDValue(N, 0); return SDValue(N, 0);
} }
SDValue
SelectionDAG::getCall(unsigned CallingConv, DebugLoc dl, bool IsVarArgs,
bool IsTailCall, bool IsInreg, SDVTList VTs,
const SDValue *Operands, unsigned NumOperands,
unsigned NumFixedArgs) {
// Do not include isTailCall in the folding set profile.
FoldingSetNodeID ID;
AddNodeIDNode(ID, ISD::CALL, VTs, Operands, NumOperands);
ID.AddInteger(CallingConv);
ID.AddInteger(IsVarArgs);
void *IP = 0;
if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
// Instead of including isTailCall in the folding set, we just
// set the flag of the existing node.
if (!IsTailCall)
cast<CallSDNode>(E)->setNotTailCall();
return SDValue(E, 0);
}
SDNode *N = NodeAllocator.Allocate<CallSDNode>();
new (N) CallSDNode(CallingConv, dl, IsVarArgs, IsTailCall, IsInreg,
VTs, Operands, NumOperands, NumFixedArgs);
CSEMap.InsertNode(N, IP);
AllNodes.push_back(N);
return SDValue(N, 0);
}
SDValue SDValue
SelectionDAG::getLoad(ISD::MemIndexedMode AM, DebugLoc dl, SelectionDAG::getLoad(ISD::MemIndexedMode AM, DebugLoc dl,
ISD::LoadExtType ExtType, MVT VT, SDValue Chain, ISD::LoadExtType ExtType, MVT VT, SDValue Chain,
@ -5206,7 +5183,6 @@ std::string SDNode::getOperationName(const SelectionDAG *G) const {
case ISD::AssertZext: return "AssertZext"; case ISD::AssertZext: return "AssertZext";
case ISD::BasicBlock: return "BasicBlock"; case ISD::BasicBlock: return "BasicBlock";
case ISD::ARG_FLAGS: return "ArgFlags";
case ISD::VALUETYPE: return "ValueType"; case ISD::VALUETYPE: return "ValueType";
case ISD::Register: return "Register"; case ISD::Register: return "Register";
@ -5254,8 +5230,6 @@ std::string SDNode::getOperationName(const SelectionDAG *G) const {
case ISD::EH_LABEL: return "eh_label"; case ISD::EH_LABEL: return "eh_label";
case ISD::DECLARE: return "declare"; case ISD::DECLARE: return "declare";
case ISD::HANDLENODE: return "handlenode"; case ISD::HANDLENODE: return "handlenode";
case ISD::FORMAL_ARGUMENTS: return "formal_arguments";
case ISD::CALL: return "call";
// Unary operators // Unary operators
case ISD::FABS: return "fabs"; case ISD::FABS: return "fabs";
@ -5364,7 +5338,6 @@ std::string SDNode::getOperationName(const SelectionDAG *G) const {
case ISD::BR_JT: return "br_jt"; case ISD::BR_JT: return "br_jt";
case ISD::BRCOND: return "brcond"; case ISD::BRCOND: return "brcond";
case ISD::BR_CC: return "br_cc"; case ISD::BR_CC: return "br_cc";
case ISD::RET: return "ret";
case ISD::CALLSEQ_START: return "callseq_start"; case ISD::CALLSEQ_START: return "callseq_start";
case ISD::CALLSEQ_END: return "callseq_end"; case ISD::CALLSEQ_END: return "callseq_end";
@ -5566,8 +5539,6 @@ void SDNode::print_details(raw_ostream &OS, const SelectionDAG *G) const {
OS << "<" << M->MO.getValue() << ":" << M->MO.getOffset() << ">"; OS << "<" << M->MO.getValue() << ":" << M->MO.getOffset() << ">";
else else
OS << "<null:" << M->MO.getOffset() << ">"; OS << "<null:" << M->MO.getOffset() << ">";
} else if (const ARG_FLAGSSDNode *N = dyn_cast<ARG_FLAGSSDNode>(this)) {
OS << N->getArgFlags().getArgFlagsString();
} else if (const VTSDNode *N = dyn_cast<VTSDNode>(this)) { } else if (const VTSDNode *N = dyn_cast<VTSDNode>(this)) {
OS << ":" << N->getVT().getMVTString(); OS << ":" << N->getVT().getMVTString();
} }

View File

@ -17,6 +17,7 @@
#include "llvm/ADT/SmallSet.h" #include "llvm/ADT/SmallSet.h"
#include "llvm/Analysis/AliasAnalysis.h" #include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Constants.h" #include "llvm/Constants.h"
#include "llvm/Constants.h"
#include "llvm/CallingConv.h" #include "llvm/CallingConv.h"
#include "llvm/DerivedTypes.h" #include "llvm/DerivedTypes.h"
#include "llvm/Function.h" #include "llvm/Function.h"
@ -753,6 +754,7 @@ void SelectionDAGLowering::clear() {
PendingExports.clear(); PendingExports.clear();
DAG.clear(); DAG.clear();
CurDebugLoc = DebugLoc::getUnknownLoc(); CurDebugLoc = DebugLoc::getUnknownLoc();
HasTailCall = false;
} }
/// getRoot - Return the current virtual root of the Selection DAG, /// getRoot - Return the current virtual root of the Selection DAG,
@ -934,14 +936,8 @@ SDValue SelectionDAGLowering::getValue(const Value *V) {
void SelectionDAGLowering::visitRet(ReturnInst &I) { void SelectionDAGLowering::visitRet(ReturnInst &I) {
if (I.getNumOperands() == 0) { SDValue Chain = getControlRoot();
DAG.setRoot(DAG.getNode(ISD::RET, getCurDebugLoc(), SmallVector<ISD::OutputArg, 8> Outs;
MVT::Other, getControlRoot()));
return;
}
SmallVector<SDValue, 8> NewValues;
NewValues.push_back(getControlRoot());
for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) { for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
SmallVector<MVT, 4> ValueVTs; SmallVector<MVT, 4> ValueVTs;
ComputeValueVTs(TLI, I.getOperand(i)->getType(), ValueVTs); ComputeValueVTs(TLI, I.getOperand(i)->getType(), ValueVTs);
@ -988,14 +984,16 @@ void SelectionDAGLowering::visitRet(ReturnInst &I) {
else if (F->paramHasAttr(0, Attribute::ZExt)) else if (F->paramHasAttr(0, Attribute::ZExt))
Flags.setZExt(); Flags.setZExt();
for (unsigned i = 0; i < NumParts; ++i) { for (unsigned i = 0; i < NumParts; ++i)
NewValues.push_back(Parts[i]); Outs.push_back(ISD::OutputArg(Flags, Parts[i], /*isfixed=*/true));
NewValues.push_back(DAG.getArgFlags(Flags));
}
} }
} }
DAG.setRoot(DAG.getNode(ISD::RET, getCurDebugLoc(), MVT::Other,
&NewValues[0], NewValues.size())); bool isVarArg = DAG.getMachineFunction().getFunction()->isVarArg();
unsigned CallConv = DAG.getMachineFunction().getFunction()->getCallingConv();
Chain = TLI.LowerReturn(Chain, CallConv, isVarArg,
Outs, getCurDebugLoc(), DAG);
DAG.setRoot(Chain);
} }
/// CopyToExportRegsIfNeeded - If the given value has virtual registers /// CopyToExportRegsIfNeeded - If the given value has virtual registers
@ -4346,9 +4344,76 @@ SelectionDAGLowering::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) {
} }
} }
/// Test if the given instruction is in a position to be optimized
/// with a tail-call. This roughly means that it's in a block with
/// a return and there's nothing that needs to be scheduled
/// between it and the return.
///
/// This function only tests target-independent requirements.
/// For target-dependent requirements, a target should override
/// TargetLowering::IsEligibleForTailCallOptimization.
///
static bool
isInTailCallPosition(const Instruction *I, Attributes RetAttr,
const TargetLowering &TLI) {
const BasicBlock *ExitBB = I->getParent();
const TerminatorInst *Term = ExitBB->getTerminator();
const ReturnInst *Ret = dyn_cast<ReturnInst>(Term);
const Function *F = ExitBB->getParent();
// The block must end in a return statement or an unreachable.
if (!Ret && !isa<UnreachableInst>(Term)) return false;
// If I will have a chain, make sure no other instruction that will have a
// chain interposes between I and the return.
if (I->mayHaveSideEffects() || I->mayReadFromMemory() ||
!I->isSafeToSpeculativelyExecute())
for (BasicBlock::const_iterator BBI = prior(prior(ExitBB->end())); ;
--BBI) {
if (&*BBI == I)
break;
if (BBI->mayHaveSideEffects() || BBI->mayReadFromMemory() ||
!BBI->isSafeToSpeculativelyExecute())
return false;
}
// If the block ends with a void return or unreachable, it doesn't matter
// what the call's return type is.
if (!Ret || Ret->getNumOperands() == 0) return true;
// Conservatively require the attributes of the call to match those of
// the return.
if (F->getAttributes().getRetAttributes() != RetAttr)
return false;
// Otherwise, make sure the unmodified return value of I is the return value.
for (const Instruction *U = dyn_cast<Instruction>(Ret->getOperand(0)); ;
U = dyn_cast<Instruction>(U->getOperand(0))) {
if (!U)
return false;
if (!U->hasOneUse())
return false;
if (U == I)
break;
// Check for a truly no-op truncate.
if (isa<TruncInst>(U) &&
TLI.isTruncateFree(U->getOperand(0)->getType(), U->getType()))
continue;
// Check for a truly no-op bitcast.
if (isa<BitCastInst>(U) &&
(U->getOperand(0)->getType() == U->getType() ||
(isa<PointerType>(U->getOperand(0)->getType()) &&
isa<PointerType>(U->getType()))))
continue;
// Otherwise it's not a true no-op.
return false;
}
return true;
}
void SelectionDAGLowering::LowerCallTo(CallSite CS, SDValue Callee, void SelectionDAGLowering::LowerCallTo(CallSite CS, SDValue Callee,
bool IsTailCall, bool isTailCall,
MachineBasicBlock *LandingPad) { MachineBasicBlock *LandingPad) {
const PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType()); const PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
const FunctionType *FTy = cast<FunctionType>(PT->getElementType()); const FunctionType *FTy = cast<FunctionType>(PT->getElementType());
@ -4358,8 +4423,9 @@ void SelectionDAGLowering::LowerCallTo(CallSite CS, SDValue Callee,
TargetLowering::ArgListTy Args; TargetLowering::ArgListTy Args;
TargetLowering::ArgListEntry Entry; TargetLowering::ArgListEntry Entry;
Args.reserve(CS.arg_size()); Args.reserve(CS.arg_size());
unsigned j = 1;
for (CallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end(); for (CallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
i != e; ++i) { i != e; ++i, ++j) {
SDValue ArgNode = getValue(*i); SDValue ArgNode = getValue(*i);
Entry.Node = ArgNode; Entry.Ty = (*i)->getType(); Entry.Node = ArgNode; Entry.Ty = (*i)->getType();
@ -4385,17 +4451,38 @@ void SelectionDAGLowering::LowerCallTo(CallSite CS, SDValue Callee,
getControlRoot(), BeginLabel)); getControlRoot(), BeginLabel));
} }
// Check if target-independent constraints permit a tail call here.
// Target-dependent constraints are checked within TLI.LowerCallTo.
if (isTailCall &&
!isInTailCallPosition(CS.getInstruction(),
CS.getAttributes().getRetAttributes(),
TLI))
isTailCall = false;
std::pair<SDValue,SDValue> Result = std::pair<SDValue,SDValue> Result =
TLI.LowerCallTo(getRoot(), CS.getType(), TLI.LowerCallTo(getRoot(), CS.getType(),
CS.paramHasAttr(0, Attribute::SExt), CS.paramHasAttr(0, Attribute::SExt),
CS.paramHasAttr(0, Attribute::ZExt), FTy->isVarArg(), CS.paramHasAttr(0, Attribute::ZExt), FTy->isVarArg(),
CS.paramHasAttr(0, Attribute::InReg), FTy->getNumParams(), CS.paramHasAttr(0, Attribute::InReg), FTy->getNumParams(),
CS.getCallingConv(), CS.getCallingConv(),
IsTailCall && PerformTailCallOpt, isTailCall,
!CS.getInstruction()->use_empty(),
Callee, Args, DAG, getCurDebugLoc()); Callee, Args, DAG, getCurDebugLoc());
if (CS.getType() != Type::VoidTy) assert((isTailCall || CS.getType() == Type::VoidTy ||
Result.first.getNode()) &&
"Non-null value expected with non-void non-tail call!");
assert((isTailCall || Result.second.getNode()) &&
"Non-null chain expected with non-tail call!");
assert((Result.second.getNode() || !Result.first.getNode()) &&
"Null value expected with tail call!");
if (Result.first.getNode())
setValue(CS.getInstruction(), Result.first); setValue(CS.getInstruction(), Result.first);
DAG.setRoot(Result.second); // As a special case, a null chain means that a tail call has
// been emitted and the DAG root is already updated.
if (Result.second.getNode())
DAG.setRoot(Result.second);
else
HasTailCall = true;
if (LandingPad && MMI) { if (LandingPad && MMI) {
// Insert a label at the end of the invoke call to mark the try range. This // Insert a label at the end of the invoke call to mark the try range. This
@ -4484,7 +4571,12 @@ void SelectionDAGLowering::visitCall(CallInst &I) {
else else
Callee = DAG.getExternalSymbol(RenameFn, TLI.getPointerTy()); Callee = DAG.getExternalSymbol(RenameFn, TLI.getPointerTy());
LowerCallTo(&I, Callee, I.isTailCall()); // Check if we can potentially perform a tail call. More detailed
// checking is be done within LowerCallTo, after more information
// about the call is known.
bool isTailCall = PerformTailCallOpt && I.isTailCall();
LowerCallTo(&I, Callee, isTailCall);
} }
@ -5431,13 +5523,18 @@ void SelectionDAGLowering::visitMalloc(MallocInst &I) {
Entry.Ty = TLI.getTargetData()->getIntPtrType(); Entry.Ty = TLI.getTargetData()->getIntPtrType();
Args.push_back(Entry); Args.push_back(Entry);
bool isTailCall = PerformTailCallOpt &&
isInTailCallPosition(&I, Attribute::None, TLI);
std::pair<SDValue,SDValue> Result = std::pair<SDValue,SDValue> Result =
TLI.LowerCallTo(getRoot(), I.getType(), false, false, false, false, TLI.LowerCallTo(getRoot(), I.getType(), false, false, false, false,
0, CallingConv::C, PerformTailCallOpt, 0, CallingConv::C, isTailCall,
/*isReturnValueUsed=*/true,
DAG.getExternalSymbol("malloc", IntPtr), DAG.getExternalSymbol("malloc", IntPtr),
Args, DAG, getCurDebugLoc()); Args, DAG, getCurDebugLoc());
setValue(&I, Result.first); // Pointers always fit in registers if (Result.first.getNode())
DAG.setRoot(Result.second); setValue(&I, Result.first); // Pointers always fit in registers
if (Result.second.getNode())
DAG.setRoot(Result.second);
} }
void SelectionDAGLowering::visitFree(FreeInst &I) { void SelectionDAGLowering::visitFree(FreeInst &I) {
@ -5447,12 +5544,16 @@ void SelectionDAGLowering::visitFree(FreeInst &I) {
Entry.Ty = TLI.getTargetData()->getIntPtrType(); Entry.Ty = TLI.getTargetData()->getIntPtrType();
Args.push_back(Entry); Args.push_back(Entry);
MVT IntPtr = TLI.getPointerTy(); MVT IntPtr = TLI.getPointerTy();
bool isTailCall = PerformTailCallOpt &&
isInTailCallPosition(&I, Attribute::None, TLI);
std::pair<SDValue,SDValue> Result = std::pair<SDValue,SDValue> Result =
TLI.LowerCallTo(getRoot(), Type::VoidTy, false, false, false, false, TLI.LowerCallTo(getRoot(), Type::VoidTy, false, false, false, false,
0, CallingConv::C, PerformTailCallOpt, 0, CallingConv::C, isTailCall,
/*isReturnValueUsed=*/true,
DAG.getExternalSymbol("free", IntPtr), Args, DAG, DAG.getExternalSymbol("free", IntPtr), Args, DAG,
getCurDebugLoc()); getCurDebugLoc());
DAG.setRoot(Result.second); if (Result.second.getNode())
DAG.setRoot(Result.second);
} }
void SelectionDAGLowering::visitVAStart(CallInst &I) { void SelectionDAGLowering::visitVAStart(CallInst &I) {
@ -5486,154 +5587,24 @@ void SelectionDAGLowering::visitVACopy(CallInst &I) {
DAG.getSrcValue(I.getOperand(2)))); DAG.getSrcValue(I.getOperand(2))));
} }
/// TargetLowering::LowerArguments - This is the default LowerArguments
/// implementation, which just inserts a FORMAL_ARGUMENTS node. FIXME: When all
/// targets are migrated to using FORMAL_ARGUMENTS, this hook should be
/// integrated into SDISel.
void TargetLowering::LowerArguments(Function &F, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &ArgValues,
DebugLoc dl) {
// Add CC# and isVararg as operands to the FORMAL_ARGUMENTS node.
SmallVector<SDValue, 3+16> Ops;
Ops.push_back(DAG.getRoot());
Ops.push_back(DAG.getConstant(F.getCallingConv(), getPointerTy()));
Ops.push_back(DAG.getConstant(F.isVarArg(), getPointerTy()));
// Add one result value for each formal argument.
SmallVector<MVT, 16> RetVals;
unsigned j = 1;
for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end();
I != E; ++I, ++j) {
SmallVector<MVT, 4> ValueVTs;
ComputeValueVTs(*this, I->getType(), ValueVTs);
for (unsigned Value = 0, NumValues = ValueVTs.size();
Value != NumValues; ++Value) {
MVT VT = ValueVTs[Value];
const Type *ArgTy = VT.getTypeForMVT();
ISD::ArgFlagsTy Flags;
unsigned OriginalAlignment =
getTargetData()->getABITypeAlignment(ArgTy);
if (F.paramHasAttr(j, Attribute::ZExt))
Flags.setZExt();
if (F.paramHasAttr(j, Attribute::SExt))
Flags.setSExt();
if (F.paramHasAttr(j, Attribute::InReg))
Flags.setInReg();
if (F.paramHasAttr(j, Attribute::StructRet))
Flags.setSRet();
if (F.paramHasAttr(j, Attribute::ByVal)) {
Flags.setByVal();
const PointerType *Ty = cast<PointerType>(I->getType());
const Type *ElementTy = Ty->getElementType();
unsigned FrameAlign = getByValTypeAlignment(ElementTy);
unsigned FrameSize = getTargetData()->getTypeAllocSize(ElementTy);
// For ByVal, alignment should be passed from FE. BE will guess if
// this info is not there but there are cases it cannot get right.
if (F.getParamAlignment(j))
FrameAlign = F.getParamAlignment(j);
Flags.setByValAlign(FrameAlign);
Flags.setByValSize(FrameSize);
}
if (F.paramHasAttr(j, Attribute::Nest))
Flags.setNest();
Flags.setOrigAlign(OriginalAlignment);
MVT RegisterVT = getRegisterType(VT);
unsigned NumRegs = getNumRegisters(VT);
for (unsigned i = 0; i != NumRegs; ++i) {
RetVals.push_back(RegisterVT);
ISD::ArgFlagsTy MyFlags = Flags;
if (NumRegs > 1 && i == 0)
MyFlags.setSplit();
// if it isn't first piece, alignment must be 1
else if (i > 0)
MyFlags.setOrigAlign(1);
Ops.push_back(DAG.getArgFlags(MyFlags));
}
}
}
RetVals.push_back(MVT::Other);
// Create the node.
SDNode *Result = DAG.getNode(ISD::FORMAL_ARGUMENTS, dl,
DAG.getVTList(&RetVals[0], RetVals.size()),
&Ops[0], Ops.size()).getNode();
// Prelower FORMAL_ARGUMENTS. This isn't required for functionality, but
// allows exposing the loads that may be part of the argument access to the
// first DAGCombiner pass.
SDValue TmpRes = LowerOperation(SDValue(Result, 0), DAG);
// The number of results should match up, except that the lowered one may have
// an extra flag result.
assert((Result->getNumValues() == TmpRes.getNode()->getNumValues() ||
(Result->getNumValues()+1 == TmpRes.getNode()->getNumValues() &&
TmpRes.getValue(Result->getNumValues()).getValueType() == MVT::Flag))
&& "Lowering produced unexpected number of results!");
// The FORMAL_ARGUMENTS node itself is likely no longer needed.
if (Result != TmpRes.getNode() && Result->use_empty()) {
HandleSDNode Dummy(DAG.getRoot());
DAG.RemoveDeadNode(Result);
}
Result = TmpRes.getNode();
unsigned NumArgRegs = Result->getNumValues() - 1;
DAG.setRoot(SDValue(Result, NumArgRegs));
// Set up the return result vector.
unsigned i = 0;
unsigned Idx = 1;
for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E;
++I, ++Idx) {
SmallVector<MVT, 4> ValueVTs;
ComputeValueVTs(*this, I->getType(), ValueVTs);
for (unsigned Value = 0, NumValues = ValueVTs.size();
Value != NumValues; ++Value) {
MVT VT = ValueVTs[Value];
MVT PartVT = getRegisterType(VT);
unsigned NumParts = getNumRegisters(VT);
SmallVector<SDValue, 4> Parts(NumParts);
for (unsigned j = 0; j != NumParts; ++j)
Parts[j] = SDValue(Result, i++);
ISD::NodeType AssertOp = ISD::DELETED_NODE;
if (F.paramHasAttr(Idx, Attribute::SExt))
AssertOp = ISD::AssertSext;
else if (F.paramHasAttr(Idx, Attribute::ZExt))
AssertOp = ISD::AssertZext;
ArgValues.push_back(getCopyFromParts(DAG, dl, &Parts[0], NumParts,
PartVT, VT, AssertOp));
}
}
assert(i == NumArgRegs && "Argument register count mismatch!");
}
/// TargetLowering::LowerCallTo - This is the default LowerCallTo /// TargetLowering::LowerCallTo - This is the default LowerCallTo
/// implementation, which just inserts an ISD::CALL node, which is later custom /// implementation, which just calls LowerCall.
/// lowered by the target to something concrete. FIXME: When all targets are /// FIXME: When all targets are
/// migrated to using ISD::CALL, this hook should be integrated into SDISel. /// migrated to using LowerCall, this hook should be integrated into SDISel.
std::pair<SDValue, SDValue> std::pair<SDValue, SDValue>
TargetLowering::LowerCallTo(SDValue Chain, const Type *RetTy, TargetLowering::LowerCallTo(SDValue Chain, const Type *RetTy,
bool RetSExt, bool RetZExt, bool isVarArg, bool RetSExt, bool RetZExt, bool isVarArg,
bool isInreg, unsigned NumFixedArgs, bool isInreg, unsigned NumFixedArgs,
unsigned CallingConv, bool isTailCall, unsigned CallConv, bool isTailCall,
bool isReturnValueUsed,
SDValue Callee, SDValue Callee,
ArgListTy &Args, SelectionDAG &DAG, DebugLoc dl) { ArgListTy &Args, SelectionDAG &DAG, DebugLoc dl) {
assert((!isTailCall || PerformTailCallOpt) && assert((!isTailCall || PerformTailCallOpt) &&
"isTailCall set when tail-call optimizations are disabled!"); "isTailCall set when tail-call optimizations are disabled!");
SmallVector<SDValue, 32> Ops;
Ops.push_back(Chain); // Op#0 - Chain
Ops.push_back(Callee);
// Handle all of the outgoing arguments. // Handle all of the outgoing arguments.
SmallVector<ISD::OutputArg, 32> Outs;
for (unsigned i = 0, e = Args.size(); i != e; ++i) { for (unsigned i = 0, e = Args.size(); i != e; ++i) {
SmallVector<MVT, 4> ValueVTs; SmallVector<MVT, 4> ValueVTs;
ComputeValueVTs(*this, Args[i].Ty, ValueVTs); ComputeValueVTs(*this, Args[i].Ty, ValueVTs);
@ -5684,75 +5655,92 @@ TargetLowering::LowerCallTo(SDValue Chain, const Type *RetTy,
getCopyToParts(DAG, dl, Op, &Parts[0], NumParts, PartVT, ExtendKind); getCopyToParts(DAG, dl, Op, &Parts[0], NumParts, PartVT, ExtendKind);
for (unsigned i = 0; i != NumParts; ++i) { for (unsigned j = 0; j != NumParts; ++j) {
// if it isn't first piece, alignment must be 1 // if it isn't first piece, alignment must be 1
ISD::ArgFlagsTy MyFlags = Flags; ISD::OutputArg MyFlags(Flags, Parts[j], i < NumFixedArgs);
if (NumParts > 1 && i == 0) if (NumParts > 1 && j == 0)
MyFlags.setSplit(); MyFlags.Flags.setSplit();
else if (i != 0) else if (j != 0)
MyFlags.setOrigAlign(1); MyFlags.Flags.setOrigAlign(1);
Ops.push_back(Parts[i]); Outs.push_back(MyFlags);
Ops.push_back(DAG.getArgFlags(MyFlags));
} }
} }
} }
// Figure out the result value types. We start by making a list of // Handle the incoming return values from the call.
// the potentially illegal return value types. SmallVector<ISD::InputArg, 32> Ins;
SmallVector<MVT, 4> LoweredRetTys;
SmallVector<MVT, 4> RetTys; SmallVector<MVT, 4> RetTys;
ComputeValueVTs(*this, RetTy, RetTys); ComputeValueVTs(*this, RetTy, RetTys);
// Then we translate that to a list of legal types.
for (unsigned I = 0, E = RetTys.size(); I != E; ++I) { for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
MVT VT = RetTys[I]; MVT VT = RetTys[I];
MVT RegisterVT = getRegisterType(VT); MVT RegisterVT = getRegisterType(VT);
unsigned NumRegs = getNumRegisters(VT); unsigned NumRegs = getNumRegisters(VT);
for (unsigned i = 0; i != NumRegs; ++i) for (unsigned i = 0; i != NumRegs; ++i) {
LoweredRetTys.push_back(RegisterVT); ISD::InputArg MyFlags;
} MyFlags.VT = RegisterVT;
MyFlags.Used = isReturnValueUsed;
LoweredRetTys.push_back(MVT::Other); // Always has a chain. if (RetSExt)
MyFlags.Flags.setSExt();
// Create the CALL node. if (RetZExt)
SDValue Res = DAG.getCall(CallingConv, dl, MyFlags.Flags.setZExt();
isVarArg, isTailCall, isInreg, if (isInreg)
DAG.getVTList(&LoweredRetTys[0], MyFlags.Flags.setInReg();
LoweredRetTys.size()), Ins.push_back(MyFlags);
&Ops[0], Ops.size(), NumFixedArgs
);
Chain = Res.getValue(LoweredRetTys.size() - 1);
// Gather up the call result into a single value.
if (RetTy != Type::VoidTy && !RetTys.empty()) {
ISD::NodeType AssertOp = ISD::DELETED_NODE;
if (RetSExt)
AssertOp = ISD::AssertSext;
else if (RetZExt)
AssertOp = ISD::AssertZext;
SmallVector<SDValue, 4> ReturnValues;
unsigned RegNo = 0;
for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
MVT VT = RetTys[I];
MVT RegisterVT = getRegisterType(VT);
unsigned NumRegs = getNumRegisters(VT);
unsigned RegNoEnd = NumRegs + RegNo;
SmallVector<SDValue, 4> Results;
for (; RegNo != RegNoEnd; ++RegNo)
Results.push_back(Res.getValue(RegNo));
SDValue ReturnValue =
getCopyFromParts(DAG, dl, &Results[0], NumRegs, RegisterVT, VT,
AssertOp);
ReturnValues.push_back(ReturnValue);
} }
Res = DAG.getNode(ISD::MERGE_VALUES, dl,
DAG.getVTList(&RetTys[0], RetTys.size()),
&ReturnValues[0], ReturnValues.size());
} }
// Check if target-dependent constraints permit a tail call here.
// Target-independent constraints should be checked by the caller.
if (isTailCall &&
!IsEligibleForTailCallOptimization(Callee, CallConv, isVarArg, Ins, DAG))
isTailCall = false;
SmallVector<SDValue, 4> InVals;
Chain = LowerCall(Chain, Callee, CallConv, isVarArg, isTailCall,
Outs, Ins, dl, DAG, InVals);
assert((!isTailCall || InVals.empty()) && "Tail call had return SDValues!");
// For a tail call, the return value is merely live-out and there aren't
// any nodes in the DAG representing it. Return a special value to
// indicate that a tail call has been emitted and no more Instructions
// should be processed in the current block.
if (isTailCall) {
DAG.setRoot(Chain);
return std::make_pair(SDValue(), SDValue());
}
// Collect the legal value parts into potentially illegal values
// that correspond to the original function's return values.
ISD::NodeType AssertOp = ISD::DELETED_NODE;
if (RetSExt)
AssertOp = ISD::AssertSext;
else if (RetZExt)
AssertOp = ISD::AssertZext;
SmallVector<SDValue, 4> ReturnValues;
unsigned CurReg = 0;
for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
MVT VT = RetTys[I];
MVT RegisterVT = getRegisterType(VT);
unsigned NumRegs = getNumRegisters(VT);
SDValue ReturnValue =
getCopyFromParts(DAG, dl, &InVals[CurReg], NumRegs, RegisterVT, VT,
AssertOp);
ReturnValues.push_back(ReturnValue);
CurReg += NumRegs;
}
// For a function returning void, there is no return value. We can't create
// such a node, so we just return a null return value in that case. In
// that case, nothing will actualy look at the value.
if (ReturnValues.empty())
return std::make_pair(SDValue(), Chain);
SDValue Res = DAG.getNode(ISD::MERGE_VALUES, dl,
DAG.getVTList(&RetTys[0], RetTys.size()),
&ReturnValues[0], ReturnValues.size());
return std::make_pair(Res, Chain); return std::make_pair(Res, Chain);
} }
@ -5789,25 +5777,108 @@ void SelectionDAGISel::
LowerArguments(BasicBlock *LLVMBB) { LowerArguments(BasicBlock *LLVMBB) {
// If this is the entry block, emit arguments. // If this is the entry block, emit arguments.
Function &F = *LLVMBB->getParent(); Function &F = *LLVMBB->getParent();
SDValue OldRoot = SDL->DAG.getRoot(); SelectionDAG &DAG = SDL->DAG;
SmallVector<SDValue, 16> Args; SDValue OldRoot = DAG.getRoot();
TLI.LowerArguments(F, SDL->DAG, Args, SDL->getCurDebugLoc()); DebugLoc dl = SDL->getCurDebugLoc();
const TargetData *TD = TLI.getTargetData();
unsigned a = 0; // Set up the incoming argument description vector.
for (Function::arg_iterator AI = F.arg_begin(), E = F.arg_end(); SmallVector<ISD::InputArg, 16> Ins;
AI != E; ++AI) { unsigned Idx = 1;
for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end();
I != E; ++I, ++Idx) {
SmallVector<MVT, 4> ValueVTs; SmallVector<MVT, 4> ValueVTs;
ComputeValueVTs(TLI, AI->getType(), ValueVTs); ComputeValueVTs(TLI, I->getType(), ValueVTs);
bool isArgValueUsed = !I->use_empty();
for (unsigned Value = 0, NumValues = ValueVTs.size();
Value != NumValues; ++Value) {
MVT VT = ValueVTs[Value];
const Type *ArgTy = VT.getTypeForMVT();
ISD::ArgFlagsTy Flags;
unsigned OriginalAlignment =
TD->getABITypeAlignment(ArgTy);
if (F.paramHasAttr(Idx, Attribute::ZExt))
Flags.setZExt();
if (F.paramHasAttr(Idx, Attribute::SExt))
Flags.setSExt();
if (F.paramHasAttr(Idx, Attribute::InReg))
Flags.setInReg();
if (F.paramHasAttr(Idx, Attribute::StructRet))
Flags.setSRet();
if (F.paramHasAttr(Idx, Attribute::ByVal)) {
Flags.setByVal();
const PointerType *Ty = cast<PointerType>(I->getType());
const Type *ElementTy = Ty->getElementType();
unsigned FrameAlign = TLI.getByValTypeAlignment(ElementTy);
unsigned FrameSize = TD->getTypeAllocSize(ElementTy);
// For ByVal, alignment should be passed from FE. BE will guess if
// this info is not there but there are cases it cannot get right.
if (F.getParamAlignment(Idx))
FrameAlign = F.getParamAlignment(Idx);
Flags.setByValAlign(FrameAlign);
Flags.setByValSize(FrameSize);
}
if (F.paramHasAttr(Idx, Attribute::Nest))
Flags.setNest();
Flags.setOrigAlign(OriginalAlignment);
MVT RegisterVT = TLI.getRegisterType(VT);
unsigned NumRegs = TLI.getNumRegisters(VT);
for (unsigned i = 0; i != NumRegs; ++i) {
ISD::InputArg MyFlags(Flags, RegisterVT, isArgValueUsed);
if (NumRegs > 1 && i == 0)
MyFlags.Flags.setSplit();
// if it isn't first piece, alignment must be 1
else if (i > 0)
MyFlags.Flags.setOrigAlign(1);
Ins.push_back(MyFlags);
}
}
}
// Call the target to set up the argument values.
SmallVector<SDValue, 8> InVals;
SDValue NewRoot = TLI.LowerFormalArguments(DAG.getRoot(), F.getCallingConv(),
F.isVarArg(), Ins,
dl, DAG, InVals);
DAG.setRoot(NewRoot);
// Set up the argument values.
unsigned i = 0;
Idx = 1;
for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E;
++I, ++Idx) {
SmallVector<SDValue, 4> ArgValues;
SmallVector<MVT, 4> ValueVTs;
ComputeValueVTs(TLI, I->getType(), ValueVTs);
unsigned NumValues = ValueVTs.size(); unsigned NumValues = ValueVTs.size();
if (!AI->use_empty()) { for (unsigned Value = 0; Value != NumValues; ++Value) {
SDL->setValue(AI, SDL->DAG.getMergeValues(&Args[a], NumValues, MVT VT = ValueVTs[Value];
SDL->getCurDebugLoc())); MVT PartVT = TLI.getRegisterType(VT);
unsigned NumParts = TLI.getNumRegisters(VT);
if (!I->use_empty()) {
ISD::NodeType AssertOp = ISD::DELETED_NODE;
if (F.paramHasAttr(Idx, Attribute::SExt))
AssertOp = ISD::AssertSext;
else if (F.paramHasAttr(Idx, Attribute::ZExt))
AssertOp = ISD::AssertZext;
ArgValues.push_back(getCopyFromParts(DAG, dl, &InVals[i], NumParts,
PartVT, VT, AssertOp));
}
i += NumParts;
}
if (!I->use_empty()) {
SDL->setValue(I, DAG.getMergeValues(&ArgValues[0], NumValues,
SDL->getCurDebugLoc()));
// If this argument is live outside of the entry block, insert a copy from // If this argument is live outside of the entry block, insert a copy from
// whereever we got it to the vreg that other BB's will reference it as. // whereever we got it to the vreg that other BB's will reference it as.
SDL->CopyToExportRegsIfNeeded(AI); SDL->CopyToExportRegsIfNeeded(I);
} }
a += NumValues;
} }
assert(i == InVals.size() && "Argument register count mismatch!");
// Finally, if the target has anything special to do, allow it to do so. // Finally, if the target has anything special to do, allow it to do so.
// FIXME: this should insert code into the DAG! // FIXME: this should insert code into the DAG!

View File

@ -363,13 +363,20 @@ public:
/// GFI - Garbage collection metadata for the function. /// GFI - Garbage collection metadata for the function.
GCFunctionInfo *GFI; GCFunctionInfo *GFI;
/// HasTailCall - This is set to true if a call in the current
/// block has been translated as a tail call. In this case,
/// no subsequent DAG nodes should be created.
///
bool HasTailCall;
LLVMContext *Context; LLVMContext *Context;
SelectionDAGLowering(SelectionDAG &dag, TargetLowering &tli, SelectionDAGLowering(SelectionDAG &dag, TargetLowering &tli,
FunctionLoweringInfo &funcinfo, FunctionLoweringInfo &funcinfo,
CodeGenOpt::Level ol) CodeGenOpt::Level ol)
: CurDebugLoc(DebugLoc::getUnknownLoc()), : CurDebugLoc(DebugLoc::getUnknownLoc()),
TLI(tli), DAG(dag), FuncInfo(funcinfo), OptLevel(ol), TLI(tli), DAG(dag), FuncInfo(funcinfo), OptLevel(ol),
HasTailCall(false),
Context(dag.getContext()) { Context(dag.getContext()) {
} }

View File

@ -366,140 +366,36 @@ static void copyCatchInfo(BasicBlock *SrcBB, BasicBlock *DestBB,
} }
} }
/// IsFixedFrameObjectWithPosOffset - Check if object is a fixed frame object and
/// whether object offset >= 0.
static bool
IsFixedFrameObjectWithPosOffset(MachineFrameInfo *MFI, SDValue Op) {
if (!isa<FrameIndexSDNode>(Op)) return false;
FrameIndexSDNode * FrameIdxNode = dyn_cast<FrameIndexSDNode>(Op);
int FrameIdx = FrameIdxNode->getIndex();
return MFI->isFixedObjectIndex(FrameIdx) &&
MFI->getObjectOffset(FrameIdx) >= 0;
}
/// IsPossiblyOverwrittenArgumentOfTailCall - Check if the operand could
/// possibly be overwritten when lowering the outgoing arguments in a tail
/// call. Currently the implementation of this call is very conservative and
/// assumes all arguments sourcing from FORMAL_ARGUMENTS or a CopyFromReg with
/// virtual registers would be overwritten by direct lowering.
static bool IsPossiblyOverwrittenArgumentOfTailCall(SDValue Op,
MachineFrameInfo *MFI) {
RegisterSDNode * OpReg = NULL;
if (Op.getOpcode() == ISD::FORMAL_ARGUMENTS ||
(Op.getOpcode()== ISD::CopyFromReg &&
(OpReg = dyn_cast<RegisterSDNode>(Op.getOperand(1))) &&
(OpReg->getReg() >= TargetRegisterInfo::FirstVirtualRegister)) ||
(Op.getOpcode() == ISD::LOAD &&
IsFixedFrameObjectWithPosOffset(MFI, Op.getOperand(1))) ||
(Op.getOpcode() == ISD::MERGE_VALUES &&
Op.getOperand(Op.getResNo()).getOpcode() == ISD::LOAD &&
IsFixedFrameObjectWithPosOffset(MFI, Op.getOperand(Op.getResNo()).
getOperand(1))))
return true;
return false;
}
/// CheckDAGForTailCallsAndFixThem - This Function looks for CALL nodes in the
/// DAG and fixes their tailcall attribute operand.
static void CheckDAGForTailCallsAndFixThem(SelectionDAG &DAG,
const TargetLowering& TLI) {
SDNode * Ret = NULL;
SDValue Terminator = DAG.getRoot();
// Find RET node.
if (Terminator.getOpcode() == ISD::RET) {
Ret = Terminator.getNode();
}
// Fix tail call attribute of CALL nodes.
for (SelectionDAG::allnodes_iterator BE = DAG.allnodes_begin(),
BI = DAG.allnodes_end(); BI != BE; ) {
--BI;
if (CallSDNode *TheCall = dyn_cast<CallSDNode>(BI)) {
SDValue OpRet(Ret, 0);
SDValue OpCall(BI, 0);
bool isMarkedTailCall = TheCall->isTailCall();
// If CALL node has tail call attribute set to true and the call is not
// eligible (no RET or the target rejects) the attribute is fixed to
// false. The TargetLowering::IsEligibleForTailCallOptimization function
// must correctly identify tail call optimizable calls.
if (!isMarkedTailCall) continue;
if (Ret==NULL ||
!TLI.IsEligibleForTailCallOptimization(TheCall, OpRet, DAG)) {
// Not eligible. Mark CALL node as non tail call. Note that we
// can modify the call node in place since calls are not CSE'd.
TheCall->setNotTailCall();
} else {
// Look for tail call clobbered arguments. Emit a series of
// copyto/copyfrom virtual register nodes to protect them.
SmallVector<SDValue, 32> Ops;
SDValue Chain = TheCall->getChain(), InFlag;
Ops.push_back(Chain);
Ops.push_back(TheCall->getCallee());
for (unsigned i = 0, e = TheCall->getNumArgs(); i != e; ++i) {
SDValue Arg = TheCall->getArg(i);
bool isByVal = TheCall->getArgFlags(i).isByVal();
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo *MFI = MF.getFrameInfo();
if (!isByVal &&
IsPossiblyOverwrittenArgumentOfTailCall(Arg, MFI)) {
MVT VT = Arg.getValueType();
unsigned VReg = MF.getRegInfo().
createVirtualRegister(TLI.getRegClassFor(VT));
Chain = DAG.getCopyToReg(Chain, Arg.getDebugLoc(),
VReg, Arg, InFlag);
InFlag = Chain.getValue(1);
Arg = DAG.getCopyFromReg(Chain, Arg.getDebugLoc(),
VReg, VT, InFlag);
Chain = Arg.getValue(1);
InFlag = Arg.getValue(2);
}
Ops.push_back(Arg);
Ops.push_back(TheCall->getArgFlagsVal(i));
}
// Link in chain of CopyTo/CopyFromReg.
Ops[0] = Chain;
DAG.UpdateNodeOperands(OpCall, Ops.begin(), Ops.size());
}
}
}
}
void SelectionDAGISel::SelectBasicBlock(BasicBlock *LLVMBB, void SelectionDAGISel::SelectBasicBlock(BasicBlock *LLVMBB,
BasicBlock::iterator Begin, BasicBlock::iterator Begin,
BasicBlock::iterator End) { BasicBlock::iterator End) {
SDL->setCurrentBasicBlock(BB); SDL->setCurrentBasicBlock(BB);
// Lower all of the non-terminator instructions. // Lower all of the non-terminator instructions. If a call is emitted
for (BasicBlock::iterator I = Begin; I != End; ++I) // as a tail call, cease emitting nodes for this block.
for (BasicBlock::iterator I = Begin; I != End && !SDL->HasTailCall; ++I)
if (!isa<TerminatorInst>(I)) if (!isa<TerminatorInst>(I))
SDL->visit(*I); SDL->visit(*I);
// Ensure that all instructions which are used outside of their defining if (!SDL->HasTailCall) {
// blocks are available as virtual registers. Invoke is handled elsewhere. // Ensure that all instructions which are used outside of their defining
for (BasicBlock::iterator I = Begin; I != End; ++I) // blocks are available as virtual registers. Invoke is handled elsewhere.
if (!isa<PHINode>(I) && !isa<InvokeInst>(I)) for (BasicBlock::iterator I = Begin; I != End; ++I)
SDL->CopyToExportRegsIfNeeded(I); if (!isa<PHINode>(I) && !isa<InvokeInst>(I))
SDL->CopyToExportRegsIfNeeded(I);
// Handle PHI nodes in successor blocks. // Handle PHI nodes in successor blocks.
if (End == LLVMBB->end()) { if (End == LLVMBB->end()) {
HandlePHINodesInSuccessorBlocks(LLVMBB); HandlePHINodesInSuccessorBlocks(LLVMBB);
// Lower the terminator after the copies are emitted. // Lower the terminator after the copies are emitted.
SDL->visit(*LLVMBB->getTerminator()); SDL->visit(*LLVMBB->getTerminator());
}
} }
// Make sure the root of the DAG is up-to-date. // Make sure the root of the DAG is up-to-date.
CurDAG->setRoot(SDL->getControlRoot()); CurDAG->setRoot(SDL->getControlRoot());
// Check whether calls in this block are real tail calls. Fix up CALL nodes
// with correct tailcall attribute so that the target can rely on the tailcall
// attribute indicating whether the call is really eligible for tail call
// optimization.
if (PerformTailCallOpt)
CheckDAGForTailCallsAndFixThem(*CurDAG, TLI);
// Final step, emit the lowered DAG as machine code. // Final step, emit the lowered DAG as machine code.
CodeGenAndEmitDAG(); CodeGenAndEmitDAG();
SDL->clear(); SDL->clear();

View File

@ -2584,45 +2584,3 @@ SDValue TargetLowering::BuildUDIV(SDNode *N, SelectionDAG &DAG,
DAG.getConstant(magics.s-1, getShiftAmountTy())); DAG.getConstant(magics.s-1, getShiftAmountTy()));
} }
} }
/// IgnoreHarmlessInstructions - Ignore instructions between a CALL and RET
/// node that don't prevent tail call optimization.
static SDValue IgnoreHarmlessInstructions(SDValue node) {
// Found call return.
if (node.getOpcode() == ISD::CALL) return node;
// Ignore MERGE_VALUES. Will have at least one operand.
if (node.getOpcode() == ISD::MERGE_VALUES)
return IgnoreHarmlessInstructions(node.getOperand(0));
// Ignore ANY_EXTEND node.
if (node.getOpcode() == ISD::ANY_EXTEND)
return IgnoreHarmlessInstructions(node.getOperand(0));
if (node.getOpcode() == ISD::TRUNCATE)
return IgnoreHarmlessInstructions(node.getOperand(0));
// Any other node type.
return node;
}
bool TargetLowering::CheckTailCallReturnConstraints(CallSDNode *TheCall,
SDValue Ret) {
unsigned NumOps = Ret.getNumOperands();
// ISD::CALL results:(value0, ..., valuen, chain)
// ISD::RET operands:(chain, value0, flag0, ..., valuen, flagn)
// Value return:
// Check that operand of the RET node sources from the CALL node. The RET node
// has at least two operands. Operand 0 holds the chain. Operand 1 holds the
// value.
// Also we need to check that there is no code in between the call and the
// return. Hence we also check that the incomming chain to the return sources
// from the outgoing chain of the call.
if (NumOps > 1 &&
IgnoreHarmlessInstructions(Ret.getOperand(1)) == SDValue(TheCall,0) &&
Ret.getOperand(0) == SDValue(TheCall, TheCall->getNumValues()-1))
return true;
// void return: The RET node has the chain result value of the CALL node as
// input.
if (NumOps == 1 &&
Ret.getOperand(0) == SDValue(TheCall, TheCall->getNumValues()-1))
return true;
return false;
}

View File

@ -295,7 +295,6 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
setOperationAction(ISD::DBG_STOPPOINT, MVT::Other, Expand); setOperationAction(ISD::DBG_STOPPOINT, MVT::Other, Expand);
setOperationAction(ISD::DEBUG_LOC, MVT::Other, Expand); setOperationAction(ISD::DEBUG_LOC, MVT::Other, Expand);
setOperationAction(ISD::RET, MVT::Other, Custom);
setOperationAction(ISD::GlobalAddress, MVT::i32, Custom); setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
setOperationAction(ISD::ConstantPool, MVT::i32, Custom); setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
setOperationAction(ISD::GLOBAL_OFFSET_TABLE, MVT::i32, Custom); setOperationAction(ISD::GLOBAL_OFFSET_TABLE, MVT::i32, Custom);
@ -531,13 +530,6 @@ static bool FPCCToARMCC(ISD::CondCode CC, ARMCC::CondCodes &CondCode,
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
// Calling Convention Implementation // Calling Convention Implementation
//
// The lower operations present on calling convention works on this order:
// LowerCALL (virt regs --> phys regs, virt regs --> stack)
// LowerFORMAL_ARGUMENTS (phys --> virt regs, stack --> virt regs)
// LowerRET (virt regs --> phys regs)
// LowerCALL (phys regs --> virt regs)
//
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
#include "ARMGenCallingConv.inc" #include "ARMGenCallingConv.inc"
@ -694,25 +686,21 @@ CCAssignFn *ARMTargetLowering::CCAssignFnForNode(unsigned CC,
} }
} }
/// LowerCallResult - Lower the result values of an ISD::CALL into the /// LowerCallResult - Lower the result values of a call into the
/// appropriate copies out of appropriate physical registers. This assumes that /// appropriate copies out of appropriate physical registers.
/// Chain/InFlag are the input chain/flag to use, and that TheCall is the call SDValue
/// being lowered. The returns a SDNode with the same number of values as the ARMTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
/// ISD::CALL. unsigned CallConv, bool isVarArg,
SDNode *ARMTargetLowering:: const SmallVectorImpl<ISD::InputArg> &Ins,
LowerCallResult(SDValue Chain, SDValue InFlag, CallSDNode *TheCall, DebugLoc dl, SelectionDAG &DAG,
unsigned CallingConv, SelectionDAG &DAG) { SmallVectorImpl<SDValue> &InVals) {
DebugLoc dl = TheCall->getDebugLoc();
// Assign locations to each value returned by this call. // Assign locations to each value returned by this call.
SmallVector<CCValAssign, 16> RVLocs; SmallVector<CCValAssign, 16> RVLocs;
bool isVarArg = TheCall->isVarArg(); CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
CCState CCInfo(CallingConv, isVarArg, getTargetMachine(),
RVLocs, *DAG.getContext()); RVLocs, *DAG.getContext());
CCInfo.AnalyzeCallResult(TheCall, CCInfo.AnalyzeCallResult(Ins,
CCAssignFnForNode(CallingConv, /* Return*/ true)); CCAssignFnForNode(CallConv, /* Return*/ true));
SmallVector<SDValue, 8> ResultVals;
// Copy all of the result registers out of their specified physreg. // Copy all of the result registers out of their specified physreg.
for (unsigned i = 0; i != RVLocs.size(); ++i) { for (unsigned i = 0; i != RVLocs.size(); ++i) {
@ -764,13 +752,10 @@ LowerCallResult(SDValue Chain, SDValue InFlag, CallSDNode *TheCall,
break; break;
} }
ResultVals.push_back(Val); InVals.push_back(Val);
} }
// Merge everything together with a MERGE_VALUES node. return Chain;
ResultVals.push_back(Chain);
return DAG.getNode(ISD::MERGE_VALUES, dl, TheCall->getVTList(),
&ResultVals[0], ResultVals.size()).getNode();
} }
/// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified /// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified
@ -790,11 +775,11 @@ CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain,
/// LowerMemOpCallTo - Store the argument to the stack. /// LowerMemOpCallTo - Store the argument to the stack.
SDValue SDValue
ARMTargetLowering::LowerMemOpCallTo(CallSDNode *TheCall, SelectionDAG &DAG, ARMTargetLowering::LowerMemOpCallTo(SDValue Chain,
const SDValue &StackPtr, SDValue StackPtr, SDValue Arg,
const CCValAssign &VA, SDValue Chain, DebugLoc dl, SelectionDAG &DAG,
SDValue Arg, ISD::ArgFlagsTy Flags) { const CCValAssign &VA,
DebugLoc dl = TheCall->getDebugLoc(); ISD::ArgFlagsTy Flags) {
unsigned LocMemOffset = VA.getLocMemOffset(); unsigned LocMemOffset = VA.getLocMemOffset();
SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset); SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset);
PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff); PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff);
@ -805,14 +790,13 @@ ARMTargetLowering::LowerMemOpCallTo(CallSDNode *TheCall, SelectionDAG &DAG,
PseudoSourceValue::getStack(), LocMemOffset); PseudoSourceValue::getStack(), LocMemOffset);
} }
void ARMTargetLowering::PassF64ArgInRegs(CallSDNode *TheCall, SelectionDAG &DAG, void ARMTargetLowering::PassF64ArgInRegs(DebugLoc dl, SelectionDAG &DAG,
SDValue Chain, SDValue &Arg, SDValue Chain, SDValue &Arg,
RegsToPassVector &RegsToPass, RegsToPassVector &RegsToPass,
CCValAssign &VA, CCValAssign &NextVA, CCValAssign &VA, CCValAssign &NextVA,
SDValue &StackPtr, SDValue &StackPtr,
SmallVector<SDValue, 8> &MemOpChains, SmallVector<SDValue, 8> &MemOpChains,
ISD::ArgFlagsTy Flags) { ISD::ArgFlagsTy Flags) {
DebugLoc dl = TheCall->getDebugLoc();
SDValue fmrrd = DAG.getNode(ARMISD::FMRRD, dl, SDValue fmrrd = DAG.getNode(ARMISD::FMRRD, dl,
DAG.getVTList(MVT::i32, MVT::i32), Arg); DAG.getVTList(MVT::i32, MVT::i32), Arg);
@ -825,27 +809,30 @@ void ARMTargetLowering::PassF64ArgInRegs(CallSDNode *TheCall, SelectionDAG &DAG,
if (StackPtr.getNode() == 0) if (StackPtr.getNode() == 0)
StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy()); StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy());
MemOpChains.push_back(LowerMemOpCallTo(TheCall, DAG, StackPtr, NextVA, MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, fmrrd.getValue(1),
Chain, fmrrd.getValue(1), Flags)); dl, DAG, NextVA,
Flags));
} }
} }
/// LowerCALL - Lowering a ISD::CALL node into a callseq_start <- /// LowerCall - Lowering a call into a callseq_start <-
/// ARMISD:CALL <- callseq_end chain. Also add input and output parameter /// ARMISD:CALL <- callseq_end chain. Also add input and output parameter
/// nodes. /// nodes.
SDValue ARMTargetLowering::LowerCALL(SDValue Op, SelectionDAG &DAG) { SDValue
CallSDNode *TheCall = cast<CallSDNode>(Op.getNode()); ARMTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
MVT RetVT = TheCall->getRetValType(0); unsigned CallConv, bool isVarArg,
SDValue Chain = TheCall->getChain(); bool isTailCall,
unsigned CC = TheCall->getCallingConv(); const SmallVectorImpl<ISD::OutputArg> &Outs,
bool isVarArg = TheCall->isVarArg(); const SmallVectorImpl<ISD::InputArg> &Ins,
SDValue Callee = TheCall->getCallee(); DebugLoc dl, SelectionDAG &DAG,
DebugLoc dl = TheCall->getDebugLoc(); SmallVectorImpl<SDValue> &InVals) {
// Analyze operands of the call, assigning locations to each operand. // Analyze operands of the call, assigning locations to each operand.
SmallVector<CCValAssign, 16> ArgLocs; SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs, *DAG.getContext()); CCState CCInfo(CallConv, isVarArg, getTargetMachine(), ArgLocs,
CCInfo.AnalyzeCallOperands(TheCall, CCAssignFnForNode(CC, /* Return*/ false)); *DAG.getContext());
CCInfo.AnalyzeCallOperands(Outs,
CCAssignFnForNode(CallConv, /* Return*/ false));
// Get a count of how many bytes are to be pushed on the stack. // Get a count of how many bytes are to be pushed on the stack.
unsigned NumBytes = CCInfo.getNextStackOffset(); unsigned NumBytes = CCInfo.getNextStackOffset();
@ -865,8 +852,8 @@ SDValue ARMTargetLowering::LowerCALL(SDValue Op, SelectionDAG &DAG) {
i != e; i != e;
++i, ++realArgIdx) { ++i, ++realArgIdx) {
CCValAssign &VA = ArgLocs[i]; CCValAssign &VA = ArgLocs[i];
SDValue Arg = TheCall->getArg(realArgIdx); SDValue Arg = Outs[realArgIdx].Val;
ISD::ArgFlagsTy Flags = TheCall->getArgFlags(realArgIdx); ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
// Promote the value if needed. // Promote the value if needed.
switch (VA.getLocInfo()) { switch (VA.getLocInfo()) {
@ -894,23 +881,23 @@ SDValue ARMTargetLowering::LowerCALL(SDValue Op, SelectionDAG &DAG) {
SDValue Op1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg, SDValue Op1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
DAG.getConstant(1, MVT::i32)); DAG.getConstant(1, MVT::i32));
PassF64ArgInRegs(TheCall, DAG, Chain, Op0, RegsToPass, PassF64ArgInRegs(dl, DAG, Chain, Op0, RegsToPass,
VA, ArgLocs[++i], StackPtr, MemOpChains, Flags); VA, ArgLocs[++i], StackPtr, MemOpChains, Flags);
VA = ArgLocs[++i]; // skip ahead to next loc VA = ArgLocs[++i]; // skip ahead to next loc
if (VA.isRegLoc()) { if (VA.isRegLoc()) {
PassF64ArgInRegs(TheCall, DAG, Chain, Op1, RegsToPass, PassF64ArgInRegs(dl, DAG, Chain, Op1, RegsToPass,
VA, ArgLocs[++i], StackPtr, MemOpChains, Flags); VA, ArgLocs[++i], StackPtr, MemOpChains, Flags);
} else { } else {
assert(VA.isMemLoc()); assert(VA.isMemLoc());
if (StackPtr.getNode() == 0) if (StackPtr.getNode() == 0)
StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy()); StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy());
MemOpChains.push_back(LowerMemOpCallTo(TheCall, DAG, StackPtr, VA, MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Op1,
Chain, Op1, Flags)); dl, DAG, VA, Flags));
} }
} else { } else {
PassF64ArgInRegs(TheCall, DAG, Chain, Arg, RegsToPass, VA, ArgLocs[++i], PassF64ArgInRegs(dl, DAG, Chain, Arg, RegsToPass, VA, ArgLocs[++i],
StackPtr, MemOpChains, Flags); StackPtr, MemOpChains, Flags);
} }
} else if (VA.isRegLoc()) { } else if (VA.isRegLoc()) {
@ -920,8 +907,8 @@ SDValue ARMTargetLowering::LowerCALL(SDValue Op, SelectionDAG &DAG) {
if (StackPtr.getNode() == 0) if (StackPtr.getNode() == 0)
StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy()); StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy());
MemOpChains.push_back(LowerMemOpCallTo(TheCall, DAG, StackPtr, VA, MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg,
Chain, Arg, Flags)); dl, DAG, VA, Flags));
} }
} }
@ -1024,30 +1011,30 @@ SDValue ARMTargetLowering::LowerCALL(SDValue Op, SelectionDAG &DAG) {
Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true), Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
DAG.getIntPtrConstant(0, true), InFlag); DAG.getIntPtrConstant(0, true), InFlag);
if (RetVT != MVT::Other) if (!Ins.empty())
InFlag = Chain.getValue(1); InFlag = Chain.getValue(1);
// Handle result values, copying them out of physregs into vregs that we // Handle result values, copying them out of physregs into vregs that we
// return. // return.
return SDValue(LowerCallResult(Chain, InFlag, TheCall, CC, DAG), return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins,
Op.getResNo()); dl, DAG, InVals);
} }
SDValue ARMTargetLowering::LowerRET(SDValue Op, SelectionDAG &DAG) { SDValue
// The chain is always operand #0 ARMTargetLowering::LowerReturn(SDValue Chain,
SDValue Chain = Op.getOperand(0); unsigned CallConv, bool isVarArg,
DebugLoc dl = Op.getDebugLoc(); const SmallVectorImpl<ISD::OutputArg> &Outs,
DebugLoc dl, SelectionDAG &DAG) {
// CCValAssign - represent the assignment of the return value to a location. // CCValAssign - represent the assignment of the return value to a location.
SmallVector<CCValAssign, 16> RVLocs; SmallVector<CCValAssign, 16> RVLocs;
unsigned CC = DAG.getMachineFunction().getFunction()->getCallingConv();
bool isVarArg = DAG.getMachineFunction().getFunction()->isVarArg();
// CCState - Info about the registers and stack slots. // CCState - Info about the registers and stack slots.
CCState CCInfo(CC, isVarArg, getTargetMachine(), RVLocs, *DAG.getContext()); CCState CCInfo(CallConv, isVarArg, getTargetMachine(), RVLocs,
*DAG.getContext());
// Analyze return values of ISD::RET. // Analyze outgoing return values.
CCInfo.AnalyzeReturn(Op.getNode(), CCAssignFnForNode(CC, /* Return */ true)); CCInfo.AnalyzeReturn(Outs, CCAssignFnForNode(CallConv, /* Return */ true));
// If this is the first return lowered for this function, add // If this is the first return lowered for this function, add
// the regs to the liveout set for the function. // the regs to the liveout set for the function.
@ -1066,9 +1053,7 @@ SDValue ARMTargetLowering::LowerRET(SDValue Op, SelectionDAG &DAG) {
CCValAssign &VA = RVLocs[i]; CCValAssign &VA = RVLocs[i];
assert(VA.isRegLoc() && "Can only return in registers!"); assert(VA.isRegLoc() && "Can only return in registers!");
// ISD::RET => ret chain, (regnum1,val1), ... SDValue Arg = Outs[realRVLocIdx].Val;
// So i*2+1 index only the regnums
SDValue Arg = Op.getOperand(realRVLocIdx*2+1);
switch (VA.getLocInfo()) { switch (VA.getLocInfo()) {
default: llvm_unreachable("Unknown loc info!"); default: llvm_unreachable("Unknown loc info!");
@ -1172,7 +1157,7 @@ ARMTargetLowering::LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
// FIXME: is there useful debug info available here? // FIXME: is there useful debug info available here?
std::pair<SDValue, SDValue> CallResult = std::pair<SDValue, SDValue> CallResult =
LowerCallTo(Chain, (const Type *) Type::Int32Ty, false, false, false, false, LowerCallTo(Chain, (const Type *) Type::Int32Ty, false, false, false, false,
0, CallingConv::C, false, 0, CallingConv::C, false, /*isReturnValueUsed=*/true,
DAG.getExternalSymbol("__tls_get_addr", PtrVT), Args, DAG, dl); DAG.getExternalSymbol("__tls_get_addr", PtrVT), Args, DAG, dl);
return CallResult.first; return CallResult.first;
} }
@ -1420,21 +1405,24 @@ ARMTargetLowering::GetF64FormalArgument(CCValAssign &VA, CCValAssign &NextVA,
} }
SDValue SDValue
ARMTargetLowering::LowerFORMAL_ARGUMENTS(SDValue Op, SelectionDAG &DAG) { ARMTargetLowering::LowerFormalArguments(SDValue Chain,
unsigned CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg>
&Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) {
MachineFunction &MF = DAG.getMachineFunction(); MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo *MFI = MF.getFrameInfo(); MachineFrameInfo *MFI = MF.getFrameInfo();
SDValue Root = Op.getOperand(0);
DebugLoc dl = Op.getDebugLoc();
bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue() != 0;
unsigned CC = MF.getFunction()->getCallingConv();
ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
// Assign locations to all of the incoming arguments. // Assign locations to all of the incoming arguments.
SmallVector<CCValAssign, 16> ArgLocs; SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs, *DAG.getContext()); CCState CCInfo(CallConv, isVarArg, getTargetMachine(), ArgLocs,
CCInfo.AnalyzeFormalArguments(Op.getNode(), *DAG.getContext());
CCAssignFnForNode(CC, /* Return*/ false)); CCInfo.AnalyzeFormalArguments(Ins,
CCAssignFnForNode(CallConv, /* Return*/ false));
SmallVector<SDValue, 16> ArgValues; SmallVector<SDValue, 16> ArgValues;
@ -1453,17 +1441,17 @@ ARMTargetLowering::LowerFORMAL_ARGUMENTS(SDValue Op, SelectionDAG &DAG) {
if (VA.getLocVT() == MVT::v2f64) { if (VA.getLocVT() == MVT::v2f64) {
SDValue ArgValue1 = GetF64FormalArgument(VA, ArgLocs[++i], SDValue ArgValue1 = GetF64FormalArgument(VA, ArgLocs[++i],
Root, DAG, dl); Chain, DAG, dl);
VA = ArgLocs[++i]; // skip ahead to next loc VA = ArgLocs[++i]; // skip ahead to next loc
SDValue ArgValue2 = GetF64FormalArgument(VA, ArgLocs[++i], SDValue ArgValue2 = GetF64FormalArgument(VA, ArgLocs[++i],
Root, DAG, dl); Chain, DAG, dl);
ArgValue = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64); ArgValue = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64);
ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64,
ArgValue, ArgValue1, DAG.getIntPtrConstant(0)); ArgValue, ArgValue1, DAG.getIntPtrConstant(0));
ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64,
ArgValue, ArgValue2, DAG.getIntPtrConstant(1)); ArgValue, ArgValue2, DAG.getIntPtrConstant(1));
} else } else
ArgValue = GetF64FormalArgument(VA, ArgLocs[++i], Root, DAG, dl); ArgValue = GetF64FormalArgument(VA, ArgLocs[++i], Chain, DAG, dl);
} else { } else {
TargetRegisterClass *RC; TargetRegisterClass *RC;
@ -1478,11 +1466,11 @@ ARMTargetLowering::LowerFORMAL_ARGUMENTS(SDValue Op, SelectionDAG &DAG) {
assert((RegVT == MVT::i32 || RegVT == MVT::f32 || assert((RegVT == MVT::i32 || RegVT == MVT::f32 ||
(FloatABIType == FloatABI::Hard && RegVT == MVT::f64)) && (FloatABIType == FloatABI::Hard && RegVT == MVT::f64)) &&
"RegVT not supported by FORMAL_ARGUMENTS Lowering"); "RegVT not supported by formal arguments Lowering");
// Transform the arguments in physical registers into virtual ones. // Transform the arguments in physical registers into virtual ones.
unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC); unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
ArgValue = DAG.getCopyFromReg(Root, dl, Reg, RegVT); ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);
} }
// If this is an 8 or 16-bit value, it is really passed promoted // If this is an 8 or 16-bit value, it is really passed promoted
@ -1506,7 +1494,7 @@ ARMTargetLowering::LowerFORMAL_ARGUMENTS(SDValue Op, SelectionDAG &DAG) {
break; break;
} }
ArgValues.push_back(ArgValue); InVals.push_back(ArgValue);
} else { // VA.isRegLoc() } else { // VA.isRegLoc()
@ -1519,7 +1507,7 @@ ARMTargetLowering::LowerFORMAL_ARGUMENTS(SDValue Op, SelectionDAG &DAG) {
// Create load nodes to retrieve arguments from the stack. // Create load nodes to retrieve arguments from the stack.
SDValue FIN = DAG.getFrameIndex(FI, getPointerTy()); SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
ArgValues.push_back(DAG.getLoad(VA.getValVT(), dl, Root, FIN, NULL, 0)); InVals.push_back(DAG.getLoad(VA.getValVT(), dl, Chain, FIN, NULL, 0));
} }
} }
@ -1555,25 +1543,21 @@ ARMTargetLowering::LowerFORMAL_ARGUMENTS(SDValue Op, SelectionDAG &DAG) {
RC = ARM::GPRRegisterClass; RC = ARM::GPRRegisterClass;
unsigned VReg = MF.addLiveIn(GPRArgRegs[NumGPRs], RC); unsigned VReg = MF.addLiveIn(GPRArgRegs[NumGPRs], RC);
SDValue Val = DAG.getCopyFromReg(Root, dl, VReg, MVT::i32); SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, NULL, 0); SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, NULL, 0);
MemOps.push_back(Store); MemOps.push_back(Store);
FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(), FIN, FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(), FIN,
DAG.getConstant(4, getPointerTy())); DAG.getConstant(4, getPointerTy()));
} }
if (!MemOps.empty()) if (!MemOps.empty())
Root = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
&MemOps[0], MemOps.size()); &MemOps[0], MemOps.size());
} else } else
// This will point to the next argument passed via stack. // This will point to the next argument passed via stack.
VarArgsFrameIndex = MFI->CreateFixedObject(4, ArgOffset); VarArgsFrameIndex = MFI->CreateFixedObject(4, ArgOffset);
} }
ArgValues.push_back(Root); return Chain;
// Return the new list of results.
return DAG.getNode(ISD::MERGE_VALUES, dl, Op.getNode()->getVTList(),
&ArgValues[0], ArgValues.size()).getValue(Op.getResNo());
} }
/// isFloatingPointZero - Return true if this is +0.0. /// isFloatingPointZero - Return true if this is +0.0.
@ -2380,8 +2364,6 @@ SDValue ARMTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) {
return Subtarget->isTargetDarwin() ? LowerGlobalAddressDarwin(Op, DAG) : return Subtarget->isTargetDarwin() ? LowerGlobalAddressDarwin(Op, DAG) :
LowerGlobalAddressELF(Op, DAG); LowerGlobalAddressELF(Op, DAG);
case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG); case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
case ISD::CALL: return LowerCALL(Op, DAG);
case ISD::RET: return LowerRET(Op, DAG);
case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG, Subtarget); case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG, Subtarget);
case ISD::BR_CC: return LowerBR_CC(Op, DAG, Subtarget); case ISD::BR_CC: return LowerBR_CC(Op, DAG, Subtarget);
case ISD::BR_JT: return LowerBR_JT(Op, DAG); case ISD::BR_JT: return LowerBR_JT(Op, DAG);
@ -2391,7 +2373,6 @@ SDValue ARMTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) {
case ISD::FP_TO_SINT: case ISD::FP_TO_SINT:
case ISD::FP_TO_UINT: return LowerFP_TO_INT(Op, DAG); case ISD::FP_TO_UINT: return LowerFP_TO_INT(Op, DAG);
case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG); case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG);
case ISD::FORMAL_ARGUMENTS: return LowerFORMAL_ARGUMENTS(Op, DAG);
case ISD::RETURNADDR: break; case ISD::RETURNADDR: break;
case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
case ISD::GLOBAL_OFFSET_TABLE: return LowerGLOBAL_OFFSET_TABLE(Op, DAG); case ISD::GLOBAL_OFFSET_TABLE: return LowerGLOBAL_OFFSET_TABLE(Op, DAG);

View File

@ -225,7 +225,7 @@ namespace llvm {
void addQRTypeForNEON(MVT VT); void addQRTypeForNEON(MVT VT);
typedef SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPassVector; typedef SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPassVector;
void PassF64ArgInRegs(CallSDNode *TheCall, SelectionDAG &DAG, void PassF64ArgInRegs(DebugLoc dl, SelectionDAG &DAG,
SDValue Chain, SDValue &Arg, SDValue Chain, SDValue &Arg,
RegsToPassVector &RegsToPass, RegsToPassVector &RegsToPass,
CCValAssign &VA, CCValAssign &NextVA, CCValAssign &VA, CCValAssign &NextVA,
@ -236,15 +236,12 @@ namespace llvm {
SDValue &Root, SelectionDAG &DAG, DebugLoc dl); SDValue &Root, SelectionDAG &DAG, DebugLoc dl);
CCAssignFn *CCAssignFnForNode(unsigned CC, bool Return) const; CCAssignFn *CCAssignFnForNode(unsigned CC, bool Return) const;
SDValue LowerMemOpCallTo(CallSDNode *TheCall, SelectionDAG &DAG, SDValue LowerMemOpCallTo(SDValue Chain, SDValue StackPtr, SDValue Arg,
const SDValue &StackPtr, const CCValAssign &VA, DebugLoc dl, SelectionDAG &DAG,
SDValue Chain, SDValue Arg, ISD::ArgFlagsTy Flags); const CCValAssign &VA,
SDNode *LowerCallResult(SDValue Chain, SDValue InFlag, CallSDNode *TheCall, ISD::ArgFlagsTy Flags);
unsigned CallingConv, SelectionDAG &DAG);
SDValue LowerCALL(SDValue Op, SelectionDAG &DAG);
SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, SelectionDAG &DAG); SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, SelectionDAG &DAG);
SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG); SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG);
SDValue LowerRET(SDValue Op, SelectionDAG &DAG);
SDValue LowerGlobalAddressDarwin(SDValue Op, SelectionDAG &DAG); SDValue LowerGlobalAddressDarwin(SDValue Op, SelectionDAG &DAG);
SDValue LowerGlobalAddressELF(SDValue Op, SelectionDAG &DAG); SDValue LowerGlobalAddressELF(SDValue Op, SelectionDAG &DAG);
SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG); SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG);
@ -253,7 +250,6 @@ namespace llvm {
SDValue LowerToTLSExecModels(GlobalAddressSDNode *GA, SDValue LowerToTLSExecModels(GlobalAddressSDNode *GA,
SelectionDAG &DAG); SelectionDAG &DAG);
SDValue LowerGLOBAL_OFFSET_TABLE(SDValue Op, SelectionDAG &DAG); SDValue LowerGLOBAL_OFFSET_TABLE(SDValue Op, SelectionDAG &DAG);
SDValue LowerFORMAL_ARGUMENTS(SDValue Op, SelectionDAG &DAG);
SDValue LowerBR_JT(SDValue Op, SelectionDAG &DAG); SDValue LowerBR_JT(SDValue Op, SelectionDAG &DAG);
SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG); SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG);
@ -264,6 +260,33 @@ namespace llvm {
bool AlwaysInline, bool AlwaysInline,
const Value *DstSV, uint64_t DstSVOff, const Value *DstSV, uint64_t DstSVOff,
const Value *SrcSV, uint64_t SrcSVOff); const Value *SrcSV, uint64_t SrcSVOff);
SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
unsigned CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals);
virtual SDValue
LowerFormalArguments(SDValue Chain,
unsigned CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals);
virtual SDValue
LowerCall(SDValue Chain, SDValue Callee,
unsigned CallConv, bool isVarArg,
bool isTailCall,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals);
virtual SDValue
LowerReturn(SDValue Chain,
unsigned CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
DebugLoc dl, SelectionDAG &DAG);
}; };
} }

View File

@ -166,8 +166,6 @@ AlphaTargetLowering::AlphaTargetLowering(TargetMachine &TM)
setOperationAction(ISD::VAARG, MVT::Other, Custom); setOperationAction(ISD::VAARG, MVT::Other, Custom);
setOperationAction(ISD::VAARG, MVT::i32, Custom); setOperationAction(ISD::VAARG, MVT::i32, Custom);
setOperationAction(ISD::RET, MVT::Other, Custom);
setOperationAction(ISD::JumpTable, MVT::i64, Custom); setOperationAction(ISD::JumpTable, MVT::i64, Custom);
setOperationAction(ISD::JumpTable, MVT::i32, Custom); setOperationAction(ISD::JumpTable, MVT::i32, Custom);
@ -246,20 +244,21 @@ static SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) {
#include "AlphaGenCallingConv.inc" #include "AlphaGenCallingConv.inc"
SDValue AlphaTargetLowering::LowerCALL(SDValue Op, SelectionDAG &DAG) { SDValue
CallSDNode *TheCall = cast<CallSDNode>(Op.getNode()); AlphaTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
SDValue Chain = TheCall->getChain(); unsigned CallConv, bool isVarArg,
SDValue Callee = TheCall->getCallee(); bool isTailCall,
bool isVarArg = TheCall->isVarArg(); const SmallVectorImpl<ISD::OutputArg> &Outs,
DebugLoc dl = Op.getDebugLoc(); const SmallVectorImpl<ISD::InputArg> &Ins,
MachineFunction &MF = DAG.getMachineFunction(); DebugLoc dl, SelectionDAG &DAG,
unsigned CC = MF.getFunction()->getCallingConv(); SmallVectorImpl<SDValue> &InVals) {
// Analyze operands of the call, assigning locations to each operand. // Analyze operands of the call, assigning locations to each operand.
SmallVector<CCValAssign, 16> ArgLocs; SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs, *DAG.getContext()); CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
ArgLocs, *DAG.getContext());
CCInfo.AnalyzeCallOperands(TheCall, CC_Alpha); CCInfo.AnalyzeCallOperands(Outs, CC_Alpha);
// Get a count of how many bytes are to be pushed on the stack. // Get a count of how many bytes are to be pushed on the stack.
unsigned NumBytes = CCInfo.getNextStackOffset(); unsigned NumBytes = CCInfo.getNextStackOffset();
@ -275,8 +274,7 @@ SDValue AlphaTargetLowering::LowerCALL(SDValue Op, SelectionDAG &DAG) {
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
CCValAssign &VA = ArgLocs[i]; CCValAssign &VA = ArgLocs[i];
// Arguments start after the 5 first operands of ISD::CALL SDValue Arg = Outs[i].Val;
SDValue Arg = TheCall->getArg(i);
// Promote the value if needed. // Promote the value if needed.
switch (VA.getLocInfo()) { switch (VA.getLocInfo()) {
@ -355,30 +353,26 @@ SDValue AlphaTargetLowering::LowerCALL(SDValue Op, SelectionDAG &DAG) {
// Handle result values, copying them out of physregs into vregs that we // Handle result values, copying them out of physregs into vregs that we
// return. // return.
return SDValue(LowerCallResult(Chain, InFlag, TheCall, CC, DAG), return LowerCallResult(Chain, InFlag, CallConv, isVarArg,
Op.getResNo()); Ins, dl, DAG, InVals);
} }
/// LowerCallResult - Lower the result values of an ISD::CALL into the /// LowerCallResult - Lower the result values of a call into the
/// appropriate copies out of appropriate physical registers. This assumes that /// appropriate copies out of appropriate physical registers.
/// Chain/InFlag are the input chain/flag to use, and that TheCall is the call ///
/// being lowered. Returns a SDNode with the same number of values as the SDValue
/// ISD::CALL.
SDNode*
AlphaTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag, AlphaTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
CallSDNode *TheCall, unsigned CallConv, bool isVarArg,
unsigned CallingConv, const SmallVectorImpl<ISD::InputArg> &Ins,
SelectionDAG &DAG) { DebugLoc dl, SelectionDAG &DAG,
bool isVarArg = TheCall->isVarArg(); SmallVectorImpl<SDValue> &InVals) {
DebugLoc dl = TheCall->getDebugLoc();
// Assign locations to each value returned by this call. // Assign locations to each value returned by this call.
SmallVector<CCValAssign, 16> RVLocs; SmallVector<CCValAssign, 16> RVLocs;
CCState CCInfo(CallingConv, isVarArg, getTargetMachine(), RVLocs, CCState CCInfo(CallConv, isVarArg, getTargetMachine(), RVLocs,
*DAG.getContext()); *DAG.getContext());
CCInfo.AnalyzeCallResult(TheCall, RetCC_Alpha); CCInfo.AnalyzeCallResult(Ins, RetCC_Alpha);
SmallVector<SDValue, 8> ResultVals;
// Copy all of the result registers out of their specified physreg. // Copy all of the result registers out of their specified physreg.
for (unsigned i = 0; i != RVLocs.size(); ++i) { for (unsigned i = 0; i != RVLocs.size(); ++i) {
@ -402,33 +396,31 @@ AlphaTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
if (VA.getLocInfo() != CCValAssign::Full) if (VA.getLocInfo() != CCValAssign::Full)
RetValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), RetValue); RetValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), RetValue);
ResultVals.push_back(RetValue); InVals.push_back(RetValue);
} }
ResultVals.push_back(Chain); return Chain;
// Merge everything together with a MERGE_VALUES node.
return DAG.getNode(ISD::MERGE_VALUES, dl, TheCall->getVTList(),
&ResultVals[0], ResultVals.size()).getNode();
} }
static SDValue LowerFORMAL_ARGUMENTS(SDValue Op, SelectionDAG &DAG, SDValue
int &VarArgsBase, AlphaTargetLowering::LowerFormalArguments(SDValue Chain,
int &VarArgsOffset) { unsigned CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg>
&Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) {
MachineFunction &MF = DAG.getMachineFunction(); MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo *MFI = MF.getFrameInfo(); MachineFrameInfo *MFI = MF.getFrameInfo();
std::vector<SDValue> ArgValues;
SDValue Root = Op.getOperand(0);
DebugLoc dl = Op.getDebugLoc();
unsigned args_int[] = { unsigned args_int[] = {
Alpha::R16, Alpha::R17, Alpha::R18, Alpha::R19, Alpha::R20, Alpha::R21}; Alpha::R16, Alpha::R17, Alpha::R18, Alpha::R19, Alpha::R20, Alpha::R21};
unsigned args_float[] = { unsigned args_float[] = {
Alpha::F16, Alpha::F17, Alpha::F18, Alpha::F19, Alpha::F20, Alpha::F21}; Alpha::F16, Alpha::F17, Alpha::F18, Alpha::F19, Alpha::F20, Alpha::F21};
for (unsigned ArgNo = 0, e = Op.getNode()->getNumValues()-1; ArgNo != e; ++ArgNo) { for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) {
SDValue argt; SDValue argt;
MVT ObjectVT = Op.getValue(ArgNo).getValueType(); MVT ObjectVT = Ins[ArgNo].VT;
SDValue ArgVal; SDValue ArgVal;
if (ArgNo < 6) { if (ArgNo < 6) {
@ -438,17 +430,17 @@ static SDValue LowerFORMAL_ARGUMENTS(SDValue Op, SelectionDAG &DAG,
case MVT::f64: case MVT::f64:
args_float[ArgNo] = AddLiveIn(MF, args_float[ArgNo], args_float[ArgNo] = AddLiveIn(MF, args_float[ArgNo],
&Alpha::F8RCRegClass); &Alpha::F8RCRegClass);
ArgVal = DAG.getCopyFromReg(Root, dl, args_float[ArgNo], ObjectVT); ArgVal = DAG.getCopyFromReg(Chain, dl, args_float[ArgNo], ObjectVT);
break; break;
case MVT::f32: case MVT::f32:
args_float[ArgNo] = AddLiveIn(MF, args_float[ArgNo], args_float[ArgNo] = AddLiveIn(MF, args_float[ArgNo],
&Alpha::F4RCRegClass); &Alpha::F4RCRegClass);
ArgVal = DAG.getCopyFromReg(Root, dl, args_float[ArgNo], ObjectVT); ArgVal = DAG.getCopyFromReg(Chain, dl, args_float[ArgNo], ObjectVT);
break; break;
case MVT::i64: case MVT::i64:
args_int[ArgNo] = AddLiveIn(MF, args_int[ArgNo], args_int[ArgNo] = AddLiveIn(MF, args_int[ArgNo],
&Alpha::GPRCRegClass); &Alpha::GPRCRegClass);
ArgVal = DAG.getCopyFromReg(Root, dl, args_int[ArgNo], MVT::i64); ArgVal = DAG.getCopyFromReg(Chain, dl, args_int[ArgNo], MVT::i64);
break; break;
} }
} else { //more args } else { //more args
@ -458,59 +450,58 @@ static SDValue LowerFORMAL_ARGUMENTS(SDValue Op, SelectionDAG &DAG,
// Create the SelectionDAG nodes corresponding to a load // Create the SelectionDAG nodes corresponding to a load
//from this parameter //from this parameter
SDValue FIN = DAG.getFrameIndex(FI, MVT::i64); SDValue FIN = DAG.getFrameIndex(FI, MVT::i64);
ArgVal = DAG.getLoad(ObjectVT, dl, Root, FIN, NULL, 0); ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, NULL, 0);
} }
ArgValues.push_back(ArgVal); InVals.push_back(ArgVal);
} }
// If the functions takes variable number of arguments, copy all regs to stack // If the functions takes variable number of arguments, copy all regs to stack
bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue() != 0;
if (isVarArg) { if (isVarArg) {
VarArgsOffset = (Op.getNode()->getNumValues()-1) * 8; VarArgsOffset = Ins.size() * 8;
std::vector<SDValue> LS; std::vector<SDValue> LS;
for (int i = 0; i < 6; ++i) { for (int i = 0; i < 6; ++i) {
if (TargetRegisterInfo::isPhysicalRegister(args_int[i])) if (TargetRegisterInfo::isPhysicalRegister(args_int[i]))
args_int[i] = AddLiveIn(MF, args_int[i], &Alpha::GPRCRegClass); args_int[i] = AddLiveIn(MF, args_int[i], &Alpha::GPRCRegClass);
SDValue argt = DAG.getCopyFromReg(Root, dl, args_int[i], MVT::i64); SDValue argt = DAG.getCopyFromReg(Chain, dl, args_int[i], MVT::i64);
int FI = MFI->CreateFixedObject(8, -8 * (6 - i)); int FI = MFI->CreateFixedObject(8, -8 * (6 - i));
if (i == 0) VarArgsBase = FI; if (i == 0) VarArgsBase = FI;
SDValue SDFI = DAG.getFrameIndex(FI, MVT::i64); SDValue SDFI = DAG.getFrameIndex(FI, MVT::i64);
LS.push_back(DAG.getStore(Root, dl, argt, SDFI, NULL, 0)); LS.push_back(DAG.getStore(Chain, dl, argt, SDFI, NULL, 0));
if (TargetRegisterInfo::isPhysicalRegister(args_float[i])) if (TargetRegisterInfo::isPhysicalRegister(args_float[i]))
args_float[i] = AddLiveIn(MF, args_float[i], &Alpha::F8RCRegClass); args_float[i] = AddLiveIn(MF, args_float[i], &Alpha::F8RCRegClass);
argt = DAG.getCopyFromReg(Root, dl, args_float[i], MVT::f64); argt = DAG.getCopyFromReg(Chain, dl, args_float[i], MVT::f64);
FI = MFI->CreateFixedObject(8, - 8 * (12 - i)); FI = MFI->CreateFixedObject(8, - 8 * (12 - i));
SDFI = DAG.getFrameIndex(FI, MVT::i64); SDFI = DAG.getFrameIndex(FI, MVT::i64);
LS.push_back(DAG.getStore(Root, dl, argt, SDFI, NULL, 0)); LS.push_back(DAG.getStore(Chain, dl, argt, SDFI, NULL, 0));
} }
//Set up a token factor with all the stack traffic //Set up a token factor with all the stack traffic
Root = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &LS[0], LS.size()); Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &LS[0], LS.size());
} }
ArgValues.push_back(Root); return Chain;
// Return the new list of results.
return DAG.getNode(ISD::MERGE_VALUES, dl, Op.getNode()->getVTList(),
&ArgValues[0], ArgValues.size());
} }
static SDValue LowerRET(SDValue Op, SelectionDAG &DAG) { SDValue
DebugLoc dl = Op.getDebugLoc(); AlphaTargetLowering::LowerReturn(SDValue Chain,
SDValue Copy = DAG.getCopyToReg(Op.getOperand(0), dl, Alpha::R26, unsigned CallConv, bool isVarArg,
DAG.getNode(AlphaISD::GlobalRetAddr, const SmallVectorImpl<ISD::OutputArg> &Outs,
DebugLoc::getUnknownLoc(), DebugLoc dl, SelectionDAG &DAG) {
MVT::i64),
SDValue()); SDValue Copy = DAG.getCopyToReg(Chain, dl, Alpha::R26,
switch (Op.getNumOperands()) { DAG.getNode(AlphaISD::GlobalRetAddr,
DebugLoc::getUnknownLoc(),
MVT::i64),
SDValue());
switch (Outs.size()) {
default: default:
llvm_unreachable("Do not know how to return this many arguments!"); llvm_unreachable("Do not know how to return this many arguments!");
case 1: case 0:
break; break;
//return SDValue(); // ret void is legal //return SDValue(); // ret void is legal
case 3: { case 1: {
MVT ArgVT = Op.getOperand(1).getValueType(); MVT ArgVT = Outs[0].Val.getValueType();
unsigned ArgReg; unsigned ArgReg;
if (ArgVT.isInteger()) if (ArgVT.isInteger())
ArgReg = Alpha::R0; ArgReg = Alpha::R0;
@ -519,13 +510,13 @@ static SDValue LowerRET(SDValue Op, SelectionDAG &DAG) {
ArgReg = Alpha::F0; ArgReg = Alpha::F0;
} }
Copy = DAG.getCopyToReg(Copy, dl, ArgReg, Copy = DAG.getCopyToReg(Copy, dl, ArgReg,
Op.getOperand(1), Copy.getValue(1)); Outs[0].Val, Copy.getValue(1));
if (DAG.getMachineFunction().getRegInfo().liveout_empty()) if (DAG.getMachineFunction().getRegInfo().liveout_empty())
DAG.getMachineFunction().getRegInfo().addLiveOut(ArgReg); DAG.getMachineFunction().getRegInfo().addLiveOut(ArgReg);
break; break;
} }
case 5: { case 2: {
MVT ArgVT = Op.getOperand(1).getValueType(); MVT ArgVT = Outs[0].Val.getValueType();
unsigned ArgReg1, ArgReg2; unsigned ArgReg1, ArgReg2;
if (ArgVT.isInteger()) { if (ArgVT.isInteger()) {
ArgReg1 = Alpha::R0; ArgReg1 = Alpha::R0;
@ -536,13 +527,13 @@ static SDValue LowerRET(SDValue Op, SelectionDAG &DAG) {
ArgReg2 = Alpha::F1; ArgReg2 = Alpha::F1;
} }
Copy = DAG.getCopyToReg(Copy, dl, ArgReg1, Copy = DAG.getCopyToReg(Copy, dl, ArgReg1,
Op.getOperand(1), Copy.getValue(1)); Outs[0].Val, Copy.getValue(1));
if (std::find(DAG.getMachineFunction().getRegInfo().liveout_begin(), if (std::find(DAG.getMachineFunction().getRegInfo().liveout_begin(),
DAG.getMachineFunction().getRegInfo().liveout_end(), ArgReg1) DAG.getMachineFunction().getRegInfo().liveout_end(), ArgReg1)
== DAG.getMachineFunction().getRegInfo().liveout_end()) == DAG.getMachineFunction().getRegInfo().liveout_end())
DAG.getMachineFunction().getRegInfo().addLiveOut(ArgReg1); DAG.getMachineFunction().getRegInfo().addLiveOut(ArgReg1);
Copy = DAG.getCopyToReg(Copy, dl, ArgReg2, Copy = DAG.getCopyToReg(Copy, dl, ArgReg2,
Op.getOperand(3), Copy.getValue(1)); Outs[1].Val, Copy.getValue(1));
if (std::find(DAG.getMachineFunction().getRegInfo().liveout_begin(), if (std::find(DAG.getMachineFunction().getRegInfo().liveout_begin(),
DAG.getMachineFunction().getRegInfo().liveout_end(), ArgReg2) DAG.getMachineFunction().getRegInfo().liveout_end(), ArgReg2)
== DAG.getMachineFunction().getRegInfo().liveout_end()) == DAG.getMachineFunction().getRegInfo().liveout_end())
@ -589,11 +580,6 @@ SDValue AlphaTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) {
DebugLoc dl = Op.getDebugLoc(); DebugLoc dl = Op.getDebugLoc();
switch (Op.getOpcode()) { switch (Op.getOpcode()) {
default: llvm_unreachable("Wasn't expecting to be able to lower this!"); default: llvm_unreachable("Wasn't expecting to be able to lower this!");
case ISD::FORMAL_ARGUMENTS: return LowerFORMAL_ARGUMENTS(Op, DAG,
VarArgsBase,
VarArgsOffset);
case ISD::CALL: return LowerCALL(Op, DAG);
case ISD::RET: return LowerRET(Op,DAG);
case ISD::JumpTable: return LowerJumpTable(Op, DAG); case ISD::JumpTable: return LowerJumpTable(Op, DAG);
case ISD::INTRINSIC_WO_CHAIN: { case ISD::INTRINSIC_WO_CHAIN: {

View File

@ -82,9 +82,11 @@ namespace llvm {
// Friendly names for dumps // Friendly names for dumps
const char *getTargetNodeName(unsigned Opcode) const; const char *getTargetNodeName(unsigned Opcode) const;
SDValue LowerCALL(SDValue Op, SelectionDAG &DAG); SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
SDNode *LowerCallResult(SDValue Chain, SDValue InFlag, CallSDNode *TheCall, unsigned CallConv, bool isVarArg,
unsigned CallingConv, SelectionDAG &DAG); const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals);
ConstraintType getConstraintType(const std::string &Constraint) const; ConstraintType getConstraintType(const std::string &Constraint) const;
@ -107,6 +109,26 @@ namespace llvm {
void LowerVAARG(SDNode *N, SDValue &Chain, SDValue &DataPtr, void LowerVAARG(SDNode *N, SDValue &Chain, SDValue &DataPtr,
SelectionDAG &DAG); SelectionDAG &DAG);
virtual SDValue
LowerFormalArguments(SDValue Chain,
unsigned CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals);
virtual SDValue
LowerCall(SDValue Chain, SDValue Callee,
unsigned CallConv, bool isVarArg, bool isTailCall,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals);
virtual SDValue
LowerReturn(SDValue Chain,
unsigned CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
DebugLoc dl, SelectionDAG &DAG);
}; };
} }

View File

@ -123,9 +123,6 @@ BlackfinTargetLowering::BlackfinTargetLowering(TargetMachine &TM)
setOperationAction(ISD::VAEND, MVT::Other, Expand); setOperationAction(ISD::VAEND, MVT::Other, Expand);
setOperationAction(ISD::STACKSAVE, MVT::Other, Expand); setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand); setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
// RET must be custom lowered, to meet ABI requirements
setOperationAction(ISD::RET, MVT::Other, Custom);
} }
const char *BlackfinTargetLowering::getTargetNodeName(unsigned Opcode) const { const char *BlackfinTargetLowering::getTargetNodeName(unsigned Opcode) const {
@ -160,27 +157,23 @@ SDValue BlackfinTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) {
return DAG.getNode(BFISD::Wrapper, DL, MVT::i32, Op); return DAG.getNode(BFISD::Wrapper, DL, MVT::i32, Op);
} }
// FORMAL_ARGUMENTS(CHAIN, CC#, ISVARARG, FLAG0, ..., FLAGn) - This node SDValue
// represents the formal arguments for a function. CC# is a Constant value BlackfinTargetLowering::LowerFormalArguments(SDValue Chain,
// indicating the calling convention of the function, and ISVARARG is a unsigned CallConv, bool isVarArg,
// flag that indicates whether the function is varargs or not. This node const SmallVectorImpl<ISD::InputArg>
// has one result value for each incoming argument, plus one for the output &Ins,
// chain. DebugLoc dl, SelectionDAG &DAG,
SDValue BlackfinTargetLowering::LowerFORMAL_ARGUMENTS(SDValue Op, SmallVectorImpl<SDValue> &InVals) {
SelectionDAG &DAG) {
DebugLoc dl = Op.getDebugLoc();
SDValue Root = Op.getOperand(0);
unsigned CC = Op.getConstantOperandVal(1);
bool isVarArg = Op.getConstantOperandVal(2);
MachineFunction &MF = DAG.getMachineFunction(); MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo *MFI = MF.getFrameInfo(); MachineFrameInfo *MFI = MF.getFrameInfo();
SmallVector<CCValAssign, 16> ArgLocs; SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs, *DAG.getContext()); CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
ArgLocs, *DAG.getContext());
CCInfo.AllocateStack(12, 4); // ABI requires 12 bytes stack space CCInfo.AllocateStack(12, 4); // ABI requires 12 bytes stack space
CCInfo.AnalyzeFormalArguments(Op.getNode(), CC_Blackfin); CCInfo.AnalyzeFormalArguments(Ins, CC_Blackfin);
SmallVector<SDValue, 8> ArgValues;
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
CCValAssign &VA = ArgLocs[i]; CCValAssign &VA = ArgLocs[i];
@ -193,7 +186,7 @@ SDValue BlackfinTargetLowering::LowerFORMAL_ARGUMENTS(SDValue Op,
unsigned Reg = MF.getRegInfo().createVirtualRegister(RC); unsigned Reg = MF.getRegInfo().createVirtualRegister(RC);
MF.getRegInfo().addLiveIn(VA.getLocReg(), Reg); MF.getRegInfo().addLiveIn(VA.getLocReg(), Reg);
SDValue ArgValue = DAG.getCopyFromReg(Root, dl, Reg, RegVT); SDValue ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);
// If this is an 8 or 16-bit value, it is really passed promoted to 32 // If this is an 8 or 16-bit value, it is really passed promoted to 32
// bits. Insert an assert[sz]ext to capture this, then truncate to the // bits. Insert an assert[sz]ext to capture this, then truncate to the
@ -208,35 +201,34 @@ SDValue BlackfinTargetLowering::LowerFORMAL_ARGUMENTS(SDValue Op,
if (VA.getLocInfo() != CCValAssign::Full) if (VA.getLocInfo() != CCValAssign::Full)
ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue); ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
ArgValues.push_back(ArgValue); InVals.push_back(ArgValue);
} else { } else {
assert(VA.isMemLoc() && "CCValAssign must be RegLoc or MemLoc"); assert(VA.isMemLoc() && "CCValAssign must be RegLoc or MemLoc");
unsigned ObjSize = VA.getLocVT().getStoreSizeInBits()/8; unsigned ObjSize = VA.getLocVT().getStoreSizeInBits()/8;
int FI = MFI->CreateFixedObject(ObjSize, VA.getLocMemOffset()); int FI = MFI->CreateFixedObject(ObjSize, VA.getLocMemOffset());
SDValue FIN = DAG.getFrameIndex(FI, MVT::i32); SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
ArgValues.push_back(DAG.getLoad(VA.getValVT(), dl, Root, FIN, NULL, 0)); InVals.push_back(DAG.getLoad(VA.getValVT(), dl, Chain, FIN, NULL, 0));
} }
} }
ArgValues.push_back(Root); return Chain;
// Return the new list of results.
return DAG.getNode(ISD::MERGE_VALUES, dl, Op.getNode()->getVTList(),
&ArgValues[0], ArgValues.size()).getValue(Op.getResNo());
} }
SDValue BlackfinTargetLowering::LowerRET(SDValue Op, SelectionDAG &DAG) { SDValue
BlackfinTargetLowering::LowerReturn(SDValue Chain,
unsigned CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
DebugLoc dl, SelectionDAG &DAG) {
// CCValAssign - represent the assignment of the return value to locations. // CCValAssign - represent the assignment of the return value to locations.
SmallVector<CCValAssign, 16> RVLocs; SmallVector<CCValAssign, 16> RVLocs;
unsigned CC = DAG.getMachineFunction().getFunction()->getCallingConv();
bool isVarArg = DAG.getMachineFunction().getFunction()->isVarArg();
DebugLoc dl = Op.getDebugLoc();
// CCState - Info about the registers and stack slot. // CCState - Info about the registers and stack slot.
CCState CCInfo(CC, isVarArg, DAG.getTarget(), RVLocs, *DAG.getContext()); CCState CCInfo(CallConv, isVarArg, DAG.getTarget(),
RVLocs, *DAG.getContext());
// Analize return values of ISD::RET // Analize return values.
CCInfo.AnalyzeReturn(Op.getNode(), RetCC_Blackfin); CCInfo.AnalyzeReturn(Outs, RetCC_Blackfin);
// If this is the first return lowered for this function, add the regs to the // If this is the first return lowered for this function, add the regs to the
// liveout set for the function. // liveout set for the function.
@ -245,14 +237,13 @@ SDValue BlackfinTargetLowering::LowerRET(SDValue Op, SelectionDAG &DAG) {
DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg()); DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg());
} }
SDValue Chain = Op.getOperand(0);
SDValue Flag; SDValue Flag;
// Copy the result values into the output registers. // Copy the result values into the output registers.
for (unsigned i = 0; i != RVLocs.size(); ++i) { for (unsigned i = 0; i != RVLocs.size(); ++i) {
CCValAssign &VA = RVLocs[i]; CCValAssign &VA = RVLocs[i];
assert(VA.isRegLoc() && "Can only return in registers!"); assert(VA.isRegLoc() && "Can only return in registers!");
SDValue Opi = Op.getOperand(i*2+1); SDValue Opi = Outs[i].Val;
// Expand to i32 if necessary // Expand to i32 if necessary
switch (VA.getLocInfo()) { switch (VA.getLocInfo()) {
@ -268,8 +259,6 @@ SDValue BlackfinTargetLowering::LowerRET(SDValue Op, SelectionDAG &DAG) {
Opi = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Opi); Opi = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Opi);
break; break;
} }
// ISD::RET => ret chain, (regnum1,val1), ...
// So i*2+1 index only the regnums.
Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Opi, SDValue()); Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Opi, SDValue());
// Guarantee that all emitted copies are stuck together with flags. // Guarantee that all emitted copies are stuck together with flags.
Flag = Chain.getValue(1); Flag = Chain.getValue(1);
@ -282,20 +271,21 @@ SDValue BlackfinTargetLowering::LowerRET(SDValue Op, SelectionDAG &DAG) {
} }
} }
SDValue BlackfinTargetLowering::LowerCALL(SDValue Op, SelectionDAG &DAG) { SDValue
CallSDNode *TheCall = cast<CallSDNode>(Op.getNode()); BlackfinTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
unsigned CallingConv = TheCall->getCallingConv(); unsigned CallConv, bool isVarArg,
SDValue Chain = TheCall->getChain(); bool isTailCall,
SDValue Callee = TheCall->getCallee(); const SmallVectorImpl<ISD::OutputArg> &Outs,
bool isVarArg = TheCall->isVarArg(); const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl = TheCall->getDebugLoc(); DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) {
// Analyze operands of the call, assigning locations to each operand. // Analyze operands of the call, assigning locations to each operand.
SmallVector<CCValAssign, 16> ArgLocs; SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CallingConv, isVarArg, DAG.getTarget(), ArgLocs, CCState CCInfo(CallConv, isVarArg, DAG.getTarget(), ArgLocs,
*DAG.getContext()); *DAG.getContext());
CCInfo.AllocateStack(12, 4); // ABI requires 12 bytes stack space CCInfo.AllocateStack(12, 4); // ABI requires 12 bytes stack space
CCInfo.AnalyzeCallOperands(TheCall, CC_Blackfin); CCInfo.AnalyzeCallOperands(Outs, CC_Blackfin);
// Get the size of the outgoing arguments stack space requirement. // Get the size of the outgoing arguments stack space requirement.
unsigned ArgsSize = CCInfo.getNextStackOffset(); unsigned ArgsSize = CCInfo.getNextStackOffset();
@ -307,9 +297,7 @@ SDValue BlackfinTargetLowering::LowerCALL(SDValue Op, SelectionDAG &DAG) {
// Walk the register/memloc assignments, inserting copies/loads. // Walk the register/memloc assignments, inserting copies/loads.
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
CCValAssign &VA = ArgLocs[i]; CCValAssign &VA = ArgLocs[i];
SDValue Arg = Outs[i].Val;
// Arguments start after the 5 first operands of ISD::CALL
SDValue Arg = TheCall->getArg(i);
// Promote the value if needed. // Promote the value if needed.
switch (VA.getLocInfo()) { switch (VA.getLocInfo()) {
@ -383,11 +371,10 @@ SDValue BlackfinTargetLowering::LowerCALL(SDValue Op, SelectionDAG &DAG) {
// Assign locations to each value returned by this call. // Assign locations to each value returned by this call.
SmallVector<CCValAssign, 16> RVLocs; SmallVector<CCValAssign, 16> RVLocs;
CCState RVInfo(CallingConv, isVarArg, DAG.getTarget(), RVLocs, CCState RVInfo(CallConv, isVarArg, DAG.getTarget(), RVLocs,
*DAG.getContext()); *DAG.getContext());
RVInfo.AnalyzeCallResult(TheCall, RetCC_Blackfin); RVInfo.AnalyzeCallResult(Ins, RetCC_Blackfin);
SmallVector<SDValue, 8> ResultVals;
// Copy all of the result registers out of their specified physreg. // Copy all of the result registers out of their specified physreg.
for (unsigned i = 0; i != RVLocs.size(); ++i) { for (unsigned i = 0; i != RVLocs.size(); ++i) {
@ -417,16 +404,10 @@ SDValue BlackfinTargetLowering::LowerCALL(SDValue Op, SelectionDAG &DAG) {
// Truncate to valtype // Truncate to valtype
if (RV.getLocInfo() != CCValAssign::Full) if (RV.getLocInfo() != CCValAssign::Full)
Val = DAG.getNode(ISD::TRUNCATE, dl, RV.getValVT(), Val); Val = DAG.getNode(ISD::TRUNCATE, dl, RV.getValVT(), Val);
ResultVals.push_back(Val); InVals.push_back(Val);
} }
ResultVals.push_back(Chain); return Chain;
// Merge everything together with a MERGE_VALUES node.
SDValue merge = DAG.getNode(ISD::MERGE_VALUES, dl,
TheCall->getVTList(), &ResultVals[0],
ResultVals.size());
return merge;
} }
// Expansion of ADDE / SUBE. This is a bit involved since blackfin doesn't have // Expansion of ADDE / SUBE. This is a bit involved since blackfin doesn't have
@ -477,9 +458,6 @@ SDValue BlackfinTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) {
// Frame & Return address. Currently unimplemented // Frame & Return address. Currently unimplemented
case ISD::FRAMEADDR: return SDValue(); case ISD::FRAMEADDR: return SDValue();
case ISD::RETURNADDR: return SDValue(); case ISD::RETURNADDR: return SDValue();
case ISD::FORMAL_ARGUMENTS: return LowerFORMAL_ARGUMENTS(Op, DAG);
case ISD::CALL: return LowerCALL(Op, DAG);
case ISD::RET: return LowerRET(Op, DAG);
case ISD::ADDE: case ISD::ADDE:
case ISD::SUBE: return LowerADDE(Op, DAG); case ISD::SUBE: return LowerADDE(Op, DAG);
} }

View File

@ -51,10 +51,27 @@ namespace llvm {
private: private:
SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG); SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG);
SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG); SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG);
SDValue LowerFORMAL_ARGUMENTS(SDValue Op, SelectionDAG &DAG);
SDValue LowerRET(SDValue Op, SelectionDAG &DAG);
SDValue LowerCALL(SDValue Op, SelectionDAG &DAG);
SDValue LowerADDE(SDValue Op, SelectionDAG &DAG); SDValue LowerADDE(SDValue Op, SelectionDAG &DAG);
virtual SDValue
LowerFormalArguments(SDValue Chain,
unsigned CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals);
virtual SDValue
LowerCall(SDValue Chain, SDValue Callee,
unsigned CallConv, bool isVarArg, bool isTailCall,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals);
virtual SDValue
LowerReturn(SDValue Chain,
unsigned CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
DebugLoc dl, SelectionDAG &DAG);
}; };
} // end namespace llvm } // end namespace llvm

View File

@ -115,7 +115,9 @@ namespace {
const Type *RetTy = Op.getNode()->getValueType(0).getTypeForMVT(); const Type *RetTy = Op.getNode()->getValueType(0).getTypeForMVT();
std::pair<SDValue, SDValue> CallInfo = std::pair<SDValue, SDValue> CallInfo =
TLI.LowerCallTo(InChain, RetTy, isSigned, !isSigned, false, false, TLI.LowerCallTo(InChain, RetTy, isSigned, !isSigned, false, false,
0, CallingConv::C, false, Callee, Args, DAG, 0, CallingConv::C, false,
/*isReturnValueUsed=*/true,
Callee, Args, DAG,
Op.getDebugLoc()); Op.getDebugLoc());
return CallInfo.first; return CallInfo.first;
@ -396,9 +398,6 @@ SPUTargetLowering::SPUTargetLowering(SPUTargetMachine &TM)
setOperationAction(ISD::JumpTable, VT, Custom); setOperationAction(ISD::JumpTable, VT, Custom);
} }
// RET must be custom lowered, to meet ABI requirements
setOperationAction(ISD::RET, MVT::Other, Custom);
// VASTART needs to be custom lowered to use the VarArgsFrameIndex // VASTART needs to be custom lowered to use the VarArgsFrameIndex
setOperationAction(ISD::VASTART , MVT::Other, Custom); setOperationAction(ISD::VASTART , MVT::Other, Custom);
@ -1008,16 +1007,17 @@ LowerConstantFP(SDValue Op, SelectionDAG &DAG) {
return SDValue(); return SDValue();
} }
static SDValue SDValue
LowerFORMAL_ARGUMENTS(SDValue Op, SelectionDAG &DAG, int &VarArgsFrameIndex) SPUTargetLowering::LowerFormalArguments(SDValue Chain,
{ unsigned CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg>
&Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) {
MachineFunction &MF = DAG.getMachineFunction(); MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo *MFI = MF.getFrameInfo(); MachineFrameInfo *MFI = MF.getFrameInfo();
MachineRegisterInfo &RegInfo = MF.getRegInfo(); MachineRegisterInfo &RegInfo = MF.getRegInfo();
SmallVector<SDValue, 48> ArgValues;
SDValue Root = Op.getOperand(0);
bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue() != 0;
DebugLoc dl = Op.getDebugLoc();
const unsigned *ArgRegs = SPURegisterInfo::getArgRegs(); const unsigned *ArgRegs = SPURegisterInfo::getArgRegs();
const unsigned NumArgRegs = SPURegisterInfo::getNumArgRegs(); const unsigned NumArgRegs = SPURegisterInfo::getNumArgRegs();
@ -1029,9 +1029,8 @@ LowerFORMAL_ARGUMENTS(SDValue Op, SelectionDAG &DAG, int &VarArgsFrameIndex)
MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
// Add DAG nodes to load the arguments or copy them out of registers. // Add DAG nodes to load the arguments or copy them out of registers.
for (unsigned ArgNo = 0, e = Op.getNode()->getNumValues() - 1; for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) {
ArgNo != e; ++ArgNo) { MVT ObjectVT = Ins[ArgNo].VT;
MVT ObjectVT = Op.getValue(ArgNo).getValueType();
unsigned ObjSize = ObjectVT.getSizeInBits()/8; unsigned ObjSize = ObjectVT.getSizeInBits()/8;
SDValue ArgVal; SDValue ArgVal;
@ -1042,7 +1041,7 @@ LowerFORMAL_ARGUMENTS(SDValue Op, SelectionDAG &DAG, int &VarArgsFrameIndex)
default: { default: {
std::string msg; std::string msg;
raw_string_ostream Msg(msg); raw_string_ostream Msg(msg);
Msg << "LowerFORMAL_ARGUMENTS Unhandled argument type: " Msg << "LowerFormalArguments Unhandled argument type: "
<< ObjectVT.getMVTString(); << ObjectVT.getMVTString();
llvm_report_error(Msg.str()); llvm_report_error(Msg.str());
} }
@ -1079,7 +1078,7 @@ LowerFORMAL_ARGUMENTS(SDValue Op, SelectionDAG &DAG, int &VarArgsFrameIndex)
unsigned VReg = RegInfo.createVirtualRegister(ArgRegClass); unsigned VReg = RegInfo.createVirtualRegister(ArgRegClass);
RegInfo.addLiveIn(ArgRegs[ArgRegIdx], VReg); RegInfo.addLiveIn(ArgRegs[ArgRegIdx], VReg);
ArgVal = DAG.getCopyFromReg(Root, dl, VReg, ObjectVT); ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
++ArgRegIdx; ++ArgRegIdx;
} else { } else {
// We need to load the argument to a virtual register if we determined // We need to load the argument to a virtual register if we determined
@ -1087,13 +1086,13 @@ LowerFORMAL_ARGUMENTS(SDValue Op, SelectionDAG &DAG, int &VarArgsFrameIndex)
// or we're forced to do vararg // or we're forced to do vararg
int FI = MFI->CreateFixedObject(ObjSize, ArgOffset); int FI = MFI->CreateFixedObject(ObjSize, ArgOffset);
SDValue FIN = DAG.getFrameIndex(FI, PtrVT); SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
ArgVal = DAG.getLoad(ObjectVT, dl, Root, FIN, NULL, 0); ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, NULL, 0);
ArgOffset += StackSlotSize; ArgOffset += StackSlotSize;
} }
ArgValues.push_back(ArgVal); InVals.push_back(ArgVal);
// Update the chain // Update the chain
Root = ArgVal.getOperand(0); Chain = ArgVal.getOperand(0);
} }
// vararg handling: // vararg handling:
@ -1108,23 +1107,19 @@ LowerFORMAL_ARGUMENTS(SDValue Op, SelectionDAG &DAG, int &VarArgsFrameIndex)
VarArgsFrameIndex = MFI->CreateFixedObject(StackSlotSize, ArgOffset); VarArgsFrameIndex = MFI->CreateFixedObject(StackSlotSize, ArgOffset);
SDValue FIN = DAG.getFrameIndex(VarArgsFrameIndex, PtrVT); SDValue FIN = DAG.getFrameIndex(VarArgsFrameIndex, PtrVT);
SDValue ArgVal = DAG.getRegister(ArgRegs[ArgRegIdx], MVT::v16i8); SDValue ArgVal = DAG.getRegister(ArgRegs[ArgRegIdx], MVT::v16i8);
SDValue Store = DAG.getStore(Root, dl, ArgVal, FIN, NULL, 0); SDValue Store = DAG.getStore(Chain, dl, ArgVal, FIN, NULL, 0);
Root = Store.getOperand(0); Chain = Store.getOperand(0);
MemOps.push_back(Store); MemOps.push_back(Store);
// Increment address by stack slot size for the next stored argument // Increment address by stack slot size for the next stored argument
ArgOffset += StackSlotSize; ArgOffset += StackSlotSize;
} }
if (!MemOps.empty()) if (!MemOps.empty())
Root = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
&MemOps[0], MemOps.size()); &MemOps[0], MemOps.size());
} }
ArgValues.push_back(Root); return Chain;
// Return the new list of results.
return DAG.getNode(ISD::MERGE_VALUES, dl, Op.getNode()->getVTList(),
&ArgValues[0], ArgValues.size());
} }
/// isLSAAddress - Return the immediate to use if the specified /// isLSAAddress - Return the immediate to use if the specified
@ -1141,16 +1136,20 @@ static SDNode *isLSAAddress(SDValue Op, SelectionDAG &DAG) {
return DAG.getConstant((int)C->getZExtValue() >> 2, MVT::i32).getNode(); return DAG.getConstant((int)C->getZExtValue() >> 2, MVT::i32).getNode();
} }
static SDValue SDValue
LowerCALL(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) { SPUTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
CallSDNode *TheCall = cast<CallSDNode>(Op.getNode()); unsigned CallConv, bool isVarArg,
SDValue Chain = TheCall->getChain(); bool isTailCall,
SDValue Callee = TheCall->getCallee(); const SmallVectorImpl<ISD::OutputArg> &Outs,
unsigned NumOps = TheCall->getNumArgs(); const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) {
const SPUSubtarget *ST = SPUTM.getSubtargetImpl();
unsigned NumOps = Outs.size();
unsigned StackSlotSize = SPUFrameInfo::stackSlotSize(); unsigned StackSlotSize = SPUFrameInfo::stackSlotSize();
const unsigned *ArgRegs = SPURegisterInfo::getArgRegs(); const unsigned *ArgRegs = SPURegisterInfo::getArgRegs();
const unsigned NumArgRegs = SPURegisterInfo::getNumArgRegs(); const unsigned NumArgRegs = SPURegisterInfo::getNumArgRegs();
DebugLoc dl = TheCall->getDebugLoc();
// Handy pointer type // Handy pointer type
MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
@ -1176,7 +1175,7 @@ LowerCALL(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
SmallVector<SDValue, 8> MemOpChains; SmallVector<SDValue, 8> MemOpChains;
for (unsigned i = 0; i != NumOps; ++i) { for (unsigned i = 0; i != NumOps; ++i) {
SDValue Arg = TheCall->getArg(i); SDValue Arg = Outs[i].Val;
// PtrOff will be used to store the current argument to the stack if a // PtrOff will be used to store the current argument to the stack if a
// register cannot be found for it. // register cannot be found for it.
@ -1308,50 +1307,46 @@ LowerCALL(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumStackBytes, true), Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumStackBytes, true),
DAG.getIntPtrConstant(0, true), InFlag); DAG.getIntPtrConstant(0, true), InFlag);
if (TheCall->getValueType(0) != MVT::Other) if (!Ins.empty())
InFlag = Chain.getValue(1); InFlag = Chain.getValue(1);
SDValue ResultVals[3]; // If the function returns void, just return the chain.
unsigned NumResults = 0; if (Ins.empty())
return Chain;
// If the call has results, copy the values out of the ret val registers. // If the call has results, copy the values out of the ret val registers.
switch (TheCall->getValueType(0).getSimpleVT()) { switch (Ins[0].VT.getSimpleVT()) {
default: llvm_unreachable("Unexpected ret value!"); default: llvm_unreachable("Unexpected ret value!");
case MVT::Other: break; case MVT::Other: break;
case MVT::i32: case MVT::i32:
if (TheCall->getValueType(1) == MVT::i32) { if (Ins.size() > 1 && Ins[1].VT == MVT::i32) {
Chain = DAG.getCopyFromReg(Chain, dl, SPU::R4, Chain = DAG.getCopyFromReg(Chain, dl, SPU::R4,
MVT::i32, InFlag).getValue(1); MVT::i32, InFlag).getValue(1);
ResultVals[0] = Chain.getValue(0); InVals.push_back(Chain.getValue(0));
Chain = DAG.getCopyFromReg(Chain, dl, SPU::R3, MVT::i32, Chain = DAG.getCopyFromReg(Chain, dl, SPU::R3, MVT::i32,
Chain.getValue(2)).getValue(1); Chain.getValue(2)).getValue(1);
ResultVals[1] = Chain.getValue(0); InVals.push_back(Chain.getValue(0));
NumResults = 2;
} else { } else {
Chain = DAG.getCopyFromReg(Chain, dl, SPU::R3, MVT::i32, Chain = DAG.getCopyFromReg(Chain, dl, SPU::R3, MVT::i32,
InFlag).getValue(1); InFlag).getValue(1);
ResultVals[0] = Chain.getValue(0); InVals.push_back(Chain.getValue(0));
NumResults = 1;
} }
break; break;
case MVT::i64: case MVT::i64:
Chain = DAG.getCopyFromReg(Chain, dl, SPU::R3, MVT::i64, Chain = DAG.getCopyFromReg(Chain, dl, SPU::R3, MVT::i64,
InFlag).getValue(1); InFlag).getValue(1);
ResultVals[0] = Chain.getValue(0); InVals.push_back(Chain.getValue(0));
NumResults = 1;
break; break;
case MVT::i128: case MVT::i128:
Chain = DAG.getCopyFromReg(Chain, dl, SPU::R3, MVT::i128, Chain = DAG.getCopyFromReg(Chain, dl, SPU::R3, MVT::i128,
InFlag).getValue(1); InFlag).getValue(1);
ResultVals[0] = Chain.getValue(0); InVals.push_back(Chain.getValue(0));
NumResults = 1;
break; break;
case MVT::f32: case MVT::f32:
case MVT::f64: case MVT::f64:
Chain = DAG.getCopyFromReg(Chain, dl, SPU::R3, TheCall->getValueType(0), Chain = DAG.getCopyFromReg(Chain, dl, SPU::R3, Ins[0].VT,
InFlag).getValue(1); InFlag).getValue(1);
ResultVals[0] = Chain.getValue(0); InVals.push_back(Chain.getValue(0));
NumResults = 1;
break; break;
case MVT::v2f64: case MVT::v2f64:
case MVT::v2i64: case MVT::v2i64:
@ -1359,31 +1354,25 @@ LowerCALL(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
case MVT::v4i32: case MVT::v4i32:
case MVT::v8i16: case MVT::v8i16:
case MVT::v16i8: case MVT::v16i8:
Chain = DAG.getCopyFromReg(Chain, dl, SPU::R3, TheCall->getValueType(0), Chain = DAG.getCopyFromReg(Chain, dl, SPU::R3, Ins[0].VT,
InFlag).getValue(1); InFlag).getValue(1);
ResultVals[0] = Chain.getValue(0); InVals.push_back(Chain.getValue(0));
NumResults = 1;
break; break;
} }
// If the function returns void, just return the chain. return Chain;
if (NumResults == 0)
return Chain;
// Otherwise, merge everything together with a MERGE_VALUES node.
ResultVals[NumResults++] = Chain;
SDValue Res = DAG.getMergeValues(ResultVals, NumResults, dl);
return Res.getValue(Op.getResNo());
} }
static SDValue SDValue
LowerRET(SDValue Op, SelectionDAG &DAG, TargetMachine &TM) { SPUTargetLowering::LowerReturn(SDValue Chain,
unsigned CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
DebugLoc dl, SelectionDAG &DAG) {
SmallVector<CCValAssign, 16> RVLocs; SmallVector<CCValAssign, 16> RVLocs;
unsigned CC = DAG.getMachineFunction().getFunction()->getCallingConv(); CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
bool isVarArg = DAG.getMachineFunction().getFunction()->isVarArg(); RVLocs, *DAG.getContext());
DebugLoc dl = Op.getDebugLoc(); CCInfo.AnalyzeReturn(Outs, RetCC_SPU);
CCState CCInfo(CC, isVarArg, TM, RVLocs, *DAG.getContext());
CCInfo.AnalyzeReturn(Op.getNode(), RetCC_SPU);
// If this is the first return lowered for this function, add the regs to the // If this is the first return lowered for this function, add the regs to the
// liveout set for the function. // liveout set for the function.
@ -1392,7 +1381,6 @@ LowerRET(SDValue Op, SelectionDAG &DAG, TargetMachine &TM) {
DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg()); DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg());
} }
SDValue Chain = Op.getOperand(0);
SDValue Flag; SDValue Flag;
// Copy the result values into the output registers. // Copy the result values into the output registers.
@ -1400,7 +1388,7 @@ LowerRET(SDValue Op, SelectionDAG &DAG, TargetMachine &TM) {
CCValAssign &VA = RVLocs[i]; CCValAssign &VA = RVLocs[i];
assert(VA.isRegLoc() && "Can only return in registers!"); assert(VA.isRegLoc() && "Can only return in registers!");
Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
Op.getOperand(i*2+1), Flag); Outs[i].Val, Flag);
Flag = Chain.getValue(1); Flag = Chain.getValue(1);
} }
@ -2648,12 +2636,6 @@ SPUTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG)
return LowerJumpTable(Op, DAG, SPUTM.getSubtargetImpl()); return LowerJumpTable(Op, DAG, SPUTM.getSubtargetImpl());
case ISD::ConstantFP: case ISD::ConstantFP:
return LowerConstantFP(Op, DAG); return LowerConstantFP(Op, DAG);
case ISD::FORMAL_ARGUMENTS:
return LowerFORMAL_ARGUMENTS(Op, DAG, VarArgsFrameIndex);
case ISD::CALL:
return LowerCALL(Op, DAG, SPUTM.getSubtargetImpl());
case ISD::RET:
return LowerRET(Op, DAG, getTargetMachine());
// i8, i64 math ops: // i8, i64 math ops:
case ISD::ADD: case ISD::ADD:

View File

@ -150,6 +150,28 @@ namespace llvm {
/// getFunctionAlignment - Return the Log2 alignment of this function. /// getFunctionAlignment - Return the Log2 alignment of this function.
virtual unsigned getFunctionAlignment(const Function *F) const; virtual unsigned getFunctionAlignment(const Function *F) const;
virtual SDValue
LowerFormalArguments(SDValue Chain,
unsigned CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals);
virtual SDValue
LowerCall(SDValue Chain, SDValue Callee,
unsigned CallConv, bool isVarArg,
bool isTailCall,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals);
virtual SDValue
LowerReturn(SDValue Chain,
unsigned CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
DebugLoc dl, SelectionDAG &DAG);
}; };
} }

View File

@ -4430,13 +4430,6 @@ def : Pat<(v4i32 v4i32Imm:$imm),
def : Pat<(i8 imm:$imm), def : Pat<(i8 imm:$imm),
(ILHr8 imm:$imm)>; (ILHr8 imm:$imm)>;
//===----------------------------------------------------------------------===//
// Call instruction patterns:
//===----------------------------------------------------------------------===//
// Return void
def : Pat<(ret),
(RET)>;
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
// Zero/Any/Sign extensions // Zero/Any/Sign extensions
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//

View File

@ -80,7 +80,6 @@ MSP430TargetLowering::MSP430TargetLowering(MSP430TargetMachine &tm) :
setOperationAction(ISD::ROTR, MVT::i8, Expand); setOperationAction(ISD::ROTR, MVT::i8, Expand);
setOperationAction(ISD::ROTL, MVT::i16, Expand); setOperationAction(ISD::ROTL, MVT::i16, Expand);
setOperationAction(ISD::ROTR, MVT::i16, Expand); setOperationAction(ISD::ROTR, MVT::i16, Expand);
setOperationAction(ISD::RET, MVT::Other, Custom);
setOperationAction(ISD::GlobalAddress, MVT::i16, Custom); setOperationAction(ISD::GlobalAddress, MVT::i16, Custom);
setOperationAction(ISD::ExternalSymbol, MVT::i16, Custom); setOperationAction(ISD::ExternalSymbol, MVT::i16, Custom);
setOperationAction(ISD::BR_JT, MVT::Other, Expand); setOperationAction(ISD::BR_JT, MVT::Other, Expand);
@ -129,12 +128,9 @@ MSP430TargetLowering::MSP430TargetLowering(MSP430TargetMachine &tm) :
SDValue MSP430TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) { SDValue MSP430TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) {
switch (Op.getOpcode()) { switch (Op.getOpcode()) {
case ISD::FORMAL_ARGUMENTS: return LowerFORMAL_ARGUMENTS(Op, DAG);
case ISD::SHL: // FALLTHROUGH case ISD::SHL: // FALLTHROUGH
case ISD::SRL: case ISD::SRL:
case ISD::SRA: return LowerShifts(Op, DAG); case ISD::SRA: return LowerShifts(Op, DAG);
case ISD::RET: return LowerRET(Op, DAG);
case ISD::CALL: return LowerCALL(Op, DAG);
case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG); case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
case ISD::ExternalSymbol: return LowerExternalSymbol(Op, DAG); case ISD::ExternalSymbol: return LowerExternalSymbol(Op, DAG);
case ISD::BR_CC: return LowerBR_CC(Op, DAG); case ISD::BR_CC: return LowerBR_CC(Op, DAG);
@ -157,27 +153,41 @@ unsigned MSP430TargetLowering::getFunctionAlignment(const Function *F) const {
#include "MSP430GenCallingConv.inc" #include "MSP430GenCallingConv.inc"
SDValue MSP430TargetLowering::LowerFORMAL_ARGUMENTS(SDValue Op, SDValue
SelectionDAG &DAG) { MSP430TargetLowering::LowerFormalArguments(SDValue Chain,
unsigned CC = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); unsigned CallConv,
switch (CC) { bool isVarArg,
const SmallVectorImpl<ISD::InputArg>
&Ins,
DebugLoc dl,
SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) {
switch (CallConv) {
default: default:
llvm_unreachable("Unsupported calling convention"); llvm_unreachable("Unsupported calling convention");
case CallingConv::C: case CallingConv::C:
case CallingConv::Fast: case CallingConv::Fast:
return LowerCCCArguments(Op, DAG); return LowerCCCArguments(Chain, CallConv, isVarArg, Ins, dl, DAG, InVals);
} }
} }
SDValue MSP430TargetLowering::LowerCALL(SDValue Op, SelectionDAG &DAG) { SDValue
CallSDNode *TheCall = cast<CallSDNode>(Op.getNode()); MSP430TargetLowering::LowerCall(SDValue Chain, SDValue Callee,
unsigned CallingConv = TheCall->getCallingConv(); unsigned CallConv, bool isVarArg,
switch (CallingConv) { bool isTailCall,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) {
switch (CallConv) {
default: default:
llvm_unreachable("Unsupported calling convention"); llvm_unreachable("Unsupported calling convention");
case CallingConv::Fast: case CallingConv::Fast:
case CallingConv::C: case CallingConv::C:
return LowerCCCCallTo(Op, DAG, CallingConv); return LowerCCCCallTo(Chain, Callee, CallConv, isVarArg, isTailCall,
Outs, Ins, dl, DAG, InVals);
} }
} }
@ -185,24 +195,27 @@ SDValue MSP430TargetLowering::LowerCALL(SDValue Op, SelectionDAG &DAG) {
/// generate load operations for arguments places on the stack. /// generate load operations for arguments places on the stack.
// FIXME: struct return stuff // FIXME: struct return stuff
// FIXME: varargs // FIXME: varargs
SDValue MSP430TargetLowering::LowerCCCArguments(SDValue Op, SDValue
SelectionDAG &DAG) { MSP430TargetLowering::LowerCCCArguments(SDValue Chain,
unsigned CallConv,
bool isVarArg,
const SmallVectorImpl<ISD::InputArg>
&Ins,
DebugLoc dl,
SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) {
MachineFunction &MF = DAG.getMachineFunction(); MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo *MFI = MF.getFrameInfo(); MachineFrameInfo *MFI = MF.getFrameInfo();
MachineRegisterInfo &RegInfo = MF.getRegInfo(); MachineRegisterInfo &RegInfo = MF.getRegInfo();
SDValue Root = Op.getOperand(0);
bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue() != 0;
unsigned CC = MF.getFunction()->getCallingConv();
DebugLoc dl = Op.getDebugLoc();
// Assign locations to all of the incoming arguments. // Assign locations to all of the incoming arguments.
SmallVector<CCValAssign, 16> ArgLocs; SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs, *DAG.getContext()); CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
CCInfo.AnalyzeFormalArguments(Op.getNode(), CC_MSP430); ArgLocs, *DAG.getContext());
CCInfo.AnalyzeFormalArguments(Ins, CC_MSP430);
assert(!isVarArg && "Varargs not supported yet"); assert(!isVarArg && "Varargs not supported yet");
SmallVector<SDValue, 16> ArgValues;
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
CCValAssign &VA = ArgLocs[i]; CCValAssign &VA = ArgLocs[i];
if (VA.isRegLoc()) { if (VA.isRegLoc()) {
@ -212,7 +225,7 @@ SDValue MSP430TargetLowering::LowerCCCArguments(SDValue Op,
default: default:
{ {
#ifndef NDEBUG #ifndef NDEBUG
cerr << "LowerFORMAL_ARGUMENTS Unhandled argument type: " cerr << "LowerFormalArguments Unhandled argument type: "
<< RegVT.getSimpleVT() << "\n"; << RegVT.getSimpleVT() << "\n";
#endif #endif
llvm_unreachable(0); llvm_unreachable(0);
@ -221,7 +234,7 @@ SDValue MSP430TargetLowering::LowerCCCArguments(SDValue Op,
unsigned VReg = unsigned VReg =
RegInfo.createVirtualRegister(MSP430::GR16RegisterClass); RegInfo.createVirtualRegister(MSP430::GR16RegisterClass);
RegInfo.addLiveIn(VA.getLocReg(), VReg); RegInfo.addLiveIn(VA.getLocReg(), VReg);
SDValue ArgValue = DAG.getCopyFromReg(Root, dl, VReg, RegVT); SDValue ArgValue = DAG.getCopyFromReg(Chain, dl, VReg, RegVT);
// If this is an 8-bit value, it is really passed promoted to 16 // If this is an 8-bit value, it is really passed promoted to 16
// bits. Insert an assert[sz]ext to capture this, then truncate to the // bits. Insert an assert[sz]ext to capture this, then truncate to the
@ -236,7 +249,7 @@ SDValue MSP430TargetLowering::LowerCCCArguments(SDValue Op,
if (VA.getLocInfo() != CCValAssign::Full) if (VA.getLocInfo() != CCValAssign::Full)
ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue); ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
ArgValues.push_back(ArgValue); InVals.push_back(ArgValue);
} }
} else { } else {
// Sanity check // Sanity check
@ -244,7 +257,7 @@ SDValue MSP430TargetLowering::LowerCCCArguments(SDValue Op,
// Load the argument to a virtual register // Load the argument to a virtual register
unsigned ObjSize = VA.getLocVT().getSizeInBits()/8; unsigned ObjSize = VA.getLocVT().getSizeInBits()/8;
if (ObjSize > 2) { if (ObjSize > 2) {
cerr << "LowerFORMAL_ARGUMENTS Unhandled argument type: " cerr << "LowerFormalArguments Unhandled argument type: "
<< VA.getLocVT().getSimpleVT() << VA.getLocVT().getSimpleVT()
<< "\n"; << "\n";
} }
@ -254,30 +267,29 @@ SDValue MSP430TargetLowering::LowerCCCArguments(SDValue Op,
// Create the SelectionDAG nodes corresponding to a load // Create the SelectionDAG nodes corresponding to a load
//from this parameter //from this parameter
SDValue FIN = DAG.getFrameIndex(FI, MVT::i16); SDValue FIN = DAG.getFrameIndex(FI, MVT::i16);
ArgValues.push_back(DAG.getLoad(VA.getLocVT(), dl, Root, FIN, InVals.push_back(DAG.getLoad(VA.getLocVT(), dl, Chain, FIN,
PseudoSourceValue::getFixedStack(FI), 0)); PseudoSourceValue::getFixedStack(FI), 0));
} }
} }
ArgValues.push_back(Root); return Chain;
// Return the new list of results.
return DAG.getNode(ISD::MERGE_VALUES, dl, Op.getNode()->getVTList(),
&ArgValues[0], ArgValues.size()).getValue(Op.getResNo());
} }
SDValue MSP430TargetLowering::LowerRET(SDValue Op, SelectionDAG &DAG) { SDValue
MSP430TargetLowering::LowerReturn(SDValue Chain,
unsigned CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
DebugLoc dl, SelectionDAG &DAG) {
// CCValAssign - represent the assignment of the return value to a location // CCValAssign - represent the assignment of the return value to a location
SmallVector<CCValAssign, 16> RVLocs; SmallVector<CCValAssign, 16> RVLocs;
unsigned CC = DAG.getMachineFunction().getFunction()->getCallingConv();
bool isVarArg = DAG.getMachineFunction().getFunction()->isVarArg();
DebugLoc dl = Op.getDebugLoc();
// CCState - Info about the registers and stack slot. // CCState - Info about the registers and stack slot.
CCState CCInfo(CC, isVarArg, getTargetMachine(), RVLocs, *DAG.getContext()); CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
RVLocs, *DAG.getContext());
// Analize return values of ISD::RET // Analize return values.
CCInfo.AnalyzeReturn(Op.getNode(), RetCC_MSP430); CCInfo.AnalyzeReturn(Outs, RetCC_MSP430);
// If this is the first return lowered for this function, add the regs to the // If this is the first return lowered for this function, add the regs to the
// liveout set for the function. // liveout set for the function.
@ -287,8 +299,6 @@ SDValue MSP430TargetLowering::LowerRET(SDValue Op, SelectionDAG &DAG) {
DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg()); DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg());
} }
// The chain is always operand #0
SDValue Chain = Op.getOperand(0);
SDValue Flag; SDValue Flag;
// Copy the result values into the output registers. // Copy the result values into the output registers.
@ -296,10 +306,8 @@ SDValue MSP430TargetLowering::LowerRET(SDValue Op, SelectionDAG &DAG) {
CCValAssign &VA = RVLocs[i]; CCValAssign &VA = RVLocs[i];
assert(VA.isRegLoc() && "Can only return in registers!"); assert(VA.isRegLoc() && "Can only return in registers!");
// ISD::RET => ret chain, (regnum1,val1), ...
// So i*2+1 index only the regnums
Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
Op.getOperand(i*2+1), Flag); Outs[i].Val, Flag);
// Guarantee that all emitted copies are stuck together, // Guarantee that all emitted copies are stuck together,
// avoiding something bad. // avoiding something bad.
@ -316,19 +324,21 @@ SDValue MSP430TargetLowering::LowerRET(SDValue Op, SelectionDAG &DAG) {
/// LowerCCCCallTo - functions arguments are copied from virtual regs to /// LowerCCCCallTo - functions arguments are copied from virtual regs to
/// (physical regs)/(stack frame), CALLSEQ_START and CALLSEQ_END are emitted. /// (physical regs)/(stack frame), CALLSEQ_START and CALLSEQ_END are emitted.
/// TODO: sret. /// TODO: sret.
SDValue MSP430TargetLowering::LowerCCCCallTo(SDValue Op, SelectionDAG &DAG, SDValue
unsigned CC) { MSP430TargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee,
CallSDNode *TheCall = cast<CallSDNode>(Op.getNode()); unsigned CallConv, bool isVarArg,
SDValue Chain = TheCall->getChain(); bool isTailCall,
SDValue Callee = TheCall->getCallee(); const SmallVectorImpl<ISD::OutputArg>
bool isVarArg = TheCall->isVarArg(); &Outs,
DebugLoc dl = Op.getDebugLoc(); const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) {
// Analyze operands of the call, assigning locations to each operand. // Analyze operands of the call, assigning locations to each operand.
SmallVector<CCValAssign, 16> ArgLocs; SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs, *DAG.getContext()); CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
ArgLocs, *DAG.getContext());
CCInfo.AnalyzeCallOperands(TheCall, CC_MSP430); CCInfo.AnalyzeCallOperands(Outs, CC_MSP430);
// Get a count of how many bytes are to be pushed on the stack. // Get a count of how many bytes are to be pushed on the stack.
unsigned NumBytes = CCInfo.getNextStackOffset(); unsigned NumBytes = CCInfo.getNextStackOffset();
@ -344,8 +354,7 @@ SDValue MSP430TargetLowering::LowerCCCCallTo(SDValue Op, SelectionDAG &DAG,
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
CCValAssign &VA = ArgLocs[i]; CCValAssign &VA = ArgLocs[i];
// Arguments start after the 5 first operands of ISD::CALL SDValue Arg = Outs[i].Val;
SDValue Arg = TheCall->getArg(i);
// Promote the value if needed. // Promote the value if needed.
switch (VA.getLocInfo()) { switch (VA.getLocInfo()) {
@ -434,44 +443,36 @@ SDValue MSP430TargetLowering::LowerCCCCallTo(SDValue Op, SelectionDAG &DAG,
// Handle result values, copying them out of physregs into vregs that we // Handle result values, copying them out of physregs into vregs that we
// return. // return.
return SDValue(LowerCallResult(Chain, InFlag, TheCall, CC, DAG), return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins, dl,
Op.getResNo()); DAG, InVals);
} }
/// LowerCallResult - Lower the result values of an ISD::CALL into the /// LowerCallResult - Lower the result values of a call into the
/// appropriate copies out of appropriate physical registers. This assumes that /// appropriate copies out of appropriate physical registers.
/// Chain/InFlag are the input chain/flag to use, and that TheCall is the call ///
/// being lowered. Returns a SDNode with the same number of values as the SDValue
/// ISD::CALL.
SDNode*
MSP430TargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag, MSP430TargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
CallSDNode *TheCall, unsigned CallConv, bool isVarArg,
unsigned CallingConv, const SmallVectorImpl<ISD::InputArg> &Ins,
SelectionDAG &DAG) { DebugLoc dl, SelectionDAG &DAG,
bool isVarArg = TheCall->isVarArg(); SmallVectorImpl<SDValue> &InVals) {
DebugLoc dl = TheCall->getDebugLoc();
// Assign locations to each value returned by this call. // Assign locations to each value returned by this call.
SmallVector<CCValAssign, 16> RVLocs; SmallVector<CCValAssign, 16> RVLocs;
CCState CCInfo(CallingConv, isVarArg, getTargetMachine(), CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
RVLocs, *DAG.getContext()); RVLocs, *DAG.getContext());
CCInfo.AnalyzeCallResult(TheCall, RetCC_MSP430); CCInfo.AnalyzeCallResult(Ins, RetCC_MSP430);
SmallVector<SDValue, 8> ResultVals;
// Copy all of the result registers out of their specified physreg. // Copy all of the result registers out of their specified physreg.
for (unsigned i = 0; i != RVLocs.size(); ++i) { for (unsigned i = 0; i != RVLocs.size(); ++i) {
Chain = DAG.getCopyFromReg(Chain, dl, RVLocs[i].getLocReg(), Chain = DAG.getCopyFromReg(Chain, dl, RVLocs[i].getLocReg(),
RVLocs[i].getValVT(), InFlag).getValue(1); RVLocs[i].getValVT(), InFlag).getValue(1);
InFlag = Chain.getValue(2); InFlag = Chain.getValue(2);
ResultVals.push_back(Chain.getValue(0)); InVals.push_back(Chain.getValue(0));
} }
ResultVals.push_back(Chain); return Chain;
// Merge everything together with a MERGE_VALUES node.
return DAG.getNode(ISD::MERGE_VALUES, dl, TheCall->getVTList(),
&ResultVals[0], ResultVals.size()).getNode();
} }
SDValue MSP430TargetLowering::LowerShifts(SDValue Op, SDValue MSP430TargetLowering::LowerShifts(SDValue Op,

View File

@ -33,7 +33,7 @@ namespace llvm {
/// Y = RRC X, rotate right via carry /// Y = RRC X, rotate right via carry
RRC, RRC,
/// CALL/TAILCALL - These operations represent an abstract call /// CALL - These operations represent an abstract call
/// instruction, which includes a bunch of information. /// instruction, which includes a bunch of information.
CALL, CALL,
@ -77,10 +77,6 @@ namespace llvm {
/// getFunctionAlignment - Return the Log2 alignment of this function. /// getFunctionAlignment - Return the Log2 alignment of this function.
virtual unsigned getFunctionAlignment(const Function *F) const; virtual unsigned getFunctionAlignment(const Function *F) const;
SDValue LowerFORMAL_ARGUMENTS(SDValue Op, SelectionDAG &DAG);
SDValue LowerCALL(SDValue Op, SelectionDAG &DAG);
SDValue LowerRET(SDValue Op, SelectionDAG &DAG);
SDValue LowerCCCArguments(SDValue Op, SelectionDAG &DAG);
SDValue LowerShifts(SDValue Op, SelectionDAG &DAG); SDValue LowerShifts(SDValue Op, SelectionDAG &DAG);
SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG); SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG);
SDValue LowerExternalSymbol(SDValue Op, SelectionDAG &DAG); SDValue LowerExternalSymbol(SDValue Op, SelectionDAG &DAG);
@ -88,16 +84,52 @@ namespace llvm {
SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG); SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG);
SDValue LowerSIGN_EXTEND(SDValue Op, SelectionDAG &DAG); SDValue LowerSIGN_EXTEND(SDValue Op, SelectionDAG &DAG);
SDValue LowerCCCCallTo(SDValue Op, SelectionDAG &DAG,
unsigned CC);
SDNode* LowerCallResult(SDValue Chain, SDValue InFlag,
CallSDNode *TheCall,
unsigned CallingConv, SelectionDAG &DAG);
MachineBasicBlock* EmitInstrWithCustomInserter(MachineInstr *MI, MachineBasicBlock* EmitInstrWithCustomInserter(MachineInstr *MI,
MachineBasicBlock *BB) const; MachineBasicBlock *BB) const;
private: private:
SDValue LowerCCCCallTo(SDValue Chain, SDValue Callee,
unsigned CallConv, bool isVarArg,
bool isTailCall,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals);
SDValue LowerCCCArguments(SDValue Chain,
unsigned CallConv,
bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl,
SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals);
SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
unsigned CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals);
virtual SDValue
LowerFormalArguments(SDValue Chain,
unsigned CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals);
virtual SDValue
LowerCall(SDValue Chain, SDValue Callee,
unsigned CallConv, bool isVarArg, bool isTailCall,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals);
virtual SDValue
LowerReturn(SDValue Chain,
unsigned CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
DebugLoc dl, SelectionDAG &DAG);
const MSP430Subtarget &Subtarget; const MSP430Subtarget &Subtarget;
const MSP430TargetMachine &TM; const MSP430TargetMachine &TM;
}; };

View File

@ -94,7 +94,6 @@ MipsTargetLowering(MipsTargetMachine &TM)
// Mips Custom Operations // Mips Custom Operations
setOperationAction(ISD::GlobalAddress, MVT::i32, Custom); setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom); setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
setOperationAction(ISD::RET, MVT::Other, Custom);
setOperationAction(ISD::JumpTable, MVT::i32, Custom); setOperationAction(ISD::JumpTable, MVT::i32, Custom);
setOperationAction(ISD::ConstantPool, MVT::i32, Custom); setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
setOperationAction(ISD::SELECT, MVT::f32, Custom); setOperationAction(ISD::SELECT, MVT::f32, Custom);
@ -182,16 +181,13 @@ LowerOperation(SDValue Op, SelectionDAG &DAG)
{ {
case ISD::AND: return LowerANDOR(Op, DAG); case ISD::AND: return LowerANDOR(Op, DAG);
case ISD::BRCOND: return LowerBRCOND(Op, DAG); case ISD::BRCOND: return LowerBRCOND(Op, DAG);
case ISD::CALL: return LowerCALL(Op, DAG);
case ISD::ConstantPool: return LowerConstantPool(Op, DAG); case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG); case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
case ISD::FORMAL_ARGUMENTS: return LowerFORMAL_ARGUMENTS(Op, DAG);
case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG); case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG);
case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG); case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG); case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
case ISD::JumpTable: return LowerJumpTable(Op, DAG); case ISD::JumpTable: return LowerJumpTable(Op, DAG);
case ISD::OR: return LowerANDOR(Op, DAG); case ISD::OR: return LowerANDOR(Op, DAG);
case ISD::RET: return LowerRET(Op, DAG);
case ISD::SELECT: return LowerSELECT(Op, DAG); case ISD::SELECT: return LowerSELECT(Op, DAG);
case ISD::SETCC: return LowerSETCC(Op, DAG); case ISD::SETCC: return LowerSETCC(Op, DAG);
} }
@ -580,13 +576,6 @@ LowerConstantPool(SDValue Op, SelectionDAG &DAG)
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
// Calling Convention Implementation // Calling Convention Implementation
//
// The lower operations present on calling convention works on this order:
// LowerCALL (virt regs --> phys regs, virt regs --> stack)
// LowerFORMAL_ARGUMENTS (phys --> virt regs, stack --> virt regs)
// LowerRET (virt regs --> phys regs)
// LowerCALL (phys regs --> virt regs)
//
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
#include "MipsGenCallingConv.inc" #include "MipsGenCallingConv.inc"
@ -671,38 +660,37 @@ static bool CC_MipsO32(unsigned ValNo, MVT ValVT,
} }
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
// CALL Calling Convention Implementation // Call Calling Convention Implementation
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
/// LowerCALL - functions arguments are copied from virtual regs to /// LowerCall - functions arguments are copied from virtual regs to
/// (physical regs)/(stack frame), CALLSEQ_START and CALLSEQ_END are emitted. /// (physical regs)/(stack frame), CALLSEQ_START and CALLSEQ_END are emitted.
/// TODO: isVarArg, isTailCall. /// TODO: isVarArg, isTailCall.
SDValue MipsTargetLowering:: SDValue
LowerCALL(SDValue Op, SelectionDAG &DAG) MipsTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
{ unsigned CallConv, bool isVarArg,
bool isTailCall,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) {
MachineFunction &MF = DAG.getMachineFunction(); MachineFunction &MF = DAG.getMachineFunction();
CallSDNode *TheCall = cast<CallSDNode>(Op.getNode());
SDValue Chain = TheCall->getChain();
SDValue Callee = TheCall->getCallee();
bool isVarArg = TheCall->isVarArg();
unsigned CC = TheCall->getCallingConv();
DebugLoc dl = TheCall->getDebugLoc();
MachineFrameInfo *MFI = MF.getFrameInfo(); MachineFrameInfo *MFI = MF.getFrameInfo();
// Analyze operands of the call, assigning locations to each operand. // Analyze operands of the call, assigning locations to each operand.
SmallVector<CCValAssign, 16> ArgLocs; SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs, *DAG.getContext()); CCState CCInfo(CallConv, isVarArg, getTargetMachine(), ArgLocs,
*DAG.getContext());
// To meet O32 ABI, Mips must always allocate 16 bytes on // To meet O32 ABI, Mips must always allocate 16 bytes on
// the stack (even if less than 4 are used as arguments) // the stack (even if less than 4 are used as arguments)
if (Subtarget->isABI_O32()) { if (Subtarget->isABI_O32()) {
int VTsize = MVT(MVT::i32).getSizeInBits()/8; int VTsize = MVT(MVT::i32).getSizeInBits()/8;
MFI->CreateFixedObject(VTsize, (VTsize*3)); MFI->CreateFixedObject(VTsize, (VTsize*3));
CCInfo.AnalyzeCallOperands(TheCall, CC_MipsO32); CCInfo.AnalyzeCallOperands(Outs, CC_MipsO32);
} else } else
CCInfo.AnalyzeCallOperands(TheCall, CC_Mips); CCInfo.AnalyzeCallOperands(Outs, CC_Mips);
// Get a count of how many bytes are to be pushed on the stack. // Get a count of how many bytes are to be pushed on the stack.
unsigned NumBytes = CCInfo.getNextStackOffset(); unsigned NumBytes = CCInfo.getNextStackOffset();
@ -719,7 +707,7 @@ LowerCALL(SDValue Op, SelectionDAG &DAG)
// Walk the register/memloc assignments, inserting copies/loads. // Walk the register/memloc assignments, inserting copies/loads.
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
SDValue Arg = TheCall->getArg(i); SDValue Arg = Outs[i].Val;
CCValAssign &VA = ArgLocs[i]; CCValAssign &VA = ArgLocs[i];
// Promote the value if needed. // Promote the value if needed.
@ -859,76 +847,69 @@ LowerCALL(SDValue Op, SelectionDAG &DAG)
// Handle result values, copying them out of physregs into vregs that we // Handle result values, copying them out of physregs into vregs that we
// return. // return.
return SDValue(LowerCallResult(Chain, InFlag, TheCall, CC, DAG), Op.getResNo()); return LowerCallResult(Chain, InFlag, CallConv, isVarArg,
Ins, dl, DAG, InVals);
} }
/// LowerCallResult - Lower the result values of an ISD::CALL into the /// LowerCallResult - Lower the result values of a call into the
/// appropriate copies out of appropriate physical registers. This assumes that /// appropriate copies out of appropriate physical registers.
/// Chain/InFlag are the input chain/flag to use, and that TheCall is the call SDValue
/// being lowered. Returns a SDNode with the same number of values as the MipsTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
/// ISD::CALL. unsigned CallConv, bool isVarArg,
SDNode *MipsTargetLowering:: const SmallVectorImpl<ISD::InputArg> &Ins,
LowerCallResult(SDValue Chain, SDValue InFlag, CallSDNode *TheCall, DebugLoc dl, SelectionDAG &DAG,
unsigned CallingConv, SelectionDAG &DAG) { SmallVectorImpl<SDValue> &InVals) {
bool isVarArg = TheCall->isVarArg();
DebugLoc dl = TheCall->getDebugLoc();
// Assign locations to each value returned by this call. // Assign locations to each value returned by this call.
SmallVector<CCValAssign, 16> RVLocs; SmallVector<CCValAssign, 16> RVLocs;
CCState CCInfo(CallingConv, isVarArg, getTargetMachine(), CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
RVLocs, *DAG.getContext()); RVLocs, *DAG.getContext());
CCInfo.AnalyzeCallResult(TheCall, RetCC_Mips); CCInfo.AnalyzeCallResult(Ins, RetCC_Mips);
SmallVector<SDValue, 8> ResultVals;
// Copy all of the result registers out of their specified physreg. // Copy all of the result registers out of their specified physreg.
for (unsigned i = 0; i != RVLocs.size(); ++i) { for (unsigned i = 0; i != RVLocs.size(); ++i) {
Chain = DAG.getCopyFromReg(Chain, dl, RVLocs[i].getLocReg(), Chain = DAG.getCopyFromReg(Chain, dl, RVLocs[i].getLocReg(),
RVLocs[i].getValVT(), InFlag).getValue(1); RVLocs[i].getValVT(), InFlag).getValue(1);
InFlag = Chain.getValue(2); InFlag = Chain.getValue(2);
ResultVals.push_back(Chain.getValue(0)); InVals.push_back(Chain.getValue(0));
} }
ResultVals.push_back(Chain);
// Merge everything together with a MERGE_VALUES node. return Chain;
return DAG.getNode(ISD::MERGE_VALUES, dl, TheCall->getVTList(),
&ResultVals[0], ResultVals.size()).getNode();
} }
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
// FORMAL_ARGUMENTS Calling Convention Implementation // Formal Arguments Calling Convention Implementation
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
/// LowerFORMAL_ARGUMENTS - transform physical registers into /// LowerFormalArguments - transform physical registers into
/// virtual registers and generate load operations for /// virtual registers and generate load operations for
/// arguments places on the stack. /// arguments places on the stack.
/// TODO: isVarArg /// TODO: isVarArg
SDValue MipsTargetLowering:: SDValue
LowerFORMAL_ARGUMENTS(SDValue Op, SelectionDAG &DAG) MipsTargetLowering::LowerFormalArguments(SDValue Chain,
{ unsigned CallConv, bool isVarArg,
SDValue Root = Op.getOperand(0); const SmallVectorImpl<ISD::InputArg>
&Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) {
MachineFunction &MF = DAG.getMachineFunction(); MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo *MFI = MF.getFrameInfo(); MachineFrameInfo *MFI = MF.getFrameInfo();
MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>(); MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
DebugLoc dl = Op.getDebugLoc();
bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue() != 0;
unsigned CC = DAG.getMachineFunction().getFunction()->getCallingConv();
unsigned StackReg = MF.getTarget().getRegisterInfo()->getFrameRegister(MF); unsigned StackReg = MF.getTarget().getRegisterInfo()->getFrameRegister(MF);
// Assign locations to all of the incoming arguments. // Assign locations to all of the incoming arguments.
SmallVector<CCValAssign, 16> ArgLocs; SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs, *DAG.getContext()); CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
ArgLocs, *DAG.getContext());
if (Subtarget->isABI_O32()) if (Subtarget->isABI_O32())
CCInfo.AnalyzeFormalArguments(Op.getNode(), CC_MipsO32); CCInfo.AnalyzeFormalArguments(Ins, CC_MipsO32);
else else
CCInfo.AnalyzeFormalArguments(Op.getNode(), CC_Mips); CCInfo.AnalyzeFormalArguments(Ins, CC_Mips);
SmallVector<SDValue, 16> ArgValues;
SDValue StackPtr; SDValue StackPtr;
unsigned FirstStackArgLoc = (Subtarget->isABI_EABI() ? 0 : 16); unsigned FirstStackArgLoc = (Subtarget->isABI_EABI() ? 0 : 16);
@ -949,12 +930,12 @@ LowerFORMAL_ARGUMENTS(SDValue Op, SelectionDAG &DAG)
if (!Subtarget->isSingleFloat()) if (!Subtarget->isSingleFloat())
RC = Mips::AFGR64RegisterClass; RC = Mips::AFGR64RegisterClass;
} else } else
llvm_unreachable("RegVT not supported by FORMAL_ARGUMENTS Lowering"); llvm_unreachable("RegVT not supported by LowerFormalArguments Lowering");
// Transform the arguments stored on // Transform the arguments stored on
// physical registers into virtual ones // physical registers into virtual ones
unsigned Reg = AddLiveIn(DAG.getMachineFunction(), VA.getLocReg(), RC); unsigned Reg = AddLiveIn(DAG.getMachineFunction(), VA.getLocReg(), RC);
SDValue ArgValue = DAG.getCopyFromReg(Root, dl, Reg, RegVT); SDValue ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);
// If this is an 8 or 16-bit value, it has been passed promoted // If this is an 8 or 16-bit value, it has been passed promoted
// to 32 bits. Insert an assert[sz]ext to capture this, then // to 32 bits. Insert an assert[sz]ext to capture this, then
@ -978,14 +959,14 @@ LowerFORMAL_ARGUMENTS(SDValue Op, SelectionDAG &DAG)
if (RegVT == MVT::i32 && VA.getValVT() == MVT::f64) { if (RegVT == MVT::i32 && VA.getValVT() == MVT::f64) {
unsigned Reg2 = AddLiveIn(DAG.getMachineFunction(), unsigned Reg2 = AddLiveIn(DAG.getMachineFunction(),
VA.getLocReg()+1, RC); VA.getLocReg()+1, RC);
SDValue ArgValue2 = DAG.getCopyFromReg(Root, dl, Reg2, RegVT); SDValue ArgValue2 = DAG.getCopyFromReg(Chain, dl, Reg2, RegVT);
SDValue Hi = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, ArgValue); SDValue Hi = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, ArgValue);
SDValue Lo = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, ArgValue2); SDValue Lo = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, ArgValue2);
ArgValue = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::f64, Lo, Hi); ArgValue = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::f64, Lo, Hi);
} }
} }
ArgValues.push_back(ArgValue); InVals.push_back(ArgValue);
// To meet ABI, when VARARGS are passed on registers, the registers // To meet ABI, when VARARGS are passed on registers, the registers
// must have their values written to the caller stack frame. // must have their values written to the caller stack frame.
@ -1007,7 +988,7 @@ LowerFORMAL_ARGUMENTS(SDValue Op, SelectionDAG &DAG)
// emit ISD::STORE whichs stores the // emit ISD::STORE whichs stores the
// parameter value to a stack Location // parameter value to a stack Location
ArgValues.push_back(DAG.getStore(Root, dl, ArgValue, PtrOff, NULL, 0)); InVals.push_back(DAG.getStore(Chain, dl, ArgValue, PtrOff, NULL, 0));
} }
} else { // VA.isRegLoc() } else { // VA.isRegLoc()
@ -1030,7 +1011,7 @@ LowerFORMAL_ARGUMENTS(SDValue Op, SelectionDAG &DAG)
// Create load nodes to retrieve arguments from the stack // Create load nodes to retrieve arguments from the stack
SDValue FIN = DAG.getFrameIndex(FI, getPointerTy()); SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
ArgValues.push_back(DAG.getLoad(VA.getValVT(), dl, Root, FIN, NULL, 0)); InVals.push_back(DAG.getLoad(VA.getValVT(), dl, Chain, FIN, NULL, 0));
} }
} }
@ -1043,36 +1024,33 @@ LowerFORMAL_ARGUMENTS(SDValue Op, SelectionDAG &DAG)
Reg = MF.getRegInfo().createVirtualRegister(getRegClassFor(MVT::i32)); Reg = MF.getRegInfo().createVirtualRegister(getRegClassFor(MVT::i32));
MipsFI->setSRetReturnReg(Reg); MipsFI->setSRetReturnReg(Reg);
} }
SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, ArgValues[0]); SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[0]);
Root = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Root); Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain);
} }
ArgValues.push_back(Root); return Chain;
// Return the new list of results.
return DAG.getNode(ISD::MERGE_VALUES, dl, Op.getNode()->getVTList(),
&ArgValues[0], ArgValues.size()).getValue(Op.getResNo());
} }
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
// Return Value Calling Convention Implementation // Return Value Calling Convention Implementation
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
SDValue MipsTargetLowering:: SDValue
LowerRET(SDValue Op, SelectionDAG &DAG) MipsTargetLowering::LowerReturn(SDValue Chain,
{ unsigned CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
DebugLoc dl, SelectionDAG &DAG) {
// CCValAssign - represent the assignment of // CCValAssign - represent the assignment of
// the return value to a location // the return value to a location
SmallVector<CCValAssign, 16> RVLocs; SmallVector<CCValAssign, 16> RVLocs;
unsigned CC = DAG.getMachineFunction().getFunction()->getCallingConv();
bool isVarArg = DAG.getMachineFunction().getFunction()->isVarArg();
DebugLoc dl = Op.getDebugLoc();
// CCState - Info about the registers and stack slot. // CCState - Info about the registers and stack slot.
CCState CCInfo(CC, isVarArg, getTargetMachine(), RVLocs, *DAG.getContext()); CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
RVLocs, *DAG.getContext());
// Analize return values of ISD::RET // Analize return values.
CCInfo.AnalyzeReturn(Op.getNode(), RetCC_Mips); CCInfo.AnalyzeReturn(Outs, RetCC_Mips);
// If this is the first return lowered for this function, add // If this is the first return lowered for this function, add
// the regs to the liveout set for the function. // the regs to the liveout set for the function.
@ -1082,8 +1060,6 @@ LowerRET(SDValue Op, SelectionDAG &DAG)
DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg()); DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg());
} }
// The chain is always operand #0
SDValue Chain = Op.getOperand(0);
SDValue Flag; SDValue Flag;
// Copy the result values into the output registers. // Copy the result values into the output registers.
@ -1091,10 +1067,8 @@ LowerRET(SDValue Op, SelectionDAG &DAG)
CCValAssign &VA = RVLocs[i]; CCValAssign &VA = RVLocs[i];
assert(VA.isRegLoc() && "Can only return in registers!"); assert(VA.isRegLoc() && "Can only return in registers!");
// ISD::RET => ret chain, (regnum1,val1), ...
// So i*2+1 index only the regnums
Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
Op.getOperand(i*2+1), Flag); Outs[i].Val, Flag);
// guarantee that all emitted copies are // guarantee that all emitted copies are
// stuck together, avoiding something bad // stuck together, avoiding something bad

View File

@ -89,24 +89,46 @@ namespace llvm {
const MipsSubtarget *Subtarget; const MipsSubtarget *Subtarget;
// Lower Operand helpers // Lower Operand helpers
SDNode *LowerCallResult(SDValue Chain, SDValue InFlag, CallSDNode *TheCall, SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
unsigned CallingConv, SelectionDAG &DAG); unsigned CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals);
// Lower Operand specifics // Lower Operand specifics
SDValue LowerANDOR(SDValue Op, SelectionDAG &DAG); SDValue LowerANDOR(SDValue Op, SelectionDAG &DAG);
SDValue LowerBRCOND(SDValue Op, SelectionDAG &DAG); SDValue LowerBRCOND(SDValue Op, SelectionDAG &DAG);
SDValue LowerCALL(SDValue Op, SelectionDAG &DAG);
SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG); SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG);
SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG); SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG);
SDValue LowerFORMAL_ARGUMENTS(SDValue Op, SelectionDAG &DAG);
SDValue LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG); SDValue LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG);
SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG); SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG);
SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG); SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG);
SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG); SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG);
SDValue LowerRET(SDValue Op, SelectionDAG &DAG);
SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG); SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG);
SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG); SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG);
virtual SDValue
LowerFormalArguments(SDValue Chain,
unsigned CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals);
virtual SDValue
LowerCall(SDValue Chain, SDValue Callee,
unsigned CallConv, bool isVarArg,
bool isTailCall,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals);
virtual SDValue
LowerReturn(SDValue Chain,
unsigned CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
DebugLoc dl, SelectionDAG &DAG);
virtual MachineBasicBlock *EmitInstrWithCustomInserter(MachineInstr *MI, virtual MachineBasicBlock *EmitInstrWithCustomInserter(MachineInstr *MI,
MachineBasicBlock *MBB) const; MachineBasicBlock *MBB) const;

View File

@ -57,7 +57,7 @@ private:
/// to be used on emitPrologue and processFunctionBeforeFrameFinalized. /// to be used on emitPrologue and processFunctionBeforeFrameFinalized.
MipsFIHolder GPHolder; MipsFIHolder GPHolder;
/// On LowerFORMAL_ARGUMENTS the stack size is unknown, so the Stack /// On LowerFormalArguments the stack size is unknown, so the Stack
/// Pointer Offset calculation of "not in register arguments" must be /// Pointer Offset calculation of "not in register arguments" must be
/// postponed to emitPrologue. /// postponed to emitPrologue.
SmallVector<MipsFIHolder, 16> FnLoadArgs; SmallVector<MipsFIHolder, 16> FnLoadArgs;
@ -65,7 +65,7 @@ private:
// When VarArgs, we must write registers back to caller stack, preserving // When VarArgs, we must write registers back to caller stack, preserving
// on register arguments. Since the stack size is unknown on // on register arguments. Since the stack size is unknown on
// LowerFORMAL_ARGUMENTS, the Stack Pointer Offset calculation must be // LowerFormalArguments, the Stack Pointer Offset calculation must be
// postponed to emitPrologue. // postponed to emitPrologue.
SmallVector<MipsFIHolder, 4> FnStoreVarArgs; SmallVector<MipsFIHolder, 4> FnStoreVarArgs;
bool HasStoreVarArgs; bool HasStoreVarArgs;

View File

@ -212,7 +212,7 @@ getReservedRegs(const MachineFunction &MF) const
// The emitted instruction will be something like: // The emitted instruction will be something like:
// lw REGX, 16+StackSize(SP) // lw REGX, 16+StackSize(SP)
// //
// Since the total stack size is unknown on LowerFORMAL_ARGUMENTS, all // Since the total stack size is unknown on LowerFormalArguments, all
// stack references (ObjectOffset) created to reference the function // stack references (ObjectOffset) created to reference the function
// arguments, are negative numbers. This way, on eliminateFrameIndex it's // arguments, are negative numbers. This way, on eliminateFrameIndex it's
// possible to detect those references and the offsets are adjusted to // possible to detect those references and the offsets are adjusted to
@ -234,7 +234,7 @@ void MipsRegisterInfo::adjustMipsStackFrame(MachineFunction &MF) const
int TopCPUSavedRegOff = -1, TopFPUSavedRegOff = -1; int TopCPUSavedRegOff = -1, TopFPUSavedRegOff = -1;
// Replace the dummy '0' SPOffset by the negative offsets, as explained on // Replace the dummy '0' SPOffset by the negative offsets, as explained on
// LowerFORMAL_ARGUMENTS. Leaving '0' for while is necessary to avoid // LowerFormalArguments. Leaving '0' for while is necessary to avoid
// the approach done by calculateFrameObjectOffsets to the stack frame. // the approach done by calculateFrameObjectOffsets to the stack frame.
MipsFI->adjustLoadArgsFI(MFI); MipsFI->adjustLoadArgsFI(MFI);
MipsFI->adjustStoreVarArgsFI(MFI); MipsFI->adjustStoreVarArgsFI(MFI);
@ -378,7 +378,7 @@ eliminateFrameIndex(MachineBasicBlock::iterator II, int SPAdj,
DOUT << "stackSize : " << stackSize << "\n"; DOUT << "stackSize : " << stackSize << "\n";
#endif #endif
// as explained on LowerFORMAL_ARGUMENTS, detect negative offsets // as explained on LowerFormalArguments, detect negative offsets
// and adjust SPOffsets considering the final stack size. // and adjust SPOffsets considering the final stack size.
int Offset = ((spOffset < 0) ? (stackSize + (-(spOffset+4))) : (spOffset)); int Offset = ((spOffset < 0) ? (stackSize + (-(spOffset+4))) : (spOffset));
Offset += MI.getOperand(i-1).getImm(); Offset += MI.getOperand(i-1).getImm();

View File

@ -268,8 +268,6 @@ PIC16TargetLowering::PIC16TargetLowering(PIC16TargetMachine &TM)
setOperationAction(ISD::XOR, MVT::i8, Custom); setOperationAction(ISD::XOR, MVT::i8, Custom);
setOperationAction(ISD::FrameIndex, MVT::i16, Custom); setOperationAction(ISD::FrameIndex, MVT::i16, Custom);
setOperationAction(ISD::CALL, MVT::i16, Custom);
setOperationAction(ISD::RET, MVT::Other, Custom);
setOperationAction(ISD::MUL, MVT::i8, Custom); setOperationAction(ISD::MUL, MVT::i8, Custom);
@ -410,7 +408,9 @@ PIC16TargetLowering::MakePIC16Libcall(PIC16ISD::PIC16Libcall Call,
const Type *RetTy = RetVT.getTypeForMVT(); const Type *RetTy = RetVT.getTypeForMVT();
std::pair<SDValue,SDValue> CallInfo = std::pair<SDValue,SDValue> CallInfo =
LowerCallTo(DAG.getEntryNode(), RetTy, isSigned, !isSigned, false, LowerCallTo(DAG.getEntryNode(), RetTy, isSigned, !isSigned, false,
false, 0, CallingConv::C, false, Callee, Args, DAG, dl); false, 0, CallingConv::C, false,
/*isReturnValueUsed=*/true,
Callee, Args, DAG, dl);
return CallInfo.first; return CallInfo.first;
} }
@ -440,6 +440,7 @@ const char *PIC16TargetLowering::getTargetNodeName(unsigned Opcode) const {
case PIC16ISD::SUBCC: return "PIC16ISD::SUBCC"; case PIC16ISD::SUBCC: return "PIC16ISD::SUBCC";
case PIC16ISD::SELECT_ICC: return "PIC16ISD::SELECT_ICC"; case PIC16ISD::SELECT_ICC: return "PIC16ISD::SELECT_ICC";
case PIC16ISD::BRCOND: return "PIC16ISD::BRCOND"; case PIC16ISD::BRCOND: return "PIC16ISD::BRCOND";
case PIC16ISD::RET: return "PIC16ISD::RET";
case PIC16ISD::Dummy: return "PIC16ISD::Dummy"; case PIC16ISD::Dummy: return "PIC16ISD::Dummy";
} }
} }
@ -994,12 +995,8 @@ PIC16TargetLowering::LowerOperationWrapper(SDNode *N,
SDValue Res; SDValue Res;
unsigned i; unsigned i;
switch (Op.getOpcode()) { switch (Op.getOpcode()) {
case ISD::FORMAL_ARGUMENTS:
Res = LowerFORMAL_ARGUMENTS(Op, DAG); break;
case ISD::LOAD: case ISD::LOAD:
Res = ExpandLoad(Op.getNode(), DAG); break; Res = ExpandLoad(Op.getNode(), DAG); break;
case ISD::CALL:
Res = LowerCALL(Op, DAG); break;
default: { default: {
// All other operations are handled in LowerOperation. // All other operations are handled in LowerOperation.
Res = LowerOperation(Op, DAG); Res = LowerOperation(Op, DAG);
@ -1019,8 +1016,6 @@ PIC16TargetLowering::LowerOperationWrapper(SDNode *N,
SDValue PIC16TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) { SDValue PIC16TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) {
switch (Op.getOpcode()) { switch (Op.getOpcode()) {
case ISD::FORMAL_ARGUMENTS:
return LowerFORMAL_ARGUMENTS(Op, DAG);
case ISD::ADD: case ISD::ADD:
case ISD::ADDC: case ISD::ADDC:
case ISD::ADDE: case ISD::ADDE:
@ -1043,10 +1038,6 @@ SDValue PIC16TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) {
case ISD::AND: case ISD::AND:
case ISD::XOR: case ISD::XOR:
return LowerBinOp(Op, DAG); return LowerBinOp(Op, DAG);
case ISD::CALL:
return LowerCALL(Op, DAG);
case ISD::RET:
return LowerRET(Op, DAG);
case ISD::BR_CC: case ISD::BR_CC:
return LowerBR_CC(Op, DAG); return LowerBR_CC(Op, DAG);
case ISD::SELECT_CC: case ISD::SELECT_CC:
@ -1091,12 +1082,11 @@ SDValue PIC16TargetLowering::ConvertToMemOperand(SDValue Op,
} }
SDValue PIC16TargetLowering:: SDValue PIC16TargetLowering::
LowerIndirectCallArguments(SDValue Op, SDValue Chain, SDValue InFlag, LowerIndirectCallArguments(SDValue Chain, SDValue InFlag,
SDValue DataAddr_Lo, SDValue DataAddr_Hi, SDValue DataAddr_Lo, SDValue DataAddr_Hi,
SelectionDAG &DAG) { const SmallVectorImpl<ISD::OutputArg> &Outs,
CallSDNode *TheCall = dyn_cast<CallSDNode>(Op); DebugLoc dl, SelectionDAG &DAG) {
unsigned NumOps = TheCall->getNumArgs(); unsigned NumOps = Outs.size();
DebugLoc dl = TheCall->getDebugLoc();
// If call has no arguments then do nothing and return. // If call has no arguments then do nothing and return.
if (NumOps == 0) if (NumOps == 0)
@ -1107,10 +1097,10 @@ LowerIndirectCallArguments(SDValue Op, SDValue Chain, SDValue InFlag,
SDValue Arg, StoreRet; SDValue Arg, StoreRet;
// For PIC16 ABI the arguments come after the return value. // For PIC16 ABI the arguments come after the return value.
unsigned RetVals = TheCall->getNumRetVals(); unsigned RetVals = Outs.size();
for (unsigned i = 0, ArgOffset = RetVals; i < NumOps; i++) { for (unsigned i = 0, ArgOffset = RetVals; i < NumOps; i++) {
// Get the arguments // Get the arguments
Arg = TheCall->getArg(i); Arg = Outs[i].Val;
Ops.clear(); Ops.clear();
Ops.push_back(Chain); Ops.push_back(Chain);
@ -1130,16 +1120,14 @@ LowerIndirectCallArguments(SDValue Op, SDValue Chain, SDValue InFlag,
} }
SDValue PIC16TargetLowering:: SDValue PIC16TargetLowering::
LowerDirectCallArguments(SDValue Op, SDValue Chain, SDValue ArgLabel, LowerDirectCallArguments(SDValue ArgLabel, SDValue Chain, SDValue InFlag,
SDValue InFlag, SelectionDAG &DAG) { const SmallVectorImpl<ISD::OutputArg> &Outs,
CallSDNode *TheCall = dyn_cast<CallSDNode>(Op); DebugLoc dl, SelectionDAG &DAG) {
unsigned NumOps = TheCall->getNumArgs(); unsigned NumOps = Outs.size();
DebugLoc dl = TheCall->getDebugLoc();
std::string Name; std::string Name;
SDValue Arg, StoreAt; SDValue Arg, StoreAt;
MVT ArgVT; MVT ArgVT;
unsigned Size=0; unsigned Size=0;
unsigned ArgCount=0;
// If call has no arguments then do nothing and return. // If call has no arguments then do nothing and return.
if (NumOps == 0) if (NumOps == 0)
@ -1157,9 +1145,9 @@ LowerDirectCallArguments(SDValue Op, SDValue Chain, SDValue ArgLabel,
std::vector<SDValue> Ops; std::vector<SDValue> Ops;
SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag); SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag);
for (unsigned i=ArgCount, Offset = 0; i<NumOps; i++) { for (unsigned i=0, Offset = 0; i<NumOps; i++) {
// Get the argument // Get the argument
Arg = TheCall->getArg(i); Arg = Outs[i].Val;
StoreOffset = (Offset + AddressOffset); StoreOffset = (Offset + AddressOffset);
// Store the argument on frame // Store the argument on frame
@ -1187,12 +1175,12 @@ LowerDirectCallArguments(SDValue Op, SDValue Chain, SDValue ArgLabel,
} }
SDValue PIC16TargetLowering:: SDValue PIC16TargetLowering::
LowerIndirectCallReturn (SDValue Op, SDValue Chain, SDValue InFlag, LowerIndirectCallReturn(SDValue Chain, SDValue InFlag,
SDValue DataAddr_Lo, SDValue DataAddr_Hi, SDValue DataAddr_Lo, SDValue DataAddr_Hi,
SelectionDAG &DAG) { const SmallVectorImpl<ISD::InputArg> &Ins,
CallSDNode *TheCall = dyn_cast<CallSDNode>(Op); DebugLoc dl, SelectionDAG &DAG,
DebugLoc dl = TheCall->getDebugLoc(); SmallVectorImpl<SDValue> &InVals) {
unsigned RetVals = TheCall->getNumRetVals(); unsigned RetVals = Ins.size();
// If call does not have anything to return // If call does not have anything to return
// then do nothing and go back. // then do nothing and go back.
@ -1200,7 +1188,6 @@ LowerIndirectCallReturn (SDValue Op, SDValue Chain, SDValue InFlag,
return Chain; return Chain;
// Call has something to return // Call has something to return
std::vector<SDValue> ResultVals;
SDValue LoadRet; SDValue LoadRet;
SDVTList Tys = DAG.getVTList(MVT::i8, MVT::Other, MVT::Flag); SDVTList Tys = DAG.getVTList(MVT::i8, MVT::Other, MVT::Flag);
@ -1210,23 +1197,20 @@ LowerIndirectCallReturn (SDValue Op, SDValue Chain, SDValue InFlag,
InFlag); InFlag);
InFlag = getOutFlag(LoadRet); InFlag = getOutFlag(LoadRet);
Chain = getChain(LoadRet); Chain = getChain(LoadRet);
ResultVals.push_back(LoadRet); InVals.push_back(LoadRet);
} }
ResultVals.push_back(Chain); return Chain;
SDValue Res = DAG.getMergeValues(&ResultVals[0], ResultVals.size(), dl);
return Res;
} }
SDValue PIC16TargetLowering:: SDValue PIC16TargetLowering::
LowerDirectCallReturn(SDValue Op, SDValue Chain, SDValue RetLabel, LowerDirectCallReturn(SDValue RetLabel, SDValue Chain, SDValue InFlag,
SDValue InFlag, SelectionDAG &DAG) { const SmallVectorImpl<ISD::InputArg> &Ins,
CallSDNode *TheCall = dyn_cast<CallSDNode>(Op); DebugLoc dl, SelectionDAG &DAG,
DebugLoc dl = TheCall->getDebugLoc(); SmallVectorImpl<SDValue> &InVals) {
// Currently handling primitive types only. They will come in // Currently handling primitive types only. They will come in
// i8 parts // i8 parts
unsigned RetVals = TheCall->getNumRetVals(); unsigned RetVals = Ins.size();
std::vector<SDValue> ResultVals;
// Return immediately if the return type is void // Return immediately if the return type is void
if (RetVals == 0) if (RetVals == 0)
@ -1252,29 +1236,20 @@ LowerDirectCallReturn(SDValue Op, SDValue Chain, SDValue RetLabel,
Chain = getChain(LoadRet); Chain = getChain(LoadRet);
Offset++; Offset++;
ResultVals.push_back(LoadRet); InVals.push_back(LoadRet);
} }
// To return use MERGE_VALUES return Chain;
ResultVals.push_back(Chain);
SDValue Res = DAG.getMergeValues(&ResultVals[0], ResultVals.size(), dl);
return Res;
} }
SDValue PIC16TargetLowering::LowerRET(SDValue Op, SelectionDAG &DAG) { SDValue
SDValue Chain = Op.getOperand(0); PIC16TargetLowering::LowerReturn(SDValue Chain,
DebugLoc dl = Op.getDebugLoc(); unsigned CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
DebugLoc dl, SelectionDAG &DAG) {
if (Op.getNumOperands() == 1) // return void
return Op;
// return should have odd number of operands
if ((Op.getNumOperands() % 2) == 0 ) {
llvm_unreachable("Do not know how to return this many arguments!");
}
// Number of values to return // Number of values to return
unsigned NumRet = (Op.getNumOperands() / 2); unsigned NumRet = Outs.size();
// Function returns value always on stack with the offset starting // Function returns value always on stack with the offset starting
// from 0 // from 0
@ -1288,68 +1263,13 @@ SDValue PIC16TargetLowering::LowerRET(SDValue Op, SelectionDAG &DAG) {
SDValue BS = DAG.getConstant(1, MVT::i8); SDValue BS = DAG.getConstant(1, MVT::i8);
SDValue RetVal; SDValue RetVal;
for(unsigned i=0;i<NumRet; ++i) { for(unsigned i=0;i<NumRet; ++i) {
RetVal = Op.getNode()->getOperand(2*i + 1); RetVal = Outs[i].Val;
Chain = DAG.getNode (PIC16ISD::PIC16Store, dl, MVT::Other, Chain, RetVal, Chain = DAG.getNode (PIC16ISD::PIC16Store, dl, MVT::Other, Chain, RetVal,
ES, BS, ES, BS,
DAG.getConstant (i, MVT::i8)); DAG.getConstant (i, MVT::i8));
} }
return DAG.getNode(ISD::RET, dl, MVT::Other, Chain); return DAG.getNode(PIC16ISD::RET, dl, MVT::Other, Chain);
}
// CALL node may have some operands non-legal to PIC16. Generate new CALL
// node with all the operands legal.
// Currently only Callee operand of the CALL node is non-legal. This function
// legalizes the Callee operand and uses all other operands as are to generate
// new CALL node.
SDValue PIC16TargetLowering::LegalizeCALL(SDValue Op, SelectionDAG &DAG) {
CallSDNode *TheCall = dyn_cast<CallSDNode>(Op);
SDValue Chain = TheCall->getChain();
SDValue Callee = TheCall->getCallee();
DebugLoc dl = TheCall->getDebugLoc();
unsigned i =0;
assert(Callee.getValueType() == MVT::i16 &&
"Don't know how to legalize this call node!!!");
assert(Callee.getOpcode() == ISD::BUILD_PAIR &&
"Don't know how to legalize this call node!!!");
if (isDirectAddress(Callee)) {
// Come here for direct calls
Callee = Callee.getOperand(0).getOperand(0);
} else {
// Come here for indirect calls
SDValue Lo, Hi;
// Indirect addresses. Get the hi and lo parts of ptr.
GetExpandedParts(Callee, DAG, Lo, Hi);
// Connect Lo and Hi parts of the callee with the PIC16Connect
Callee = DAG.getNode(PIC16ISD::PIC16Connect, dl, MVT::i8, Lo, Hi);
}
std::vector<SDValue> Ops;
Ops.push_back(Chain);
Ops.push_back(Callee);
// Add the call arguments and their flags
unsigned NumArgs = TheCall->getNumArgs();
for(i=0;i<NumArgs;i++) {
Ops.push_back(TheCall->getArg(i));
Ops.push_back(TheCall->getArgFlagsVal(i));
}
std::vector<MVT> NodeTys;
unsigned NumRets = TheCall->getNumRetVals();
for(i=0;i<NumRets;i++)
NodeTys.push_back(TheCall->getRetValType(i));
// Return a Chain as well
NodeTys.push_back(MVT::Other);
SDVTList VTs = DAG.getVTList(&NodeTys[0], NodeTys.size());
// Generate new call with all the operands legal
return DAG.getCall(TheCall->getCallingConv(), dl,
TheCall->isVarArg(), TheCall->isTailCall(),
TheCall->isInreg(), VTs, &Ops[0], Ops.size(),
TheCall->getNumFixedArgs());
} }
void PIC16TargetLowering:: void PIC16TargetLowering::
@ -1414,36 +1334,40 @@ GetDataAddress(DebugLoc dl, SDValue Callee, SDValue &Chain,
DataAddr_Hi = DAG.getNode(PIC16ISD::MTHI, dl, MVT::i8, Call, OperFlag); DataAddr_Hi = DAG.getNode(PIC16ISD::MTHI, dl, MVT::i8, Call, OperFlag);
} }
SDValue
PIC16TargetLowering::LowerCall(SDValue Chain, SDValue Callee,
unsigned CallConv, bool isVarArg,
bool isTailCall,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) {
SDValue PIC16TargetLowering::LowerCALL(SDValue Op, SelectionDAG &DAG) { assert(Callee.getValueType() == MVT::i16 &&
CallSDNode *TheCall = dyn_cast<CallSDNode>(Op); "Don't know how to legalize this call node!!!");
SDValue Chain = TheCall->getChain();
SDValue Callee = TheCall->getCallee();
DebugLoc dl = TheCall->getDebugLoc();
if (Callee.getValueType() == MVT::i16 &&
Callee.getOpcode() == ISD::BUILD_PAIR) {
// Control should come here only from TypeLegalizer for lowering
// Legalize the non-legal arguments of call and return the
// new call with legal arguments.
return LegalizeCALL(Op, DAG);
}
// Control should come here from Legalize DAG.
// Here all the operands of CALL node should be legal.
// If this is an indirect call then to pass the arguments
// and read the return value back, we need the data address
// of the function being called.
// To get the data address two more calls need to be made.
// The flag to track if this is a direct or indirect call. // The flag to track if this is a direct or indirect call.
bool IsDirectCall = true; bool IsDirectCall = true;
unsigned RetVals = TheCall->getNumRetVals(); unsigned RetVals = Ins.size();
unsigned NumArgs = TheCall->getNumArgs(); unsigned NumArgs = Outs.size();
SDValue DataAddr_Lo, DataAddr_Hi; SDValue DataAddr_Lo, DataAddr_Hi;
if (Callee.getOpcode() == PIC16ISD::PIC16Connect) { if (!isa<GlobalAddressSDNode>(Callee) &&
!isa<ExternalSymbolSDNode>(Callee)) {
IsDirectCall = false; // This is indirect call IsDirectCall = false; // This is indirect call
// If this is an indirect call then to pass the arguments
// and read the return value back, we need the data address
// of the function being called.
// To get the data address two more calls need to be made.
// Come here for indirect calls
SDValue Lo, Hi;
// Indirect addresses. Get the hi and lo parts of ptr.
GetExpandedParts(Callee, DAG, Lo, Hi);
// Connect Lo and Hi parts of the callee with the PIC16Connect
Callee = DAG.getNode(PIC16ISD::PIC16Connect, dl, MVT::i8, Lo, Hi);
// Read DataAddress only if we have to pass arguments or // Read DataAddress only if we have to pass arguments or
// read return value. // read return value.
if ((RetVals > 0) || (NumArgs > 0)) if ((RetVals > 0) || (NumArgs > 0))
@ -1499,12 +1423,13 @@ SDValue PIC16TargetLowering::LowerCALL(SDValue Op, SelectionDAG &DAG) {
// Pass the argument to function before making the call. // Pass the argument to function before making the call.
SDValue CallArgs; SDValue CallArgs;
if (IsDirectCall) { if (IsDirectCall) {
CallArgs = LowerDirectCallArguments(Op, Chain, ArgLabel, OperFlag, DAG); CallArgs = LowerDirectCallArguments(ArgLabel, Chain, OperFlag,
Outs, dl, DAG);
Chain = getChain(CallArgs); Chain = getChain(CallArgs);
OperFlag = getOutFlag(CallArgs); OperFlag = getOutFlag(CallArgs);
} else { } else {
CallArgs = LowerIndirectCallArguments(Op, Chain, OperFlag, DataAddr_Lo, CallArgs = LowerIndirectCallArguments(Chain, OperFlag, DataAddr_Lo,
DataAddr_Hi, DAG); DataAddr_Hi, Outs, dl, DAG);
Chain = getChain(CallArgs); Chain = getChain(CallArgs);
OperFlag = getOutFlag(CallArgs); OperFlag = getOutFlag(CallArgs);
} }
@ -1525,10 +1450,11 @@ SDValue PIC16TargetLowering::LowerCALL(SDValue Op, SelectionDAG &DAG) {
// Lower the return value reading after the call. // Lower the return value reading after the call.
if (IsDirectCall) if (IsDirectCall)
return LowerDirectCallReturn(Op, Chain, RetLabel, OperFlag, DAG); return LowerDirectCallReturn(RetLabel, Chain, OperFlag,
Ins, dl, DAG, InVals);
else else
return LowerIndirectCallReturn(Op, Chain, OperFlag, DataAddr_Lo, return LowerIndirectCallReturn(Chain, OperFlag, DataAddr_Lo,
DataAddr_Hi, DAG); DataAddr_Hi, Ins, dl, DAG, InVals);
} }
bool PIC16TargetLowering::isDirectLoad(const SDValue Op) { bool PIC16TargetLowering::isDirectLoad(const SDValue Op) {
@ -1660,17 +1586,19 @@ void PIC16TargetLowering::InitReservedFrameCount(const Function *F) {
ReservedFrameCount = NumArgs + 1; ReservedFrameCount = NumArgs + 1;
} }
// LowerFORMAL_ARGUMENTS - Argument values are loaded from the // LowerFormalArguments - Argument values are loaded from the
// <fname>.args + offset. All arguments are already broken to leaglized // <fname>.args + offset. All arguments are already broken to leaglized
// types, so the offset just runs from 0 to NumArgVals - 1. // types, so the offset just runs from 0 to NumArgVals - 1.
SDValue PIC16TargetLowering::LowerFORMAL_ARGUMENTS(SDValue Op, SDValue
SelectionDAG &DAG) { PIC16TargetLowering::LowerFormalArguments(SDValue Chain,
SmallVector<SDValue, 8> ArgValues; unsigned CallConv,
unsigned NumArgVals = Op.getNode()->getNumValues() - 1; bool isVarArg,
DebugLoc dl = Op.getDebugLoc(); const SmallVectorImpl<ISD::InputArg> &Ins,
SDValue Chain = Op.getOperand(0); // Formal arguments' chain DebugLoc dl,
SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) {
unsigned NumArgVals = Ins.size();
// Get the callee's name to create the <fname>.args label to pass args. // Get the callee's name to create the <fname>.args label to pass args.
MachineFunction &MF = DAG.getMachineFunction(); MachineFunction &MF = DAG.getMachineFunction();
@ -1694,13 +1622,10 @@ SDValue PIC16TargetLowering::LowerFORMAL_ARGUMENTS(SDValue Op,
SDValue PICLoad = DAG.getNode(PIC16ISD::PIC16LdArg, dl, VTs, Chain, ES, BS, SDValue PICLoad = DAG.getNode(PIC16ISD::PIC16LdArg, dl, VTs, Chain, ES, BS,
Offset); Offset);
Chain = getChain(PICLoad); Chain = getChain(PICLoad);
ArgValues.push_back(PICLoad); InVals.push_back(PICLoad);
} }
// Return a MERGE_VALUE node. return Chain;
ArgValues.push_back(Op.getOperand(0));
return DAG.getNode(ISD::MERGE_VALUES, dl, Op.getNode()->getVTList(),
&ArgValues[0], ArgValues.size()).getValue(Op.getResNo());
} }
// Perform DAGCombine of PIC16Load. // Perform DAGCombine of PIC16Load.

View File

@ -52,6 +52,7 @@ namespace llvm {
SUBCC, // Compare for equality or inequality. SUBCC, // Compare for equality or inequality.
SELECT_ICC, // Psuedo to be caught in schedular and expanded to brcond. SELECT_ICC, // Psuedo to be caught in schedular and expanded to brcond.
BRCOND, // Conditional branch. BRCOND, // Conditional branch.
RET, // Return.
Dummy Dummy
}; };
@ -82,32 +83,35 @@ namespace llvm {
virtual const char *getTargetNodeName(unsigned Opcode) const; virtual const char *getTargetNodeName(unsigned Opcode) const;
/// getSetCCResultType - Return the ISD::SETCC ValueType /// getSetCCResultType - Return the ISD::SETCC ValueType
virtual MVT getSetCCResultType(MVT ValType) const; virtual MVT getSetCCResultType(MVT ValType) const;
SDValue LowerFORMAL_ARGUMENTS(SDValue Op, SelectionDAG &DAG);
SDValue LowerShift(SDValue Op, SelectionDAG &DAG); SDValue LowerShift(SDValue Op, SelectionDAG &DAG);
SDValue LowerMUL(SDValue Op, SelectionDAG &DAG); SDValue LowerMUL(SDValue Op, SelectionDAG &DAG);
SDValue LowerADD(SDValue Op, SelectionDAG &DAG); SDValue LowerADD(SDValue Op, SelectionDAG &DAG);
SDValue LowerSUB(SDValue Op, SelectionDAG &DAG); SDValue LowerSUB(SDValue Op, SelectionDAG &DAG);
SDValue LowerBinOp(SDValue Op, SelectionDAG &DAG); SDValue LowerBinOp(SDValue Op, SelectionDAG &DAG);
SDValue LowerCALL(SDValue Op, SelectionDAG &DAG);
SDValue LowerRET(SDValue Op, SelectionDAG &DAG);
// Call returns // Call returns
SDValue SDValue
LowerDirectCallReturn(SDValue Op, SDValue Chain, SDValue FrameAddress, LowerDirectCallReturn(SDValue RetLabel, SDValue Chain, SDValue InFlag,
SDValue InFlag, SelectionDAG &DAG); const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals);
SDValue SDValue
LowerIndirectCallReturn(SDValue Op, SDValue Chain, SDValue InFlag, LowerIndirectCallReturn(SDValue Chain, SDValue InFlag,
SDValue DataAddr_Lo, SDValue DataAddr_Hi, SDValue DataAddr_Lo, SDValue DataAddr_Hi,
SelectionDAG &DAG); const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals);
// Call arguments // Call arguments
SDValue SDValue
LowerDirectCallArguments(SDValue Op, SDValue Chain, SDValue FrameAddress, LowerDirectCallArguments(SDValue ArgLabel, SDValue Chain, SDValue InFlag,
SDValue InFlag, SelectionDAG &DAG); const SmallVectorImpl<ISD::OutputArg> &Outs,
DebugLoc dl, SelectionDAG &DAG);
SDValue SDValue
LowerIndirectCallArguments(SDValue Op, SDValue Chain, SDValue InFlag, LowerIndirectCallArguments(SDValue Chain, SDValue InFlag,
SDValue DataAddr_Lo, SDValue DataAddr_Hi, SDValue DataAddr_Lo, SDValue DataAddr_Hi,
SelectionDAG &DAG); const SmallVectorImpl<ISD::OutputArg> &Outs,
DebugLoc dl, SelectionDAG &DAG);
SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG); SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG);
SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG); SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG);
@ -125,6 +129,28 @@ namespace llvm {
SmallVectorImpl<SDValue> &Results, SmallVectorImpl<SDValue> &Results,
SelectionDAG &DAG); SelectionDAG &DAG);
virtual SDValue
LowerFormalArguments(SDValue Chain,
unsigned CallConv,
bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals);
virtual SDValue
LowerCall(SDValue Chain, SDValue Callee,
unsigned CallConv, bool isVarArg, bool isTailCall,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals);
virtual SDValue
LowerReturn(SDValue Chain,
unsigned CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
DebugLoc dl, SelectionDAG &DAG);
SDValue ExpandStore(SDNode *N, SelectionDAG &DAG); SDValue ExpandStore(SDNode *N, SelectionDAG &DAG);
SDValue ExpandLoad(SDNode *N, SelectionDAG &DAG); SDValue ExpandLoad(SDNode *N, SelectionDAG &DAG);
SDValue ExpandGlobalAddress(SDNode *N, SelectionDAG &DAG); SDValue ExpandGlobalAddress(SDNode *N, SelectionDAG &DAG);
@ -175,12 +201,6 @@ namespace llvm {
void LegalizeFrameIndex(SDValue Op, SelectionDAG &DAG, SDValue &ES, void LegalizeFrameIndex(SDValue Op, SelectionDAG &DAG, SDValue &ES,
int &Offset); int &Offset);
// CALL node should have all legal operands only. Legalize all non-legal
// operands of CALL node and then return the new call will all operands
// legal.
SDValue LegalizeCALL(SDValue Op, SelectionDAG &DAG);
// For indirect calls data address of the callee frame need to be // For indirect calls data address of the callee frame need to be
// extracted. This function fills the arguments DataAddr_Lo and // extracted. This function fills the arguments DataAddr_Lo and
// DataAddr_Hi with the address of the callee frame. // DataAddr_Hi with the address of the callee frame.

View File

@ -115,6 +115,8 @@ def PIC16Brcond : SDNode<"PIC16ISD::BRCOND", SDT_PIC16Brcond,
def PIC16Selecticc : SDNode<"PIC16ISD::SELECT_ICC", SDT_PIC16Selecticc, def PIC16Selecticc : SDNode<"PIC16ISD::SELECT_ICC", SDT_PIC16Selecticc,
[SDNPInFlag]>; [SDNPInFlag]>;
def PIC16ret : SDNode<"PIC16ISD::RET", SDTNone, [SDNPHasChain]>;
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
// PIC16 Operand Definitions. // PIC16 Operand Definitions.
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
@ -493,7 +495,7 @@ def pagesel :
// Return insn. // Return insn.
let isTerminator = 1, isBarrier = 1, isReturn = 1 in let isTerminator = 1, isBarrier = 1, isReturn = 1 in
def Return : def Return :
ControlFormat<0, (outs), (ins), "return", [(ret)]>; ControlFormat<0, (outs), (ins), "return", [(PIC16ret)]>;
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
// PIC16 Replacment Patterns. // PIC16 Replacment Patterns.

View File

@ -203,9 +203,6 @@ PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM)
setOperationAction(ISD::ConstantPool, MVT::i64, Custom); setOperationAction(ISD::ConstantPool, MVT::i64, Custom);
setOperationAction(ISD::JumpTable, MVT::i64, Custom); setOperationAction(ISD::JumpTable, MVT::i64, Custom);
// RET must be custom lowered, to meet ABI requirements.
setOperationAction(ISD::RET , MVT::Other, Custom);
// TRAP is legal. // TRAP is legal.
setOperationAction(ISD::TRAP, MVT::Other, Legal); setOperationAction(ISD::TRAP, MVT::Other, Legal);
@ -448,7 +445,6 @@ const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const {
case PPCISD::MTFSB1: return "PPCISD::MTFSB1"; case PPCISD::MTFSB1: return "PPCISD::MTFSB1";
case PPCISD::FADDRTZ: return "PPCISD::FADDRTZ"; case PPCISD::FADDRTZ: return "PPCISD::FADDRTZ";
case PPCISD::MTFSF: return "PPCISD::MTFSF"; case PPCISD::MTFSF: return "PPCISD::MTFSF";
case PPCISD::TAILCALL: return "PPCISD::TAILCALL";
case PPCISD::TC_RETURN: return "PPCISD::TC_RETURN"; case PPCISD::TC_RETURN: return "PPCISD::TC_RETURN";
} }
} }
@ -1293,6 +1289,7 @@ SDValue PPCTargetLowering::LowerTRAMPOLINE(SDValue Op, SelectionDAG &DAG) {
std::pair<SDValue, SDValue> CallResult = std::pair<SDValue, SDValue> CallResult =
LowerCallTo(Chain, Op.getValueType().getTypeForMVT(), LowerCallTo(Chain, Op.getValueType().getTypeForMVT(),
false, false, false, false, 0, CallingConv::C, false, false, false, false, false, 0, CallingConv::C, false,
/*isReturnValueUsed=*/true,
DAG.getExternalSymbol("__trampoline_setup", PtrVT), DAG.getExternalSymbol("__trampoline_setup", PtrVT),
Args, DAG, dl); Args, DAG, dl);
@ -1472,9 +1469,8 @@ static const unsigned *GetFPR(const PPCSubtarget &Subtarget) {
/// CalculateStackSlotSize - Calculates the size reserved for this argument on /// CalculateStackSlotSize - Calculates the size reserved for this argument on
/// the stack. /// the stack.
static unsigned CalculateStackSlotSize(SDValue Arg, ISD::ArgFlagsTy Flags, static unsigned CalculateStackSlotSize(MVT ArgVT, ISD::ArgFlagsTy Flags,
unsigned PtrByteSize) { unsigned PtrByteSize) {
MVT ArgVT = Arg.getValueType();
unsigned ArgSize = ArgVT.getSizeInBits()/8; unsigned ArgSize = ArgVT.getSizeInBits()/8;
if (Flags.isByVal()) if (Flags.isByVal())
ArgSize = Flags.getByValSize(); ArgSize = Flags.getByValSize();
@ -1484,13 +1480,30 @@ static unsigned CalculateStackSlotSize(SDValue Arg, ISD::ArgFlagsTy Flags,
} }
SDValue SDValue
PPCTargetLowering::LowerFORMAL_ARGUMENTS_SVR4(SDValue Op, PPCTargetLowering::LowerFormalArguments(SDValue Chain,
SelectionDAG &DAG, unsigned CallConv, bool isVarArg,
int &VarArgsFrameIndex, const SmallVectorImpl<ISD::InputArg>
int &VarArgsStackOffset, &Ins,
unsigned &VarArgsNumGPR, DebugLoc dl, SelectionDAG &DAG,
unsigned &VarArgsNumFPR, SmallVectorImpl<SDValue> &InVals) {
const PPCSubtarget &Subtarget) { if (PPCSubTarget.isSVR4ABI()) {
return LowerFormalArguments_SVR4(Chain, CallConv, isVarArg, Ins,
dl, DAG, InVals);
} else {
return LowerFormalArguments_Darwin(Chain, CallConv, isVarArg, Ins,
dl, DAG, InVals);
}
}
SDValue
PPCTargetLowering::LowerFormalArguments_SVR4(
SDValue Chain,
unsigned CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg>
&Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) {
// SVR4 ABI Stack Frame Layout: // SVR4 ABI Stack Frame Layout:
// +-----------------------------------+ // +-----------------------------------+
// +--> | Back chain | // +--> | Back chain |
@ -1522,25 +1535,21 @@ PPCTargetLowering::LowerFORMAL_ARGUMENTS_SVR4(SDValue Op,
MachineFunction &MF = DAG.getMachineFunction(); MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo *MFI = MF.getFrameInfo(); MachineFrameInfo *MFI = MF.getFrameInfo();
SmallVector<SDValue, 8> ArgValues;
SDValue Root = Op.getOperand(0);
bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue() != 0;
DebugLoc dl = Op.getDebugLoc();
MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
// Potential tail calls could cause overwriting of argument stack slots. // Potential tail calls could cause overwriting of argument stack slots.
unsigned CC = MF.getFunction()->getCallingConv(); bool isImmutable = !(PerformTailCallOpt && (CallConv==CallingConv::Fast));
bool isImmutable = !(PerformTailCallOpt && (CC==CallingConv::Fast));
unsigned PtrByteSize = 4; unsigned PtrByteSize = 4;
// Assign locations to all of the incoming arguments. // Assign locations to all of the incoming arguments.
SmallVector<CCValAssign, 16> ArgLocs; SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs, *DAG.getContext()); CCState CCInfo(CallConv, isVarArg, getTargetMachine(), ArgLocs,
*DAG.getContext());
// Reserve space for the linkage area on the stack. // Reserve space for the linkage area on the stack.
CCInfo.AllocateStack(PPCFrameInfo::getLinkageSize(false, false), PtrByteSize); CCInfo.AllocateStack(PPCFrameInfo::getLinkageSize(false, false), PtrByteSize);
CCInfo.AnalyzeFormalArguments(Op.getNode(), CC_PPC_SVR4); CCInfo.AnalyzeFormalArguments(Ins, CC_PPC_SVR4);
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
CCValAssign &VA = ArgLocs[i]; CCValAssign &VA = ArgLocs[i];
@ -1552,7 +1561,7 @@ PPCTargetLowering::LowerFORMAL_ARGUMENTS_SVR4(SDValue Op,
switch (ValVT.getSimpleVT()) { switch (ValVT.getSimpleVT()) {
default: default:
llvm_unreachable("ValVT not supported by FORMAL_ARGUMENTS Lowering"); llvm_unreachable("ValVT not supported by formal arguments Lowering");
case MVT::i32: case MVT::i32:
RC = PPC::GPRCRegisterClass; RC = PPC::GPRCRegisterClass;
break; break;
@ -1572,9 +1581,9 @@ PPCTargetLowering::LowerFORMAL_ARGUMENTS_SVR4(SDValue Op,
// Transform the arguments stored in physical registers into virtual ones. // Transform the arguments stored in physical registers into virtual ones.
unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC); unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
SDValue ArgValue = DAG.getCopyFromReg(Root, dl, Reg, ValVT); SDValue ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, ValVT);
ArgValues.push_back(ArgValue); InVals.push_back(ArgValue);
} else { } else {
// Argument stored in memory. // Argument stored in memory.
assert(VA.isMemLoc()); assert(VA.isMemLoc());
@ -1585,7 +1594,7 @@ PPCTargetLowering::LowerFORMAL_ARGUMENTS_SVR4(SDValue Op,
// Create load nodes to retrieve arguments from the stack. // Create load nodes to retrieve arguments from the stack.
SDValue FIN = DAG.getFrameIndex(FI, PtrVT); SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
ArgValues.push_back(DAG.getLoad(VA.getValVT(), dl, Root, FIN, NULL, 0)); InVals.push_back(DAG.getLoad(VA.getValVT(), dl, Chain, FIN, NULL, 0));
} }
} }
@ -1593,13 +1602,13 @@ PPCTargetLowering::LowerFORMAL_ARGUMENTS_SVR4(SDValue Op,
// Aggregates passed by value are stored in the local variable space of the // Aggregates passed by value are stored in the local variable space of the
// caller's stack frame, right above the parameter list area. // caller's stack frame, right above the parameter list area.
SmallVector<CCValAssign, 16> ByValArgLocs; SmallVector<CCValAssign, 16> ByValArgLocs;
CCState CCByValInfo(CC, isVarArg, getTargetMachine(), CCState CCByValInfo(CallConv, isVarArg, getTargetMachine(),
ByValArgLocs, *DAG.getContext()); ByValArgLocs, *DAG.getContext());
// Reserve stack space for the allocations in CCInfo. // Reserve stack space for the allocations in CCInfo.
CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize); CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize);
CCByValInfo.AnalyzeFormalArguments(Op.getNode(), CC_PPC_SVR4_ByVal); CCByValInfo.AnalyzeFormalArguments(Ins, CC_PPC_SVR4_ByVal);
// Area that is at least reserved in the caller of this function. // Area that is at least reserved in the caller of this function.
unsigned MinReservedArea = CCByValInfo.getNextStackOffset(); unsigned MinReservedArea = CCByValInfo.getNextStackOffset();
@ -1656,7 +1665,7 @@ PPCTargetLowering::LowerFORMAL_ARGUMENTS_SVR4(SDValue Op,
unsigned GPRIndex = 0; unsigned GPRIndex = 0;
for (; GPRIndex != VarArgsNumGPR; ++GPRIndex) { for (; GPRIndex != VarArgsNumGPR; ++GPRIndex) {
SDValue Val = DAG.getRegister(GPArgRegs[GPRIndex], PtrVT); SDValue Val = DAG.getRegister(GPArgRegs[GPRIndex], PtrVT);
SDValue Store = DAG.getStore(Root, dl, Val, FIN, NULL, 0); SDValue Store = DAG.getStore(Chain, dl, Val, FIN, NULL, 0);
MemOps.push_back(Store); MemOps.push_back(Store);
// Increment the address by four for the next argument to store // Increment the address by four for the next argument to store
SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, PtrVT); SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, PtrVT);
@ -1669,7 +1678,7 @@ PPCTargetLowering::LowerFORMAL_ARGUMENTS_SVR4(SDValue Op,
for (; GPRIndex != NumGPArgRegs; ++GPRIndex) { for (; GPRIndex != NumGPArgRegs; ++GPRIndex) {
unsigned VReg = MF.addLiveIn(GPArgRegs[GPRIndex], &PPC::GPRCRegClass); unsigned VReg = MF.addLiveIn(GPArgRegs[GPRIndex], &PPC::GPRCRegClass);
SDValue Val = DAG.getCopyFromReg(Root, dl, VReg, PtrVT); SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, NULL, 0); SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, NULL, 0);
MemOps.push_back(Store); MemOps.push_back(Store);
// Increment the address by four for the next argument to store // Increment the address by four for the next argument to store
@ -1685,7 +1694,7 @@ PPCTargetLowering::LowerFORMAL_ARGUMENTS_SVR4(SDValue Op,
unsigned FPRIndex = 0; unsigned FPRIndex = 0;
for (FPRIndex = 0; FPRIndex != VarArgsNumFPR; ++FPRIndex) { for (FPRIndex = 0; FPRIndex != VarArgsNumFPR; ++FPRIndex) {
SDValue Val = DAG.getRegister(FPArgRegs[FPRIndex], MVT::f64); SDValue Val = DAG.getRegister(FPArgRegs[FPRIndex], MVT::f64);
SDValue Store = DAG.getStore(Root, dl, Val, FIN, NULL, 0); SDValue Store = DAG.getStore(Chain, dl, Val, FIN, NULL, 0);
MemOps.push_back(Store); MemOps.push_back(Store);
// Increment the address by eight for the next argument to store // Increment the address by eight for the next argument to store
SDValue PtrOff = DAG.getConstant(MVT(MVT::f64).getSizeInBits()/8, SDValue PtrOff = DAG.getConstant(MVT(MVT::f64).getSizeInBits()/8,
@ -1696,7 +1705,7 @@ PPCTargetLowering::LowerFORMAL_ARGUMENTS_SVR4(SDValue Op,
for (; FPRIndex != NumFPArgRegs; ++FPRIndex) { for (; FPRIndex != NumFPArgRegs; ++FPRIndex) {
unsigned VReg = MF.addLiveIn(FPArgRegs[FPRIndex], &PPC::F8RCRegClass); unsigned VReg = MF.addLiveIn(FPArgRegs[FPRIndex], &PPC::F8RCRegClass);
SDValue Val = DAG.getCopyFromReg(Root, dl, VReg, MVT::f64); SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::f64);
SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, NULL, 0); SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, NULL, 0);
MemOps.push_back(Store); MemOps.push_back(Store);
// Increment the address by eight for the next argument to store // Increment the address by eight for the next argument to store
@ -1707,36 +1716,30 @@ PPCTargetLowering::LowerFORMAL_ARGUMENTS_SVR4(SDValue Op,
} }
if (!MemOps.empty()) if (!MemOps.empty())
Root = DAG.getNode(ISD::TokenFactor, dl, Chain = DAG.getNode(ISD::TokenFactor, dl,
MVT::Other, &MemOps[0], MemOps.size()); MVT::Other, &MemOps[0], MemOps.size());
return Chain;
ArgValues.push_back(Root);
// Return the new list of results.
return DAG.getNode(ISD::MERGE_VALUES, dl, Op.getNode()->getVTList(),
&ArgValues[0], ArgValues.size()).getValue(Op.getResNo());
} }
SDValue SDValue
PPCTargetLowering::LowerFORMAL_ARGUMENTS_Darwin(SDValue Op, PPCTargetLowering::LowerFormalArguments_Darwin(
SelectionDAG &DAG, SDValue Chain,
int &VarArgsFrameIndex, unsigned CallConv, bool isVarArg,
const PPCSubtarget &Subtarget) { const SmallVectorImpl<ISD::InputArg>
&Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) {
// TODO: add description of PPC stack frame format, or at least some docs. // TODO: add description of PPC stack frame format, or at least some docs.
// //
MachineFunction &MF = DAG.getMachineFunction(); MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo *MFI = MF.getFrameInfo(); MachineFrameInfo *MFI = MF.getFrameInfo();
SmallVector<SDValue, 8> ArgValues;
SDValue Root = Op.getOperand(0);
bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue() != 0;
DebugLoc dl = Op.getDebugLoc();
MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
bool isPPC64 = PtrVT == MVT::i64; bool isPPC64 = PtrVT == MVT::i64;
// Potential tail calls could cause overwriting of argument stack slots. // Potential tail calls could cause overwriting of argument stack slots.
unsigned CC = MF.getFunction()->getCallingConv(); bool isImmutable = !(PerformTailCallOpt && (CallConv==CallingConv::Fast));
bool isImmutable = !(PerformTailCallOpt && (CC==CallingConv::Fast));
unsigned PtrByteSize = isPPC64 ? 8 : 4; unsigned PtrByteSize = isPPC64 ? 8 : 4;
unsigned ArgOffset = PPCFrameInfo::getLinkageSize(isPPC64, true); unsigned ArgOffset = PPCFrameInfo::getLinkageSize(isPPC64, true);
@ -1752,7 +1755,7 @@ PPCTargetLowering::LowerFORMAL_ARGUMENTS_Darwin(SDValue Op,
PPC::X7, PPC::X8, PPC::X9, PPC::X10, PPC::X7, PPC::X8, PPC::X9, PPC::X10,
}; };
static const unsigned *FPR = GetFPR(Subtarget); static const unsigned *FPR = GetFPR(PPCSubTarget);
static const unsigned VR[] = { static const unsigned VR[] = {
PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
@ -1776,12 +1779,11 @@ PPCTargetLowering::LowerFORMAL_ARGUMENTS_Darwin(SDValue Op,
// entire point of the following loop. // entire point of the following loop.
unsigned VecArgOffset = ArgOffset; unsigned VecArgOffset = ArgOffset;
if (!isVarArg && !isPPC64) { if (!isVarArg && !isPPC64) {
for (unsigned ArgNo = 0, e = Op.getNode()->getNumValues()-1; ArgNo != e; for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e;
++ArgNo) { ++ArgNo) {
MVT ObjectVT = Op.getValue(ArgNo).getValueType(); MVT ObjectVT = Ins[ArgNo].VT;
unsigned ObjSize = ObjectVT.getSizeInBits()/8; unsigned ObjSize = ObjectVT.getSizeInBits()/8;
ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
cast<ARG_FLAGSSDNode>(Op.getOperand(ArgNo+3))->getArgFlags();
if (Flags.isByVal()) { if (Flags.isByVal()) {
// ObjSize is the true size, ArgSize rounded up to multiple of regs. // ObjSize is the true size, ArgSize rounded up to multiple of regs.
@ -1822,15 +1824,13 @@ PPCTargetLowering::LowerFORMAL_ARGUMENTS_Darwin(SDValue Op,
SmallVector<SDValue, 8> MemOps; SmallVector<SDValue, 8> MemOps;
unsigned nAltivecParamsAtEnd = 0; unsigned nAltivecParamsAtEnd = 0;
for (unsigned ArgNo = 0, e = Op.getNode()->getNumValues() - 1; for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) {
ArgNo != e; ++ArgNo) {
SDValue ArgVal; SDValue ArgVal;
bool needsLoad = false; bool needsLoad = false;
MVT ObjectVT = Op.getValue(ArgNo).getValueType(); MVT ObjectVT = Ins[ArgNo].VT;
unsigned ObjSize = ObjectVT.getSizeInBits()/8; unsigned ObjSize = ObjectVT.getSizeInBits()/8;
unsigned ArgSize = ObjSize; unsigned ArgSize = ObjSize;
ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
cast<ARG_FLAGSSDNode>(Op.getOperand(ArgNo+3))->getArgFlags();
unsigned CurArgOffset = ArgOffset; unsigned CurArgOffset = ArgOffset;
@ -1839,13 +1839,13 @@ PPCTargetLowering::LowerFORMAL_ARGUMENTS_Darwin(SDValue Op,
ObjectVT==MVT::v8i16 || ObjectVT==MVT::v16i8) { ObjectVT==MVT::v8i16 || ObjectVT==MVT::v16i8) {
if (isVarArg || isPPC64) { if (isVarArg || isPPC64) {
MinReservedArea = ((MinReservedArea+15)/16)*16; MinReservedArea = ((MinReservedArea+15)/16)*16;
MinReservedArea += CalculateStackSlotSize(Op.getValue(ArgNo), MinReservedArea += CalculateStackSlotSize(ObjectVT,
Flags, Flags,
PtrByteSize); PtrByteSize);
} else nAltivecParamsAtEnd++; } else nAltivecParamsAtEnd++;
} else } else
// Calculate min reserved area. // Calculate min reserved area.
MinReservedArea += CalculateStackSlotSize(Op.getValue(ArgNo), MinReservedArea += CalculateStackSlotSize(Ins[ArgNo].VT,
Flags, Flags,
PtrByteSize); PtrByteSize);
@ -1863,11 +1863,11 @@ PPCTargetLowering::LowerFORMAL_ARGUMENTS_Darwin(SDValue Op,
// The value of the object is its address. // The value of the object is its address.
int FI = MFI->CreateFixedObject(ObjSize, CurArgOffset); int FI = MFI->CreateFixedObject(ObjSize, CurArgOffset);
SDValue FIN = DAG.getFrameIndex(FI, PtrVT); SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
ArgValues.push_back(FIN); InVals.push_back(FIN);
if (ObjSize==1 || ObjSize==2) { if (ObjSize==1 || ObjSize==2) {
if (GPR_idx != Num_GPR_Regs) { if (GPR_idx != Num_GPR_Regs) {
unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass); unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
SDValue Val = DAG.getCopyFromReg(Root, dl, VReg, PtrVT); SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
SDValue Store = DAG.getTruncStore(Val.getValue(1), dl, Val, FIN, SDValue Store = DAG.getTruncStore(Val.getValue(1), dl, Val, FIN,
NULL, 0, ObjSize==1 ? MVT::i8 : MVT::i16 ); NULL, 0, ObjSize==1 ? MVT::i8 : MVT::i16 );
MemOps.push_back(Store); MemOps.push_back(Store);
@ -1886,7 +1886,7 @@ PPCTargetLowering::LowerFORMAL_ARGUMENTS_Darwin(SDValue Op,
unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass); unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
int FI = MFI->CreateFixedObject(PtrByteSize, ArgOffset); int FI = MFI->CreateFixedObject(PtrByteSize, ArgOffset);
SDValue FIN = DAG.getFrameIndex(FI, PtrVT); SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
SDValue Val = DAG.getCopyFromReg(Root, dl, VReg, PtrVT); SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, NULL, 0); SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, NULL, 0);
MemOps.push_back(Store); MemOps.push_back(Store);
++GPR_idx; ++GPR_idx;
@ -1905,7 +1905,7 @@ PPCTargetLowering::LowerFORMAL_ARGUMENTS_Darwin(SDValue Op,
if (!isPPC64) { if (!isPPC64) {
if (GPR_idx != Num_GPR_Regs) { if (GPR_idx != Num_GPR_Regs) {
unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass); unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
ArgVal = DAG.getCopyFromReg(Root, dl, VReg, MVT::i32); ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
++GPR_idx; ++GPR_idx;
} else { } else {
needsLoad = true; needsLoad = true;
@ -1919,7 +1919,7 @@ PPCTargetLowering::LowerFORMAL_ARGUMENTS_Darwin(SDValue Op,
case MVT::i64: // PPC64 case MVT::i64: // PPC64
if (GPR_idx != Num_GPR_Regs) { if (GPR_idx != Num_GPR_Regs) {
unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
ArgVal = DAG.getCopyFromReg(Root, dl, VReg, MVT::i64); ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
if (ObjectVT == MVT::i32) { if (ObjectVT == MVT::i32) {
// PPC64 passes i8, i16, and i32 values in i64 registers. Promote // PPC64 passes i8, i16, and i32 values in i64 registers. Promote
@ -1960,7 +1960,7 @@ PPCTargetLowering::LowerFORMAL_ARGUMENTS_Darwin(SDValue Op,
else else
VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F8RCRegClass); VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F8RCRegClass);
ArgVal = DAG.getCopyFromReg(Root, dl, VReg, ObjectVT); ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
++FPR_idx; ++FPR_idx;
} else { } else {
needsLoad = true; needsLoad = true;
@ -1977,7 +1977,7 @@ PPCTargetLowering::LowerFORMAL_ARGUMENTS_Darwin(SDValue Op,
// except in varargs functions. // except in varargs functions.
if (VR_idx != Num_VR_Regs) { if (VR_idx != Num_VR_Regs) {
unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass); unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass);
ArgVal = DAG.getCopyFromReg(Root, dl, VReg, ObjectVT); ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
if (isVarArg) { if (isVarArg) {
while ((ArgOffset % 16) != 0) { while ((ArgOffset % 16) != 0) {
ArgOffset += PtrByteSize; ArgOffset += PtrByteSize;
@ -2011,10 +2011,10 @@ PPCTargetLowering::LowerFORMAL_ARGUMENTS_Darwin(SDValue Op,
CurArgOffset + (ArgSize - ObjSize), CurArgOffset + (ArgSize - ObjSize),
isImmutable); isImmutable);
SDValue FIN = DAG.getFrameIndex(FI, PtrVT); SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
ArgVal = DAG.getLoad(ObjectVT, dl, Root, FIN, NULL, 0); ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, NULL, 0);
} }
ArgValues.push_back(ArgVal); InVals.push_back(ArgVal);
} }
// Set the size that is at least reserved in caller of this function. Tail // Set the size that is at least reserved in caller of this function. Tail
@ -2056,7 +2056,7 @@ PPCTargetLowering::LowerFORMAL_ARGUMENTS_Darwin(SDValue Op,
else else
VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass); VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
SDValue Val = DAG.getCopyFromReg(Root, dl, VReg, PtrVT); SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, NULL, 0); SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, NULL, 0);
MemOps.push_back(Store); MemOps.push_back(Store);
// Increment the address by four for the next argument to store // Increment the address by four for the next argument to store
@ -2066,14 +2066,10 @@ PPCTargetLowering::LowerFORMAL_ARGUMENTS_Darwin(SDValue Op,
} }
if (!MemOps.empty()) if (!MemOps.empty())
Root = DAG.getNode(ISD::TokenFactor, dl, Chain = DAG.getNode(ISD::TokenFactor, dl,
MVT::Other, &MemOps[0], MemOps.size()); MVT::Other, &MemOps[0], MemOps.size());
ArgValues.push_back(Root); return Chain;
// Return the new list of results.
return DAG.getNode(ISD::MERGE_VALUES, dl, Op.getNode()->getVTList(),
&ArgValues[0], ArgValues.size());
} }
/// CalculateParameterAndLinkageAreaSize - Get the size of the paramter plus /// CalculateParameterAndLinkageAreaSize - Get the size of the paramter plus
@ -2083,13 +2079,14 @@ CalculateParameterAndLinkageAreaSize(SelectionDAG &DAG,
bool isPPC64, bool isPPC64,
bool isVarArg, bool isVarArg,
unsigned CC, unsigned CC,
CallSDNode *TheCall, const SmallVectorImpl<ISD::OutputArg>
&Outs,
unsigned &nAltivecParamsAtEnd) { unsigned &nAltivecParamsAtEnd) {
// Count how many bytes are to be pushed on the stack, including the linkage // Count how many bytes are to be pushed on the stack, including the linkage
// area, and parameter passing area. We start with 24/48 bytes, which is // area, and parameter passing area. We start with 24/48 bytes, which is
// prereserved space for [SP][CR][LR][3 x unused]. // prereserved space for [SP][CR][LR][3 x unused].
unsigned NumBytes = PPCFrameInfo::getLinkageSize(isPPC64, true); unsigned NumBytes = PPCFrameInfo::getLinkageSize(isPPC64, true);
unsigned NumOps = TheCall->getNumArgs(); unsigned NumOps = Outs.size();
unsigned PtrByteSize = isPPC64 ? 8 : 4; unsigned PtrByteSize = isPPC64 ? 8 : 4;
// Add up all the space actually used. // Add up all the space actually used.
@ -2100,8 +2097,8 @@ CalculateParameterAndLinkageAreaSize(SelectionDAG &DAG,
// 16-byte aligned. // 16-byte aligned.
nAltivecParamsAtEnd = 0; nAltivecParamsAtEnd = 0;
for (unsigned i = 0; i != NumOps; ++i) { for (unsigned i = 0; i != NumOps; ++i) {
SDValue Arg = TheCall->getArg(i); SDValue Arg = Outs[i].Val;
ISD::ArgFlagsTy Flags = TheCall->getArgFlags(i); ISD::ArgFlagsTy Flags = Outs[i].Flags;
MVT ArgVT = Arg.getValueType(); MVT ArgVT = Arg.getValueType();
// Varargs Altivec parameters are padded to a 16 byte boundary. // Varargs Altivec parameters are padded to a 16 byte boundary.
if (ArgVT==MVT::v4f32 || ArgVT==MVT::v4i32 || if (ArgVT==MVT::v4f32 || ArgVT==MVT::v4i32 ||
@ -2115,7 +2112,7 @@ CalculateParameterAndLinkageAreaSize(SelectionDAG &DAG,
// Varargs and 64-bit Altivec parameters are padded to 16 byte boundary. // Varargs and 64-bit Altivec parameters are padded to 16 byte boundary.
NumBytes = ((NumBytes+15)/16)*16; NumBytes = ((NumBytes+15)/16)*16;
} }
NumBytes += CalculateStackSlotSize(Arg, Flags, PtrByteSize); NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
} }
// Allow for Altivec parameters at the end, if needed. // Allow for Altivec parameters at the end, if needed.
@ -2160,40 +2157,37 @@ static int CalculateTailCallSPDiff(SelectionDAG& DAG, bool IsTailCall,
return SPDiff; return SPDiff;
} }
/// IsEligibleForTailCallElimination - Check to see whether the next instruction /// IsEligibleForTailCallOptimization - Check whether the call is eligible
/// following the call is a return. A function is eligible if caller/callee /// for tail call optimization. Targets which want to do tail call
/// calling conventions match, currently only fastcc supports tail calls, and /// optimization should implement this function.
/// the function CALL is immediatly followed by a RET.
bool bool
PPCTargetLowering::IsEligibleForTailCallOptimization(CallSDNode *TheCall, PPCTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
SDValue Ret, unsigned CalleeCC,
bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
SelectionDAG& DAG) const { SelectionDAG& DAG) const {
// Variable argument functions are not supported. // Variable argument functions are not supported.
if (!PerformTailCallOpt || TheCall->isVarArg()) if (isVarArg)
return false; return false;
if (CheckTailCallReturnConstraints(TheCall, Ret)) { MachineFunction &MF = DAG.getMachineFunction();
MachineFunction &MF = DAG.getMachineFunction(); unsigned CallerCC = MF.getFunction()->getCallingConv();
unsigned CallerCC = MF.getFunction()->getCallingConv(); if (CalleeCC == CallingConv::Fast && CallerCC == CalleeCC) {
unsigned CalleeCC = TheCall->getCallingConv(); // Functions containing by val parameters are not supported.
if (CalleeCC == CallingConv::Fast && CallerCC == CalleeCC) { for (unsigned i = 0; i != Ins.size(); i++) {
// Functions containing by val parameters are not supported. ISD::ArgFlagsTy Flags = Ins[i].Flags;
for (unsigned i = 0; i != TheCall->getNumArgs(); i++) { if (Flags.isByVal()) return false;
ISD::ArgFlagsTy Flags = TheCall->getArgFlags(i);
if (Flags.isByVal()) return false;
}
SDValue Callee = TheCall->getCallee();
// Non PIC/GOT tail calls are supported.
if (getTargetMachine().getRelocationModel() != Reloc::PIC_)
return true;
// At the moment we can only do local tail calls (in same module, hidden
// or protected) if we are generating PIC.
if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
return G->getGlobal()->hasHiddenVisibility()
|| G->getGlobal()->hasProtectedVisibility();
} }
// Non PIC/GOT tail calls are supported.
if (getTargetMachine().getRelocationModel() != Reloc::PIC_)
return true;
// At the moment we can only do local tail calls (in same module, hidden
// or protected) if we are generating PIC.
if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
return G->getGlobal()->hasHiddenVisibility()
|| G->getGlobal()->hasProtectedVisibility();
} }
return false; return false;
@ -2455,16 +2449,17 @@ unsigned PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag,
return CallOpc; return CallOpc;
} }
static SDValue LowerCallReturn(SDValue Op, SelectionDAG &DAG, TargetMachine &TM, SDValue
CallSDNode *TheCall, SDValue Chain, PPCTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
SDValue InFlag) { unsigned CallConv, bool isVarArg,
bool isVarArg = TheCall->isVarArg(); const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl = TheCall->getDebugLoc(); DebugLoc dl, SelectionDAG &DAG,
SmallVector<SDValue, 16> ResultVals; SmallVectorImpl<SDValue> &InVals) {
SmallVector<CCValAssign, 16> RVLocs; SmallVector<CCValAssign, 16> RVLocs;
unsigned CallerCC = DAG.getMachineFunction().getFunction()->getCallingConv(); CCState CCRetInfo(CallConv, isVarArg, getTargetMachine(),
CCState CCRetInfo(CallerCC, isVarArg, TM, RVLocs, *DAG.getContext()); RVLocs, *DAG.getContext());
CCRetInfo.AnalyzeCallResult(TheCall, RetCC_PPC); CCRetInfo.AnalyzeCallResult(Ins, RetCC_PPC);
// Copy all of the result registers out of their specified physreg. // Copy all of the result registers out of their specified physreg.
for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) { for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
@ -2473,53 +2468,61 @@ static SDValue LowerCallReturn(SDValue Op, SelectionDAG &DAG, TargetMachine &TM,
assert(VA.isRegLoc() && "Can only return in registers!"); assert(VA.isRegLoc() && "Can only return in registers!");
Chain = DAG.getCopyFromReg(Chain, dl, Chain = DAG.getCopyFromReg(Chain, dl,
VA.getLocReg(), VT, InFlag).getValue(1); VA.getLocReg(), VT, InFlag).getValue(1);
ResultVals.push_back(Chain.getValue(0)); InVals.push_back(Chain.getValue(0));
InFlag = Chain.getValue(2); InFlag = Chain.getValue(2);
} }
// If the function returns void, just return the chain. return Chain;
if (RVLocs.empty())
return Chain;
// Otherwise, merge everything together with a MERGE_VALUES node.
ResultVals.push_back(Chain);
SDValue Res = DAG.getNode(ISD::MERGE_VALUES, dl, TheCall->getVTList(),
&ResultVals[0], ResultVals.size());
return Res.getValue(Op.getResNo());
} }
static SDValue
SDValue FinishCall(SelectionDAG &DAG, CallSDNode *TheCall, TargetMachine &TM, PPCTargetLowering::FinishCall(unsigned CallConv, DebugLoc dl, bool isTailCall,
SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass, bool isVarArg,
SDValue Op, SDValue InFlag, SDValue Chain, SDValue &Callee, SelectionDAG &DAG,
int SPDiff, unsigned NumBytes) { SmallVector<std::pair<unsigned, SDValue>, 8>
unsigned CC = TheCall->getCallingConv(); &RegsToPass,
DebugLoc dl = TheCall->getDebugLoc(); SDValue InFlag, SDValue Chain,
bool isTailCall = TheCall->isTailCall() SDValue &Callee,
&& CC == CallingConv::Fast && PerformTailCallOpt; int SPDiff, unsigned NumBytes,
const SmallVectorImpl<ISD::InputArg> &Ins,
SmallVectorImpl<SDValue> &InVals) {
std::vector<MVT> NodeTys; std::vector<MVT> NodeTys;
SmallVector<SDValue, 8> Ops; SmallVector<SDValue, 8> Ops;
unsigned CallOpc = PrepareCall(DAG, Callee, InFlag, Chain, dl, SPDiff, unsigned CallOpc = PrepareCall(DAG, Callee, InFlag, Chain, dl, SPDiff,
isTailCall, RegsToPass, Ops, NodeTys, isTailCall, RegsToPass, Ops, NodeTys,
TM.getSubtarget<PPCSubtarget>().isSVR4ABI()); PPCSubTarget.isSVR4ABI());
// When performing tail call optimization the callee pops its arguments off // When performing tail call optimization the callee pops its arguments off
// the stack. Account for this here so these bytes can be pushed back on in // the stack. Account for this here so these bytes can be pushed back on in
// PPCRegisterInfo::eliminateCallFramePseudoInstr. // PPCRegisterInfo::eliminateCallFramePseudoInstr.
int BytesCalleePops = int BytesCalleePops =
(CC==CallingConv::Fast && PerformTailCallOpt) ? NumBytes : 0; (CallConv==CallingConv::Fast && PerformTailCallOpt) ? NumBytes : 0;
if (InFlag.getNode()) if (InFlag.getNode())
Ops.push_back(InFlag); Ops.push_back(InFlag);
// Emit tail call. // Emit tail call.
if (isTailCall) { if (isTailCall) {
assert(InFlag.getNode() && // If this is the first return lowered for this function, add the regs
"Flag must be set. Depend on flag being set in LowerRET"); // to the liveout set for the function.
Chain = DAG.getNode(PPCISD::TAILCALL, dl, if (DAG.getMachineFunction().getRegInfo().liveout_empty()) {
TheCall->getVTList(), &Ops[0], Ops.size()); SmallVector<CCValAssign, 16> RVLocs;
return SDValue(Chain.getNode(), Op.getResNo()); CCState CCInfo(CallConv, isVarArg, getTargetMachine(), RVLocs,
*DAG.getContext());
CCInfo.AnalyzeCallResult(Ins, RetCC_PPC);
for (unsigned i = 0; i != RVLocs.size(); ++i)
DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg());
}
assert(((Callee.getOpcode() == ISD::Register &&
cast<RegisterSDNode>(Callee)->getReg() == PPC::CTR) ||
Callee.getOpcode() == ISD::TargetExternalSymbol ||
Callee.getOpcode() == ISD::TargetGlobalAddress ||
isa<ConstantSDNode>(Callee)) &&
"Expecting an global address, external symbol, absolute value or register");
return DAG.getNode(PPCISD::TC_RETURN, dl, MVT::Other, &Ops[0], Ops.size());
} }
Chain = DAG.getNode(CallOpc, dl, NodeTys, &Ops[0], Ops.size()); Chain = DAG.getNode(CallOpc, dl, NodeTys, &Ops[0], Ops.size());
@ -2528,27 +2531,49 @@ SDValue FinishCall(SelectionDAG &DAG, CallSDNode *TheCall, TargetMachine &TM,
Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true), Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
DAG.getIntPtrConstant(BytesCalleePops, true), DAG.getIntPtrConstant(BytesCalleePops, true),
InFlag); InFlag);
if (TheCall->getValueType(0) != MVT::Other) if (!Ins.empty())
InFlag = Chain.getValue(1); InFlag = Chain.getValue(1);
return LowerCallReturn(Op, DAG, TM, TheCall, Chain, InFlag); return LowerCallResult(Chain, InFlag, CallConv, isVarArg,
Ins, dl, DAG, InVals);
} }
SDValue PPCTargetLowering::LowerCALL_SVR4(SDValue Op, SelectionDAG &DAG, SDValue
const PPCSubtarget &Subtarget, PPCTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
TargetMachine &TM) { unsigned CallConv, bool isVarArg,
// See PPCTargetLowering::LowerFORMAL_ARGUMENTS_SVR4() for a description bool isTailCall,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) {
if (PPCSubTarget.isSVR4ABI()) {
return LowerCall_SVR4(Chain, Callee, CallConv, isVarArg,
isTailCall, Outs, Ins,
dl, DAG, InVals);
} else {
return LowerCall_Darwin(Chain, Callee, CallConv, isVarArg,
isTailCall, Outs, Ins,
dl, DAG, InVals);
}
}
SDValue
PPCTargetLowering::LowerCall_SVR4(SDValue Chain, SDValue Callee,
unsigned CallConv, bool isVarArg,
bool isTailCall,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) {
// See PPCTargetLowering::LowerFormalArguments_SVR4() for a description
// of the SVR4 ABI stack frame layout. // of the SVR4 ABI stack frame layout.
CallSDNode *TheCall = cast<CallSDNode>(Op.getNode());
SDValue Chain = TheCall->getChain(); assert((!isTailCall ||
bool isVarArg = TheCall->isVarArg(); (CallConv == CallingConv::Fast && PerformTailCallOpt)) &&
unsigned CC = TheCall->getCallingConv(); "IsEligibleForTailCallOptimization missed a case!");
assert((CC == CallingConv::C ||
CC == CallingConv::Fast) && "Unknown calling convention!"); assert((CallConv == CallingConv::C ||
bool isTailCall = TheCall->isTailCall() CallConv == CallingConv::Fast) && "Unknown calling convention!");
&& CC == CallingConv::Fast && PerformTailCallOpt;
SDValue Callee = TheCall->getCallee();
DebugLoc dl = TheCall->getDebugLoc();
MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
unsigned PtrByteSize = 4; unsigned PtrByteSize = 4;
@ -2560,7 +2585,7 @@ SDValue PPCTargetLowering::LowerCALL_SVR4(SDValue Op, SelectionDAG &DAG,
// and restoring the callers stack pointer in this functions epilog. This is // and restoring the callers stack pointer in this functions epilog. This is
// done because by tail calling the called function might overwrite the value // done because by tail calling the called function might overwrite the value
// in this function's (MF) stack pointer stack slot 0(SP). // in this function's (MF) stack pointer stack slot 0(SP).
if (PerformTailCallOpt && CC==CallingConv::Fast) if (PerformTailCallOpt && CallConv==CallingConv::Fast)
MF.getInfo<PPCFunctionInfo>()->setHasFastCall(); MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
// Count how many bytes are to be pushed on the stack, including the linkage // Count how many bytes are to be pushed on the stack, including the linkage
@ -2569,7 +2594,8 @@ SDValue PPCTargetLowering::LowerCALL_SVR4(SDValue Op, SelectionDAG &DAG,
// Assign locations to all of the outgoing arguments. // Assign locations to all of the outgoing arguments.
SmallVector<CCValAssign, 16> ArgLocs; SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs, *DAG.getContext()); CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
ArgLocs, *DAG.getContext());
// Reserve space for the linkage area on the stack. // Reserve space for the linkage area on the stack.
CCInfo.AllocateStack(PPCFrameInfo::getLinkageSize(false, false), PtrByteSize); CCInfo.AllocateStack(PPCFrameInfo::getLinkageSize(false, false), PtrByteSize);
@ -2578,15 +2604,14 @@ SDValue PPCTargetLowering::LowerCALL_SVR4(SDValue Op, SelectionDAG &DAG,
// Handle fixed and variable vector arguments differently. // Handle fixed and variable vector arguments differently.
// Fixed vector arguments go into registers as long as registers are // Fixed vector arguments go into registers as long as registers are
// available. Variable vector arguments always go into memory. // available. Variable vector arguments always go into memory.
unsigned NumArgs = TheCall->getNumArgs(); unsigned NumArgs = Outs.size();
unsigned NumFixedArgs = TheCall->getNumFixedArgs();
for (unsigned i = 0; i != NumArgs; ++i) { for (unsigned i = 0; i != NumArgs; ++i) {
MVT ArgVT = TheCall->getArg(i).getValueType(); MVT ArgVT = Outs[i].Val.getValueType();
ISD::ArgFlagsTy ArgFlags = TheCall->getArgFlags(i); ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
bool Result; bool Result;
if (i < NumFixedArgs) { if (Outs[i].IsFixed) {
Result = CC_PPC_SVR4(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, Result = CC_PPC_SVR4(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags,
CCInfo); CCInfo);
} else { } else {
@ -2604,18 +2629,18 @@ SDValue PPCTargetLowering::LowerCALL_SVR4(SDValue Op, SelectionDAG &DAG,
} }
} else { } else {
// All arguments are treated the same. // All arguments are treated the same.
CCInfo.AnalyzeCallOperands(TheCall, CC_PPC_SVR4); CCInfo.AnalyzeCallOperands(Outs, CC_PPC_SVR4);
} }
// Assign locations to all of the outgoing aggregate by value arguments. // Assign locations to all of the outgoing aggregate by value arguments.
SmallVector<CCValAssign, 16> ByValArgLocs; SmallVector<CCValAssign, 16> ByValArgLocs;
CCState CCByValInfo(CC, isVarArg, getTargetMachine(), ByValArgLocs, CCState CCByValInfo(CallConv, isVarArg, getTargetMachine(), ByValArgLocs,
*DAG.getContext()); *DAG.getContext());
// Reserve stack space for the allocations in CCInfo. // Reserve stack space for the allocations in CCInfo.
CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize); CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize);
CCByValInfo.AnalyzeCallOperands(TheCall, CC_PPC_SVR4_ByVal); CCByValInfo.AnalyzeCallOperands(Outs, CC_PPC_SVR4_ByVal);
// Size of the linkage area, parameter list area and the part of the local // Size of the linkage area, parameter list area and the part of the local
// space variable where copies of aggregates which are passed by value are // space variable where copies of aggregates which are passed by value are
@ -2651,8 +2676,8 @@ SDValue PPCTargetLowering::LowerCALL_SVR4(SDValue Op, SelectionDAG &DAG,
i != e; i != e;
++i) { ++i) {
CCValAssign &VA = ArgLocs[i]; CCValAssign &VA = ArgLocs[i];
SDValue Arg = TheCall->getArg(i); SDValue Arg = Outs[i].Val;
ISD::ArgFlagsTy Flags = TheCall->getArgFlags(i); ISD::ArgFlagsTy Flags = Outs[i].Flags;
if (Flags.isByVal()) { if (Flags.isByVal()) {
// Argument is an aggregate which is passed by value, thus we need to // Argument is an aggregate which is passed by value, thus we need to
@ -2736,22 +2761,21 @@ SDValue PPCTargetLowering::LowerCALL_SVR4(SDValue Op, SelectionDAG &DAG,
false, TailCallArguments); false, TailCallArguments);
} }
return FinishCall(DAG, TheCall, TM, RegsToPass, Op, InFlag, Chain, Callee, return FinishCall(CallConv, dl, isTailCall, isVarArg, DAG,
SPDiff, NumBytes); RegsToPass, InFlag, Chain, Callee, SPDiff, NumBytes,
Ins, InVals);
} }
SDValue PPCTargetLowering::LowerCALL_Darwin(SDValue Op, SelectionDAG &DAG, SDValue
const PPCSubtarget &Subtarget, PPCTargetLowering::LowerCall_Darwin(SDValue Chain, SDValue Callee,
TargetMachine &TM) { unsigned CallConv, bool isVarArg,
CallSDNode *TheCall = cast<CallSDNode>(Op.getNode()); bool isTailCall,
SDValue Chain = TheCall->getChain(); const SmallVectorImpl<ISD::OutputArg> &Outs,
bool isVarArg = TheCall->isVarArg(); const SmallVectorImpl<ISD::InputArg> &Ins,
unsigned CC = TheCall->getCallingConv(); DebugLoc dl, SelectionDAG &DAG,
bool isTailCall = TheCall->isTailCall() SmallVectorImpl<SDValue> &InVals) {
&& CC == CallingConv::Fast && PerformTailCallOpt;
SDValue Callee = TheCall->getCallee(); unsigned NumOps = Outs.size();
unsigned NumOps = TheCall->getNumArgs();
DebugLoc dl = TheCall->getDebugLoc();
MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
bool isPPC64 = PtrVT == MVT::i64; bool isPPC64 = PtrVT == MVT::i64;
@ -2764,7 +2788,7 @@ SDValue PPCTargetLowering::LowerCALL_Darwin(SDValue Op, SelectionDAG &DAG,
// and restoring the callers stack pointer in this functions epilog. This is // and restoring the callers stack pointer in this functions epilog. This is
// done because by tail calling the called function might overwrite the value // done because by tail calling the called function might overwrite the value
// in this function's (MF) stack pointer stack slot 0(SP). // in this function's (MF) stack pointer stack slot 0(SP).
if (PerformTailCallOpt && CC==CallingConv::Fast) if (PerformTailCallOpt && CallConv==CallingConv::Fast)
MF.getInfo<PPCFunctionInfo>()->setHasFastCall(); MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
unsigned nAltivecParamsAtEnd = 0; unsigned nAltivecParamsAtEnd = 0;
@ -2773,13 +2797,19 @@ SDValue PPCTargetLowering::LowerCALL_Darwin(SDValue Op, SelectionDAG &DAG,
// area, and parameter passing area. We start with 24/48 bytes, which is // area, and parameter passing area. We start with 24/48 bytes, which is
// prereserved space for [SP][CR][LR][3 x unused]. // prereserved space for [SP][CR][LR][3 x unused].
unsigned NumBytes = unsigned NumBytes =
CalculateParameterAndLinkageAreaSize(DAG, isPPC64, isVarArg, CC, TheCall, CalculateParameterAndLinkageAreaSize(DAG, isPPC64, isVarArg, CallConv,
Outs,
nAltivecParamsAtEnd); nAltivecParamsAtEnd);
// Calculate by how many bytes the stack has to be adjusted in case of tail // Calculate by how many bytes the stack has to be adjusted in case of tail
// call optimization. // call optimization.
int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes); int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes);
// To protect arguments on the stack from being clobbered in a tail call,
// force all the loads to happen before doing any other lowering.
if (isTailCall)
Chain = DAG.getStackArgumentTokenFactor(Chain);
// Adjust the stack pointer for the new arguments... // Adjust the stack pointer for the new arguments...
// These operations are automatically eliminated by the prolog/epilog pass // These operations are automatically eliminated by the prolog/epilog pass
Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true)); Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true));
@ -2815,7 +2845,7 @@ SDValue PPCTargetLowering::LowerCALL_Darwin(SDValue Op, SelectionDAG &DAG,
PPC::X3, PPC::X4, PPC::X5, PPC::X6, PPC::X3, PPC::X4, PPC::X5, PPC::X6,
PPC::X7, PPC::X8, PPC::X9, PPC::X10, PPC::X7, PPC::X8, PPC::X9, PPC::X10,
}; };
static const unsigned *FPR = GetFPR(Subtarget); static const unsigned *FPR = GetFPR(PPCSubTarget);
static const unsigned VR[] = { static const unsigned VR[] = {
PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
@ -2833,8 +2863,8 @@ SDValue PPCTargetLowering::LowerCALL_Darwin(SDValue Op, SelectionDAG &DAG,
SmallVector<SDValue, 8> MemOpChains; SmallVector<SDValue, 8> MemOpChains;
for (unsigned i = 0; i != NumOps; ++i) { for (unsigned i = 0; i != NumOps; ++i) {
bool inMem = false; bool inMem = false;
SDValue Arg = TheCall->getArg(i); SDValue Arg = Outs[i].Val;
ISD::ArgFlagsTy Flags = TheCall->getArgFlags(i); ISD::ArgFlagsTy Flags = Outs[i].Flags;
// PtrOff will be used to store the current argument to the stack if a // PtrOff will be used to store the current argument to the stack if a
// register cannot be found for it. // register cannot be found for it.
@ -3031,7 +3061,7 @@ SDValue PPCTargetLowering::LowerCALL_Darwin(SDValue Op, SelectionDAG &DAG,
ArgOffset = ((ArgOffset+15)/16)*16; ArgOffset = ((ArgOffset+15)/16)*16;
ArgOffset += 12*16; ArgOffset += 12*16;
for (unsigned i = 0; i != NumOps; ++i) { for (unsigned i = 0; i != NumOps; ++i) {
SDValue Arg = TheCall->getArg(i); SDValue Arg = Outs[i].Val;
MVT ArgType = Arg.getValueType(); MVT ArgType = Arg.getValueType();
if (ArgType==MVT::v4f32 || ArgType==MVT::v4i32 || if (ArgType==MVT::v4f32 || ArgType==MVT::v4i32 ||
ArgType==MVT::v8i16 || ArgType==MVT::v16i8) { ArgType==MVT::v8i16 || ArgType==MVT::v16i8) {
@ -3065,18 +3095,21 @@ SDValue PPCTargetLowering::LowerCALL_Darwin(SDValue Op, SelectionDAG &DAG,
FPOp, true, TailCallArguments); FPOp, true, TailCallArguments);
} }
return FinishCall(DAG, TheCall, TM, RegsToPass, Op, InFlag, Chain, Callee, return FinishCall(CallConv, dl, isTailCall, isVarArg, DAG,
SPDiff, NumBytes); RegsToPass, InFlag, Chain, Callee, SPDiff, NumBytes,
Ins, InVals);
} }
SDValue PPCTargetLowering::LowerRET(SDValue Op, SelectionDAG &DAG, SDValue
TargetMachine &TM) { PPCTargetLowering::LowerReturn(SDValue Chain,
unsigned CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
DebugLoc dl, SelectionDAG &DAG) {
SmallVector<CCValAssign, 16> RVLocs; SmallVector<CCValAssign, 16> RVLocs;
unsigned CC = DAG.getMachineFunction().getFunction()->getCallingConv(); CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
bool isVarArg = DAG.getMachineFunction().getFunction()->isVarArg(); RVLocs, *DAG.getContext());
DebugLoc dl = Op.getDebugLoc(); CCInfo.AnalyzeReturn(Outs, RetCC_PPC);
CCState CCInfo(CC, isVarArg, TM, RVLocs, *DAG.getContext());
CCInfo.AnalyzeReturn(Op.getNode(), RetCC_PPC);
// If this is the first return lowered for this function, add the regs to the // If this is the first return lowered for this function, add the regs to the
// liveout set for the function. // liveout set for the function.
@ -3085,37 +3118,6 @@ SDValue PPCTargetLowering::LowerRET(SDValue Op, SelectionDAG &DAG,
DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg()); DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg());
} }
SDValue Chain = Op.getOperand(0);
Chain = GetPossiblePreceedingTailCall(Chain, PPCISD::TAILCALL);
if (Chain.getOpcode() == PPCISD::TAILCALL) {
SDValue TailCall = Chain;
SDValue TargetAddress = TailCall.getOperand(1);
SDValue StackAdjustment = TailCall.getOperand(2);
assert(((TargetAddress.getOpcode() == ISD::Register &&
cast<RegisterSDNode>(TargetAddress)->getReg() == PPC::CTR) ||
TargetAddress.getOpcode() == ISD::TargetExternalSymbol ||
TargetAddress.getOpcode() == ISD::TargetGlobalAddress ||
isa<ConstantSDNode>(TargetAddress)) &&
"Expecting an global address, external symbol, absolute value or register");
assert(StackAdjustment.getOpcode() == ISD::Constant &&
"Expecting a const value");
SmallVector<SDValue,8> Operands;
Operands.push_back(Chain.getOperand(0));
Operands.push_back(TargetAddress);
Operands.push_back(StackAdjustment);
// Copy registers used by the call. Last operand is a flag so it is not
// copied.
for (unsigned i=3; i < TailCall.getNumOperands()-1; i++) {
Operands.push_back(Chain.getOperand(i));
}
return DAG.getNode(PPCISD::TC_RETURN, dl, MVT::Other, &Operands[0],
Operands.size());
}
SDValue Flag; SDValue Flag;
// Copy the result values into the output registers. // Copy the result values into the output registers.
@ -3123,7 +3125,7 @@ SDValue PPCTargetLowering::LowerRET(SDValue Op, SelectionDAG &DAG,
CCValAssign &VA = RVLocs[i]; CCValAssign &VA = RVLocs[i];
assert(VA.isRegLoc() && "Can only return in registers!"); assert(VA.isRegLoc() && "Can only return in registers!");
Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
Op.getOperand(i*2+1), Flag); Outs[i].Val, Flag);
Flag = Chain.getValue(1); Flag = Chain.getValue(1);
} }
@ -4178,24 +4180,6 @@ SDValue PPCTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) {
return LowerVAARG(Op, DAG, VarArgsFrameIndex, VarArgsStackOffset, return LowerVAARG(Op, DAG, VarArgsFrameIndex, VarArgsStackOffset,
VarArgsNumGPR, VarArgsNumFPR, PPCSubTarget); VarArgsNumGPR, VarArgsNumFPR, PPCSubTarget);
case ISD::FORMAL_ARGUMENTS:
if (PPCSubTarget.isSVR4ABI()) {
return LowerFORMAL_ARGUMENTS_SVR4(Op, DAG, VarArgsFrameIndex,
VarArgsStackOffset, VarArgsNumGPR,
VarArgsNumFPR, PPCSubTarget);
} else {
return LowerFORMAL_ARGUMENTS_Darwin(Op, DAG, VarArgsFrameIndex,
PPCSubTarget);
}
case ISD::CALL:
if (PPCSubTarget.isSVR4ABI()) {
return LowerCALL_SVR4(Op, DAG, PPCSubTarget, getTargetMachine());
} else {
return LowerCALL_Darwin(Op, DAG, PPCSubTarget, getTargetMachine());
}
case ISD::RET: return LowerRET(Op, DAG, getTargetMachine());
case ISD::STACKRESTORE: return LowerSTACKRESTORE(Op, DAG, PPCSubTarget); case ISD::STACKRESTORE: return LowerSTACKRESTORE(Op, DAG, PPCSubTarget);
case ISD::DYNAMIC_STACKALLOC: case ISD::DYNAMIC_STACKALLOC:
return LowerDYNAMIC_STACKALLOC(Op, DAG, PPCSubTarget); return LowerDYNAMIC_STACKALLOC(Op, DAG, PPCSubTarget);

View File

@ -160,8 +160,6 @@ namespace llvm {
/// indexed. This is used to implement atomic operations. /// indexed. This is used to implement atomic operations.
STCX, STCX,
/// TAILCALL - Indicates a tail call should be taken.
TAILCALL,
/// TC_RETURN - A tail call return. /// TC_RETURN - A tail call return.
/// operand #0 chain /// operand #0 chain
/// operand #1 callee (register or absolute) /// operand #1 callee (register or absolute)
@ -327,12 +325,12 @@ namespace llvm {
/// the offset of the target addressing mode. /// the offset of the target addressing mode.
virtual bool isLegalAddressImmediate(GlobalValue *GV) const; virtual bool isLegalAddressImmediate(GlobalValue *GV) const;
/// IsEligibleForTailCallOptimization - Check whether the call is eligible virtual bool
/// for tail call optimization. Targets which want to do tail call IsEligibleForTailCallOptimization(SDValue Callee,
/// optimization should implement this function. unsigned CalleeCC,
virtual bool IsEligibleForTailCallOptimization(CallSDNode *TheCall, bool isVarArg,
SDValue Ret, const SmallVectorImpl<ISD::InputArg> &Ins,
SelectionDAG &DAG) const; SelectionDAG& DAG) const;
virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const; virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const;
@ -370,20 +368,6 @@ namespace llvm {
SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG, int VarArgsFrameIndex, SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG, int VarArgsFrameIndex,
int VarArgsStackOffset, unsigned VarArgsNumGPR, int VarArgsStackOffset, unsigned VarArgsNumGPR,
unsigned VarArgsNumFPR, const PPCSubtarget &Subtarget); unsigned VarArgsNumFPR, const PPCSubtarget &Subtarget);
SDValue LowerFORMAL_ARGUMENTS_SVR4(SDValue Op, SelectionDAG &DAG,
int &VarArgsFrameIndex,
int &VarArgsStackOffset,
unsigned &VarArgsNumGPR,
unsigned &VarArgsNumFPR,
const PPCSubtarget &Subtarget);
SDValue LowerFORMAL_ARGUMENTS_Darwin(SDValue Op, SelectionDAG &DAG,
int &VarArgsFrameIndex,
const PPCSubtarget &Subtarget);
SDValue LowerCALL_Darwin(SDValue Op, SelectionDAG &DAG,
const PPCSubtarget &Subtarget, TargetMachine &TM);
SDValue LowerCALL_SVR4(SDValue Op, SelectionDAG &DAG,
const PPCSubtarget &Subtarget, TargetMachine &TM);
SDValue LowerRET(SDValue Op, SelectionDAG &DAG, TargetMachine &TM);
SDValue LowerSTACKRESTORE(SDValue Op, SelectionDAG &DAG, SDValue LowerSTACKRESTORE(SDValue Op, SelectionDAG &DAG,
const PPCSubtarget &Subtarget); const PPCSubtarget &Subtarget);
SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG, SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG,
@ -400,6 +384,71 @@ namespace llvm {
SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG); SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG);
SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG); SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG);
SDValue LowerMUL(SDValue Op, SelectionDAG &DAG); SDValue LowerMUL(SDValue Op, SelectionDAG &DAG);
SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
unsigned CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals);
SDValue FinishCall(unsigned CallConv, DebugLoc dl, bool isTailCall,
bool isVarArg,
SelectionDAG &DAG,
SmallVector<std::pair<unsigned, SDValue>, 8>
&RegsToPass,
SDValue InFlag, SDValue Chain,
SDValue &Callee,
int SPDiff, unsigned NumBytes,
const SmallVectorImpl<ISD::InputArg> &Ins,
SmallVectorImpl<SDValue> &InVals);
virtual SDValue
LowerFormalArguments(SDValue Chain,
unsigned CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals);
virtual SDValue
LowerCall(SDValue Chain, SDValue Callee,
unsigned CallConv, bool isVarArg, bool isTailCall,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals);
virtual SDValue
LowerReturn(SDValue Chain,
unsigned CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
DebugLoc dl, SelectionDAG &DAG);
SDValue
LowerFormalArguments_Darwin(SDValue Chain,
unsigned CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals);
SDValue
LowerFormalArguments_SVR4(SDValue Chain,
unsigned CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals);
SDValue
LowerCall_Darwin(SDValue Chain, SDValue Callee,
unsigned CallConv, bool isVarArg, bool isTailCall,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals);
SDValue
LowerCall_SVR4(SDValue Chain, SDValue Callee,
unsigned CallConv, bool isVarArg, bool isTailCall,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals);
}; };
} }

View File

@ -125,9 +125,6 @@ def retflag : SDNode<"PPCISD::RET_FLAG", SDTNone,
def PPCtc_return : SDNode<"PPCISD::TC_RETURN", SDT_PPCTC_ret, def PPCtc_return : SDNode<"PPCISD::TC_RETURN", SDT_PPCTC_ret,
[SDNPHasChain, SDNPOptInFlag]>; [SDNPHasChain, SDNPOptInFlag]>;
def PPCtailcall : SDNode<"PPCISD::TAILCALL", SDT_PPCCall,
[SDNPHasChain, SDNPOutFlag, SDNPOptInFlag]>;
def PPCvcmp : SDNode<"PPCISD::VCMP" , SDT_PPCvcmp, []>; def PPCvcmp : SDNode<"PPCISD::VCMP" , SDT_PPCvcmp, []>;
def PPCvcmp_o : SDNode<"PPCISD::VCMPo", SDT_PPCvcmp, [SDNPOutFlag]>; def PPCvcmp_o : SDNode<"PPCISD::VCMPo", SDT_PPCvcmp, [SDNPOutFlag]>;

View File

@ -33,18 +33,21 @@ using namespace llvm;
#include "SparcGenCallingConv.inc" #include "SparcGenCallingConv.inc"
static SDValue LowerRET(SDValue Op, SelectionDAG &DAG) { SDValue
SparcTargetLowering::LowerReturn(SDValue Chain,
unsigned CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
DebugLoc dl, SelectionDAG &DAG) {
// CCValAssign - represent the assignment of the return value to locations. // CCValAssign - represent the assignment of the return value to locations.
SmallVector<CCValAssign, 16> RVLocs; SmallVector<CCValAssign, 16> RVLocs;
unsigned CC = DAG.getMachineFunction().getFunction()->getCallingConv();
bool isVarArg = DAG.getMachineFunction().getFunction()->isVarArg();
DebugLoc dl = Op.getDebugLoc();
// CCState - Info about the registers and stack slot. // CCState - Info about the registers and stack slot.
CCState CCInfo(CC, isVarArg, DAG.getTarget(), RVLocs, *DAG.getContext()); CCState CCInfo(CallConv, isVarArg, DAG.getTarget(),
RVLocs, *DAG.getContext());
// Analize return values of ISD::RET // Analize return values.
CCInfo.AnalyzeReturn(Op.getNode(), RetCC_Sparc32); CCInfo.AnalyzeReturn(Outs, RetCC_Sparc32);
// If this is the first return lowered for this function, add the regs to the // If this is the first return lowered for this function, add the regs to the
// liveout set for the function. // liveout set for the function.
@ -54,7 +57,6 @@ static SDValue LowerRET(SDValue Op, SelectionDAG &DAG) {
DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg()); DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg());
} }
SDValue Chain = Op.getOperand(0);
SDValue Flag; SDValue Flag;
// Copy the result values into the output registers. // Copy the result values into the output registers.
@ -62,10 +64,8 @@ static SDValue LowerRET(SDValue Op, SelectionDAG &DAG) {
CCValAssign &VA = RVLocs[i]; CCValAssign &VA = RVLocs[i];
assert(VA.isRegLoc() && "Can only return in registers!"); assert(VA.isRegLoc() && "Can only return in registers!");
// ISD::RET => ret chain, (regnum1,val1), ...
// So i*2+1 index only the regnums.
Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
Op.getOperand(i*2+1), Flag); Outs[i].Val, Flag);
// Guarantee that all emitted copies are stuck together with flags. // Guarantee that all emitted copies are stuck together with flags.
Flag = Chain.getValue(1); Flag = Chain.getValue(1);
@ -76,23 +76,25 @@ static SDValue LowerRET(SDValue Op, SelectionDAG &DAG) {
return DAG.getNode(SPISD::RET_FLAG, dl, MVT::Other, Chain); return DAG.getNode(SPISD::RET_FLAG, dl, MVT::Other, Chain);
} }
/// LowerArguments - V8 uses a very simple ABI, where all values are passed in /// LowerFormalArguments - V8 uses a very simple ABI, where all values are
/// either one or two GPRs, including FP values. TODO: we should pass FP values /// passed in either one or two GPRs, including FP values. TODO: we should
/// in FP registers for fastcc functions. /// pass FP values in FP registers for fastcc functions.
SDValue SDValue
SparcTargetLowering::LowerFORMAL_ARGUMENTS(SDValue Op, SparcTargetLowering::LowerFormalArguments(SDValue Chain,
SelectionDAG &DAG) { unsigned CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg>
&Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) {
MachineFunction &MF = DAG.getMachineFunction(); MachineFunction &MF = DAG.getMachineFunction();
MachineRegisterInfo &RegInfo = MF.getRegInfo(); MachineRegisterInfo &RegInfo = MF.getRegInfo();
SDValue Root = Op.getOperand(0);
bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue() != 0;
unsigned CC = MF.getFunction()->getCallingConv();
DebugLoc dl = Op.getDebugLoc();
// Assign locations to all of the incoming arguments. // Assign locations to all of the incoming arguments.
SmallVector<CCValAssign, 16> ArgLocs; SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs, *DAG.getContext()); CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
CCInfo.AnalyzeFormalArguments(Op.getNode(), CC_Sparc32); ArgLocs, *DAG.getContext());
CCInfo.AnalyzeFormalArguments(Ins, CC_Sparc32);
static const unsigned ArgRegs[] = { static const unsigned ArgRegs[] = {
SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5 SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
@ -100,7 +102,6 @@ SparcTargetLowering::LowerFORMAL_ARGUMENTS(SDValue Op,
const unsigned *CurArgReg = ArgRegs, *ArgRegEnd = ArgRegs+6; const unsigned *CurArgReg = ArgRegs, *ArgRegEnd = ArgRegs+6;
unsigned ArgOffset = 68; unsigned ArgOffset = 68;
SmallVector<SDValue, 16> ArgValues;
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
SDValue ArgValue; SDValue ArgValue;
CCValAssign &VA = ArgLocs[i]; CCValAssign &VA = ArgLocs[i];
@ -113,23 +114,26 @@ SparcTargetLowering::LowerFORMAL_ARGUMENTS(SDValue Op,
case MVT::i8: case MVT::i8:
case MVT::i16: case MVT::i16:
case MVT::i32: case MVT::i32:
if (CurArgReg < ArgRegEnd) { // Lives in an incoming GPR if (!Ins[i].Used) { // Argument is dead.
if (CurArgReg < ArgRegEnd) ++CurArgReg;
InVals.push_back(DAG.getUNDEF(ObjectVT));
} else if (CurArgReg < ArgRegEnd) { // Lives in an incoming GPR
unsigned VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass); unsigned VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
MF.getRegInfo().addLiveIn(*CurArgReg++, VReg); MF.getRegInfo().addLiveIn(*CurArgReg++, VReg);
SDValue Arg = DAG.getCopyFromReg(Root, dl, VReg, MVT::i32); SDValue Arg = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
if (ObjectVT != MVT::i32) { if (ObjectVT != MVT::i32) {
unsigned AssertOp = ISD::AssertSext; unsigned AssertOp = ISD::AssertSext;
Arg = DAG.getNode(AssertOp, dl, MVT::i32, Arg, Arg = DAG.getNode(AssertOp, dl, MVT::i32, Arg,
DAG.getValueType(ObjectVT)); DAG.getValueType(ObjectVT));
Arg = DAG.getNode(ISD::TRUNCATE, dl, ObjectVT, Arg); Arg = DAG.getNode(ISD::TRUNCATE, dl, ObjectVT, Arg);
} }
ArgValues.push_back(Arg); InVals.push_back(Arg);
} else { } else {
int FrameIdx = MF.getFrameInfo()->CreateFixedObject(4, ArgOffset); int FrameIdx = MF.getFrameInfo()->CreateFixedObject(4, ArgOffset);
SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32); SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
SDValue Load; SDValue Load;
if (ObjectVT == MVT::i32) { if (ObjectVT == MVT::i32) {
Load = DAG.getLoad(MVT::i32, dl, Root, FIPtr, NULL, 0); Load = DAG.getLoad(MVT::i32, dl, Chain, FIPtr, NULL, 0);
} else { } else {
ISD::LoadExtType LoadOp = ISD::SEXTLOAD; ISD::LoadExtType LoadOp = ISD::SEXTLOAD;
@ -137,56 +141,63 @@ SparcTargetLowering::LowerFORMAL_ARGUMENTS(SDValue Op,
unsigned Offset = 4-std::max(1U, ObjectVT.getSizeInBits()/8); unsigned Offset = 4-std::max(1U, ObjectVT.getSizeInBits()/8);
FIPtr = DAG.getNode(ISD::ADD, dl, MVT::i32, FIPtr, FIPtr = DAG.getNode(ISD::ADD, dl, MVT::i32, FIPtr,
DAG.getConstant(Offset, MVT::i32)); DAG.getConstant(Offset, MVT::i32));
Load = DAG.getExtLoad(LoadOp, dl, MVT::i32, Root, FIPtr, Load = DAG.getExtLoad(LoadOp, dl, MVT::i32, Chain, FIPtr,
NULL, 0, ObjectVT); NULL, 0, ObjectVT);
Load = DAG.getNode(ISD::TRUNCATE, dl, ObjectVT, Load); Load = DAG.getNode(ISD::TRUNCATE, dl, ObjectVT, Load);
} }
ArgValues.push_back(Load); InVals.push_back(Load);
} }
ArgOffset += 4; ArgOffset += 4;
break; break;
case MVT::f32: case MVT::f32:
if (CurArgReg < ArgRegEnd) { // Lives in an incoming GPR if (!Ins[i].Used) { // Argument is dead.
if (CurArgReg < ArgRegEnd) ++CurArgReg;
InVals.push_back(DAG.getUNDEF(ObjectVT));
} else if (CurArgReg < ArgRegEnd) { // Lives in an incoming GPR
// FP value is passed in an integer register. // FP value is passed in an integer register.
unsigned VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass); unsigned VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
MF.getRegInfo().addLiveIn(*CurArgReg++, VReg); MF.getRegInfo().addLiveIn(*CurArgReg++, VReg);
SDValue Arg = DAG.getCopyFromReg(Root, dl, VReg, MVT::i32); SDValue Arg = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
Arg = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, Arg); Arg = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, Arg);
ArgValues.push_back(Arg); InVals.push_back(Arg);
} else { } else {
int FrameIdx = MF.getFrameInfo()->CreateFixedObject(4, ArgOffset); int FrameIdx = MF.getFrameInfo()->CreateFixedObject(4, ArgOffset);
SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32); SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
SDValue Load = DAG.getLoad(MVT::f32, dl, Root, FIPtr, NULL, 0); SDValue Load = DAG.getLoad(MVT::f32, dl, Chain, FIPtr, NULL, 0);
ArgValues.push_back(Load); InVals.push_back(Load);
} }
ArgOffset += 4; ArgOffset += 4;
break; break;
case MVT::i64: case MVT::i64:
case MVT::f64: case MVT::f64:
{ if (!Ins[i].Used) { // Argument is dead.
if (CurArgReg < ArgRegEnd) ++CurArgReg;
if (CurArgReg < ArgRegEnd) ++CurArgReg;
InVals.push_back(DAG.getUNDEF(ObjectVT));
} else {
SDValue HiVal; SDValue HiVal;
if (CurArgReg < ArgRegEnd) { // Lives in an incoming GPR if (CurArgReg < ArgRegEnd) { // Lives in an incoming GPR
unsigned VRegHi = RegInfo.createVirtualRegister(&SP::IntRegsRegClass); unsigned VRegHi = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
MF.getRegInfo().addLiveIn(*CurArgReg++, VRegHi); MF.getRegInfo().addLiveIn(*CurArgReg++, VRegHi);
HiVal = DAG.getCopyFromReg(Root, dl, VRegHi, MVT::i32); HiVal = DAG.getCopyFromReg(Chain, dl, VRegHi, MVT::i32);
} else { } else {
int FrameIdx = MF.getFrameInfo()->CreateFixedObject(4, ArgOffset); int FrameIdx = MF.getFrameInfo()->CreateFixedObject(4, ArgOffset);
SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32); SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
HiVal = DAG.getLoad(MVT::i32, dl, Root, FIPtr, NULL, 0); HiVal = DAG.getLoad(MVT::i32, dl, Chain, FIPtr, NULL, 0);
} }
SDValue LoVal; SDValue LoVal;
if (CurArgReg < ArgRegEnd) { // Lives in an incoming GPR if (CurArgReg < ArgRegEnd) { // Lives in an incoming GPR
unsigned VRegLo = RegInfo.createVirtualRegister(&SP::IntRegsRegClass); unsigned VRegLo = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
MF.getRegInfo().addLiveIn(*CurArgReg++, VRegLo); MF.getRegInfo().addLiveIn(*CurArgReg++, VRegLo);
LoVal = DAG.getCopyFromReg(Root, dl, VRegLo, MVT::i32); LoVal = DAG.getCopyFromReg(Chain, dl, VRegLo, MVT::i32);
} else { } else {
int FrameIdx = MF.getFrameInfo()->CreateFixedObject(4, ArgOffset+4); int FrameIdx = MF.getFrameInfo()->CreateFixedObject(4, ArgOffset+4);
SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32); SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
LoVal = DAG.getLoad(MVT::i32, dl, Root, FIPtr, NULL, 0); LoVal = DAG.getLoad(MVT::i32, dl, Chain, FIPtr, NULL, 0);
} }
// Compose the two halves together into an i64 unit. // Compose the two halves together into an i64 unit.
@ -197,7 +208,7 @@ SparcTargetLowering::LowerFORMAL_ARGUMENTS(SDValue Op,
if (ObjectVT == MVT::f64) if (ObjectVT == MVT::f64)
WholeValue = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f64, WholeValue); WholeValue = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f64, WholeValue);
ArgValues.push_back(WholeValue); InVals.push_back(WholeValue);
} }
ArgOffset += 8; ArgOffset += 8;
break; break;
@ -224,32 +235,29 @@ SparcTargetLowering::LowerFORMAL_ARGUMENTS(SDValue Op,
} }
if (!OutChains.empty()) { if (!OutChains.empty()) {
OutChains.push_back(Root); OutChains.push_back(Chain);
Root = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
&OutChains[0], OutChains.size()); &OutChains[0], OutChains.size());
} }
} }
ArgValues.push_back(Root); return Chain;
// Return the new list of results.
return DAG.getNode(ISD::MERGE_VALUES, dl, Op.getNode()->getVTList(),
&ArgValues[0], ArgValues.size()).getValue(Op.getResNo());
} }
static SDValue LowerCALL(SDValue Op, SelectionDAG &DAG) { SDValue
CallSDNode *TheCall = cast<CallSDNode>(Op.getNode()); SparcTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
unsigned CallingConv = TheCall->getCallingConv(); unsigned CallConv, bool isVarArg,
SDValue Chain = TheCall->getChain(); bool isTailCall,
SDValue Callee = TheCall->getCallee(); const SmallVectorImpl<ISD::OutputArg> &Outs,
bool isVarArg = TheCall->isVarArg(); const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl = TheCall->getDebugLoc(); DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) {
#if 0 #if 0
// Analyze operands of the call, assigning locations to each operand. // Analyze operands of the call, assigning locations to each operand.
SmallVector<CCValAssign, 16> ArgLocs; SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CallingConv, isVarArg, DAG.getTarget(), ArgLocs); CCState CCInfo(CallConv, isVarArg, DAG.getTarget(), ArgLocs);
CCInfo.AnalyzeCallOperands(Op.getNode(), CC_Sparc32); CCInfo.AnalyzeCallOperands(Outs, CC_Sparc32);
// Get the size of the outgoing arguments stack space requirement. // Get the size of the outgoing arguments stack space requirement.
unsigned ArgsSize = CCInfo.getNextStackOffset(); unsigned ArgsSize = CCInfo.getNextStackOffset();
@ -259,8 +267,8 @@ static SDValue LowerCALL(SDValue Op, SelectionDAG &DAG) {
// Count the size of the outgoing arguments. // Count the size of the outgoing arguments.
unsigned ArgsSize = 0; unsigned ArgsSize = 0;
for (unsigned i = 0, e = TheCall->getNumArgs(); i != e; ++i) { for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
switch (TheCall->getArg(i).getValueType().getSimpleVT()) { switch (Outs[i].Val.getValueType().getSimpleVT()) {
default: llvm_unreachable("Unknown value type!"); default: llvm_unreachable("Unknown value type!");
case MVT::i1: case MVT::i1:
case MVT::i8: case MVT::i8:
@ -293,9 +301,7 @@ static SDValue LowerCALL(SDValue Op, SelectionDAG &DAG) {
// Walk the register/memloc assignments, inserting copies/loads. // Walk the register/memloc assignments, inserting copies/loads.
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
CCValAssign &VA = ArgLocs[i]; CCValAssign &VA = ArgLocs[i];
SDValue Arg = Outs[i].Val;
// Arguments start after the 5 first operands of ISD::CALL
SDValue Arg = TheCall->getArg(i);
// Promote the value if needed. // Promote the value if needed.
switch (VA.getLocInfo()) { switch (VA.getLocInfo()) {
@ -335,8 +341,8 @@ static SDValue LowerCALL(SDValue Op, SelectionDAG &DAG) {
}; };
unsigned ArgOffset = 68; unsigned ArgOffset = 68;
for (unsigned i = 0, e = TheCall->getNumArgs(); i != e; ++i) { for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
SDValue Val = TheCall->getArg(i); SDValue Val = Outs[i].Val;
MVT ObjectVT = Val.getValueType(); MVT ObjectVT = Val.getValueType();
SDValue ValToStore(0, 0); SDValue ValToStore(0, 0);
unsigned ObjSize; unsigned ObjSize;
@ -469,11 +475,10 @@ static SDValue LowerCALL(SDValue Op, SelectionDAG &DAG) {
// Assign locations to each value returned by this call. // Assign locations to each value returned by this call.
SmallVector<CCValAssign, 16> RVLocs; SmallVector<CCValAssign, 16> RVLocs;
CCState RVInfo(CallingConv, isVarArg, DAG.getTarget(), CCState RVInfo(CallConv, isVarArg, DAG.getTarget(),
RVLocs, *DAG.getContext()); RVLocs, *DAG.getContext());
RVInfo.AnalyzeCallResult(TheCall, RetCC_Sparc32); RVInfo.AnalyzeCallResult(Ins, RetCC_Sparc32);
SmallVector<SDValue, 8> ResultVals;
// Copy all of the result registers out of their specified physreg. // Copy all of the result registers out of their specified physreg.
for (unsigned i = 0; i != RVLocs.size(); ++i) { for (unsigned i = 0; i != RVLocs.size(); ++i) {
@ -486,15 +491,10 @@ static SDValue LowerCALL(SDValue Op, SelectionDAG &DAG) {
Chain = DAG.getCopyFromReg(Chain, dl, Reg, Chain = DAG.getCopyFromReg(Chain, dl, Reg,
RVLocs[i].getValVT(), InFlag).getValue(1); RVLocs[i].getValVT(), InFlag).getValue(1);
InFlag = Chain.getValue(2); InFlag = Chain.getValue(2);
ResultVals.push_back(Chain.getValue(0)); InVals.push_back(Chain.getValue(0));
} }
ResultVals.push_back(Chain); return Chain;
// Merge everything together with a MERGE_VALUES node.
return DAG.getNode(ISD::MERGE_VALUES, dl,
TheCall->getVTList(), &ResultVals[0],
ResultVals.size());
} }
@ -668,9 +668,6 @@ SparcTargetLowering::SparcTargetLowering(TargetMachine &TM)
setOperationAction(ISD::DBG_LABEL, MVT::Other, Expand); setOperationAction(ISD::DBG_LABEL, MVT::Other, Expand);
setOperationAction(ISD::EH_LABEL, MVT::Other, Expand); setOperationAction(ISD::EH_LABEL, MVT::Other, Expand);
// RET must be custom lowered, to meet ABI requirements
setOperationAction(ISD::RET , MVT::Other, Custom);
// VASTART needs to be custom lowered to use the VarArgsFrameIndex. // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
setOperationAction(ISD::VASTART , MVT::Other, Custom); setOperationAction(ISD::VASTART , MVT::Other, Custom);
// VAARG needs to be lowered to not do unaligned accesses for doubles. // VAARG needs to be lowered to not do unaligned accesses for doubles.
@ -948,9 +945,6 @@ LowerOperation(SDValue Op, SelectionDAG &DAG) {
case ISD::VASTART: return LowerVASTART(Op, DAG, *this); case ISD::VASTART: return LowerVASTART(Op, DAG, *this);
case ISD::VAARG: return LowerVAARG(Op, DAG); case ISD::VAARG: return LowerVAARG(Op, DAG);
case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG); case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
case ISD::CALL: return LowerCALL(Op, DAG);
case ISD::FORMAL_ARGUMENTS: return LowerFORMAL_ARGUMENTS(Op, DAG);
case ISD::RET: return LowerRET(Op, DAG);
} }
} }

View File

@ -44,7 +44,6 @@ namespace llvm {
public: public:
SparcTargetLowering(TargetMachine &TM); SparcTargetLowering(TargetMachine &TM);
virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG); virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG);
SDValue LowerFORMAL_ARGUMENTS(SDValue Op, SelectionDAG &DAG);
int getVarArgsFrameOffset() const { return VarArgsFrameOffset; } int getVarArgsFrameOffset() const { return VarArgsFrameOffset; }
@ -74,6 +73,29 @@ namespace llvm {
/// getFunctionAlignment - Return the Log2 alignment of this function. /// getFunctionAlignment - Return the Log2 alignment of this function.
virtual unsigned getFunctionAlignment(const Function *F) const; virtual unsigned getFunctionAlignment(const Function *F) const;
virtual SDValue
LowerFormalArguments(SDValue Chain,
unsigned CallConv,
bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals);
virtual SDValue
LowerCall(SDValue Chain, SDValue Callee,
unsigned CallConv, bool isVarArg,
bool isTailCall,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals);
virtual SDValue
LowerReturn(SDValue Chain,
unsigned CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
DebugLoc dl, SelectionDAG &DAG);
}; };
} // end namespace llvm } // end namespace llvm

View File

@ -754,8 +754,6 @@ def : Pat<(call tglobaladdr:$dst),
def : Pat<(call texternalsym:$dst), def : Pat<(call texternalsym:$dst),
(CALL texternalsym:$dst)>; (CALL texternalsym:$dst)>;
def : Pat<(ret), (RETL)>;
// Map integer extload's to zextloads. // Map integer extload's to zextloads.
def : Pat<(i32 (extloadi1 ADDRrr:$src)), (LDUBrr ADDRrr:$src)>; def : Pat<(i32 (extloadi1 ADDRrr:$src)), (LDUBrr ADDRrr:$src)>;
def : Pat<(i32 (extloadi1 ADDRri:$src)), (LDUBri ADDRri:$src)>; def : Pat<(i32 (extloadi1 ADDRri:$src)), (LDUBri ADDRri:$src)>;

View File

@ -82,8 +82,6 @@ SystemZTargetLowering::SystemZTargetLowering(SystemZTargetMachine &tm) :
setSchedulingPreference(SchedulingForLatency); setSchedulingPreference(SchedulingForLatency);
setBooleanContents(ZeroOrOneBooleanContent); setBooleanContents(ZeroOrOneBooleanContent);
setOperationAction(ISD::RET, MVT::Other, Custom);
setOperationAction(ISD::BR_JT, MVT::Other, Expand); setOperationAction(ISD::BR_JT, MVT::Other, Expand);
setOperationAction(ISD::BRCOND, MVT::Other, Expand); setOperationAction(ISD::BRCOND, MVT::Other, Expand);
setOperationAction(ISD::BR_CC, MVT::i32, Custom); setOperationAction(ISD::BR_CC, MVT::i32, Custom);
@ -155,9 +153,6 @@ SystemZTargetLowering::SystemZTargetLowering(SystemZTargetMachine &tm) :
SDValue SystemZTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) { SDValue SystemZTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) {
switch (Op.getOpcode()) { switch (Op.getOpcode()) {
case ISD::FORMAL_ARGUMENTS: return LowerFORMAL_ARGUMENTS(Op, DAG);
case ISD::RET: return LowerRET(Op, DAG);
case ISD::CALL: return LowerCALL(Op, DAG);
case ISD::BR_CC: return LowerBR_CC(Op, DAG); case ISD::BR_CC: return LowerBR_CC(Op, DAG);
case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG); case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG);
case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG); case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
@ -175,27 +170,41 @@ SDValue SystemZTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) {
#include "SystemZGenCallingConv.inc" #include "SystemZGenCallingConv.inc"
SDValue SystemZTargetLowering::LowerFORMAL_ARGUMENTS(SDValue Op, SDValue
SelectionDAG &DAG) { SystemZTargetLowering::LowerFormalArguments(SDValue Chain,
unsigned CC = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); unsigned CallConv,
switch (CC) { bool isVarArg,
const SmallVectorImpl<ISD::InputArg>
&Ins,
DebugLoc dl,
SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) {
switch (CallConv) {
default: default:
llvm_unreachable("Unsupported calling convention"); llvm_unreachable("Unsupported calling convention");
case CallingConv::C: case CallingConv::C:
case CallingConv::Fast: case CallingConv::Fast:
return LowerCCCArguments(Op, DAG); return LowerCCCArguments(Chain, CallConv, isVarArg, Ins, dl, DAG, InVals);
} }
} }
SDValue SystemZTargetLowering::LowerCALL(SDValue Op, SelectionDAG &DAG) { SDValue
CallSDNode *TheCall = cast<CallSDNode>(Op.getNode()); SystemZTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
unsigned CallingConv = TheCall->getCallingConv(); unsigned CallConv, bool isVarArg,
switch (CallingConv) { bool isTailCall,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) {
switch (CallConv) {
default: default:
llvm_unreachable("Unsupported calling convention"); llvm_unreachable("Unsupported calling convention");
case CallingConv::Fast: case CallingConv::Fast:
case CallingConv::C: case CallingConv::C:
return LowerCCCCallTo(Op, DAG, CallingConv); return LowerCCCCallTo(Chain, Callee, CallConv, isVarArg, isTailCall,
Outs, Ins, dl, DAG, InVals);
} }
} }
@ -203,25 +212,29 @@ SDValue SystemZTargetLowering::LowerCALL(SDValue Op, SelectionDAG &DAG) {
/// generate load operations for arguments places on the stack. /// generate load operations for arguments places on the stack.
// FIXME: struct return stuff // FIXME: struct return stuff
// FIXME: varargs // FIXME: varargs
SDValue SystemZTargetLowering::LowerCCCArguments(SDValue Op, SDValue
SelectionDAG &DAG) { SystemZTargetLowering::LowerCCCArguments(SDValue Chain,
unsigned CallConv,
bool isVarArg,
const SmallVectorImpl<ISD::InputArg>
&Ins,
DebugLoc dl,
SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) {
MachineFunction &MF = DAG.getMachineFunction(); MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo *MFI = MF.getFrameInfo(); MachineFrameInfo *MFI = MF.getFrameInfo();
MachineRegisterInfo &RegInfo = MF.getRegInfo(); MachineRegisterInfo &RegInfo = MF.getRegInfo();
SDValue Root = Op.getOperand(0);
bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue() != 0;
unsigned CC = MF.getFunction()->getCallingConv();
DebugLoc dl = Op.getDebugLoc();
// Assign locations to all of the incoming arguments. // Assign locations to all of the incoming arguments.
SmallVector<CCValAssign, 16> ArgLocs; SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs, *DAG.getContext()); CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
CCInfo.AnalyzeFormalArguments(Op.getNode(), CC_SystemZ); ArgLocs, *DAG.getContext());
CCInfo.AnalyzeFormalArguments(Ins, CC_SystemZ);
if (isVarArg) if (isVarArg)
llvm_report_error("Varargs not supported yet"); llvm_report_error("Varargs not supported yet");
SmallVector<SDValue, 16> ArgValues;
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
SDValue ArgValue; SDValue ArgValue;
CCValAssign &VA = ArgLocs[i]; CCValAssign &VA = ArgLocs[i];
@ -232,7 +245,7 @@ SDValue SystemZTargetLowering::LowerCCCArguments(SDValue Op,
switch (LocVT.getSimpleVT()) { switch (LocVT.getSimpleVT()) {
default: default:
#ifndef NDEBUG #ifndef NDEBUG
cerr << "LowerFORMAL_ARGUMENTS Unhandled argument type: " cerr << "LowerFormalArguments Unhandled argument type: "
<< LocVT.getSimpleVT() << LocVT.getSimpleVT()
<< "\n"; << "\n";
#endif #endif
@ -250,7 +263,7 @@ SDValue SystemZTargetLowering::LowerCCCArguments(SDValue Op,
unsigned VReg = RegInfo.createVirtualRegister(RC); unsigned VReg = RegInfo.createVirtualRegister(RC);
RegInfo.addLiveIn(VA.getLocReg(), VReg); RegInfo.addLiveIn(VA.getLocReg(), VReg);
ArgValue = DAG.getCopyFromReg(Root, dl, VReg, LocVT); ArgValue = DAG.getCopyFromReg(Chain, dl, VReg, LocVT);
} else { } else {
// Sanity check // Sanity check
assert(VA.isMemLoc()); assert(VA.isMemLoc());
@ -263,7 +276,7 @@ SDValue SystemZTargetLowering::LowerCCCArguments(SDValue Op,
// Create the SelectionDAG nodes corresponding to a load // Create the SelectionDAG nodes corresponding to a load
// from this parameter // from this parameter
SDValue FIN = DAG.getFrameIndex(FI, getPointerTy()); SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
ArgValue = DAG.getLoad(LocVT, dl, Root, FIN, ArgValue = DAG.getLoad(LocVT, dl, Chain, FIN,
PseudoSourceValue::getFixedStack(FI), 0); PseudoSourceValue::getFixedStack(FI), 0);
} }
@ -280,26 +293,25 @@ SDValue SystemZTargetLowering::LowerCCCArguments(SDValue Op,
if (VA.getLocInfo() != CCValAssign::Full) if (VA.getLocInfo() != CCValAssign::Full)
ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue); ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
ArgValues.push_back(ArgValue); InVals.push_back(ArgValue);
} }
ArgValues.push_back(Root); return Chain;
// Return the new list of results.
return DAG.getNode(ISD::MERGE_VALUES, dl, Op.getNode()->getVTList(),
&ArgValues[0], ArgValues.size()).getValue(Op.getResNo());
} }
/// LowerCCCCallTo - functions arguments are copied from virtual regs to /// LowerCCCCallTo - functions arguments are copied from virtual regs to
/// (physical regs)/(stack frame), CALLSEQ_START and CALLSEQ_END are emitted. /// (physical regs)/(stack frame), CALLSEQ_START and CALLSEQ_END are emitted.
/// TODO: sret. /// TODO: sret.
SDValue SystemZTargetLowering::LowerCCCCallTo(SDValue Op, SelectionDAG &DAG, SDValue
unsigned CC) { SystemZTargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee,
CallSDNode *TheCall = cast<CallSDNode>(Op.getNode()); unsigned CallConv, bool isVarArg,
SDValue Chain = TheCall->getChain(); bool isTailCall,
SDValue Callee = TheCall->getCallee(); const SmallVectorImpl<ISD::OutputArg>
bool isVarArg = TheCall->isVarArg(); &Outs,
DebugLoc dl = Op.getDebugLoc(); const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) {
MachineFunction &MF = DAG.getMachineFunction(); MachineFunction &MF = DAG.getMachineFunction();
// Offset to first argument stack slot. // Offset to first argument stack slot.
@ -307,9 +319,10 @@ SDValue SystemZTargetLowering::LowerCCCCallTo(SDValue Op, SelectionDAG &DAG,
// Analyze operands of the call, assigning locations to each operand. // Analyze operands of the call, assigning locations to each operand.
SmallVector<CCValAssign, 16> ArgLocs; SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs, *DAG.getContext()); CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
ArgLocs, *DAG.getContext());
CCInfo.AnalyzeCallOperands(TheCall, CC_SystemZ); CCInfo.AnalyzeCallOperands(Outs, CC_SystemZ);
// Get a count of how many bytes are to be pushed on the stack. // Get a count of how many bytes are to be pushed on the stack.
unsigned NumBytes = CCInfo.getNextStackOffset(); unsigned NumBytes = CCInfo.getNextStackOffset();
@ -325,8 +338,7 @@ SDValue SystemZTargetLowering::LowerCCCCallTo(SDValue Op, SelectionDAG &DAG,
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
CCValAssign &VA = ArgLocs[i]; CCValAssign &VA = ArgLocs[i];
// Arguments start after the 5 first operands of ISD::CALL SDValue Arg = Outs[i].Val;
SDValue Arg = TheCall->getArg(i);
// Promote the value if needed. // Promote the value if needed.
switch (VA.getLocInfo()) { switch (VA.getLocInfo()) {
@ -418,30 +430,27 @@ SDValue SystemZTargetLowering::LowerCCCCallTo(SDValue Op, SelectionDAG &DAG,
// Handle result values, copying them out of physregs into vregs that we // Handle result values, copying them out of physregs into vregs that we
// return. // return.
return SDValue(LowerCallResult(Chain, InFlag, TheCall, CC, DAG), return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins, dl,
Op.getResNo()); DAG, InVals);
} }
/// LowerCallResult - Lower the result values of an ISD::CALL into the /// LowerCallResult - Lower the result values of a call into the
/// appropriate copies out of appropriate physical registers. This assumes that /// appropriate copies out of appropriate physical registers.
/// Chain/InFlag are the input chain/flag to use, and that TheCall is the call ///
/// being lowered. Returns a SDNode with the same number of values as the SDValue
/// ISD::CALL.
SDNode*
SystemZTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag, SystemZTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
CallSDNode *TheCall, unsigned CallConv, bool isVarArg,
unsigned CallingConv, const SmallVectorImpl<ISD::InputArg>
SelectionDAG &DAG) { &Ins,
bool isVarArg = TheCall->isVarArg(); DebugLoc dl, SelectionDAG &DAG,
DebugLoc dl = TheCall->getDebugLoc(); SmallVectorImpl<SDValue> &InVals) {
// Assign locations to each value returned by this call. // Assign locations to each value returned by this call.
SmallVector<CCValAssign, 16> RVLocs; SmallVector<CCValAssign, 16> RVLocs;
CCState CCInfo(CallingConv, isVarArg, getTargetMachine(), RVLocs, CCState CCInfo(CallConv, isVarArg, getTargetMachine(), RVLocs,
*DAG.getContext()); *DAG.getContext());
CCInfo.AnalyzeCallResult(TheCall, RetCC_SystemZ); CCInfo.AnalyzeCallResult(Ins, RetCC_SystemZ);
SmallVector<SDValue, 8> ResultVals;
// Copy all of the result registers out of their specified physreg. // Copy all of the result registers out of their specified physreg.
for (unsigned i = 0; i != RVLocs.size(); ++i) { for (unsigned i = 0; i != RVLocs.size(); ++i) {
@ -465,29 +474,28 @@ SystemZTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
if (VA.getLocInfo() != CCValAssign::Full) if (VA.getLocInfo() != CCValAssign::Full)
RetValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), RetValue); RetValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), RetValue);
ResultVals.push_back(RetValue); InVals.push_back(RetValue);
} }
ResultVals.push_back(Chain); return Chain;
// Merge everything together with a MERGE_VALUES node.
return DAG.getNode(ISD::MERGE_VALUES, dl, TheCall->getVTList(),
&ResultVals[0], ResultVals.size()).getNode();
} }
SDValue SystemZTargetLowering::LowerRET(SDValue Op, SelectionDAG &DAG) { SDValue
SystemZTargetLowering::LowerReturn(SDValue Chain,
unsigned CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
DebugLoc dl, SelectionDAG &DAG) {
// CCValAssign - represent the assignment of the return value to a location // CCValAssign - represent the assignment of the return value to a location
SmallVector<CCValAssign, 16> RVLocs; SmallVector<CCValAssign, 16> RVLocs;
unsigned CC = DAG.getMachineFunction().getFunction()->getCallingConv();
bool isVarArg = DAG.getMachineFunction().getFunction()->isVarArg();
DebugLoc dl = Op.getDebugLoc();
// CCState - Info about the registers and stack slot. // CCState - Info about the registers and stack slot.
CCState CCInfo(CC, isVarArg, getTargetMachine(), RVLocs, *DAG.getContext()); CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
RVLocs, *DAG.getContext());
// Analize return values of ISD::RET // Analize return values.
CCInfo.AnalyzeReturn(Op.getNode(), RetCC_SystemZ); CCInfo.AnalyzeReturn(Outs, RetCC_SystemZ);
// If this is the first return lowered for this function, add the regs to the // If this is the first return lowered for this function, add the regs to the
// liveout set for the function. // liveout set for the function.
@ -497,14 +505,12 @@ SDValue SystemZTargetLowering::LowerRET(SDValue Op, SelectionDAG &DAG) {
DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg()); DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg());
} }
// The chain is always operand #0
SDValue Chain = Op.getOperand(0);
SDValue Flag; SDValue Flag;
// Copy the result values into the output registers. // Copy the result values into the output registers.
for (unsigned i = 0; i != RVLocs.size(); ++i) { for (unsigned i = 0; i != RVLocs.size(); ++i) {
CCValAssign &VA = RVLocs[i]; CCValAssign &VA = RVLocs[i];
SDValue ResValue = Op.getOperand(i*2+1); SDValue ResValue = Outs[i].Val;
assert(VA.isRegLoc() && "Can only return in registers!"); assert(VA.isRegLoc() && "Can only return in registers!");
// If this is an 8/16/32-bit value, it is really should be passed promoted // If this is an 8/16/32-bit value, it is really should be passed promoted
@ -516,8 +522,6 @@ SDValue SystemZTargetLowering::LowerRET(SDValue Op, SelectionDAG &DAG) {
else if (VA.getLocInfo() == CCValAssign::AExt) else if (VA.getLocInfo() == CCValAssign::AExt)
ResValue = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), ResValue); ResValue = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), ResValue);
// ISD::RET => ret chain, (regnum1,val1), ...
// So i*2+1 index only the regnums
Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), ResValue, Flag); Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), ResValue, Flag);
// Guarantee that all emitted copies are stuck together, // Guarantee that all emitted copies are stuck together,

View File

@ -28,7 +28,7 @@ namespace llvm {
/// Return with a flag operand. Operand 0 is the chain operand. /// Return with a flag operand. Operand 0 is the chain operand.
RET_FLAG, RET_FLAG,
/// CALL/TAILCALL - These operations represent an abstract call /// CALL - These operations represent an abstract call
/// instruction, which includes a bunch of information. /// instruction, which includes a bunch of information.
CALL, CALL,
@ -69,21 +69,12 @@ namespace llvm {
return 1; return 1;
} }
SDValue LowerFORMAL_ARGUMENTS(SDValue Op, SelectionDAG &DAG);
SDValue LowerRET(SDValue Op, SelectionDAG &DAG);
SDValue LowerCALL(SDValue Op, SelectionDAG &DAG);
SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG); SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG);
SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG); SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG);
SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG); SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG);
SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG); SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG);
SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG); SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG);
SDValue LowerCCCArguments(SDValue Op, SelectionDAG &DAG);
SDValue LowerCCCCallTo(SDValue Op, SelectionDAG &DAG, unsigned CC);
SDNode* LowerCallResult(SDValue Chain, SDValue InFlag,
CallSDNode *TheCall,
unsigned CallingConv, SelectionDAG &DAG);
SDValue EmitCmp(SDValue LHS, SDValue RHS, SDValue EmitCmp(SDValue LHS, SDValue RHS,
ISD::CondCode CC, SDValue &SystemZCC, ISD::CondCode CC, SDValue &SystemZCC,
SelectionDAG &DAG); SelectionDAG &DAG);
@ -93,6 +84,48 @@ namespace llvm {
MachineBasicBlock *BB) const; MachineBasicBlock *BB) const;
private: private:
SDValue LowerCCCCallTo(SDValue Chain, SDValue Callee,
unsigned CallConv, bool isVarArg,
bool isTailCall,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals);
SDValue LowerCCCArguments(SDValue Chain,
unsigned CallConv,
bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl,
SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals);
SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
unsigned CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals);
virtual SDValue
LowerFormalArguments(SDValue Chain,
unsigned CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals);
virtual SDValue
LowerCall(SDValue Chain, SDValue Callee,
unsigned CallConv, bool isVarArg, bool isTailCall,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals);
virtual SDValue
LowerReturn(SDValue Chain,
unsigned CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
DebugLoc dl, SelectionDAG &DAG);
const SystemZSubtarget &Subtarget; const SystemZSubtarget &Subtarget;
const SystemZTargetMachine &TM; const SystemZTargetMachine &TM;
const SystemZRegisterInfo *RegInfo; const SystemZRegisterInfo *RegInfo;

View File

@ -294,8 +294,6 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
setOperationAction(ISD::SELECT , MVT::i64 , Custom); setOperationAction(ISD::SELECT , MVT::i64 , Custom);
setOperationAction(ISD::SETCC , MVT::i64 , Custom); setOperationAction(ISD::SETCC , MVT::i64 , Custom);
} }
// X86 ret instruction may pop stack.
setOperationAction(ISD::RET , MVT::Other, Custom);
setOperationAction(ISD::EH_RETURN , MVT::Other, Custom); setOperationAction(ISD::EH_RETURN , MVT::Other, Custom);
// Darwin ABI issue. // Darwin ABI issue.
@ -1060,16 +1058,16 @@ unsigned X86TargetLowering::getFunctionAlignment(const Function *F) const {
#include "X86GenCallingConv.inc" #include "X86GenCallingConv.inc"
/// LowerRET - Lower an ISD::RET node. SDValue
SDValue X86TargetLowering::LowerRET(SDValue Op, SelectionDAG &DAG) { X86TargetLowering::LowerReturn(SDValue Chain,
DebugLoc dl = Op.getDebugLoc(); unsigned CallConv, bool isVarArg,
assert((Op.getNumOperands() & 1) == 1 && "ISD::RET should have odd # args"); const SmallVectorImpl<ISD::OutputArg> &Outs,
DebugLoc dl, SelectionDAG &DAG) {
SmallVector<CCValAssign, 16> RVLocs; SmallVector<CCValAssign, 16> RVLocs;
unsigned CC = DAG.getMachineFunction().getFunction()->getCallingConv(); CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
bool isVarArg = DAG.getMachineFunction().getFunction()->isVarArg(); RVLocs, *DAG.getContext());
CCState CCInfo(CC, isVarArg, getTargetMachine(), RVLocs, *DAG.getContext()); CCInfo.AnalyzeReturn(Outs, RetCC_X86);
CCInfo.AnalyzeReturn(Op.getNode(), RetCC_X86);
// If this is the first return lowered for this function, add the regs to the // If this is the first return lowered for this function, add the regs to the
// liveout set for the function. // liveout set for the function.
@ -1078,37 +1076,7 @@ SDValue X86TargetLowering::LowerRET(SDValue Op, SelectionDAG &DAG) {
if (RVLocs[i].isRegLoc()) if (RVLocs[i].isRegLoc())
DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg()); DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg());
} }
SDValue Chain = Op.getOperand(0);
// Handle tail call return.
Chain = GetPossiblePreceedingTailCall(Chain, X86ISD::TAILCALL);
if (Chain.getOpcode() == X86ISD::TAILCALL) {
SDValue TailCall = Chain;
SDValue TargetAddress = TailCall.getOperand(1);
SDValue StackAdjustment = TailCall.getOperand(2);
assert(((TargetAddress.getOpcode() == ISD::Register &&
(cast<RegisterSDNode>(TargetAddress)->getReg() == X86::EAX ||
cast<RegisterSDNode>(TargetAddress)->getReg() == X86::R11)) ||
TargetAddress.getOpcode() == ISD::TargetExternalSymbol ||
TargetAddress.getOpcode() == ISD::TargetGlobalAddress) &&
"Expecting an global address, external symbol, or register");
assert(StackAdjustment.getOpcode() == ISD::Constant &&
"Expecting a const value");
SmallVector<SDValue,8> Operands;
Operands.push_back(Chain.getOperand(0));
Operands.push_back(TargetAddress);
Operands.push_back(StackAdjustment);
// Copy registers used by the call. Last operand is a flag so it is not
// copied.
for (unsigned i=3; i < TailCall.getNumOperands()-1; i++) {
Operands.push_back(Chain.getOperand(i));
}
return DAG.getNode(X86ISD::TC_RETURN, dl, MVT::Other, &Operands[0],
Operands.size());
}
// Regular return.
SDValue Flag; SDValue Flag;
SmallVector<SDValue, 6> RetOps; SmallVector<SDValue, 6> RetOps;
@ -1120,7 +1088,7 @@ SDValue X86TargetLowering::LowerRET(SDValue Op, SelectionDAG &DAG) {
for (unsigned i = 0; i != RVLocs.size(); ++i) { for (unsigned i = 0; i != RVLocs.size(); ++i) {
CCValAssign &VA = RVLocs[i]; CCValAssign &VA = RVLocs[i];
assert(VA.isRegLoc() && "Can only return in registers!"); assert(VA.isRegLoc() && "Can only return in registers!");
SDValue ValToCopy = Op.getOperand(i*2+1); SDValue ValToCopy = Outs[i].Val;
// Returns in ST0/ST1 are handled specially: these are pushed as operands to // Returns in ST0/ST1 are handled specially: these are pushed as operands to
// the RET instruction and handled by the FP Stackifier. // the RET instruction and handled by the FP Stackifier.
@ -1179,26 +1147,22 @@ SDValue X86TargetLowering::LowerRET(SDValue Op, SelectionDAG &DAG) {
MVT::Other, &RetOps[0], RetOps.size()); MVT::Other, &RetOps[0], RetOps.size());
} }
/// LowerCallResult - Lower the result values of a call into the
/// appropriate copies out of appropriate physical registers.
///
SDValue
X86TargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
unsigned CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) {
/// LowerCallResult - Lower the result values of an ISD::CALL into the
/// appropriate copies out of appropriate physical registers. This assumes that
/// Chain/InFlag are the input chain/flag to use, and that TheCall is the call
/// being lowered. The returns a SDNode with the same number of values as the
/// ISD::CALL.
SDNode *X86TargetLowering::
LowerCallResult(SDValue Chain, SDValue InFlag, CallSDNode *TheCall,
unsigned CallingConv, SelectionDAG &DAG) {
DebugLoc dl = TheCall->getDebugLoc();
// Assign locations to each value returned by this call. // Assign locations to each value returned by this call.
SmallVector<CCValAssign, 16> RVLocs; SmallVector<CCValAssign, 16> RVLocs;
bool isVarArg = TheCall->isVarArg();
bool Is64Bit = Subtarget->is64Bit(); bool Is64Bit = Subtarget->is64Bit();
CCState CCInfo(CallingConv, isVarArg, getTargetMachine(), CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
RVLocs, *DAG.getContext()); RVLocs, *DAG.getContext());
CCInfo.AnalyzeCallResult(TheCall, RetCC_X86); CCInfo.AnalyzeCallResult(Ins, RetCC_X86);
SmallVector<SDValue, 8> ResultVals;
// Copy all of the result registers out of their specified physreg. // Copy all of the result registers out of their specified physreg.
for (unsigned i = 0; i != RVLocs.size(); ++i) { for (unsigned i = 0; i != RVLocs.size(); ++i) {
@ -1207,7 +1171,7 @@ LowerCallResult(SDValue Chain, SDValue InFlag, CallSDNode *TheCall,
// If this is x86-64, and we disabled SSE, we can't return FP values // If this is x86-64, and we disabled SSE, we can't return FP values
if ((CopyVT == MVT::f32 || CopyVT == MVT::f64) && if ((CopyVT == MVT::f32 || CopyVT == MVT::f64) &&
((Is64Bit || TheCall->isInreg()) && !Subtarget->hasSSE1())) { ((Is64Bit || Ins[i].Flags.isInReg()) && !Subtarget->hasSSE1())) {
llvm_report_error("SSE register return with SSE disabled"); llvm_report_error("SSE register return with SSE disabled");
} }
@ -1250,13 +1214,10 @@ LowerCallResult(SDValue Chain, SDValue InFlag, CallSDNode *TheCall,
DAG.getIntPtrConstant(1)); DAG.getIntPtrConstant(1));
} }
ResultVals.push_back(Val); InVals.push_back(Val);
} }
// Merge everything together with a MERGE_VALUES node. return Chain;
ResultVals.push_back(Chain);
return DAG.getNode(ISD::MERGE_VALUES, dl, TheCall->getVTList(),
&ResultVals[0], ResultVals.size()).getNode();
} }
@ -1270,24 +1231,23 @@ LowerCallResult(SDValue Chain, SDValue InFlag, CallSDNode *TheCall,
// For info on fast calling convention see Fast Calling Convention (tail call) // For info on fast calling convention see Fast Calling Convention (tail call)
// implementation LowerX86_32FastCCCallTo. // implementation LowerX86_32FastCCCallTo.
/// CallIsStructReturn - Determines whether a CALL node uses struct return /// CallIsStructReturn - Determines whether a call uses struct return
/// semantics. /// semantics.
static bool CallIsStructReturn(CallSDNode *TheCall) { static bool CallIsStructReturn(const SmallVectorImpl<ISD::OutputArg> &Outs) {
unsigned NumOps = TheCall->getNumArgs(); if (Outs.empty())
if (!NumOps)
return false; return false;
return TheCall->getArgFlags(0).isSRet(); return Outs[0].Flags.isSRet();
} }
/// ArgsAreStructReturn - Determines whether a function uses struct /// ArgsAreStructReturn - Determines whether a function uses struct
/// return semantics. /// return semantics.
static bool ArgsAreStructReturn(SDValue Op) { static bool
unsigned NumArgs = Op.getNode()->getNumValues() - 1; ArgsAreStructReturn(const SmallVectorImpl<ISD::InputArg> &Ins) {
if (!NumArgs) if (Ins.empty())
return false; return false;
return cast<ARG_FLAGSSDNode>(Op.getOperand(3))->getArgFlags().isSRet(); return Ins[0].Flags.isSRet();
} }
/// IsCalleePop - Determines whether the callee is required to pop its /// IsCalleePop - Determines whether the callee is required to pop its
@ -1326,14 +1286,13 @@ CCAssignFn *X86TargetLowering::CCAssignFnForNode(unsigned CC) const {
return CC_X86_32_C; return CC_X86_32_C;
} }
/// NameDecorationForFORMAL_ARGUMENTS - Selects the appropriate decoration to /// NameDecorationForCallConv - Selects the appropriate decoration to
/// apply to a MachineFunction containing a given FORMAL_ARGUMENTS node. /// apply to a MachineFunction containing a given calling convention.
NameDecorationStyle NameDecorationStyle
X86TargetLowering::NameDecorationForFORMAL_ARGUMENTS(SDValue Op) { X86TargetLowering::NameDecorationForCallConv(unsigned CallConv) {
unsigned CC = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); if (CallConv == CallingConv::X86_FastCall)
if (CC == CallingConv::X86_FastCall)
return FastCall; return FastCall;
else if (CC == CallingConv::X86_StdCall) else if (CallConv == CallingConv::X86_StdCall)
return StdCall; return StdCall;
return None; return None;
} }
@ -1352,15 +1311,18 @@ CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain,
/*AlwaysInline=*/true, NULL, 0, NULL, 0); /*AlwaysInline=*/true, NULL, 0, NULL, 0);
} }
SDValue X86TargetLowering::LowerMemArgument(SDValue Op, SelectionDAG &DAG, SDValue
const CCValAssign &VA, X86TargetLowering::LowerMemArgument(SDValue Chain,
MachineFrameInfo *MFI, unsigned CallConv,
unsigned CC, const SmallVectorImpl<ISD::InputArg> &Ins,
SDValue Root, unsigned i) { DebugLoc dl, SelectionDAG &DAG,
const CCValAssign &VA,
MachineFrameInfo *MFI,
unsigned i) {
// Create the nodes corresponding to a load from this parameter slot. // Create the nodes corresponding to a load from this parameter slot.
ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy Flags = Ins[i].Flags;
cast<ARG_FLAGSSDNode>(Op.getOperand(3 + i))->getArgFlags(); bool AlwaysUseMutable = (CallConv==CallingConv::Fast) && PerformTailCallOpt;
bool AlwaysUseMutable = (CC==CallingConv::Fast) && PerformTailCallOpt;
bool isImmutable = !AlwaysUseMutable && !Flags.isByVal(); bool isImmutable = !AlwaysUseMutable && !Flags.isByVal();
// FIXME: For now, all byval parameter objects are marked mutable. This can be // FIXME: For now, all byval parameter objects are marked mutable. This can be
@ -1372,15 +1334,21 @@ SDValue X86TargetLowering::LowerMemArgument(SDValue Op, SelectionDAG &DAG,
SDValue FIN = DAG.getFrameIndex(FI, getPointerTy()); SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
if (Flags.isByVal()) if (Flags.isByVal())
return FIN; return FIN;
return DAG.getLoad(VA.getValVT(), Op.getDebugLoc(), Root, FIN, return DAG.getLoad(VA.getValVT(), dl, Chain, FIN,
PseudoSourceValue::getFixedStack(FI), 0); PseudoSourceValue::getFixedStack(FI), 0);
} }
SDValue SDValue
X86TargetLowering::LowerFORMAL_ARGUMENTS(SDValue Op, SelectionDAG &DAG) { X86TargetLowering::LowerFormalArguments(SDValue Chain,
unsigned CallConv,
bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl,
SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) {
MachineFunction &MF = DAG.getMachineFunction(); MachineFunction &MF = DAG.getMachineFunction();
X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>(); X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
DebugLoc dl = Op.getDebugLoc();
const Function* Fn = MF.getFunction(); const Function* Fn = MF.getFunction();
if (Fn->hasExternalLinkage() && if (Fn->hasExternalLinkage() &&
@ -1389,24 +1357,21 @@ X86TargetLowering::LowerFORMAL_ARGUMENTS(SDValue Op, SelectionDAG &DAG) {
FuncInfo->setForceFramePointer(true); FuncInfo->setForceFramePointer(true);
// Decorate the function name. // Decorate the function name.
FuncInfo->setDecorationStyle(NameDecorationForFORMAL_ARGUMENTS(Op)); FuncInfo->setDecorationStyle(NameDecorationForCallConv(CallConv));
MachineFrameInfo *MFI = MF.getFrameInfo(); MachineFrameInfo *MFI = MF.getFrameInfo();
SDValue Root = Op.getOperand(0);
bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue() != 0;
unsigned CC = MF.getFunction()->getCallingConv();
bool Is64Bit = Subtarget->is64Bit(); bool Is64Bit = Subtarget->is64Bit();
bool IsWin64 = Subtarget->isTargetWin64(); bool IsWin64 = Subtarget->isTargetWin64();
assert(!(isVarArg && CC == CallingConv::Fast) && assert(!(isVarArg && CallConv == CallingConv::Fast) &&
"Var args not supported with calling convention fastcc"); "Var args not supported with calling convention fastcc");
// Assign locations to all of the incoming arguments. // Assign locations to all of the incoming arguments.
SmallVector<CCValAssign, 16> ArgLocs; SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs, *DAG.getContext()); CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
CCInfo.AnalyzeFormalArguments(Op.getNode(), CCAssignFnForNode(CC)); ArgLocs, *DAG.getContext());
CCInfo.AnalyzeFormalArguments(Ins, CCAssignFnForNode(CallConv));
SmallVector<SDValue, 8> ArgValues;
unsigned LastVal = ~0U; unsigned LastVal = ~0U;
SDValue ArgValue; SDValue ArgValue;
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
@ -1436,7 +1401,7 @@ X86TargetLowering::LowerFORMAL_ARGUMENTS(SDValue Op, SelectionDAG &DAG) {
llvm_unreachable("Unknown argument type!"); llvm_unreachable("Unknown argument type!");
unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC); unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
ArgValue = DAG.getCopyFromReg(Root, dl, Reg, RegVT); ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);
// If this is an 8 or 16-bit value, it is really passed promoted to 32 // If this is an 8 or 16-bit value, it is really passed promoted to 32
// bits. Insert an assert[sz]ext to capture this, then truncate to the // bits. Insert an assert[sz]ext to capture this, then truncate to the
@ -1461,14 +1426,14 @@ X86TargetLowering::LowerFORMAL_ARGUMENTS(SDValue Op, SelectionDAG &DAG) {
} }
} else { } else {
assert(VA.isMemLoc()); assert(VA.isMemLoc());
ArgValue = LowerMemArgument(Op, DAG, VA, MFI, CC, Root, i); ArgValue = LowerMemArgument(Chain, CallConv, Ins, dl, DAG, VA, MFI, i);
} }
// If value is passed via pointer - do a load. // If value is passed via pointer - do a load.
if (VA.getLocInfo() == CCValAssign::Indirect) if (VA.getLocInfo() == CCValAssign::Indirect)
ArgValue = DAG.getLoad(VA.getValVT(), dl, Root, ArgValue, NULL, 0); ArgValue = DAG.getLoad(VA.getValVT(), dl, Chain, ArgValue, NULL, 0);
ArgValues.push_back(ArgValue); InVals.push_back(ArgValue);
} }
// The x86-64 ABI for returning structs by value requires that we copy // The x86-64 ABI for returning structs by value requires that we copy
@ -1481,19 +1446,19 @@ X86TargetLowering::LowerFORMAL_ARGUMENTS(SDValue Op, SelectionDAG &DAG) {
Reg = MF.getRegInfo().createVirtualRegister(getRegClassFor(MVT::i64)); Reg = MF.getRegInfo().createVirtualRegister(getRegClassFor(MVT::i64));
FuncInfo->setSRetReturnReg(Reg); FuncInfo->setSRetReturnReg(Reg);
} }
SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, ArgValues[0]); SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[0]);
Root = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Root); Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain);
} }
unsigned StackSize = CCInfo.getNextStackOffset(); unsigned StackSize = CCInfo.getNextStackOffset();
// align stack specially for tail calls // align stack specially for tail calls
if (PerformTailCallOpt && CC == CallingConv::Fast) if (PerformTailCallOpt && CallConv == CallingConv::Fast)
StackSize = GetAlignedArgumentStackSize(StackSize, DAG); StackSize = GetAlignedArgumentStackSize(StackSize, DAG);
// If the function takes variable number of arguments, make a frame index for // If the function takes variable number of arguments, make a frame index for
// the start of the first vararg value... for expansion of llvm.va_start. // the start of the first vararg value... for expansion of llvm.va_start.
if (isVarArg) { if (isVarArg) {
if (Is64Bit || CC != CallingConv::X86_FastCall) { if (Is64Bit || CallConv != CallingConv::X86_FastCall) {
VarArgsFrameIndex = MFI->CreateFixedObject(1, StackSize); VarArgsFrameIndex = MFI->CreateFixedObject(1, StackSize);
} }
if (Is64Bit) { if (Is64Bit) {
@ -1555,7 +1520,7 @@ X86TargetLowering::LowerFORMAL_ARGUMENTS(SDValue Op, SelectionDAG &DAG) {
for (; NumIntRegs != TotalNumIntRegs; ++NumIntRegs) { for (; NumIntRegs != TotalNumIntRegs; ++NumIntRegs) {
unsigned VReg = MF.addLiveIn(GPR64ArgRegs[NumIntRegs], unsigned VReg = MF.addLiveIn(GPR64ArgRegs[NumIntRegs],
X86::GR64RegisterClass); X86::GR64RegisterClass);
SDValue Val = DAG.getCopyFromReg(Root, dl, VReg, MVT::i64); SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
SDValue Store = SDValue Store =
DAG.getStore(Val.getValue(1), dl, Val, FIN, DAG.getStore(Val.getValue(1), dl, Val, FIN,
PseudoSourceValue::getFixedStack(RegSaveFrameIndex), 0); PseudoSourceValue::getFixedStack(RegSaveFrameIndex), 0);
@ -1570,7 +1535,7 @@ X86TargetLowering::LowerFORMAL_ARGUMENTS(SDValue Op, SelectionDAG &DAG) {
for (; NumXMMRegs != TotalNumXMMRegs; ++NumXMMRegs) { for (; NumXMMRegs != TotalNumXMMRegs; ++NumXMMRegs) {
unsigned VReg = MF.addLiveIn(XMMArgRegs[NumXMMRegs], unsigned VReg = MF.addLiveIn(XMMArgRegs[NumXMMRegs],
X86::VR128RegisterClass); X86::VR128RegisterClass);
SDValue Val = DAG.getCopyFromReg(Root, dl, VReg, MVT::v4f32); SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::v4f32);
SDValue Store = SDValue Store =
DAG.getStore(Val.getValue(1), dl, Val, FIN, DAG.getStore(Val.getValue(1), dl, Val, FIN,
PseudoSourceValue::getFixedStack(RegSaveFrameIndex), 0); PseudoSourceValue::getFixedStack(RegSaveFrameIndex), 0);
@ -1579,46 +1544,41 @@ X86TargetLowering::LowerFORMAL_ARGUMENTS(SDValue Op, SelectionDAG &DAG) {
DAG.getIntPtrConstant(16)); DAG.getIntPtrConstant(16));
} }
if (!MemOps.empty()) if (!MemOps.empty())
Root = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
&MemOps[0], MemOps.size()); &MemOps[0], MemOps.size());
} }
} }
ArgValues.push_back(Root);
// Some CCs need callee pop. // Some CCs need callee pop.
if (IsCalleePop(isVarArg, CC)) { if (IsCalleePop(isVarArg, CallConv)) {
BytesToPopOnReturn = StackSize; // Callee pops everything. BytesToPopOnReturn = StackSize; // Callee pops everything.
BytesCallerReserves = 0; BytesCallerReserves = 0;
} else { } else {
BytesToPopOnReturn = 0; // Callee pops nothing. BytesToPopOnReturn = 0; // Callee pops nothing.
// If this is an sret function, the return should pop the hidden pointer. // If this is an sret function, the return should pop the hidden pointer.
if (!Is64Bit && CC != CallingConv::Fast && ArgsAreStructReturn(Op)) if (!Is64Bit && CallConv != CallingConv::Fast && ArgsAreStructReturn(Ins))
BytesToPopOnReturn = 4; BytesToPopOnReturn = 4;
BytesCallerReserves = StackSize; BytesCallerReserves = StackSize;
} }
if (!Is64Bit) { if (!Is64Bit) {
RegSaveFrameIndex = 0xAAAAAAA; // RegSaveFrameIndex is X86-64 only. RegSaveFrameIndex = 0xAAAAAAA; // RegSaveFrameIndex is X86-64 only.
if (CC == CallingConv::X86_FastCall) if (CallConv == CallingConv::X86_FastCall)
VarArgsFrameIndex = 0xAAAAAAA; // fastcc functions can't have varargs. VarArgsFrameIndex = 0xAAAAAAA; // fastcc functions can't have varargs.
} }
FuncInfo->setBytesToPopOnReturn(BytesToPopOnReturn); FuncInfo->setBytesToPopOnReturn(BytesToPopOnReturn);
// Return the new list of results. return Chain;
return DAG.getNode(ISD::MERGE_VALUES, dl, Op.getNode()->getVTList(),
&ArgValues[0], ArgValues.size()).getValue(Op.getResNo());
} }
SDValue SDValue
X86TargetLowering::LowerMemOpCallTo(CallSDNode *TheCall, SelectionDAG &DAG, X86TargetLowering::LowerMemOpCallTo(SDValue Chain,
const SDValue &StackPtr, SDValue StackPtr, SDValue Arg,
DebugLoc dl, SelectionDAG &DAG,
const CCValAssign &VA, const CCValAssign &VA,
SDValue Chain, ISD::ArgFlagsTy Flags) {
SDValue Arg, ISD::ArgFlagsTy Flags) {
const unsigned FirstStackArgOffset = (Subtarget->isTargetWin64() ? 32 : 0); const unsigned FirstStackArgOffset = (Subtarget->isTargetWin64() ? 32 : 0);
DebugLoc dl = TheCall->getDebugLoc();
unsigned LocMemOffset = FirstStackArgOffset + VA.getLocMemOffset(); unsigned LocMemOffset = FirstStackArgOffset + VA.getLocMemOffset();
SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset); SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset);
PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff); PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff);
@ -1669,34 +1629,37 @@ EmitTailCallStoreRetAddr(SelectionDAG & DAG, MachineFunction &MF,
return Chain; return Chain;
} }
SDValue X86TargetLowering::LowerCALL(SDValue Op, SelectionDAG &DAG) { SDValue
MachineFunction &MF = DAG.getMachineFunction(); X86TargetLowering::LowerCall(SDValue Chain, SDValue Callee,
CallSDNode *TheCall = cast<CallSDNode>(Op.getNode()); unsigned CallConv, bool isVarArg, bool isTailCall,
SDValue Chain = TheCall->getChain(); const SmallVectorImpl<ISD::OutputArg> &Outs,
unsigned CC = TheCall->getCallingConv(); const SmallVectorImpl<ISD::InputArg> &Ins,
bool isVarArg = TheCall->isVarArg(); DebugLoc dl, SelectionDAG &DAG,
bool IsTailCall = TheCall->isTailCall() && SmallVectorImpl<SDValue> &InVals) {
CC == CallingConv::Fast && PerformTailCallOpt;
SDValue Callee = TheCall->getCallee();
bool Is64Bit = Subtarget->is64Bit();
bool IsStructRet = CallIsStructReturn(TheCall);
DebugLoc dl = TheCall->getDebugLoc();
assert(!(isVarArg && CC == CallingConv::Fast) && MachineFunction &MF = DAG.getMachineFunction();
bool Is64Bit = Subtarget->is64Bit();
bool IsStructRet = CallIsStructReturn(Outs);
assert((!isTailCall ||
(CallConv == CallingConv::Fast && PerformTailCallOpt)) &&
"IsEligibleForTailCallOptimization missed a case!");
assert(!(isVarArg && CallConv == CallingConv::Fast) &&
"Var args not supported with calling convention fastcc"); "Var args not supported with calling convention fastcc");
// Analyze operands of the call, assigning locations to each operand. // Analyze operands of the call, assigning locations to each operand.
SmallVector<CCValAssign, 16> ArgLocs; SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs, *DAG.getContext()); CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
CCInfo.AnalyzeCallOperands(TheCall, CCAssignFnForNode(CC)); ArgLocs, *DAG.getContext());
CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForNode(CallConv));
// Get a count of how many bytes are to be pushed on the stack. // Get a count of how many bytes are to be pushed on the stack.
unsigned NumBytes = CCInfo.getNextStackOffset(); unsigned NumBytes = CCInfo.getNextStackOffset();
if (PerformTailCallOpt && CC == CallingConv::Fast) if (PerformTailCallOpt && CallConv == CallingConv::Fast)
NumBytes = GetAlignedArgumentStackSize(NumBytes, DAG); NumBytes = GetAlignedArgumentStackSize(NumBytes, DAG);
int FPDiff = 0; int FPDiff = 0;
if (IsTailCall) { if (isTailCall) {
// Lower arguments at fp - stackoffset + fpdiff. // Lower arguments at fp - stackoffset + fpdiff.
unsigned NumBytesCallerPushed = unsigned NumBytesCallerPushed =
MF.getInfo<X86MachineFunctionInfo>()->getBytesToPopOnReturn(); MF.getInfo<X86MachineFunctionInfo>()->getBytesToPopOnReturn();
@ -1712,7 +1675,7 @@ SDValue X86TargetLowering::LowerCALL(SDValue Op, SelectionDAG &DAG) {
SDValue RetAddrFrIdx; SDValue RetAddrFrIdx;
// Load return adress for tail calls. // Load return adress for tail calls.
Chain = EmitTailCallLoadRetAddr(DAG, RetAddrFrIdx, Chain, IsTailCall, Is64Bit, Chain = EmitTailCallLoadRetAddr(DAG, RetAddrFrIdx, Chain, isTailCall, Is64Bit,
FPDiff, dl); FPDiff, dl);
SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
@ -1724,8 +1687,8 @@ SDValue X86TargetLowering::LowerCALL(SDValue Op, SelectionDAG &DAG) {
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
CCValAssign &VA = ArgLocs[i]; CCValAssign &VA = ArgLocs[i];
MVT RegVT = VA.getLocVT(); MVT RegVT = VA.getLocVT();
SDValue Arg = TheCall->getArg(i); SDValue Arg = Outs[i].Val;
ISD::ArgFlagsTy Flags = TheCall->getArgFlags(i); ISD::ArgFlagsTy Flags = Outs[i].Flags;
bool isByVal = Flags.isByVal(); bool isByVal = Flags.isByVal();
// Promote the value if needed. // Promote the value if needed.
@ -1764,13 +1727,13 @@ SDValue X86TargetLowering::LowerCALL(SDValue Op, SelectionDAG &DAG) {
if (VA.isRegLoc()) { if (VA.isRegLoc()) {
RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg)); RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
} else { } else {
if (!IsTailCall || (IsTailCall && isByVal)) { if (!isTailCall || (isTailCall && isByVal)) {
assert(VA.isMemLoc()); assert(VA.isMemLoc());
if (StackPtr.getNode() == 0) if (StackPtr.getNode() == 0)
StackPtr = DAG.getCopyFromReg(Chain, dl, X86StackPtr, getPointerTy()); StackPtr = DAG.getCopyFromReg(Chain, dl, X86StackPtr, getPointerTy());
MemOpChains.push_back(LowerMemOpCallTo(TheCall, DAG, StackPtr, VA, MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg,
Chain, Arg, Flags)); dl, DAG, VA, Flags));
} }
} }
} }
@ -1784,7 +1747,7 @@ SDValue X86TargetLowering::LowerCALL(SDValue Op, SelectionDAG &DAG) {
SDValue InFlag; SDValue InFlag;
// Tail call byval lowering might overwrite argument registers so in case of // Tail call byval lowering might overwrite argument registers so in case of
// tail call optimization the copies to registers are lowered later. // tail call optimization the copies to registers are lowered later.
if (!IsTailCall) if (!isTailCall)
for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
RegsToPass[i].second, InFlag); RegsToPass[i].second, InFlag);
@ -1795,7 +1758,7 @@ SDValue X86TargetLowering::LowerCALL(SDValue Op, SelectionDAG &DAG) {
if (Subtarget->isPICStyleGOT()) { if (Subtarget->isPICStyleGOT()) {
// ELF / PIC requires GOT in the EBX register before function calls via PLT // ELF / PIC requires GOT in the EBX register before function calls via PLT
// GOT pointer. // GOT pointer.
if (!IsTailCall) { if (!isTailCall) {
Chain = DAG.getCopyToReg(Chain, dl, X86::EBX, Chain = DAG.getCopyToReg(Chain, dl, X86::EBX,
DAG.getNode(X86ISD::GlobalBaseReg, DAG.getNode(X86ISD::GlobalBaseReg,
DebugLoc::getUnknownLoc(), DebugLoc::getUnknownLoc(),
@ -1847,7 +1810,15 @@ SDValue X86TargetLowering::LowerCALL(SDValue Op, SelectionDAG &DAG) {
// For tail calls lower the arguments to the 'real' stack slot. // For tail calls lower the arguments to the 'real' stack slot.
if (IsTailCall) { if (isTailCall) {
// Force all the incoming stack arguments to be loaded from the stack
// before any new outgoing arguments are stored to the stack, because the
// outgoing stack slots may alias the incoming argument stack slots, and
// the alias isn't otherwise explicit. This is slightly more conservative
// than necessary, because it means that each store effectively depends
// on every argument instead of just those arguments it would clobber.
SDValue ArgChain = DAG.getStackArgumentTokenFactor(Chain);
SmallVector<SDValue, 8> MemOpChains2; SmallVector<SDValue, 8> MemOpChains2;
SDValue FIN; SDValue FIN;
int FI = 0; int FI = 0;
@ -1857,8 +1828,8 @@ SDValue X86TargetLowering::LowerCALL(SDValue Op, SelectionDAG &DAG) {
CCValAssign &VA = ArgLocs[i]; CCValAssign &VA = ArgLocs[i];
if (!VA.isRegLoc()) { if (!VA.isRegLoc()) {
assert(VA.isMemLoc()); assert(VA.isMemLoc());
SDValue Arg = TheCall->getArg(i); SDValue Arg = Outs[i].Val;
ISD::ArgFlagsTy Flags = TheCall->getArgFlags(i); ISD::ArgFlagsTy Flags = Outs[i].Flags;
// Create frame index. // Create frame index.
int32_t Offset = VA.getLocMemOffset()+FPDiff; int32_t Offset = VA.getLocMemOffset()+FPDiff;
uint32_t OpSize = (VA.getLocVT().getSizeInBits()+7)/8; uint32_t OpSize = (VA.getLocVT().getSizeInBits()+7)/8;
@ -1873,12 +1844,13 @@ SDValue X86TargetLowering::LowerCALL(SDValue Op, SelectionDAG &DAG) {
getPointerTy()); getPointerTy());
Source = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, Source); Source = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, Source);
MemOpChains2.push_back(CreateCopyOfByValArgument(Source, FIN, Chain, MemOpChains2.push_back(CreateCopyOfByValArgument(Source, FIN,
ArgChain,
Flags, DAG, dl)); Flags, DAG, dl));
} else { } else {
// Store relative to framepointer. // Store relative to framepointer.
MemOpChains2.push_back( MemOpChains2.push_back(
DAG.getStore(Chain, dl, Arg, FIN, DAG.getStore(ArgChain, dl, Arg, FIN,
PseudoSourceValue::getFixedStack(FI), 0)); PseudoSourceValue::getFixedStack(FI), 0));
} }
} }
@ -1948,7 +1920,7 @@ SDValue X86TargetLowering::LowerCALL(SDValue Op, SelectionDAG &DAG) {
Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy(), Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy(),
OpFlags); OpFlags);
} else if (IsTailCall) { } else if (isTailCall) {
unsigned Opc = Is64Bit ? X86::R11 : X86::EAX; unsigned Opc = Is64Bit ? X86::R11 : X86::EAX;
Chain = DAG.getCopyToReg(Chain, dl, Chain = DAG.getCopyToReg(Chain, dl,
@ -1963,20 +1935,16 @@ SDValue X86TargetLowering::LowerCALL(SDValue Op, SelectionDAG &DAG) {
SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag); SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
SmallVector<SDValue, 8> Ops; SmallVector<SDValue, 8> Ops;
if (IsTailCall) { if (isTailCall) {
Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true), Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
DAG.getIntPtrConstant(0, true), InFlag); DAG.getIntPtrConstant(0, true), InFlag);
InFlag = Chain.getValue(1); InFlag = Chain.getValue(1);
// Returns a chain & a flag for retval copy to use.
NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
Ops.clear();
} }
Ops.push_back(Chain); Ops.push_back(Chain);
Ops.push_back(Callee); Ops.push_back(Callee);
if (IsTailCall) if (isTailCall)
Ops.push_back(DAG.getConstant(FPDiff, MVT::i32)); Ops.push_back(DAG.getConstant(FPDiff, MVT::i32));
// Add argument registers to the end of the list so that they are known live // Add argument registers to the end of the list so that they are known live
@ -1986,7 +1954,7 @@ SDValue X86TargetLowering::LowerCALL(SDValue Op, SelectionDAG &DAG) {
RegsToPass[i].second.getValueType())); RegsToPass[i].second.getValueType()));
// Add an implicit use GOT pointer in EBX. // Add an implicit use GOT pointer in EBX.
if (!IsTailCall && Subtarget->isPICStyleGOT()) if (!isTailCall && Subtarget->isPICStyleGOT())
Ops.push_back(DAG.getRegister(X86::EBX, getPointerTy())); Ops.push_back(DAG.getRegister(X86::EBX, getPointerTy()));
// Add an implicit use of AL for x86 vararg functions. // Add an implicit use of AL for x86 vararg functions.
@ -1996,13 +1964,28 @@ SDValue X86TargetLowering::LowerCALL(SDValue Op, SelectionDAG &DAG) {
if (InFlag.getNode()) if (InFlag.getNode())
Ops.push_back(InFlag); Ops.push_back(InFlag);
if (IsTailCall) { if (isTailCall) {
assert(InFlag.getNode() && // If this is the first return lowered for this function, add the regs
"Flag must be set. Depend on flag being set in LowerRET"); // to the liveout set for the function.
Chain = DAG.getNode(X86ISD::TAILCALL, dl, if (MF.getRegInfo().liveout_empty()) {
TheCall->getVTList(), &Ops[0], Ops.size()); SmallVector<CCValAssign, 16> RVLocs;
CCState CCInfo(CallConv, isVarArg, getTargetMachine(), RVLocs,
*DAG.getContext());
CCInfo.AnalyzeCallResult(Ins, RetCC_X86);
for (unsigned i = 0; i != RVLocs.size(); ++i)
if (RVLocs[i].isRegLoc())
MF.getRegInfo().addLiveOut(RVLocs[i].getLocReg());
}
return SDValue(Chain.getNode(), Op.getResNo()); assert(((Callee.getOpcode() == ISD::Register &&
(cast<RegisterSDNode>(Callee)->getReg() == X86::EAX ||
cast<RegisterSDNode>(Callee)->getReg() == X86::R9)) ||
Callee.getOpcode() == ISD::TargetExternalSymbol ||
Callee.getOpcode() == ISD::TargetGlobalAddress) &&
"Expecting an global address, external symbol, or register");
return DAG.getNode(X86ISD::TC_RETURN, dl,
NodeTys, &Ops[0], Ops.size());
} }
Chain = DAG.getNode(X86ISD::CALL, dl, NodeTys, &Ops[0], Ops.size()); Chain = DAG.getNode(X86ISD::CALL, dl, NodeTys, &Ops[0], Ops.size());
@ -2010,9 +1993,9 @@ SDValue X86TargetLowering::LowerCALL(SDValue Op, SelectionDAG &DAG) {
// Create the CALLSEQ_END node. // Create the CALLSEQ_END node.
unsigned NumBytesForCalleeToPush; unsigned NumBytesForCalleeToPush;
if (IsCalleePop(isVarArg, CC)) if (IsCalleePop(isVarArg, CallConv))
NumBytesForCalleeToPush = NumBytes; // Callee pops everything NumBytesForCalleeToPush = NumBytes; // Callee pops everything
else if (!Is64Bit && CC != CallingConv::Fast && IsStructRet) else if (!Is64Bit && CallConv != CallingConv::Fast && IsStructRet)
// If this is is a call to a struct-return function, the callee // If this is is a call to a struct-return function, the callee
// pops the hidden struct pointer, so we have to push it back. // pops the hidden struct pointer, so we have to push it back.
// This is common for Darwin/X86, Linux & Mingw32 targets. // This is common for Darwin/X86, Linux & Mingw32 targets.
@ -2030,8 +2013,8 @@ SDValue X86TargetLowering::LowerCALL(SDValue Op, SelectionDAG &DAG) {
// Handle result values, copying them out of physregs into vregs that we // Handle result values, copying them out of physregs into vregs that we
// return. // return.
return SDValue(LowerCallResult(Chain, InFlag, TheCall, CC, DAG), return LowerCallResult(Chain, InFlag, CallConv, isVarArg,
Op.getResNo()); Ins, dl, DAG, InVals);
} }
@ -2088,25 +2071,18 @@ unsigned X86TargetLowering::GetAlignedArgumentStackSize(unsigned StackSize,
return Offset; return Offset;
} }
/// IsEligibleForTailCallElimination - Check to see whether the next instruction /// IsEligibleForTailCallOptimization - Check whether the call is eligible
/// following the call is a return. A function is eligible if caller/callee /// for tail call optimization. Targets which want to do tail call
/// calling conventions match, currently only fastcc supports tail calls, and /// optimization should implement this function.
/// the function CALL is immediatly followed by a RET. bool
bool X86TargetLowering::IsEligibleForTailCallOptimization(CallSDNode *TheCall, X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
SDValue Ret, unsigned CalleeCC,
SelectionDAG& DAG) const { bool isVarArg,
if (!PerformTailCallOpt) const SmallVectorImpl<ISD::InputArg> &Ins,
return false; SelectionDAG& DAG) const {
MachineFunction &MF = DAG.getMachineFunction();
if (CheckTailCallReturnConstraints(TheCall, Ret)) { unsigned CallerCC = MF.getFunction()->getCallingConv();
unsigned CallerCC = return CalleeCC == CallingConv::Fast && CallerCC == CalleeCC;
DAG.getMachineFunction().getFunction()->getCallingConv();
unsigned CalleeCC = TheCall->getCallingConv();
if (CalleeCC == CallingConv::Fast && CallerCC == CalleeCC)
return true;
}
return false;
} }
FastISel * FastISel *
@ -5825,7 +5801,7 @@ X86TargetLowering::EmitTargetCodeForMemset(SelectionDAG &DAG, DebugLoc dl,
Args.push_back(Entry); Args.push_back(Entry);
std::pair<SDValue,SDValue> CallResult = std::pair<SDValue,SDValue> CallResult =
LowerCallTo(Chain, Type::VoidTy, false, false, false, false, LowerCallTo(Chain, Type::VoidTy, false, false, false, false,
0, CallingConv::C, false, 0, CallingConv::C, false, /*isReturnValueUsed=*/false,
DAG.getExternalSymbol(bzeroEntry, IntPtr), Args, DAG, dl); DAG.getExternalSymbol(bzeroEntry, IntPtr), Args, DAG, dl);
return CallResult.second; return CallResult.second;
} }
@ -6845,9 +6821,6 @@ SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) {
case ISD::SELECT: return LowerSELECT(Op, DAG); case ISD::SELECT: return LowerSELECT(Op, DAG);
case ISD::BRCOND: return LowerBRCOND(Op, DAG); case ISD::BRCOND: return LowerBRCOND(Op, DAG);
case ISD::JumpTable: return LowerJumpTable(Op, DAG); case ISD::JumpTable: return LowerJumpTable(Op, DAG);
case ISD::CALL: return LowerCALL(Op, DAG);
case ISD::RET: return LowerRET(Op, DAG);
case ISD::FORMAL_ARGUMENTS: return LowerFORMAL_ARGUMENTS(Op, DAG);
case ISD::VASTART: return LowerVASTART(Op, DAG); case ISD::VASTART: return LowerVASTART(Op, DAG);
case ISD::VAARG: return LowerVAARG(Op, DAG); case ISD::VAARG: return LowerVAARG(Op, DAG);
case ISD::VACOPY: return LowerVACOPY(Op, DAG); case ISD::VACOPY: return LowerVACOPY(Op, DAG);
@ -7009,7 +6982,6 @@ const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
case X86ISD::FLD: return "X86ISD::FLD"; case X86ISD::FLD: return "X86ISD::FLD";
case X86ISD::FST: return "X86ISD::FST"; case X86ISD::FST: return "X86ISD::FST";
case X86ISD::CALL: return "X86ISD::CALL"; case X86ISD::CALL: return "X86ISD::CALL";
case X86ISD::TAILCALL: return "X86ISD::TAILCALL";
case X86ISD::RDTSC_DAG: return "X86ISD::RDTSC_DAG"; case X86ISD::RDTSC_DAG: return "X86ISD::RDTSC_DAG";
case X86ISD::BT: return "X86ISD::BT"; case X86ISD::BT: return "X86ISD::BT";
case X86ISD::CMP: return "X86ISD::CMP"; case X86ISD::CMP: return "X86ISD::CMP";

View File

@ -85,7 +85,7 @@ namespace llvm {
/// as. /// as.
FST, FST,
/// CALL/TAILCALL - These operations represent an abstract X86 call /// CALL - These operations represent an abstract X86 call
/// instruction, which includes a bunch of information. In particular the /// instruction, which includes a bunch of information. In particular the
/// operands of these node are: /// operands of these node are:
/// ///
@ -102,12 +102,8 @@ namespace llvm {
/// #1 - The first register result value (optional) /// #1 - The first register result value (optional)
/// #2 - The second register result value (optional) /// #2 - The second register result value (optional)
/// ///
/// The CALL vs TAILCALL distinction boils down to whether the callee is
/// known not to modify the caller's stack frame, as is standard with
/// LLVM.
CALL, CALL,
TAILCALL,
/// RDTSC_DAG - This operation implements the lowering for /// RDTSC_DAG - This operation implements the lowering for
/// readcyclecounter /// readcyclecounter
RDTSC_DAG, RDTSC_DAG,
@ -508,9 +504,12 @@ namespace llvm {
/// IsEligibleForTailCallOptimization - Check whether the call is eligible /// IsEligibleForTailCallOptimization - Check whether the call is eligible
/// for tail call optimization. Targets which want to do tail call /// for tail call optimization. Targets which want to do tail call
/// optimization should implement this function. /// optimization should implement this function.
virtual bool IsEligibleForTailCallOptimization(CallSDNode *TheCall, virtual bool
SDValue Ret, IsEligibleForTailCallOptimization(SDValue Callee,
SelectionDAG &DAG) const; unsigned CalleeCC,
bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
SelectionDAG& DAG) const;
virtual const X86Subtarget* getSubtarget() { virtual const X86Subtarget* getSubtarget() {
return Subtarget; return Subtarget;
@ -563,26 +562,30 @@ namespace llvm {
bool X86ScalarSSEf32; bool X86ScalarSSEf32;
bool X86ScalarSSEf64; bool X86ScalarSSEf64;
SDNode *LowerCallResult(SDValue Chain, SDValue InFlag, CallSDNode *TheCall, SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
unsigned CallingConv, SelectionDAG &DAG); unsigned CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
SDValue LowerMemArgument(SDValue Op, SelectionDAG &DAG, DebugLoc dl, SelectionDAG &DAG,
const CCValAssign &VA, MachineFrameInfo *MFI, SmallVectorImpl<SDValue> &InVals);
unsigned CC, SDValue Root, unsigned i); SDValue LowerMemArgument(SDValue Chain,
unsigned CallConv,
SDValue LowerMemOpCallTo(CallSDNode *TheCall, SelectionDAG &DAG, const SmallVectorImpl<ISD::InputArg> &ArgInfo,
const SDValue &StackPtr, DebugLoc dl, SelectionDAG &DAG,
const CCValAssign &VA, SDValue Chain, const CCValAssign &VA, MachineFrameInfo *MFI,
SDValue Arg, ISD::ArgFlagsTy Flags); unsigned i);
SDValue LowerMemOpCallTo(SDValue Chain, SDValue StackPtr, SDValue Arg,
DebugLoc dl, SelectionDAG &DAG,
const CCValAssign &VA,
ISD::ArgFlagsTy Flags);
// Call lowering helpers. // Call lowering helpers.
bool IsCalleePop(bool isVarArg, unsigned CallingConv); bool IsCalleePop(bool isVarArg, unsigned CallConv);
SDValue EmitTailCallLoadRetAddr(SelectionDAG &DAG, SDValue &OutRetAddr, SDValue EmitTailCallLoadRetAddr(SelectionDAG &DAG, SDValue &OutRetAddr,
SDValue Chain, bool IsTailCall, bool Is64Bit, SDValue Chain, bool IsTailCall, bool Is64Bit,
int FPDiff, DebugLoc dl); int FPDiff, DebugLoc dl);
CCAssignFn *CCAssignFnForNode(unsigned CallingConv) const; CCAssignFn *CCAssignFnForNode(unsigned CallConv) const;
NameDecorationStyle NameDecorationForFORMAL_ARGUMENTS(SDValue Op); NameDecorationStyle NameDecorationForCallConv(unsigned CallConv);
unsigned GetAlignedArgumentStackSize(unsigned StackSize, SelectionDAG &DAG); unsigned GetAlignedArgumentStackSize(unsigned StackSize, SelectionDAG &DAG);
std::pair<SDValue,SDValue> FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG, std::pair<SDValue,SDValue> FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG,
@ -619,10 +622,7 @@ namespace llvm {
SDValue LowerBRCOND(SDValue Op, SelectionDAG &DAG); SDValue LowerBRCOND(SDValue Op, SelectionDAG &DAG);
SDValue LowerMEMSET(SDValue Op, SelectionDAG &DAG); SDValue LowerMEMSET(SDValue Op, SelectionDAG &DAG);
SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG); SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG);
SDValue LowerCALL(SDValue Op, SelectionDAG &DAG);
SDValue LowerRET(SDValue Op, SelectionDAG &DAG);
SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG); SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG);
SDValue LowerFORMAL_ARGUMENTS(SDValue Op, SelectionDAG &DAG);
SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG); SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG);
SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG); SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG);
SDValue LowerVACOPY(SDValue Op, SelectionDAG &DAG); SDValue LowerVACOPY(SDValue Op, SelectionDAG &DAG);
@ -642,6 +642,26 @@ namespace llvm {
SDValue LowerLOAD_SUB(SDValue Op, SelectionDAG &DAG); SDValue LowerLOAD_SUB(SDValue Op, SelectionDAG &DAG);
SDValue LowerREADCYCLECOUNTER(SDValue Op, SelectionDAG &DAG); SDValue LowerREADCYCLECOUNTER(SDValue Op, SelectionDAG &DAG);
virtual SDValue
LowerFormalArguments(SDValue Chain,
unsigned CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals);
virtual SDValue
LowerCall(SDValue Chain, SDValue Callee,
unsigned CallConv, bool isVarArg, bool isTailCall,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals);
virtual SDValue
LowerReturn(SDValue Chain,
unsigned CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
DebugLoc dl, SelectionDAG &DAG);
void ReplaceATOMIC_BINARY_64(SDNode *N, SmallVectorImpl<SDValue> &Results, void ReplaceATOMIC_BINARY_64(SDNode *N, SmallVectorImpl<SDValue> &Results,
SelectionDAG &DAG, unsigned NewOp); SelectionDAG &DAG, unsigned NewOp);

View File

@ -1528,23 +1528,7 @@ def : Pat<(X86call (i64 tglobaladdr:$dst)),
def : Pat<(X86call (i64 texternalsym:$dst)), def : Pat<(X86call (i64 texternalsym:$dst)),
(WINCALL64pcrel32 texternalsym:$dst)>, Requires<[IsWin64]>; (WINCALL64pcrel32 texternalsym:$dst)>, Requires<[IsWin64]>;
def : Pat<(X86tailcall (i64 tglobaladdr:$dst)),
(CALL64pcrel32 tglobaladdr:$dst)>;
def : Pat<(X86tailcall (i64 texternalsym:$dst)),
(CALL64pcrel32 texternalsym:$dst)>;
def : Pat<(X86tailcall GR64:$dst),
(CALL64r GR64:$dst)>;
// tailcall stuff // tailcall stuff
def : Pat<(X86tailcall GR32:$dst),
(TAILCALL)>;
def : Pat<(X86tailcall (i64 tglobaladdr:$dst)),
(TAILCALL)>;
def : Pat<(X86tailcall (i64 texternalsym:$dst)),
(TAILCALL)>;
def : Pat<(X86tcret GR64:$dst, imm:$off), def : Pat<(X86tcret GR64:$dst, imm:$off),
(TCRETURNri64 GR64:$dst, imm:$off)>; (TCRETURNri64 GR64:$dst, imm:$off)>;

View File

@ -124,9 +124,6 @@ def X86callseq_end :
def X86call : SDNode<"X86ISD::CALL", SDT_X86Call, def X86call : SDNode<"X86ISD::CALL", SDT_X86Call,
[SDNPHasChain, SDNPOutFlag, SDNPOptInFlag]>; [SDNPHasChain, SDNPOutFlag, SDNPOptInFlag]>;
def X86tailcall: SDNode<"X86ISD::TAILCALL", SDT_X86Call,
[SDNPHasChain, SDNPOutFlag, SDNPOptInFlag]>;
def X86rep_stos: SDNode<"X86ISD::REP_STOS", SDTX86RepStr, def X86rep_stos: SDNode<"X86ISD::REP_STOS", SDTX86RepStr,
[SDNPHasChain, SDNPInFlag, SDNPOutFlag, SDNPMayStore]>; [SDNPHasChain, SDNPInFlag, SDNPOutFlag, SDNPMayStore]>;
def X86rep_movs: SDNode<"X86ISD::REP_MOVS", SDTX86RepStr, def X86rep_movs: SDNode<"X86ISD::REP_MOVS", SDTX86RepStr,
@ -622,10 +619,6 @@ let isCall = 1 in
// Tail call stuff. // Tail call stuff.
def TAILCALL : I<0, Pseudo, (outs), (ins),
"#TAILCALL",
[]>;
let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in
def TCRETURNdi : I<0, Pseudo, (outs), (ins i32imm:$dst, i32imm:$offset, variable_ops), def TCRETURNdi : I<0, Pseudo, (outs), (ins i32imm:$dst, i32imm:$offset, variable_ops),
"#TC_RETURN $dst $offset", "#TC_RETURN $dst $offset",
@ -3452,14 +3445,6 @@ def : Pat<(store (i32 (X86Wrapper texternalsym:$src)), addr:$dst),
// Calls // Calls
// tailcall stuff // tailcall stuff
def : Pat<(X86tailcall GR32:$dst),
(TAILCALL)>;
def : Pat<(X86tailcall (i32 tglobaladdr:$dst)),
(TAILCALL)>;
def : Pat<(X86tailcall (i32 texternalsym:$dst)),
(TAILCALL)>;
def : Pat<(X86tcret GR32:$dst, imm:$off), def : Pat<(X86tcret GR32:$dst, imm:$off),
(TCRETURNri GR32:$dst, imm:$off)>; (TCRETURNri GR32:$dst, imm:$off)>;

View File

@ -113,9 +113,6 @@ XCoreTargetLowering::XCoreTargetLowering(XCoreTargetMachine &XTM)
setOperationAction(ISD::BR_JT, MVT::Other, Expand); setOperationAction(ISD::BR_JT, MVT::Other, Expand);
setOperationAction(ISD::JumpTable, MVT::i32, Custom); setOperationAction(ISD::JumpTable, MVT::i32, Custom);
// RET must be custom lowered, to meet ABI requirements
setOperationAction(ISD::RET, MVT::Other, Custom);
setOperationAction(ISD::GlobalAddress, MVT::i32, Custom); setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
// Thread Local Storage // Thread Local Storage
@ -162,9 +159,6 @@ SDValue XCoreTargetLowering::
LowerOperation(SDValue Op, SelectionDAG &DAG) { LowerOperation(SDValue Op, SelectionDAG &DAG) {
switch (Op.getOpcode()) switch (Op.getOpcode())
{ {
case ISD::CALL: return LowerCALL(Op, DAG);
case ISD::FORMAL_ARGUMENTS: return LowerFORMAL_ARGUMENTS(Op, DAG);
case ISD::RET: return LowerRET(Op, DAG);
case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG); case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG); case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
case ISD::ConstantPool: return LowerConstantPool(Op, DAG); case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
@ -455,6 +449,7 @@ LowerLOAD(SDValue Op, SelectionDAG &DAG)
std::pair<SDValue, SDValue> CallResult = std::pair<SDValue, SDValue> CallResult =
LowerCallTo(Chain, IntPtrTy, false, false, LowerCallTo(Chain, IntPtrTy, false, false,
false, false, 0, CallingConv::C, false, false, false, 0, CallingConv::C, false,
/*isReturnValueUsed=*/true,
DAG.getExternalSymbol("__misaligned_load", getPointerTy()), DAG.getExternalSymbol("__misaligned_load", getPointerTy()),
Args, DAG, dl); Args, DAG, dl);
@ -515,6 +510,7 @@ LowerSTORE(SDValue Op, SelectionDAG &DAG)
std::pair<SDValue, SDValue> CallResult = std::pair<SDValue, SDValue> CallResult =
LowerCallTo(Chain, Type::VoidTy, false, false, LowerCallTo(Chain, Type::VoidTy, false, false,
false, false, 0, CallingConv::C, false, false, false, 0, CallingConv::C, false,
/*isReturnValueUsed=*/true,
DAG.getExternalSymbol("__misaligned_store", getPointerTy()), DAG.getExternalSymbol("__misaligned_store", getPointerTy()),
Args, DAG, dl); Args, DAG, dl);
@ -603,35 +599,33 @@ SDValue XCoreTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) {
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
// Calling Convention Implementation // Calling Convention Implementation
//
// The lower operations present on calling convention works on this order:
// LowerCALL (virt regs --> phys regs, virt regs --> stack)
// LowerFORMAL_ARGUMENTS (phys --> virt regs, stack --> virt regs)
// LowerRET (virt regs --> phys regs)
// LowerCALL (phys regs --> virt regs)
//
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
#include "XCoreGenCallingConv.inc" #include "XCoreGenCallingConv.inc"
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
// CALL Calling Convention Implementation // Call Calling Convention Implementation
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
/// XCore custom CALL implementation /// XCore call implementation
SDValue XCoreTargetLowering:: SDValue
LowerCALL(SDValue Op, SelectionDAG &DAG) XCoreTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
{ unsigned CallConv, bool isVarArg,
CallSDNode *TheCall = cast<CallSDNode>(Op.getNode()); bool isTailCall,
unsigned CallingConv = TheCall->getCallingConv(); const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) {
// For now, only CallingConv::C implemented // For now, only CallingConv::C implemented
switch (CallingConv) switch (CallConv)
{ {
default: default:
llvm_unreachable("Unsupported calling convention"); llvm_unreachable("Unsupported calling convention");
case CallingConv::Fast: case CallingConv::Fast:
case CallingConv::C: case CallingConv::C:
return LowerCCCCallTo(Op, DAG, CallingConv); return LowerCCCCallTo(Chain, Callee, CallConv, isVarArg, isTailCall,
Outs, Ins, dl, DAG, InVals);
} }
} }
@ -639,24 +633,25 @@ LowerCALL(SDValue Op, SelectionDAG &DAG)
/// regs to (physical regs)/(stack frame), CALLSEQ_START and /// regs to (physical regs)/(stack frame), CALLSEQ_START and
/// CALLSEQ_END are emitted. /// CALLSEQ_END are emitted.
/// TODO: isTailCall, sret. /// TODO: isTailCall, sret.
SDValue XCoreTargetLowering:: SDValue
LowerCCCCallTo(SDValue Op, SelectionDAG &DAG, unsigned CC) XCoreTargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee,
{ unsigned CallConv, bool isVarArg,
CallSDNode *TheCall = cast<CallSDNode>(Op.getNode()); bool isTailCall,
SDValue Chain = TheCall->getChain(); const SmallVectorImpl<ISD::OutputArg> &Outs,
SDValue Callee = TheCall->getCallee(); const SmallVectorImpl<ISD::InputArg> &Ins,
bool isVarArg = TheCall->isVarArg(); DebugLoc dl, SelectionDAG &DAG,
DebugLoc dl = Op.getDebugLoc(); SmallVectorImpl<SDValue> &InVals) {
// Analyze operands of the call, assigning locations to each operand. // Analyze operands of the call, assigning locations to each operand.
SmallVector<CCValAssign, 16> ArgLocs; SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs, *DAG.getContext()); CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
ArgLocs, *DAG.getContext());
// The ABI dictates there should be one stack slot available to the callee // The ABI dictates there should be one stack slot available to the callee
// on function entry (for saving lr). // on function entry (for saving lr).
CCInfo.AllocateStack(4, 4); CCInfo.AllocateStack(4, 4);
CCInfo.AnalyzeCallOperands(TheCall, CC_XCore); CCInfo.AnalyzeCallOperands(Outs, CC_XCore);
// Get a count of how many bytes are to be pushed on the stack. // Get a count of how many bytes are to be pushed on the stack.
unsigned NumBytes = CCInfo.getNextStackOffset(); unsigned NumBytes = CCInfo.getNextStackOffset();
@ -670,9 +665,7 @@ LowerCCCCallTo(SDValue Op, SelectionDAG &DAG, unsigned CC)
// Walk the register/memloc assignments, inserting copies/loads. // Walk the register/memloc assignments, inserting copies/loads.
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
CCValAssign &VA = ArgLocs[i]; CCValAssign &VA = ArgLocs[i];
SDValue Arg = Outs[i].Val;
// Arguments start after the 5 first operands of ISD::CALL
SDValue Arg = TheCall->getArg(i);
// Promote the value if needed. // Promote the value if needed.
switch (VA.getLocInfo()) { switch (VA.getLocInfo()) {
@ -759,60 +752,58 @@ LowerCCCCallTo(SDValue Op, SelectionDAG &DAG, unsigned CC)
// Handle result values, copying them out of physregs into vregs that we // Handle result values, copying them out of physregs into vregs that we
// return. // return.
return SDValue(LowerCallResult(Chain, InFlag, TheCall, CC, DAG), return LowerCallResult(Chain, InFlag, CallConv, isVarArg,
Op.getResNo()); Ins, dl, DAG, InVals);
} }
/// LowerCallResult - Lower the result values of an ISD::CALL into the /// LowerCallResult - Lower the result values of a call into the
/// appropriate copies out of appropriate physical registers. This assumes that /// appropriate copies out of appropriate physical registers.
/// Chain/InFlag are the input chain/flag to use, and that TheCall is the call SDValue
/// being lowered. Returns a SDNode with the same number of values as the XCoreTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
/// ISD::CALL. unsigned CallConv, bool isVarArg,
SDNode *XCoreTargetLowering:: const SmallVectorImpl<ISD::InputArg> &Ins,
LowerCallResult(SDValue Chain, SDValue InFlag, CallSDNode *TheCall, DebugLoc dl, SelectionDAG &DAG,
unsigned CallingConv, SelectionDAG &DAG) { SmallVectorImpl<SDValue> &InVals) {
bool isVarArg = TheCall->isVarArg();
DebugLoc dl = TheCall->getDebugLoc();
// Assign locations to each value returned by this call. // Assign locations to each value returned by this call.
SmallVector<CCValAssign, 16> RVLocs; SmallVector<CCValAssign, 16> RVLocs;
CCState CCInfo(CallingConv, isVarArg, getTargetMachine(), CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
RVLocs, *DAG.getContext()); RVLocs, *DAG.getContext());
CCInfo.AnalyzeCallResult(TheCall, RetCC_XCore); CCInfo.AnalyzeCallResult(Ins, RetCC_XCore);
SmallVector<SDValue, 8> ResultVals;
// Copy all of the result registers out of their specified physreg. // Copy all of the result registers out of their specified physreg.
for (unsigned i = 0; i != RVLocs.size(); ++i) { for (unsigned i = 0; i != RVLocs.size(); ++i) {
Chain = DAG.getCopyFromReg(Chain, dl, RVLocs[i].getLocReg(), Chain = DAG.getCopyFromReg(Chain, dl, RVLocs[i].getLocReg(),
RVLocs[i].getValVT(), InFlag).getValue(1); RVLocs[i].getValVT(), InFlag).getValue(1);
InFlag = Chain.getValue(2); InFlag = Chain.getValue(2);
ResultVals.push_back(Chain.getValue(0)); InVals.push_back(Chain.getValue(0));
} }
ResultVals.push_back(Chain); return Chain;
// Merge everything together with a MERGE_VALUES node.
return DAG.getNode(ISD::MERGE_VALUES, dl, TheCall->getVTList(),
&ResultVals[0], ResultVals.size()).getNode();
} }
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
// FORMAL_ARGUMENTS Calling Convention Implementation // Formal Arguments Calling Convention Implementation
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
/// XCore custom FORMAL_ARGUMENTS implementation /// XCore formal arguments implementation
SDValue XCoreTargetLowering:: SDValue
LowerFORMAL_ARGUMENTS(SDValue Op, SelectionDAG &DAG) XCoreTargetLowering::LowerFormalArguments(SDValue Chain,
{ unsigned CallConv,
unsigned CC = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); bool isVarArg,
switch(CC) const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl,
SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) {
switch (CallConv)
{ {
default: default:
llvm_unreachable("Unsupported calling convention"); llvm_unreachable("Unsupported calling convention");
case CallingConv::C: case CallingConv::C:
case CallingConv::Fast: case CallingConv::Fast:
return LowerCCCArguments(Op, DAG); return LowerCCCArguments(Chain, CallConv, isVarArg,
Ins, dl, DAG, InVals);
} }
} }
@ -820,27 +811,28 @@ LowerFORMAL_ARGUMENTS(SDValue Op, SelectionDAG &DAG)
/// virtual registers and generate load operations for /// virtual registers and generate load operations for
/// arguments places on the stack. /// arguments places on the stack.
/// TODO: sret /// TODO: sret
SDValue XCoreTargetLowering:: SDValue
LowerCCCArguments(SDValue Op, SelectionDAG &DAG) XCoreTargetLowering::LowerCCCArguments(SDValue Chain,
{ unsigned CallConv,
bool isVarArg,
const SmallVectorImpl<ISD::InputArg>
&Ins,
DebugLoc dl,
SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) {
MachineFunction &MF = DAG.getMachineFunction(); MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo *MFI = MF.getFrameInfo(); MachineFrameInfo *MFI = MF.getFrameInfo();
MachineRegisterInfo &RegInfo = MF.getRegInfo(); MachineRegisterInfo &RegInfo = MF.getRegInfo();
SDValue Root = Op.getOperand(0);
bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue() != 0;
unsigned CC = MF.getFunction()->getCallingConv();
DebugLoc dl = Op.getDebugLoc();
// Assign locations to all of the incoming arguments. // Assign locations to all of the incoming arguments.
SmallVector<CCValAssign, 16> ArgLocs; SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs, *DAG.getContext()); CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
ArgLocs, *DAG.getContext());
CCInfo.AnalyzeFormalArguments(Op.getNode(), CC_XCore); CCInfo.AnalyzeFormalArguments(Ins, CC_XCore);
unsigned StackSlotSize = XCoreFrameInfo::stackSlotSize(); unsigned StackSlotSize = XCoreFrameInfo::stackSlotSize();
SmallVector<SDValue, 16> ArgValues;
unsigned LRSaveSize = StackSlotSize; unsigned LRSaveSize = StackSlotSize;
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
@ -854,7 +846,7 @@ LowerCCCArguments(SDValue Op, SelectionDAG &DAG)
default: default:
{ {
#ifndef NDEBUG #ifndef NDEBUG
errs() << "LowerFORMAL_ARGUMENTS Unhandled argument type: " errs() << "LowerFormalArguments Unhandled argument type: "
<< RegVT.getSimpleVT() << "\n"; << RegVT.getSimpleVT() << "\n";
#endif #endif
llvm_unreachable(0); llvm_unreachable(0);
@ -863,7 +855,7 @@ LowerCCCArguments(SDValue Op, SelectionDAG &DAG)
unsigned VReg = RegInfo.createVirtualRegister( unsigned VReg = RegInfo.createVirtualRegister(
XCore::GRRegsRegisterClass); XCore::GRRegsRegisterClass);
RegInfo.addLiveIn(VA.getLocReg(), VReg); RegInfo.addLiveIn(VA.getLocReg(), VReg);
ArgValues.push_back(DAG.getCopyFromReg(Root, dl, VReg, RegVT)); InVals.push_back(DAG.getCopyFromReg(Chain, dl, VReg, RegVT));
} }
} else { } else {
// sanity check // sanity check
@ -871,7 +863,7 @@ LowerCCCArguments(SDValue Op, SelectionDAG &DAG)
// Load the argument to a virtual register // Load the argument to a virtual register
unsigned ObjSize = VA.getLocVT().getSizeInBits()/8; unsigned ObjSize = VA.getLocVT().getSizeInBits()/8;
if (ObjSize > StackSlotSize) { if (ObjSize > StackSlotSize) {
errs() << "LowerFORMAL_ARGUMENTS Unhandled argument type: " errs() << "LowerFormalArguments Unhandled argument type: "
<< VA.getLocVT().getSimpleVT() << VA.getLocVT().getSimpleVT()
<< "\n"; << "\n";
} }
@ -882,7 +874,7 @@ LowerCCCArguments(SDValue Op, SelectionDAG &DAG)
// Create the SelectionDAG nodes corresponding to a load // Create the SelectionDAG nodes corresponding to a load
//from this parameter //from this parameter
SDValue FIN = DAG.getFrameIndex(FI, MVT::i32); SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
ArgValues.push_back(DAG.getLoad(VA.getLocVT(), dl, Root, FIN, NULL, 0)); InVals.push_back(DAG.getLoad(VA.getLocVT(), dl, Chain, FIN, NULL, 0));
} }
} }
@ -911,14 +903,14 @@ LowerCCCArguments(SDValue Op, SelectionDAG &DAG)
unsigned VReg = RegInfo.createVirtualRegister( unsigned VReg = RegInfo.createVirtualRegister(
XCore::GRRegsRegisterClass); XCore::GRRegsRegisterClass);
RegInfo.addLiveIn(ArgRegs[i], VReg); RegInfo.addLiveIn(ArgRegs[i], VReg);
SDValue Val = DAG.getCopyFromReg(Root, dl, VReg, MVT::i32); SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
// Move argument from virt reg -> stack // Move argument from virt reg -> stack
SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, NULL, 0); SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, NULL, 0);
MemOps.push_back(Store); MemOps.push_back(Store);
} }
if (!MemOps.empty()) if (!MemOps.empty())
Root = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
&MemOps[0], MemOps.size()); &MemOps[0], MemOps.size());
} else { } else {
// This will point to the next argument passed via stack. // This will point to the next argument passed via stack.
XFI->setVarArgsFrameIndex( XFI->setVarArgsFrameIndex(
@ -926,34 +918,29 @@ LowerCCCArguments(SDValue Op, SelectionDAG &DAG)
} }
} }
ArgValues.push_back(Root); return Chain;
// Return the new list of results.
std::vector<MVT> RetVT(Op.getNode()->value_begin(),
Op.getNode()->value_end());
return DAG.getNode(ISD::MERGE_VALUES, dl, RetVT,
&ArgValues[0], ArgValues.size());
} }
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
// Return Value Calling Convention Implementation // Return Value Calling Convention Implementation
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
SDValue XCoreTargetLowering:: SDValue
LowerRET(SDValue Op, SelectionDAG &DAG) XCoreTargetLowering::LowerReturn(SDValue Chain,
{ unsigned CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
DebugLoc dl, SelectionDAG &DAG) {
// CCValAssign - represent the assignment of // CCValAssign - represent the assignment of
// the return value to a location // the return value to a location
SmallVector<CCValAssign, 16> RVLocs; SmallVector<CCValAssign, 16> RVLocs;
unsigned CC = DAG.getMachineFunction().getFunction()->getCallingConv();
bool isVarArg = DAG.getMachineFunction().getFunction()->isVarArg();
DebugLoc dl = Op.getDebugLoc();
// CCState - Info about the registers and stack slot. // CCState - Info about the registers and stack slot.
CCState CCInfo(CC, isVarArg, getTargetMachine(), RVLocs, *DAG.getContext()); CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
RVLocs, *DAG.getContext());
// Analize return values of ISD::RET // Analize return values.
CCInfo.AnalyzeReturn(Op.getNode(), RetCC_XCore); CCInfo.AnalyzeReturn(Outs, RetCC_XCore);
// If this is the first return lowered for this function, add // If this is the first return lowered for this function, add
// the regs to the liveout set for the function. // the regs to the liveout set for the function.
@ -963,8 +950,6 @@ LowerRET(SDValue Op, SelectionDAG &DAG)
DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg()); DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg());
} }
// The chain is always operand #0
SDValue Chain = Op.getOperand(0);
SDValue Flag; SDValue Flag;
// Copy the result values into the output registers. // Copy the result values into the output registers.
@ -972,10 +957,8 @@ LowerRET(SDValue Op, SelectionDAG &DAG)
CCValAssign &VA = RVLocs[i]; CCValAssign &VA = RVLocs[i];
assert(VA.isRegLoc() && "Can only return in registers!"); assert(VA.isRegLoc() && "Can only return in registers!");
// ISD::RET => ret chain, (regnum1,val1), ...
// So i*2+1 index only the regnums
Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
Op.getOperand(i*2+1), Flag); Outs[i].Val, Flag);
// guarantee that all emitted copies are // guarantee that all emitted copies are
// stuck together, avoiding something bad // stuck together, avoiding something bad

View File

@ -92,10 +92,24 @@ namespace llvm {
const XCoreSubtarget &Subtarget; const XCoreSubtarget &Subtarget;
// Lower Operand helpers // Lower Operand helpers
SDValue LowerCCCArguments(SDValue Op, SelectionDAG &DAG); SDValue LowerCCCArguments(SDValue Chain,
SDValue LowerCCCCallTo(SDValue Op, SelectionDAG &DAG, unsigned CC); unsigned CallConv,
SDNode *LowerCallResult(SDValue Chain, SDValue InFlag, CallSDNode*TheCall, bool isVarArg,
unsigned CallingConv, SelectionDAG &DAG); const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals);
SDValue LowerCCCCallTo(SDValue Chain, SDValue Callee,
unsigned CallConv, bool isVarArg,
bool isTailCall,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals);
SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
unsigned CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals);
SDValue getReturnAddressFrameIndex(SelectionDAG &DAG); SDValue getReturnAddressFrameIndex(SelectionDAG &DAG);
SDValue getGlobalAddressWrapper(SDValue GA, GlobalValue *GV, SDValue getGlobalAddressWrapper(SDValue GA, GlobalValue *GV,
SelectionDAG &DAG); SelectionDAG &DAG);
@ -103,9 +117,6 @@ namespace llvm {
// Lower Operand specifics // Lower Operand specifics
SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG); SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG);
SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG); SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG);
SDValue LowerRET(SDValue Op, SelectionDAG &DAG);
SDValue LowerCALL(SDValue Op, SelectionDAG &DAG);
SDValue LowerFORMAL_ARGUMENTS(SDValue Op, SelectionDAG &DAG);
SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG); SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG);
SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG); SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG);
SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG); SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG);
@ -124,6 +135,29 @@ namespace llvm {
SDValue ExpandADDSUB(SDNode *Op, SelectionDAG &DAG); SDValue ExpandADDSUB(SDNode *Op, SelectionDAG &DAG);
virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const; virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
virtual SDValue
LowerFormalArguments(SDValue Chain,
unsigned CallConv,
bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals);
virtual SDValue
LowerCall(SDValue Chain, SDValue Callee,
unsigned CallConv, bool isVarArg,
bool isTailCall,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals);
virtual SDValue
LowerReturn(SDValue Chain,
unsigned CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
DebugLoc dl, SelectionDAG &DAG);
}; };
} }

View File

@ -1,14 +1,17 @@
; RUN: llvm-as < %s | llc -tailcallopt -march=x86-64 | grep TAILCALL ; RUN: llvm-as < %s | llc -tailcallopt -march=x86-64 | FileCheck %s
; Check that lowered arguments on the stack do not overwrite each other. ; Check that lowered arguments on the stack do not overwrite each other.
; Move param %in1 to temp register (%eax). ; Add %in1 %p1 to a different temporary register (%eax).
; RUN: llvm-as < %s | llc -tailcallopt -march=x86-64 -x86-asm-syntax=att | grep {movl 40(%rsp), %eax} ; CHECK: movl %edi, %eax
; Add %in1 %p1 to another temporary register (%r9d). ; CHECK: addl 32(%rsp), %eax
; RUN: llvm-as < %s | llc -tailcallopt -march=x86-64 -x86-asm-syntax=att | grep {movl %edi, %r10d} ; Move param %in1 to temp register (%r10d).
; RUN: llvm-as < %s | llc -tailcallopt -march=x86-64 -x86-asm-syntax=att | grep {addl 32(%rsp), %r10d} ; CHECK: movl 40(%rsp), %r10d
; Move result of addition to stack. ; Move result of addition to stack.
; RUN: llvm-as < %s | llc -tailcallopt -march=x86-64 -x86-asm-syntax=att | grep {movl %r10d, 40(%rsp)} ; CHECK: movl %eax, 40(%rsp)
; Move param %in2 to stack. ; Move param %in2 to stack.
; RUN: llvm-as < %s | llc -tailcallopt -march=x86-64 -x86-asm-syntax=att | grep {movl %eax, 32(%rsp)} ; CHECK: movl %r10d, 32(%rsp)
; Eventually, do a TAILCALL
; CHECK: TAILCALL
declare fastcc i32 @tailcallee(i32 %p1, i32 %p2, i32 %p3, i32 %p4, i32 %p5, i32 %p6, i32 %a, i32 %b) declare fastcc i32 @tailcallee(i32 %p1, i32 %p2, i32 %p3, i32 %p4, i32 %p5, i32 %p6, i32 %a, i32 %b)