//===-- CallingConvLower.cpp - Calling Conventions ------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the CCState class, used for lowering and implementing
// calling conventions.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"

using namespace llvm;

CCState::CCState(CallingConv::ID CC, bool isVarArg, MachineFunction &mf,
                 SmallVectorImpl<CCValAssign> &locs, LLVMContext &C)
    : CallingConv(CC), IsVarArg(isVarArg), MF(mf),
      TRI(*MF.getSubtarget().getRegisterInfo()), Locs(locs), Context(C),
      CallOrPrologue(Unknown) {
  // No stack is used.
  StackOffset = 0;

  clearByValRegsInfo();
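  // Size the used-register bit vector: one bit per physical register, rounded
  // up to a whole number of 32-bit words.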
  UsedRegs.resize((TRI.getNumRegs()+31)/32);
}

/// Allocate space on the stack large enough to pass an argument by value.
/// The size and alignment information of the argument is encoded in
/// its parameter attribute.
void CCState::HandleByVal(unsigned ValNo, MVT ValVT,
                          MVT LocVT, CCValAssign::LocInfo LocInfo,
                          int MinSize, int MinAlign,
                          ISD::ArgFlagsTy ArgFlags) {
  unsigned Align = ArgFlags.getByValAlign();
  unsigned Size = ArgFlags.getByValSize();
  if (MinSize > (int)Size)
    Size = MinSize;
  if (MinAlign > (int)Align)
    Align = MinAlign;
  MF.getFrameInfo()->ensureMaxAlignment(Align);
  MF.getSubtarget().getTargetLowering()->HandleByVal(this, Size, Align);
  Size = unsigned(RoundUpToAlignment(Size, MinAlign));
  unsigned Offset = AllocateStack(Size, Align);
  addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
}
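
// Note (illustrative): HandleByVal above is normally reached from
// TableGen-generated calling-convention code, where a CCPassByVal<Size, Align>
// rule supplies the MinSize/MinAlign arguments; the argument's own byval size
// and alignment arrive via ArgFlags, and the larger of each pair wins.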

/// Mark a register and all of its aliases as allocated.
void CCState::MarkAllocated(unsigned Reg) {
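  // UsedRegs is a packed bit vector: register N occupies bit (N & 31) of
  // word (N / 32).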
  for (MCRegAliasIterator AI(Reg, &TRI, true); AI.isValid(); ++AI)
    UsedRegs[*AI/32] |= 1 << (*AI&31);
}
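
// Illustrative sketch (not part of this file): every Analyze* helper below
// drives a target-supplied CCAssignFn. A hand-written one looks roughly like
// the following; CC_Hypothetical and the HYPO register names are hypothetical
// placeholders, and the exact AllocateReg overload varies between LLVM
// versions.
//
//   static bool CC_Hypothetical(unsigned ValNo, MVT ValVT, MVT LocVT,
//                               CCValAssign::LocInfo LocInfo,
//                               ISD::ArgFlagsTy ArgFlags, CCState &State) {
//     static const MCPhysReg ArgRegs[] = { HYPO::R0, HYPO::R1, HYPO::R2 };
//     if (unsigned Reg = State.AllocateReg(ArgRegs)) {
//       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
//       return false; // Handled: the value lives in Reg.
//     }
//     unsigned Offset = State.AllocateStack(4, 4);
//     State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
//     return false;   // Handled: the value lives at the allocated stack slot.
//   }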

/// Analyze an array of argument values,
/// incorporating info about the formals into this state.
void
CCState::AnalyzeFormalArguments(const SmallVectorImpl<ISD::InputArg> &Ins,
                                CCAssignFn Fn) {
  unsigned NumArgs = Ins.size();

  for (unsigned i = 0; i != NumArgs; ++i) {
    MVT ArgVT = Ins[i].VT;
    ISD::ArgFlagsTy ArgFlags = Ins[i].Flags;
    if (Fn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, *this)) {
#ifndef NDEBUG
      dbgs() << "Formal argument #" << i << " has unhandled type "
             << EVT(ArgVT).getEVTString() << '\n';
#endif
      llvm_unreachable(nullptr);
    }
  }
}
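
// Illustrative sketch (hypothetical target): the usual way a target's
// LowerFormalArguments drives the helper above; CC_Hypo is a placeholder for
// the target's assignment function.
//
//   SmallVector<CCValAssign, 16> ArgLocs;
//   CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
//   CCInfo.AnalyzeFormalArguments(Ins, CC_Hypo);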

/// Analyze the return values of a function, returning true if the return can
/// be performed without sret-demotion and false otherwise.
bool CCState::CheckReturn(const SmallVectorImpl<ISD::OutputArg> &Outs,
                          CCAssignFn Fn) {
  // Determine which register each value should be copied into.
  for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
    MVT VT = Outs[i].VT;
    ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
    if (Fn(i, VT, VT, CCValAssign::Full, ArgFlags, *this))
      return false;
  }
  return true;
}
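
// Illustrative sketch (hypothetical target): CheckReturn is normally called
// from a target's TargetLowering::CanLowerReturn override; HypoTargetLowering
// and RetCC_Hypo are placeholders.
//
//   bool HypoTargetLowering::CanLowerReturn(
//       CallingConv::ID CallConv, MachineFunction &MF, bool isVarArg,
//       const SmallVectorImpl<ISD::OutputArg> &Outs,
//       LLVMContext &Context) const {
//     SmallVector<CCValAssign, 16> RVLocs;
//     CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
//     return CCInfo.CheckReturn(Outs, RetCC_Hypo);
//   }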

/// Analyze the return values of a return instruction,
/// incorporating info about the result values into this state.
void CCState::AnalyzeReturn(const SmallVectorImpl<ISD::OutputArg> &Outs,
                            CCAssignFn Fn) {
  // Determine which register each value should be copied into.
  for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
    MVT VT = Outs[i].VT;
    ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
    if (Fn(i, VT, VT, CCValAssign::Full, ArgFlags, *this)) {
#ifndef NDEBUG
      dbgs() << "Return operand #" << i << " has unhandled type "
             << EVT(VT).getEVTString() << '\n';
#endif
      llvm_unreachable(nullptr);
    }
  }
}

/// Analyze the outgoing arguments to a call,
/// incorporating info about the passed values into this state.
void CCState::AnalyzeCallOperands(const SmallVectorImpl<ISD::OutputArg> &Outs,
                                  CCAssignFn Fn) {
  unsigned NumOps = Outs.size();
  for (unsigned i = 0; i != NumOps; ++i) {
    MVT ArgVT = Outs[i].VT;
    ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
    if (Fn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, *this)) {
#ifndef NDEBUG
      dbgs() << "Call operand #" << i << " has unhandled type "
             << EVT(ArgVT).getEVTString() << '\n';
#endif
      llvm_unreachable(nullptr);
    }
  }
}
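
// Illustrative sketch (hypothetical target): typical use of the helper above
// from a target's LowerCall, where the accumulated stack offset becomes the
// size of the call's outgoing-argument area; CC_Hypo is a placeholder.
//
//   SmallVector<CCValAssign, 16> ArgLocs;
//   CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
//   CCInfo.AnalyzeCallOperands(Outs, CC_Hypo);
//   unsigned NumBytes = CCInfo.getNextStackOffset();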

/// Same as above except it takes vectors of types and argument flags.
void CCState::AnalyzeCallOperands(SmallVectorImpl<MVT> &ArgVTs,
                                  SmallVectorImpl<ISD::ArgFlagsTy> &Flags,
                                  CCAssignFn Fn) {
  unsigned NumOps = ArgVTs.size();
  for (unsigned i = 0; i != NumOps; ++i) {
    MVT ArgVT = ArgVTs[i];
    ISD::ArgFlagsTy ArgFlags = Flags[i];
    if (Fn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, *this)) {
#ifndef NDEBUG
      dbgs() << "Call operand #" << i << " has unhandled type "
             << EVT(ArgVT).getEVTString() << '\n';
#endif
      llvm_unreachable(nullptr);
    }
  }
}

/// Analyze the return values of a call, incorporating info about the passed
/// values into this state.
void CCState::AnalyzeCallResult(const SmallVectorImpl<ISD::InputArg> &Ins,
                                CCAssignFn Fn) {
  for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
    MVT VT = Ins[i].VT;
    ISD::ArgFlagsTy Flags = Ins[i].Flags;
    if (Fn(i, VT, VT, CCValAssign::Full, Flags, *this)) {
#ifndef NDEBUG
      dbgs() << "Call result #" << i << " has unhandled type "
             << EVT(VT).getEVTString() << '\n';
#endif
      llvm_unreachable(nullptr);
    }
  }
}

/// Same as above except it's specialized for calls that produce a single value.
void CCState::AnalyzeCallResult(MVT VT, CCAssignFn Fn) {
  if (Fn(0, VT, VT, CCValAssign::Full, ISD::ArgFlagsTy(), *this)) {
#ifndef NDEBUG
    dbgs() << "Call result has unhandled type "
           << EVT(VT).getEVTString() << '\n';
#endif
    llvm_unreachable(nullptr);
  }
}

static bool isValueTypeInRegForCC(CallingConv::ID CC, MVT VT) {
  if (VT.isVector())
    return true; // Assume -msse-regparm might be in effect.
  if (!VT.isInteger())
    return false;
  if (CC == CallingConv::X86_VectorCall || CC == CallingConv::X86_FastCall)
    return true;
  return false;
}

void CCState::getRemainingRegParmsForType(SmallVectorImpl<MCPhysReg> &Regs,
                                          MVT VT, CCAssignFn Fn) {
  unsigned SavedStackOffset = StackOffset;
  unsigned NumLocs = Locs.size();

  // Set the 'inreg' flag if it is used for this calling convention.
  ISD::ArgFlagsTy Flags;
  if (isValueTypeInRegForCC(CallingConv, VT))
    Flags.setInReg();

  // Allocate something of this value type repeatedly until we get assigned a
  // location in memory.
  bool HaveRegParm = true;
  while (HaveRegParm) {
    if (Fn(0, VT, VT, CCValAssign::Full, Flags, *this)) {
#ifndef NDEBUG
      dbgs() << "Call has unhandled type " << EVT(VT).getEVTString()
             << " while computing remaining regparms\n";
#endif
      llvm_unreachable(nullptr);
    }
    HaveRegParm = Locs.back().isRegLoc();
  }

  // Copy all the registers from the value locations we added.
  assert(NumLocs < Locs.size() && "CC assignment failed to add location");
  for (unsigned I = NumLocs, E = Locs.size(); I != E; ++I)
    if (Locs[I].isRegLoc())
      Regs.push_back(MCPhysReg(Locs[I].getLocReg()));

  // Clear the assigned values and stack memory. We leave the registers marked
  // as allocated so that future queries don't return the same registers, e.g.
  // when i64 and f64 are both passed in GPRs.
  StackOffset = SavedStackOffset;
  Locs.resize(NumLocs);
}

void CCState::analyzeMustTailForwardedRegisters(
    SmallVectorImpl<ForwardedRegister> &Forwards, ArrayRef<MVT> RegParmTypes,
    CCAssignFn Fn) {
  // Oftentimes calling conventions will not use register parameters for
  // variadic functions, so we need to assume we're not variadic so that we get
  // all the registers that might be used in a non-variadic call.
  SaveAndRestore<bool> SavedVarArg(IsVarArg, false);

  for (MVT RegVT : RegParmTypes) {
    SmallVector<MCPhysReg, 8> RemainingRegs;
    getRemainingRegParmsForType(RemainingRegs, RegVT, Fn);
    const TargetLowering *TL = MF.getSubtarget().getTargetLowering();
    const TargetRegisterClass *RC = TL->getRegClassFor(RegVT);
    for (MCPhysReg PReg : RemainingRegs) {
      unsigned VReg = MF.addLiveIn(PReg, RC);
      Forwards.push_back(ForwardedRegister(VReg, PReg, RegVT));
    }
  }
}
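
// Illustrative sketch (hypothetical target): a variadic function that may be
// musttail-called can forward its unused register parameters with the helper
// above; CCInfo and CC_Hypo are placeholders for the target's own state and
// assignment function.
//
//   SmallVector<MVT, 2> RegParmTypes;
//   RegParmTypes.push_back(MVT::i32);
//   RegParmTypes.push_back(MVT::f64);
//   SmallVector<ForwardedRegister, 4> Forwards;
//   CCInfo.analyzeMustTailForwardedRegisters(Forwards, RegParmTypes, CC_Hypo);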