//===-- CallingConvLower.cpp - Calling Conventions ------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the CCState class, used for lowering and implementing
// calling conventions.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>

using namespace llvm;

CCState::CCState(CallingConv::ID CC, bool isVarArg, MachineFunction &mf,
                 SmallVectorImpl<CCValAssign> &locs, LLVMContext &C)
    : CallingConv(CC), IsVarArg(isVarArg), MF(mf),
      TRI(*MF.getSubtarget().getRegisterInfo()), Locs(locs), Context(C) {
  // No stack is used.
  StackOffset = 0;
  MaxStackArgAlign = 1;

  clearByValRegsInfo();
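  // One bit per physical register, rounded up to a whole number of 32-bit
  // words; MarkAllocated sets these bits for a register and all its aliases.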
  UsedRegs.resize((TRI.getNumRegs()+31)/32);
}

/// Allocate space on the stack large enough to pass an argument by value.
/// The size and alignment information of the argument is encoded in
/// its parameter attribute.
void CCState::HandleByVal(unsigned ValNo, MVT ValVT,
                          MVT LocVT, CCValAssign::LocInfo LocInfo,
                          int MinSize, int MinAlign,
                          ISD::ArgFlagsTy ArgFlags) {
  unsigned Align = ArgFlags.getByValAlign();
  unsigned Size = ArgFlags.getByValSize();
  if (MinSize > (int)Size)
    Size = MinSize;
  if (MinAlign > (int)Align)
    Align = MinAlign;
  ensureMaxAlignment(Align);
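  // Let the target adjust the byval handling; some targets pass part or all
  // of a byval argument in registers rather than on the stack.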
  MF.getSubtarget().getTargetLowering()->HandleByVal(this, Size, Align);
  Size = unsigned(alignTo(Size, MinAlign));
  unsigned Offset = AllocateStack(Size, Align);
  addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
}

/// Mark a register and all of its aliases as allocated.
void CCState::MarkAllocated(unsigned Reg) {
  for (MCRegAliasIterator AI(Reg, &TRI, true); AI.isValid(); ++AI)
    UsedRegs[*AI/32] |= 1 << (*AI&31);
}
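
/// Returns true if Reg has been allocated (for example as a shadow of another
/// assignment) but does not appear, directly or via an alias, in the list of
/// assigned locations.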
bool CCState::IsShadowAllocatedReg(unsigned Reg) const {
  if (!isAllocated(Reg))
    return false;

  for (auto const &ValAssign : Locs) {
    if (ValAssign.isRegLoc()) {
      for (MCRegAliasIterator AI(ValAssign.getLocReg(), &TRI, true);
           AI.isValid(); ++AI) {
        if (*AI == Reg)
          return false;
      }
    }
  }
  return true;
}

/// Analyze an array of argument values,
/// incorporating info about the formals into this state.
void
CCState::AnalyzeFormalArguments(const SmallVectorImpl<ISD::InputArg> &Ins,
                                CCAssignFn Fn) {
  unsigned NumArgs = Ins.size();

  for (unsigned i = 0; i != NumArgs; ++i) {
    MVT ArgVT = Ins[i].VT;
    ISD::ArgFlagsTy ArgFlags = Ins[i].Flags;
    if (Fn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, *this)) {
#ifndef NDEBUG
      dbgs() << "Formal argument #" << i << " has unhandled type "
             << EVT(ArgVT).getEVTString() << '\n';
#endif
      llvm_unreachable(nullptr);
    }
  }
}

/// Analyze the return values of a function, returning true if the return can
/// be performed without sret-demotion and false otherwise.
bool CCState::CheckReturn(const SmallVectorImpl<ISD::OutputArg> &Outs,
                          CCAssignFn Fn) {
  // Determine which register each value should be copied into.
  for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
    MVT VT = Outs[i].VT;
    ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
    if (Fn(i, VT, VT, CCValAssign::Full, ArgFlags, *this))
      return false;
  }
  return true;
}

/// Analyze the returned values of a return,
/// incorporating info about the result values into this state.
void CCState::AnalyzeReturn(const SmallVectorImpl<ISD::OutputArg> &Outs,
                            CCAssignFn Fn) {
  // Determine which register each value should be copied into.
  for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
    MVT VT = Outs[i].VT;
    ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
    if (Fn(i, VT, VT, CCValAssign::Full, ArgFlags, *this)) {
#ifndef NDEBUG
      dbgs() << "Return operand #" << i << " has unhandled type "
             << EVT(VT).getEVTString() << '\n';
#endif
      llvm_unreachable(nullptr);
    }
  }
}

/// Analyze the outgoing arguments to a call,
/// incorporating info about the passed values into this state.
void CCState::AnalyzeCallOperands(const SmallVectorImpl<ISD::OutputArg> &Outs,
                                  CCAssignFn Fn) {
  unsigned NumOps = Outs.size();
  for (unsigned i = 0; i != NumOps; ++i) {
    MVT ArgVT = Outs[i].VT;
    ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
    if (Fn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, *this)) {
#ifndef NDEBUG
      dbgs() << "Call operand #" << i << " has unhandled type "
             << EVT(ArgVT).getEVTString() << '\n';
#endif
      llvm_unreachable(nullptr);
    }
  }
}

/// Same as above except it takes vectors of types and argument flags.
void CCState::AnalyzeCallOperands(SmallVectorImpl<MVT> &ArgVTs,
                                  SmallVectorImpl<ISD::ArgFlagsTy> &Flags,
                                  CCAssignFn Fn) {
  unsigned NumOps = ArgVTs.size();
  for (unsigned i = 0; i != NumOps; ++i) {
    MVT ArgVT = ArgVTs[i];
    ISD::ArgFlagsTy ArgFlags = Flags[i];
    if (Fn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, *this)) {
#ifndef NDEBUG
      dbgs() << "Call operand #" << i << " has unhandled type "
             << EVT(ArgVT).getEVTString() << '\n';
#endif
      llvm_unreachable(nullptr);
    }
  }
}

/// Analyze the return values of a call, incorporating info about the passed
/// values into this state.
void CCState::AnalyzeCallResult(const SmallVectorImpl<ISD::InputArg> &Ins,
                                CCAssignFn Fn) {
  for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
    MVT VT = Ins[i].VT;
    ISD::ArgFlagsTy Flags = Ins[i].Flags;
    if (Fn(i, VT, VT, CCValAssign::Full, Flags, *this)) {
#ifndef NDEBUG
      dbgs() << "Call result #" << i << " has unhandled type "
             << EVT(VT).getEVTString() << '\n';
#endif
      llvm_unreachable(nullptr);
    }
  }
}

/// Same as above except it's specialized for calls that produce a single value.
void CCState::AnalyzeCallResult(MVT VT, CCAssignFn Fn) {
  if (Fn(0, VT, VT, CCValAssign::Full, ISD::ArgFlagsTy(), *this)) {
#ifndef NDEBUG
    dbgs() << "Call result has unhandled type "
           << EVT(VT).getEVTString() << '\n';
#endif
    llvm_unreachable(nullptr);
  }
}
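
/// Decide whether values of the given type are passed in registers for CC;
/// used below when computing the remaining register parameters.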
static bool isValueTypeInRegForCC(CallingConv::ID CC, MVT VT) {
  if (VT.isVector())
    return true; // Assume -msse-regparm might be in effect.
  if (!VT.isInteger())
    return false;
  if (CC == CallingConv::X86_VectorCall || CC == CallingConv::X86_FastCall)
    return true;
  return false;
}
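
/// Collect the registers of type VT that this calling convention has not yet
/// allocated, by repeatedly running Fn until it assigns a memory location
/// instead of a register.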
void CCState::getRemainingRegParmsForType(SmallVectorImpl<MCPhysReg> &Regs,
                                          MVT VT, CCAssignFn Fn) {
  unsigned SavedStackOffset = StackOffset;
  unsigned SavedMaxStackArgAlign = MaxStackArgAlign;
  unsigned NumLocs = Locs.size();

  // Set the 'inreg' flag if it is used for this calling convention.
  ISD::ArgFlagsTy Flags;
  if (isValueTypeInRegForCC(CallingConv, VT))
    Flags.setInReg();

  // Allocate something of this value type repeatedly until we get assigned a
  // location in memory.
  bool HaveRegParm = true;
  while (HaveRegParm) {
    if (Fn(0, VT, VT, CCValAssign::Full, Flags, *this)) {
#ifndef NDEBUG
      dbgs() << "Call has unhandled type " << EVT(VT).getEVTString()
             << " while computing remaining regparms\n";
#endif
      llvm_unreachable(nullptr);
    }
    HaveRegParm = Locs.back().isRegLoc();
  }

  // Copy all the registers from the value locations we added.
  assert(NumLocs < Locs.size() && "CC assignment failed to add location");
  for (unsigned I = NumLocs, E = Locs.size(); I != E; ++I)
    if (Locs[I].isRegLoc())
      Regs.push_back(MCPhysReg(Locs[I].getLocReg()));

  // Clear the assigned values and stack memory. We leave the registers marked
  // as allocated so that future queries don't return the same registers, i.e.
  // when i64 and f64 are both passed in GPRs.
  StackOffset = SavedStackOffset;
  MaxStackArgAlign = SavedMaxStackArgAlign;
  Locs.resize(NumLocs);
}
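
/// Compute the set of registers that need to be preserved and forwarded to
/// any musttail call.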
void CCState::analyzeMustTailForwardedRegisters(
    SmallVectorImpl<ForwardedRegister> &Forwards, ArrayRef<MVT> RegParmTypes,
    CCAssignFn Fn) {
  // Oftentimes calling conventions will not use register parameters for
  // variadic functions, so we need to assume we're not variadic so that we get
  // all the registers that might be used in a non-variadic call.
  SaveAndRestore<bool> SavedVarArg(IsVarArg, false);
  SaveAndRestore<bool> SavedMustTail(AnalyzingMustTailForwardedRegs, true);

  for (MVT RegVT : RegParmTypes) {
    SmallVector<MCPhysReg, 8> RemainingRegs;
    getRemainingRegParmsForType(RemainingRegs, RegVT, Fn);
    const TargetLowering *TL = MF.getSubtarget().getTargetLowering();
    const TargetRegisterClass *RC = TL->getRegClassFor(RegVT);
    for (MCPhysReg PReg : RemainingRegs) {
      unsigned VReg = MF.addLiveIn(PReg, RC);
      Forwards.push_back(ForwardedRegister(VReg, PReg, RegVT));
    }
  }
}
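
/// Returns true if the results of the two calling conventions are compatible.
/// This is usually part of the check for tailcall eligibility.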
bool CCState::resultsCompatible(CallingConv::ID CalleeCC,
                                CallingConv::ID CallerCC, MachineFunction &MF,
                                LLVMContext &C,
                                const SmallVectorImpl<ISD::InputArg> &Ins,
                                CCAssignFn CalleeFn, CCAssignFn CallerFn) {
  if (CalleeCC == CallerCC)
    return true;
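
  // Run both result-assignment functions over the same values and compare the
  // locations they produce; every result must land in the same register or at
  // the same stack offset for the conventions to be compatible.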
  SmallVector<CCValAssign, 4> RVLocs1;
  CCState CCInfo1(CalleeCC, false, MF, RVLocs1, C);
  CCInfo1.AnalyzeCallResult(Ins, CalleeFn);

  SmallVector<CCValAssign, 4> RVLocs2;
  CCState CCInfo2(CallerCC, false, MF, RVLocs2, C);
  CCInfo2.AnalyzeCallResult(Ins, CallerFn);

  if (RVLocs1.size() != RVLocs2.size())
    return false;
  for (unsigned I = 0, E = RVLocs1.size(); I != E; ++I) {
    const CCValAssign &Loc1 = RVLocs1[I];
    const CCValAssign &Loc2 = RVLocs2[I];
    if (Loc1.getLocInfo() != Loc2.getLocInfo())
      return false;
    bool RegLoc1 = Loc1.isRegLoc();
    if (RegLoc1 != Loc2.isRegLoc())
      return false;
    if (RegLoc1) {
      if (Loc1.getLocReg() != Loc2.getLocReg())
        return false;
    } else {
      if (Loc1.getLocMemOffset() != Loc2.getLocMemOffset())
        return false;
    }
  }
  return true;
}