2017-10-05 08:33:50 +08:00
|
|
|
//===- llvm/lib/Target/X86/X86CallLowering.cpp - Call lowering ------------===//
|
2016-11-15 14:34:33 +08:00
|
|
|
//
|
|
|
|
// The LLVM Compiler Infrastructure
|
|
|
|
//
|
|
|
|
// This file is distributed under the University of Illinois Open Source
|
|
|
|
// License. See LICENSE.TXT for details.
|
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
2017-10-05 08:33:50 +08:00
|
|
|
//
|
2016-11-15 14:34:33 +08:00
|
|
|
/// \file
|
|
|
|
/// This file implements the lowering of LLVM calls to machine code calls for
|
|
|
|
/// GlobalISel.
|
2017-10-05 08:33:50 +08:00
|
|
|
//
|
2016-11-15 14:34:33 +08:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
#include "X86CallLowering.h"
|
2017-03-23 20:13:29 +08:00
|
|
|
#include "X86CallingConv.h"
|
2016-11-15 14:34:33 +08:00
|
|
|
#include "X86ISelLowering.h"
|
|
|
|
#include "X86InstrInfo.h"
|
2017-10-05 08:33:50 +08:00
|
|
|
#include "X86RegisterInfo.h"
|
|
|
|
#include "X86Subtarget.h"
|
|
|
|
#include "llvm/ADT/ArrayRef.h"
|
|
|
|
#include "llvm/ADT/SmallVector.h"
|
2017-07-05 14:24:13 +08:00
|
|
|
#include "llvm/CodeGen/Analysis.h"
|
2017-10-05 08:33:50 +08:00
|
|
|
#include "llvm/CodeGen/CallingConvLower.h"
|
2016-11-15 14:34:33 +08:00
|
|
|
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
|
2017-08-20 17:25:22 +08:00
|
|
|
#include "llvm/CodeGen/GlobalISel/Utils.h"
|
2017-10-05 08:33:50 +08:00
|
|
|
#include "llvm/CodeGen/LowLevelType.h"
|
|
|
|
#include "llvm/CodeGen/MachineBasicBlock.h"
|
|
|
|
#include "llvm/CodeGen/MachineFrameInfo.h"
|
|
|
|
#include "llvm/CodeGen/MachineFunction.h"
|
|
|
|
#include "llvm/CodeGen/MachineInstrBuilder.h"
|
|
|
|
#include "llvm/CodeGen/MachineMemOperand.h"
|
|
|
|
#include "llvm/CodeGen/MachineOperand.h"
|
2017-01-29 16:35:42 +08:00
|
|
|
#include "llvm/CodeGen/MachineRegisterInfo.h"
|
2017-11-08 09:01:31 +08:00
|
|
|
#include "llvm/CodeGen/TargetInstrInfo.h"
|
2017-11-17 09:07:10 +08:00
|
|
|
#include "llvm/CodeGen/TargetSubtargetInfo.h"
|
2018-03-30 01:21:10 +08:00
|
|
|
#include "llvm/CodeGen/ValueTypes.h"
|
2017-10-05 08:33:50 +08:00
|
|
|
#include "llvm/IR/Attributes.h"
|
|
|
|
#include "llvm/IR/DataLayout.h"
|
|
|
|
#include "llvm/IR/Function.h"
|
|
|
|
#include "llvm/IR/Value.h"
|
|
|
|
#include "llvm/MC/MCRegisterInfo.h"
|
|
|
|
#include "llvm/Support/LowLevelTypeImpl.h"
|
2018-03-24 07:58:25 +08:00
|
|
|
#include "llvm/Support/MachineValueType.h"
|
2017-10-05 08:33:50 +08:00
|
|
|
#include <cassert>
|
|
|
|
#include <cstdint>
|
2016-11-15 14:34:33 +08:00
|
|
|
|
|
|
|
using namespace llvm;
|
|
|
|
|
2017-01-29 16:35:42 +08:00
|
|
|
#include "X86GenCallingConv.inc"
|
|
|
|
|
2016-11-15 14:34:33 +08:00
|
|
|
// Construct the X86 call-lowering implementation; simply forwards the
// target-lowering object to the generic GlobalISel CallLowering base.
X86CallLowering::X86CallLowering(const X86TargetLowering &TLI)
    : CallLowering(&TLI) {}
|
|
|
|
|
2017-07-05 14:24:13 +08:00
|
|
|
/// Break \p OrigArg into one ArgInfo per register the target assigns to it.
///
/// If the IR type maps to a single register, a single ArgInfo with the
/// EVT-derived type is appended to \p SplitArgs. If it maps to several
/// registers, one ArgInfo (with a fresh generic vreg) is appended per part
/// and \p PerformArgSplit is invoked with the new vregs so the caller can
/// emit the merge/unmerge connecting them to the original value.
///
/// \return false for cases not yet supported (aggregates that split into
/// multiple EVTs).
bool X86CallLowering::splitToValueTypes(const ArgInfo &OrigArg,
                                        SmallVectorImpl<ArgInfo> &SplitArgs,
                                        const DataLayout &DL,
                                        MachineRegisterInfo &MRI,
                                        SplitArgTy PerformArgSplit) const {
  const X86TargetLowering &TLI = *getTLI<X86TargetLowering>();
  LLVMContext &Context = OrigArg.Ty->getContext();

  SmallVector<EVT, 4> SplitVTs;
  SmallVector<uint64_t, 4> Offsets;
  ComputeValueVTs(TLI, DL, OrigArg.Ty, SplitVTs, &Offsets, 0);

  if (SplitVTs.size() != 1) {
    // TODO: support struct/array split
    return false;
  }

  EVT VT = SplitVTs[0];
  unsigned NumParts = TLI.getNumRegisters(Context, VT);

  if (NumParts == 1) {
    // replace the original type ( pointer -> GPR ).
    SplitArgs.emplace_back(OrigArg.Reg, VT.getTypeForEVT(Context),
                           OrigArg.Flags, OrigArg.IsFixed);
    return true;
  }

  SmallVector<unsigned, 8> SplitRegs;

  EVT PartVT = TLI.getRegisterType(Context, VT);
  Type *PartTy = PartVT.getTypeForEVT(Context);

  // One fresh generic vreg per register-sized part; each part inherits the
  // original argument's flags.
  for (unsigned i = 0; i < NumParts; ++i) {
    ArgInfo Info =
        ArgInfo{MRI.createGenericVirtualRegister(getLLTForType(*PartTy, DL)),
                PartTy, OrigArg.Flags};
    SplitArgs.push_back(Info);
    SplitRegs.push_back(Info.Reg);
  }

  // Let the caller stitch the parts to/from the original value.
  PerformArgSplit(SplitRegs);
  return true;
}
|
|
|
|
|
|
|
|
namespace {
|
2017-10-05 08:33:50 +08:00
|
|
|
|
2017-08-20 17:25:22 +08:00
|
|
|
struct OutgoingValueHandler : public CallLowering::ValueHandler {
|
|
|
|
OutgoingValueHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
|
|
|
|
MachineInstrBuilder &MIB, CCAssignFn *AssignFn)
|
2017-10-05 08:33:50 +08:00
|
|
|
: ValueHandler(MIRBuilder, MRI, AssignFn), MIB(MIB),
|
2017-08-20 17:25:22 +08:00
|
|
|
DL(MIRBuilder.getMF().getDataLayout()),
|
2017-10-05 08:33:50 +08:00
|
|
|
STI(MIRBuilder.getMF().getSubtarget<X86Subtarget>()) {}
|
2017-02-06 16:37:41 +08:00
|
|
|
|
|
|
|
unsigned getStackAddress(uint64_t Size, int64_t Offset,
|
|
|
|
MachinePointerInfo &MPO) override {
|
2017-08-20 17:25:22 +08:00
|
|
|
LLT p0 = LLT::pointer(0, DL.getPointerSizeInBits(0));
|
|
|
|
LLT SType = LLT::scalar(DL.getPointerSizeInBits(0));
|
|
|
|
unsigned SPReg = MRI.createGenericVirtualRegister(p0);
|
|
|
|
MIRBuilder.buildCopy(SPReg, STI.getRegisterInfo()->getStackRegister());
|
|
|
|
|
|
|
|
unsigned OffsetReg = MRI.createGenericVirtualRegister(SType);
|
|
|
|
MIRBuilder.buildConstant(OffsetReg, Offset);
|
|
|
|
|
|
|
|
unsigned AddrReg = MRI.createGenericVirtualRegister(p0);
|
|
|
|
MIRBuilder.buildGEP(AddrReg, SPReg, OffsetReg);
|
|
|
|
|
|
|
|
MPO = MachinePointerInfo::getStack(MIRBuilder.getMF(), Offset);
|
|
|
|
return AddrReg;
|
2017-02-06 16:37:41 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
void assignValueToReg(unsigned ValVReg, unsigned PhysReg,
|
|
|
|
CCValAssign &VA) override {
|
|
|
|
MIB.addUse(PhysReg, RegState::Implicit);
|
2018-02-09 06:41:47 +08:00
|
|
|
|
|
|
|
unsigned ExtReg;
|
|
|
|
// If we are copying the value to a physical register with the
|
|
|
|
// size larger than the size of the value itself - build AnyExt
|
|
|
|
// to the size of the register first and only then do the copy.
|
|
|
|
// The example of that would be copying from s32 to xmm0, for which
|
|
|
|
// case ValVT == LocVT == MVT::f32. If LocSize and ValSize are not equal
|
|
|
|
// we expect normal extendRegister mechanism to work.
|
|
|
|
unsigned PhysRegSize =
|
|
|
|
MRI.getTargetRegisterInfo()->getRegSizeInBits(PhysReg, MRI);
|
|
|
|
unsigned ValSize = VA.getValVT().getSizeInBits();
|
|
|
|
unsigned LocSize = VA.getLocVT().getSizeInBits();
|
|
|
|
if (PhysRegSize > ValSize && LocSize == ValSize) {
|
|
|
|
assert((PhysRegSize == 128 || PhysRegSize == 80) && "We expect that to be 128 bit");
|
|
|
|
auto MIB = MIRBuilder.buildAnyExt(LLT::scalar(PhysRegSize), ValVReg);
|
|
|
|
ExtReg = MIB->getOperand(0).getReg();
|
|
|
|
} else
|
|
|
|
ExtReg = extendRegister(ValVReg, VA);
|
|
|
|
|
2017-02-06 16:37:41 +08:00
|
|
|
MIRBuilder.buildCopy(PhysReg, ExtReg);
|
|
|
|
}
|
|
|
|
|
|
|
|
void assignValueToAddress(unsigned ValVReg, unsigned Addr, uint64_t Size,
|
|
|
|
MachinePointerInfo &MPO, CCValAssign &VA) override {
|
2017-08-20 17:25:22 +08:00
|
|
|
unsigned ExtReg = extendRegister(ValVReg, VA);
|
|
|
|
auto MMO = MIRBuilder.getMF().getMachineMemOperand(
|
|
|
|
MPO, MachineMemOperand::MOStore, VA.getLocVT().getStoreSize(),
|
|
|
|
/* Alignment */ 0);
|
|
|
|
MIRBuilder.buildStore(ExtReg, Addr, *MMO);
|
2017-02-06 16:37:41 +08:00
|
|
|
}
|
|
|
|
|
2017-08-20 17:25:22 +08:00
|
|
|
bool assignArg(unsigned ValNo, MVT ValVT, MVT LocVT,
|
|
|
|
CCValAssign::LocInfo LocInfo,
|
|
|
|
const CallLowering::ArgInfo &Info, CCState &State) override {
|
|
|
|
bool Res = AssignFn(ValNo, ValVT, LocVT, LocInfo, Info.Flags, State);
|
|
|
|
StackSize = State.getNextStackOffset();
|
2017-08-30 23:10:15 +08:00
|
|
|
|
|
|
|
static const MCPhysReg XMMArgRegs[] = {X86::XMM0, X86::XMM1, X86::XMM2,
|
|
|
|
X86::XMM3, X86::XMM4, X86::XMM5,
|
|
|
|
X86::XMM6, X86::XMM7};
|
|
|
|
if (!Info.IsFixed)
|
|
|
|
NumXMMRegs = State.getFirstUnallocated(XMMArgRegs);
|
|
|
|
|
2017-08-20 17:25:22 +08:00
|
|
|
return Res;
|
|
|
|
}
|
|
|
|
|
|
|
|
uint64_t getStackSize() { return StackSize; }
|
2017-08-30 23:10:15 +08:00
|
|
|
uint64_t getNumXmmRegs() { return NumXMMRegs; }
|
2017-08-20 17:25:22 +08:00
|
|
|
|
|
|
|
protected:
|
2017-02-06 16:37:41 +08:00
|
|
|
MachineInstrBuilder &MIB;
|
2017-10-05 08:33:50 +08:00
|
|
|
uint64_t StackSize = 0;
|
2017-08-20 17:25:22 +08:00
|
|
|
const DataLayout &DL;
|
|
|
|
const X86Subtarget &STI;
|
2017-10-05 08:33:50 +08:00
|
|
|
unsigned NumXMMRegs = 0;
|
2017-02-06 16:37:41 +08:00
|
|
|
};
|
2017-10-05 08:33:50 +08:00
|
|
|
|
|
|
|
} // end anonymous namespace
|
2017-02-06 16:37:41 +08:00
|
|
|
|
2016-11-15 14:34:33 +08:00
|
|
|
/// Lower an IR return of \p Val (held in \p VReg) into a RET instruction,
/// splitting the value per the return calling convention and copying the
/// parts into their assigned physical registers.
bool X86CallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
                                  const Value *Val, unsigned VReg) const {
  assert(((Val && VReg) || (!Val && !VReg)) && "Return value without a vreg");

  // Build RET floating so register uses can be attached before insertion.
  auto MIB = MIRBuilder.buildInstrNoInsert(X86::RET).addImm(0);

  if (VReg) {
    MachineFunction &MF = MIRBuilder.getMF();
    MachineRegisterInfo &MRI = MF.getRegInfo();
    auto &DL = MF.getDataLayout();
    const Function &F = MF.getFunction();

    ArgInfo OrigArg{VReg, Val->getType()};
    setArgFlags(OrigArg, AttributeList::ReturnIndex, DL, F);

    SmallVector<ArgInfo, 8> SplitArgs;
    // Split multi-register returns; the callback unmerges VReg into the
    // per-part vregs created by splitToValueTypes.
    if (!splitToValueTypes(OrigArg, SplitArgs, DL, MRI,
                           [&](ArrayRef<unsigned> Regs) {
                             MIRBuilder.buildUnmerge(Regs, VReg);
                           }))
      return false;

    OutgoingValueHandler Handler(MIRBuilder, MRI, MIB, RetCC_X86);
    if (!handleAssignments(MIRBuilder, SplitArgs, Handler))
      return false;
  }

  MIRBuilder.insertInstr(MIB);
  return true;
}
|
|
|
|
|
2017-01-29 16:35:42 +08:00
|
|
|
namespace {

/// ValueHandler for incoming values (formal parameters and call results):
/// loads stack-passed values from fixed frame objects and copies
/// register-passed values out of their physical registers. How the physical
/// register is marked is deferred to subclasses via markPhysRegUsed.
struct IncomingValueHandler : public CallLowering::ValueHandler {
  IncomingValueHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                       CCAssignFn *AssignFn)
      : ValueHandler(MIRBuilder, MRI, AssignFn),
        DL(MIRBuilder.getMF().getDataLayout()) {}

  /// Create a fixed frame object for an incoming stack argument and return
  /// a pointer vreg addressing it; \p MPO is filled in for the load.
  unsigned getStackAddress(uint64_t Size, int64_t Offset,
                           MachinePointerInfo &MPO) override {
    auto &MFI = MIRBuilder.getMF().getFrameInfo();
    int FI = MFI.CreateFixedObject(Size, Offset, true);
    MPO = MachinePointerInfo::getFixedStack(MIRBuilder.getMF(), FI);

    unsigned AddrReg = MRI.createGenericVirtualRegister(
        LLT::pointer(0, DL.getPointerSizeInBits(0)));
    MIRBuilder.buildFrameIndex(AddrReg, FI);
    return AddrReg;
  }

  void assignValueToAddress(unsigned ValVReg, unsigned Addr, uint64_t Size,
                            MachinePointerInfo &MPO, CCValAssign &VA) override {
    // Incoming argument slots are invariant for the duration of the call.
    auto MMO = MIRBuilder.getMF().getMachineMemOperand(
        MPO, MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant, Size,
        0);
    MIRBuilder.buildLoad(ValVReg, Addr, *MMO);
  }

  void assignValueToReg(unsigned ValVReg, unsigned PhysReg,
                        CCValAssign &VA) override {
    markPhysRegUsed(PhysReg);

    switch (VA.getLocInfo()) {
    default: {
      // If we are copying the value from a physical register with the
      // size larger than the size of the value itself - build the copy
      // of the phys reg first and then build the truncation of that copy.
      // The example of that would be copying from xmm0 to s32, for which
      // case ValVT == LocVT == MVT::f32. If LocSize and ValSize are not equal
      // we expect this to be handled in SExt/ZExt/AExt case.
      unsigned PhysRegSize =
          MRI.getTargetRegisterInfo()->getRegSizeInBits(PhysReg, MRI);
      unsigned ValSize = VA.getValVT().getSizeInBits();
      unsigned LocSize = VA.getLocVT().getSizeInBits();
      if (PhysRegSize > ValSize && LocSize == ValSize) {
        auto Copy = MIRBuilder.buildCopy(LLT::scalar(PhysRegSize), PhysReg);
        MIRBuilder.buildTrunc(ValVReg, Copy);
        return;
      }

      MIRBuilder.buildCopy(ValVReg, PhysReg);
      break;
    }
    case CCValAssign::LocInfo::SExt:
    case CCValAssign::LocInfo::ZExt:
    case CCValAssign::LocInfo::AExt: {
      // The location was extended to LocVT; copy at that width, then
      // truncate back to the value's own width.
      auto Copy = MIRBuilder.buildCopy(LLT{VA.getLocVT()}, PhysReg);
      MIRBuilder.buildTrunc(ValVReg, Copy);
      break;
    }
    }
  }

  /// How the physical register gets marked varies between formal
  /// parameters (it's a basic-block live-in), and a call instruction
  /// (it's an implicit-def of the call).
  virtual void markPhysRegUsed(unsigned PhysReg) = 0;

protected:
  const DataLayout &DL;
};

/// Incoming handler for formal parameters: argument registers become
/// live-ins of the entry basic block.
struct FormalArgHandler : public IncomingValueHandler {
  FormalArgHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                   CCAssignFn *AssignFn)
      : IncomingValueHandler(MIRBuilder, MRI, AssignFn) {}

  void markPhysRegUsed(unsigned PhysReg) override {
    MIRBuilder.getMBB().addLiveIn(PhysReg);
  }
};

/// Incoming handler for call results: result registers become implicit
/// defs of the call instruction \p MIB.
struct CallReturnHandler : public IncomingValueHandler {
  CallReturnHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                    CCAssignFn *AssignFn, MachineInstrBuilder &MIB)
      : IncomingValueHandler(MIRBuilder, MRI, AssignFn), MIB(MIB) {}

  void markPhysRegUsed(unsigned PhysReg) override {
    MIB.addDef(PhysReg, RegState::Implicit);
  }

protected:
  MachineInstrBuilder &MIB;
};

} // end anonymous namespace
|
2017-01-29 16:35:42 +08:00
|
|
|
|
2016-11-15 14:34:33 +08:00
|
|
|
/// Lower the formal arguments of \p F into copies/loads from their
/// convention-assigned locations into the vregs \p VRegs, inserted at the
/// start of the entry block.
///
/// \return false for unsupported cases (varargs, byval/inreg/sret/
/// swiftself/swifterror/nest attributes, or unsplittable types).
bool X86CallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder,
                                           const Function &F,
                                           ArrayRef<unsigned> VRegs) const {
  if (F.arg_empty())
    return true;

  // TODO: handle variadic function
  if (F.isVarArg())
    return false;

  MachineFunction &MF = MIRBuilder.getMF();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  // Bind by reference: getDataLayout() returns a const reference and the
  // sibling lowerReturn/lowerCall bind it the same way (avoids a copy).
  auto &DL = MF.getDataLayout();

  SmallVector<ArgInfo, 8> SplitArgs;
  unsigned Idx = 0;
  for (auto &Arg : F.args()) {

    // TODO: handle not simple cases.
    if (Arg.hasAttribute(Attribute::ByVal) ||
        Arg.hasAttribute(Attribute::InReg) ||
        Arg.hasAttribute(Attribute::StructRet) ||
        Arg.hasAttribute(Attribute::SwiftSelf) ||
        Arg.hasAttribute(Attribute::SwiftError) ||
        Arg.hasAttribute(Attribute::Nest))
      return false;

    ArgInfo OrigArg(VRegs[Idx], Arg.getType());
    setArgFlags(OrigArg, Idx + AttributeList::FirstArgIndex, DL, F);
    // For multi-register arguments, merge the incoming parts back into the
    // argument's vreg.
    if (!splitToValueTypes(OrigArg, SplitArgs, DL, MRI,
                           [&](ArrayRef<unsigned> Regs) {
                             MIRBuilder.buildMerge(VRegs[Idx], Regs);
                           }))
      return false;
    Idx++;
  }

  // Insert the argument copies/loads before any code already in the entry
  // block.
  MachineBasicBlock &MBB = MIRBuilder.getMBB();
  if (!MBB.empty())
    MIRBuilder.setInstr(*MBB.begin());

  FormalArgHandler Handler(MIRBuilder, MRI, CC_X86);
  if (!handleAssignments(MIRBuilder, SplitArgs, Handler))
    return false;

  // Move back to the end of the basic block.
  MIRBuilder.setMBB(MBB);

  return true;
}
|
2017-08-20 17:25:22 +08:00
|
|
|
|
|
|
|
/// Lower a call to \p Callee: marshal \p OrigArgs into their assigned
/// registers/stack slots, emit the call-frame setup/destroy pseudos and the
/// call itself, and copy the returned value (if any) into \p OrigRet.Reg.
///
/// \return false for unsupported cases (non-Linux targets, calling
/// conventions other than C / X86_64_SysV, byval arguments, or
/// unsplittable types).
bool X86CallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
                                CallingConv::ID CallConv,
                                const MachineOperand &Callee,
                                const ArgInfo &OrigRet,
                                ArrayRef<ArgInfo> OrigArgs) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const Function &F = MF.getFunction();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  auto &DL = F.getParent()->getDataLayout();
  const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
  const TargetInstrInfo &TII = *STI.getInstrInfo();
  auto TRI = STI.getRegisterInfo();

  // Handle only Linux C, X86_64_SysV calling conventions for now.
  if (!STI.isTargetLinux() ||
      !(CallConv == CallingConv::C || CallConv == CallingConv::X86_64_SysV))
    return false;

  // The frame-setup pseudo's immediates are patched in after the argument
  // sizes are known (see CallSeqStart.addImm below).
  unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
  auto CallSeqStart = MIRBuilder.buildInstr(AdjStackDown);

  // Create a temporarily-floating call instruction so we can add the implicit
  // uses of arg registers.
  bool Is64Bit = STI.is64Bit();
  unsigned CallOpc = Callee.isReg()
                         ? (Is64Bit ? X86::CALL64r : X86::CALL32r)
                         : (Is64Bit ? X86::CALL64pcrel32 : X86::CALLpcrel32);

  auto MIB = MIRBuilder.buildInstrNoInsert(CallOpc).add(Callee).addRegMask(
      TRI->getCallPreservedMask(MF, CallConv));

  SmallVector<ArgInfo, 8> SplitArgs;
  for (const auto &OrigArg : OrigArgs) {

    // TODO: handle not simple cases.
    if (OrigArg.Flags.isByVal())
      return false;

    if (!splitToValueTypes(OrigArg, SplitArgs, DL, MRI,
                           [&](ArrayRef<unsigned> Regs) {
                             MIRBuilder.buildUnmerge(Regs, OrigArg.Reg);
                           }))
      return false;
  }
  // Do the actual argument marshalling.
  OutgoingValueHandler Handler(MIRBuilder, MRI, MIB, CC_X86);
  if (!handleAssignments(MIRBuilder, SplitArgs, Handler))
    return false;

  bool IsFixed = OrigArgs.empty() ? true : OrigArgs.back().IsFixed;
  if (STI.is64Bit() && !IsFixed && !STI.isCallingConvWin64(CallConv)) {
    // From AMD64 ABI document:
    // For calls that may call functions that use varargs or stdargs
    // (prototype-less calls or calls to functions containing ellipsis (...) in
    // the declaration) %al is used as hidden argument to specify the number
    // of SSE registers used. The contents of %al do not need to match exactly
    // the number of registers, but must be an ubound on the number of SSE
    // registers used and is in the range 0 - 8 inclusive.

    MIRBuilder.buildInstr(X86::MOV8ri)
        .addDef(X86::AL)
        .addImm(Handler.getNumXmmRegs());
    MIB.addUse(X86::AL, RegState::Implicit);
  }

  // Now we can add the actual call instruction to the correct basic block.
  MIRBuilder.insertInstr(MIB);

  // If Callee is a reg, since it is used by a target specific
  // instruction, it must have a register class matching the
  // constraint of that instruction.
  if (Callee.isReg())
    MIB->getOperand(0).setReg(constrainOperandRegClass(
        MF, *TRI, MRI, *MF.getSubtarget().getInstrInfo(),
        *MF.getSubtarget().getRegBankInfo(), *MIB, MIB->getDesc(), Callee, 0));

  // Finally we can copy the returned value back into its virtual-register. In
  // symmetry with the arguments, the physical register must be an
  // implicit-define of the call instruction.

  if (OrigRet.Reg) {
    SplitArgs.clear();
    SmallVector<unsigned, 8> NewRegs;

    // The callback records the per-part result vregs so they can be merged
    // into OrigRet.Reg after assignment.
    if (!splitToValueTypes(OrigRet, SplitArgs, DL, MRI,
                           [&](ArrayRef<unsigned> Regs) {
                             NewRegs.assign(Regs.begin(), Regs.end());
                           }))
      return false;

    CallReturnHandler Handler(MIRBuilder, MRI, RetCC_X86, MIB);
    if (!handleAssignments(MIRBuilder, SplitArgs, Handler))
      return false;

    if (!NewRegs.empty())
      MIRBuilder.buildMerge(OrigRet.Reg, NewRegs);
  }

  // Patch the argument stack size into the frame-setup pseudo and emit the
  // matching frame-destroy.
  CallSeqStart.addImm(Handler.getStackSize())
      .addImm(0 /* see getFrameTotalSize */)
      .addImm(0 /* see getFrameAdjustment */);

  unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
  MIRBuilder.buildInstr(AdjStackUp)
      .addImm(Handler.getStackSize())
      .addImm(0 /* NumBytesForCalleeToPop */);

  return true;
}
|