//===- llvm/lib/Target/X86/X86CallLowering.cpp - Call lowering ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file implements the lowering of LLVM calls to machine code calls for
/// GlobalISel.
//
//===----------------------------------------------------------------------===//
|
|
#include "X86CallLowering.h"
|
2017-03-23 20:13:29 +08:00
|
|
|
#include "X86CallingConv.h"
|
2016-11-15 14:34:33 +08:00
|
|
|
#include "X86ISelLowering.h"
|
|
|
|
#include "X86InstrInfo.h"
|
2017-10-05 08:33:50 +08:00
|
|
|
#include "X86RegisterInfo.h"
|
|
|
|
#include "X86Subtarget.h"
|
|
|
|
#include "llvm/ADT/ArrayRef.h"
|
|
|
|
#include "llvm/ADT/SmallVector.h"
|
2017-07-05 14:24:13 +08:00
|
|
|
#include "llvm/CodeGen/Analysis.h"
|
2017-10-05 08:33:50 +08:00
|
|
|
#include "llvm/CodeGen/CallingConvLower.h"
|
2016-11-15 14:34:33 +08:00
|
|
|
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
|
2017-08-20 17:25:22 +08:00
|
|
|
#include "llvm/CodeGen/GlobalISel/Utils.h"
|
2017-10-05 08:33:50 +08:00
|
|
|
#include "llvm/CodeGen/LowLevelType.h"
|
|
|
|
#include "llvm/CodeGen/MachineBasicBlock.h"
|
|
|
|
#include "llvm/CodeGen/MachineFrameInfo.h"
|
|
|
|
#include "llvm/CodeGen/MachineFunction.h"
|
|
|
|
#include "llvm/CodeGen/MachineInstrBuilder.h"
|
|
|
|
#include "llvm/CodeGen/MachineMemOperand.h"
|
|
|
|
#include "llvm/CodeGen/MachineOperand.h"
|
2017-01-29 16:35:42 +08:00
|
|
|
#include "llvm/CodeGen/MachineRegisterInfo.h"
|
2017-11-08 09:01:31 +08:00
|
|
|
#include "llvm/CodeGen/TargetInstrInfo.h"
|
2017-11-17 09:07:10 +08:00
|
|
|
#include "llvm/CodeGen/TargetSubtargetInfo.h"
|
2018-03-30 01:21:10 +08:00
|
|
|
#include "llvm/CodeGen/ValueTypes.h"
|
2017-10-05 08:33:50 +08:00
|
|
|
#include "llvm/IR/Attributes.h"
|
|
|
|
#include "llvm/IR/DataLayout.h"
|
|
|
|
#include "llvm/IR/Function.h"
|
|
|
|
#include "llvm/IR/Value.h"
|
|
|
|
#include "llvm/MC/MCRegisterInfo.h"
|
|
|
|
#include "llvm/Support/LowLevelTypeImpl.h"
|
2018-03-24 07:58:25 +08:00
|
|
|
#include "llvm/Support/MachineValueType.h"
|
2017-10-05 08:33:50 +08:00
|
|
|
#include <cassert>
|
|
|
|
#include <cstdint>
|
2016-11-15 14:34:33 +08:00
|
|
|
|
|
|
|
using namespace llvm;
|
|
|
|
|
|
|
|
// Trivial constructor: forward the X86 target-lowering info to the generic
// CallLowering base so the shared GlobalISel machinery can query it.
X86CallLowering::X86CallLowering(const X86TargetLowering &TLI)
    : CallLowering(&TLI) {}
|
|
|
|
|
2017-07-05 14:24:13 +08:00
|
|
|
bool X86CallLowering::splitToValueTypes(const ArgInfo &OrigArg,
|
2017-02-06 16:37:41 +08:00
|
|
|
SmallVectorImpl<ArgInfo> &SplitArgs,
|
|
|
|
const DataLayout &DL,
|
|
|
|
MachineRegisterInfo &MRI,
|
|
|
|
SplitArgTy PerformArgSplit) const {
|
|
|
|
const X86TargetLowering &TLI = *getTLI<X86TargetLowering>();
|
|
|
|
LLVMContext &Context = OrigArg.Ty->getContext();
|
2017-07-05 14:24:13 +08:00
|
|
|
|
|
|
|
SmallVector<EVT, 4> SplitVTs;
|
|
|
|
SmallVector<uint64_t, 4> Offsets;
|
|
|
|
ComputeValueVTs(TLI, DL, OrigArg.Ty, SplitVTs, &Offsets, 0);
|
2019-06-27 16:50:53 +08:00
|
|
|
assert(OrigArg.Regs.size() == 1 && "Can't handle multple regs yet");
|
2017-07-05 14:24:13 +08:00
|
|
|
|
2018-08-02 16:33:31 +08:00
|
|
|
if (OrigArg.Ty->isVoidTy())
|
|
|
|
return true;
|
2017-07-05 14:24:13 +08:00
|
|
|
|
|
|
|
EVT VT = SplitVTs[0];
|
2017-02-06 16:37:41 +08:00
|
|
|
unsigned NumParts = TLI.getNumRegisters(Context, VT);
|
|
|
|
|
|
|
|
if (NumParts == 1) {
|
2017-03-23 23:25:57 +08:00
|
|
|
// replace the original type ( pointer -> GPR ).
|
2019-06-27 16:50:53 +08:00
|
|
|
SplitArgs.emplace_back(OrigArg.Regs[0], VT.getTypeForEVT(Context),
|
2017-03-23 23:25:57 +08:00
|
|
|
OrigArg.Flags, OrigArg.IsFixed);
|
2017-07-05 14:24:13 +08:00
|
|
|
return true;
|
2017-02-06 16:37:41 +08:00
|
|
|
}
|
|
|
|
|
2019-06-24 23:50:29 +08:00
|
|
|
SmallVector<Register, 8> SplitRegs;
|
2017-02-06 16:37:41 +08:00
|
|
|
|
|
|
|
EVT PartVT = TLI.getRegisterType(Context, VT);
|
|
|
|
Type *PartTy = PartVT.getTypeForEVT(Context);
|
|
|
|
|
|
|
|
for (unsigned i = 0; i < NumParts; ++i) {
|
Recommit: [globalisel] Change LLT constructor string into an LLT-based object that knows how to generate it.
Summary:
This will allow future patches to inspect the details of the LLT. The implementation is now split between
the Support and CodeGen libraries to allow TableGen to use this class without introducing layering concerns.
Thanks to Ahmed Bougacha for finding a reasonable way to avoid the layering issue and providing the version of this patch without that problem.
The problem with the previous commit appears to have been that TableGen was including CodeGen/LowLevelType.h instead of Support/LowLevelTypeImpl.h.
Reviewers: t.p.northover, qcolombet, rovka, aditya_nandakumar, ab, javed.absar
Subscribers: arsenm, nhaehnle, mgorny, dberris, llvm-commits, kristof.beyls
Differential Revision: https://reviews.llvm.org/D30046
llvm-svn: 297241
2017-03-08 07:20:35 +08:00
|
|
|
ArgInfo Info =
|
|
|
|
ArgInfo{MRI.createGenericVirtualRegister(getLLTForType(*PartTy, DL)),
|
|
|
|
PartTy, OrigArg.Flags};
|
2017-02-06 16:37:41 +08:00
|
|
|
SplitArgs.push_back(Info);
|
2019-06-27 16:50:53 +08:00
|
|
|
SplitRegs.push_back(Info.Regs[0]);
|
2017-02-06 16:37:41 +08:00
|
|
|
}
|
2017-04-25 01:05:52 +08:00
|
|
|
|
|
|
|
PerformArgSplit(SplitRegs);
|
2017-07-05 14:24:13 +08:00
|
|
|
return true;
|
2017-02-06 16:37:41 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
namespace {
|
2017-10-05 08:33:50 +08:00
|
|
|
|
2017-08-20 17:25:22 +08:00
|
|
|
struct OutgoingValueHandler : public CallLowering::ValueHandler {
|
|
|
|
OutgoingValueHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
|
|
|
|
MachineInstrBuilder &MIB, CCAssignFn *AssignFn)
|
2017-10-05 08:33:50 +08:00
|
|
|
: ValueHandler(MIRBuilder, MRI, AssignFn), MIB(MIB),
|
2017-08-20 17:25:22 +08:00
|
|
|
DL(MIRBuilder.getMF().getDataLayout()),
|
2017-10-05 08:33:50 +08:00
|
|
|
STI(MIRBuilder.getMF().getSubtarget<X86Subtarget>()) {}
|
2017-02-06 16:37:41 +08:00
|
|
|
|
2019-06-25 00:16:12 +08:00
|
|
|
Register getStackAddress(uint64_t Size, int64_t Offset,
|
2017-02-06 16:37:41 +08:00
|
|
|
MachinePointerInfo &MPO) override {
|
2017-08-20 17:25:22 +08:00
|
|
|
LLT p0 = LLT::pointer(0, DL.getPointerSizeInBits(0));
|
|
|
|
LLT SType = LLT::scalar(DL.getPointerSizeInBits(0));
|
2019-06-25 00:16:12 +08:00
|
|
|
Register SPReg = MRI.createGenericVirtualRegister(p0);
|
2017-08-20 17:25:22 +08:00
|
|
|
MIRBuilder.buildCopy(SPReg, STI.getRegisterInfo()->getStackRegister());
|
|
|
|
|
2019-06-25 00:16:12 +08:00
|
|
|
Register OffsetReg = MRI.createGenericVirtualRegister(SType);
|
2017-08-20 17:25:22 +08:00
|
|
|
MIRBuilder.buildConstant(OffsetReg, Offset);
|
|
|
|
|
2019-06-25 00:16:12 +08:00
|
|
|
Register AddrReg = MRI.createGenericVirtualRegister(p0);
|
2017-08-20 17:25:22 +08:00
|
|
|
MIRBuilder.buildGEP(AddrReg, SPReg, OffsetReg);
|
|
|
|
|
|
|
|
MPO = MachinePointerInfo::getStack(MIRBuilder.getMF(), Offset);
|
|
|
|
return AddrReg;
|
2017-02-06 16:37:41 +08:00
|
|
|
}
|
|
|
|
|
2019-06-25 00:16:12 +08:00
|
|
|
void assignValueToReg(Register ValVReg, Register PhysReg,
|
2017-02-06 16:37:41 +08:00
|
|
|
CCValAssign &VA) override {
|
|
|
|
MIB.addUse(PhysReg, RegState::Implicit);
|
2018-02-09 06:41:47 +08:00
|
|
|
|
2019-06-25 00:16:12 +08:00
|
|
|
Register ExtReg;
|
2018-02-09 06:41:47 +08:00
|
|
|
// If we are copying the value to a physical register with the
|
|
|
|
// size larger than the size of the value itself - build AnyExt
|
|
|
|
// to the size of the register first and only then do the copy.
|
|
|
|
// The example of that would be copying from s32 to xmm0, for which
|
|
|
|
// case ValVT == LocVT == MVT::f32. If LocSize and ValSize are not equal
|
|
|
|
// we expect normal extendRegister mechanism to work.
|
|
|
|
unsigned PhysRegSize =
|
|
|
|
MRI.getTargetRegisterInfo()->getRegSizeInBits(PhysReg, MRI);
|
|
|
|
unsigned ValSize = VA.getValVT().getSizeInBits();
|
|
|
|
unsigned LocSize = VA.getLocVT().getSizeInBits();
|
|
|
|
if (PhysRegSize > ValSize && LocSize == ValSize) {
|
|
|
|
assert((PhysRegSize == 128 || PhysRegSize == 80) && "We expect that to be 128 bit");
|
|
|
|
auto MIB = MIRBuilder.buildAnyExt(LLT::scalar(PhysRegSize), ValVReg);
|
|
|
|
ExtReg = MIB->getOperand(0).getReg();
|
|
|
|
} else
|
|
|
|
ExtReg = extendRegister(ValVReg, VA);
|
|
|
|
|
2017-02-06 16:37:41 +08:00
|
|
|
MIRBuilder.buildCopy(PhysReg, ExtReg);
|
|
|
|
}
|
|
|
|
|
2019-06-25 00:16:12 +08:00
|
|
|
void assignValueToAddress(Register ValVReg, Register Addr, uint64_t Size,
|
2017-02-06 16:37:41 +08:00
|
|
|
MachinePointerInfo &MPO, CCValAssign &VA) override {
|
2019-06-25 00:16:12 +08:00
|
|
|
Register ExtReg = extendRegister(ValVReg, VA);
|
2017-08-20 17:25:22 +08:00
|
|
|
auto MMO = MIRBuilder.getMF().getMachineMemOperand(
|
|
|
|
MPO, MachineMemOperand::MOStore, VA.getLocVT().getStoreSize(),
|
2019-01-31 09:38:47 +08:00
|
|
|
/* Alignment */ 1);
|
2017-08-20 17:25:22 +08:00
|
|
|
MIRBuilder.buildStore(ExtReg, Addr, *MMO);
|
2017-02-06 16:37:41 +08:00
|
|
|
}
|
|
|
|
|
2017-08-20 17:25:22 +08:00
|
|
|
bool assignArg(unsigned ValNo, MVT ValVT, MVT LocVT,
|
|
|
|
CCValAssign::LocInfo LocInfo,
|
|
|
|
const CallLowering::ArgInfo &Info, CCState &State) override {
|
|
|
|
bool Res = AssignFn(ValNo, ValVT, LocVT, LocInfo, Info.Flags, State);
|
|
|
|
StackSize = State.getNextStackOffset();
|
2017-08-30 23:10:15 +08:00
|
|
|
|
|
|
|
static const MCPhysReg XMMArgRegs[] = {X86::XMM0, X86::XMM1, X86::XMM2,
|
|
|
|
X86::XMM3, X86::XMM4, X86::XMM5,
|
|
|
|
X86::XMM6, X86::XMM7};
|
|
|
|
if (!Info.IsFixed)
|
|
|
|
NumXMMRegs = State.getFirstUnallocated(XMMArgRegs);
|
|
|
|
|
2017-08-20 17:25:22 +08:00
|
|
|
return Res;
|
|
|
|
}
|
|
|
|
|
|
|
|
uint64_t getStackSize() { return StackSize; }
|
2017-08-30 23:10:15 +08:00
|
|
|
uint64_t getNumXmmRegs() { return NumXMMRegs; }
|
2017-08-20 17:25:22 +08:00
|
|
|
|
|
|
|
protected:
|
2017-02-06 16:37:41 +08:00
|
|
|
MachineInstrBuilder &MIB;
|
2017-10-05 08:33:50 +08:00
|
|
|
uint64_t StackSize = 0;
|
2017-08-20 17:25:22 +08:00
|
|
|
const DataLayout &DL;
|
|
|
|
const X86Subtarget &STI;
|
2017-10-05 08:33:50 +08:00
|
|
|
unsigned NumXMMRegs = 0;
|
2017-02-06 16:37:41 +08:00
|
|
|
};
|
2017-10-05 08:33:50 +08:00
|
|
|
|
|
|
|
} // end anonymous namespace
|
2017-02-06 16:37:41 +08:00
|
|
|
|
2018-08-02 16:33:31 +08:00
|
|
|
/// Lower an IR `ret` into a RET machine instruction, splitting the return
/// value into CC-assigned physical registers where needed.
///
/// \param Val   the IR return value, or null for `ret void`.
/// \param VRegs vregs holding \p Val, one per split EVT (empty iff no Val).
/// \return false if some part of the return cannot be handled yet.
bool X86CallLowering::lowerReturn(
    MachineIRBuilder &MIRBuilder, const Value *Val,
    ArrayRef<Register> VRegs) const {
  assert(((Val && !VRegs.empty()) || (!Val && VRegs.empty())) &&
         "Return value without a vreg");
  // Build RET floating (not yet inserted) so implicit register uses can be
  // attached by the value handler before insertion.
  auto MIB = MIRBuilder.buildInstrNoInsert(X86::RET).addImm(0);

  if (!VRegs.empty()) {
    MachineFunction &MF = MIRBuilder.getMF();
    const Function &F = MF.getFunction();
    MachineRegisterInfo &MRI = MF.getRegInfo();
    auto &DL = MF.getDataLayout();
    LLVMContext &Ctx = Val->getType()->getContext();
    const X86TargetLowering &TLI = *getTLI<X86TargetLowering>();

    SmallVector<EVT, 4> SplitEVTs;
    ComputeValueVTs(TLI, DL, Val->getType(), SplitEVTs);
    assert(VRegs.size() == SplitEVTs.size() &&
           "For each split Type there should be exactly one VReg.");

    // Split each returned value into register-legal pieces; multi-register
    // values are unmerged from their original vreg.
    SmallVector<ArgInfo, 8> SplitArgs;
    for (unsigned i = 0; i < SplitEVTs.size(); ++i) {
      ArgInfo CurArgInfo = ArgInfo{VRegs[i], SplitEVTs[i].getTypeForEVT(Ctx)};
      setArgFlags(CurArgInfo, AttributeList::ReturnIndex, DL, F);
      if (!splitToValueTypes(CurArgInfo, SplitArgs, DL, MRI,
                             [&](ArrayRef<Register> Regs) {
                               MIRBuilder.buildUnmerge(Regs, VRegs[i]);
                             }))
        return false;
    }

    // Copy the pieces into the return registers chosen by RetCC_X86.
    OutgoingValueHandler Handler(MIRBuilder, MRI, MIB, RetCC_X86);
    if (!handleAssignments(MIRBuilder, SplitArgs, Handler))
      return false;
  }

  MIRBuilder.insertInstr(MIB);
  return true;
}
|
|
|
|
|
2017-01-29 16:35:42 +08:00
|
|
|
namespace {
|
2017-10-05 08:33:50 +08:00
|
|
|
|
2017-08-20 17:25:22 +08:00
|
|
|
struct IncomingValueHandler : public CallLowering::ValueHandler {
|
|
|
|
IncomingValueHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
|
|
|
|
CCAssignFn *AssignFn)
|
|
|
|
: ValueHandler(MIRBuilder, MRI, AssignFn),
|
|
|
|
DL(MIRBuilder.getMF().getDataLayout()) {}
|
2017-01-29 16:35:42 +08:00
|
|
|
|
2019-04-10 05:22:33 +08:00
|
|
|
bool isArgumentHandler() const override { return true; }
|
|
|
|
|
2019-06-25 00:16:12 +08:00
|
|
|
Register getStackAddress(uint64_t Size, int64_t Offset,
|
2017-01-29 16:35:42 +08:00
|
|
|
MachinePointerInfo &MPO) override {
|
|
|
|
auto &MFI = MIRBuilder.getMF().getFrameInfo();
|
|
|
|
int FI = MFI.CreateFixedObject(Size, Offset, true);
|
|
|
|
MPO = MachinePointerInfo::getFixedStack(MIRBuilder.getMF(), FI);
|
|
|
|
|
2017-03-23 20:13:29 +08:00
|
|
|
unsigned AddrReg = MRI.createGenericVirtualRegister(
|
|
|
|
LLT::pointer(0, DL.getPointerSizeInBits(0)));
|
2017-01-29 16:35:42 +08:00
|
|
|
MIRBuilder.buildFrameIndex(AddrReg, FI);
|
|
|
|
return AddrReg;
|
|
|
|
}
|
|
|
|
|
2019-06-25 00:16:12 +08:00
|
|
|
void assignValueToAddress(Register ValVReg, Register Addr, uint64_t Size,
|
2017-01-29 16:35:42 +08:00
|
|
|
MachinePointerInfo &MPO, CCValAssign &VA) override {
|
|
|
|
auto MMO = MIRBuilder.getMF().getMachineMemOperand(
|
|
|
|
MPO, MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant, Size,
|
2019-01-31 09:38:47 +08:00
|
|
|
1);
|
2017-01-29 16:35:42 +08:00
|
|
|
MIRBuilder.buildLoad(ValVReg, Addr, *MMO);
|
|
|
|
}
|
|
|
|
|
2019-06-25 00:16:12 +08:00
|
|
|
void assignValueToReg(Register ValVReg, Register PhysReg,
|
2017-10-10 04:07:43 +08:00
|
|
|
CCValAssign &VA) override {
|
|
|
|
markPhysRegUsed(PhysReg);
|
2018-02-09 06:41:47 +08:00
|
|
|
|
2017-10-10 04:07:43 +08:00
|
|
|
switch (VA.getLocInfo()) {
|
2018-02-09 06:41:47 +08:00
|
|
|
default: {
|
|
|
|
// If we are copying the value from a physical register with the
|
|
|
|
// size larger than the size of the value itself - build the copy
|
|
|
|
// of the phys reg first and then build the truncation of that copy.
|
|
|
|
// The example of that would be copying from xmm0 to s32, for which
|
|
|
|
// case ValVT == LocVT == MVT::f32. If LocSize and ValSize are not equal
|
|
|
|
// we expect this to be handled in SExt/ZExt/AExt case.
|
|
|
|
unsigned PhysRegSize =
|
|
|
|
MRI.getTargetRegisterInfo()->getRegSizeInBits(PhysReg, MRI);
|
|
|
|
unsigned ValSize = VA.getValVT().getSizeInBits();
|
|
|
|
unsigned LocSize = VA.getLocVT().getSizeInBits();
|
|
|
|
if (PhysRegSize > ValSize && LocSize == ValSize) {
|
|
|
|
auto Copy = MIRBuilder.buildCopy(LLT::scalar(PhysRegSize), PhysReg);
|
|
|
|
MIRBuilder.buildTrunc(ValVReg, Copy);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2017-10-10 04:07:43 +08:00
|
|
|
MIRBuilder.buildCopy(ValVReg, PhysReg);
|
|
|
|
break;
|
2018-02-09 06:41:47 +08:00
|
|
|
}
|
2017-10-10 04:07:43 +08:00
|
|
|
case CCValAssign::LocInfo::SExt:
|
|
|
|
case CCValAssign::LocInfo::ZExt:
|
|
|
|
case CCValAssign::LocInfo::AExt: {
|
|
|
|
auto Copy = MIRBuilder.buildCopy(LLT{VA.getLocVT()}, PhysReg);
|
|
|
|
MIRBuilder.buildTrunc(ValVReg, Copy);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/// How the physical register gets marked varies between formal
|
|
|
|
/// parameters (it's a basic-block live-in), and a call instruction
|
|
|
|
/// (it's an implicit-def of the BL).
|
|
|
|
virtual void markPhysRegUsed(unsigned PhysReg) = 0;
|
|
|
|
|
2017-08-20 17:25:22 +08:00
|
|
|
protected:
|
|
|
|
const DataLayout &DL;
|
|
|
|
};
|
|
|
|
|
|
|
|
struct FormalArgHandler : public IncomingValueHandler {
|
|
|
|
FormalArgHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
|
|
|
|
CCAssignFn *AssignFn)
|
|
|
|
: IncomingValueHandler(MIRBuilder, MRI, AssignFn) {}
|
|
|
|
|
2017-10-10 04:07:43 +08:00
|
|
|
void markPhysRegUsed(unsigned PhysReg) override {
|
2017-01-29 16:35:42 +08:00
|
|
|
MIRBuilder.getMBB().addLiveIn(PhysReg);
|
|
|
|
}
|
2017-08-20 17:25:22 +08:00
|
|
|
};
|
2017-01-29 16:35:42 +08:00
|
|
|
|
2017-08-20 17:25:22 +08:00
|
|
|
struct CallReturnHandler : public IncomingValueHandler {
|
|
|
|
CallReturnHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
|
|
|
|
CCAssignFn *AssignFn, MachineInstrBuilder &MIB)
|
|
|
|
: IncomingValueHandler(MIRBuilder, MRI, AssignFn), MIB(MIB) {}
|
|
|
|
|
2017-10-10 04:07:43 +08:00
|
|
|
void markPhysRegUsed(unsigned PhysReg) override {
|
2017-08-20 17:25:22 +08:00
|
|
|
MIB.addDef(PhysReg, RegState::Implicit);
|
|
|
|
}
|
|
|
|
|
|
|
|
protected:
|
|
|
|
MachineInstrBuilder &MIB;
|
2017-01-29 16:35:42 +08:00
|
|
|
};
|
2017-08-20 17:25:22 +08:00
|
|
|
|
2017-10-05 08:33:50 +08:00
|
|
|
} // end anonymous namespace
|
2017-01-29 16:35:42 +08:00
|
|
|
|
2016-11-15 14:34:33 +08:00
|
|
|
bool X86CallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder,
|
|
|
|
const Function &F,
|
2019-06-24 23:50:29 +08:00
|
|
|
ArrayRef<Register> VRegs) const {
|
2017-01-29 16:35:42 +08:00
|
|
|
if (F.arg_empty())
|
|
|
|
return true;
|
|
|
|
|
2017-03-23 20:13:29 +08:00
|
|
|
// TODO: handle variadic function
|
2017-01-29 16:35:42 +08:00
|
|
|
if (F.isVarArg())
|
|
|
|
return false;
|
|
|
|
|
2017-02-06 16:37:41 +08:00
|
|
|
MachineFunction &MF = MIRBuilder.getMF();
|
|
|
|
MachineRegisterInfo &MRI = MF.getRegInfo();
|
|
|
|
auto DL = MF.getDataLayout();
|
2017-01-29 16:35:42 +08:00
|
|
|
|
2017-02-06 16:37:41 +08:00
|
|
|
SmallVector<ArgInfo, 8> SplitArgs;
|
2017-01-29 16:35:42 +08:00
|
|
|
unsigned Idx = 0;
|
Remove getArgumentList() in favor of arg_begin(), args(), etc
Users often call getArgumentList().size(), which is a linear way to get
the number of function arguments. arg_size(), on the other hand, is
constant time.
In general, the fact that arguments are stored in an iplist is an
implementation detail, so I've removed it from the Function interface
and moved all other users to the argument container APIs (arg_begin(),
arg_end(), args(), arg_size()).
Reviewed By: chandlerc
Differential Revision: https://reviews.llvm.org/D31052
llvm-svn: 298010
2017-03-17 06:59:15 +08:00
|
|
|
for (auto &Arg : F.args()) {
|
2017-07-05 19:40:35 +08:00
|
|
|
|
|
|
|
// TODO: handle not simple cases.
|
|
|
|
if (Arg.hasAttribute(Attribute::ByVal) ||
|
|
|
|
Arg.hasAttribute(Attribute::InReg) ||
|
|
|
|
Arg.hasAttribute(Attribute::StructRet) ||
|
|
|
|
Arg.hasAttribute(Attribute::SwiftSelf) ||
|
|
|
|
Arg.hasAttribute(Attribute::SwiftError) ||
|
|
|
|
Arg.hasAttribute(Attribute::Nest))
|
|
|
|
return false;
|
|
|
|
|
2017-02-06 16:37:41 +08:00
|
|
|
ArgInfo OrigArg(VRegs[Idx], Arg.getType());
|
2017-07-05 19:40:35 +08:00
|
|
|
setArgFlags(OrigArg, Idx + AttributeList::FirstArgIndex, DL, F);
|
2017-07-05 14:24:13 +08:00
|
|
|
if (!splitToValueTypes(OrigArg, SplitArgs, DL, MRI,
|
2019-06-24 23:50:29 +08:00
|
|
|
[&](ArrayRef<Register> Regs) {
|
2017-07-05 14:24:13 +08:00
|
|
|
MIRBuilder.buildMerge(VRegs[Idx], Regs);
|
|
|
|
}))
|
|
|
|
return false;
|
2017-01-29 16:35:42 +08:00
|
|
|
Idx++;
|
|
|
|
}
|
|
|
|
|
2017-02-06 16:37:41 +08:00
|
|
|
MachineBasicBlock &MBB = MIRBuilder.getMBB();
|
|
|
|
if (!MBB.empty())
|
2017-03-23 20:13:29 +08:00
|
|
|
MIRBuilder.setInstr(*MBB.begin());
|
2017-02-06 16:37:41 +08:00
|
|
|
|
2017-08-20 17:25:22 +08:00
|
|
|
FormalArgHandler Handler(MIRBuilder, MRI, CC_X86);
|
2017-02-06 16:37:41 +08:00
|
|
|
if (!handleAssignments(MIRBuilder, SplitArgs, Handler))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
// Move back to the end of the basic block.
|
|
|
|
MIRBuilder.setMBB(MBB);
|
|
|
|
|
|
|
|
return true;
|
2016-11-15 14:34:33 +08:00
|
|
|
}
|
2017-08-20 17:25:22 +08:00
|
|
|
|
|
|
|
/// Lower an IR call: emit the call-frame setup/destroy pair, marshal the
/// arguments into CC-assigned registers/stack slots, emit the CALL, and
/// copy any returned value back into \p OrigRet's vreg.
///
/// Currently restricted to Linux with the C / X86_64_SysV calling
/// conventions; returns false for anything else or any unsupported
/// argument form (e.g. byval).
bool X86CallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
                                CallingConv::ID CallConv,
                                const MachineOperand &Callee,
                                const ArgInfo &OrigRet,
                                ArrayRef<ArgInfo> OrigArgs) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const Function &F = MF.getFunction();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  auto &DL = F.getParent()->getDataLayout();
  const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
  const TargetInstrInfo &TII = *STI.getInstrInfo();
  auto TRI = STI.getRegisterInfo();

  // Handle only Linux C, X86_64_SysV calling conventions for now.
  if (!STI.isTargetLinux() ||
      !(CallConv == CallingConv::C || CallConv == CallingConv::X86_64_SysV))
    return false;

  // Open the call frame; the immediates (stack size etc.) are filled in
  // below once the handler has computed them.
  unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
  auto CallSeqStart = MIRBuilder.buildInstr(AdjStackDown);

  // Create a temporarily-floating call instruction so we can add the implicit
  // uses of arg registers.
  bool Is64Bit = STI.is64Bit();
  unsigned CallOpc = Callee.isReg()
                         ? (Is64Bit ? X86::CALL64r : X86::CALL32r)
                         : (Is64Bit ? X86::CALL64pcrel32 : X86::CALLpcrel32);

  auto MIB = MIRBuilder.buildInstrNoInsert(CallOpc).add(Callee).addRegMask(
      TRI->getCallPreservedMask(MF, CallConv));

  SmallVector<ArgInfo, 8> SplitArgs;
  for (const auto &OrigArg : OrigArgs) {

    // TODO: handle not simple cases.
    if (OrigArg.Flags.isByVal())
      return false;

    assert(OrigArg.Regs.size() == 1 && "Can't handle multple regs yet");
    if (!splitToValueTypes(OrigArg, SplitArgs, DL, MRI,
                           [&](ArrayRef<Register> Regs) {
                             MIRBuilder.buildUnmerge(Regs, OrigArg.Regs[0]);
                           }))
      return false;
  }
  // Do the actual argument marshalling.
  OutgoingValueHandler Handler(MIRBuilder, MRI, MIB, CC_X86);
  if (!handleAssignments(MIRBuilder, SplitArgs, Handler))
    return false;

  bool IsFixed = OrigArgs.empty() ? true : OrigArgs.back().IsFixed;
  if (STI.is64Bit() && !IsFixed && !STI.isCallingConvWin64(CallConv)) {
    // From AMD64 ABI document:
    // For calls that may call functions that use varargs or stdargs
    // (prototype-less calls or calls to functions containing ellipsis (...) in
    // the declaration) %al is used as hidden argument to specify the number
    // of SSE registers used. The contents of %al do not need to match exactly
    // the number of registers, but must be an ubound on the number of SSE
    // registers used and is in the range 0 - 8 inclusive.

    MIRBuilder.buildInstr(X86::MOV8ri)
        .addDef(X86::AL)
        .addImm(Handler.getNumXmmRegs());
    MIB.addUse(X86::AL, RegState::Implicit);
  }

  // Now we can add the actual call instruction to the correct basic block.
  MIRBuilder.insertInstr(MIB);

  // If Callee is a reg, since it is used by a target specific
  // instruction, it must have a register class matching the
  // constraint of that instruction.
  if (Callee.isReg())
    MIB->getOperand(0).setReg(constrainOperandRegClass(
        MF, *TRI, MRI, *MF.getSubtarget().getInstrInfo(),
        *MF.getSubtarget().getRegBankInfo(), *MIB, MIB->getDesc(), Callee, 0));

  // Finally we can copy the returned value back into its virtual-register. In
  // symmetry with the arguments, the physical register must be an
  // implicit-define of the call instruction.

  if (!OrigRet.Ty->isVoidTy()) {
    assert(OrigRet.Regs.size() == 1 && "Can't handle multple regs yet");

    SplitArgs.clear();
    SmallVector<Register, 8> NewRegs;

    // For a multi-register result, collect the part registers so they can
    // be merged into the original vreg after assignment.
    if (!splitToValueTypes(OrigRet, SplitArgs, DL, MRI,
                           [&](ArrayRef<Register> Regs) {
                             NewRegs.assign(Regs.begin(), Regs.end());
                           }))
      return false;

    CallReturnHandler Handler(MIRBuilder, MRI, RetCC_X86, MIB);
    if (!handleAssignments(MIRBuilder, SplitArgs, Handler))
      return false;

    if (!NewRegs.empty())
      MIRBuilder.buildMerge(OrigRet.Regs[0], NewRegs);
  }

  // Backfill the frame-setup immediates now that the handler knows the
  // outgoing stack size, then close the call frame.
  CallSeqStart.addImm(Handler.getStackSize())
      .addImm(0 /* see getFrameTotalSize */)
      .addImm(0 /* see getFrameAdjustment */);

  unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
  MIRBuilder.buildInstr(AdjStackUp)
      .addImm(Handler.getStackSize())
      .addImm(0 /* NumBytesForCalleeToPop */);

  return true;
}
|