//===-- llvm/lib/Target/ARM/ARMCallLowering.cpp - Call lowering -----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements the lowering of LLVM calls to machine code calls for
/// GlobalISel.
///
//===----------------------------------------------------------------------===//

#include "ARMCallLowering.h"

#include "ARMBaseInstrInfo.h"
#include "ARMISelLowering.h"
#include "ARMSubtarget.h"

#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"

using namespace llvm;

#ifndef LLVM_BUILD_GLOBAL_ISEL
#error "This shouldn't be built without GISel"
#endif

ARMCallLowering::ARMCallLowering(const ARMTargetLowering &TLI)
    : CallLowering(&TLI) {}

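/// Return true if the given type can currently be lowered: a scalar of 1, 8,
/// 16 or 32 bits, or a 64-bit floating point type. Vectors and i64 (see the
/// FIXME below) are not handled yet.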
static bool isSupportedType(const DataLayout &DL, const ARMTargetLowering &TLI,
                            Type *T) {
  EVT VT = TLI.getValueType(DL, T, true);
  if (!VT.isSimple() || VT.isVector())
    return false;

  unsigned VTSize = VT.getSimpleVT().getSizeInBits();

  if (VTSize == 64)
    // FIXME: Support i64 too
    return VT.isFloatingPoint();

  return VTSize == 1 || VTSize == 8 || VTSize == 16 || VTSize == 32;
}

namespace {
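/// Handler used by handleAssignments to place a function's return value into
/// the physical register(s) chosen by the calling convention and to mark those
/// registers as implicit uses of the return instruction \p MIB.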
struct FuncReturnHandler : public CallLowering::ValueHandler {
  FuncReturnHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                    MachineInstrBuilder &MIB, CCAssignFn *AssignFn)
      : ValueHandler(MIRBuilder, MRI, AssignFn), MIB(MIB) {}

  unsigned getStackAddress(uint64_t Size, int64_t Offset,
                           MachinePointerInfo &MPO) override {
    llvm_unreachable("Don't know how to get a stack address yet");
  }

  void assignValueToReg(unsigned ValVReg, unsigned PhysReg,
                        CCValAssign &VA) override {
    assert(VA.isRegLoc() && "Value shouldn't be assigned to reg");
    assert(VA.getLocReg() == PhysReg && "Assigning to the wrong reg?");

    assert(VA.getValVT().getSizeInBits() <= 64 && "Unsupported value size");
    assert(VA.getLocVT().getSizeInBits() <= 64 && "Unsupported location size");

    unsigned ExtReg = extendRegister(ValVReg, VA);
    MIRBuilder.buildCopy(PhysReg, ExtReg);
    MIB.addUse(PhysReg, RegState::Implicit);
  }

  void assignValueToAddress(unsigned ValVReg, unsigned Addr, uint64_t Size,
                            MachinePointerInfo &MPO, CCValAssign &VA) override {
    llvm_unreachable("Don't know how to assign a value to an address yet");
  }

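  // Split a 64-bit floating point return value into two 32-bit halves so it
  // can be returned in a pair of core registers, which is the case the calling
  // convention marks as needing custom handling. The order of the two halves
  // depends on the target's endianness.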
  unsigned assignCustomValue(const CallLowering::ArgInfo &Arg,
                             ArrayRef<CCValAssign> VAs) override {
    CCValAssign VA = VAs[0];
    assert(VA.needsCustom() && "Value doesn't need custom handling");
    assert(VA.getValVT() == MVT::f64 && "Unsupported type");

    CCValAssign NextVA = VAs[1];
    assert(NextVA.needsCustom() && "Value doesn't need custom handling");
    assert(NextVA.getValVT() == MVT::f64 && "Unsupported type");

    assert(VA.getValNo() == NextVA.getValNo() &&
           "Values belong to different arguments");

    assert(VA.isRegLoc() && "Value should be in reg");
    assert(NextVA.isRegLoc() && "Value should be in reg");

    unsigned NewRegs[] = {MRI.createGenericVirtualRegister(LLT::scalar(32)),
                          MRI.createGenericVirtualRegister(LLT::scalar(32))};
    MIRBuilder.buildExtract(NewRegs, {0, 32}, Arg.Reg);

    bool IsLittle = MIRBuilder.getMF().getSubtarget<ARMSubtarget>().isLittle();
    if (!IsLittle)
      std::swap(NewRegs[0], NewRegs[1]);

    assignValueToReg(NewRegs[0], VA.getLocReg(), VA);
    assignValueToReg(NewRegs[1], NextVA.getLocReg(), NextVA);

    return 1;
  }

  MachineInstrBuilder &MIB;
};
} // End anonymous namespace.

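/// Break \p OrigArg into the value types the calling convention operates on.
/// No actual splitting is performed yet (only types that map to a single EVT
/// are supported); this mainly replaces the original IR type with its lowered
/// equivalent, e.g. a pointer type becomes an integer.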
void ARMCallLowering::splitToValueTypes(const ArgInfo &OrigArg,
                                        SmallVectorImpl<ArgInfo> &SplitArgs,
                                        const DataLayout &DL,
                                        MachineRegisterInfo &MRI) const {
  const ARMTargetLowering &TLI = *getTLI<ARMTargetLowering>();
  LLVMContext &Ctx = OrigArg.Ty->getContext();

  SmallVector<EVT, 4> SplitVTs;
  SmallVector<uint64_t, 4> Offsets;
  ComputeValueVTs(TLI, DL, OrigArg.Ty, SplitVTs, &Offsets, 0);

  assert(SplitVTs.size() == 1 && "Unsupported type");

  // Even if there is no splitting to do, we still want to replace the original
  // type (e.g. pointer type -> integer).
  SplitArgs.emplace_back(OrigArg.Reg, SplitVTs[0].getTypeForEVT(Ctx),
                         OrigArg.Flags, OrigArg.IsFixed);
}

/// Lower the return value for the already existing \p Ret. This assumes that
/// \p MIRBuilder's insertion point is correct.
bool ARMCallLowering::lowerReturnVal(MachineIRBuilder &MIRBuilder,
                                     const Value *Val, unsigned VReg,
                                     MachineInstrBuilder &Ret) const {
  if (!Val)
    // Nothing to do here.
    return true;

  auto &MF = MIRBuilder.getMF();
  const auto &F = *MF.getFunction();

  auto DL = MF.getDataLayout();
  auto &TLI = *getTLI<ARMTargetLowering>();
  if (!isSupportedType(DL, TLI, Val->getType()))
    return false;

  SmallVector<ArgInfo, 4> SplitVTs;
  ArgInfo RetInfo(VReg, Val->getType());
  setArgFlags(RetInfo, AttributeSet::ReturnIndex, DL, F);
  splitToValueTypes(RetInfo, SplitVTs, DL, MF.getRegInfo());

  CCAssignFn *AssignFn =
      TLI.CCAssignFnForReturn(F.getCallingConv(), F.isVarArg());

  FuncReturnHandler RetHandler(MIRBuilder, MF.getRegInfo(), Ret, AssignFn);
  return handleAssignments(MIRBuilder, SplitVTs, RetHandler);
}

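/// Lower a return by building a (not yet inserted) BX_RET, lowering the return
/// value into it via lowerReturnVal, and only then inserting the instruction.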
bool ARMCallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
                                  const Value *Val, unsigned VReg) const {
  assert(!Val == !VReg && "Return value without a vreg");

  auto Ret = MIRBuilder.buildInstrNoInsert(ARM::BX_RET).add(predOps(ARMCC::AL));

  if (!lowerReturnVal(MIRBuilder, Val, VReg, Ret))
    return false;

  MIRBuilder.insertInstr(Ret);
  return true;
}

namespace {
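/// Handler used by handleAssignments to copy incoming formal arguments from
/// the physical registers or stack slots chosen by the calling convention into
/// the virtual registers of the function being lowered.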
struct FormalArgHandler : public CallLowering::ValueHandler {
  FormalArgHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                   CCAssignFn AssignFn)
      : ValueHandler(MIRBuilder, MRI, AssignFn) {}

  unsigned getStackAddress(uint64_t Size, int64_t Offset,
                           MachinePointerInfo &MPO) override {
    assert((Size == 1 || Size == 2 || Size == 4 || Size == 8) &&
           "Unsupported size");

    auto &MFI = MIRBuilder.getMF().getFrameInfo();

    int FI = MFI.CreateFixedObject(Size, Offset, true);
    MPO = MachinePointerInfo::getFixedStack(MIRBuilder.getMF(), FI);

    unsigned AddrReg =
        MRI.createGenericVirtualRegister(LLT::pointer(MPO.getAddrSpace(), 32));
    MIRBuilder.buildFrameIndex(AddrReg, FI);

    return AddrReg;
  }

  void assignValueToAddress(unsigned ValVReg, unsigned Addr, uint64_t Size,
                            MachinePointerInfo &MPO, CCValAssign &VA) override {
    assert((Size == 1 || Size == 2 || Size == 4 || Size == 8) &&
           "Unsupported size");

    if (VA.getLocInfo() == CCValAssign::SExt ||
        VA.getLocInfo() == CCValAssign::ZExt) {
      // If the argument is zero- or sign-extended by the caller, its size
      // becomes 4 bytes, so that's what we should load.
      Size = 4;
      assert(MRI.getType(ValVReg).isScalar() && "Only scalars supported atm");
      MRI.setType(ValVReg, LLT::scalar(32));
    }

    auto MMO = MIRBuilder.getMF().getMachineMemOperand(
        MPO, MachineMemOperand::MOLoad, Size, /* Alignment */ 0);
    MIRBuilder.buildLoad(ValVReg, Addr, *MMO);
  }

  void assignValueToReg(unsigned ValVReg, unsigned PhysReg,
                        CCValAssign &VA) override {
    assert(VA.isRegLoc() && "Value shouldn't be assigned to reg");
    assert(VA.getLocReg() == PhysReg && "Assigning to the wrong reg?");

    assert(VA.getValVT().getSizeInBits() <= 64 && "Unsupported value size");
    assert(VA.getLocVT().getSizeInBits() <= 64 && "Unsupported location size");

    // The caller should handle all necessary extensions.
    MIRBuilder.getMBB().addLiveIn(PhysReg);
    MIRBuilder.buildCopy(ValVReg, PhysReg);
  }

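  // Reassemble a 64-bit floating point argument from the two core registers
  // the calling convention assigned to it (which is why the location needs
  // custom handling), swapping the halves on big-endian targets.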
  unsigned assignCustomValue(const llvm::ARMCallLowering::ArgInfo &Arg,
                             ArrayRef<CCValAssign> VAs) override {
    CCValAssign VA = VAs[0];
    assert(VA.needsCustom() && "Value doesn't need custom handling");
    assert(VA.getValVT() == MVT::f64 && "Unsupported type");

    CCValAssign NextVA = VAs[1];
    assert(NextVA.needsCustom() && "Value doesn't need custom handling");
    assert(NextVA.getValVT() == MVT::f64 && "Unsupported type");

    assert(VA.getValNo() == NextVA.getValNo() &&
           "Values belong to different arguments");

    assert(VA.isRegLoc() && "Value should be in reg");
    assert(NextVA.isRegLoc() && "Value should be in reg");

    unsigned NewRegs[] = {MRI.createGenericVirtualRegister(LLT::scalar(32)),
                          MRI.createGenericVirtualRegister(LLT::scalar(32))};

    assignValueToReg(NewRegs[0], VA.getLocReg(), VA);
    assignValueToReg(NewRegs[1], NextVA.getLocReg(), NextVA);

    bool IsLittle = MIRBuilder.getMF().getSubtarget<ARMSubtarget>().isLittle();
    if (!IsLittle)
      std::swap(NewRegs[0], NewRegs[1]);

    MIRBuilder.buildSequence(Arg.Reg, NewRegs, {0, 32});

    return 1;
  }
};
} // End anonymous namespace

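/// Lower the incoming arguments of \p F into the virtual registers \p VRegs.
/// This gives up (returns false) on varargs functions, Thumb targets,
/// soft-float targets, and argument types rejected by isSupportedType.
///
/// As a rough illustration: for a function such as
///   define arm_aapcscc void @f(i32 %x)
/// the calling convention assigns %x to r0, and FormalArgHandler then copies
/// r0 into the virtual register that represents %x.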
bool ARMCallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder,
                                           const Function &F,
                                           ArrayRef<unsigned> VRegs) const {
  // Quick exit if there aren't any args
  if (F.arg_empty())
    return true;

  if (F.isVarArg())
    return false;

  auto &MF = MIRBuilder.getMF();
  auto DL = MF.getDataLayout();
  auto &TLI = *getTLI<ARMTargetLowering>();

  auto Subtarget = TLI.getSubtarget();

  if (Subtarget->isThumb())
    return false;

  // FIXME: Support soft float (when we're ready to generate libcalls)
  if (Subtarget->useSoftFloat() || !Subtarget->hasVFP2())
    return false;

  auto &Args = F.getArgumentList();
  for (auto &Arg : Args)
    if (!isSupportedType(DL, TLI, Arg.getType()))
      return false;

  CCAssignFn *AssignFn =
      TLI.CCAssignFnForCall(F.getCallingConv(), F.isVarArg());

  SmallVector<ArgInfo, 8> ArgInfos;
  unsigned Idx = 0;
  for (auto &Arg : Args) {
    ArgInfo AInfo(VRegs[Idx], Arg.getType());
    setArgFlags(AInfo, Idx + 1, DL, F);
    splitToValueTypes(AInfo, ArgInfos, DL, MF.getRegInfo());
    Idx++;
  }

  FormalArgHandler ArgHandler(MIRBuilder, MIRBuilder.getMF().getRegInfo(),
                              AssignFn);
  return handleAssignments(MIRBuilder, ArgInfos, ArgHandler);
}

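/// Lower a call to \p Callee. Only the simplest cases are handled so far:
/// calls with no arguments, no return value and no long-call lowering. The
/// emitted sequence is ADJCALLSTACKDOWN, BLX (carrying the AAPCS
/// preserved-register mask, see the FIXME below), ADJCALLSTACKUP.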
bool ARMCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
                                const MachineOperand &Callee,
                                const ArgInfo &OrigRet,
                                ArrayRef<ArgInfo> OrigArgs) const {
  const MachineFunction &MF = MIRBuilder.getMF();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();

  if (MF.getSubtarget<ARMSubtarget>().genLongCalls())
    return false;

  // FIXME: Support calling functions with arguments.
  if (OrigArgs.size() > 0)
    return false;

  // FIXME: Support calling functions with return types.
  if (!OrigRet.Ty->isVoidTy())
    return false;

  MIRBuilder.buildInstr(ARM::ADJCALLSTACKDOWN)
      .addImm(0)
      .add(predOps(ARMCC::AL));

  MIRBuilder.buildInstr(ARM::BLX)
      .add(Callee)
      // FIXME: Don't hardcode the calling conv here...
      .addRegMask(TRI->getCallPreservedMask(MF, CallingConv::ARM_AAPCS));

  MIRBuilder.buildInstr(ARM::ADJCALLSTACKUP)
      .addImm(0)
      .addImm(0)
      .add(predOps(ARMCC::AL));

  return true;
}