[CallSite removal][GlobalISel] Use CallBase instead of CallSite in lowerCall and translateCallBase.
Differential Revision: https://reviews.llvm.org/D78001
commit 68eb08646c
parent e6605a209c
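For illustration only (not part of the commit): a minimal sketch of how call-site properties read when queried directly off `CallBase`, the common base class of `CallInst` and `InvokeInst`, using only accessors that appear in the diff below. The helper name `describeCall` is hypothetical.

    // Illustrative sketch -- not part of this commit.
    #include "llvm/IR/Function.h"
    #include "llvm/IR/InstrTypes.h"   // llvm::CallBase
    #include "llvm/IR/LLVMContext.h"
    #include "llvm/Support/Casting.h"
    #include "llvm/Support/raw_ostream.h"

    using namespace llvm;

    static void describeCall(const CallBase &CB) {
      // Callee: look through pointer casts, as lowerCall() does in the diff.
      const Value *CalleeV = CB.getCalledValue()->stripPointerCasts();
      if (const auto *F = dyn_cast<Function>(CalleeV))
        errs() << "direct call to " << F->getName() << "\n";
      else
        errs() << "indirect call\n";

      // Metadata is read straight off the instruction; with ImmutableCallSite
      // this required CS.getInstruction()->getMetadata(...).
      if (CB.getMetadata(LLVMContext::MD_callees))
        errs() << "has !callees metadata\n";

      errs() << "fixed params: " << CB.getFunctionType()->getNumParams()
             << ", vararg: " << CB.getFunctionType()->isVarArg()
             << ", musttail: " << CB.isMustTailCall() << "\n";
    }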
@@ -328,7 +328,7 @@ public:
   /// range of an immediate jump.
   ///
   /// \return true if the lowering succeeded, false otherwise.
-  bool lowerCall(MachineIRBuilder &MIRBuilder, ImmutableCallSite CS,
+  bool lowerCall(MachineIRBuilder &MIRBuilder, const CallBase &Call,
                  ArrayRef<Register> ResRegs,
                  ArrayRef<ArrayRef<Register>> ArgRegs, Register SwiftErrorVReg,
                  std::function<unsigned()> GetCalleeReg) const;
@@ -244,8 +244,7 @@ private:
                              SmallVectorImpl<uint64_t> *Offsets = nullptr);
 
   /// Common code for translating normal calls or invokes.
-  bool translateCallSite(const ImmutableCallSite &CS,
-                         MachineIRBuilder &MIRBuilder);
+  bool translateCallBase(const CallBase &CB, MachineIRBuilder &MIRBuilder);
 
   /// Translate call instruction.
   /// \pre \p U is a call instruction.
@@ -29,7 +29,7 @@ using namespace llvm;
 
 void CallLowering::anchor() {}
 
-bool CallLowering::lowerCall(MachineIRBuilder &MIRBuilder, ImmutableCallSite CS,
+bool CallLowering::lowerCall(MachineIRBuilder &MIRBuilder, const CallBase &CB,
                              ArrayRef<Register> ResRegs,
                              ArrayRef<ArrayRef<Register>> ArgRegs,
                              Register SwiftErrorVReg,
@@ -41,39 +41,39 @@ bool CallLowering::lowerCall(MachineIRBuilder &MIRBuilder, ImmutableCallSite CS,
   // physregs and memory locations. Gather the sequence of argument types that
   // we'll pass to the assigner function.
   unsigned i = 0;
-  unsigned NumFixedArgs = CS.getFunctionType()->getNumParams();
-  for (auto &Arg : CS.args()) {
+  unsigned NumFixedArgs = CB.getFunctionType()->getNumParams();
+  for (auto &Arg : CB.args()) {
     ArgInfo OrigArg{ArgRegs[i], Arg->getType(), ISD::ArgFlagsTy{},
                     i < NumFixedArgs};
-    setArgFlags(OrigArg, i + AttributeList::FirstArgIndex, DL, CS);
+    setArgFlags(OrigArg, i + AttributeList::FirstArgIndex, DL, CB);
     Info.OrigArgs.push_back(OrigArg);
     ++i;
   }
 
   // Try looking through a bitcast from one function type to another.
   // Commonly happens with calls to objc_msgSend().
-  const Value *CalleeV = CS.getCalledValue()->stripPointerCasts();
+  const Value *CalleeV = CB.getCalledValue()->stripPointerCasts();
   if (const Function *F = dyn_cast<Function>(CalleeV))
     Info.Callee = MachineOperand::CreateGA(F, 0);
   else
     Info.Callee = MachineOperand::CreateReg(GetCalleeReg(), false);
 
-  Info.OrigRet = ArgInfo{ResRegs, CS.getType(), ISD::ArgFlagsTy{}};
+  Info.OrigRet = ArgInfo{ResRegs, CB.getType(), ISD::ArgFlagsTy{}};
   if (!Info.OrigRet.Ty->isVoidTy())
-    setArgFlags(Info.OrigRet, AttributeList::ReturnIndex, DL, CS);
+    setArgFlags(Info.OrigRet, AttributeList::ReturnIndex, DL, CB);
 
   MachineFunction &MF = MIRBuilder.getMF();
-  Info.KnownCallees =
-      CS.getInstruction()->getMetadata(LLVMContext::MD_callees);
-  Info.CallConv = CS.getCallingConv();
+  Info.KnownCallees = CB.getMetadata(LLVMContext::MD_callees);
+  Info.CallConv = CB.getCallingConv();
   Info.SwiftErrorVReg = SwiftErrorVReg;
-  Info.IsMustTailCall = CS.isMustTailCall();
-  Info.IsTailCall = CS.isTailCall() &&
-                    isInTailCallPosition(CS, MF.getTarget()) &&
-                    (MF.getFunction()
-                         .getFnAttribute("disable-tail-calls")
-                         .getValueAsString() != "true");
-  Info.IsVarArg = CS.getFunctionType()->isVarArg();
+  Info.IsMustTailCall = CB.isMustTailCall();
+  Info.IsTailCall =
+      CB.isTailCall() &&
+      isInTailCallPosition(ImmutableCallSite(&CB), MF.getTarget()) &&
+      (MF.getFunction()
+           .getFnAttribute("disable-tail-calls")
+           .getValueAsString() != "true");
+  Info.IsVarArg = CB.getFunctionType()->isVarArg();
   return lowerCall(MIRBuilder, Info);
 }
 
@@ -126,9 +126,9 @@ CallLowering::setArgFlags<Function>(CallLowering::ArgInfo &Arg, unsigned OpIdx,
                                     const Function &FuncInfo) const;
 
 template void
-CallLowering::setArgFlags<CallInst>(CallLowering::ArgInfo &Arg, unsigned OpIdx,
+CallLowering::setArgFlags<CallBase>(CallLowering::ArgInfo &Arg, unsigned OpIdx,
                                     const DataLayout &DL,
-                                    const CallInst &FuncInfo) const;
+                                    const CallBase &FuncInfo) const;
 
 Register CallLowering::packRegs(ArrayRef<Register> SrcRegs, Type *PackedTy,
                                 MachineIRBuilder &MIRBuilder) const {
@@ -1598,24 +1598,23 @@ bool IRTranslator::translateInlineAsm(const CallInst &CI,
   return true;
 }
 
-bool IRTranslator::translateCallSite(const ImmutableCallSite &CS,
+bool IRTranslator::translateCallBase(const CallBase &CB,
                                      MachineIRBuilder &MIRBuilder) {
-  const Instruction &I = *CS.getInstruction();
-  ArrayRef<Register> Res = getOrCreateVRegs(I);
+  ArrayRef<Register> Res = getOrCreateVRegs(CB);
 
   SmallVector<ArrayRef<Register>, 8> Args;
   Register SwiftInVReg = 0;
   Register SwiftErrorVReg = 0;
-  for (auto &Arg : CS.args()) {
+  for (auto &Arg : CB.args()) {
     if (CLI->supportSwiftError() && isSwiftError(Arg)) {
       assert(SwiftInVReg == 0 && "Expected only one swift error argument");
       LLT Ty = getLLTForType(*Arg->getType(), *DL);
       SwiftInVReg = MRI->createGenericVirtualRegister(Ty);
       MIRBuilder.buildCopy(SwiftInVReg, SwiftError.getOrCreateVRegUseAt(
-                                            &I, &MIRBuilder.getMBB(), Arg));
+                                            &CB, &MIRBuilder.getMBB(), Arg));
       Args.emplace_back(makeArrayRef(SwiftInVReg));
       SwiftErrorVReg =
-          SwiftError.getOrCreateVRegDefAt(&I, &MIRBuilder.getMBB(), Arg);
+          SwiftError.getOrCreateVRegDefAt(&CB, &MIRBuilder.getMBB(), Arg);
       continue;
     }
     Args.push_back(getOrCreateVRegs(*Arg));
@@ -1625,8 +1624,8 @@ bool IRTranslator::translateCallSite(const ImmutableCallSite &CS,
   // optimize into tail calls. Instead, we defer that to selection where a final
   // scan is done to check if any instructions are calls.
   bool Success =
-      CLI->lowerCall(MIRBuilder, CS, Res, Args, SwiftErrorVReg,
-                     [&]() { return getOrCreateVReg(*CS.getCalledValue()); });
+      CLI->lowerCall(MIRBuilder, CB, Res, Args, SwiftErrorVReg,
+                     [&]() { return getOrCreateVReg(*CB.getCalledValue()); });
 
   // Check if we just inserted a tail call.
   if (Success) {
@@ -1664,7 +1663,7 @@ bool IRTranslator::translateCall(const User &U, MachineIRBuilder &MIRBuilder) {
   }
 
   if (!F || !F->isIntrinsic() || ID == Intrinsic::not_intrinsic)
-    return translateCallSite(&CI, MIRBuilder);
+    return translateCallBase(CI, MIRBuilder);
 
   assert(ID != Intrinsic::not_intrinsic && "unknown intrinsic");
 
@@ -1757,7 +1756,7 @@ bool IRTranslator::translateInvoke(const User &U,
   MCSymbol *BeginSymbol = Context.createTempSymbol();
   MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(BeginSymbol);
 
-  if (!translateCallSite(&I, MIRBuilder))
+  if (!translateCallBase(I, MIRBuilder))
     return false;
 
   MCSymbol *EndSymbol = Context.createTempSymbol();