DAG: Add calling convention argument to calling convention funcs

This seems like a pretty glaring omission, and AMDGPU
wants to treat kernels differently from other calling
conventions.

llvm-svn: 338194
Matt Arsenault, 2018-07-28 13:25:19 +00:00
parent 72b0e38b26, commit 81920b0a25
20 changed files with 162 additions and 126 deletions

include/llvm/CodeGen/TargetLowering.h

@@ -718,7 +718,7 @@ public:
/// always broken down into scalars in some contexts. This occurs even if the
/// vector type is legal.
virtual unsigned getVectorTypeBreakdownForCallingConv(
LLVMContext &Context, EVT VT, EVT &IntermediateVT,
LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT,
unsigned &NumIntermediates, MVT &RegisterVT) const {
return getVectorTypeBreakdown(Context, VT, IntermediateVT, NumIntermediates,
RegisterVT);
@@ -1174,7 +1174,7 @@ public:
/// are legal for some operations and not for other operations.
/// For MIPS all vector types must be passed through the integer register set.
virtual MVT getRegisterTypeForCallingConv(LLVMContext &Context,
EVT VT) const {
CallingConv::ID CC, EVT VT) const {
return getRegisterType(Context, VT);
}
@@ -1182,6 +1182,7 @@ public:
/// this occurs when a vector type is used, as vectors are passed through the
/// integer register set.
virtual unsigned getNumRegistersForCallingConv(LLVMContext &Context,
CallingConv::ID CC,
EVT VT) const {
return getNumRegisters(Context, VT);
}
@@ -3690,7 +3691,7 @@ private:
/// Given an LLVM IR type and return type attributes, compute the return value
/// EVTs and flags, and optionally also the offsets, if the return value is
/// being lowered to memory.
void GetReturnInfo(Type *ReturnType, AttributeList attr,
void GetReturnInfo(CallingConv::ID CC, Type *ReturnType, AttributeList attr,
SmallVectorImpl<ISD::OutputArg> &Outs,
const TargetLowering &TLI, const DataLayout &DL);
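The default implementations above simply forward to their CC-agnostic counterparts, so targets that never override these hooks behave exactly as before; only overriding targets see the convention. A typical query site, mirroring what the updated lowering code in the hunks below does (a sketch only; F, TLI, Ctx, and VT stand for the surrounding function, target lowering object, LLVMContext, and value type):

CallingConv::ID CC = F.getCallingConv();
MVT PartVT = TLI.getRegisterTypeForCallingConv(Ctx, CC, VT);
unsigned NumParts = TLI.getNumRegistersForCallingConv(Ctx, CC, VT);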

lib/CodeGen/SelectionDAG/FastISel.cpp

@@ -1130,7 +1130,7 @@ bool FastISel::lowerCallTo(CallLoweringInfo &CLI) {
ComputeValueVTs(TLI, DL, CLI.RetTy, RetTys);
SmallVector<ISD::OutputArg, 4> Outs;
GetReturnInfo(CLI.RetTy, getReturnAttrs(CLI), Outs, TLI, DL);
GetReturnInfo(CLI.CallConv, CLI.RetTy, getReturnAttrs(CLI), Outs, TLI, DL);
bool CanLowerReturn = TLI.CanLowerReturn(
CLI.CallConv, *FuncInfo.MF, CLI.IsVarArg, Outs, CLI.RetTy->getContext());

lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp

@@ -89,10 +89,12 @@ void FunctionLoweringInfo::set(const Function &fn, MachineFunction &mf,
// Check whether the function can return without sret-demotion.
SmallVector<ISD::OutputArg, 4> Outs;
GetReturnInfo(Fn->getReturnType(), Fn->getAttributes(), Outs, *TLI,
CallingConv::ID CC = Fn->getCallingConv();
GetReturnInfo(CC, Fn->getReturnType(), Fn->getAttributes(), Outs, *TLI,
mf.getDataLayout());
CanLowerReturn = TLI->CanLowerReturn(Fn->getCallingConv(), *MF,
Fn->isVarArg(), Outs, Fn->getContext());
CanLowerReturn =
TLI->CanLowerReturn(CC, *MF, Fn->isVarArg(), Outs, Fn->getContext());
// If this personality uses funclets, we need to do a bit more work.
DenseMap<const AllocaInst *, TinyPtrVector<int *>> CatchObjects;

lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp

@@ -157,31 +157,36 @@ static cl::opt<unsigned> SwitchPeelThreshold(
// store [4096 x i8] %data, [4096 x i8]* %buffer
static const unsigned MaxParallelChains = 64;
// True if the Value passed requires ABI mangling as it is a parameter to a
// function or a return value from a function which is not an intrinsic.
static bool isABIRegCopy(const Value *V) {
const bool IsRetInst = V && isa<ReturnInst>(V);
const bool IsCallInst = V && isa<CallInst>(V);
const bool IsInLineAsm =
IsCallInst && static_cast<const CallInst *>(V)->isInlineAsm();
const bool IsIndirectFunctionCall =
IsCallInst && !IsInLineAsm &&
!static_cast<const CallInst *>(V)->getCalledFunction();
// It is possible that the call instruction is an inline asm statement or an
// indirect function call in which case the return value of
// getCalledFunction() would be nullptr.
const bool IsIntrinsicCall =
IsCallInst && !IsInLineAsm && !IsIndirectFunctionCall &&
static_cast<const CallInst *>(V)->getCalledFunction()->getIntrinsicID() !=
Intrinsic::not_intrinsic;
return IsRetInst || (IsCallInst && (!IsInLineAsm && !IsIntrinsicCall));
}

// Return the calling convention if the Value passed requires ABI mangling as it
// is a parameter to a function or a return value from a function which is not
// an intrinsic.
static Optional<CallingConv::ID> getABIRegCopyCC(const Value *V) {
if (auto *R = dyn_cast<ReturnInst>(V))
return R->getParent()->getParent()->getCallingConv();
if (auto *CI = dyn_cast<CallInst>(V)) {
const bool IsInlineAsm = CI->isInlineAsm();
const bool IsIndirectFunctionCall =
!IsInlineAsm && !CI->getCalledFunction();
// It is possible that the call instruction is an inline asm statement or an
// indirect function call in which case the return value of
// getCalledFunction() would be nullptr.
const bool IsIntrinsicCall =
!IsInlineAsm && !IsIndirectFunctionCall &&
CI->getCalledFunction()->getIntrinsicID() != Intrinsic::not_intrinsic;
if (!IsInlineAsm && !IsIntrinsicCall)
return CI->getCallingConv();
}
return None;
}
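The Optional folds the old boolean answer into its payload: a return instruction yields the enclosing function's convention, a call that is neither inline asm nor an intrinsic yields the call site's convention, and everything else yields None. Roughly, for hypothetical IR values:

getABIRegCopyCC(ret in a fastcc function) --> CallingConv::Fast
getABIRegCopyCC(direct or indirect non-intrinsic call) --> the call's convention
getABIRegCopyCC(inline asm call or @llvm.* intrinsic call) --> None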
static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL,
const SDValue *Parts, unsigned NumParts,
MVT PartVT, EVT ValueVT, const Value *V,
bool IsABIRegCopy);
Optional<CallingConv::ID> CC);
/// getCopyFromParts - Create a value that contains the specified legal parts
/// combined into the value they represent. If the parts combine to a type
@@ -191,11 +196,11 @@ static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL,
static SDValue getCopyFromParts(SelectionDAG &DAG, const SDLoc &DL,
const SDValue *Parts, unsigned NumParts,
MVT PartVT, EVT ValueVT, const Value *V,
Optional<ISD::NodeType> AssertOp = None,
bool IsABIRegCopy = false) {
Optional<CallingConv::ID> CC = None,
Optional<ISD::NodeType> AssertOp = None) {
if (ValueVT.isVector())
return getCopyFromPartsVector(DAG, DL, Parts, NumParts,
PartVT, ValueVT, V, IsABIRegCopy);
return getCopyFromPartsVector(DAG, DL, Parts, NumParts, PartVT, ValueVT, V,
CC);
assert(NumParts > 0 && "No parts to assemble!");
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
@@ -236,8 +241,8 @@ static SDValue getCopyFromParts(SelectionDAG &DAG, const SDLoc &DL,
// Assemble the trailing non-power-of-2 part.
unsigned OddParts = NumParts - RoundParts;
EVT OddVT = EVT::getIntegerVT(*DAG.getContext(), OddParts * PartBits);
Hi = getCopyFromParts(DAG, DL,
Parts + RoundParts, OddParts, PartVT, OddVT, V);
Hi = getCopyFromParts(DAG, DL, Parts + RoundParts, OddParts, PartVT,
OddVT, V, CC);
// Combine the round and odd parts.
Lo = Val;
@@ -267,7 +272,7 @@ static SDValue getCopyFromParts(SelectionDAG &DAG, const SDLoc &DL,
assert(ValueVT.isFloatingPoint() && PartVT.isInteger() &&
!PartVT.isVector() && "Unexpected split");
EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
Val = getCopyFromParts(DAG, DL, Parts, NumParts, PartVT, IntVT, V);
Val = getCopyFromParts(DAG, DL, Parts, NumParts, PartVT, IntVT, V, CC);
}
}
@@ -340,9 +345,11 @@ static void diagnosePossiblyInvalidConstraint(LLVMContext &Ctx, const Value *V,
static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL,
const SDValue *Parts, unsigned NumParts,
MVT PartVT, EVT ValueVT, const Value *V,
bool IsABIRegCopy) {
Optional<CallingConv::ID> CallConv) {
assert(ValueVT.isVector() && "Not a vector value");
assert(NumParts > 0 && "No parts to assemble!");
const bool IsABIRegCopy = CallConv.hasValue();
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
SDValue Val = Parts[0];
@@ -355,8 +362,8 @@ static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL,
if (IsABIRegCopy) {
NumRegs = TLI.getVectorTypeBreakdownForCallingConv(
*DAG.getContext(), ValueVT, IntermediateVT, NumIntermediates,
RegisterVT);
*DAG.getContext(), CallConv.getValue(), ValueVT, IntermediateVT,
NumIntermediates, RegisterVT);
} else {
NumRegs =
TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT, IntermediateVT,
@@ -470,7 +477,8 @@ static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL,
static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &dl,
SDValue Val, SDValue *Parts, unsigned NumParts,
MVT PartVT, const Value *V, bool IsABIRegCopy);
MVT PartVT, const Value *V,
Optional<CallingConv::ID> CallConv);
/// getCopyToParts - Create a series of nodes that contain the specified value
/// split into legal parts. If the parts contain more bits than Val, then, for
@@ -478,14 +486,14 @@ static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &dl,
static void getCopyToParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val,
SDValue *Parts, unsigned NumParts, MVT PartVT,
const Value *V,
ISD::NodeType ExtendKind = ISD::ANY_EXTEND,
bool IsABIRegCopy = false) {
Optional<CallingConv::ID> CallConv = None,
ISD::NodeType ExtendKind = ISD::ANY_EXTEND) {
EVT ValueVT = Val.getValueType();
// Handle the vector case separately.
if (ValueVT.isVector())
return getCopyToPartsVector(DAG, DL, Val, Parts, NumParts, PartVT, V,
IsABIRegCopy);
CallConv);
unsigned PartBits = PartVT.getSizeInBits();
unsigned OrigNumParts = NumParts;
@@ -564,7 +572,8 @@ static void getCopyToParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val,
unsigned OddParts = NumParts - RoundParts;
SDValue OddVal = DAG.getNode(ISD::SRL, DL, ValueVT, Val,
DAG.getIntPtrConstant(RoundBits, DL));
getCopyToParts(DAG, DL, OddVal, Parts + RoundParts, OddParts, PartVT, V);
getCopyToParts(DAG, DL, OddVal, Parts + RoundParts, OddParts, PartVT, V,
CallConv);
if (DAG.getDataLayout().isBigEndian())
// The odd parts were reversed by getCopyToParts - unreverse them.
@@ -605,16 +614,16 @@ static void getCopyToParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val,
std::reverse(Parts, Parts + OrigNumParts);
}
/// getCopyToPartsVector - Create a series of nodes that contain the specified
/// value split into legal parts.
static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &DL,
SDValue Val, SDValue *Parts, unsigned NumParts,
MVT PartVT, const Value *V,
bool IsABIRegCopy) {
Optional<CallingConv::ID> CallConv) {
EVT ValueVT = Val.getValueType();
assert(ValueVT.isVector() && "Not a vector");
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
const bool IsABIRegCopy = CallConv.hasValue();
if (NumParts == 1) {
EVT PartEVT = PartVT;
@@ -679,8 +688,8 @@ static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &DL,
unsigned NumRegs;
if (IsABIRegCopy) {
NumRegs = TLI.getVectorTypeBreakdownForCallingConv(
*DAG.getContext(), ValueVT, IntermediateVT, NumIntermediates,
RegisterVT);
*DAG.getContext(), CallConv.getValue(), ValueVT, IntermediateVT,
NumIntermediates, RegisterVT);
} else {
NumRegs =
TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT, IntermediateVT,
@@ -720,7 +729,7 @@ static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &DL,
// If the register was not expanded, promote or copy the value,
// as appropriate.
for (unsigned i = 0; i != NumParts; ++i)
getCopyToParts(DAG, DL, Ops[i], &Parts[i], 1, PartVT, V);
getCopyToParts(DAG, DL, Ops[i], &Parts[i], 1, PartVT, V, CallConv);
} else if (NumParts > 0) {
// If the intermediate type was expanded, split each value into
// legal parts.
@@ -729,29 +738,32 @@ static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &DL,
"Must expand into a divisible number of parts!");
unsigned Factor = NumParts / NumIntermediates;
for (unsigned i = 0; i != NumIntermediates; ++i)
getCopyToParts(DAG, DL, Ops[i], &Parts[i*Factor], Factor, PartVT, V);
getCopyToParts(DAG, DL, Ops[i], &Parts[i * Factor], Factor, PartVT, V,
CallConv);
}
}
RegsForValue::RegsForValue(const SmallVector<unsigned, 4> &regs, MVT regvt,
EVT valuevt, bool IsABIMangledValue)
EVT valuevt, Optional<CallingConv::ID> CC)
: ValueVTs(1, valuevt), RegVTs(1, regvt), Regs(regs),
RegCount(1, regs.size()), IsABIMangled(IsABIMangledValue) {}
RegCount(1, regs.size()), CallConv(CC) {}
RegsForValue::RegsForValue(LLVMContext &Context, const TargetLowering &TLI,
const DataLayout &DL, unsigned Reg, Type *Ty,
bool IsABIMangledValue) {
Optional<CallingConv::ID> CC) {
ComputeValueVTs(TLI, DL, Ty, ValueVTs);
IsABIMangled = IsABIMangledValue;
CallConv = CC;
for (EVT ValueVT : ValueVTs) {
unsigned NumRegs = IsABIMangledValue
? TLI.getNumRegistersForCallingConv(Context, ValueVT)
: TLI.getNumRegisters(Context, ValueVT);
MVT RegisterVT = IsABIMangledValue
? TLI.getRegisterTypeForCallingConv(Context, ValueVT)
: TLI.getRegisterType(Context, ValueVT);
unsigned NumRegs =
isABIMangled()
? TLI.getNumRegistersForCallingConv(Context, CC.getValue(), ValueVT)
: TLI.getNumRegisters(Context, ValueVT);
MVT RegisterVT =
isABIMangled()
? TLI.getRegisterTypeForCallingConv(Context, CC.getValue(), ValueVT)
: TLI.getRegisterType(Context, ValueVT);
for (unsigned i = 0; i != NumRegs; ++i)
Regs.push_back(Reg + i);
RegVTs.push_back(RegisterVT);
@@ -777,9 +789,10 @@ SDValue RegsForValue::getCopyFromRegs(SelectionDAG &DAG,
// Copy the legal parts from the registers.
EVT ValueVT = ValueVTs[Value];
unsigned NumRegs = RegCount[Value];
MVT RegisterVT = IsABIMangled
? TLI.getRegisterTypeForCallingConv(*DAG.getContext(), RegVTs[Value])
: RegVTs[Value];
MVT RegisterVT = isABIMangled() ? TLI.getRegisterTypeForCallingConv(
*DAG.getContext(),
CallConv.getValue(), RegVTs[Value])
: RegVTs[Value];
Parts.resize(NumRegs);
for (unsigned i = 0; i != NumRegs; ++i) {
@@ -837,8 +850,8 @@ SDValue RegsForValue::getCopyFromRegs(SelectionDAG &DAG,
RegisterVT, P, DAG.getValueType(FromVT));
}
Values[Value] = getCopyFromParts(DAG, dl, Parts.begin(),
NumRegs, RegisterVT, ValueVT, V);
Values[Value] = getCopyFromParts(DAG, dl, Parts.begin(), NumRegs,
RegisterVT, ValueVT, V, CallConv);
Part += NumRegs;
Parts.clear();
}
@@ -859,15 +872,16 @@ void RegsForValue::getCopyToRegs(SDValue Val, SelectionDAG &DAG,
for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
unsigned NumParts = RegCount[Value];
MVT RegisterVT = IsABIMangled
? TLI.getRegisterTypeForCallingConv(*DAG.getContext(), RegVTs[Value])
: RegVTs[Value];
MVT RegisterVT = isABIMangled() ? TLI.getRegisterTypeForCallingConv(
*DAG.getContext(),
CallConv.getValue(), RegVTs[Value])
: RegVTs[Value];
if (ExtendKind == ISD::ANY_EXTEND && TLI.isZExtFree(Val, RegisterVT))
ExtendKind = ISD::ZERO_EXTEND;
getCopyToParts(DAG, dl, Val.getValue(Val.getResNo() + Value),
&Parts[Part], NumParts, RegisterVT, V, ExtendKind);
getCopyToParts(DAG, dl, Val.getValue(Val.getResNo() + Value), &Parts[Part],
NumParts, RegisterVT, V, CallConv, ExtendKind);
Part += NumParts;
}
@@ -1164,7 +1178,7 @@ SDValue SelectionDAGBuilder::getCopyFromRegs(const Value *V, Type *Ty) {
unsigned InReg = It->second;
RegsForValue RFV(*DAG.getContext(), DAG.getTargetLoweringInfo(),
DAG.getDataLayout(), InReg, Ty, isABIRegCopy(V));
DAG.getDataLayout(), InReg, Ty, getABIRegCopyCC(V));
SDValue Chain = DAG.getEntryNode();
Result = RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr,
V);
@@ -1355,7 +1369,7 @@ SDValue SelectionDAGBuilder::getValueImpl(const Value *V) {
unsigned InReg = FuncInfo.InitializeRegForValue(Inst);
RegsForValue RFV(*DAG.getContext(), TLI, DAG.getDataLayout(), InReg,
Inst->getType(), isABIRegCopy(V));
Inst->getType(), getABIRegCopyCC(V));
SDValue Chain = DAG.getEntryNode();
return RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr, V);
}
@@ -1589,12 +1603,14 @@ void SelectionDAGBuilder::visitRet(const ReturnInst &I) {
if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger())
VT = TLI.getTypeForExtReturn(Context, VT, ExtendKind);
unsigned NumParts = TLI.getNumRegistersForCallingConv(Context, VT);
MVT PartVT = TLI.getRegisterTypeForCallingConv(Context, VT);
CallingConv::ID CC = F->getCallingConv();
unsigned NumParts = TLI.getNumRegistersForCallingConv(Context, CC, VT);
MVT PartVT = TLI.getRegisterTypeForCallingConv(Context, CC, VT);
SmallVector<SDValue, 4> Parts(NumParts);
getCopyToParts(DAG, getCurSDLoc(),
SDValue(RetOp.getNode(), RetOp.getResNo() + j),
&Parts[0], NumParts, PartVT, &I, ExtendKind, true);
&Parts[0], NumParts, PartVT, &I, CC, ExtendKind);
// 'inreg' on function refers to return value
ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
@@ -4929,7 +4945,7 @@ bool SelectionDAGBuilder::EmitFuncArgumentDbgValue(
if (VMI != FuncInfo.ValueMap.end()) {
const auto &TLI = DAG.getTargetLoweringInfo();
RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), VMI->second,
V->getType(), isABIRegCopy(V));
V->getType(), getABIRegCopyCC(V));
if (RFV.occupiesMultipleRegs()) {
unsigned Offset = 0;
for (auto RegAndSize : RFV.getRegsAndSizes()) {
@@ -5288,7 +5304,7 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
// The PHI node may be split up into several MI PHI nodes (in
// FunctionLoweringInfo::set).
RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), Reg,
V->getType(), false);
V->getType(), None);
if (RFV.occupiesMultipleRegs()) {
unsigned Offset = 0;
unsigned BitsToDescribe = 0;
@@ -8289,7 +8305,7 @@ TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
}
SmallVector<ISD::OutputArg, 4> Outs;
GetReturnInfo(CLI.RetTy, getReturnAttrs(CLI), Outs, *this, DL);
GetReturnInfo(CLI.CallConv, CLI.RetTy, getReturnAttrs(CLI), Outs, *this, DL);
bool CanLowerReturn =
this->CanLowerReturn(CLI.CallConv, CLI.DAG.getMachineFunction(),
@@ -8331,10 +8347,10 @@ TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
} else {
for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
EVT VT = RetTys[I];
MVT RegisterVT =
getRegisterTypeForCallingConv(CLI.RetTy->getContext(), VT);
unsigned NumRegs =
getNumRegistersForCallingConv(CLI.RetTy->getContext(), VT);
MVT RegisterVT = getRegisterTypeForCallingConv(CLI.RetTy->getContext(),
CLI.CallConv, VT);
unsigned NumRegs = getNumRegistersForCallingConv(CLI.RetTy->getContext(),
CLI.CallConv, VT);
for (unsigned i = 0; i != NumRegs; ++i) {
ISD::InputArg MyFlags;
MyFlags.VT = RegisterVT;
@@ -8443,9 +8459,10 @@ TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
Flags.setInConsecutiveRegs();
Flags.setOrigAlign(OriginalAlignment);
MVT PartVT = getRegisterTypeForCallingConv(CLI.RetTy->getContext(), VT);
unsigned NumParts =
getNumRegistersForCallingConv(CLI.RetTy->getContext(), VT);
MVT PartVT = getRegisterTypeForCallingConv(CLI.RetTy->getContext(),
CLI.CallConv, VT);
unsigned NumParts = getNumRegistersForCallingConv(CLI.RetTy->getContext(),
CLI.CallConv, VT);
SmallVector<SDValue, 4> Parts(NumParts);
ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
@@ -8477,7 +8494,7 @@ TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
}
getCopyToParts(CLI.DAG, CLI.DL, Op, &Parts[0], NumParts, PartVT,
CLI.CS.getInstruction(), ExtendKind, true);
CLI.CS.getInstruction(), CLI.CallConv, ExtendKind);
for (unsigned j = 0; j != NumParts; ++j) {
// if it isn't first piece, alignment must be 1
@@ -8577,14 +8594,14 @@ TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
unsigned CurReg = 0;
for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
EVT VT = RetTys[I];
MVT RegisterVT =
getRegisterTypeForCallingConv(CLI.RetTy->getContext(), VT);
unsigned NumRegs =
getNumRegistersForCallingConv(CLI.RetTy->getContext(), VT);
MVT RegisterVT = getRegisterTypeForCallingConv(CLI.RetTy->getContext(),
CLI.CallConv, VT);
unsigned NumRegs = getNumRegistersForCallingConv(CLI.RetTy->getContext(),
CLI.CallConv, VT);
ReturnValues.push_back(getCopyFromParts(CLI.DAG, CLI.DL, &InVals[CurReg],
NumRegs, RegisterVT, VT, nullptr,
AssertOp, true));
CLI.CallConv, AssertOp));
CurReg += NumRegs;
}
@@ -8623,8 +8640,8 @@ SelectionDAGBuilder::CopyValueToVirtualRegister(const Value *V, unsigned Reg) {
// If this is an InlineAsm we have to match the registers required, not the
// notional registers required by the type.
RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), Reg,
V->getType(), isABIRegCopy(V));
RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), Reg, V->getType(),
getABIRegCopyCC(V));
SDValue Chain = DAG.getEntryNode();
ISD::NodeType ExtendType = (FuncInfo.PreferredExtendType.find(V) ==
@@ -8937,10 +8954,10 @@ void SelectionDAGISel::LowerArguments(const Function &F) {
if (ArgCopyElisionCandidates.count(&Arg))
Flags.setCopyElisionCandidate();
MVT RegisterVT =
TLI->getRegisterTypeForCallingConv(*CurDAG->getContext(), VT);
unsigned NumRegs =
TLI->getNumRegistersForCallingConv(*CurDAG->getContext(), VT);
MVT RegisterVT = TLI->getRegisterTypeForCallingConv(
*CurDAG->getContext(), F.getCallingConv(), VT);
unsigned NumRegs = TLI->getNumRegistersForCallingConv(
*CurDAG->getContext(), F.getCallingConv(), VT);
for (unsigned i = 0; i != NumRegs; ++i) {
ISD::InputArg MyFlags(Flags, RegisterVT, VT, isArgValueUsed,
ArgNo, PartBase+i*RegisterVT.getStoreSize());
@@ -8995,8 +9012,8 @@ void SelectionDAGISel::LowerArguments(const Function &F) {
MVT VT = ValueVTs[0].getSimpleVT();
MVT RegVT = TLI->getRegisterType(*CurDAG->getContext(), VT);
Optional<ISD::NodeType> AssertOp = None;
SDValue ArgValue = getCopyFromParts(DAG, dl, &InVals[0], 1,
RegVT, VT, nullptr, AssertOp);
SDValue ArgValue = getCopyFromParts(DAG, dl, &InVals[0], 1, RegVT, VT,
nullptr, F.getCallingConv(), AssertOp);
MachineFunction& MF = SDB->DAG.getMachineFunction();
MachineRegisterInfo& RegInfo = MF.getRegInfo();
@@ -9046,10 +9063,10 @@ void SelectionDAGISel::LowerArguments(const Function &F) {
for (unsigned Val = 0; Val != NumValues; ++Val) {
EVT VT = ValueVTs[Val];
MVT PartVT =
TLI->getRegisterTypeForCallingConv(*CurDAG->getContext(), VT);
unsigned NumParts =
TLI->getNumRegistersForCallingConv(*CurDAG->getContext(), VT);
MVT PartVT = TLI->getRegisterTypeForCallingConv(*CurDAG->getContext(),
F.getCallingConv(), VT);
unsigned NumParts = TLI->getNumRegistersForCallingConv(
*CurDAG->getContext(), F.getCallingConv(), VT);
// Even an apparent 'unused' swifterror argument needs to be returned. So
// we do generate a copy for it that can be used on return from the
@@ -9062,8 +9079,8 @@ void SelectionDAGISel::LowerArguments(const Function &F) {
AssertOp = ISD::AssertZext;
ArgValues.push_back(getCopyFromParts(DAG, dl, &InVals[i], NumParts,
PartVT, VT, nullptr, AssertOp,
true));
PartVT, VT, nullptr,
F.getCallingConv(), AssertOp));
}
i += NumParts;

lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h

@@ -1015,14 +1015,18 @@ struct RegsForValue {
/// Records if this value needs to be treated in an ABI dependent manner,
/// different to normal type legalization.
bool IsABIMangled = false;
Optional<CallingConv::ID> CallConv;
RegsForValue() = default;
RegsForValue(const SmallVector<unsigned, 4> &regs, MVT regvt, EVT valuevt,
bool IsABIMangledValue = false);
Optional<CallingConv::ID> CC = None);
RegsForValue(LLVMContext &Context, const TargetLowering &TLI,
const DataLayout &DL, unsigned Reg, Type *Ty,
bool IsABIMangledValue = false);
Optional<CallingConv::ID> CC);
bool isABIMangled() const {
return CallConv.hasValue();
}
/// Add the specified values to this one.
void append(const RegsForValue &RHS) {
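The replacement is the usual flag-plus-payload idiom: a single Optional whose presence is the old IsABIMangled bit and whose value is the convention that used to be implicit. A standalone sketch of the pattern with std::optional (Widget is made up; LLVM's own Optional works the same way here):

#include <optional>

struct Widget {
  // Empty means "not ABI-mangled"; a present value names the convention.
  std::optional<unsigned> CallConv;
  bool isABIMangled() const { return CallConv.has_value(); }
};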

lib/CodeGen/SelectionDAG/StatepointLowering.cpp

@@ -861,7 +861,8 @@ SelectionDAGBuilder::LowerStatepoint(ImmutableStatepoint ISP,
// completely and make statepoint call to return a tuple.
unsigned Reg = FuncInfo.CreateRegs(RetTy);
RegsForValue RFV(*DAG.getContext(), DAG.getTargetLoweringInfo(),
DAG.getDataLayout(), Reg, RetTy, true);
DAG.getDataLayout(), Reg, RetTy,
ISP.getCallSite().getCallingConv());
SDValue Chain = DAG.getEntryNode();
RFV.getCopyToRegs(ReturnValue, DAG, getCurSDLoc(), Chain, nullptr);
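Statepoint lowering previously pinned the ABI-mangled flag to true; passing ISP.getCallSite().getCallingConv() instead lets the re-lowered return value use the wrapped call's actual convention.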

lib/CodeGen/TargetLoweringBase.cpp

@@ -1337,7 +1337,8 @@ unsigned TargetLoweringBase::getVectorTypeBreakdown(LLVMContext &Context, EVT VT
/// type of the given function. This does not require a DAG or a return value,
/// and is suitable for use before any DAGs for the function are constructed.
/// TODO: Move this out of TargetLowering.cpp.
void llvm::GetReturnInfo(Type *ReturnType, AttributeList attr,
void llvm::GetReturnInfo(CallingConv::ID CC, Type *ReturnType,
AttributeList attr,
SmallVectorImpl<ISD::OutputArg> &Outs,
const TargetLowering &TLI, const DataLayout &DL) {
SmallVector<EVT, 4> ValueVTs;
@@ -1365,9 +1366,9 @@ void llvm::GetReturnInfo(Type *ReturnType, AttributeList attr,
}
unsigned NumParts =
TLI.getNumRegistersForCallingConv(ReturnType->getContext(), VT);
TLI.getNumRegistersForCallingConv(ReturnType->getContext(), CC, VT);
MVT PartVT =
TLI.getRegisterTypeForCallingConv(ReturnType->getContext(), VT);
TLI.getRegisterTypeForCallingConv(ReturnType->getContext(), CC, VT);
// 'inreg' on function refers to return value
ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();

lib/Target/AArch64/AArch64FastISel.cpp

@@ -3774,7 +3774,7 @@ bool AArch64FastISel::selectRet(const Instruction *I) {
if (Ret->getNumOperands() > 0) {
CallingConv::ID CC = F.getCallingConv();
SmallVector<ISD::OutputArg, 4> Outs;
GetReturnInfo(F.getReturnType(), F.getAttributes(), Outs, TLI, DL);
GetReturnInfo(CC, F.getReturnType(), F.getAttributes(), Outs, TLI, DL);
// Analyze operands of the call, assigning locations to each operand.
SmallVector<CCValAssign, 16> ValLocs;

lib/Target/AMDGPU/AMDGPUISelLowering.cpp

@@ -907,6 +907,7 @@ void AMDGPUTargetLowering::analyzeFormalArgumentsCompute(
LLVMContext &Ctx = Fn.getParent()->getContext();
const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(MF);
const unsigned ExplicitOffset = ST.getExplicitKernelArgOffset(Fn);
CallingConv::ID CC = Fn.getCallingConv();
unsigned MaxAlign = 1;
uint64_t ExplicitArgOffset = 0;
@@ -940,10 +941,8 @@ void AMDGPUTargetLowering::analyzeFormalArgumentsCompute(
EVT ArgVT = ValueVTs[Value];
EVT MemVT = ArgVT;
MVT RegisterVT =
getRegisterTypeForCallingConv(Ctx, ArgVT);
unsigned NumRegs =
getNumRegistersForCallingConv(Ctx, ArgVT);
MVT RegisterVT = getRegisterTypeForCallingConv(Ctx, CC, ArgVT);
unsigned NumRegs = getNumRegistersForCallingConv(Ctx, CC, ArgVT);
if (NumRegs == 1) {
// This argument is not split, so the IR type is the memory type.
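This hunk is closest to the stated motivation: kernel argument analysis now queries the hooks with the kernel's own convention. As a purely illustrative sketch of the kind of override this enables (SomeGPUTargetLowering and the policy shown are hypothetical, not something this commit implements):

MVT SomeGPUTargetLowering::getRegisterTypeForCallingConv(LLVMContext &Ctx,
                                                         CallingConv::ID CC,
                                                         EVT VT) const {
  // Kernels may now take a different path than ordinary device functions.
  if (CC == CallingConv::AMDGPU_KERNEL && VT.isVector())
    return MVT::i32; // e.g. pass kernel vector arguments as packed dwords
  return TargetLowering::getRegisterTypeForCallingConv(Ctx, CC, VT);
}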

lib/Target/ARM/ARMFastISel.cpp

@@ -2116,7 +2116,7 @@ bool ARMFastISel::SelectRet(const Instruction *I) {
CallingConv::ID CC = F.getCallingConv();
if (Ret->getNumOperands() > 0) {
SmallVector<ISD::OutputArg, 4> Outs;
GetReturnInfo(F.getReturnType(), F.getAttributes(), Outs, TLI, DL);
GetReturnInfo(CC, F.getReturnType(), F.getAttributes(), Outs, TLI, DL);
// Analyze operands of the call, assigning locations to each operand.
SmallVector<CCValAssign, 16> ValLocs;

lib/Target/Mips/MipsCallLowering.cpp

@@ -418,7 +418,8 @@ void MipsCallLowering::subTargetRegTypeForCallingConv(
for (auto &Arg : Args) {
EVT VT = TLI.getValueType(DL, Arg.Ty);
MVT RegisterVT = TLI.getRegisterTypeForCallingConv(F.getContext(), VT);
MVT RegisterVT = TLI.getRegisterTypeForCallingConv(F.getContext(),
F.getCallingConv(), VT);
ISD::ArgFlagsTy Flags = Arg.Flags;
Flags.setOrigAlign(TLI.getABIAlignmentForCallingConv(Arg.Ty, DL));

lib/Target/Mips/MipsFastISel.cpp

@@ -1662,7 +1662,7 @@ bool MipsFastISel::selectRet(const Instruction *I) {
return false;
SmallVector<ISD::OutputArg, 4> Outs;
GetReturnInfo(F.getReturnType(), F.getAttributes(), Outs, TLI, DL);
GetReturnInfo(CC, F.getReturnType(), F.getAttributes(), Outs, TLI, DL);
// Analyze operands of the call, assigning locations to each operand.
SmallVector<CCValAssign, 16> ValLocs;

lib/Target/Mips/MipsISelLowering.cpp

@@ -111,6 +111,7 @@ static bool isShiftedMask(uint64_t I, uint64_t &Pos, uint64_t &Size) {
// The MIPS MSA ABI passes vector arguments in the integer register set.
// The number of integer registers used is dependent on the ABI used.
MVT MipsTargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
CallingConv::ID CC,
EVT VT) const {
if (VT.isVector()) {
if (Subtarget.isABI_O32()) {
@@ -123,6 +124,7 @@ MVT MipsTargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
}
unsigned MipsTargetLowering::getNumRegistersForCallingConv(LLVMContext &Context,
CallingConv::ID CC,
EVT VT) const {
if (VT.isVector())
return std::max((VT.getSizeInBits() / (Subtarget.isABI_O32() ? 32 : 64)),
@@ -131,10 +133,10 @@ unsigned MipsTargetLowering::getNumRegistersForCallingConv(LLVMContext &Context,
}
unsigned MipsTargetLowering::getVectorTypeBreakdownForCallingConv(
LLVMContext &Context, EVT VT, EVT &IntermediateVT,
LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT,
unsigned &NumIntermediates, MVT &RegisterVT) const {
// Break down vector types to either 2 i64s or 4 i32s.
RegisterVT = getRegisterTypeForCallingConv(Context, VT) ;
RegisterVT = getRegisterTypeForCallingConv(Context, CC, VT);
IntermediateVT = RegisterVT;
NumIntermediates = VT.getSizeInBits() < RegisterVT.getSizeInBits()
? VT.getVectorNumElements()
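Worked through the formula above: an MSA v4i32 argument is 128 bits, so under O32 getNumRegistersForCallingConv returns max(128 / 32, 1) = 4 (four i32 pieces), while under a 64-bit ABI it returns max(128 / 64, 1) = 2 (two i64 pieces); the breakdown hook then reports that register type as IntermediateVT with a matching NumIntermediates.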

lib/Target/Mips/MipsISelLowering.h

@@ -288,17 +288,18 @@ class TargetRegisterClass;
/// Return the register type for a given MVT, ensuring vectors are treated
/// as a series of gpr sized integers.
MVT getRegisterTypeForCallingConv(LLVMContext &Context,
MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC,
EVT VT) const override;
/// Return the number of registers for a given MVT, ensuring vectors are
/// treated as a series of gpr sized integers.
unsigned getNumRegistersForCallingConv(LLVMContext &Context,
CallingConv::ID CC,
EVT VT) const override;
/// Break down vectors to the correct number of gpr sized integers.
unsigned getVectorTypeBreakdownForCallingConv(
LLVMContext &Context, EVT VT, EVT &IntermediateVT,
LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT,
unsigned &NumIntermediates, MVT &RegisterVT) const override;
/// Return the correct alignment for the current calling convention.

lib/Target/PowerPC/PPCFastISel.cpp

@@ -1697,7 +1697,7 @@ bool PPCFastISel::SelectRet(const Instruction *I) {
if (Ret->getNumOperands() > 0) {
SmallVector<ISD::OutputArg, 4> Outs;
GetReturnInfo(F.getReturnType(), F.getAttributes(), Outs, TLI, DL);
GetReturnInfo(CC, F.getReturnType(), F.getAttributes(), Outs, TLI, DL);
// Analyze operands of the call, assigning locations to each operand.
SmallVector<CCValAssign, 16> ValLocs;

lib/Target/PowerPC/PPCISelLowering.cpp

@@ -1224,6 +1224,7 @@ unsigned PPCTargetLowering::getByValTypeAlignment(Type *Ty,
}
unsigned PPCTargetLowering::getNumRegistersForCallingConv(LLVMContext &Context,
CallingConv::ID CC,
EVT VT) const {
if (Subtarget.hasSPE() && VT == MVT::f64)
return 2;
@@ -1231,6 +1232,7 @@ unsigned PPCTargetLowering::getNumRegistersForCallingConv(LLVMContext &Context,
}
MVT PPCTargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
CallingConv::ID CC,
EVT VT) const {
if (Subtarget.hasSPE() && VT == MVT::f64)
return MVT::i32;
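Concretely: under SPE an f64 lives in two 32-bit GPR halves, so for MVT::f64 these hooks report NumRegisters = 2 and RegisterVT = MVT::i32, and getCopyToParts/getCopyFromParts split and reassemble the double accordingly.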

lib/Target/PowerPC/PPCISelLowering.h

@@ -872,9 +872,11 @@ namespace llvm {
MCContext &Ctx) const override;
unsigned getNumRegistersForCallingConv(LLVMContext &Context,
CallingConv::ID CC,
EVT VT) const override;
MVT getRegisterTypeForCallingConv(LLVMContext &Context,
CallingConv::ID CC,
EVT VT) const override;
private:

lib/Target/X86/X86FastISel.cpp

@@ -1195,7 +1195,7 @@ bool X86FastISel::X86SelectRet(const Instruction *I) {
if (Ret->getNumOperands() > 0) {
SmallVector<ISD::OutputArg, 4> Outs;
GetReturnInfo(F.getReturnType(), F.getAttributes(), Outs, TLI, DL);
GetReturnInfo(CC, F.getReturnType(), F.getAttributes(), Outs, TLI, DL);
// Analyze operands of the call, assigning locations to each operand.
SmallVector<CCValAssign, 16> ValLocs;

lib/Target/X86/X86ISelLowering.cpp

@@ -1800,17 +1800,19 @@ X86TargetLowering::getPreferredVectorAction(EVT VT) const {
}
MVT X86TargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
CallingConv::ID CC,
EVT VT) const {
if (VT == MVT::v32i1 && Subtarget.hasAVX512() && !Subtarget.hasBWI())
return MVT::v32i8;
return TargetLowering::getRegisterTypeForCallingConv(Context, VT);
return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);
}
unsigned X86TargetLowering::getNumRegistersForCallingConv(LLVMContext &Context,
CallingConv::ID CC,
EVT VT) const {
if (VT == MVT::v32i1 && Subtarget.hasAVX512() && !Subtarget.hasBWI())
return 1;
return TargetLowering::getNumRegistersForCallingConv(Context, VT);
return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT);
}
EVT X86TargetLowering::getSetCCResultType(const DataLayout &DL,
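Worked example: v32i1 is a 32-element mask, and with AVX512 but no BWI there is no native 32-bit mask register, so the value is coerced to a single v32i8 (NumRegisters = 1) for argument and return passing rather than being split by the generic legalization rules.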

lib/Target/X86/X86ISelLowering.h

@@ -1097,10 +1097,11 @@ namespace llvm {
/// Customize the preferred legalization strategy for certain types.
LegalizeTypeAction getPreferredVectorAction(EVT VT) const override;
MVT getRegisterTypeForCallingConv(LLVMContext &Context,
MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC,
EVT VT) const override;
unsigned getNumRegistersForCallingConv(LLVMContext &Context,
CallingConv::ID CC,
EVT VT) const override;
bool isIntDivCheap(EVT VT, AttributeList Attr) const override;