[RISCV] Codegen support for RV32D floating point load/store, fadd.d, calling conv

fadd.d is required in order to force floating point registers to be used in
test code, as parameters are passed in integer registers in the soft float
ABI.

Much of this patch is concerned with support for passing f64 on RV32D with a
soft-float ABI. Similar to Mips, introduce pseudoinstructions to build an f64
out of a pair of i32 and to split an f64 to a pair of i32. BUILD_PAIR and
EXTRACT_ELEMENT can't be used, as a BITCAST to i64 would be necessary, but i64
is not a legal type.

llvm-svn: 329871
Alex Bradbury 2018-04-12 05:34:25 +00:00
parent bedca0b41b
commit 0b4175f160
6 changed files with 327 additions and 22 deletions
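As a rough illustration of the commit message (a sketch, not taken from the patch's own tests): under the RV32 ilp32 soft-float ABI each double argument arrives in a pair of GPRs, so a function like the one below only touches FPR64 registers once the i32 halves have been combined for the fadd.d.

// Hypothetical example, not part of this patch.
double sum(double a, double b) { return a + b; }
// a arrives in a0/a1 and b in a2/a3; the fadd.d itself operates on FPR64
// registers, and the result is returned in a0/a1.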

lib/Target/RISCV/RISCVISelDAGToDAG.cpp

@@ -56,10 +56,14 @@ public:
private:
void doPeepholeLoadStoreADDI();
void doPeepholeBuildPairF64SplitF64();
};
}
void RISCVDAGToDAGISel::PostprocessISelDAG() { doPeepholeLoadStoreADDI(); }
void RISCVDAGToDAGISel::PostprocessISelDAG() {
doPeepholeLoadStoreADDI();
doPeepholeBuildPairF64SplitF64();
}
void RISCVDAGToDAGISel::Select(SDNode *Node) {
unsigned Opcode = Node->getOpcode();
@@ -212,6 +216,42 @@ void RISCVDAGToDAGISel::doPeepholeLoadStoreADDI() {
}
}
// Remove redundant BuildPairF64+SplitF64 pairs. i.e. cases where an f64 is
// built of two i32 values, only to be split apart again. This must be done
// here as a peephole optimisation as the DAG has not been fully legalized at
// the point BuildPairF64/SplitF64 nodes are created in RISCVISelLowering, so
// some nodes would not yet have been replaced with libcalls.
void RISCVDAGToDAGISel::doPeepholeBuildPairF64SplitF64() {
SelectionDAG::allnodes_iterator Position(CurDAG->getRoot().getNode());
++Position;
while (Position != CurDAG->allnodes_begin()) {
SDNode *N = &*--Position;
// Skip dead nodes and any nodes other than SplitF64Pseudo.
if (N->use_empty() || !N->isMachineOpcode() ||
!(N->getMachineOpcode() == RISCV::SplitF64Pseudo))
continue;
// If the operand to SplitF64 is a BuildPairF64, the split operation is
// redundant. Just use the operands to BuildPairF64 as the result.
SDValue F64Val = N->getOperand(0);
if (F64Val.isMachineOpcode() &&
F64Val.getMachineOpcode() == RISCV::BuildPairF64Pseudo) {
DEBUG(dbgs() << "Removing redundant SplitF64Pseudo and replacing uses "
"with BuildPairF64Pseudo operands:\n");
DEBUG(dbgs() << "N: ");
DEBUG(N->dump(CurDAG));
DEBUG(dbgs() << "F64Val: ");
DEBUG(F64Val->dump(CurDAG));
DEBUG(dbgs() << "\n");
SDValue From[] = {SDValue(N, 0), SDValue(N, 1)};
SDValue To[] = {F64Val.getOperand(0), F64Val.getOperand(1)};
CurDAG->ReplaceAllUsesOfValuesWith(From, To, 2);
}
}
CurDAG->RemoveDeadNodes();
}
// This pass converts a legalized DAG into a RISCV-specific DAG, ready
// for instruction scheduling.
FunctionPass *llvm::createRISCVISelDag(RISCVTargetMachine &TM) {
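To make the peephole comment concrete, a hypothetical source-level case that produces the redundant pair (ilp32 soft-float ABI assumed): the incoming a0/a1 pair is combined into an f64 by BuildPairF64Pseudo only for the return lowering to split it straight back apart with SplitF64Pseudo, so the peephole lets the two i32 values flow through directly instead of round-tripping through the stack slot used by the custom inserters.

// Hypothetical example, not part of this patch.
double passthrough(double x) { return x; }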

lib/Target/RISCV/RISCVISelLowering.cpp

@@ -47,6 +47,8 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
if (Subtarget.hasStdExtF())
addRegisterClass(MVT::f32, &RISCV::FPR32RegClass);
if (Subtarget.hasStdExtD())
addRegisterClass(MVT::f64, &RISCV::FPR64RegClass);
// Compute derived properties from the register classes.
computeRegisterProperties(STI.getRegisterInfo());
@@ -119,6 +121,9 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
setOperationAction(ISD::BR_CC, MVT::f32, Expand);
}
if (Subtarget.hasStdExtD())
setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
setOperationAction(ISD::GlobalAddress, XLenVT, Custom);
setOperationAction(ISD::BlockAddress, XLenVT, Custom);
setOperationAction(ISD::ConstantPool, XLenVT, Custom);
@@ -392,18 +397,83 @@ SDValue RISCVTargetLowering::LowerRETURNADDR(SDValue Op,
return DAG.getCopyFromReg(DAG.getEntryNode(), DL, Reg, XLenVT);
}
static MachineBasicBlock *emitSplitF64Pseudo(MachineInstr &MI,
MachineBasicBlock *BB) {
assert(MI.getOpcode() == RISCV::SplitF64Pseudo && "Unexpected instruction");
MachineFunction &MF = *BB->getParent();
DebugLoc DL = MI.getDebugLoc();
const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
unsigned LoReg = MI.getOperand(0).getReg();
unsigned HiReg = MI.getOperand(1).getReg();
unsigned SrcReg = MI.getOperand(2).getReg();
const TargetRegisterClass *SrcRC = &RISCV::FPR64RegClass;
int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex();
TII.storeRegToStackSlot(*BB, MI, SrcReg, MI.getOperand(2).isKill(), FI, SrcRC,
RI);
MachineMemOperand *MMO =
MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(MF, FI),
MachineMemOperand::MOLoad, 8, 8);
BuildMI(*BB, MI, DL, TII.get(RISCV::LW), LoReg)
.addFrameIndex(FI)
.addImm(0)
.addMemOperand(MMO);
BuildMI(*BB, MI, DL, TII.get(RISCV::LW), HiReg)
.addFrameIndex(FI)
.addImm(4)
.addMemOperand(MMO);
MI.eraseFromParent(); // The pseudo instruction is gone now.
return BB;
}
static MachineBasicBlock *emitBuildPairF64Pseudo(MachineInstr &MI,
MachineBasicBlock *BB) {
assert(MI.getOpcode() == RISCV::BuildPairF64Pseudo &&
"Unexpected instruction");
MachineFunction &MF = *BB->getParent();
DebugLoc DL = MI.getDebugLoc();
const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
unsigned DstReg = MI.getOperand(0).getReg();
unsigned LoReg = MI.getOperand(1).getReg();
unsigned HiReg = MI.getOperand(2).getReg();
const TargetRegisterClass *DstRC = &RISCV::FPR64RegClass;
int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex();
MachineMemOperand *MMO =
MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(MF, FI),
MachineMemOperand::MOStore, 8, 8);
BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
.addReg(LoReg, getKillRegState(MI.getOperand(1).isKill()))
.addFrameIndex(FI)
.addImm(0)
.addMemOperand(MMO);
BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
.addReg(HiReg, getKillRegState(MI.getOperand(2).isKill()))
.addFrameIndex(FI)
.addImm(4)
.addMemOperand(MMO);
TII.loadRegFromStackSlot(*BB, MI, DstReg, FI, DstRC, RI);
MI.eraseFromParent(); // The pseudo instruction is gone now.
return BB;
}
MachineBasicBlock *
RISCVTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
MachineBasicBlock *BB) const {
const TargetInstrInfo &TII = *BB->getParent()->getSubtarget().getInstrInfo();
DebugLoc DL = MI.getDebugLoc();
switch (MI.getOpcode()) {
default:
llvm_unreachable("Unexpected instr type to insert");
case RISCV::Select_GPR_Using_CC_GPR:
case RISCV::Select_FPR32_Using_CC_GPR:
break;
case RISCV::BuildPairF64Pseudo:
return emitBuildPairF64Pseudo(MI, BB);
case RISCV::SplitF64Pseudo:
return emitSplitF64Pseudo(MI, BB);
}
// To "insert" a SELECT instruction, we actually have to insert the triangle
@@ -417,7 +487,9 @@ RISCVTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
// | IfFalseMBB
// | /
// TailMBB
const TargetInstrInfo &TII = *BB->getParent()->getSubtarget().getInstrInfo();
const BasicBlock *LLVM_BB = BB->getBasicBlock();
DebugLoc DL = MI.getDebugLoc();
MachineFunction::iterator I = ++BB->getIterator();
MachineBasicBlock *HeadMBB = BB;
@@ -542,7 +614,6 @@ static bool CC_RISCV(const DataLayout &DL, unsigned ValNo, MVT ValVT, MVT LocVT,
LocVT = MVT::i32;
LocInfo = CCValAssign::BCvt;
}
assert(LocVT == XLenVT && "Unexpected LocVT");
// Any return value split in to more than two values can't be returned
// directly.
@@ -572,6 +643,28 @@ static bool CC_RISCV(const DataLayout &DL, unsigned ValNo, MVT ValVT, MVT LocVT,
assert(PendingLocs.size() == PendingArgFlags.size() &&
"PendingLocs and PendingArgFlags out of sync");
// Handle passing f64 on RV32D with a soft float ABI.
if (XLen == 32 && ValVT == MVT::f64) {
assert(!ArgFlags.isSplit() && PendingLocs.empty() &&
"Can't lower f64 if it is split");
// Depending on available argument GPRS, f64 may be passed in a pair of
// GPRs, split between a GPR and the stack, or passed completely on the
// stack. LowerCall/LowerFormalArguments/LowerReturn must recognise these
// cases.
unsigned Reg = State.AllocateReg(ArgGPRs);
LocVT = MVT::i32;
if (!Reg) {
unsigned StackOffset = State.AllocateStack(8, 8);
State.addLoc(
CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
return false;
}
if (!State.AllocateReg(ArgGPRs))
State.AllocateStack(4, 4);
State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
return false;
}
// Split arguments might be passed indirectly, so keep track of the pending
// values.
if (ArgFlags.isSplit() || !PendingLocs.empty()) {
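A standalone model of the three cases described in the comment above (hypothetical helper, not LLVM code):

// Mirrors the allocation order in CC_RISCV: try one GPR for the low half,
// then a second for the high half, falling back to the stack.
enum class F64Loc { TwoGPRs, GPRAndStack, Stack };
F64Loc classifyF64(unsigned FreeArgGPRs) {
  if (FreeArgGPRs == 0)
    return F64Loc::Stack;       // whole f64 passed on the stack (8 bytes)
  if (FreeArgGPRs == 1)
    return F64Loc::GPRAndStack; // low half in a GPR, high half on the stack
  return F64Loc::TwoGPRs;       // both halves in GPRs
}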
@@ -733,6 +826,43 @@ static SDValue unpackFromMemLoc(SelectionDAG &DAG, SDValue Chain,
return Val;
}
static SDValue unpackF64OnRV32DSoftABI(SelectionDAG &DAG, SDValue Chain,
const CCValAssign &VA, const SDLoc &DL) {
assert(VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64 &&
"Unexpected VA");
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo &MFI = MF.getFrameInfo();
MachineRegisterInfo &RegInfo = MF.getRegInfo();
if (VA.isMemLoc()) {
// f64 is passed on the stack.
int FI = MFI.CreateFixedObject(8, VA.getLocMemOffset(), /*Immutable=*/true);
SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
return DAG.getLoad(MVT::f64, DL, Chain, FIN,
MachinePointerInfo::getFixedStack(MF, FI));
}
assert(VA.isRegLoc() && "Expected register VA assignment");
unsigned LoVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
RegInfo.addLiveIn(VA.getLocReg(), LoVReg);
SDValue Lo = DAG.getCopyFromReg(Chain, DL, LoVReg, MVT::i32);
SDValue Hi;
if (VA.getLocReg() == RISCV::X17) {
// Second half of f64 is passed on the stack.
int FI = MFI.CreateFixedObject(4, 0, /*Immutable=*/true);
SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
Hi = DAG.getLoad(MVT::i32, DL, Chain, FIN,
MachinePointerInfo::getFixedStack(MF, FI));
} else {
// Second half of f64 is passed in another GPR.
unsigned HiVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
RegInfo.addLiveIn(VA.getLocReg() + 1, HiVReg);
Hi = DAG.getCopyFromReg(Chain, DL, HiVReg, MVT::i32);
}
return DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, Lo, Hi);
}
// Transform physical registers into virtual registers.
SDValue RISCVTargetLowering::LowerFormalArguments(
SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
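The RISCV::X17 special case above corresponds to signatures like the following (hypothetical example): seven preceding i32 arguments occupy a0-a6, so the double's low half lands in a7 (X17) and its high half is passed on the stack, which is why the code reloads it from a fixed stack object.

// Hypothetical example, not part of this patch.
double take_split(int a, int b, int c, int d, int e, int f, int g, double h) {
  return h; // low half of h arrives in a7, high half on the caller's stack
}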
@@ -763,7 +893,11 @@ SDValue RISCVTargetLowering::LowerFormalArguments(
CCValAssign &VA = ArgLocs[i];
assert(VA.getLocVT() == XLenVT && "Unhandled argument type");
SDValue ArgValue;
if (VA.isRegLoc())
// Passing f64 on RV32D with a soft float ABI must be handled as a special
// case.
if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64)
ArgValue = unpackF64OnRV32DSoftABI(DAG, Chain, VA, DL);
else if (VA.isRegLoc())
ArgValue = unpackFromRegLoc(DAG, Chain, VA, DL);
else
ArgValue = unpackFromMemLoc(DAG, Chain, VA, DL);
@@ -917,6 +1051,37 @@ SDValue RISCVTargetLowering::LowerCall(CallLoweringInfo &CLI,
SDValue ArgValue = OutVals[i];
ISD::ArgFlagsTy Flags = Outs[i].Flags;
// Handle passing f64 on RV32D with a soft float ABI as a special case.
bool IsF64OnRV32DSoftABI =
VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64;
if (IsF64OnRV32DSoftABI && VA.isRegLoc()) {
SDValue SplitF64 = DAG.getNode(
RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32), ArgValue);
SDValue Lo = SplitF64.getValue(0);
SDValue Hi = SplitF64.getValue(1);
unsigned RegLo = VA.getLocReg();
RegsToPass.push_back(std::make_pair(RegLo, Lo));
if (RegLo == RISCV::X17) {
// Second half of f64 is passed on the stack.
// Work out the address of the stack slot.
if (!StackPtr.getNode())
StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT);
// Emit the store.
MemOpChains.push_back(
DAG.getStore(Chain, DL, Hi, StackPtr, MachinePointerInfo()));
} else {
// Second half of f64 is passed in another GPR.
unsigned RegHigh = RegLo + 1;
RegsToPass.push_back(std::make_pair(RegHigh, Hi));
}
continue;
}
// IsF64OnRV32DSoftABI && VA.isMemLoc() is handled below in the same way
// as any other MemLoc.
// Promote the value if needed.
// For now, only handle fully promoted and indirect arguments.
switch (VA.getLocInfo()) {
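Caller-side counterpart of the block above (hypothetical source): the f64 argument is split with RISCVISD::SplitF64 and its halves are copied into a0 and a1 (or a0 plus a stack store in the a7/X17 case) before the call.

double scale(double x);                           // assumed external callee
double call_scale(double x) { return scale(x); }  // x split into a0/a1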
@@ -1033,11 +1198,21 @@ SDValue RISCVTargetLowering::LowerCall(CallLoweringInfo &CLI,
// Copy all of the result registers out of their specified physreg.
for (auto &VA : RVLocs) {
// Copy the value out, gluing the copy to the end of the call sequence.
SDValue RetValue = DAG.getCopyFromReg(Chain, DL, VA.getLocReg(),
VA.getLocVT(), Glue);
// Copy the value out
SDValue RetValue =
DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), Glue);
// Glue the RetValue to the end of the call sequence
Chain = RetValue.getValue(1);
Glue = RetValue.getValue(2);
if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
assert(VA.getLocReg() == ArgGPRs[0] && "Unexpected reg assignment");
SDValue RetValue2 =
DAG.getCopyFromReg(Chain, DL, ArgGPRs[1], MVT::i32, Glue);
Chain = RetValue2.getValue(1);
Glue = RetValue2.getValue(2);
RetValue = DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, RetValue,
RetValue2);
}
switch (VA.getLocInfo()) {
default:
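On the result side (again a hypothetical caller): an f64 return value comes back in a0 (low half) and a1 (high half), and the two glued CopyFromReg values above are recombined with RISCVISD::BuildPairF64 before being used as an f64.

double get_d(void);                                // assumed external callee
double use_result(void) { return get_d() + 1.0; }  // result read from a0/a1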
@@ -1102,7 +1277,7 @@ RISCVTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
analyzeOutputArgs(DAG.getMachineFunction(), CCInfo, Outs, /*IsRet=*/true,
nullptr);
SDValue Flag;
SDValue Glue;
SmallVector<SDValue, 4> RetOps(1, Chain);
// Copy the result values into the output registers.
@@ -1110,20 +1285,38 @@ RISCVTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
SDValue Val = OutVals[i];
CCValAssign &VA = RVLocs[i];
assert(VA.isRegLoc() && "Can only return in registers!");
Val = packIntoRegLoc(DAG, Val, VA, DL);
Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Val, Flag);
if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
// Handle returning f64 on RV32D with a soft float ABI.
assert(VA.isRegLoc() && "Expected return via registers");
SDValue SplitF64 = DAG.getNode(RISCVISD::SplitF64, DL,
DAG.getVTList(MVT::i32, MVT::i32), Val);
SDValue Lo = SplitF64.getValue(0);
SDValue Hi = SplitF64.getValue(1);
unsigned RegLo = VA.getLocReg();
unsigned RegHi = RegLo + 1;
Chain = DAG.getCopyToReg(Chain, DL, RegLo, Lo, Glue);
Glue = Chain.getValue(1);
RetOps.push_back(DAG.getRegister(RegLo, MVT::i32));
Chain = DAG.getCopyToReg(Chain, DL, RegHi, Hi, Glue);
Glue = Chain.getValue(1);
RetOps.push_back(DAG.getRegister(RegHi, MVT::i32));
} else {
// Handle a 'normal' return.
Val = packIntoRegLoc(DAG, Val, VA, DL);
Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Val, Glue);
// Guarantee that all emitted copies are stuck together.
Flag = Chain.getValue(1);
RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
// Guarantee that all emitted copies are stuck together.
Glue = Chain.getValue(1);
RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
}
}
RetOps[0] = Chain; // Update chain.
// Add the flag if we have it.
if (Flag.getNode()) {
RetOps.push_back(Flag);
// Add the glue node if we have it.
if (Glue.getNode()) {
RetOps.push_back(Glue);
}
return DAG.getNode(RISCVISD::RET_FLAG, DL, MVT::Other, RetOps);
@@ -1139,6 +1332,10 @@ const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const {
return "RISCVISD::CALL";
case RISCVISD::SELECT_CC:
return "RISCVISD::SELECT_CC";
case RISCVISD::BuildPairF64:
return "RISCVISD::BuildPairF64";
case RISCVISD::SplitF64:
return "RISCVISD::SplitF64";
}
return nullptr;
}

lib/Target/RISCV/RISCVISelLowering.h

@@ -26,7 +26,9 @@ enum NodeType : unsigned {
FIRST_NUMBER = ISD::BUILTIN_OP_END,
RET_FLAG,
CALL,
SELECT_CC
SELECT_CC,
BuildPairF64,
SplitF64
};
}

lib/Target/RISCV/RISCVInstrInfo.cpp

@@ -68,6 +68,8 @@ void RISCVInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
Opcode = RISCV::SW;
else if (RISCV::FPR32RegClass.hasSubClassEq(RC))
Opcode = RISCV::FSW;
else if (RISCV::FPR64RegClass.hasSubClassEq(RC))
Opcode = RISCV::FSD;
else
llvm_unreachable("Can't store this register to stack slot");
@@ -92,6 +94,8 @@ void RISCVInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
Opcode = RISCV::LW;
else if (RISCV::FPR32RegClass.hasSubClassEq(RC))
Opcode = RISCV::FLW;
else if (RISCV::FPR64RegClass.hasSubClassEq(RC))
Opcode = RISCV::FLD;
else
llvm_unreachable("Can't load this register from stack slot");

lib/Target/RISCV/RISCVInstrInfoD.td

@@ -12,6 +12,20 @@
//
//===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//
// RISC-V specific DAG Nodes.
//===----------------------------------------------------------------------===//
def SDT_RISCVBuildPairF64 : SDTypeProfile<1, 2, [SDTCisVT<0, f64>,
SDTCisVT<1, i32>,
SDTCisSameAs<1, 2>]>;
def SDT_RISCVSplitF64 : SDTypeProfile<2, 1, [SDTCisVT<0, i32>,
SDTCisVT<1, i32>,
SDTCisVT<2, f64>]>;
def RISCVBuildPairF64 : SDNode<"RISCVISD::BuildPairF64", SDT_RISCVBuildPairF64>;
def RISCVSplitF64 : SDNode<"RISCVISD::SplitF64", SDT_RISCVSplitF64>;
//===----------------------------------------------------------------------===//
// Instruction Class Templates
//===----------------------------------------------------------------------===//
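These type profiles mirror the node creation calls on the C++ side; for reference, the corresponding uses in RISCVISelLowering.cpp look roughly like this (variable names as in the lowering code): BuildPairF64 produces one f64 result from two i32 operands, and SplitF64 produces two i32 results from one f64 operand.

SDValue Pair = DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, Lo, Hi);
SDValue Halves = DAG.getNode(RISCVISD::SplitF64, DL,
                             DAG.getVTList(MVT::i32, MVT::i32), Pair);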
@@ -172,3 +186,40 @@ def : InstAlias<"fmv.d $rd, $rs", (FSGNJ_D FPR64:$rd, FPR64:$rs, FPR64:$rs)>;
def : InstAlias<"fabs.d $rd, $rs", (FSGNJX_D FPR64:$rd, FPR64:$rs, FPR64:$rs)>;
def : InstAlias<"fneg.d $rd, $rs", (FSGNJN_D FPR64:$rd, FPR64:$rs, FPR64:$rs)>;
} // Predicates = [HasStdExtD]
//===----------------------------------------------------------------------===//
// Pseudo-instructions and codegen patterns
//===----------------------------------------------------------------------===//
class PatFpr64Fpr64DynFrm<SDPatternOperator OpNode, RVInstRFrm Inst>
: Pat<(OpNode FPR64:$rs1, FPR64:$rs2), (Inst $rs1, $rs2, 0b111)>;
let Predicates = [HasStdExtD] in {
/// Float arithmetic operations
def : PatFpr64Fpr64DynFrm<fadd, FADD_D>;
/// Loads
defm : LdPat<load, FLD>;
/// Stores
defm : StPat<store, FSD, FPR64>;
/// Pseudo-instructions needed for the soft-float ABI with RV32D
// Moves two GPRs to an FPR.
let usesCustomInserter = 1 in
def BuildPairF64Pseudo
: Pseudo<(outs FPR64:$dst), (ins GPR:$src1, GPR:$src2),
[(set FPR64:$dst, (RISCVBuildPairF64 GPR:$src1, GPR:$src2))]>;
// Moves an FPR to two GPRs.
let usesCustomInserter = 1 in
def SplitF64Pseudo
: Pseudo<(outs GPR:$dst1, GPR:$dst2), (ins FPR64:$src),
[(set GPR:$dst1, GPR:$dst2, (RISCVSplitF64 FPR64:$src))]>;
} // Predicates = [HasStdExtD]

lib/Target/RISCV/RISCVMachineFunctionInfo.h

@@ -14,6 +14,7 @@
#ifndef LLVM_LIB_TARGET_RISCV_RISCVMACHINEFUNCTIONINFO_H
#define LLVM_LIB_TARGET_RISCV_RISCVMACHINEFUNCTIONINFO_H
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
namespace llvm {
@@ -21,22 +22,32 @@ namespace llvm {
/// RISCVMachineFunctionInfo - This class is derived from MachineFunctionInfo
/// and contains private RISCV-specific information for each MachineFunction.
class RISCVMachineFunctionInfo : public MachineFunctionInfo {
private:
MachineFunction &MF;
/// FrameIndex for start of varargs area
int VarArgsFrameIndex = 0;
/// Size of the save area used for varargs
int VarArgsSaveSize = 0;
/// FrameIndex used for transferring values between 64-bit FPRs and a pair
/// of 32-bit GPRs via the stack.
int MoveF64FrameIndex = -1;
public:
RISCVMachineFunctionInfo() = default;
// RISCVMachineFunctionInfo() = default;
explicit RISCVMachineFunctionInfo(MachineFunction &MF) {}
RISCVMachineFunctionInfo(MachineFunction &MF) : MF(MF) {}
int getVarArgsFrameIndex() const { return VarArgsFrameIndex; }
void setVarArgsFrameIndex(int Index) { VarArgsFrameIndex = Index; }
unsigned getVarArgsSaveSize() const { return VarArgsSaveSize; }
void setVarArgsSaveSize(int Size) { VarArgsSaveSize = Size; }
int getMoveF64FrameIndex() {
if (MoveF64FrameIndex == -1)
MoveF64FrameIndex = MF.getFrameInfo().CreateStackObject(8, 8, false);
return MoveF64FrameIndex;
}
};
} // end namespace llvm
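As used by emitSplitF64Pseudo and emitBuildPairF64Pseudo above, the frame index is created lazily on first request and then shared by every f64/GPR-pair transfer in the function:

int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex();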