[GlobalISel] Tidy up unnecessary calls to createGenericVirtualRegister

Summary: As a side effect, some redundant copies of constant values are
removed by CSEMIRBuilder.

Reviewers: aemerson, arsenm, dsanders, aditya_nandakumar

Subscribers: sdardis, jvesely, wdng, nhaehnle, rovka, hiraditya, jrtc27, atanasyan, volkan, Petar.Avramovic, kerbowa, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D73789
commit 2a1b5af299 (parent 6c7efe2eec)
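The change is mechanical throughout the diff: the MachineIRBuilder::build* members take a DstOp for the destination, and a DstOp can be constructed from either an existing Register or a plain LLT. Passing the LLT lets the builder create the generic virtual register itself, so the separate createGenericVirtualRegister call disappears; when building through CSEMIRBuilder, an equivalent existing instruction can then be reused directly instead of being copied into the pre-created register, which is why the summary notes that redundant copies of constants go away. A minimal before/after sketch of the pattern (illustrative only; it assumes a MachineIRBuilder B, a MachineRegisterInfo MRI, and an int64_t Offset in scope, as in the functions touched below):

    // Before: pre-create the destination vreg, then build into it.
    Register OffsetReg = MRI.createGenericVirtualRegister(LLT::scalar(64));
    B.buildConstant(OffsetReg, Offset);
    // A CSE-ing builder cannot hand back an existing identical G_CONSTANT
    // here; it must emit a COPY into the caller-chosen OffsetReg.

    // After: pass the LLT as the DstOp and let the builder pick the vreg.
    Register OffsetReg = B.buildConstant(LLT::scalar(64), Offset).getReg(0);
    // A CSE-ing builder may now return the existing G_CONSTANT's def directly.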
@@ -847,8 +847,8 @@ public:
   MachineInstrBuilder buildConcatVectors(const DstOp &Res,
                                          ArrayRef<Register> Ops);

-  MachineInstrBuilder buildInsert(Register Res, Register Src,
-                                  Register Op, unsigned Index);
+  MachineInstrBuilder buildInsert(const DstOp &Res, const SrcOp &Src,
+                                  const SrcOp &Op, unsigned Index);

   /// Build and insert either a G_INTRINSIC (if \p HasSideEffects is false) or
   /// G_INTRINSIC_W_SIDE_EFFECTS instruction. Its first operand will be the
@@ -650,22 +650,20 @@ MachineIRBuilder::buildConcatVectors(const DstOp &Res, ArrayRef<Register> Ops) {
   return buildInstr(TargetOpcode::G_CONCAT_VECTORS, Res, TmpVec);
 }

-MachineInstrBuilder MachineIRBuilder::buildInsert(Register Res, Register Src,
-                                                  Register Op, unsigned Index) {
-  assert(Index + getMRI()->getType(Op).getSizeInBits() <=
-             getMRI()->getType(Res).getSizeInBits() &&
+MachineInstrBuilder MachineIRBuilder::buildInsert(const DstOp &Res,
+                                                  const SrcOp &Src,
+                                                  const SrcOp &Op,
+                                                  unsigned Index) {
+  assert(Index + Op.getLLTTy(*getMRI()).getSizeInBits() <=
+             Res.getLLTTy(*getMRI()).getSizeInBits() &&
          "insertion past the end of a register");

-  if (getMRI()->getType(Res).getSizeInBits() ==
-      getMRI()->getType(Op).getSizeInBits()) {
+  if (Res.getLLTTy(*getMRI()).getSizeInBits() ==
+      Op.getLLTTy(*getMRI()).getSizeInBits()) {
     return buildCast(Res, Op);
   }

-  return buildInstr(TargetOpcode::G_INSERT)
-      .addDef(Res)
-      .addUse(Src)
-      .addUse(Op)
-      .addImm(Index);
+  return buildInstr(TargetOpcode::G_INSERT, Res, {Src, Op, uint64_t(Index)});
 }

 MachineInstrBuilder MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
@@ -62,10 +62,9 @@ struct IncomingArgHandler : public CallLowering::ValueHandler {
     auto &MFI = MIRBuilder.getMF().getFrameInfo();
     int FI = MFI.CreateFixedObject(Size, Offset, true);
     MPO = MachinePointerInfo::getFixedStack(MIRBuilder.getMF(), FI);
-    Register AddrReg = MRI.createGenericVirtualRegister(LLT::pointer(0, 64));
-    MIRBuilder.buildFrameIndex(AddrReg, FI);
+    auto AddrReg = MIRBuilder.buildFrameIndex(LLT::pointer(0, 64), FI);
     StackUsed = std::max(StackUsed, Size + Offset);
-    return AddrReg;
+    return AddrReg.getReg(0);
   }

   void assignValueToReg(Register ValVReg, Register PhysReg,
@@ -147,23 +146,19 @@ struct OutgoingArgHandler : public CallLowering::ValueHandler {
     if (IsTailCall) {
       Offset += FPDiff;
       int FI = MF.getFrameInfo().CreateFixedObject(Size, Offset, true);
-      Register FIReg = MRI.createGenericVirtualRegister(p0);
-      MIRBuilder.buildFrameIndex(FIReg, FI);
+      auto FIReg = MIRBuilder.buildFrameIndex(p0, FI);
       MPO = MachinePointerInfo::getFixedStack(MF, FI);
-      return FIReg;
+      return FIReg.getReg(0);
     }

-    Register SPReg = MRI.createGenericVirtualRegister(p0);
-    MIRBuilder.buildCopy(SPReg, Register(AArch64::SP));
+    auto SPReg = MIRBuilder.buildCopy(p0, Register(AArch64::SP));

-    Register OffsetReg = MRI.createGenericVirtualRegister(s64);
-    MIRBuilder.buildConstant(OffsetReg, Offset);
+    auto OffsetReg = MIRBuilder.buildConstant(s64, Offset);

-    Register AddrReg = MRI.createGenericVirtualRegister(p0);
-    MIRBuilder.buildPtrAdd(AddrReg, SPReg, OffsetReg);
+    auto AddrReg = MIRBuilder.buildPtrAdd(p0, SPReg, OffsetReg);

     MPO = MachinePointerInfo::getStack(MF, Offset);
-    return AddrReg;
+    return AddrReg.getReg(0);
   }

   void assignValueToReg(Register ValVReg, Register PhysReg,
@@ -709,12 +709,11 @@ bool AArch64LegalizerInfo::legalizeLoadStore(
   const LLT NewTy = LLT::vector(ValTy.getNumElements(), PtrSize);
   auto &MMO = **MI.memoperands_begin();
   if (MI.getOpcode() == TargetOpcode::G_STORE) {
-    auto Bitcast = MIRBuilder.buildBitcast({NewTy}, {ValReg});
+    auto Bitcast = MIRBuilder.buildBitcast(NewTy, ValReg);
     MIRBuilder.buildStore(Bitcast.getReg(0), MI.getOperand(1), MMO);
   } else {
-    Register NewReg = MRI.createGenericVirtualRegister(NewTy);
-    auto NewLoad = MIRBuilder.buildLoad(NewReg, MI.getOperand(1), MMO);
-    MIRBuilder.buildBitcast({ValReg}, {NewLoad});
+    auto NewLoad = MIRBuilder.buildLoad(NewTy, MI.getOperand(1), MMO);
+    MIRBuilder.buildBitcast(ValReg, NewLoad);
   }
   MI.eraseFromParent();
   return true;
@@ -733,21 +732,19 @@ bool AArch64LegalizerInfo::legalizeVaArg(MachineInstr &MI,
   LLT IntPtrTy = LLT::scalar(PtrTy.getSizeInBits());

   const unsigned PtrSize = PtrTy.getSizeInBits() / 8;
-  Register List = MRI.createGenericVirtualRegister(PtrTy);
-  MIRBuilder.buildLoad(
-      List, ListPtr,
+  auto List = MIRBuilder.buildLoad(
+      PtrTy, ListPtr,
       *MF.getMachineMemOperand(MachinePointerInfo(), MachineMemOperand::MOLoad,
                                PtrSize, /* Align = */ PtrSize));

-  Register DstPtr;
+  MachineInstrBuilder DstPtr;
   if (Align > PtrSize) {
     // Realign the list to the actual required alignment.
     auto AlignMinus1 = MIRBuilder.buildConstant(IntPtrTy, Align - 1);

     auto ListTmp = MIRBuilder.buildPtrAdd(PtrTy, List, AlignMinus1.getReg(0));

-    DstPtr = MRI.createGenericVirtualRegister(PtrTy);
-    MIRBuilder.buildPtrMask(DstPtr, ListTmp, Log2_64(Align));
+    DstPtr = MIRBuilder.buildPtrMask(PtrTy, ListTmp, Log2_64(Align));
   } else
     DstPtr = List;

@@ -84,11 +84,10 @@ struct IncomingArgHandler : public CallLowering::ValueHandler {
     auto &MFI = MIRBuilder.getMF().getFrameInfo();
     int FI = MFI.CreateFixedObject(Size, Offset, true);
     MPO = MachinePointerInfo::getFixedStack(MIRBuilder.getMF(), FI);
-    Register AddrReg = MRI.createGenericVirtualRegister(
-        LLT::pointer(AMDGPUAS::PRIVATE_ADDRESS, 32));
-    MIRBuilder.buildFrameIndex(AddrReg, FI);
+    auto AddrReg = MIRBuilder.buildFrameIndex(
+        LLT::pointer(AMDGPUAS::PRIVATE_ADDRESS, 32), FI);
     StackUsed = std::max(StackUsed, Size + Offset);
-    return AddrReg;
+    return AddrReg.getReg(0);
   }

   void assignValueToReg(Register ValVReg, Register PhysReg,
@@ -222,9 +221,6 @@ static void unpackRegsToOrigType(MachineIRBuilder &B,
                                  LLT PartTy) {
   assert(DstRegs.size() > 1 && "Nothing to unpack");

-  MachineFunction &MF = B.getMF();
-  MachineRegisterInfo &MRI = MF.getRegInfo();
-
   const unsigned SrcSize = SrcTy.getSizeInBits();
   const unsigned PartSize = PartTy.getSizeInBits();

@@ -248,12 +244,11 @@ static void unpackRegsToOrigType(MachineIRBuilder &B,
   LLT BigTy = getMultipleType(PartTy, NumRoundedParts);
   auto ImpDef = B.buildUndef(BigTy);

-  Register BigReg = MRI.createGenericVirtualRegister(BigTy);
-  B.buildInsert(BigReg, ImpDef.getReg(0), SrcReg, 0).getReg(0);
+  auto Big = B.buildInsert(BigTy, ImpDef.getReg(0), SrcReg, 0).getReg(0);

   int64_t Offset = 0;
   for (unsigned i = 0, e = DstRegs.size(); i != e; ++i, Offset += PartSize)
-    B.buildExtract(DstRegs[i], BigReg, Offset);
+    B.buildExtract(DstRegs[i], Big, Offset);
 }

 /// Lower the return value for the already existing \p Ret. This assumes that
@@ -348,17 +343,13 @@ Register AMDGPUCallLowering::lowerParameterPtr(MachineIRBuilder &B,
   const DataLayout &DL = F.getParent()->getDataLayout();
   PointerType *PtrTy = PointerType::get(ParamTy, AMDGPUAS::CONSTANT_ADDRESS);
   LLT PtrType = getLLTForType(*PtrTy, DL);
-  Register DstReg = MRI.createGenericVirtualRegister(PtrType);
   Register KernArgSegmentPtr =
     MFI->getPreloadedReg(AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR);
   Register KernArgSegmentVReg = MRI.getLiveInVirtReg(KernArgSegmentPtr);

-  Register OffsetReg = MRI.createGenericVirtualRegister(LLT::scalar(64));
-  B.buildConstant(OffsetReg, Offset);
+  auto OffsetReg = B.buildConstant(LLT::scalar(64), Offset);

-  B.buildPtrAdd(DstReg, KernArgSegmentVReg, OffsetReg);
-
-  return DstReg;
+  return B.buildPtrAdd(PtrType, KernArgSegmentVReg, OffsetReg).getReg(0);
 }

 void AMDGPUCallLowering::lowerParameter(MachineIRBuilder &B,
@@ -1226,7 +1226,6 @@ Register AMDGPULegalizerInfo::getSegmentAperture(
         Offset << AMDGPU::Hwreg::OFFSET_SHIFT_ |
         WidthM1 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_;

-    Register ApertureReg = MRI.createGenericVirtualRegister(S32);
     Register GetReg = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);

     B.buildInstr(AMDGPU::S_GETREG_B32)
@@ -1235,9 +1234,7 @@ Register AMDGPULegalizerInfo::getSegmentAperture(
     MRI.setType(GetReg, S32);

     auto ShiftAmt = B.buildConstant(S32, WidthM1 + 1);
-    B.buildShl(ApertureReg, GetReg, ShiftAmt);
-
-    return ApertureReg;
+    return B.buildShl(S32, GetReg, ShiftAmt).getReg(0);
   }

   Register QueuePtr = MRI.createGenericVirtualRegister(
@@ -1261,12 +1258,10 @@ Register AMDGPULegalizerInfo::getSegmentAperture(
     4,
     MinAlign(64, StructOffset));

-  Register LoadResult = MRI.createGenericVirtualRegister(S32);
   Register LoadAddr;

   B.materializePtrAdd(LoadAddr, QueuePtr, LLT::scalar(64), StructOffset);
-  B.buildLoad(LoadResult, LoadAddr, *MMO);
-  return LoadResult;
+  return B.buildLoad(S32, LoadAddr, *MMO).getReg(0);
 }

 bool AMDGPULegalizerInfo::legalizeAddrSpaceCast(
@@ -1327,13 +1322,11 @@ bool AMDGPULegalizerInfo::legalizeAddrSpaceCast(
     auto SegmentNull = B.buildConstant(DstTy, NullVal);
     auto FlatNull = B.buildConstant(SrcTy, 0);

-    Register PtrLo32 = MRI.createGenericVirtualRegister(DstTy);
-
     // Extract low 32-bits of the pointer.
-    B.buildExtract(PtrLo32, Src, 0);
+    auto PtrLo32 = B.buildExtract(DstTy, Src, 0);

-    Register CmpRes = MRI.createGenericVirtualRegister(LLT::scalar(1));
-    B.buildICmp(CmpInst::ICMP_NE, CmpRes, Src, FlatNull.getReg(0));
+    auto CmpRes =
+        B.buildICmp(CmpInst::ICMP_NE, LLT::scalar(1), Src, FlatNull.getReg(0));
     B.buildSelect(Dst, CmpRes, PtrLo32, SegmentNull.getReg(0));

     MI.eraseFromParent();
@@ -1355,19 +1348,16 @@ bool AMDGPULegalizerInfo::legalizeAddrSpaceCast(
   if (!ApertureReg.isValid())
     return false;

-  Register CmpRes = MRI.createGenericVirtualRegister(LLT::scalar(1));
-  B.buildICmp(CmpInst::ICMP_NE, CmpRes, Src, SegmentNull.getReg(0));
-
-  Register BuildPtr = MRI.createGenericVirtualRegister(DstTy);
+  auto CmpRes =
+      B.buildICmp(CmpInst::ICMP_NE, LLT::scalar(1), Src, SegmentNull.getReg(0));

   // Coerce the type of the low half of the result so we can use merge_values.
-  Register SrcAsInt = MRI.createGenericVirtualRegister(S32);
-  B.buildPtrToInt(SrcAsInt, Src);
+  Register SrcAsInt = B.buildPtrToInt(S32, Src).getReg(0);

   // TODO: Should we allow mismatched types but matching sizes in merges to
   // avoid the ptrtoint?
-  B.buildMerge(BuildPtr, {SrcAsInt, ApertureReg});
-  B.buildSelect(Dst, CmpRes, BuildPtr, FlatNull.getReg(0));
+  auto BuildPtr = B.buildMerge(DstTy, {SrcAsInt, ApertureReg});
+  B.buildSelect(Dst, CmpRes, BuildPtr, FlatNull);

   MI.eraseFromParent();
   return true;
@@ -2281,8 +2271,6 @@ bool AMDGPULegalizerInfo::legalizeFDIV64(MachineInstr &MI,
     // Workaround a hardware bug on SI where the condition output from div_scale
     // is not usable.

-    Scale = MRI.createGenericVirtualRegister(S1);
-
     LLT S32 = LLT::scalar(32);

     auto NumUnmerge = B.buildUnmerge(S32, LHS);
@@ -2294,7 +2282,7 @@ bool AMDGPULegalizerInfo::legalizeFDIV64(MachineInstr &MI,
                               Scale1Unmerge.getReg(1));
     auto CmpDen = B.buildICmp(ICmpInst::ICMP_EQ, S1, DenUnmerge.getReg(1),
                               Scale0Unmerge.getReg(1));
-    B.buildXor(Scale, CmpNum, CmpDen);
+    Scale = B.buildXor(S1, CmpNum, CmpDen).getReg(0);
   } else {
     Scale = DivScale1.getReg(1);
   }
@@ -1180,8 +1180,7 @@ bool AMDGPURegisterBankInfo::applyMappingWideLoad(MachineInstr &MI,
   B.setInsertPt(*RepairInst->getParent(), RepairInst);

   for (unsigned DefIdx = 0, e = DefRegs.size(); DefIdx != e; ++DefIdx) {
-    Register IdxReg = MRI.createGenericVirtualRegister(LLT::scalar(32));
-    B.buildConstant(IdxReg, DefIdx);
+    Register IdxReg = B.buildConstant(LLT::scalar(32), DefIdx).getReg(0);
     MRI.setRegBank(IdxReg, AMDGPU::VGPRRegBank);
     B.buildExtractVectorElement(DefRegs[DefIdx], TmpReg, IdxReg);
   }
@@ -99,17 +99,14 @@ struct OutgoingValueHandler : public CallLowering::ValueHandler {

     LLT p0 = LLT::pointer(0, 32);
     LLT s32 = LLT::scalar(32);
-    Register SPReg = MRI.createGenericVirtualRegister(p0);
-    MIRBuilder.buildCopy(SPReg, Register(ARM::SP));
+    auto SPReg = MIRBuilder.buildCopy(p0, Register(ARM::SP));

-    Register OffsetReg = MRI.createGenericVirtualRegister(s32);
-    MIRBuilder.buildConstant(OffsetReg, Offset);
+    auto OffsetReg = MIRBuilder.buildConstant(s32, Offset);

-    Register AddrReg = MRI.createGenericVirtualRegister(p0);
-    MIRBuilder.buildPtrAdd(AddrReg, SPReg, OffsetReg);
+    auto AddrReg = MIRBuilder.buildPtrAdd(p0, SPReg, OffsetReg);

     MPO = MachinePointerInfo::getStack(MIRBuilder.getMF(), Offset);
-    return AddrReg;
+    return AddrReg.getReg(0);
   }

   void assignValueToReg(Register ValVReg, Register PhysReg,
@@ -299,11 +296,8 @@ struct IncomingValueHandler : public CallLowering::ValueHandler {
     int FI = MFI.CreateFixedObject(Size, Offset, true);
     MPO = MachinePointerInfo::getFixedStack(MIRBuilder.getMF(), FI);

-    Register AddrReg =
-        MRI.createGenericVirtualRegister(LLT::pointer(MPO.getAddrSpace(), 32));
-    MIRBuilder.buildFrameIndex(AddrReg, FI);
-
-    return AddrReg;
+    return MIRBuilder.buildFrameIndex(LLT::pointer(MPO.getAddrSpace(), 32), FI)
+        .getReg(0);
   }

   void assignValueToAddress(Register ValVReg, Register Addr, uint64_t Size,
@@ -318,8 +312,8 @@ struct IncomingValueHandler : public CallLowering::ValueHandler {
       Size = 4;
       assert(MRI.getType(ValVReg).isScalar() && "Only scalars supported atm");

-      auto LoadVReg = MRI.createGenericVirtualRegister(LLT::scalar(32));
-      buildLoad(LoadVReg, Addr, Size, /* Alignment */ 1, MPO);
+      auto LoadVReg =
+          buildLoad(LLT::scalar(32), Addr, Size, /* Alignment */ 1, MPO);
       MIRBuilder.buildTrunc(ValVReg, LoadVReg);
     } else {
       // If the value is not extended, a simple load will suffice.
@@ -327,11 +321,11 @@ struct IncomingValueHandler : public CallLowering::ValueHandler {
     }
   }

-  void buildLoad(Register Val, Register Addr, uint64_t Size, unsigned Alignment,
-                 MachinePointerInfo &MPO) {
+  MachineInstrBuilder buildLoad(const DstOp &Res, Register Addr, uint64_t Size,
+                                unsigned Alignment, MachinePointerInfo &MPO) {
     auto MMO = MIRBuilder.getMF().getMachineMemOperand(
         MPO, MachineMemOperand::MOLoad, Size, Alignment);
-    MIRBuilder.buildLoad(Val, Addr, *MMO);
+    return MIRBuilder.buildLoad(Res, Addr, *MMO);
   }

   void assignValueToReg(Register ValVReg, Register PhysReg,
@@ -354,9 +348,7 @@ struct IncomingValueHandler : public CallLowering::ValueHandler {
       // We cannot create a truncating copy, nor a trunc of a physical register.
      // Therefore, we need to copy the content of the physical register into a
       // virtual one and then truncate that.
-      auto PhysRegToVReg =
-          MRI.createGenericVirtualRegister(LLT::scalar(LocSize));
-      MIRBuilder.buildCopy(PhysRegToVReg, PhysReg);
+      auto PhysRegToVReg = MIRBuilder.buildCopy(LLT::scalar(LocSize), PhysReg);
       MIRBuilder.buildTrunc(ValVReg, PhysRegToVReg);
     }
   }
@@ -445,8 +445,7 @@ bool ARMLegalizerInfo::legalizeCustom(MachineInstr &MI,
     } else {
       // We need to compare against 0.
       assert(CmpInst::isIntPredicate(ResultPred) && "Unsupported predicate");
-      auto Zero = MRI.createGenericVirtualRegister(LLT::scalar(32));
-      MIRBuilder.buildConstant(Zero, 0);
+      auto Zero = MIRBuilder.buildConstant(LLT::scalar(32), 0);
       MIRBuilder.buildICmp(ResultPred, ProcessedResult, LibcallResult, Zero);
     }
     Results.push_back(ProcessedResult);
@@ -110,10 +110,10 @@ private:
     MIRBuilder.getMBB().addLiveIn(PhysReg);
   }

-  void buildLoad(Register Val, const CCValAssign &VA) {
+  MachineInstrBuilder buildLoad(const DstOp &Res, const CCValAssign &VA) {
     MachineMemOperand *MMO;
     Register Addr = getStackAddress(VA, MMO);
-    MIRBuilder.buildLoad(Val, Addr, *MMO);
+    return MIRBuilder.buildLoad(Res, Addr, *MMO);
   }
 };

@@ -192,10 +192,7 @@ Register IncomingValueHandler::getStackAddress(const CCValAssign &VA,
   unsigned Align = MinAlign(TFL->getStackAlignment(), Offset);
   MMO = MF.getMachineMemOperand(MPO, MachineMemOperand::MOLoad, Size, Align);

-  Register AddrReg = MRI.createGenericVirtualRegister(LLT::pointer(0, 32));
-  MIRBuilder.buildFrameIndex(AddrReg, FI);
-
-  return AddrReg;
+  return MIRBuilder.buildFrameIndex(LLT::pointer(0, 32), FI).getReg(0);
 }

 void IncomingValueHandler::assignValueToAddress(Register ValVReg,
@@ -203,9 +200,8 @@ void IncomingValueHandler::assignValueToAddress(Register ValVReg,
   if (VA.getLocInfo() == CCValAssign::SExt ||
       VA.getLocInfo() == CCValAssign::ZExt ||
       VA.getLocInfo() == CCValAssign::AExt) {
-    Register LoadReg = MRI.createGenericVirtualRegister(LLT::scalar(32));
-    buildLoad(LoadReg, VA);
-    MIRBuilder.buildTrunc(ValVReg, LoadReg);
+    auto Load = buildLoad(LLT::scalar(32), VA);
+    MIRBuilder.buildTrunc(ValVReg, Load);
   } else
     buildLoad(ValVReg, VA);
 }
@@ -291,15 +287,12 @@ Register OutgoingValueHandler::getStackAddress(const CCValAssign &VA,

   LLT p0 = LLT::pointer(0, 32);
   LLT s32 = LLT::scalar(32);
-  Register SPReg = MRI.createGenericVirtualRegister(p0);
-  MIRBuilder.buildCopy(SPReg, Register(Mips::SP));
+  auto SPReg = MIRBuilder.buildCopy(p0, Register(Mips::SP));

-  Register OffsetReg = MRI.createGenericVirtualRegister(s32);
   unsigned Offset = VA.getLocMemOffset();
-  MIRBuilder.buildConstant(OffsetReg, Offset);
+  auto OffsetReg = MIRBuilder.buildConstant(s32, Offset);

-  Register AddrReg = MRI.createGenericVirtualRegister(p0);
-  MIRBuilder.buildPtrAdd(AddrReg, SPReg, OffsetReg);
+  auto AddrReg = MIRBuilder.buildPtrAdd(p0, SPReg, OffsetReg);

   MachinePointerInfo MPO =
       MachinePointerInfo::getStack(MIRBuilder.getMF(), Offset);
@@ -307,7 +300,7 @@ Register OutgoingValueHandler::getStackAddress(const CCValAssign &VA,
   unsigned Align = MinAlign(TFL->getStackAlignment(), Offset);
   MMO = MF.getMachineMemOperand(MPO, MachineMemOperand::MOStore, Size, Align);

-  return AddrReg;
+  return AddrReg.getReg(0);
 }

 void OutgoingValueHandler::assignValueToAddress(Register ValVReg,
@@ -323,19 +316,13 @@ Register OutgoingValueHandler::extendRegister(Register ValReg,
   LLT LocTy{VA.getLocVT()};
   switch (VA.getLocInfo()) {
   case CCValAssign::SExt: {
-    Register ExtReg = MRI.createGenericVirtualRegister(LocTy);
-    MIRBuilder.buildSExt(ExtReg, ValReg);
-    return ExtReg;
+    return MIRBuilder.buildSExt(LocTy, ValReg).getReg(0);
   }
   case CCValAssign::ZExt: {
-    Register ExtReg = MRI.createGenericVirtualRegister(LocTy);
-    MIRBuilder.buildZExt(ExtReg, ValReg);
-    return ExtReg;
+    return MIRBuilder.buildZExt(LocTy, ValReg).getReg(0);
   }
   case CCValAssign::AExt: {
-    Register ExtReg = MRI.createGenericVirtualRegister(LocTy);
-    MIRBuilder.buildAnyExt(ExtReg, ValReg);
-    return ExtReg;
+    return MIRBuilder.buildAnyExt(LocTy, ValReg).getReg(0);
   }
   // TODO : handle upper extends
   case CCValAssign::Full:
@@ -394,11 +394,10 @@ bool MipsLegalizerInfo::legalizeIntrinsic(MachineInstr &MI,
     return constrainSelectedInstRegOperands(*Trap, TII, TRI, RBI);
   }
   case Intrinsic::vacopy: {
-    Register Tmp = MRI.createGenericVirtualRegister(LLT::pointer(0, 32));
     MachinePointerInfo MPO;
-    MIRBuilder.buildLoad(Tmp, MI.getOperand(2),
-                         *MI.getMF()->getMachineMemOperand(
-                             MPO, MachineMemOperand::MOLoad, 4, 4));
+    auto Tmp = MIRBuilder.buildLoad(LLT::pointer(0, 32), MI.getOperand(2),
+                                    *MI.getMF()->getMachineMemOperand(
+                                        MPO, MachineMemOperand::MOLoad, 4, 4));
     MIRBuilder.buildStore(Tmp, MI.getOperand(1),
                           *MI.getMF()->getMachineMemOperand(
                               MPO, MachineMemOperand::MOStore, 4, 4));
@@ -108,17 +108,15 @@ struct OutgoingValueHandler : public CallLowering::ValueHandler {
                         MachinePointerInfo &MPO) override {
     LLT p0 = LLT::pointer(0, DL.getPointerSizeInBits(0));
     LLT SType = LLT::scalar(DL.getPointerSizeInBits(0));
-    Register SPReg = MRI.createGenericVirtualRegister(p0);
-    MIRBuilder.buildCopy(SPReg, STI.getRegisterInfo()->getStackRegister());
+    auto SPReg =
+        MIRBuilder.buildCopy(p0, STI.getRegisterInfo()->getStackRegister());

-    Register OffsetReg = MRI.createGenericVirtualRegister(SType);
-    MIRBuilder.buildConstant(OffsetReg, Offset);
+    auto OffsetReg = MIRBuilder.buildConstant(SType, Offset);

-    Register AddrReg = MRI.createGenericVirtualRegister(p0);
-    MIRBuilder.buildPtrAdd(AddrReg, SPReg, OffsetReg);
+    auto AddrReg = MIRBuilder.buildPtrAdd(p0, SPReg, OffsetReg);

     MPO = MachinePointerInfo::getStack(MIRBuilder.getMF(), Offset);
-    return AddrReg;
+    return AddrReg.getReg(0);
   }

   void assignValueToReg(Register ValVReg, Register PhysReg,
@@ -240,10 +238,9 @@ struct IncomingValueHandler : public CallLowering::ValueHandler {
     int FI = MFI.CreateFixedObject(Size, Offset, true);
     MPO = MachinePointerInfo::getFixedStack(MIRBuilder.getMF(), FI);

-    Register AddrReg = MRI.createGenericVirtualRegister(
-        LLT::pointer(0, DL.getPointerSizeInBits(0)));
-    MIRBuilder.buildFrameIndex(AddrReg, FI);
-    return AddrReg;
+    return MIRBuilder
+        .buildFrameIndex(LLT::pointer(0, DL.getPointerSizeInBits(0)), FI)
+        .getReg(0);
   }

   void assignValueToAddress(Register ValVReg, Register Addr, uint64_t Size,
@@ -12,8 +12,7 @@
 ; CHECK: [[GEP2:%[0-9]+]]:_(p0) = G_PTR_ADD [[SP]], [[CST2]](s64)
 ; CHECK: G_STORE [[LO]](s64), [[GEP2]](p0) :: (store 8 into stack, align 1)
 ; CHECK: [[SP:%[0-9]+]]:_(p0) = COPY $sp
-; CHECK: [[CST3:%[0-9]+]]:_(s64) = COPY [[CST]]
-; CHECK: [[GEP3:%[0-9]+]]:_(p0) = G_PTR_ADD [[SP]], [[CST3]](s64)
+; CHECK: [[GEP3:%[0-9]+]]:_(p0) = G_PTR_ADD [[SP]], [[CST]](s64)
 ; CHECK: G_STORE [[HI]](s64), [[GEP3]](p0) :: (store 8 into stack + 8, align 1)
 define void @test_split_struct([2 x i64]* %ptr) {
   %struct = load [2 x i64], [2 x i64]* %ptr
@@ -66,8 +66,7 @@ define void @take_128bit_struct([2 x i64]* %ptr, [2 x i64] %in) {
 ; CHECK: G_STORE [[LD1]](s64), [[ADDR]](p0) :: (store 8 into stack, align 1)

 ; CHECK: [[SP:%[0-9]+]]:_(p0) = COPY $sp
-; CHECK: [[OFF:%[0-9]+]]:_(s64) = COPY [[CST]]
-; CHECK: [[ADDR:%[0-9]+]]:_(p0) = G_PTR_ADD [[SP]], [[OFF]]
+; CHECK: [[ADDR:%[0-9]+]]:_(p0) = G_PTR_ADD [[SP]], [[CST]]
 ; CHECK: G_STORE [[LD2]](s64), [[ADDR]](p0) :: (store 8 into stack + 8, align 1)
 define void @test_split_struct([2 x i64]* %ptr) {
   %struct = load [2 x i64], [2 x i64]* %ptr
@@ -257,8 +257,7 @@ define void @take_128bit_struct([2 x i64]* %ptr, [2 x i64] %in) {
 ; CHECK: [[GEP2:%[0-9]+]]:_(p0) = G_PTR_ADD [[SP]], [[CST2]](s64)
 ; CHECK: G_STORE [[LO]](s64), [[GEP2]](p0) :: (store 8 into stack, align 1)
 ; CHECK: [[SP:%[0-9]+]]:_(p0) = COPY $sp
-; CHECK: [[CST3:%[0-9]+]]:_(s64) = COPY [[CST]]
-; CHECK: [[GEP3:%[0-9]+]]:_(p0) = G_PTR_ADD [[SP]], [[CST3]](s64)
+; CHECK: [[GEP3:%[0-9]+]]:_(p0) = G_PTR_ADD [[SP]], [[CST]](s64)
 ; CHECK: G_STORE [[HI]](s64), [[GEP3]](p0) :: (store 8 into stack + 8, align 1)
 define void @test_split_struct([2 x i64]* %ptr) {
   %struct = load [2 x i64], [2 x i64]* %ptr
@@ -1737,8 +1737,7 @@ body: |
     ; SOFT-DEFAULT: BL{{.*}} &__ltsf2, {{.*}}, implicit $r0, implicit $r1, implicit-def $r0
     ; SOFT: [[RET2:%[0-9]+]]:_(s32) = COPY $r0
     ; SOFT: ADJCALLSTACKUP
-    ; SOFT-DEFAULT: [[ZERO2:%[0-9]+]]:_(s32) = COPY [[ZERO]]
-    ; SOFT-DEFAULT: [[R2:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[RET2]](s32), [[ZERO2]]
+    ; SOFT-DEFAULT: [[R2:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[RET2]](s32), [[ZERO]]
     ; SOFT-AEABI: [[R1EXT:%[0-9]+]]:_(s32) = COPY [[RET1]]
     ; SOFT-AEABI: [[R2EXT:%[0-9]+]]:_(s32) = COPY [[RET2]]
     ; SOFT-DEFAULT: [[R1EXT:%[0-9]+]]:_(s32) = G_ANYEXT [[R1]]
@@ -1797,8 +1796,7 @@ body: |
    ; SOFT-DEFAULT: BL{{.*}} &__unordsf2, {{.*}}, implicit $r0, implicit $r1, implicit-def $r0
     ; SOFT: [[RET2:%[0-9]+]]:_(s32) = COPY $r0
     ; SOFT: ADJCALLSTACKUP
-    ; SOFT-DEFAULT: [[ZERO2:%[0-9]+]]:_(s32) = COPY [[ZERO]]
-    ; SOFT-DEFAULT: [[R2:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[RET2]](s32), [[ZERO2]]
+    ; SOFT-DEFAULT: [[R2:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[RET2]](s32), [[ZERO]]
     ; SOFT-AEABI: [[R1EXT:%[0-9]+]]:_(s32) = COPY [[RET1]]
     ; SOFT-AEABI: [[R2EXT:%[0-9]+]]:_(s32) = COPY [[RET2]]
     ; SOFT-DEFAULT: [[R1EXT:%[0-9]+]]:_(s32) = G_ANYEXT [[R1]]
@@ -2660,8 +2658,7 @@ body: |
     ; SOFT-DEFAULT: BL{{.*}} &__ltdf2, {{.*}}, implicit $r0, implicit $r1, implicit $r2, implicit $r3, implicit-def $r0
     ; SOFT: [[RET2:%[0-9]+]]:_(s32) = COPY $r0
     ; SOFT: ADJCALLSTACKUP
-    ; SOFT-DEFAULT: [[ZERO2:%[0-9]+]]:_(s32) = COPY [[ZERO]]
-    ; SOFT-DEFAULT: [[R2:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[RET2]](s32), [[ZERO2]]
+    ; SOFT-DEFAULT: [[R2:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[RET2]](s32), [[ZERO]]
     ; SOFT-AEABI: [[R1EXT:%[0-9]+]]:_(s32) = COPY [[RET1]]
     ; SOFT-AEABI: [[R2EXT:%[0-9]+]]:_(s32) = COPY [[RET2]]
     ; SOFT-DEFAULT: [[R1EXT:%[0-9]+]]:_(s32) = G_ANYEXT [[R1]]
@@ -2736,8 +2733,7 @@ body: |
     ; SOFT-DEFAULT: BL{{.*}} &__unorddf2, {{.*}}, implicit $r0, implicit $r1, implicit $r2, implicit $r3, implicit-def $r0
     ; SOFT: [[RET2:%[0-9]+]]:_(s32) = COPY $r0
     ; SOFT: ADJCALLSTACKUP
-    ; SOFT-DEFAULT: [[ZERO2:%[0-9]+]]:_(s32) = COPY [[ZERO]]
-    ; SOFT-DEFAULT: [[R2:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[RET2]](s32), [[ZERO2]]
+    ; SOFT-DEFAULT: [[R2:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[RET2]](s32), [[ZERO]]
     ; SOFT-AEABI: [[R1EXT:%[0-9]+]]:_(s32) = COPY [[RET1]]
     ; SOFT-AEABI: [[R2EXT:%[0-9]+]]:_(s32) = COPY [[RET2]]
     ; SOFT-DEFAULT: [[R1EXT:%[0-9]+]]:_(s32) = G_ANYEXT [[R1]]
@@ -577,16 +577,14 @@ define void @test_abi_exts_call(i8* %addr) {
   ; X32: ADJCALLSTACKUP32 4, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
   ; X32: ADJCALLSTACKDOWN32 4, 0, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
   ; X32: [[COPY1:%[0-9]+]]:_(p0) = COPY $esp
-  ; X32: [[COPY2:%[0-9]+]]:_(s32) = COPY [[C]](s32)
-  ; X32: [[GEP1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[COPY2]](s32)
+  ; X32: [[GEP1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C]](s32)
   ; X32: [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[LOAD1]](s8)
   ; X32: G_STORE [[SEXT]](s32), [[GEP1]](p0) :: (store 4 into stack, align 1)
   ; X32: CALLpcrel32 @take_char, csr_32, implicit $esp, implicit $ssp
   ; X32: ADJCALLSTACKUP32 4, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
   ; X32: ADJCALLSTACKDOWN32 4, 0, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
   ; X32: [[COPY3:%[0-9]+]]:_(p0) = COPY $esp
-  ; X32: [[COPY4:%[0-9]+]]:_(s32) = COPY [[C]](s32)
-  ; X32: [[GEP2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY3]], [[COPY4]](s32)
+  ; X32: [[GEP2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY3]], [[C]](s32)
   ; X32: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[LOAD1]](s8)
   ; X32: G_STORE [[ZEXT]](s32), [[GEP2]](p0) :: (store 4 into stack, align 1)
   ; X32: CALLpcrel32 @take_char, csr_32, implicit $esp, implicit $ssp