[InstrInfo] Refactor foldMemoryOperandImpl to thread through InsertPt. NFC

Summary:
This resolves a longstanding FIXME and is a necessary precursor to cases
where foldMemoryOperandImpl may have to create more than one instruction
(e.g. to constrain a register class). These are the split-out NFC changes
from D6262.

Reviewers: pete, ributzka, uweigand, mcrosier

Reviewed By: mcrosier

Subscribers: mcrosier, ted, llvm-commits

Differential Revision: http://reviews.llvm.org/D10174

llvm-svn: 239336
Keno Fischer 2015-06-08 20:09:58 +00:00
parent 4b23eb85f9
commit e70b31fc1b
13 changed files with 132 additions and 104 deletions
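To illustrate the new contract, here is a minimal sketch of a target override
using the threaded-through insertion point. This is illustrative only, written
against a hypothetical target (MyTargetInstrInfo, MyTarget::STRXui, and
MyTarget::GPR64RegClass are placeholders, not part of this patch):

#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
using namespace llvm;

MachineInstr *MyTargetInstrInfo::foldMemoryOperandImpl(
    MachineFunction &MF, MachineInstr *MI, ArrayRef<unsigned> Ops,
    MachineBasicBlock::iterator InsertPt, int FrameIndex) const {
  MachineBasicBlock &MBB = *InsertPt->getParent();
  DebugLoc DL = MI->getDebugLoc();

  // Auxiliary instruction: constrain the spilled value to the register class
  // the store expects. Under the old API there was no way to emit this COPY,
  // because the caller performed the single insertion itself.
  unsigned Tmp =
      MF.getRegInfo().createVirtualRegister(&MyTarget::GPR64RegClass);
  BuildMI(MBB, InsertPt, DL, get(TargetOpcode::COPY), Tmp)
      .addReg(MI->getOperand(Ops[0]).getReg());

  // The folded instruction proper, inserted at the same point and returned.
  return BuildMI(MBB, InsertPt, DL, get(MyTarget::STRXui))
      .addReg(Tmp)
      .addFrameIndex(FrameIndex)
      .addImm(0);
}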

include/llvm/Target/TargetInstrInfo.h

@@ -711,20 +711,22 @@ protected:
   /// Target-dependent implementation for foldMemoryOperand.
   /// Target-independent code in foldMemoryOperand will
   /// take care of adding a MachineMemOperand to the newly created instruction.
-  virtual MachineInstr *foldMemoryOperandImpl(MachineFunction &MF,
-                                              MachineInstr *MI,
-                                              ArrayRef<unsigned> Ops,
-                                              int FrameIndex) const {
+  /// The instruction and any auxiliary instructions necessary will be inserted
+  /// at InsertPt.
+  virtual MachineInstr *foldMemoryOperandImpl(
+      MachineFunction &MF, MachineInstr *MI, ArrayRef<unsigned> Ops,
+      MachineBasicBlock::iterator InsertPt, int FrameIndex) const {
     return nullptr;
   }
 
   /// Target-dependent implementation for foldMemoryOperand.
   /// Target-independent code in foldMemoryOperand will
   /// take care of adding a MachineMemOperand to the newly created instruction.
-  virtual MachineInstr *foldMemoryOperandImpl(MachineFunction &MF,
-                                              MachineInstr *MI,
-                                              ArrayRef<unsigned> Ops,
-                                              MachineInstr *LoadMI) const {
+  /// The instruction and any auxiliary instructions necessary will be inserted
+  /// at InsertPt.
+  virtual MachineInstr *foldMemoryOperandImpl(
+      MachineFunction &MF, MachineInstr *MI, ArrayRef<unsigned> Ops,
+      MachineBasicBlock::iterator InsertPt, MachineInstr *LoadMI) const {
     return nullptr;
   }

lib/CodeGen/TargetInstrInfo.cpp

@@ -471,11 +471,13 @@ MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
       MI->getOpcode() == TargetOpcode::PATCHPOINT) {
     // Fold stackmap/patchpoint.
     NewMI = foldPatchpoint(MF, MI, Ops, FI, *this);
+    if (NewMI)
+      MBB->insert(MI, NewMI);
   } else {
     // Ask the target to do the actual folding.
-    NewMI = foldMemoryOperandImpl(MF, MI, Ops, FI);
+    NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, FI);
   }
 
   if (NewMI) {
     NewMI->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
     // Add a memory operand, foldMemoryOperandImpl doesn't do that.
@@ -493,8 +495,7 @@ MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
                             MFI.getObjectAlignment(FI));
     NewMI->addMemOperand(MF, MMO);
 
-    // FIXME: change foldMemoryOperandImpl semantics to also insert NewMI.
-    return MBB->insert(MI, NewMI);
+    return NewMI;
   }
 
   // Straight COPY may fold as load/store.
@@ -539,15 +540,15 @@ MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
       isLoadFromStackSlot(LoadMI, FrameIndex)) {
     // Fold stackmap/patchpoint.
     NewMI = foldPatchpoint(MF, MI, Ops, FrameIndex, *this);
+    if (NewMI)
+      NewMI = MBB.insert(MI, NewMI);
   } else {
     // Ask the target to do the actual folding.
-    NewMI = foldMemoryOperandImpl(MF, MI, Ops, LoadMI);
+    NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, LoadMI);
   }
 
   if (!NewMI) return nullptr;
 
-  NewMI = MBB.insert(MI, NewMI);
-
   // Copy the memoperands from the load to the folded instruction.
   if (MI->memoperands_empty()) {
     NewMI->setMemRefs(LoadMI->memoperands_begin(),
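The division of labor after these hunks: foldPatchpoint still returns a
detached instruction, so the generic code gains an explicit insert on that
path, while foldMemoryOperandImpl implementations are now expected to insert
everything they create at the insertion point they are given (here simply MI,
the instruction being replaced). A condensed sketch of the resulting flow,
reusing the names above (isStackmapOrPatchpoint is a placeholder predicate,
used for brevity only):

MachineInstr *NewMI = nullptr;
if (isStackmapOrPatchpoint(MI)) {
  NewMI = foldPatchpoint(MF, MI, Ops, FI, *this);
  if (NewMI)
    MBB->insert(MI, NewMI); // patchpoint path: the caller still inserts
} else {
  // Target path: the implementation inserts NewMI (plus any auxiliary
  // instructions) itself, so no insert is needed here.
  NewMI = foldMemoryOperandImpl(MF, MI, Ops, /*InsertPt=*/MI, FI);
}
if (NewMI)
  return NewMI; // already linked into the block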

lib/Target/AArch64/AArch64InstrInfo.cpp

@@ -2066,10 +2066,9 @@ void llvm::emitFrameOffset(MachineBasicBlock &MBB,
       .setMIFlag(Flag);
 }
 
-MachineInstr *AArch64InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
-                                                      MachineInstr *MI,
-                                                      ArrayRef<unsigned> Ops,
-                                                      int FrameIndex) const {
+MachineInstr *AArch64InstrInfo::foldMemoryOperandImpl(
+    MachineFunction &MF, MachineInstr *MI, ArrayRef<unsigned> Ops,
+    MachineBasicBlock::iterator InsertPt, int FrameIndex) const {
   // This is a bit of a hack. Consider this instruction:
   //
   //   %vreg0<def> = COPY %SP; GPR64all:%vreg0

lib/Target/AArch64/AArch64InstrInfo.h

@@ -131,6 +131,7 @@ public:
   using TargetInstrInfo::foldMemoryOperandImpl;
   MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
                                       ArrayRef<unsigned> Ops,
+                                      MachineBasicBlock::iterator InsertPt,
                                       int FrameIndex) const override;
 
   bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,

lib/Target/Hexagon/HexagonInstrInfo.cpp

@@ -779,10 +779,9 @@ HexagonInstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
   return false;
 }
 
-MachineInstr *HexagonInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
-                                                      MachineInstr *MI,
-                                                      ArrayRef<unsigned> Ops,
-                                                      int FI) const {
+MachineInstr *HexagonInstrInfo::foldMemoryOperandImpl(
+    MachineFunction &MF, MachineInstr *MI, ArrayRef<unsigned> Ops,
+    MachineBasicBlock::iterator InsertPt, int FI) const {
   // Hexagon_TODO: Implement.
   return nullptr;
 }

lib/Target/Hexagon/HexagonInstrInfo.h

@@ -114,10 +114,12 @@ public:
   MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
                                       ArrayRef<unsigned> Ops,
+                                      MachineBasicBlock::iterator InsertPt,
                                       int FrameIndex) const override;
 
   MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
                                       ArrayRef<unsigned> Ops,
+                                      MachineBasicBlock::iterator InsertPt,
                                       MachineInstr *LoadMI) const override {
     return nullptr;
   }

lib/Target/R600/AMDGPUInstrInfo.cpp

@@ -152,17 +152,15 @@ bool AMDGPUInstrInfo::expandPostRAPseudo (MachineBasicBlock::iterator MI) const
   return true;
 }
 
-MachineInstr *AMDGPUInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
-                                                     MachineInstr *MI,
-                                                     ArrayRef<unsigned> Ops,
-                                                     int FrameIndex) const {
+MachineInstr *AMDGPUInstrInfo::foldMemoryOperandImpl(
+    MachineFunction &MF, MachineInstr *MI, ArrayRef<unsigned> Ops,
+    MachineBasicBlock::iterator InsertPt, int FrameIndex) const {
   // TODO: Implement this function
   return nullptr;
 }
-MachineInstr *
-AMDGPUInstrInfo::foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
-                                       ArrayRef<unsigned> Ops,
-                                       MachineInstr *LoadMI) const {
+MachineInstr *AMDGPUInstrInfo::foldMemoryOperandImpl(
+    MachineFunction &MF, MachineInstr *MI, ArrayRef<unsigned> Ops,
+    MachineBasicBlock::iterator InsertPt, MachineInstr *LoadMI) const {
   // TODO: Implement this function
   return nullptr;
 }

lib/Target/R600/AMDGPUInstrInfo.h

@@ -87,9 +87,11 @@ public:
 protected:
   MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
                                       ArrayRef<unsigned> Ops,
+                                      MachineBasicBlock::iterator InsertPt,
                                       int FrameIndex) const override;
   MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
                                       ArrayRef<unsigned> Ops,
+                                      MachineBasicBlock::iterator InsertPt,
                                       MachineInstr *LoadMI) const override;
 
 public:

lib/Target/SystemZ/SystemZInstrInfo.cpp

@@ -752,10 +752,9 @@ SystemZInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
   return nullptr;
 }
 
-MachineInstr *SystemZInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
-                                                      MachineInstr *MI,
-                                                      ArrayRef<unsigned> Ops,
-                                                      int FrameIndex) const {
+MachineInstr *SystemZInstrInfo::foldMemoryOperandImpl(
+    MachineFunction &MF, MachineInstr *MI, ArrayRef<unsigned> Ops,
+    MachineBasicBlock::iterator InsertPt, int FrameIndex) const {
   const MachineFrameInfo *MFI = MF.getFrameInfo();
   unsigned Size = MFI->getObjectSize(FrameIndex);
   unsigned Opcode = MI->getOpcode();
@@ -765,9 +764,11 @@ MachineInstr *SystemZInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
         isInt<8>(MI->getOperand(2).getImm()) &&
         !MI->getOperand(3).getReg()) {
       // LA(Y) %reg, CONST(%reg) -> AGSI %mem, CONST
-      return BuildMI(MF, MI->getDebugLoc(), get(SystemZ::AGSI))
-        .addFrameIndex(FrameIndex).addImm(0)
-        .addImm(MI->getOperand(2).getImm());
+      return BuildMI(*InsertPt->getParent(), InsertPt, MI->getDebugLoc(),
+                     get(SystemZ::AGSI))
+          .addFrameIndex(FrameIndex)
+          .addImm(0)
+          .addImm(MI->getOperand(2).getImm());
     }
     return nullptr;
   }
@@ -786,9 +787,11 @@ MachineInstr *SystemZInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
       isInt<8>(MI->getOperand(2).getImm())) {
     // A(G)HI %reg, CONST -> A(G)SI %mem, CONST
     Opcode = (Opcode == SystemZ::AHI ? SystemZ::ASI : SystemZ::AGSI);
-    return BuildMI(MF, MI->getDebugLoc(), get(Opcode))
-      .addFrameIndex(FrameIndex).addImm(0)
-      .addImm(MI->getOperand(2).getImm());
+    return BuildMI(*InsertPt->getParent(), InsertPt, MI->getDebugLoc(),
+                   get(Opcode))
+        .addFrameIndex(FrameIndex)
+        .addImm(0)
+        .addImm(MI->getOperand(2).getImm());
   }
 
   if (Opcode == SystemZ::LGDR || Opcode == SystemZ::LDGR) {
@@ -798,17 +801,23 @@ MachineInstr *SystemZInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
     // source register instead.
     if (OpNum == 0) {
       unsigned StoreOpcode = Op1IsGPR ? SystemZ::STG : SystemZ::STD;
-      return BuildMI(MF, MI->getDebugLoc(), get(StoreOpcode))
-        .addOperand(MI->getOperand(1)).addFrameIndex(FrameIndex)
-        .addImm(0).addReg(0);
+      return BuildMI(*InsertPt->getParent(), InsertPt, MI->getDebugLoc(),
+                     get(StoreOpcode))
+          .addOperand(MI->getOperand(1))
+          .addFrameIndex(FrameIndex)
+          .addImm(0)
+          .addReg(0);
     }
     // If we're spilling the source of an LDGR or LGDR, load the
     // destination register instead.
     if (OpNum == 1) {
       unsigned LoadOpcode = Op0IsGPR ? SystemZ::LG : SystemZ::LD;
       unsigned Dest = MI->getOperand(0).getReg();
-      return BuildMI(MF, MI->getDebugLoc(), get(LoadOpcode), Dest)
-        .addFrameIndex(FrameIndex).addImm(0).addReg(0);
+      return BuildMI(*InsertPt->getParent(), InsertPt, MI->getDebugLoc(),
+                     get(LoadOpcode), Dest)
+          .addFrameIndex(FrameIndex)
+          .addImm(0)
+          .addReg(0);
     }
   }
@@ -830,17 +839,25 @@ MachineInstr *SystemZInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
     if (MMO->getSize() == Size && !MMO->isVolatile()) {
       // Handle conversion of loads.
       if (isSimpleBD12Move(MI, SystemZII::SimpleBDXLoad)) {
-        return BuildMI(MF, MI->getDebugLoc(), get(SystemZ::MVC))
-          .addFrameIndex(FrameIndex).addImm(0).addImm(Size)
-          .addOperand(MI->getOperand(1)).addImm(MI->getOperand(2).getImm())
-          .addMemOperand(MMO);
+        return BuildMI(*InsertPt->getParent(), InsertPt, MI->getDebugLoc(),
+                       get(SystemZ::MVC))
+            .addFrameIndex(FrameIndex)
+            .addImm(0)
+            .addImm(Size)
+            .addOperand(MI->getOperand(1))
+            .addImm(MI->getOperand(2).getImm())
+            .addMemOperand(MMO);
       }
       // Handle conversion of stores.
       if (isSimpleBD12Move(MI, SystemZII::SimpleBDXStore)) {
-        return BuildMI(MF, MI->getDebugLoc(), get(SystemZ::MVC))
-          .addOperand(MI->getOperand(1)).addImm(MI->getOperand(2).getImm())
-          .addImm(Size).addFrameIndex(FrameIndex).addImm(0)
-          .addMemOperand(MMO);
+        return BuildMI(*InsertPt->getParent(), InsertPt, MI->getDebugLoc(),
+                       get(SystemZ::MVC))
+            .addOperand(MI->getOperand(1))
+            .addImm(MI->getOperand(2).getImm())
+            .addImm(Size)
+            .addFrameIndex(FrameIndex)
+            .addImm(0)
+            .addMemOperand(MMO);
       }
     }
   }
@@ -856,7 +873,8 @@ MachineInstr *SystemZInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
       assert(AccessBytes != 0 && "Size of access should be known");
       assert(AccessBytes <= Size && "Access outside the frame index");
       uint64_t Offset = Size - AccessBytes;
-      MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), get(MemOpcode));
+      MachineInstrBuilder MIB = BuildMI(*InsertPt->getParent(), InsertPt,
+                                        MI->getDebugLoc(), get(MemOpcode));
       for (unsigned I = 0; I < OpNum; ++I)
         MIB.addOperand(MI->getOperand(I));
       MIB.addFrameIndex(FrameIndex).addImm(Offset);
@@ -869,10 +887,9 @@ MachineInstr *SystemZInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
   return nullptr;
 }
 
-MachineInstr *
-SystemZInstrInfo::foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
-                                        ArrayRef<unsigned> Ops,
-                                        MachineInstr *LoadMI) const {
+MachineInstr *SystemZInstrInfo::foldMemoryOperandImpl(
+    MachineFunction &MF, MachineInstr *MI, ArrayRef<unsigned> Ops,
+    MachineBasicBlock::iterator InsertPt, MachineInstr *LoadMI) const {
   return nullptr;
 }
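The mechanical change running through all of the SystemZ hunks is a switch
between two existing BuildMI overloads: the form taking only a MachineFunction
creates a detached instruction that someone must insert later, while the form
taking a block and an iterator creates the instruction and links it in
immediately before InsertPt. A minimal sketch of the difference, reusing names
from the function above:

// Old pattern: detached instruction; insertion is someone else's job.
MachineInstr *Detached = BuildMI(MF, MI->getDebugLoc(), get(SystemZ::AGSI))
                             .addFrameIndex(FrameIndex)
                             .addImm(0)
                             .addImm(MI->getOperand(2).getImm());

// New pattern: created and inserted immediately before InsertPt.
MachineInstr *Inserted = BuildMI(*InsertPt->getParent(), InsertPt,
                                 MI->getDebugLoc(), get(SystemZ::AGSI))
                             .addFrameIndex(FrameIndex)
                             .addImm(0)
                             .addImm(MI->getOperand(2).getImm());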

lib/Target/SystemZ/SystemZInstrInfo.h

@@ -187,9 +187,11 @@ public:
                                        LiveVariables *LV) const override;
   MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
                                       ArrayRef<unsigned> Ops,
+                                      MachineBasicBlock::iterator InsertPt,
                                       int FrameIndex) const override;
   MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
                                       ArrayRef<unsigned> Ops,
+                                      MachineBasicBlock::iterator InsertPt,
                                       MachineInstr *LoadMI) const override;
   bool expandPostRAPseudo(MachineBasicBlock::iterator MBBI) const override;
   bool ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const

lib/Target/X86/X86FastISel.cpp

@@ -3530,9 +3530,9 @@ bool X86FastISel::tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo,
   SmallVector<MachineOperand, 8> AddrOps;
   AM.getFullAddress(AddrOps);
 
-  MachineInstr *Result =
-    XII.foldMemoryOperandImpl(*FuncInfo.MF, MI, OpNo, AddrOps,
-                              Size, Alignment, /*AllowCommute=*/true);
+  MachineInstr *Result = XII.foldMemoryOperandImpl(
+      *FuncInfo.MF, MI, OpNo, AddrOps, FuncInfo.InsertPt, Size, Alignment,
+      /*AllowCommute=*/true);
   if (!Result)
     return false;
 
@@ -3556,7 +3556,6 @@ bool X86FastISel::tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo,
   }
 
   Result->addMemOperand(*FuncInfo.MF, createMachineMemOperandFor(LI));
-  FuncInfo.MBB->insert(FuncInfo.InsertPt, Result);
   MI->eraseFromParent();
   return true;
 }

lib/Target/X86/X86InstrInfo.cpp

@@ -4703,8 +4703,17 @@ bool X86InstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
   return false;
 }
 
+static void addOperands(MachineInstrBuilder &MIB, ArrayRef<MachineOperand> MOs) {
+  unsigned NumAddrOps = MOs.size();
+  for (unsigned i = 0; i != NumAddrOps; ++i)
+    MIB.addOperand(MOs[i]);
+  if (NumAddrOps < 4) // FrameIndex only
+    addOffset(MIB, 0);
+}
+
 static MachineInstr *FuseTwoAddrInst(MachineFunction &MF, unsigned Opcode,
                                      ArrayRef<MachineOperand> MOs,
+                                     MachineBasicBlock::iterator InsertPt,
                                      MachineInstr *MI,
                                      const TargetInstrInfo &TII) {
   // Create the base instruction with the memory operand as the first part.
@@ -4712,11 +4721,7 @@ static MachineInstr *FuseTwoAddrInst(MachineFunction &MF, unsigned Opcode,
   MachineInstr *NewMI = MF.CreateMachineInstr(TII.get(Opcode),
                                               MI->getDebugLoc(), true);
   MachineInstrBuilder MIB(MF, NewMI);
-  unsigned NumAddrOps = MOs.size();
-  for (unsigned i = 0; i != NumAddrOps; ++i)
-    MIB.addOperand(MOs[i]);
-  if (NumAddrOps < 4) // FrameIndex only
-    addOffset(MIB, 0);
+  addOperands(MIB, MOs);
 
   // Loop over the rest of the ri operands, converting them over.
   unsigned NumOps = MI->getDesc().getNumOperands()-2;
@@ -4728,11 +4733,16 @@ static MachineInstr *FuseTwoAddrInst(MachineFunction &MF, unsigned Opcode,
     MachineOperand &MO = MI->getOperand(i);
     MIB.addOperand(MO);
   }
+
+  MachineBasicBlock *MBB = InsertPt->getParent();
+  MBB->insert(InsertPt, NewMI);
+
   return MIB;
 }
 
 static MachineInstr *FuseInst(MachineFunction &MF, unsigned Opcode,
                               unsigned OpNo, ArrayRef<MachineOperand> MOs,
+                              MachineBasicBlock::iterator InsertPt,
                               MachineInstr *MI, const TargetInstrInfo &TII) {
   // Omit the implicit operands, something BuildMI can't do.
   MachineInstr *NewMI = MF.CreateMachineInstr(TII.get(Opcode),
@@ -4743,38 +4753,32 @@ static MachineInstr *FuseInst(MachineFunction &MF, unsigned Opcode,
     MachineOperand &MO = MI->getOperand(i);
     if (i == OpNo) {
       assert(MO.isReg() && "Expected to fold into reg operand!");
-      unsigned NumAddrOps = MOs.size();
-      for (unsigned i = 0; i != NumAddrOps; ++i)
-        MIB.addOperand(MOs[i]);
-      if (NumAddrOps < 4) // FrameIndex only
-        addOffset(MIB, 0);
+      addOperands(MIB, MOs);
     } else {
       MIB.addOperand(MO);
     }
   }
+
+  MachineBasicBlock *MBB = InsertPt->getParent();
+  MBB->insert(InsertPt, NewMI);
+
   return MIB;
 }
 
 static MachineInstr *MakeM0Inst(const TargetInstrInfo &TII, unsigned Opcode,
                                 ArrayRef<MachineOperand> MOs,
+                                MachineBasicBlock::iterator InsertPt,
                                 MachineInstr *MI) {
-  MachineFunction &MF = *MI->getParent()->getParent();
-  MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), TII.get(Opcode));
-  unsigned NumAddrOps = MOs.size();
-  for (unsigned i = 0; i != NumAddrOps; ++i)
-    MIB.addOperand(MOs[i]);
-  if (NumAddrOps < 4) // FrameIndex only
-    addOffset(MIB, 0);
+  MachineInstrBuilder MIB = BuildMI(*InsertPt->getParent(), InsertPt,
+                                    MI->getDebugLoc(), TII.get(Opcode));
+  addOperands(MIB, MOs);
   return MIB.addImm(0);
 }
 
-MachineInstr *X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
-                                                  MachineInstr *MI,
-                                                  unsigned OpNum,
-                                                  ArrayRef<MachineOperand> MOs,
-                                                  unsigned Size, unsigned Align,
-                                                  bool AllowCommute) const {
+MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
+    MachineFunction &MF, MachineInstr *MI, unsigned OpNum,
+    ArrayRef<MachineOperand> MOs, MachineBasicBlock::iterator InsertPt,
+    unsigned Size, unsigned Align, bool AllowCommute) const {
   const DenseMap<unsigned,
                  std::pair<unsigned,unsigned> > *OpcodeTablePtr = nullptr;
   bool isCallRegIndirect = Subtarget.callRegIndirect();
@@ -4808,7 +4812,7 @@ MachineInstr *X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
       isTwoAddrFold = true;
   } else if (OpNum == 0) {
     if (MI->getOpcode() == X86::MOV32r0) {
-      NewMI = MakeM0Inst(*this, X86::MOV32mi, MOs, MI);
+      NewMI = MakeM0Inst(*this, X86::MOV32mi, MOs, InsertPt, MI);
       if (NewMI)
         return NewMI;
     }
@@ -4853,9 +4857,9 @@ MachineInstr *X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
   }
 
   if (isTwoAddrFold)
-    NewMI = FuseTwoAddrInst(MF, Opcode, MOs, MI, *this);
+    NewMI = FuseTwoAddrInst(MF, Opcode, MOs, InsertPt, MI, *this);
   else
-    NewMI = FuseInst(MF, Opcode, OpNum, MOs, MI, *this);
+    NewMI = FuseInst(MF, Opcode, OpNum, MOs, InsertPt, MI, *this);
 
   if (NarrowToMOV32rm) {
     // If this is the special case where we use a MOV32rm to load a 32-bit
@@ -4907,8 +4911,9 @@ MachineInstr *X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
       // Attempt to fold with the commuted version of the instruction.
       unsigned CommuteOp =
         (CommuteOpIdx1 == OriginalOpIdx ? CommuteOpIdx2 : CommuteOpIdx1);
-      NewMI = foldMemoryOperandImpl(MF, MI, CommuteOp, MOs, Size, Align,
-                                    /*AllowCommute=*/false);
+      NewMI =
+          foldMemoryOperandImpl(MF, MI, CommuteOp, MOs, InsertPt, Size, Align,
+                                /*AllowCommute=*/false);
       if (NewMI)
         return NewMI;
@@ -5137,10 +5142,9 @@ breakPartialRegDependency(MachineBasicBlock::iterator MI, unsigned OpNum,
     MI->addRegisterKilled(Reg, TRI, true);
 }
 
-MachineInstr *X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
-                                                  MachineInstr *MI,
-                                                  ArrayRef<unsigned> Ops,
-                                                  int FrameIndex) const {
+MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
+    MachineFunction &MF, MachineInstr *MI, ArrayRef<unsigned> Ops,
+    MachineBasicBlock::iterator InsertPt, int FrameIndex) const {
   // Check switch flag
   if (NoFusing) return nullptr;
@@ -5179,8 +5183,8 @@ MachineInstr *X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
       return nullptr;
 
   return foldMemoryOperandImpl(MF, MI, Ops[0],
-                               MachineOperand::CreateFI(FrameIndex), Size,
-                               Alignment, /*AllowCommute=*/true);
+                               MachineOperand::CreateFI(FrameIndex), InsertPt,
+                               Size, Alignment, /*AllowCommute=*/true);
 }
 
 static bool isPartialRegisterLoad(const MachineInstr &LoadMI,
@@ -5202,17 +5206,16 @@ static bool isPartialRegisterLoad(const MachineInstr &LoadMI,
   return false;
 }
 
-MachineInstr *X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
-                                                  MachineInstr *MI,
-                                                  ArrayRef<unsigned> Ops,
-                                                  MachineInstr *LoadMI) const {
+MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
+    MachineFunction &MF, MachineInstr *MI, ArrayRef<unsigned> Ops,
+    MachineBasicBlock::iterator InsertPt, MachineInstr *LoadMI) const {
   // If loading from a FrameIndex, fold directly from the FrameIndex.
   unsigned NumOps = LoadMI->getDesc().getNumOperands();
   int FrameIndex;
   if (isLoadFromStackSlot(LoadMI, FrameIndex)) {
     if (isPartialRegisterLoad(*LoadMI, MF))
       return nullptr;
-    return foldMemoryOperandImpl(MF, MI, Ops, FrameIndex);
+    return foldMemoryOperandImpl(MF, MI, Ops, InsertPt, FrameIndex);
   }
 
   // Check switch flag
@@ -5332,7 +5335,7 @@ MachineInstr *X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
       break;
     }
   }
-  return foldMemoryOperandImpl(MF, MI, Ops[0], MOs,
+  return foldMemoryOperandImpl(MF, MI, Ops[0], MOs, InsertPt,
                                /*Size=*/0, Alignment, /*AllowCommute=*/true);
 }
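Two creation idioms appear in the X86 changes above. FuseTwoAddrInst and
FuseInst build a detached instruction with CreateMachineInstr (so implicit
operands can be omitted, which the diff's own comment notes BuildMI cannot do)
and now link it in explicitly, while MakeM0Inst uses the BuildMI overload that
inserts as it creates. A sketch of both patterns, assuming the surrounding
variables:

// Pattern 1 (FuseTwoAddrInst/FuseInst): detached creation, explicit insert.
MachineInstr *NewMI =
    MF.CreateMachineInstr(TII.get(Opcode), MI->getDebugLoc(), /*NoImp=*/true);
// ... operands are added through a MachineInstrBuilder wrapping NewMI ...
InsertPt->getParent()->insert(InsertPt, NewMI); // linked in before InsertPt

// Pattern 2 (MakeM0Inst): the block-taking BuildMI creates and inserts in
// one step, so no separate insert call is needed.
MachineInstrBuilder MIB = BuildMI(*InsertPt->getParent(), InsertPt,
                                  MI->getDebugLoc(), TII.get(Opcode));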

lib/Target/X86/X86InstrInfo.h

@@ -307,6 +307,7 @@ public:
   /// references has been changed.
   MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
                                       ArrayRef<unsigned> Ops,
+                                      MachineBasicBlock::iterator InsertPt,
                                       int FrameIndex) const override;
 
   /// foldMemoryOperand - Same as the previous version except it allows folding
@@ -314,6 +315,7 @@ public:
   /// stack slot.
   MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
                                       ArrayRef<unsigned> Ops,
+                                      MachineBasicBlock::iterator InsertPt,
                                       MachineInstr *LoadMI) const override;
 
   /// canFoldMemoryOperand - Returns true if the specified load / store is
@@ -407,6 +409,7 @@ public:
   MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
                                       unsigned OpNum,
                                       ArrayRef<MachineOperand> MOs,
+                                      MachineBasicBlock::iterator InsertPt,
                                       unsigned Size, unsigned Alignment,
                                       bool AllowCommute) const;