Make InlineSpiller bundle-aware.

Simply treat bundles as instructions. Spill code is inserted between
bundles, never inside a bundle. Rewrite all operands in a bundle at once.
Don't attempt memory operand folding inside bundles.

llvm-svn: 151787
commit abe8c09b20
parent 05e916bc95
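Taken together, the hunks below follow one pattern: iterate bundle-by-bundle, analyze a whole bundle's operands at once, and keep spill code outside the bundle. A rough sketch of that pattern, assembled from the hunks below (not code from this commit; `Reg`, `MRI`, `NewReg`, and the reload/spill bodies are placeholders):

```cpp
// Walk each bundle (or unbundled instruction) that references Reg once.
for (MachineRegisterInfo::reg_iterator RegI = MRI.reg_begin(Reg);
     MachineInstr *MI = RegI.skipBundle();) {
  // Analyze every operand in the bundle at once: collect each
  // (instruction, operand index) pair referencing Reg, plus summary
  // Reads/Writes/Tied flags for the bundle as a whole.
  SmallVector<std::pair<MachineInstr*, unsigned>, 8> Ops;
  MIBundleOperands::RegInfo RI =
    MIBundleOperands(MI).analyzeVirtReg(Reg, &Ops);

  if (RI.Reads) {
    // Insert a reload before the bundle, never inside it.
  }

  // Rewrite all operands in the bundle at once.
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    Ops[i].first->getOperand(Ops[i].second).setReg(NewReg);

  if (RI.Writes) {
    // Insert a spill after the bundle, never inside it.
  }
}
```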
```diff
--- a/llvm/include/llvm/CodeGen/MachineRegisterInfo.h
+++ b/llvm/include/llvm/CodeGen/MachineRegisterInfo.h
@@ -15,6 +15,7 @@
 #define LLVM_CODEGEN_MACHINEREGISTERINFO_H
 
 #include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/CodeGen/MachineInstrBundle.h"
 #include "llvm/ADT/BitVector.h"
 #include "llvm/ADT/IndexedMap.h"
 #include <vector>
@@ -483,6 +484,14 @@ public:
       return MI;
     }
 
+    MachineInstr *skipBundle() {
+      if (!Op) return 0;
+      MachineInstr *MI = getBundleStart(Op->getParent());
+      do ++*this;
+      while (Op && getBundleStart(Op->getParent()) == MI);
+      return MI;
+    }
+
     MachineOperand &getOperand() const {
       assert(Op && "Cannot dereference end iterator!");
       return *Op;
```
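A hedged usage sketch for the new `skipBundle()`: it returns the header of the bundle containing the current operand and advances the iterator past every operand belonging to that bundle, so each bundle is visited exactly once. The loop shape mirrors the `reMaterializeAll()` change below; `Reg`, `MRI`, and `visit` are assumed context, not part of the commit:

```cpp
// Visit each bundle (or unbundled instruction) using Reg exactly once.
for (MachineRegisterInfo::use_nodbg_iterator I = MRI.use_nodbg_begin(Reg);
     MachineInstr *MI = I.skipBundle();)
  visit(MI);  // MI is the bundle header, or an unbundled instruction
```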
```diff
--- a/llvm/lib/CodeGen/InlineSpiller.cpp
+++ b/llvm/lib/CodeGen/InlineSpiller.cpp
@@ -22,6 +22,7 @@
 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
 #include "llvm/CodeGen/LiveStackAnalysis.h"
 #include "llvm/CodeGen/MachineDominators.h"
+#include "llvm/CodeGen/MachineInstrBundle.h"
 #include "llvm/CodeGen/MachineFrameInfo.h"
 #include "llvm/CodeGen/MachineFunction.h"
 #include "llvm/CodeGen/MachineLoopInfo.h"
@@ -173,8 +174,7 @@ private:
   void reMaterializeAll();
 
   bool coalesceStackAccess(MachineInstr *MI, unsigned Reg);
-  bool foldMemoryOperand(MachineBasicBlock::iterator MI,
-                         const SmallVectorImpl<unsigned> &Ops,
+  bool foldMemoryOperand(ArrayRef<std::pair<MachineInstr*, unsigned> >,
                          MachineInstr *LoadMI = 0);
   void insertReload(LiveInterval &NewLI, SlotIndex,
                     MachineBasicBlock::iterator MI);
@@ -864,24 +864,19 @@ bool InlineSpiller::reMaterializeFor(LiveInterval &VirtReg,
 
   // If the instruction also writes VirtReg.reg, it had better not require the
   // same register for uses and defs.
-  bool Reads, Writes;
-  SmallVector<unsigned, 8> Ops;
-  tie(Reads, Writes) = MI->readsWritesVirtualRegister(VirtReg.reg, &Ops);
-  if (Writes) {
-    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
-      MachineOperand &MO = MI->getOperand(Ops[i]);
-      if (MO.isUse() ? MI->isRegTiedToDefOperand(Ops[i]) : MO.getSubReg()) {
-        markValueUsed(&VirtReg, ParentVNI);
-        DEBUG(dbgs() << "\tcannot remat tied reg: " << UseIdx << '\t' << *MI);
-        return false;
-      }
-    }
-  }
+  SmallVector<std::pair<MachineInstr*, unsigned>, 8> Ops;
+  MIBundleOperands::RegInfo RI =
+    MIBundleOperands(MI).analyzeVirtReg(VirtReg.reg, &Ops);
+  if (RI.Tied) {
+    markValueUsed(&VirtReg, ParentVNI);
+    DEBUG(dbgs() << "\tcannot remat tied reg: " << UseIdx << '\t' << *MI);
+    return false;
+  }
 
   // Before rematerializing into a register for a single instruction, try to
   // fold a load into the instruction. That avoids allocating a new register.
   if (RM.OrigMI->canFoldAsLoad() &&
-      foldMemoryOperand(MI, Ops, RM.OrigMI)) {
+      foldMemoryOperand(Ops, RM.OrigMI)) {
     Edit->markRematerialized(RM.ParentVNI);
     ++NumFoldedLoads;
     return true;
@@ -899,7 +894,7 @@ bool InlineSpiller::reMaterializeFor(LiveInterval &VirtReg,
 
   // Replace operands
   for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
-    MachineOperand &MO = MI->getOperand(Ops[i]);
+    MachineOperand &MO = MI->getOperand(Ops[i].second);
     if (MO.isReg() && MO.isUse() && MO.getReg() == VirtReg.reg) {
       MO.setReg(NewLI.reg);
       MO.setIsKill();
@@ -930,7 +925,7 @@ void InlineSpiller::reMaterializeAll() {
     LiveInterval &LI = LIS.getInterval(Reg);
     for (MachineRegisterInfo::use_nodbg_iterator
          RI = MRI.use_nodbg_begin(Reg);
-         MachineInstr *MI = RI.skipInstruction();)
+         MachineInstr *MI = RI.skipBundle();)
       anyRemat |= reMaterializeFor(LI, MI);
   }
   if (!anyRemat)
@@ -1009,14 +1004,22 @@ bool InlineSpiller::coalesceStackAccess(MachineInstr *MI, unsigned Reg) {
   return true;
 }
 
-/// foldMemoryOperand - Try folding stack slot references in Ops into MI.
-/// @param MI Instruction using or defining the current register.
-/// @param Ops Operand indices from readsWritesVirtualRegister().
+/// foldMemoryOperand - Try folding stack slot references in Ops into their
+/// instructions.
+///
+/// @param Ops Operand indices from analyzeVirtReg().
 /// @param LoadMI Load instruction to use instead of stack slot when non-null.
-/// @return True on success, and MI will be erased.
-bool InlineSpiller::foldMemoryOperand(MachineBasicBlock::iterator MI,
-                                      const SmallVectorImpl<unsigned> &Ops,
-                                      MachineInstr *LoadMI) {
+/// @return True on success.
+bool InlineSpiller::
+foldMemoryOperand(ArrayRef<std::pair<MachineInstr*, unsigned> > Ops,
+                  MachineInstr *LoadMI) {
+  if (Ops.empty())
+    return false;
+  // Don't attempt folding in bundles.
+  MachineInstr *MI = Ops.front().first;
+  if (Ops.back().first != MI || MI->isBundled())
+    return false;
+
   bool WasCopy = MI->isCopy();
   unsigned ImpReg = 0;
 
```
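The two early-outs at the top of the new `foldMemoryOperand()` enforce the commit message's rule against folding inside bundles: a fold rewrites a single MachineInstr in place, so it is only attempted when every (instruction, operand index) pair in `Ops` names the same unbundled instruction. A minimal sketch of the resulting call shape, assuming `Reg` and `MI` inside a surrounding spill loop like `spillAroundUses()` below:

```cpp
// Analyze the bundle once, then try to fold stack-slot references.
// For a bundled MI the fold returns false immediately, and the spiller
// falls back to reload/spill code inserted around the whole bundle.
SmallVector<std::pair<MachineInstr*, unsigned>, 8> Ops;
MIBundleOperands(MI).analyzeVirtReg(Reg, &Ops);
if (foldMemoryOperand(Ops))
  continue;  // folded: no new register or spill code needed
```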
```diff
@@ -1024,7 +1027,7 @@ bool InlineSpiller::foldMemoryOperand(MachineBasicBlock::iterator MI,
   // operands.
   SmallVector<unsigned, 8> FoldOps;
   for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
-    unsigned Idx = Ops[i];
+    unsigned Idx = Ops[i].second;
     MachineOperand &MO = MI->getOperand(Idx);
     if (MO.isImplicit()) {
       ImpReg = MO.getReg();
@@ -1064,7 +1067,7 @@ bool InlineSpiller::foldMemoryOperand(MachineBasicBlock::iterator MI,
                << *FoldMI);
   if (!WasCopy)
     ++NumFolded;
-  else if (Ops.front() == 0)
+  else if (Ops.front().second == 0)
     ++NumSpills;
   else
     ++NumReloads;
@@ -1106,8 +1109,8 @@ void InlineSpiller::spillAroundUses(unsigned Reg) {
   LiveInterval &OldLI = LIS.getInterval(Reg);
 
   // Iterate over instructions using Reg.
-  for (MachineRegisterInfo::reg_iterator RI = MRI.reg_begin(Reg);
-       MachineInstr *MI = RI.skipInstruction();) {
+  for (MachineRegisterInfo::reg_iterator RegI = MRI.reg_begin(Reg);
+       MachineInstr *MI = RegI.skipBundle();) {
 
     // Debug values are not allowed to affect codegen.
     if (MI->isDebugValue()) {
@@ -1136,9 +1139,9 @@ void InlineSpiller::spillAroundUses(unsigned Reg) {
       continue;
 
     // Analyze instruction.
-    bool Reads, Writes;
-    SmallVector<unsigned, 8> Ops;
-    tie(Reads, Writes) = MI->readsWritesVirtualRegister(Reg, &Ops);
+    SmallVector<std::pair<MachineInstr*, unsigned>, 8> Ops;
+    MIBundleOperands::RegInfo RI =
+      MIBundleOperands(MI).analyzeVirtReg(Reg, &Ops);
 
     // Find the slot index where this instruction reads and writes OldLI.
     // This is usually the def slot, except for tied early clobbers.
@@ -1156,7 +1159,7 @@ void InlineSpiller::spillAroundUses(unsigned Reg) {
       SnippetCopies.insert(MI);
       continue;
     }
-    if (Writes) {
+    if (RI.Writes) {
       // Hoist the spill of a sib-reg copy.
       if (hoistSpill(OldLI, MI)) {
         // This COPY is now dead, the value is already in the stack slot.
@@ -1173,7 +1176,7 @@ void InlineSpiller::spillAroundUses(unsigned Reg) {
     }
 
     // Attempt to fold memory ops.
-    if (foldMemoryOperand(MI, Ops))
+    if (foldMemoryOperand(Ops))
      continue;
 
     // Allocate interval around instruction.
@@ -1181,16 +1184,16 @@ void InlineSpiller::spillAroundUses(unsigned Reg) {
     LiveInterval &NewLI = Edit->createFrom(Reg, LIS, VRM);
     NewLI.markNotSpillable();
 
-    if (Reads)
+    if (RI.Reads)
       insertReload(NewLI, Idx, MI);
 
     // Rewrite instruction operands.
     bool hasLiveDef = false;
     for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
-      MachineOperand &MO = MI->getOperand(Ops[i]);
+      MachineOperand &MO = Ops[i].first->getOperand(Ops[i].second);
       MO.setReg(NewLI.reg);
       if (MO.isUse()) {
-        if (!MI->isRegTiedToDefOperand(Ops[i]))
+        if (!Ops[i].first->isRegTiedToDefOperand(Ops[i].second))
           MO.setIsKill();
       } else {
         if (!MO.isDead())
@@ -1200,15 +1203,15 @@ void InlineSpiller::spillAroundUses(unsigned Reg) {
     DEBUG(dbgs() << "\trewrite: " << Idx << '\t' << *MI);
 
     // FIXME: Use a second vreg if instruction has no tied ops.
-    if (Writes) {
-      if (hasLiveDef)
-        insertSpill(NewLI, OldLI, Idx, MI);
-      else {
-        // This instruction defines a dead value. We don't need to spill it,
-        // but do create a live range for the dead value.
-        VNInfo *VNI = NewLI.getNextValue(Idx, LIS.getVNInfoAllocator());
-        NewLI.addRange(LiveRange(Idx, Idx.getDeadSlot(), VNI));
-      }
+    if (RI.Writes) {
+      if (hasLiveDef)
+        insertSpill(NewLI, OldLI, Idx, MI);
+      else {
+        // This instruction defines a dead value. We don't need to spill it,
+        // but do create a live range for the dead value.
+        VNInfo *VNI = NewLI.getNextValue(Idx, LIS.getVNInfoAllocator());
+        NewLI.addRange(LiveRange(Idx, Idx.getDeadSlot(), VNI));
+      }
     }
 
     DEBUG(dbgs() << "\tinterval: " << NewLI << '\n');
```