//===------------------------ CalcSpillWeights.cpp ------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
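
// This file computes a spill weight for every virtual register and, where a
// COPY instruction suggests one, a preferred register allocation hint, before
// register allocation runs.
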
#include "llvm/CodeGen/VirtRegMap.h"
#include "llvm/CodeGen/CalcSpillWeights.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;

#define DEBUG_TYPE "calcspillweights"

void llvm::calculateSpillWeightsAndHints(LiveIntervals &LIS,
                                         MachineFunction &MF,
                                         VirtRegMap *VRM,
                                         const MachineLoopInfo &MLI,
                                         const MachineBlockFrequencyInfo &MBFI,
                                         VirtRegAuxInfo::NormalizingFn norm) {
  DEBUG(dbgs() << "********** Compute Spill Weights **********\n"
               << "********** Function: " << MF.getName() << '\n');

  MachineRegisterInfo &MRI = MF.getRegInfo();
  VirtRegAuxInfo VRAI(MF, LIS, VRM, MLI, MBFI, norm);
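  // Compute a weight and hint for every virtual register in the function;
  // registers with no non-debug uses are skipped.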
  for (unsigned i = 0, e = MRI.getNumVirtRegs(); i != e; ++i) {
    unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
    if (MRI.reg_nodbg_empty(Reg))
      continue;
    VRAI.calculateSpillWeightAndHint(LIS.getInterval(Reg));
  }
}
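
// Example (sketch, identifiers illustrative): a register allocation pass
// would typically call this once live intervals, loop info, and block
// frequencies are available, e.g.
//
//   calculateSpillWeightsAndHints(*LIS, MF, &VRM, *Loops, *MBFI);
//
// passing a custom VirtRegAuxInfo::NormalizingFn as the last argument when
// the default weight normalization is not wanted.
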
// Return the preferred allocation register for reg, given a COPY instruction.
static unsigned copyHint(const MachineInstr *mi, unsigned reg,
                         const TargetRegisterInfo &tri,
                         const MachineRegisterInfo &mri) {
  unsigned sub, hreg, hsub;
  if (mi->getOperand(0).getReg() == reg) {
    sub = mi->getOperand(0).getSubReg();
    hreg = mi->getOperand(1).getReg();
    hsub = mi->getOperand(1).getSubReg();
  } else {
    sub = mi->getOperand(1).getSubReg();
    hreg = mi->getOperand(0).getReg();
    hsub = mi->getOperand(0).getSubReg();
  }

  if (!hreg)
    return 0;

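  // A copy to or from another virtual register is only a useful hint when
  // both sides of the copy use the same subregister index.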
  if (TargetRegisterInfo::isVirtualRegister(hreg))
    return sub == hsub ? hreg : 0;

  const TargetRegisterClass *rc = mri.getRegClass(reg);

  // Only allow physreg hints in rc.
  if (sub == 0)
    return rc->contains(hreg) ? hreg : 0;

  // reg:sub should match the physreg hreg.
  return tri.getMatchingSuperReg(hreg, sub, rc);
}

// Check if all values in LI are rematerializable
static bool isRematerializable(const LiveInterval &LI,
                               const LiveIntervals &LIS,
                               VirtRegMap *VRM,
                               const TargetInstrInfo &TII) {
  unsigned Reg = LI.reg;
  unsigned Original = VRM ? VRM->getOriginal(Reg) : 0;
  for (LiveInterval::const_vni_iterator I = LI.vni_begin(), E = LI.vni_end();
       I != E; ++I) {
    const VNInfo *VNI = *I;
    if (VNI->isUnused())
      continue;
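    // A value defined by a PHI has no single defining instruction, so it can
    // never be rematerialized.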
    if (VNI->isPHIDef())
      return false;

    MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
    assert(MI && "Dead valno in interval");

    // Trace copies introduced by live range splitting. The inline
    // spiller can rematerialize through these copies, so the spill
    // weight must reflect this.
    if (VRM) {
      while (MI->isFullCopy()) {
        // The copy destination must match the interval register.
        if (MI->getOperand(0).getReg() != Reg)
          return false;

        // Get the source register.
        Reg = MI->getOperand(1).getReg();

        // If the original (pre-splitting) registers match, this copy came
        // from a split.
        if (!TargetRegisterInfo::isVirtualRegister(Reg) ||
            VRM->getOriginal(Reg) != Original)
          return false;

        // Follow the copy live-in value.
        const LiveInterval &SrcLI = LIS.getInterval(Reg);
        LiveQueryResult SrcQ = SrcLI.Query(VNI->def);
        VNI = SrcQ.valueIn();
        assert(VNI && "Copy from non-existing value");
        if (VNI->isPHIDef())
          return false;
        MI = LIS.getInstructionFromIndex(VNI->def);
        assert(MI && "Dead valno in interval");
      }
    }

    if (!TII.isTriviallyReMaterializable(*MI, LIS.getAliasAnalysis()))
      return false;
  }
  return true;
}
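
// Compute the spill weight and the preferred allocation hint for a single
// live interval.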
void
VirtRegAuxInfo::calculateSpillWeightAndHint(LiveInterval &li) {
  MachineRegisterInfo &mri = MF.getRegInfo();
  const TargetRegisterInfo &tri = *MF.getSubtarget().getRegisterInfo();
  MachineBasicBlock *mbb = nullptr;
  MachineLoop *loop = nullptr;
  bool isExiting = false;
  float totalWeight = 0;
  unsigned numInstr = 0; // Number of instructions using li
  SmallPtrSet<MachineInstr*, 8> visited;

  // Find the best physreg hint and the best virtreg hint.
  float bestPhys = 0, bestVirt = 0;
  unsigned hintPhys = 0, hintVirt = 0;

  // Don't recompute a target specific hint.
  bool noHint = mri.getRegAllocationHint(li.reg).first != 0;

  // Don't recompute spill weight for an unspillable register.
  bool Spillable = li.isSpillable();

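  // Make a single pass over every instruction that uses li.reg: accumulate
  // the spill weight contributed by each use or def and collect copy hints
  // along the way.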
  for (MachineRegisterInfo::reg_instr_iterator
       I = mri.reg_instr_begin(li.reg), E = mri.reg_instr_end();
       I != E; ) {
    MachineInstr *mi = &*(I++);
    numInstr++;
    if (mi->isIdentityCopy() || mi->isImplicitDef() || mi->isDebugValue())
      continue;
    if (!visited.insert(mi).second)
      continue;

    float weight = 1.0f;
    if (Spillable) {
      // Get loop info for mi.
      if (mi->getParent() != mbb) {
        mbb = mi->getParent();
        loop = Loops.getLoopFor(mbb);
        isExiting = loop ? loop->isLoopExiting(mbb) : false;
      }

      // Calculate instr weight.
      bool reads, writes;
      std::tie(reads, writes) = mi->readsWritesVirtualRegister(li.reg);
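      // getSpillWeight() scales the access by the relative frequency of the
      // enclosing basic block, so accesses in hot blocks count for more.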
      weight = LiveIntervals::getSpillWeight(writes, reads, &MBFI, *mi);

      // Give extra weight to what looks like a loop induction variable update.
      if (writes && isExiting && LIS.isLiveOutOfMBB(li, mbb))
        weight *= 3;

      totalWeight += weight;
    }

    // Get allocation hints from copies.
    if (noHint || !mi->isCopy())
      continue;
    unsigned hint = copyHint(mi, li.reg, tri, mri);
    if (!hint)
      continue;
    // Force hweight onto the stack so that x86 doesn't add hidden precision,
    // making the comparison incorrectly pass (i.e., 1 > 1 == true??).
    //
    // FIXME: we probably shouldn't use floats at all.
    volatile float hweight = Hint[hint] += weight;
    if (TargetRegisterInfo::isPhysicalRegister(hint)) {
      if (hweight > bestPhys && mri.isAllocatable(hint)) {
        bestPhys = hweight;
        hintPhys = hint;
      }
    } else {
      if (hweight > bestVirt) {
        bestVirt = hweight;
        hintVirt = hint;
      }
    }
  }

  Hint.clear();

  // Always prefer the physreg hint.
  if (unsigned hint = hintPhys ? hintPhys : hintVirt) {
    mri.setRegAllocationHint(li.reg, 0, hint);
    // Weakly boost the spill weight of hinted registers.
    totalWeight *= 1.01F;
  }

  // If the live interval was already unspillable, leave it that way.
  if (!Spillable)
    return;

  // Mark li as unspillable if all live ranges are tiny and the interval
  // is not live at any reg mask. If the interval is live at a reg mask,
  // spilling may be required.
  if (li.isZeroLength(LIS.getSlotIndexes()) &&
      !li.isLiveAtIndexes(LIS.getRegMaskSlots())) {
    li.markNotSpillable();
    return;
  }

  // If all of the definitions of the interval are re-materializable,
  // it is a preferred candidate for spilling.
  // FIXME: this gets much more complicated once we support non-trivial
  // re-materialization.
  if (isRematerializable(li, LIS, VRM, *MF.getSubtarget().getInstrInfo()))
    totalWeight *= 0.5F;

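  // Finally, scale the accumulated weight by the interval's size and the
  // number of instructions that use it, through the normalization function
  // that was passed to calculateSpillWeightsAndHints().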
  li.weight = normalize(totalWeight, li.getSize(), numInstr);
}
|