//===- CalcSpillWeights.cpp -----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/CalcSpillWeights.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/VirtRegMap.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <set>
#include <tuple>

using namespace llvm;

#define DEBUG_TYPE "calcspillweights"
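// Compute spill weights and allocation hints for every virtual register in MF
// that still has at least one non-debug reference.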
void llvm::calculateSpillWeightsAndHints(LiveIntervals &LIS,
                                         MachineFunction &MF,
                                         VirtRegMap *VRM,
                                         const MachineLoopInfo &MLI,
                                         const MachineBlockFrequencyInfo &MBFI,
                                         VirtRegAuxInfo::NormalizingFn norm) {
  LLVM_DEBUG(dbgs() << "********** Compute Spill Weights **********\n"
                    << "********** Function: " << MF.getName() << '\n');

  MachineRegisterInfo &MRI = MF.getRegInfo();
  VirtRegAuxInfo VRAI(MF, LIS, VRM, MLI, MBFI, norm);
  for (unsigned i = 0, e = MRI.getNumVirtRegs(); i != e; ++i) {
    unsigned Reg = Register::index2VirtReg(i);
    if (MRI.reg_nodbg_empty(Reg))
      continue;
    VRAI.calculateSpillWeightAndHint(LIS.getInterval(Reg));
  }
}

// Return the preferred allocation register for reg, given a COPY instruction.
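// If the other end of the copy is a virtual register, it is returned only
// when the sub-register indices on both sides match. For a physical register,
// the copied (sub-)register is returned if it is in reg's register class;
// otherwise a matching super-register is tried. Returns 0 if there is no
// useful hint.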
static Register copyHint(const MachineInstr *mi, unsigned reg,
                         const TargetRegisterInfo &tri,
                         const MachineRegisterInfo &mri) {
  unsigned sub, hsub;
  Register hreg;
  if (mi->getOperand(0).getReg() == reg) {
    sub = mi->getOperand(0).getSubReg();
    hreg = mi->getOperand(1).getReg();
    hsub = mi->getOperand(1).getSubReg();
  } else {
    sub = mi->getOperand(1).getSubReg();
    hreg = mi->getOperand(0).getReg();
    hsub = mi->getOperand(0).getSubReg();
  }

  if (!hreg)
    return 0;

  if (Register::isVirtualRegister(hreg))
    return sub == hsub ? hreg : Register();

  const TargetRegisterClass *rc = mri.getRegClass(reg);
  Register CopiedPReg = (hsub ? tri.getSubReg(hreg, hsub) : hreg);
  if (rc->contains(CopiedPReg))
    return CopiedPReg;

  // Check if reg:sub matches so that a super register could be hinted.
  if (sub)
    return tri.getMatchingSuperReg(CopiedPReg, sub, rc);

  return 0;
}

// Check if all values in LI are rematerializable
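// When a VirtRegMap is provided, full copies introduced by live-range
// splitting are traced back to their original value, since the inline spiller
// can rematerialize through such copies.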
static bool isRematerializable(const LiveInterval &LI,
                               const LiveIntervals &LIS,
                               VirtRegMap *VRM,
                               const TargetInstrInfo &TII) {
  unsigned Reg = LI.reg;
  unsigned Original = VRM ? VRM->getOriginal(Reg) : 0;
  for (LiveInterval::const_vni_iterator I = LI.vni_begin(), E = LI.vni_end();
       I != E; ++I) {
    const VNInfo *VNI = *I;
    if (VNI->isUnused())
      continue;
    if (VNI->isPHIDef())
      return false;

    MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
    assert(MI && "Dead valno in interval");

    // Trace copies introduced by live range splitting. The inline
    // spiller can rematerialize through these copies, so the spill
    // weight must reflect this.
    if (VRM) {
      while (MI->isFullCopy()) {
        // The copy destination must match the interval register.
        if (MI->getOperand(0).getReg() != Reg)
          return false;

        // Get the source register.
        Reg = MI->getOperand(1).getReg();

        // If the original (pre-splitting) registers match, this
        // copy came from a split.
        if (!Register::isVirtualRegister(Reg) ||
            VRM->getOriginal(Reg) != Original)
          return false;

        // Follow the copy live-in value.
        const LiveInterval &SrcLI = LIS.getInterval(Reg);
        LiveQueryResult SrcQ = SrcLI.Query(VNI->def);
        VNI = SrcQ.valueIn();
        assert(VNI && "Copy from non-existing value");
        if (VNI->isPHIDef())
          return false;
        MI = LIS.getInstructionFromIndex(VNI->def);
        assert(MI && "Dead valno in interval");
      }
    }

    if (!TII.isTriviallyReMaterializable(*MI, LIS.getAliasAnalysis()))
      return false;
  }
  return true;
}
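// Compute the spill weight and allocation hints for li, updating the weight
// stored on the interval unless it turns out to be unspillable.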
void VirtRegAuxInfo::calculateSpillWeightAndHint(LiveInterval &li) {
  float weight = weightCalcHelper(li);
  // Check if unspillable.
  if (weight < 0)
    return;
  li.weight = weight;
}
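// Compute the expected spill weight of a hypothetical live range of li
// restricted to the instructions between start and end, without updating li
// itself; see the localSplitArtifact handling in weightCalcHelper().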
float VirtRegAuxInfo::futureWeight(LiveInterval &li, SlotIndex start,
                                   SlotIndex end) {
  return weightCalcHelper(li, &start, &end);
}
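// Shared implementation for calculateSpillWeightAndHint() and futureWeight():
// walks every non-debug instruction using li.reg, accumulates a spill weight
// scaled by block frequency, collects allocation hints from copies, and
// returns the normalized weight, or a negative value if li is unspillable.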
float VirtRegAuxInfo::weightCalcHelper(LiveInterval &li, SlotIndex *start,
                                       SlotIndex *end) {
  MachineRegisterInfo &mri = MF.getRegInfo();
  const TargetRegisterInfo &tri = *MF.getSubtarget().getRegisterInfo();
  MachineBasicBlock *mbb = nullptr;
  MachineLoop *loop = nullptr;
  bool isExiting = false;
  float totalWeight = 0;
  unsigned numInstr = 0; // Number of instructions using li
  SmallPtrSet<MachineInstr*, 8> visited;

  std::pair<unsigned, unsigned> TargetHint = mri.getRegAllocationHint(li.reg);

  // Don't recompute spill weight for an unspillable register.
  bool Spillable = li.isSpillable();
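  // start/end are provided (e.g. by futureWeight()) when estimating the weight
  // of a prospective local split interval rather than of li itself.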
  bool localSplitArtifact = start && end;

  // Do not update future local split artifacts.
  bool updateLI = !localSplitArtifact;

  if (localSplitArtifact) {
    MachineBasicBlock *localMBB = LIS.getMBBFromIndex(*end);
    assert(localMBB == LIS.getMBBFromIndex(*start) &&
           "start and end are expected to be in the same basic block");

    // A local split artifact will have two additional copy instructions, and
    // they will be in the same BB:
    //   localLI = COPY other
    //   ...
    //   other   = COPY localLI
    totalWeight += LiveIntervals::getSpillWeight(true, false, &MBFI, localMBB);
    totalWeight += LiveIntervals::getSpillWeight(false, true, &MBFI, localMBB);

    numInstr += 2;
  }

  // CopyHint is a sortable hint derived from a COPY instruction.
  struct CopyHint {
    unsigned Reg;
    float Weight;
    bool IsPhys;
    CopyHint(unsigned R, float W, bool P) :
        Reg(R), Weight(W), IsPhys(P) {}
    bool operator<(const CopyHint &rhs) const {
      // Always prefer any physreg hint.
      if (IsPhys != rhs.IsPhys)
        return (IsPhys && !rhs.IsPhys);
      if (Weight != rhs.Weight)
        return (Weight > rhs.Weight);
      return Reg < rhs.Reg; // Tie-breaker.
    }
  };
  std::set<CopyHint> CopyHints;
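  // Walk all non-debug instructions that read or write li.reg, accumulating
  // spill weight and collecting allocation hints from copy instructions.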
  for (MachineRegisterInfo::reg_instr_nodbg_iterator
           I = mri.reg_instr_nodbg_begin(li.reg),
           E = mri.reg_instr_nodbg_end();
       I != E;) {
    MachineInstr *mi = &*(I++);

    // For local split artifacts, we are interested only in instructions
    // between the expected start and end of the range.
    SlotIndex si = LIS.getInstructionIndex(*mi);
    if (localSplitArtifact && ((si < *start) || (si > *end)))
      continue;

    numInstr++;
    if (mi->isIdentityCopy() || mi->isImplicitDef())
      continue;
    if (!visited.insert(mi).second)
      continue;

    float weight = 1.0f;
    if (Spillable) {
      // Get loop info for mi.
      if (mi->getParent() != mbb) {
        mbb = mi->getParent();
        loop = Loops.getLoopFor(mbb);
        isExiting = loop ? loop->isLoopExiting(mbb) : false;
      }

      // Calculate instr weight.
      bool reads, writes;
      std::tie(reads, writes) = mi->readsWritesVirtualRegister(li.reg);
      weight = LiveIntervals::getSpillWeight(writes, reads, &MBFI, *mi);

      // Give extra weight to what looks like a loop induction variable update.
      if (writes && isExiting && LIS.isLiveOutOfMBB(li, mbb))
        weight *= 3;

      totalWeight += weight;
    }

    // Get allocation hints from copies.
    if (!mi->isCopy())
      continue;
    Register hint = copyHint(mi, li.reg, tri, mri);
    if (!hint)
      continue;
    // Force hweight onto the stack so that x86 doesn't add hidden precision,
    // making the comparison incorrectly pass (i.e., 1 > 1 == true??).
    //
    // FIXME: we probably shouldn't use floats at all.
    volatile float hweight = Hint[hint] += weight;
    if (Register::isVirtualRegister(hint) || mri.isAllocatable(hint))
      CopyHints.insert(
          CopyHint(hint, hweight, Register::isPhysicalRegister(hint)));
  }
  Hint.clear();

  // Pass all the sorted copy hints to mri.
  if (updateLI && CopyHints.size()) {
    // Remove a generic hint if previously added by target.
    if (TargetHint.first == 0 && TargetHint.second)
      mri.clearSimpleHint(li.reg);

    std::set<unsigned> HintedRegs;
    for (auto &Hint : CopyHints) {
      if (!HintedRegs.insert(Hint.Reg).second ||
          (TargetHint.first != 0 && Hint.Reg == TargetHint.second))
        // Don't add the same reg twice or the target-type hint again.
        continue;
      mri.addRegAllocationHint(li.reg, Hint.Reg);
    }

    // Weakly boost the spill weight of hinted registers.
    totalWeight *= 1.01F;
  }

  // If the live interval was already unspillable, leave it that way.
  if (!Spillable)
    return -1.0;

  // Mark li as unspillable if all live ranges are tiny and the interval
  // is not live at any reg mask. If the interval is live at a reg mask,
  // spilling may be required.
  if (updateLI && li.isZeroLength(LIS.getSlotIndexes()) &&
      !li.isLiveAtIndexes(LIS.getRegMaskSlots())) {
    li.markNotSpillable();
    return -1.0;
  }

  // If all of the definitions of the interval are re-materializable,
  // it is a preferred candidate for spilling.
  // FIXME: this gets much more complicated once we support non-trivial
  // re-materialization.
  if (isRematerializable(li, LIS, VRM, *MF.getSubtarget().getInstrInfo()))
    totalWeight *= 0.5F;
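  // Finally, normalize the accumulated weight by the size of the range (the
  // normalizing function also receives the instruction count) so intervals of
  // different lengths remain comparable.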
  if (localSplitArtifact)
    return normalize(totalWeight, start->distance(*end), numInstr);
  return normalize(totalWeight, li.getSize(), numInstr);
}
|