//===- RegAllocPBQP.cpp ---- PBQP Register Allocator ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains a Partitioned Boolean Quadratic Programming (PBQP) based
// register allocator for LLVM. This allocator works by constructing a PBQP
// problem representing the register allocation problem under consideration,
// solving this using a PBQP solver, and mapping the solution back to a
// register assignment. If any variables are selected for spilling then spill
// code is inserted and the process repeated.
//
// The PBQP solver (pbqp.c) provided for this allocator uses a heuristic tuned
// for register allocation. For more information on PBQP for register
// allocation, see the following papers:
//
//   (1) Hames, L. and Scholz, B. 2006. Nearly optimal register allocation with
//   PBQP. In Proceedings of the 7th Joint Modular Languages Conference
//   (JMLC'06). LNCS, vol. 4228. Springer, New York, NY, USA. 346-361.
//
//   (2) Scholz, B., Eckstein, E. 2002. Register allocation for irregular
//   architectures. In Proceedings of the Joint Conference on Languages,
//   Compilers and Tools for Embedded Systems (LCTES'02), ACM Press, New York,
//   NY, USA, 139-148.
//
//===----------------------------------------------------------------------===//
|
|
|
|
|
#include "llvm/CodeGen/RegAllocPBQP.h"
#include "RegisterCoalescer.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/CalcSpillWeights.h"
#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/LiveRangeEdit.h"
#include "llvm/CodeGen/LiveStacks.h"
#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PBQP/Graph.h"
#include "llvm/CodeGen/PBQP/Math.h"
#include "llvm/CodeGen/PBQP/Solution.h"
#include "llvm/CodeGen/PBQPRAConstraint.h"
#include "llvm/CodeGen/RegAllocRegistry.h"
#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/CodeGen/Spiller.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/VirtRegMap.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Module.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Printable.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <limits>
#include <map>
#include <memory>
#include <queue>
#include <set>
#include <sstream>
#include <string>
#include <system_error>
#include <tuple>
#include <utility>
#include <vector>
|
2008-10-03 02:29:27 +08:00
|
|
|
|
2010-09-23 12:28:54 +08:00
|
|
|
using namespace llvm;
|
2010-09-18 17:07:10 +08:00
|
|
|
|
2014-04-22 10:02:50 +08:00
|
|
|
#define DEBUG_TYPE "regalloc"
|
|
|
|
|
2008-10-03 02:29:27 +08:00
|
|
|
// Register the PBQP allocator under the name "pbqp" so it can be selected
// with -regalloc=pbqp.
static RegisterRegAlloc
RegisterPBQPRepAlloc("pbqp", "PBQP register allocator",
                       createDefaultPBQPRegisterAllocator);
|
2008-10-03 02:29:27 +08:00
|
|
|
|
2009-08-19 09:36:14 +08:00
|
|
|
// Command-line switch enabling the coalescing constraint (off by default).
static cl::opt<bool>
PBQPCoalescing("pbqp-coalescing",
                cl::desc("Attempt coalescing during PBQP register allocation."),
                cl::init(false), cl::Hidden);
|
2009-08-19 09:36:14 +08:00
|
|
|
|
2012-03-27 07:07:23 +08:00
|
|
|
#ifndef NDEBUG
|
|
|
|
static cl::opt<bool>
|
2014-10-10 02:20:51 +08:00
|
|
|
PBQPDumpGraphs("pbqp-dump-graphs",
|
2012-03-27 07:07:23 +08:00
|
|
|
cl::desc("Dump graphs for each function/round in the compilation unit."),
|
|
|
|
cl::init(false), cl::Hidden);
|
|
|
|
#endif
|
|
|
|
|
2010-09-23 12:28:54 +08:00
|
|
|
namespace {
|
|
|
|
|
|
|
|
///
|
|
|
|
/// PBQP based allocators solve the register allocation problem by mapping
|
|
|
|
/// register allocation problems to Partitioned Boolean Quadratic
|
|
|
|
/// Programming problems.
|
|
|
|
class RegAllocPBQP : public MachineFunctionPass {
|
|
|
|
public:
|
|
|
|
static char ID;
|
|
|
|
|
|
|
|
/// Construct a PBQP register allocator.
|
2014-10-10 02:20:51 +08:00
|
|
|
RegAllocPBQP(char *cPassID = nullptr)
|
|
|
|
: MachineFunctionPass(ID), customPassID(cPassID) {
|
2010-10-20 01:21:58 +08:00
|
|
|
initializeSlotIndexesPass(*PassRegistry::getPassRegistry());
|
|
|
|
initializeLiveIntervalsPass(*PassRegistry::getPassRegistry());
|
|
|
|
initializeLiveStacksPass(*PassRegistry::getPassRegistry());
|
|
|
|
initializeVirtRegMapPass(*PassRegistry::getPassRegistry());
|
|
|
|
}
|
2010-09-23 12:28:54 +08:00
|
|
|
|
|
|
|
/// Return the pass name.
|
2016-10-01 10:56:57 +08:00
|
|
|
StringRef getPassName() const override { return "PBQP Register Allocator"; }
|
2010-09-23 12:28:54 +08:00
|
|
|
|
|
|
|
/// PBQP analysis usage.
|
2014-03-07 17:26:03 +08:00
|
|
|
void getAnalysisUsage(AnalysisUsage &au) const override;
|
2010-09-23 12:28:54 +08:00
|
|
|
|
|
|
|
/// Perform register allocation
|
2014-03-07 17:26:03 +08:00
|
|
|
bool runOnMachineFunction(MachineFunction &MF) override;
|
2010-09-23 12:28:54 +08:00
|
|
|
|
2016-08-24 05:19:49 +08:00
|
|
|
MachineFunctionProperties getRequiredProperties() const override {
|
|
|
|
return MachineFunctionProperties().set(
|
|
|
|
MachineFunctionProperties::Property::NoPHIs);
|
|
|
|
}
|
|
|
|
|
2010-09-23 12:28:54 +08:00
|
|
|
private:
|
2017-06-02 07:25:02 +08:00
|
|
|
using LI2NodeMap = std::map<const LiveInterval *, unsigned>;
|
|
|
|
using Node2LIMap = std::vector<const LiveInterval *>;
|
|
|
|
using AllowedSet = std::vector<unsigned>;
|
|
|
|
using AllowedSetMap = std::vector<AllowedSet>;
|
|
|
|
using RegPair = std::pair<unsigned, unsigned>;
|
|
|
|
using CoalesceMap = std::map<RegPair, PBQP::PBQPNum>;
|
|
|
|
using RegSet = std::set<unsigned>;
|
2010-09-23 12:28:54 +08:00
|
|
|
|
2011-06-17 15:09:01 +08:00
|
|
|
char *customPassID;
|
|
|
|
|
2014-10-10 02:20:51 +08:00
|
|
|
RegSet VRegsToAlloc, EmptyIntervalVRegs;
|
2010-09-23 12:28:54 +08:00
|
|
|
|
2016-04-13 11:08:27 +08:00
|
|
|
/// Inst which is a def of an original reg and whose defs are already all
|
|
|
|
/// dead after remat is saved in DeadRemats. The deletion of such inst is
|
|
|
|
/// postponed till all the allocations are done, so its remat expr is
|
|
|
|
/// always available for the remat of all the siblings of the original reg.
|
|
|
|
SmallPtrSet<MachineInstr *, 32> DeadRemats;
|
|
|
|
|
2018-05-01 23:54:18 +08:00
|
|
|
/// Finds the initial set of vreg intervals to allocate.
|
2014-10-10 02:20:51 +08:00
|
|
|
void findVRegIntervalsToAlloc(const MachineFunction &MF, LiveIntervals &LIS);
|
|
|
|
|
2018-05-01 23:54:18 +08:00
|
|
|
/// Constructs an initial graph.
|
2015-02-03 14:14:06 +08:00
|
|
|
void initializeGraph(PBQPRAGraph &G, VirtRegMap &VRM, Spiller &VRegSpiller);
|
|
|
|
|
2018-05-01 23:54:18 +08:00
|
|
|
/// Spill the given VReg.
|
2015-02-03 14:14:06 +08:00
|
|
|
void spillVReg(unsigned VReg, SmallVectorImpl<unsigned> &NewIntervals,
|
|
|
|
MachineFunction &MF, LiveIntervals &LIS, VirtRegMap &VRM,
|
|
|
|
Spiller &VRegSpiller);
|
2010-09-23 12:28:54 +08:00
|
|
|
|
2018-05-01 23:54:18 +08:00
|
|
|
/// Given a solved PBQP problem maps this solution back to a register
|
2010-09-23 12:28:54 +08:00
|
|
|
/// assignment.
|
2014-10-10 02:20:51 +08:00
|
|
|
bool mapPBQPToRegAlloc(const PBQPRAGraph &G,
|
|
|
|
const PBQP::Solution &Solution,
|
|
|
|
VirtRegMap &VRM,
|
|
|
|
Spiller &VRegSpiller);
|
2010-09-23 12:28:54 +08:00
|
|
|
|
2018-05-01 23:54:18 +08:00
|
|
|
/// Postprocessing before final spilling. Sets basic block "live in"
|
2010-09-23 12:28:54 +08:00
|
|
|
/// variables.
|
2014-10-10 02:20:51 +08:00
|
|
|
void finalizeAlloc(MachineFunction &MF, LiveIntervals &LIS,
|
|
|
|
VirtRegMap &VRM) const;
|
2010-09-23 12:28:54 +08:00
|
|
|
|
2016-04-13 11:08:27 +08:00
|
|
|
void postOptimization(Spiller &VRegSpiller, LiveIntervals &LIS);
|
2010-09-23 12:28:54 +08:00
|
|
|
};
|
|
|
|
|
2010-09-18 17:07:10 +08:00
|
|
|
// Pass identification: the address of ID serves as the unique pass identifier.
char RegAllocPBQP::ID = 0;
|
|
|
|
|
2018-05-02 00:10:38 +08:00
|
|
|
/// Set spill costs for each node in the PBQP reg-alloc graph.
|
2014-10-10 02:20:51 +08:00
|
|
|
class SpillCosts : public PBQPRAConstraint {
|
|
|
|
public:
|
|
|
|
void apply(PBQPRAGraph &G) override {
|
|
|
|
LiveIntervals &LIS = G.getMetadata().LIS;
|
|
|
|
|
[PBQP] Tweak spill costs and coalescing benefits
This patch improves how the different costs (register, interference, spill
and coalescing) relates together. The assumption is now that:
- coalescing (or any other "side effect" of reg alloc) is negative, and
instead of being derived from a spill cost, they use the block
frequency info.
- spill costs are in the [MinSpillCost:+inf( range
- register or interference costs are in [0.0:MinSpillCost( or +inf
The current MinSpillCost is set to 10.0, which is a random value high
enough that the current constraint builders do not need to worry about
when settings costs. It would however be worth adding a normalization
step for register and interference costs as the last step in the
constraint builder chain to ensure they are not greater than SpillMinCost
(unless this has some sense for some architectures). This would work well
with the current builder pipeline, where all costs are tweaked relatively
to each others, but could grow above MinSpillCost if the pipeline is
deep enough.
The current heuristic is tuned to depend rather on the number of uses of
a live interval rather than a density of uses, as used by the greedy
allocator. This heuristic provides a few percent improvement on a number
of benchmarks (eembc, spec, ...) and will definitely need to change once
spill placement is implemented: the current spill placement is really
ineficient, so making the cost proportionnal to the number of use is a
clear win.
llvm-svn: 221292
2014-11-05 04:51:24 +08:00
|
|
|
// A minimum spill costs, so that register constraints can can be set
|
|
|
|
// without normalization in the [0.0:MinSpillCost( interval.
|
|
|
|
const PBQP::PBQPNum MinSpillCost = 10.0;
|
|
|
|
|
2014-10-10 02:20:51 +08:00
|
|
|
for (auto NId : G.nodeIds()) {
|
|
|
|
PBQP::PBQPNum SpillCost =
|
|
|
|
LIS.getInterval(G.getNodeMetadata(NId).getVReg()).weight;
|
|
|
|
if (SpillCost == 0.0)
|
|
|
|
SpillCost = std::numeric_limits<PBQP::PBQPNum>::min();
|
[PBQP] Tweak spill costs and coalescing benefits
This patch improves how the different costs (register, interference, spill
and coalescing) relates together. The assumption is now that:
- coalescing (or any other "side effect" of reg alloc) is negative, and
instead of being derived from a spill cost, they use the block
frequency info.
- spill costs are in the [MinSpillCost:+inf( range
- register or interference costs are in [0.0:MinSpillCost( or +inf
The current MinSpillCost is set to 10.0, which is a random value high
enough that the current constraint builders do not need to worry about
when settings costs. It would however be worth adding a normalization
step for register and interference costs as the last step in the
constraint builder chain to ensure they are not greater than SpillMinCost
(unless this has some sense for some architectures). This would work well
with the current builder pipeline, where all costs are tweaked relatively
to each others, but could grow above MinSpillCost if the pipeline is
deep enough.
The current heuristic is tuned to depend rather on the number of uses of
a live interval rather than a density of uses, as used by the greedy
allocator. This heuristic provides a few percent improvement on a number
of benchmarks (eembc, spec, ...) and will definitely need to change once
spill placement is implemented: the current spill placement is really
ineficient, so making the cost proportionnal to the number of use is a
clear win.
llvm-svn: 221292
2014-11-05 04:51:24 +08:00
|
|
|
else
|
|
|
|
SpillCost += MinSpillCost;
|
2014-10-10 02:20:51 +08:00
|
|
|
PBQPRAGraph::RawVector NodeCosts(G.getNodeCosts(NId));
|
|
|
|
NodeCosts[PBQP::RegAlloc::getSpillOptionIdx()] = SpillCost;
|
|
|
|
G.setNodeCosts(NId, std::move(NodeCosts));
|
|
|
|
}
|
2010-09-18 17:07:10 +08:00
|
|
|
}
|
2014-10-10 02:20:51 +08:00
|
|
|
};
|
2010-09-18 17:07:10 +08:00
|
|
|
|
2018-05-02 00:10:38 +08:00
|
|
|
/// Add interference edges between overlapping vregs.
|
2014-10-10 02:20:51 +08:00
|
|
|
class Interference : public PBQPRAConstraint {
|
2014-10-28 01:44:25 +08:00
|
|
|
private:
|
2017-06-02 07:25:02 +08:00
|
|
|
using AllowedRegVecPtr = const PBQP::RegAlloc::AllowedRegVector *;
|
|
|
|
using IKey = std::pair<AllowedRegVecPtr, AllowedRegVecPtr>;
|
|
|
|
using IMatrixCache = DenseMap<IKey, PBQPRAGraph::MatrixPtr>;
|
|
|
|
using DisjointAllowedRegsCache = DenseSet<IKey>;
|
|
|
|
using IEdgeKey = std::pair<PBQP::GraphBase::NodeId, PBQP::GraphBase::NodeId>;
|
|
|
|
using IEdgeCache = DenseSet<IEdgeKey>;
|
2015-03-02 04:39:34 +08:00
|
|
|
|
|
|
|
bool haveDisjointAllowedRegs(const PBQPRAGraph &G, PBQPRAGraph::NodeId NId,
|
|
|
|
PBQPRAGraph::NodeId MId,
|
|
|
|
const DisjointAllowedRegsCache &D) const {
|
|
|
|
const auto *NRegs = &G.getNodeMetadata(NId).getAllowedRegs();
|
|
|
|
const auto *MRegs = &G.getNodeMetadata(MId).getAllowedRegs();
|
|
|
|
|
|
|
|
if (NRegs == MRegs)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
if (NRegs < MRegs)
|
|
|
|
return D.count(IKey(NRegs, MRegs)) > 0;
|
2015-03-02 05:22:50 +08:00
|
|
|
|
|
|
|
return D.count(IKey(MRegs, NRegs)) > 0;
|
2015-03-02 04:39:34 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
void setDisjointAllowedRegs(const PBQPRAGraph &G, PBQPRAGraph::NodeId NId,
|
|
|
|
PBQPRAGraph::NodeId MId,
|
|
|
|
DisjointAllowedRegsCache &D) {
|
|
|
|
const auto *NRegs = &G.getNodeMetadata(NId).getAllowedRegs();
|
|
|
|
const auto *MRegs = &G.getNodeMetadata(MId).getAllowedRegs();
|
|
|
|
|
|
|
|
assert(NRegs != MRegs && "AllowedRegs can not be disjoint with itself");
|
|
|
|
|
|
|
|
if (NRegs < MRegs)
|
|
|
|
D.insert(IKey(NRegs, MRegs));
|
|
|
|
else
|
|
|
|
D.insert(IKey(MRegs, NRegs));
|
|
|
|
}
|
2014-10-28 01:44:25 +08:00
|
|
|
|
2014-10-19 01:26:07 +08:00
|
|
|
// Holds (Interval, CurrentSegmentID, and NodeId). The first two are required
|
|
|
|
// for the fast interference graph construction algorithm. The last is there
|
|
|
|
// to save us from looking up node ids via the VRegToNode map in the graph
|
|
|
|
// metadata.
|
2017-06-02 07:25:02 +08:00
|
|
|
using IntervalInfo =
|
|
|
|
std::tuple<LiveInterval*, size_t, PBQP::GraphBase::NodeId>;
|
2014-10-19 01:26:07 +08:00
|
|
|
|
|
|
|
static SlotIndex getStartPoint(const IntervalInfo &I) {
|
|
|
|
return std::get<0>(I)->segments[std::get<1>(I)].start;
|
|
|
|
}
|
|
|
|
|
|
|
|
static SlotIndex getEndPoint(const IntervalInfo &I) {
|
|
|
|
return std::get<0>(I)->segments[std::get<1>(I)].end;
|
|
|
|
}
|
|
|
|
|
|
|
|
static PBQP::GraphBase::NodeId getNodeId(const IntervalInfo &I) {
|
|
|
|
return std::get<2>(I);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool lowestStartPoint(const IntervalInfo &I1,
|
|
|
|
const IntervalInfo &I2) {
|
|
|
|
// Condition reversed because priority queue has the *highest* element at
|
|
|
|
// the front, rather than the lowest.
|
|
|
|
return getStartPoint(I1) > getStartPoint(I2);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool lowestEndPoint(const IntervalInfo &I1,
|
|
|
|
const IntervalInfo &I2) {
|
|
|
|
SlotIndex E1 = getEndPoint(I1);
|
|
|
|
SlotIndex E2 = getEndPoint(I2);
|
|
|
|
|
|
|
|
if (E1 < E2)
|
|
|
|
return true;
|
|
|
|
|
|
|
|
if (E1 > E2)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
// If two intervals end at the same point, we need a way to break the tie or
|
|
|
|
// the set will assume they're actually equal and refuse to insert a
|
|
|
|
// "duplicate". Just compare the vregs - fast and guaranteed unique.
|
|
|
|
return std::get<0>(I1)->reg < std::get<0>(I2)->reg;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool isAtLastSegment(const IntervalInfo &I) {
|
|
|
|
return std::get<1>(I) == std::get<0>(I)->size() - 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
static IntervalInfo nextSegment(const IntervalInfo &I) {
|
|
|
|
return std::make_tuple(std::get<0>(I), std::get<1>(I) + 1, std::get<2>(I));
|
|
|
|
}
|
|
|
|
|
2014-10-10 02:20:51 +08:00
|
|
|
public:
|
|
|
|
void apply(PBQPRAGraph &G) override {
|
2014-10-19 01:26:07 +08:00
|
|
|
// The following is loosely based on the linear scan algorithm introduced in
|
|
|
|
// "Linear Scan Register Allocation" by Poletto and Sarkar. This version
|
|
|
|
// isn't linear, because the size of the active set isn't bound by the
|
|
|
|
// number of registers, but rather the size of the largest clique in the
|
|
|
|
// graph. Still, we expect this to be better than N^2.
|
2014-10-10 02:20:51 +08:00
|
|
|
LiveIntervals &LIS = G.getMetadata().LIS;
|
2014-10-28 01:44:25 +08:00
|
|
|
|
|
|
|
// Interferenc matrices are incredibly regular - they're only a function of
|
|
|
|
// the allowed sets, so we cache them to avoid the overhead of constructing
|
|
|
|
// and uniquing them.
|
|
|
|
IMatrixCache C;
|
2014-10-10 02:20:51 +08:00
|
|
|
|
2015-03-05 17:12:59 +08:00
|
|
|
// Finding an edge is expensive in the worst case (O(max_clique(G))). So
|
|
|
|
// cache locally edges we have already seen.
|
|
|
|
IEdgeCache EC;
|
|
|
|
|
2015-03-02 04:39:34 +08:00
|
|
|
// Cache known disjoint allowed registers pairs
|
|
|
|
DisjointAllowedRegsCache D;
|
|
|
|
|
2017-06-02 07:25:02 +08:00
|
|
|
using IntervalSet = std::set<IntervalInfo, decltype(&lowestEndPoint)>;
|
|
|
|
using IntervalQueue =
|
|
|
|
std::priority_queue<IntervalInfo, std::vector<IntervalInfo>,
|
|
|
|
decltype(&lowestStartPoint)>;
|
2014-10-19 01:26:07 +08:00
|
|
|
IntervalSet Active(lowestEndPoint);
|
|
|
|
IntervalQueue Inactive(lowestStartPoint);
|
|
|
|
|
|
|
|
// Start by building the inactive set.
|
|
|
|
for (auto NId : G.nodeIds()) {
|
|
|
|
unsigned VReg = G.getNodeMetadata(NId).getVReg();
|
|
|
|
LiveInterval &LI = LIS.getInterval(VReg);
|
|
|
|
assert(!LI.empty() && "PBQP graph contains node for empty interval");
|
|
|
|
Inactive.push(std::make_tuple(&LI, 0, NId));
|
|
|
|
}
|
|
|
|
|
|
|
|
while (!Inactive.empty()) {
|
|
|
|
// Tentatively grab the "next" interval - this choice may be overriden
|
|
|
|
// below.
|
|
|
|
IntervalInfo Cur = Inactive.top();
|
|
|
|
|
|
|
|
// Retire any active intervals that end before Cur starts.
|
|
|
|
IntervalSet::iterator RetireItr = Active.begin();
|
|
|
|
while (RetireItr != Active.end() &&
|
|
|
|
(getEndPoint(*RetireItr) <= getStartPoint(Cur))) {
|
|
|
|
// If this interval has subsequent segments, add the next one to the
|
|
|
|
// inactive list.
|
|
|
|
if (!isAtLastSegment(*RetireItr))
|
|
|
|
Inactive.push(nextSegment(*RetireItr));
|
|
|
|
|
|
|
|
++RetireItr;
|
|
|
|
}
|
|
|
|
Active.erase(Active.begin(), RetireItr);
|
|
|
|
|
|
|
|
// One of the newly retired segments may actually start before the
|
|
|
|
// Cur segment, so re-grab the front of the inactive list.
|
|
|
|
Cur = Inactive.top();
|
|
|
|
Inactive.pop();
|
|
|
|
|
|
|
|
// At this point we know that Cur overlaps all active intervals. Add the
|
|
|
|
// interference edges.
|
|
|
|
PBQP::GraphBase::NodeId NId = getNodeId(Cur);
|
|
|
|
for (const auto &A : Active) {
|
|
|
|
PBQP::GraphBase::NodeId MId = getNodeId(A);
|
|
|
|
|
2015-03-02 04:39:34 +08:00
|
|
|
// Do not add an edge when the nodes' allowed registers do not
|
|
|
|
// intersect: there is obviously no interference.
|
|
|
|
if (haveDisjointAllowedRegs(G, NId, MId, D))
|
|
|
|
continue;
|
|
|
|
|
2014-10-19 01:26:07 +08:00
|
|
|
// Check that we haven't already added this edge
|
2015-03-05 17:12:59 +08:00
|
|
|
IEdgeKey EK(std::min(NId, MId), std::max(NId, MId));
|
|
|
|
if (EC.count(EK))
|
2014-10-19 01:26:07 +08:00
|
|
|
continue;
|
|
|
|
|
|
|
|
// This is a new edge - add it to the graph.
|
2015-03-02 04:39:34 +08:00
|
|
|
if (!createInterferenceEdge(G, NId, MId, C))
|
|
|
|
setDisjointAllowedRegs(G, NId, MId, D);
|
2015-03-05 17:12:59 +08:00
|
|
|
else
|
|
|
|
EC.insert(EK);
|
2010-09-18 17:07:10 +08:00
|
|
|
}
|
2014-10-19 01:26:07 +08:00
|
|
|
|
|
|
|
// Finally, add Cur to the Active set.
|
|
|
|
Active.insert(Cur);
|
2010-09-18 17:07:10 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-10-10 02:20:51 +08:00
|
|
|
private:
|
2015-03-02 04:39:34 +08:00
|
|
|
// Create an Interference edge and add it to the graph, unless it is
|
|
|
|
// a null matrix, meaning the nodes' allowed registers do not have any
|
|
|
|
// interference. This case occurs frequently between integer and floating
|
|
|
|
// point registers for example.
|
|
|
|
// return true iff both nodes interferes.
|
|
|
|
bool createInterferenceEdge(PBQPRAGraph &G,
|
|
|
|
PBQPRAGraph::NodeId NId, PBQPRAGraph::NodeId MId,
|
|
|
|
IMatrixCache &C) {
|
2014-10-28 01:44:25 +08:00
|
|
|
const TargetRegisterInfo &TRI =
|
2015-01-27 16:27:06 +08:00
|
|
|
*G.getMetadata().MF.getSubtarget().getRegisterInfo();
|
2014-10-28 01:44:25 +08:00
|
|
|
const auto &NRegs = G.getNodeMetadata(NId).getAllowedRegs();
|
|
|
|
const auto &MRegs = G.getNodeMetadata(MId).getAllowedRegs();
|
|
|
|
|
|
|
|
// Try looking the edge costs up in the IMatrixCache first.
|
2015-03-02 04:39:34 +08:00
|
|
|
IKey K(&NRegs, &MRegs);
|
2014-10-28 01:44:25 +08:00
|
|
|
IMatrixCache::iterator I = C.find(K);
|
|
|
|
if (I != C.end()) {
|
|
|
|
G.addEdgeBypassingCostAllocator(NId, MId, I->second);
|
2015-03-02 04:39:34 +08:00
|
|
|
return true;
|
2014-10-28 01:44:25 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
PBQPRAGraph::RawMatrix M(NRegs.size() + 1, MRegs.size() + 1, 0);
|
2015-03-02 04:39:34 +08:00
|
|
|
bool NodesInterfere = false;
|
2014-10-28 01:44:25 +08:00
|
|
|
for (unsigned I = 0; I != NRegs.size(); ++I) {
|
|
|
|
unsigned PRegN = NRegs[I];
|
|
|
|
for (unsigned J = 0; J != MRegs.size(); ++J) {
|
|
|
|
unsigned PRegM = MRegs[J];
|
2015-03-02 04:39:34 +08:00
|
|
|
if (TRI.regsOverlap(PRegN, PRegM)) {
|
2014-10-10 02:20:51 +08:00
|
|
|
M[I + 1][J + 1] = std::numeric_limits<PBQP::PBQPNum>::infinity();
|
2015-03-02 04:39:34 +08:00
|
|
|
NodesInterfere = true;
|
|
|
|
}
|
2010-07-17 14:31:41 +08:00
|
|
|
}
|
2010-09-18 17:07:10 +08:00
|
|
|
}
|
2014-10-10 02:20:51 +08:00
|
|
|
|
2015-03-02 04:39:34 +08:00
|
|
|
if (!NodesInterfere)
|
|
|
|
return false;
|
|
|
|
|
2014-10-28 01:44:25 +08:00
|
|
|
PBQPRAGraph::EdgeId EId = G.addEdge(NId, MId, std::move(M));
|
|
|
|
C[K] = G.getEdgeCostsPtr(EId);
|
2015-03-02 04:39:34 +08:00
|
|
|
|
|
|
|
return true;
|
2010-09-18 17:07:10 +08:00
|
|
|
}
|
2014-10-10 02:20:51 +08:00
|
|
|
};
|
2008-10-03 02:29:27 +08:00
|
|
|
|
2014-10-10 02:20:51 +08:00
|
|
|
class Coalescing : public PBQPRAConstraint {
|
|
|
|
public:
|
|
|
|
void apply(PBQPRAGraph &G) override {
|
|
|
|
MachineFunction &MF = G.getMetadata().MF;
|
|
|
|
MachineBlockFrequencyInfo &MBFI = G.getMetadata().MBFI;
|
2015-01-27 16:27:06 +08:00
|
|
|
CoalescerPair CP(*MF.getSubtarget().getRegisterInfo());
|
2014-10-10 02:20:51 +08:00
|
|
|
|
|
|
|
// Scan the machine function and add a coalescing cost whenever CoalescerPair
|
|
|
|
// gives the Ok.
|
|
|
|
for (const auto &MBB : MF) {
|
|
|
|
for (const auto &MI : MBB) {
|
|
|
|
// Skip not-coalescable or already coalesced copies.
|
|
|
|
if (!CP.setRegisters(&MI) || CP.getSrcReg() == CP.getDstReg())
|
|
|
|
continue;
|
2010-09-21 21:19:36 +08:00
|
|
|
|
2014-10-10 02:20:51 +08:00
|
|
|
unsigned DstReg = CP.getDstReg();
|
|
|
|
unsigned SrcReg = CP.getSrcReg();
|
2010-09-21 21:19:36 +08:00
|
|
|
|
[PBQP] Tweak spill costs and coalescing benefits
This patch improves how the different costs (register, interference, spill
and coalescing) relates together. The assumption is now that:
- coalescing (or any other "side effect" of reg alloc) is negative, and
instead of being derived from a spill cost, they use the block
frequency info.
- spill costs are in the [MinSpillCost:+inf( range
- register or interference costs are in [0.0:MinSpillCost( or +inf
The current MinSpillCost is set to 10.0, which is a random value high
enough that the current constraint builders do not need to worry about
when settings costs. It would however be worth adding a normalization
step for register and interference costs as the last step in the
constraint builder chain to ensure they are not greater than SpillMinCost
(unless this has some sense for some architectures). This would work well
with the current builder pipeline, where all costs are tweaked relatively
to each others, but could grow above MinSpillCost if the pipeline is
deep enough.
The current heuristic is tuned to depend rather on the number of uses of
a live interval rather than a density of uses, as used by the greedy
allocator. This heuristic provides a few percent improvement on a number
of benchmarks (eembc, spec, ...) and will definitely need to change once
spill placement is implemented: the current spill placement is really
ineficient, so making the cost proportionnal to the number of use is a
clear win.
llvm-svn: 221292
2014-11-05 04:51:24 +08:00
|
|
|
const float Scale = 1.0f / MBFI.getEntryFreq();
|
|
|
|
PBQP::PBQPNum CBenefit = MBFI.getBlockFreq(&MBB).getFrequency() * Scale;
|
2010-09-21 21:19:36 +08:00
|
|
|
|
2014-10-10 02:20:51 +08:00
|
|
|
if (CP.isPhys()) {
|
|
|
|
if (!MF.getRegInfo().isAllocatable(DstReg))
|
|
|
|
continue;
|
2010-09-21 21:19:36 +08:00
|
|
|
|
2014-10-10 02:20:51 +08:00
|
|
|
PBQPRAGraph::NodeId NId = G.getMetadata().getNodeIdForVReg(SrcReg);
|
2012-02-10 12:10:26 +08:00
|
|
|
|
2014-10-28 01:44:25 +08:00
|
|
|
const PBQPRAGraph::NodeMetadata::AllowedRegVector &Allowed =
|
|
|
|
G.getNodeMetadata(NId).getAllowedRegs();
|
2010-09-21 21:19:36 +08:00
|
|
|
|
2014-10-10 02:20:51 +08:00
|
|
|
unsigned PRegOpt = 0;
|
|
|
|
while (PRegOpt < Allowed.size() && Allowed[PRegOpt] != DstReg)
|
|
|
|
++PRegOpt;
|
2010-09-21 21:19:36 +08:00
|
|
|
|
2014-10-10 02:20:51 +08:00
|
|
|
if (PRegOpt < Allowed.size()) {
|
|
|
|
PBQPRAGraph::RawVector NewCosts(G.getNodeCosts(NId));
|
2014-10-22 00:24:15 +08:00
|
|
|
NewCosts[PRegOpt + 1] -= CBenefit;
|
2014-10-10 02:20:51 +08:00
|
|
|
G.setNodeCosts(NId, std::move(NewCosts));
|
|
|
|
}
|
2010-09-21 21:19:36 +08:00
|
|
|
} else {
|
2014-10-10 02:20:51 +08:00
|
|
|
PBQPRAGraph::NodeId N1Id = G.getMetadata().getNodeIdForVReg(DstReg);
|
|
|
|
PBQPRAGraph::NodeId N2Id = G.getMetadata().getNodeIdForVReg(SrcReg);
|
2014-10-28 01:44:25 +08:00
|
|
|
const PBQPRAGraph::NodeMetadata::AllowedRegVector *Allowed1 =
|
|
|
|
&G.getNodeMetadata(N1Id).getAllowedRegs();
|
|
|
|
const PBQPRAGraph::NodeMetadata::AllowedRegVector *Allowed2 =
|
|
|
|
&G.getNodeMetadata(N2Id).getAllowedRegs();
|
2014-10-10 02:20:51 +08:00
|
|
|
|
|
|
|
PBQPRAGraph::EdgeId EId = G.findEdge(N1Id, N2Id);
|
|
|
|
if (EId == G.invalidEdgeId()) {
|
|
|
|
PBQPRAGraph::RawMatrix Costs(Allowed1->size() + 1,
|
|
|
|
Allowed2->size() + 1, 0);
|
|
|
|
addVirtRegCoalesce(Costs, *Allowed1, *Allowed2, CBenefit);
|
|
|
|
G.addEdge(N1Id, N2Id, std::move(Costs));
|
|
|
|
} else {
|
|
|
|
if (G.getEdgeNode1Id(EId) == N2Id) {
|
|
|
|
std::swap(N1Id, N2Id);
|
|
|
|
std::swap(Allowed1, Allowed2);
|
|
|
|
}
|
|
|
|
PBQPRAGraph::RawMatrix Costs(G.getEdgeCosts(EId));
|
|
|
|
addVirtRegCoalesce(Costs, *Allowed1, *Allowed2, CBenefit);
|
2015-02-11 16:25:36 +08:00
|
|
|
G.updateEdgeCosts(EId, std::move(Costs));
|
2010-09-21 21:19:36 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-10-10 02:20:51 +08:00
|
|
|
private:
|
|
|
|
void addVirtRegCoalesce(
|
2014-10-28 01:44:25 +08:00
|
|
|
PBQPRAGraph::RawMatrix &CostMat,
|
|
|
|
const PBQPRAGraph::NodeMetadata::AllowedRegVector &Allowed1,
|
|
|
|
const PBQPRAGraph::NodeMetadata::AllowedRegVector &Allowed2,
|
|
|
|
PBQP::PBQPNum Benefit) {
|
2014-10-10 02:20:51 +08:00
|
|
|
assert(CostMat.getRows() == Allowed1.size() + 1 && "Size mismatch.");
|
|
|
|
assert(CostMat.getCols() == Allowed2.size() + 1 && "Size mismatch.");
|
|
|
|
for (unsigned I = 0; I != Allowed1.size(); ++I) {
|
|
|
|
unsigned PReg1 = Allowed1[I];
|
|
|
|
for (unsigned J = 0; J != Allowed2.size(); ++J) {
|
|
|
|
unsigned PReg2 = Allowed2[J];
|
|
|
|
if (PReg1 == PReg2)
|
2014-10-22 00:24:15 +08:00
|
|
|
CostMat[I + 1][J + 1] -= Benefit;
|
2012-02-10 12:10:26 +08:00
|
|
|
}
|
2010-09-21 21:19:36 +08:00
|
|
|
}
|
|
|
|
}
|
2014-10-10 02:20:51 +08:00
|
|
|
};
|
|
|
|
|
2017-02-22 06:07:52 +08:00
|
|
|
} // end anonymous namespace
|
2014-10-10 02:20:51 +08:00
|
|
|
|
|
|
|
// Out-of-line destructor/anchor for PBQPRAConstraint.
|
2017-02-22 06:07:52 +08:00
|
|
|
PBQPRAConstraint::~PBQPRAConstraint() = default;
|
|
|
|
|
2014-10-10 02:20:51 +08:00
|
|
|
void PBQPRAConstraint::anchor() {}
|
2017-02-22 06:07:52 +08:00
|
|
|
|
2014-10-10 02:20:51 +08:00
|
|
|
void PBQPRAConstraintList::anchor() {}
|
2010-09-18 17:07:10 +08:00
|
|
|
|
|
|
|
/// Declare the analyses this pass requires and preserves.
///
/// PBQP consumes liveness, slot-index, loop, dominance and block-frequency
/// information, and it updates (rather than invalidates) each of those plus
/// the virtual register map, so everything required is also preserved.
void RegAllocPBQP::getAnalysisUsage(AnalysisUsage &au) const {
  au.setPreservesCFG();
  au.addRequired<AAResultsWrapperPass>();
  au.addPreserved<AAResultsWrapperPass>();
  au.addRequired<SlotIndexes>();
  au.addPreserved<SlotIndexes>();
  au.addRequired<LiveIntervals>();
  au.addPreserved<LiveIntervals>();
  //au.addRequiredID(SplitCriticalEdgesID);
  // Clients may request one extra pass to run before allocation; the pass ID
  // is supplied through the createPBQPRegisterAllocator factory.
  if (customPassID)
    au.addRequiredID(*customPassID);
  au.addRequired<LiveStacks>();
  au.addPreserved<LiveStacks>();
  au.addRequired<MachineBlockFrequencyInfo>();
  au.addPreserved<MachineBlockFrequencyInfo>();
  au.addRequired<MachineLoopInfo>();
  au.addPreserved<MachineLoopInfo>();
  au.addRequired<MachineDominatorTree>();
  au.addPreserved<MachineDominatorTree>();
  au.addRequired<VirtRegMap>();
  au.addPreserved<VirtRegMap>();
  MachineFunctionPass::getAnalysisUsage(au);
}
|
|
|
|
|
2014-10-10 02:20:51 +08:00
|
|
|
void RegAllocPBQP::findVRegIntervalsToAlloc(const MachineFunction &MF,
|
|
|
|
LiveIntervals &LIS) {
|
|
|
|
const MachineRegisterInfo &MRI = MF.getRegInfo();
|
2008-11-16 20:12:54 +08:00
|
|
|
|
|
|
|
// Iterate over all live ranges.
|
2014-10-10 02:20:51 +08:00
|
|
|
for (unsigned I = 0, E = MRI.getNumVirtRegs(); I != E; ++I) {
|
2019-08-02 07:27:28 +08:00
|
|
|
unsigned Reg = Register::index2VirtReg(I);
|
2014-10-10 02:20:51 +08:00
|
|
|
if (MRI.reg_nodbg_empty(Reg))
|
2008-11-16 20:12:54 +08:00
|
|
|
continue;
|
2018-02-21 06:15:09 +08:00
|
|
|
VRegsToAlloc.insert(Reg);
|
2014-10-10 02:20:51 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-11-05 04:51:29 +08:00
|
|
|
static bool isACalleeSavedRegister(unsigned reg, const TargetRegisterInfo &TRI,
|
|
|
|
const MachineFunction &MF) {
|
2017-03-14 17:09:26 +08:00
|
|
|
const MCPhysReg *CSR = MF.getRegInfo().getCalleeSavedRegs();
|
2014-11-05 04:51:29 +08:00
|
|
|
for (unsigned i = 0; CSR[i] != 0; ++i)
|
|
|
|
if (TRI.regsOverlap(reg, CSR[i]))
|
|
|
|
return true;
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2015-02-03 14:14:06 +08:00
|
|
|
/// Build the PBQP graph for the current round: one node per allocatable vreg,
/// with a cost vector over {spill} ∪ allowed-physregs. Vregs with no allowed
/// registers are pre-spilled here and their replacement vregs re-processed.
void RegAllocPBQP::initializeGraph(PBQPRAGraph &G, VirtRegMap &VRM,
                                   Spiller &VRegSpiller) {
  MachineFunction &MF = G.getMetadata().MF;

  LiveIntervals &LIS = G.getMetadata().LIS;
  const MachineRegisterInfo &MRI = G.getMetadata().MF.getRegInfo();
  const TargetRegisterInfo &TRI =
      *G.getMetadata().MF.getSubtarget().getRegisterInfo();

  // Worklist rather than a plain loop: pre-spilling below can create new
  // vregs that must themselves be processed.
  std::vector<unsigned> Worklist(VRegsToAlloc.begin(), VRegsToAlloc.end());

  // VReg -> set of physregs it may be assigned; filled by the worklist pass,
  // turned into graph nodes by the second pass.
  std::map<unsigned, std::vector<unsigned>> VRegAllowedMap;

  while (!Worklist.empty()) {
    unsigned VReg = Worklist.back();
    Worklist.pop_back();

    LiveInterval &VRegLI = LIS.getInterval(VReg);

    // If this is an empty interval move it to the EmptyIntervalVRegs set then
    // continue.
    if (VRegLI.empty()) {
      EmptyIntervalVRegs.insert(VRegLI.reg);
      VRegsToAlloc.erase(VRegLI.reg);
      continue;
    }

    const TargetRegisterClass *TRC = MRI.getRegClass(VReg);

    // Record any overlaps with regmask operands.
    BitVector RegMaskOverlaps;
    LIS.checkRegMaskInterference(VRegLI, RegMaskOverlaps);

    // Compute an initial allowed set for the current vreg.
    std::vector<unsigned> VRegAllowed;
    ArrayRef<MCPhysReg> RawPRegOrder = TRC->getRawAllocationOrder(MF);
    for (unsigned I = 0; I != RawPRegOrder.size(); ++I) {
      unsigned PReg = RawPRegOrder[I];
      // Reserved registers are never available for allocation.
      if (MRI.isReserved(PReg))
        continue;

      // vregLI crosses a regmask operand that clobbers preg.
      if (!RegMaskOverlaps.empty() && !RegMaskOverlaps.test(PReg))
        continue;

      // vregLI overlaps fixed regunit interference.
      bool Interference = false;
      for (MCRegUnitIterator Units(PReg, &TRI); Units.isValid(); ++Units) {
        if (VRegLI.overlaps(LIS.getRegUnit(*Units))) {
          Interference = true;
          break;
        }
      }
      if (Interference)
        continue;

      // preg is usable for this virtual register.
      VRegAllowed.push_back(PReg);
    }

    // Check for vregs that have no allowed registers. These should be
    // pre-spilled and the new vregs added to the worklist.
    if (VRegAllowed.empty()) {
      SmallVector<unsigned, 8> NewVRegs;
      spillVReg(VReg, NewVRegs, MF, LIS, VRM, VRegSpiller);
      Worklist.insert(Worklist.end(), NewVRegs.begin(), NewVRegs.end());
      continue;
    } else
      VRegAllowedMap[VReg] = std::move(VRegAllowed);
  }

  for (auto &KV : VRegAllowedMap) {
    auto VReg = KV.first;

    // Move empty intervals to the EmptyIntervalVReg set.
    // NOTE(review): spilling during the worklist pass above edits live
    // ranges, which presumably is why intervals are re-checked for emptiness
    // here — confirm before simplifying.
    if (LIS.getInterval(VReg).empty()) {
      EmptyIntervalVRegs.insert(VReg);
      VRegsToAlloc.erase(VReg);
      continue;
    }

    auto &VRegAllowed = KV.second;

    // Cost vector: index 0 is the spill option, indices 1..N the allowed
    // physical registers.
    PBQPRAGraph::RawVector NodeCosts(VRegAllowed.size() + 1, 0);

    // Tweak cost of callee saved registers, as using then force spilling and
    // restoring them. This would only happen in the prologue / epilogue though.
    for (unsigned i = 0; i != VRegAllowed.size(); ++i)
      if (isACalleeSavedRegister(VRegAllowed[i], TRI, MF))
        NodeCosts[1 + i] += 1.0;

    PBQPRAGraph::NodeId NId = G.addNode(std::move(NodeCosts));
    G.getNodeMetadata(NId).setVReg(VReg);
    G.getNodeMetadata(NId).setAllowedRegs(
        G.getMetadata().getAllowedRegs(std::move(VRegAllowed)));
    G.getMetadata().setNodeIdForVReg(VReg, NId);
  }
}
|
|
|
|
|
2015-02-03 14:14:06 +08:00
|
|
|
/// Spill \p VReg. Any vregs created by the spiller are appended to
/// \p NewIntervals and re-registered as allocation candidates.
void RegAllocPBQP::spillVReg(unsigned VReg,
                             SmallVectorImpl<unsigned> &NewIntervals,
                             MachineFunction &MF, LiveIntervals &LIS,
                             VirtRegMap &VRM, Spiller &VRegSpiller) {
  // VReg is being dealt with now; it no longer needs an assignment.
  VRegsToAlloc.erase(VReg);
  LiveRangeEdit LRE(&LIS.getInterval(VReg), NewIntervals, MF, LIS, &VRM,
                    nullptr, &DeadRemats);
  VRegSpiller.spill(LRE);

  // TRI is only used inside LLVM_DEBUG; the (void) cast silences the
  // unused-variable warning in NDEBUG builds.
  const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
  (void)TRI;
  LLVM_DEBUG(dbgs() << "VREG " << printReg(VReg, &TRI) << " -> SPILLED (Cost: "
                    << LRE.getParent().weight << ", New vregs: ");

  // Copy any newly inserted live intervals into the list of regs to
  // allocate.
  for (LiveRangeEdit::iterator I = LRE.begin(), E = LRE.end();
       I != E; ++I) {
    const LiveInterval &LI = LIS.getInterval(*I);
    assert(!LI.empty() && "Empty spill range.");
    LLVM_DEBUG(dbgs() << printReg(LI.reg, &TRI) << " ");
    VRegsToAlloc.insert(LI.reg);
  }

  LLVM_DEBUG(dbgs() << ")\n");
}
|
|
|
|
|
2014-10-10 02:20:51 +08:00
|
|
|
bool RegAllocPBQP::mapPBQPToRegAlloc(const PBQPRAGraph &G,
|
|
|
|
const PBQP::Solution &Solution,
|
|
|
|
VirtRegMap &VRM,
|
|
|
|
Spiller &VRegSpiller) {
|
|
|
|
MachineFunction &MF = G.getMetadata().MF;
|
|
|
|
LiveIntervals &LIS = G.getMetadata().LIS;
|
2015-01-27 16:27:06 +08:00
|
|
|
const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
|
2014-10-10 02:20:51 +08:00
|
|
|
(void)TRI;
|
|
|
|
|
2010-09-18 17:07:10 +08:00
|
|
|
// Set to true if we have any spills
|
2014-10-10 02:20:51 +08:00
|
|
|
bool AnotherRoundNeeded = false;
|
2010-09-18 17:07:10 +08:00
|
|
|
|
|
|
|
// Clear the existing allocation.
|
2014-10-10 02:20:51 +08:00
|
|
|
VRM.clearAllVirt();
|
2010-09-18 17:07:10 +08:00
|
|
|
|
|
|
|
// Iterate over the nodes mapping the PBQP solution to a register
|
|
|
|
// assignment.
|
2014-10-10 02:20:51 +08:00
|
|
|
for (auto NId : G.nodeIds()) {
|
|
|
|
unsigned VReg = G.getNodeMetadata(NId).getVReg();
|
|
|
|
unsigned AllocOption = Solution.getSelection(NId);
|
|
|
|
|
|
|
|
if (AllocOption != PBQP::RegAlloc::getSpillOptionIdx()) {
|
2014-10-28 01:44:25 +08:00
|
|
|
unsigned PReg = G.getNodeMetadata(NId).getAllowedRegs()[AllocOption - 1];
|
2018-05-14 20:53:11 +08:00
|
|
|
LLVM_DEBUG(dbgs() << "VREG " << printReg(VReg, &TRI) << " -> "
|
|
|
|
<< TRI.getName(PReg) << "\n");
|
2014-10-10 02:20:51 +08:00
|
|
|
assert(PReg != 0 && "Invalid preg selected.");
|
|
|
|
VRM.assignVirt2Phys(VReg, PReg);
|
|
|
|
} else {
|
2015-02-03 14:14:06 +08:00
|
|
|
// Spill VReg. If this introduces new intervals we'll need another round
|
|
|
|
// of allocation.
|
|
|
|
SmallVector<unsigned, 8> NewVRegs;
|
|
|
|
spillVReg(VReg, NewVRegs, MF, LIS, VRM, VRegSpiller);
|
|
|
|
AnotherRoundNeeded |= !NewVRegs.empty();
|
2008-10-03 02:29:27 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-10-10 02:20:51 +08:00
|
|
|
return !AnotherRoundNeeded;
|
2008-10-03 02:29:27 +08:00
|
|
|
}
|
|
|
|
|
2014-10-10 02:20:51 +08:00
|
|
|
/// Assign physical registers to vregs whose live interval was empty.
///
/// Empty intervals never appear in the PBQP problem (they conflict with
/// nothing) but still need some assignment for the VirtRegMap to be complete:
/// prefer the simple copy hint, otherwise take the first un-reserved register
/// in the class's raw allocation order.
void RegAllocPBQP::finalizeAlloc(MachineFunction &MF,
                                 LiveIntervals &LIS,
                                 VirtRegMap &VRM) const {
  MachineRegisterInfo &MRI = MF.getRegInfo();

  // First allocate registers for the empty intervals.
  // (Range-for instead of the old explicit const_iterator loop, matching the
  // range-for style used elsewhere in this file.)
  for (unsigned VReg : EmptyIntervalVRegs) {
    LiveInterval &LI = LIS.getInterval(VReg);

    // Honor the copy hint if there is one.
    unsigned PReg = MRI.getSimpleHint(LI.reg);

    if (PReg == 0) {
      // No hint: fall back to the first allocatable (un-reserved) register
      // in the class's raw allocation order.
      const TargetRegisterClass &RC = *MRI.getRegClass(LI.reg);
      const ArrayRef<MCPhysReg> RawPRegOrder = RC.getRawAllocationOrder(MF);
      for (unsigned CandidateReg : RawPRegOrder) {
        if (!VRM.getRegInfo().isReserved(CandidateReg)) {
          PReg = CandidateReg;
          break;
        }
      }
      assert(PReg &&
             "No un-reserved physical registers in this register class");
    }

    VRM.assignVirt2Phys(LI.reg, PReg);
  }
}
|
|
|
|
|
2016-04-13 11:08:27 +08:00
|
|
|
/// Post-allocation cleanup: let the spiller finish its own optimization,
/// then delete instructions left dead by rematerialization.
void RegAllocPBQP::postOptimization(Spiller &VRegSpiller, LiveIntervals &LIS) {
  VRegSpiller.postOptimization();

  // Each dead def must be unhooked from the slot-index maps before the
  // instruction itself is erased.
  for (MachineInstr *DeadInst : DeadRemats) {
    LIS.RemoveMachineInstrFromMaps(*DeadInst);
    DeadInst->eraseFromParent();
  }
  DeadRemats.clear();
}
|
|
|
|
|
[PBQP] Tweak spill costs and coalescing benefits
This patch improves how the different costs (register, interference, spill
and coalescing) relates together. The assumption is now that:
- coalescing (or any other "side effect" of reg alloc) is negative, and
instead of being derived from a spill cost, they use the block
frequency info.
- spill costs are in the [MinSpillCost:+inf( range
- register or interference costs are in [0.0:MinSpillCost( or +inf
The current MinSpillCost is set to 10.0, which is a random value high
enough that the current constraint builders do not need to worry about
when settings costs. It would however be worth adding a normalization
step for register and interference costs as the last step in the
constraint builder chain to ensure they are not greater than SpillMinCost
(unless this has some sense for some architectures). This would work well
with the current builder pipeline, where all costs are tweaked relatively
to each others, but could grow above MinSpillCost if the pipeline is
deep enough.
The current heuristic is tuned to depend rather on the number of uses of
a live interval rather than a density of uses, as used by the greedy
allocator. This heuristic provides a few percent improvement on a number
of benchmarks (eembc, spec, ...) and will definitely need to change once
spill placement is implemented: the current spill placement is really
ineficient, so making the cost proportionnal to the number of use is a
clear win.
llvm-svn: 221292
2014-11-05 04:51:24 +08:00
|
|
|
static inline float normalizePBQPSpillWeight(float UseDefFreq, unsigned Size,
|
|
|
|
unsigned NumInstr) {
|
|
|
|
// All intervals have a spill weight that is mostly proportional to the number
|
|
|
|
// of uses, with uses in loops having a bigger weight.
|
|
|
|
return NumInstr * normalizeSpillWeight(UseDefFreq, Size, 1);
|
|
|
|
}
|
|
|
|
|
2010-09-18 17:07:10 +08:00
|
|
|
/// Pass entry point: run PBQP register allocation over \p MF.
bool RegAllocPBQP::runOnMachineFunction(MachineFunction &MF) {
  LiveIntervals &LIS = getAnalysis<LiveIntervals>();
  MachineBlockFrequencyInfo &MBFI =
    getAnalysis<MachineBlockFrequencyInfo>();

  VirtRegMap &VRM = getAnalysis<VirtRegMap>();

  // Spill weights/hints use the PBQP-specific normalization above.
  calculateSpillWeightsAndHints(LIS, MF, &VRM, getAnalysis<MachineLoopInfo>(),
                                MBFI, normalizePBQPSpillWeight);

  std::unique_ptr<Spiller> VRegSpiller(createInlineSpiller(*this, MF, VRM));

  MF.getRegInfo().freezeReservedRegs(MF);

  LLVM_DEBUG(dbgs() << "PBQP Register Allocating for " << MF.getName() << "\n");

  // Allocator main loop:
  //
  // * Map current regalloc problem to a PBQP problem
  // * Solve the PBQP problem
  // * Map the solution back to a register allocation
  // * Spill if necessary
  //
  // This process is continued till no more spills are generated.

  // Find the vreg intervals in need of allocation.
  findVRegIntervalsToAlloc(MF, LIS);

#ifndef NDEBUG
  // Used only for graph-dump filenames below.
  const Function &F = MF.getFunction();
  std::string FullyQualifiedName =
      F.getParent()->getModuleIdentifier() + "." + F.getName().str();
#endif

  // If there are non-empty intervals allocate them using pbqp.
  if (!VRegsToAlloc.empty()) {
    const TargetSubtargetInfo &Subtarget = MF.getSubtarget();
    // Constraint chain applied to the graph each round: spill costs and
    // interference always, coalescing optionally, plus any target-specific
    // constraints.
    std::unique_ptr<PBQPRAConstraintList> ConstraintsRoot =
        std::make_unique<PBQPRAConstraintList>();
    ConstraintsRoot->addConstraint(std::make_unique<SpillCosts>());
    ConstraintsRoot->addConstraint(std::make_unique<Interference>());
    if (PBQPCoalescing)
      ConstraintsRoot->addConstraint(std::make_unique<Coalescing>());
    ConstraintsRoot->addConstraint(Subtarget.getCustomPBQPConstraints());

    bool PBQPAllocComplete = false;
    unsigned Round = 0;

    // Iterate to a fixed point: each round rebuilds the graph, solves it,
    // and applies the solution; spilling may force another round.
    while (!PBQPAllocComplete) {
      LLVM_DEBUG(dbgs() << "  PBQP Regalloc round " << Round << ":\n");

      PBQPRAGraph G(PBQPRAGraph::GraphMetadata(MF, LIS, MBFI));
      initializeGraph(G, VRM, *VRegSpiller);
      ConstraintsRoot->apply(G);

#ifndef NDEBUG
      if (PBQPDumpGraphs) {
        std::ostringstream RS;
        RS << Round;
        std::string GraphFileName = FullyQualifiedName + "." + RS.str() +
                                    ".pbqpgraph";
        std::error_code EC;
        raw_fd_ostream OS(GraphFileName, EC, sys::fs::OF_Text);
        LLVM_DEBUG(dbgs() << "Dumping graph for round " << Round << " to \""
                          << GraphFileName << "\"\n");
        G.dump(OS);
      }
#endif

      PBQP::Solution Solution = PBQP::RegAlloc::solve(G);
      PBQPAllocComplete = mapPBQPToRegAlloc(G, Solution, VRM, *VRegSpiller);
      ++Round;
    }
  }

  // Finalise allocation, allocate empty ranges.
  finalizeAlloc(MF, LIS, VRM);
  postOptimization(*VRegSpiller, LIS);
  VRegsToAlloc.clear();
  EmptyIntervalVRegs.clear();

  LLVM_DEBUG(dbgs() << "Post alloc VirtRegMap:\n" << VRM << "\n");

  // The register assignment was rewritten; report the function as modified.
  return true;
}
|
|
|
|
|
2015-12-04 09:31:59 +08:00
|
|
|
/// Create Printable object for node and register info.
|
|
|
|
static Printable PrintNodeInfo(PBQP::RegAlloc::PBQPRAGraph::NodeId NId,
|
|
|
|
const PBQP::RegAlloc::PBQPRAGraph &G) {
|
|
|
|
return Printable([NId, &G](raw_ostream &OS) {
|
2015-02-04 07:40:24 +08:00
|
|
|
const MachineRegisterInfo &MRI = G.getMetadata().MF.getRegInfo();
|
|
|
|
const TargetRegisterInfo *TRI = MRI.getTargetRegisterInfo();
|
|
|
|
unsigned VReg = G.getNodeMetadata(NId).getVReg();
|
|
|
|
const char *RegClassName = TRI->getRegClassName(MRI.getRegClass(VReg));
|
2017-11-28 20:42:37 +08:00
|
|
|
OS << NId << " (" << RegClassName << ':' << printReg(VReg, TRI) << ')';
|
2015-12-04 09:31:59 +08:00
|
|
|
});
|
2015-02-04 07:40:24 +08:00
|
|
|
}
|
|
|
|
|
2017-10-15 22:32:27 +08:00
|
|
|
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// Dump the graph to \p OS: first every node with its cost vector, then
/// every edge with its cost matrix.
LLVM_DUMP_METHOD void PBQP::RegAlloc::PBQPRAGraph::dump(raw_ostream &OS) const {
  for (auto NId : nodeIds()) {
    const Vector &Costs = getNodeCosts(NId);
    assert(Costs.getLength() != 0 && "Empty vector in graph.");
    OS << PrintNodeInfo(NId, *this) << ": " << Costs << '\n';
  }
  OS << '\n';

  for (auto EId : edgeIds()) {
    NodeId N1Id = getEdgeNode1Id(EId);
    NodeId N2Id = getEdgeNode2Id(EId);
    assert(N1Id != N2Id && "PBQP graphs should not have self-edges.");
    const Matrix &M = getEdgeCosts(EId);
    assert(M.getRows() != 0 && "No rows in matrix.");
    assert(M.getCols() != 0 && "No cols in matrix.");
    OS << PrintNodeInfo(N1Id, *this) << ' ' << M.getRows() << " rows / ";
    OS << PrintNodeInfo(N2Id, *this) << ' ' << M.getCols() << " cols:\n";
    OS << M << '\n';
  }
}

/// Convenience overload: dump to the debug stream.
LLVM_DUMP_METHOD void PBQP::RegAlloc::PBQPRAGraph::dump() const {
  dump(dbgs());
}
#endif
|
2015-02-04 07:40:24 +08:00
|
|
|
|
|
|
|
/// Emit the PBQP graph in Graphviz "dot" format to \p OS.
void PBQP::RegAlloc::PBQPRAGraph::printDot(raw_ostream &OS) const {
  OS << "graph {\n";

  // One dot node per PBQP node, labelled with its cost vector.
  for (auto NId : nodeIds())
    OS << "  node" << NId << " [ label=\""
       << PrintNodeInfo(NId, *this) << "\\n"
       << getNodeCosts(NId) << "\" ]\n";

  OS << "  edge [ len=" << nodeIds().size() << " ]\n";

  // One undirected dot edge per PBQP edge, labelled with the cost matrix
  // (one matrix row per label line).
  for (auto EId : edgeIds()) {
    OS << "  node" << getEdgeNode1Id(EId)
       << " -- node" << getEdgeNode2Id(EId)
       << " [ label=\"";
    const Matrix &EdgeCosts = getEdgeCosts(EId);
    for (unsigned Row = 0, NRows = EdgeCosts.getRows(); Row != NRows; ++Row)
      OS << EdgeCosts.getRowAsVector(Row) << "\\n";
    OS << "\" ]\n";
  }
  OS << "}\n";
}
|
|
|
|
|
2014-10-10 02:20:51 +08:00
|
|
|
/// Create a PBQP register allocator. \p customPassID optionally names an
/// extra pass the allocator will require before it runs.
FunctionPass *llvm::createPBQPRegisterAllocator(char *customPassID) {
  return new RegAllocPBQP(customPassID);
}
|
|
|
|
|
2010-09-23 12:28:54 +08:00
|
|
|
/// Create a PBQP register allocator with no custom pass requirement.
FunctionPass* llvm::createDefaultPBQPRegisterAllocator() {
  return createPBQPRegisterAllocator();
}
|