//===- LiveIntervals.cpp - Live Interval Analysis -------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file This file implements the LiveInterval analysis pass which is used
/// by the Linear Scan Register allocator. This pass linearizes the
/// basic blocks of the function in DFS order and computes live intervals for
/// each virtual and physical register.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/LiveIntervalCalc.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBundle.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/VirtRegMap.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/MC/LaneBitmask.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Pass.h"
#include "llvm/Support/BlockFrequency.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <tuple>
#include <utility>

using namespace llvm;
#define DEBUG_TYPE "regalloc"
char LiveIntervals::ID = 0;
char &llvm::LiveIntervalsID = LiveIntervals::ID;
INITIALIZE_PASS_BEGIN(LiveIntervals, "liveintervals",
                "Live Interval Analysis", false, false)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
INITIALIZE_PASS_END(LiveIntervals, "liveintervals",
                "Live Interval Analysis", false, false)
#ifndef NDEBUG
static cl::opt<bool> EnablePrecomputePhysRegs(
    "precompute-phys-liveness", cl::Hidden,
    cl::desc("Eagerly compute live intervals for all physreg units."));
#else
static bool EnablePrecomputePhysRegs = false;
#endif // NDEBUG
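
// Note: in release builds the cl::opt above is compiled out entirely; the
// constant `false` keeps the use site in runOnMachineFunction compiling while
// letting the precompute loop be optimized away.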
namespace llvm {
cl::opt<bool> UseSegmentSetForPhysRegs(
    "use-segment-set-for-physregs", cl::Hidden, cl::init(true),
    cl::desc(
        "Use segment set for the computation of the live ranges of physregs."));
} // end namespace llvm
void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<AAResultsWrapperPass>();
  AU.addPreserved<AAResultsWrapperPass>();
  AU.addPreserved<LiveVariables>();
  AU.addPreservedID(MachineLoopInfoID);
  AU.addRequiredTransitiveID(MachineDominatorsID);
  AU.addPreservedID(MachineDominatorsID);
  AU.addPreserved<SlotIndexes>();
  AU.addRequiredTransitive<SlotIndexes>();
  MachineFunctionPass::getAnalysisUsage(AU);
}
LiveIntervals::LiveIntervals() : MachineFunctionPass(ID) {
  initializeLiveIntervalsPass(*PassRegistry::getPassRegistry());
}
LiveIntervals::~LiveIntervals() { delete LICalc; }
void LiveIntervals::releaseMemory() {
  // Free the live intervals themselves.
  for (unsigned i = 0, e = VirtRegIntervals.size(); i != e; ++i)
    delete VirtRegIntervals[Register::index2VirtReg(i)];
  VirtRegIntervals.clear();
  RegMaskSlots.clear();
  RegMaskBits.clear();
  RegMaskBlocks.clear();

  for (LiveRange *LR : RegUnitRanges)
    delete LR;
  RegUnitRanges.clear();

  // Release VNInfo memory regions; VNInfo objects don't need to be dtor'd.
  VNInfoAllocator.Reset();
}
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  MF = &fn;
  MRI = &MF->getRegInfo();
  TRI = MF->getSubtarget().getRegisterInfo();
  TII = MF->getSubtarget().getInstrInfo();
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
  Indexes = &getAnalysis<SlotIndexes>();
  DomTree = &getAnalysis<MachineDominatorTree>();

  if (!LICalc)
    LICalc = new LiveIntervalCalc();

  // Allocate space for all virtual registers.
  VirtRegIntervals.resize(MRI->getNumVirtRegs());

  computeVirtRegs();
  computeRegMasks();
  computeLiveInRegUnits();

  if (EnablePrecomputePhysRegs) {
    // For stress testing, precompute live ranges of all physical register
    // units, including reserved registers.
    for (unsigned i = 0, e = TRI->getNumRegUnits(); i != e; ++i)
      getRegUnit(i);
  }
  LLVM_DEBUG(dump());
  return true;
}
void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
  OS << "********** INTERVALS **********\n";

  // Dump the regunits.
  for (unsigned Unit = 0, UnitE = RegUnitRanges.size(); Unit != UnitE; ++Unit)
    if (LiveRange *LR = RegUnitRanges[Unit])
      OS << printRegUnit(Unit, TRI) << ' ' << *LR << '\n';

  // Dump the virtregs.
  for (unsigned i = 0, e = MRI->getNumVirtRegs(); i != e; ++i) {
    unsigned Reg = Register::index2VirtReg(i);
    if (hasInterval(Reg))
      OS << getInterval(Reg) << '\n';
  }

  OS << "RegMasks:";
  for (SlotIndex Idx : RegMaskSlots)
    OS << ' ' << Idx;
  OS << '\n';

  printInstrs(OS);
}
void LiveIntervals::printInstrs(raw_ostream &OS) const {
  OS << "********** MACHINEINSTRS **********\n";
  MF->print(OS, Indexes);
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void LiveIntervals::dumpInstrs() const {
  printInstrs(dbgs());
}
#endif
LiveInterval* LiveIntervals::createInterval(unsigned reg) {
  float Weight = Register::isPhysicalRegister(reg) ? huge_valf : 0.0F;
  return new LiveInterval(reg, Weight);
}
/// Compute the live interval of a virtual register, based on defs and uses.
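/// Returns true when dead values were found, meaning the interval may now
/// consist of several disconnected components; callers such as
/// computeVirtRegs() then split it with splitSeparateComponents().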
bool LiveIntervals::computeVirtRegInterval(LiveInterval &LI) {
  assert(LICalc && "LICalc not initialized.");
  assert(LI.empty() && "Should only compute empty intervals.");
  LICalc->reset(MF, getSlotIndexes(), DomTree, &getVNInfoAllocator());
  LICalc->calculate(LI, MRI->shouldTrackSubRegLiveness(LI.reg));
  return computeDeadValues(LI, nullptr);
}
void LiveIntervals::computeVirtRegs() {
  for (unsigned i = 0, e = MRI->getNumVirtRegs(); i != e; ++i) {
    unsigned Reg = Register::index2VirtReg(i);
    if (MRI->reg_nodbg_empty(Reg))
      continue;
    LiveInterval &LI = createEmptyInterval(Reg);
    bool NeedSplit = computeVirtRegInterval(LI);
    if (NeedSplit) {
      SmallVector<LiveInterval*, 8> SplitLIs;
      splitSeparateComponents(LI, SplitLIs);
    }
  }
}
void LiveIntervals::computeRegMasks() {
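  // RegMaskBlocks[N] is a (first index, count) pair describing the slice of
  // the parallel RegMaskSlots/RegMaskBits vectors that belongs to block N, so
  // per-block regmask queries only need to scan a contiguous range.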
  RegMaskBlocks.resize(MF->getNumBlockIDs());

  // Find all instructions with regmask operands.
  for (const MachineBasicBlock &MBB : *MF) {
    std::pair<unsigned, unsigned> &RMB = RegMaskBlocks[MBB.getNumber()];
    RMB.first = RegMaskSlots.size();

    // Some block starts, such as EH funclets, create masks.
    if (const uint32_t *Mask = MBB.getBeginClobberMask(TRI)) {
      RegMaskSlots.push_back(Indexes->getMBBStartIdx(&MBB));
      RegMaskBits.push_back(Mask);
    }

    for (const MachineInstr &MI : MBB) {
      for (const MachineOperand &MO : MI.operands()) {
        if (!MO.isRegMask())
          continue;
        RegMaskSlots.push_back(Indexes->getInstructionIndex(MI).getRegSlot());
        RegMaskBits.push_back(MO.getRegMask());
      }
    }

    // Some block ends, such as funclet returns, create masks. Put the mask on
    // the last instruction of the block, because MBB slot index intervals are
    // half-open.
    if (const uint32_t *Mask = MBB.getEndClobberMask(TRI)) {
      assert(!MBB.empty() && "empty return block?");
      RegMaskSlots.push_back(
          Indexes->getInstructionIndex(MBB.back()).getRegSlot());
      RegMaskBits.push_back(Mask);
    }

    // Compute the number of register mask instructions in this block.
    RMB.second = RegMaskSlots.size() - RMB.first;
  }
}
//===----------------------------------------------------------------------===//
//                           Register Unit Liveness
//===----------------------------------------------------------------------===//
//
// Fixed interference typically comes from ABI boundaries: Function arguments
// and return values are passed in fixed registers, and so are exception
// pointers entering landing pads. Certain instructions require values to be
// present in specific registers. That is also represented through fixed
// interference.
//

/// Compute the live range of a register unit, based on the uses and defs of
/// aliasing registers. The range should be empty, or contain only dead
/// phi-defs from ABI blocks.
void LiveIntervals::computeRegUnitRange(LiveRange &LR, unsigned Unit) {
  assert(LICalc && "LICalc not initialized.");
  LICalc->reset(MF, getSlotIndexes(), DomTree, &getVNInfoAllocator());

  // The physregs aliasing Unit are the roots and their super-registers.
  // Create all values as dead defs before extending to uses. Note that roots
  // may share super-registers. That's OK because createDeadDefs() is
  // idempotent. It is very rare for a register unit to have multiple roots, so
  // uniquing super-registers is probably not worthwhile.
  bool IsReserved = false;
  for (MCRegUnitRootIterator Root(Unit, TRI); Root.isValid(); ++Root) {
    bool IsRootReserved = true;
    for (MCSuperRegIterator Super(*Root, TRI, /*IncludeSelf=*/true);
         Super.isValid(); ++Super) {
      unsigned Reg = *Super;
      if (!MRI->reg_empty(Reg))
        LICalc->createDeadDefs(LR, Reg);
      // A register unit is considered reserved if all its roots and all their
      // super registers are reserved.
      if (!MRI->isReserved(Reg))
        IsRootReserved = false;
    }
    IsReserved |= IsRootReserved;
  }
  assert(IsReserved == MRI->isReservedRegUnit(Unit) &&
         "reserved computation mismatch");

  // Now extend LR to reach all uses.
  // Ignore uses of reserved registers. We only track defs of those.
  if (!IsReserved) {
    for (MCRegUnitRootIterator Root(Unit, TRI); Root.isValid(); ++Root) {
      for (MCSuperRegIterator Super(*Root, TRI, /*IncludeSelf=*/true);
           Super.isValid(); ++Super) {
        unsigned Reg = *Super;
        if (!MRI->reg_empty(Reg))
          LICalc->extendToUses(LR, Reg);
      }
    }
  }

  // Flush the segment set to the segment vector.
  if (UseSegmentSetForPhysRegs)
    LR.flushSegmentSet();
}

/// Precompute the live ranges of any register units that are live-in to an ABI
/// block somewhere. Register values can appear without a corresponding def when
/// entering the entry block or a landing pad.
void LiveIntervals::computeLiveInRegUnits() {
  RegUnitRanges.resize(TRI->getNumRegUnits());
  LLVM_DEBUG(dbgs() << "Computing live-in reg-units in ABI blocks.\n");

  // Keep track of the live range sets allocated.
  SmallVector<unsigned, 8> NewRanges;

  // Check all basic blocks for live-ins.
  for (const MachineBasicBlock &MBB : *MF) {
    // We only care about ABI blocks: Entry + landing pads.
    if ((&MBB != &MF->front() && !MBB.isEHPad()) || MBB.livein_empty())
      continue;

    // Create phi-defs at Begin for all live-in registers.
    SlotIndex Begin = Indexes->getMBBStartIdx(&MBB);
    LLVM_DEBUG(dbgs() << Begin << "\t" << printMBBReference(MBB));
    for (const auto &LI : MBB.liveins()) {
      for (MCRegUnitIterator Units(LI.PhysReg, TRI); Units.isValid(); ++Units) {
        unsigned Unit = *Units;
        LiveRange *LR = RegUnitRanges[Unit];
        if (!LR) {
          // Use the segment set to speed up initial computation of the live
          // range.
          LR = RegUnitRanges[Unit] = new LiveRange(UseSegmentSetForPhysRegs);
          NewRanges.push_back(Unit);
        }
        VNInfo *VNI = LR->createDeadDef(Begin, getVNInfoAllocator());
        (void)VNI;
        LLVM_DEBUG(dbgs() << ' ' << printRegUnit(Unit, TRI) << '#' << VNI->id);
      }
    }
    LLVM_DEBUG(dbgs() << '\n');
  }
  LLVM_DEBUG(dbgs() << "Created " << NewRanges.size() << " new intervals.\n");

  // Compute the 'normal' part of the ranges.
  for (unsigned Unit : NewRanges)
    computeRegUnitRange(*RegUnitRanges[Unit], Unit);
}
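
// Helper for the shrinkToUses() implementations below: seed LR with one
// minimal (def, dead slot) segment per value number. extendSegmentsToUses()
// then grows these seeds until every recorded use is covered.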
static void createSegmentsForValues(LiveRange &LR,
                                    iterator_range<LiveInterval::vni_iterator> VNIs) {
  for (VNInfo *VNI : VNIs) {
    if (VNI->isUnused())
      continue;
    SlotIndex Def = VNI->def;
    LR.addSegment(LiveRange::Segment(Def, Def.getDeadSlot(), VNI));
  }
}
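
// extendSegmentsToUses() performs a backward liveness propagation: each
// worklist entry (Idx, VNI) requests that VNI be live up to Idx. When the
// value is not already live inside the block, a segment is added from the
// block start and all predecessors are queued, so the search walks the CFG
// backwards until it reaches the defining block or already-extended segments.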
void LiveIntervals::extendSegmentsToUses(LiveRange &Segments,
                                         ShrinkToUsesWorkList &WorkList,
                                         unsigned Reg, LaneBitmask LaneMask) {
  // Keep track of the PHIs that are in use.
  SmallPtrSet<VNInfo*, 8> UsedPHIs;
  // Blocks that have already been added to WorkList as live-out.
  SmallPtrSet<const MachineBasicBlock*, 16> LiveOut;

  auto getSubRange = [](const LiveInterval &I, LaneBitmask M)
        -> const LiveRange& {
    if (M.none())
      return I;
    for (const LiveInterval::SubRange &SR : I.subranges()) {
      if ((SR.LaneMask & M).any()) {
        assert(SR.LaneMask == M && "Expecting lane masks to match exactly");
        return SR;
      }
    }
    llvm_unreachable("Subrange for mask not found");
  };

  const LiveInterval &LI = getInterval(Reg);
  const LiveRange &OldRange = getSubRange(LI, LaneMask);

  // Extend intervals to reach all uses in WorkList.
  while (!WorkList.empty()) {
    SlotIndex Idx = WorkList.back().first;
    VNInfo *VNI = WorkList.back().second;
    WorkList.pop_back();
    const MachineBasicBlock *MBB = Indexes->getMBBFromIndex(Idx.getPrevSlot());
    SlotIndex BlockStart = Indexes->getMBBStartIdx(MBB);

    // Extend the live range for VNI to be live at Idx.
    if (VNInfo *ExtVNI = Segments.extendInBlock(BlockStart, Idx)) {
      assert(ExtVNI == VNI && "Unexpected existing value number");
      (void)ExtVNI;
      // Is this a PHIDef we haven't seen before?
      if (!VNI->isPHIDef() || VNI->def != BlockStart ||
          !UsedPHIs.insert(VNI).second)
        continue;
      // The PHI is live, make sure the predecessors are live-out.
      for (const MachineBasicBlock *Pred : MBB->predecessors()) {
        if (!LiveOut.insert(Pred).second)
          continue;
        SlotIndex Stop = Indexes->getMBBEndIdx(Pred);
        // A predecessor is not required to have a live-out value for a PHI.
        if (VNInfo *PVNI = OldRange.getVNInfoBefore(Stop))
          WorkList.push_back(std::make_pair(Stop, PVNI));
      }
      continue;
    }

    // VNI is live-in to MBB.
    LLVM_DEBUG(dbgs() << " live-in at " << BlockStart << '\n');
    Segments.addSegment(LiveRange::Segment(BlockStart, Idx, VNI));

    // Make sure VNI is live-out from the predecessors.
    for (const MachineBasicBlock *Pred : MBB->predecessors()) {
      if (!LiveOut.insert(Pred).second)
        continue;
      SlotIndex Stop = Indexes->getMBBEndIdx(Pred);
      if (VNInfo *OldVNI = OldRange.getVNInfoBefore(Stop)) {
        assert(OldVNI == VNI && "Wrong value out of predecessor");
        (void)OldVNI;
        WorkList.push_back(std::make_pair(Stop, VNI));
      } else {
#ifndef NDEBUG
        // There was no old VNI. Verify that Stop is jointly dominated
        // by <undef>s for this live range.
        assert(LaneMask.any() &&
               "Missing value out of predecessor for main range");
        SmallVector<SlotIndex,8> Undefs;
        LI.computeSubRangeUndefs(Undefs, LaneMask, *MRI, *Indexes);
        assert(LiveRangeCalc::isJointlyDominated(Pred, Undefs, *Indexes) &&
               "Missing value out of predecessor for subrange");
#endif
      }
    }
  }
}
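
// A typical caller pattern (a sketch, not code from this file): after
// rewriting or deleting uses of a virtual register, shrink its interval and
// split it if it has fallen apart into separate connected components:
//
//   SmallVector<MachineInstr *, 8> Dead;
//   LiveInterval &LI = LIS.getInterval(Reg);
//   if (LIS.shrinkToUses(&LI, &Dead)) {
//     SmallVector<LiveInterval *, 8> SplitLIs;
//     LIS.splitSeparateComponents(LI, SplitLIs);
//   }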
bool LiveIntervals::shrinkToUses(LiveInterval *li,
                                 SmallVectorImpl<MachineInstr*> *dead) {
  LLVM_DEBUG(dbgs() << "Shrink: " << *li << '\n');
  assert(Register::isVirtualRegister(li->reg) &&
         "Can only shrink virtual registers");

  // Shrink subregister live ranges.
  bool NeedsCleanup = false;
  for (LiveInterval::SubRange &S : li->subranges()) {
    shrinkToUses(S, li->reg);
    if (S.empty())
      NeedsCleanup = true;
  }
  if (NeedsCleanup)
    li->removeEmptySubRanges();

  // Find all the values used, including PHI kills.
  ShrinkToUsesWorkList WorkList;

  // Visit all instructions reading li->reg.
  unsigned Reg = li->reg;
  for (MachineInstr &UseMI : MRI->reg_instructions(Reg)) {
    if (UseMI.isDebugValue() || !UseMI.readsVirtualRegister(Reg))
      continue;
    SlotIndex Idx = getInstructionIndex(UseMI).getRegSlot();
    LiveQueryResult LRQ = li->Query(Idx);
    VNInfo *VNI = LRQ.valueIn();
    if (!VNI) {
      // This shouldn't happen: readsVirtualRegister returns true, but there is
      // no live value. It is likely caused by a target getting <undef> flags
      // wrong.
      LLVM_DEBUG(
          dbgs() << Idx << '\t' << UseMI
                 << "Warning: Instr claims to read non-existent value in "
                 << *li << '\n');
      continue;
    }
    // Special case: An early-clobber tied operand reads and writes the
    // register one slot early.
    if (VNInfo *DefVNI = LRQ.valueDefined())
      Idx = DefVNI->def;

    WorkList.push_back(std::make_pair(Idx, VNI));
  }

  // Create new live ranges with only minimal live segments per def.
  LiveRange NewLR;
  createSegmentsForValues(NewLR, make_range(li->vni_begin(), li->vni_end()));
  extendSegmentsToUses(NewLR, WorkList, Reg, LaneBitmask::getNone());

  // Move the trimmed segments back.
  li->segments.swap(NewLR.segments);

  // Handle dead values.
  bool CanSeparate = computeDeadValues(*li, dead);
  LLVM_DEBUG(dbgs() << "Shrunk: " << *li << '\n');
  return CanSeparate;
}
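
/// Scan LI for dead values: unused PHI-defs are removed outright, and defs
/// that are never read get a <dead> flag on the defining instruction. An
/// instruction whose defs are all dead is also reported via \p dead. Returns
/// true if the interval may have separated into multiple components.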
bool LiveIntervals::computeDeadValues(LiveInterval &LI,
                                      SmallVectorImpl<MachineInstr*> *dead) {
  bool MayHaveSplitComponents = false;
  bool HaveDeadDef = false;

  for (VNInfo *VNI : LI.valnos) {
    if (VNI->isUnused())
      continue;
    SlotIndex Def = VNI->def;
    LiveRange::iterator I = LI.FindSegmentContaining(Def);
    assert(I != LI.end() && "Missing segment for VNI");

    // Is the register live before? Otherwise we may have to add a read-undef
    // flag for subregister defs.
    unsigned VReg = LI.reg;
    if (MRI->shouldTrackSubRegLiveness(VReg)) {
      if ((I == LI.begin() || std::prev(I)->end < Def) && !VNI->isPHIDef()) {
        MachineInstr *MI = getInstructionFromIndex(Def);
        MI->setRegisterDefReadUndef(VReg);
      }
    }

    if (I->end != Def.getDeadSlot())
      continue;
    if (VNI->isPHIDef()) {
      // This is a dead PHI. Remove it.
      VNI->markUnused();
      LI.removeSegment(I);
      LLVM_DEBUG(dbgs() << "Dead PHI at " << Def << " may separate interval\n");
      MayHaveSplitComponents = true;
    } else {
      // This is a dead def. Make sure the instruction knows.
      MachineInstr *MI = getInstructionFromIndex(Def);
      assert(MI && "No instruction defining live value");
      MI->addRegisterDead(LI.reg, TRI);
      if (HaveDeadDef)
        MayHaveSplitComponents = true;
      HaveDeadDef = true;

      if (dead && MI->allDefsAreDead()) {
        LLVM_DEBUG(dbgs() << "All defs dead: " << Def << '\t' << *MI);
        dead->push_back(MI);
      }
    }
  }
  return MayHaveSplitComponents;
}
void LiveIntervals::shrinkToUses(LiveInterval::SubRange &SR, unsigned Reg) {
  LLVM_DEBUG(dbgs() << "Shrink: " << SR << '\n');
  assert(Register::isVirtualRegister(Reg) &&
         "Can only shrink virtual registers");
  // Find all the values used, including PHI kills.
  ShrinkToUsesWorkList WorkList;

  // Visit all instructions reading Reg.
  SlotIndex LastIdx;
  for (MachineOperand &MO : MRI->use_nodbg_operands(Reg)) {
    // Skip "undef" uses.
    if (!MO.readsReg())
      continue;
    // Maybe the operand is for a subregister we don't care about.
    unsigned SubReg = MO.getSubReg();
    if (SubReg != 0) {
      LaneBitmask LaneMask = TRI->getSubRegIndexLaneMask(SubReg);
      if ((LaneMask & SR.LaneMask).none())
        continue;
    }
    // We only need to visit each instruction once.
    MachineInstr *UseMI = MO.getParent();
    SlotIndex Idx = getInstructionIndex(*UseMI).getRegSlot();
    if (Idx == LastIdx)
      continue;
    LastIdx = Idx;

    LiveQueryResult LRQ = SR.Query(Idx);
    VNInfo *VNI = LRQ.valueIn();
    // For subranges it is possible that only undef values are left in that
    // part of the subregister, so there is no real live range at the use.
    if (!VNI)
      continue;

    // Special case: An early-clobber tied operand reads and writes the
    // register one slot early.
    if (VNInfo *DefVNI = LRQ.valueDefined())
      Idx = DefVNI->def;

    WorkList.push_back(std::make_pair(Idx, VNI));
  }

  // Create a new live range with only minimal live segments per def.
  LiveRange NewLR;
  createSegmentsForValues(NewLR, make_range(SR.vni_begin(), SR.vni_end()));
  extendSegmentsToUses(NewLR, WorkList, Reg, SR.LaneMask);

  // Move the trimmed ranges back.
  SR.segments.swap(NewLR.segments);

  // Remove dead PHI value numbers.
  for (VNInfo *VNI : SR.valnos) {
    if (VNI->isUnused())
      continue;
    const LiveRange::Segment *Segment = SR.getSegmentContaining(VNI->def);
    assert(Segment != nullptr && "Missing segment for VNI");
    if (Segment->end != VNI->def.getDeadSlot())
      continue;
    if (VNI->isPHIDef()) {
      // This is a dead PHI. Remove it.
      LLVM_DEBUG(dbgs() << "Dead PHI at " << VNI->def
                        << " may separate interval\n");
      VNI->markUnused();
      SR.removeSegment(*Segment);
    }
  }

  LLVM_DEBUG(dbgs() << "Shrunk: " << SR << '\n');
}
void LiveIntervals::extendToIndices(LiveRange &LR,
                                    ArrayRef<SlotIndex> Indices,
                                    ArrayRef<SlotIndex> Undefs) {
  assert(LICalc && "LICalc not initialized.");
  LICalc->reset(MF, getSlotIndexes(), DomTree, &getVNInfoAllocator());
  for (SlotIndex Idx : Indices)
    LICalc->extend(LR, Idx, /*PhysReg=*/0, Undefs);
}
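
// pruneValue() clips LR so that the value live at Kill stops being live
// there: the segment containing Kill is truncated, and all blocks reachable
// from KillMBB without leaving the value's live range are pruned as well. The
// clipped end points are reported via EndPoints so callers can re-extend them.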
void LiveIntervals::pruneValue(LiveRange &LR, SlotIndex Kill,
                               SmallVectorImpl<SlotIndex> *EndPoints) {
  LiveQueryResult LRQ = LR.Query(Kill);
  VNInfo *VNI = LRQ.valueOutOrDead();
  if (!VNI)
    return;

  MachineBasicBlock *KillMBB = Indexes->getMBBFromIndex(Kill);
  SlotIndex MBBEnd = Indexes->getMBBEndIdx(KillMBB);

  // If VNI isn't live out from KillMBB, the value is trivially pruned.
  if (LRQ.endPoint() < MBBEnd) {
    LR.removeSegment(Kill, LRQ.endPoint());
    if (EndPoints) EndPoints->push_back(LRQ.endPoint());
    return;
  }

  // VNI is live out of KillMBB.
  LR.removeSegment(Kill, MBBEnd);
  if (EndPoints) EndPoints->push_back(MBBEnd);

  // Find all blocks that are reachable from KillMBB without leaving VNI's live
  // range. It is possible that KillMBB itself is reachable, so start a DFS
  // from each successor.
  using VisitedTy = df_iterator_default_set<MachineBasicBlock*,9>;
  VisitedTy Visited;
  for (MachineBasicBlock *Succ : KillMBB->successors()) {
    for (df_ext_iterator<MachineBasicBlock*, VisitedTy>
         I = df_ext_begin(Succ, Visited), E = df_ext_end(Succ, Visited);
         I != E;) {
      MachineBasicBlock *MBB = *I;

      // Check if VNI is live in to MBB.
      SlotIndex MBBStart, MBBEnd;
      std::tie(MBBStart, MBBEnd) = Indexes->getMBBRange(MBB);
      LiveQueryResult LRQ = LR.Query(MBBStart);
      if (LRQ.valueIn() != VNI) {
        // This block isn't part of the VNI segment. Prune the search.
        I.skipChildren();
        continue;
      }

      // Prune the search if VNI is killed in MBB.
      if (LRQ.endPoint() < MBBEnd) {
        LR.removeSegment(MBBStart, LRQ.endPoint());
        if (EndPoints) EndPoints->push_back(LRQ.endPoint());
        I.skipChildren();
        continue;
      }

      // VNI is live through MBB.
      LR.removeSegment(MBBStart, MBBEnd);
      if (EndPoints) EndPoints->push_back(MBBEnd);
      ++I;
    }
  }
}
//===----------------------------------------------------------------------===//
//                           Register allocator hooks.
//

void LiveIntervals::addKillFlags(const VirtRegMap *VRM) {
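  // Recompute kill flags after register allocation: the last use in each
  // segment is marked as a kill, unless the assigned physreg (or an aliasing
  // regunit) stays live past that point, or subregister liveness shows that
  // only part of the register dies there.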
  // Keep track of regunit ranges.
  SmallVector<std::pair<const LiveRange*, LiveRange::const_iterator>, 8> RU;
  // Keep track of subregister ranges.
  SmallVector<std::pair<const LiveInterval::SubRange*,
                        LiveRange::const_iterator>, 4> SRs;

  for (unsigned i = 0, e = MRI->getNumVirtRegs(); i != e; ++i) {
    unsigned Reg = Register::index2VirtReg(i);
    if (MRI->reg_nodbg_empty(Reg))
      continue;
    const LiveInterval &LI = getInterval(Reg);
    if (LI.empty())
      continue;

    // Find the regunit intervals for the assigned register. They may overlap
    // the virtual register live range, cancelling any kills.
    RU.clear();
    for (MCRegUnitIterator Unit(VRM->getPhys(Reg), TRI); Unit.isValid();
         ++Unit) {
      const LiveRange &RURange = getRegUnit(*Unit);
      if (RURange.empty())
        continue;
      RU.push_back(std::make_pair(&RURange, RURange.find(LI.begin()->end)));
    }

    if (MRI->subRegLivenessEnabled()) {
      SRs.clear();
      for (const LiveInterval::SubRange &SR : LI.subranges()) {
        SRs.push_back(std::make_pair(&SR, SR.find(LI.begin()->end)));
      }
    }

    // Every instruction that kills Reg corresponds to a segment range end
    // point.
    for (LiveInterval::const_iterator RI = LI.begin(), RE = LI.end(); RI != RE;
         ++RI) {
      // A block index indicates an MBB edge.
      if (RI->end.isBlock())
        continue;
      MachineInstr *MI = getInstructionFromIndex(RI->end);
      if (!MI)
        continue;

      // Check if any of the regunits are live beyond the end of RI. That could
      // happen when a physreg is defined as a copy of a virtreg:
      //
      //   %eax = COPY %5
      //   FOO %5               <--- MI, cancel kill because %eax is live.
      //   BAR killed %eax
      //
      // There should be no kill flag on FOO when %5 is rewritten as %eax.
      for (auto &RUP : RU) {
        const LiveRange &RURange = *RUP.first;
        LiveRange::const_iterator &I = RUP.second;
        if (I == RURange.end())
          continue;
        I = RURange.advanceTo(I, RI->end);
        if (I == RURange.end() || I->start >= RI->end)
          continue;
        // I is overlapping RI.
        goto CancelKill;
      }

      if (MRI->subRegLivenessEnabled()) {
        // When reading a partially undefined value we must not add a kill
        // flag. The regalloc might have used the undef lane for something
        // else. Example:
        //     %1 = ...                  ; R32: %1
        //     %2:high16 = ...           ; R64: %2
        //        = read killed %2       ; R64: %2
        //        = read %1              ; R32: %1
        // The <kill> flag is correct for %2, but the register allocator may
        // assign R0L to %1, and R0 to %2 because the low 32 bits of R0
        // are actually never written by %2. After assignment the <kill>
        // flag at the read instruction is invalid.
        LaneBitmask DefinedLanesMask;
        if (!SRs.empty()) {
          // Compute a mask of lanes that are defined.
          DefinedLanesMask = LaneBitmask::getNone();
          for (auto &SRP : SRs) {
            const LiveInterval::SubRange &SR = *SRP.first;
            LiveRange::const_iterator &I = SRP.second;
            if (I == SR.end())
              continue;
            I = SR.advanceTo(I, RI->end);
            if (I == SR.end() || I->start >= RI->end)
              continue;
            // I is overlapping RI.
            DefinedLanesMask |= SR.LaneMask;
          }
        } else
          DefinedLanesMask = LaneBitmask::getAll();

        bool IsFullWrite = false;
        for (const MachineOperand &MO : MI->operands()) {
          if (!MO.isReg() || MO.getReg() != Reg)
            continue;
          if (MO.isUse()) {
            // Reading any undefined lanes?
            LaneBitmask UseMask = TRI->getSubRegIndexLaneMask(MO.getSubReg());
            if ((UseMask & ~DefinedLanesMask).any())
              goto CancelKill;
          } else if (MO.getSubReg() == 0) {
            // Writing to the full register?
            assert(MO.isDef());
            IsFullWrite = true;
          }
        }

        // If an instruction writes to a subregister, a new segment starts in
        // the LiveInterval. But as this only overrides part of the register,
        // adding kill flags is not correct here after registers have been
        // assigned.
        if (!IsFullWrite) {
          // Next segment has to be adjacent in the subregister write case.
          LiveRange::const_iterator N = std::next(RI);
          if (N != LI.end() && N->start == RI->end)
            goto CancelKill;
        }
      }

      MI->addRegisterKilled(Reg, nullptr);
      continue;
CancelKill:
      MI->clearRegisterKills(Reg, nullptr);
    }
  }
}
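
// Note that addKillFlags is deliberately conservative: whenever an
// overlapping regunit value or a read of undefined subregister lanes makes a
// kill ambiguous, the flag is cancelled rather than risk a wrong kill flag
// after register assignment.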

MachineBasicBlock*
LiveIntervals::intervalIsInOneMBB(const LiveInterval &LI) const {
  // A local live range must be fully contained inside the block, meaning it is
  // defined and killed at instructions, not at block boundaries. It is not
  // live in or out of any block.
  //
  // It is technically possible to have a PHI-defined live range identical to a
  // single block, but we are going to return nullptr in that case.

  SlotIndex Start = LI.beginIndex();
  if (Start.isBlock())
    return nullptr;

  SlotIndex Stop = LI.endIndex();
  if (Stop.isBlock())
    return nullptr;

  // getMBBFromIndex doesn't need to search the MBB table when both indexes
  // belong to proper instructions.
  MachineBasicBlock *MBB1 = Indexes->getMBBFromIndex(Start);
  MachineBasicBlock *MBB2 = Indexes->getMBBFromIndex(Stop);
  return MBB1 == MBB2 ? MBB1 : nullptr;
}

bool
LiveIntervals::hasPHIKill(const LiveInterval &LI, const VNInfo *VNI) const {
  for (const VNInfo *PHI : LI.valnos) {
    if (PHI->isUnused() || !PHI->isPHIDef())
      continue;
    const MachineBasicBlock *PHIMBB = getMBBFromIndex(PHI->def);
    // Conservatively return true instead of scanning huge predecessor lists.
    if (PHIMBB->pred_size() > 100)
      return true;
    for (const MachineBasicBlock *Pred : PHIMBB->predecessors())
      if (VNI == LI.getVNInfoBefore(Indexes->getMBBEndIdx(Pred)))
        return true;
  }
  return false;
}

float LiveIntervals::getSpillWeight(bool isDef, bool isUse,
                                    const MachineBlockFrequencyInfo *MBFI,
                                    const MachineInstr &MI) {
  return getSpillWeight(isDef, isUse, MBFI, MI.getParent());
}
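
// A single def or use is weighted by the frequency of its block relative to
// the function entry, so references in hot blocks contribute more to an
// interval's eventual spill weight than references on cold paths.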
float LiveIntervals::getSpillWeight(bool isDef, bool isUse,
                                    const MachineBlockFrequencyInfo *MBFI,
                                    const MachineBasicBlock *MBB) {
  return (isDef + isUse) * MBFI->getBlockFreqRelativeToEntryBlock(MBB);
}

LiveRange::Segment
LiveIntervals::addSegmentToEndOfBlock(unsigned reg, MachineInstr &startInst) {
  LiveInterval &Interval = createEmptyInterval(reg);
  VNInfo *VN = Interval.getNextValue(
      SlotIndex(getInstructionIndex(startInst).getRegSlot()),
      getVNInfoAllocator());
  LiveRange::Segment S(SlotIndex(getInstructionIndex(startInst).getRegSlot()),
                       getMBBEndIdx(startInst.getParent()), VN);
  Interval.addSegment(S);

  return S;
}

//===----------------------------------------------------------------------===//
//                          Register mask functions
//===----------------------------------------------------------------------===//
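
// checkRegMaskInterference walks every regmask slot (typically a call site)
// that overlaps LI and clears the clobbered registers out of UsableRegs. It
// returns true iff LI crosses at least one regmask, i.e. iff UsableRegs was
// initialized.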
bool LiveIntervals::checkRegMaskInterference(LiveInterval &LI,
                                             BitVector &UsableRegs) {
  if (LI.empty())
    return false;
  LiveInterval::iterator LiveI = LI.begin(), LiveE = LI.end();

  // Use smaller arrays for local live ranges.
  ArrayRef<SlotIndex> Slots;
  ArrayRef<const uint32_t*> Bits;
  if (MachineBasicBlock *MBB = intervalIsInOneMBB(LI)) {
    Slots = getRegMaskSlotsInBlock(MBB->getNumber());
    Bits = getRegMaskBitsInBlock(MBB->getNumber());
  } else {
    Slots = getRegMaskSlots();
    Bits = getRegMaskBits();
  }

  // We are going to enumerate all the register mask slots contained in LI.
  // Start with a binary search of RegMaskSlots to find a starting point.
  ArrayRef<SlotIndex>::iterator SlotI = llvm::lower_bound(Slots, LiveI->start);
  ArrayRef<SlotIndex>::iterator SlotE = Slots.end();

  // No slots in range, LI begins after the last call.
  if (SlotI == SlotE)
    return false;

  bool Found = false;
  while (true) {
    assert(*SlotI >= LiveI->start);
    // Loop over all slots overlapping this segment.
    while (*SlotI < LiveI->end) {
      // *SlotI overlaps LI. Collect mask bits.
      if (!Found) {
        // This is the first overlap. Initialize UsableRegs to all ones.
        UsableRegs.clear();
        UsableRegs.resize(TRI->getNumRegs(), true);
        Found = true;
      }
      // Remove usable registers clobbered by this mask.
      UsableRegs.clearBitsNotInMask(Bits[SlotI-Slots.begin()]);
      if (++SlotI == SlotE)
        return Found;
    }
    // *SlotI is beyond the current LI segment.
    LiveI = LI.advanceTo(LiveI, *SlotI);
    if (LiveI == LiveE)
      return Found;
    // Advance SlotI until it overlaps.
    while (*SlotI < LiveI->start)
      if (++SlotI == SlotE)
        return Found;
  }
}

//===----------------------------------------------------------------------===//
//                           IntervalUpdate class.
//===----------------------------------------------------------------------===//

/// Toolkit used by handleMove to trim or extend live intervals.
class LiveIntervals::HMEditor {
private:
  LiveIntervals &LIS;
  const MachineRegisterInfo &MRI;
  const TargetRegisterInfo &TRI;
  SlotIndex OldIdx;
  SlotIndex NewIdx;
  SmallPtrSet<LiveRange*, 8> Updated;
  bool UpdateFlags;

public:
  HMEditor(LiveIntervals &LIS, const MachineRegisterInfo &MRI,
           const TargetRegisterInfo &TRI,
           SlotIndex OldIdx, SlotIndex NewIdx, bool UpdateFlags)
      : LIS(LIS), MRI(MRI), TRI(TRI), OldIdx(OldIdx), NewIdx(NewIdx),
        UpdateFlags(UpdateFlags) {}

  // FIXME: UpdateFlags is a workaround that creates live intervals for all
  // physregs, even those that aren't needed for regalloc, in order to update
  // kill flags. This is wasteful. Eventually, LiveVariables will strip all
  // kill flags, and postRA passes will use a live register utility instead.
  LiveRange *getRegUnitLI(unsigned Unit) {
    if (UpdateFlags && !MRI.isReservedRegUnit(Unit))
      return &LIS.getRegUnit(Unit);
    return LIS.getCachedRegUnit(Unit);
  }

  /// Update all live ranges touched by MI, assuming a move from OldIdx to
  /// NewIdx.
  void updateAllRanges(MachineInstr *MI) {
    LLVM_DEBUG(dbgs() << "handleMove " << OldIdx << " -> " << NewIdx << ": "
                      << *MI);
    bool hasRegMask = false;
    for (MachineOperand &MO : MI->operands()) {
      if (MO.isRegMask())
        hasRegMask = true;
      if (!MO.isReg())
        continue;
      if (MO.isUse()) {
        if (!MO.readsReg())
          continue;
        // Aggressively clear all kill flags.
        // They are reinserted by VirtRegRewriter.
        MO.setIsKill(false);
      }

      Register Reg = MO.getReg();
      if (!Reg)
        continue;
      if (Register::isVirtualRegister(Reg)) {
        LiveInterval &LI = LIS.getInterval(Reg);
        if (LI.hasSubRanges()) {
          unsigned SubReg = MO.getSubReg();
          LaneBitmask LaneMask = SubReg ? TRI.getSubRegIndexLaneMask(SubReg)
                                        : MRI.getMaxLaneMaskForVReg(Reg);
          for (LiveInterval::SubRange &S : LI.subranges()) {
            if ((S.LaneMask & LaneMask).none())
              continue;
            updateRange(S, Reg, S.LaneMask);
          }
        }
        updateRange(LI, Reg, LaneBitmask::getNone());
        // If main range has a hole and we are moving a subrange use across
        // the hole updateRange() cannot properly handle it since it only
        // gets the LiveRange and not the whole LiveInterval. As a result
        // we may end up with a main range not covering all subranges.
        // This is an extremely rare case, so let's check and reconstruct the
        // main range.
        for (LiveInterval::SubRange &S : LI.subranges()) {
          if (LI.covers(S))
            continue;
          LI.clear();
          LIS.constructMainRangeFromSubranges(LI);
          break;
        }

        continue;
      }

      // For physregs, only update the regunits that actually have a
      // precomputed live range.
      for (MCRegUnitIterator Units(Reg, &TRI); Units.isValid(); ++Units)
        if (LiveRange *LR = getRegUnitLI(*Units))
          updateRange(*LR, *Units, LaneBitmask::getNone());
    }
    if (hasRegMask)
      updateRegMaskSlots();
  }

private:
  /// Update a single live range, assuming an instruction has been moved from
  /// OldIdx to NewIdx.
  void updateRange(LiveRange &LR, unsigned Reg, LaneBitmask LaneMask) {
    if (!Updated.insert(&LR).second)
      return;
    LLVM_DEBUG({
      dbgs() << "     ";
      if (Register::isVirtualRegister(Reg)) {
        dbgs() << printReg(Reg);
        if (LaneMask.any())
          dbgs() << " L" << PrintLaneMask(LaneMask);
      } else {
        dbgs() << printRegUnit(Reg, &TRI);
      }
      dbgs() << ":\t" << LR << '\n';
    });
    if (SlotIndex::isEarlierInstr(OldIdx, NewIdx))
      handleMoveDown(LR);
    else
      handleMoveUp(LR, Reg, LaneMask);
    LLVM_DEBUG(dbgs() << "        -->\t" << LR << '\n');
    LR.verify();
  }
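
  // updateRange() above is memoized through the Updated set: each LiveRange
  // is processed at most once per HMEditor, even when several operands of MI
  // name the same register. The two helpers below then edit the segment list
  // in place, reusing existing Segment and VNInfo slots via std::copy and
  // std::copy_backward instead of erasing and reinserting segments.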

  /// Update LR to reflect an instruction has been moved downwards from OldIdx
  /// to NewIdx (OldIdx < NewIdx).
  void handleMoveDown(LiveRange &LR) {
    LiveRange::iterator E = LR.end();
    // Segment going into OldIdx.
    LiveRange::iterator OldIdxIn = LR.find(OldIdx.getBaseIndex());

    // No value live before or after OldIdx? Nothing to do.
    if (OldIdxIn == E || SlotIndex::isEarlierInstr(OldIdx, OldIdxIn->start))
      return;

    LiveRange::iterator OldIdxOut;
    // Do we have a value live-in to OldIdx?
    if (SlotIndex::isEarlierInstr(OldIdxIn->start, OldIdx)) {
      // If the live-in value already extends to NewIdx, there is nothing to do.
      if (SlotIndex::isEarlierEqualInstr(NewIdx, OldIdxIn->end))
        return;
      // Aggressively remove all kill flags from the old kill point.
      // Kill flags shouldn't be used while live intervals exist; they will be
      // reinserted by VirtRegRewriter.
      if (MachineInstr *KillMI = LIS.getInstructionFromIndex(OldIdxIn->end))
        for (MachineOperand &MOP : mi_bundle_ops(*KillMI))
          if (MOP.isReg() && MOP.isUse())
            MOP.setIsKill(false);

      // Is there a def before NewIdx which is not OldIdx?
      LiveRange::iterator Next = std::next(OldIdxIn);
      if (Next != E && !SlotIndex::isSameInstr(OldIdx, Next->start) &&
          SlotIndex::isEarlierInstr(Next->start, NewIdx)) {
        // If we are here then OldIdx was just a use but not a def. We only
        // have to ensure liveness extends to NewIdx.
        LiveRange::iterator NewIdxIn =
            LR.advanceTo(Next, NewIdx.getBaseIndex());
        // Extend the segment before NewIdx if necessary.
        if (NewIdxIn == E ||
            !SlotIndex::isEarlierInstr(NewIdxIn->start, NewIdx)) {
          LiveRange::iterator Prev = std::prev(NewIdxIn);
          Prev->end = NewIdx.getRegSlot();
        }
        // Extend OldIdxIn.
        OldIdxIn->end = Next->start;
        return;
      }

      // Adjust OldIdxIn->end to reach NewIdx. This may temporarily make LR
      // invalid by overlapping ranges.
      bool isKill = SlotIndex::isSameInstr(OldIdx, OldIdxIn->end);
      OldIdxIn->end = NewIdx.getRegSlot(OldIdxIn->end.isEarlyClobber());
      // If this was not a kill, then there was no def and we're done.
      if (!isKill)
        return;

      // Did we have a Def at OldIdx?
      OldIdxOut = Next;
      if (OldIdxOut == E || !SlotIndex::isSameInstr(OldIdx, OldIdxOut->start))
        return;
    } else {
      OldIdxOut = OldIdxIn;
    }

    // If we are here then there is a Definition at OldIdx. OldIdxOut points
    // to the segment starting there.
    assert(OldIdxOut != E && SlotIndex::isSameInstr(OldIdx, OldIdxOut->start) &&
           "No def?");
    VNInfo *OldIdxVNI = OldIdxOut->valno;
    assert(OldIdxVNI->def == OldIdxOut->start && "Inconsistent def");

    // If the defined value extends beyond NewIdx, just move the beginning
    // of the segment to NewIdx.
    SlotIndex NewIdxDef = NewIdx.getRegSlot(OldIdxOut->start.isEarlyClobber());
    if (SlotIndex::isEarlierInstr(NewIdxDef, OldIdxOut->end)) {
      OldIdxVNI->def = NewIdxDef;
      OldIdxOut->start = OldIdxVNI->def;
      return;
    }

    // If we are here then we have a Definition at OldIdx which ends before
    // NewIdx.

    // Is there an existing Def at NewIdx?
    LiveRange::iterator AfterNewIdx
      = LR.advanceTo(OldIdxOut, NewIdx.getRegSlot());
    bool OldIdxDefIsDead = OldIdxOut->end.isDead();
    if (!OldIdxDefIsDead &&
        SlotIndex::isEarlierInstr(OldIdxOut->end, NewIdxDef)) {
      // OldIdx is not a dead def, and NewIdxDef is inside a new interval.
      VNInfo *DefVNI;
      if (OldIdxOut != LR.begin() &&
          !SlotIndex::isEarlierInstr(std::prev(OldIdxOut)->end,
                                     OldIdxOut->start)) {
        // There is no gap between OldIdxOut and its predecessor anymore,
        // merge them.
        LiveRange::iterator IPrev = std::prev(OldIdxOut);
        DefVNI = OldIdxVNI;
        IPrev->end = OldIdxOut->end;
      } else {
        // The value is live in to OldIdx.
        LiveRange::iterator INext = std::next(OldIdxOut);
        assert(INext != E && "Must have following segment");
        // We merge OldIdxOut and its successor. As we're dealing with subreg
        // reordering, there is always a successor to OldIdxOut in the same BB.
        // We don't need INext->valno anymore and will reuse it for the new
        // segment we create later.
        DefVNI = OldIdxVNI;
        INext->start = OldIdxOut->end;
        INext->valno->def = INext->start;
      }
      // If NewIdx is behind the last segment, extend that and append a new one.
      if (AfterNewIdx == E) {
        // OldIdxOut is undef at this point, Slide (OldIdxOut;AfterNewIdx] up
        // one position.
        //    |-  ?/OldIdxOut  -| |- X0 -| ... |- Xn -| end
        // => |- X0/OldIdxOut  -| ... |- Xn -| |- undef/NewS -| end
        std::copy(std::next(OldIdxOut), E, OldIdxOut);
        // The last segment is undefined now, reuse it for a dead def.
        LiveRange::iterator NewSegment = std::prev(E);
        *NewSegment = LiveRange::Segment(NewIdxDef, NewIdxDef.getDeadSlot(),
                                         DefVNI);
        DefVNI->def = NewIdxDef;

        LiveRange::iterator Prev = std::prev(NewSegment);
        Prev->end = NewIdxDef;
      } else {
        // OldIdxOut is undef at this point, Slide (OldIdxOut;AfterNewIdx] up
        // one position.
        //    |-  ?/OldIdxOut -| |- X0 -| ... |- Xn/AfterNewIdx -| |- Next -|
        // => |- X0/OldIdxOut -| ... |- Xn -| |- Xn/AfterNewIdx -| |- Next -|
        std::copy(std::next(OldIdxOut), std::next(AfterNewIdx), OldIdxOut);
        LiveRange::iterator Prev = std::prev(AfterNewIdx);
        // We have two cases:
        if (SlotIndex::isEarlierInstr(Prev->start, NewIdxDef)) {
          // Case 1: NewIdx is inside a liverange. Split this liverange at
          // NewIdxDef into the segment "Prev" followed by "NewSegment".
          LiveRange::iterator NewSegment = AfterNewIdx;
          *NewSegment = LiveRange::Segment(NewIdxDef, Prev->end, Prev->valno);
          Prev->valno->def = NewIdxDef;

          *Prev = LiveRange::Segment(Prev->start, NewIdxDef, DefVNI);
          DefVNI->def = Prev->start;
        } else {
          // Case 2: NewIdx is in a lifetime hole. Keep AfterNewIdx as is and
          // turn Prev into a segment from NewIdx to AfterNewIdx->start.
          *Prev = LiveRange::Segment(NewIdxDef, AfterNewIdx->start, DefVNI);
          DefVNI->def = NewIdxDef;
          assert(DefVNI != AfterNewIdx->valno);
        }
      }
      return;
    }

    if (AfterNewIdx != E &&
        SlotIndex::isSameInstr(AfterNewIdx->start, NewIdxDef)) {
      // There is an existing def at NewIdx. The def at OldIdx is coalesced into
      // that value.
      assert(AfterNewIdx->valno != OldIdxVNI && "Multiple defs of value?");
      LR.removeValNo(OldIdxVNI);
    } else {
      // There was no existing def at NewIdx. We need to create a dead def
      // at NewIdx. Shift segments over the old OldIdxOut segment, this frees
      // a new segment at the place where we want to construct the dead def.
      //    |- OldIdxOut -| |- X0 -| ... |- Xn -| |- AfterNewIdx -|
      // => |- X0/OldIdxOut -| ... |- Xn -| |- undef/NewS. -| |- AfterNewIdx -|
      assert(AfterNewIdx != OldIdxOut && "Inconsistent iterators");
      std::copy(std::next(OldIdxOut), AfterNewIdx, OldIdxOut);
      // We can reuse OldIdxVNI now.
      LiveRange::iterator NewSegment = std::prev(AfterNewIdx);
      VNInfo *NewSegmentVNI = OldIdxVNI;
      NewSegmentVNI->def = NewIdxDef;
      *NewSegment = LiveRange::Segment(NewIdxDef, NewIdxDef.getDeadSlot(),
                                       NewSegmentVNI);
    }
  }

  /// Update LR to reflect an instruction has been moved upwards from OldIdx
  /// to NewIdx (NewIdx < OldIdx).
  void handleMoveUp(LiveRange &LR, unsigned Reg, LaneBitmask LaneMask) {
    LiveRange::iterator E = LR.end();
    // Segment going into OldIdx.
    LiveRange::iterator OldIdxIn = LR.find(OldIdx.getBaseIndex());

    // No value live before or after OldIdx? Nothing to do.
    if (OldIdxIn == E || SlotIndex::isEarlierInstr(OldIdx, OldIdxIn->start))
      return;

    LiveRange::iterator OldIdxOut;
    // Do we have a value live-in to OldIdx?
    if (SlotIndex::isEarlierInstr(OldIdxIn->start, OldIdx)) {
      // If the live-in value isn't killed here, then we have no Def at
      // OldIdx, moreover the value must be live at NewIdx so there is nothing
      // to do.
      bool isKill = SlotIndex::isSameInstr(OldIdx, OldIdxIn->end);
      if (!isKill)
        return;

      // At this point we have to move OldIdxIn->end back to the nearest
      // previous use or (dead-)def but no further than NewIdx.
      SlotIndex DefBeforeOldIdx
        = std::max(OldIdxIn->start.getDeadSlot(),
                   NewIdx.getRegSlot(OldIdxIn->end.isEarlyClobber()));
      OldIdxIn->end = findLastUseBefore(DefBeforeOldIdx, Reg, LaneMask);

      // Did we have a Def at OldIdx? If not we are done now.
      OldIdxOut = std::next(OldIdxIn);
      if (OldIdxOut == E || !SlotIndex::isSameInstr(OldIdx, OldIdxOut->start))
        return;
    } else {
      OldIdxOut = OldIdxIn;
      OldIdxIn = OldIdxOut != LR.begin() ? std::prev(OldIdxOut) : E;
    }

    // If we are here then there is a Definition at OldIdx. OldIdxOut points
    // to the segment starting there.
    assert(OldIdxOut != E && SlotIndex::isSameInstr(OldIdx, OldIdxOut->start) &&
           "No def?");
    VNInfo *OldIdxVNI = OldIdxOut->valno;
    assert(OldIdxVNI->def == OldIdxOut->start && "Inconsistent def");
    bool OldIdxDefIsDead = OldIdxOut->end.isDead();

    // Is there an existing def at NewIdx?
    SlotIndex NewIdxDef = NewIdx.getRegSlot(OldIdxOut->start.isEarlyClobber());
    LiveRange::iterator NewIdxOut = LR.find(NewIdx.getRegSlot());
    if (SlotIndex::isSameInstr(NewIdxOut->start, NewIdx)) {
      assert(NewIdxOut->valno != OldIdxVNI &&
             "Same value defined more than once?");
      // If OldIdx was a dead def remove it.
      if (!OldIdxDefIsDead) {
        // Remove segment starting at NewIdx and move begin of OldIdxOut to
        // NewIdx so it can take its place.
        OldIdxVNI->def = NewIdxDef;
        OldIdxOut->start = NewIdxDef;
        LR.removeValNo(NewIdxOut->valno);
      } else {
        // Simply remove the dead def at OldIdx.
        LR.removeValNo(OldIdxVNI);
      }
    } else {
      // Previously nothing was live after NewIdx, so all we have to do now is
      // move the begin of OldIdxOut to NewIdx.
      if (!OldIdxDefIsDead) {
        // Do we have any intermediate Defs between OldIdx and NewIdx?
        if (OldIdxIn != E &&
            SlotIndex::isEarlierInstr(NewIdxDef, OldIdxIn->start)) {
          // OldIdx is not a dead def and NewIdx is before predecessor start.
          LiveRange::iterator NewIdxIn = NewIdxOut;
          assert(NewIdxIn == LR.find(NewIdx.getBaseIndex()));
          const SlotIndex SplitPos = NewIdxDef;
          OldIdxVNI = OldIdxIn->valno;

          SlotIndex NewDefEndPoint = std::next(NewIdxIn)->end;
          LiveRange::iterator Prev = std::prev(OldIdxIn);
          if (OldIdxIn != LR.begin() &&
              SlotIndex::isEarlierInstr(NewIdx, Prev->end)) {
            // If the segment before OldIdx read a value defined earlier than
            // NewIdx, the moved instruction also reads and forwards that
            // value. Extend the lifetime of the new def point.

            // Extend to where the previous range started, unless there is
            // another redef first.
            NewDefEndPoint = std::min(OldIdxIn->start,
                                      std::next(NewIdxOut)->start);
          }

          // Merge the OldIdxIn and OldIdxOut segments into OldIdxOut.
          OldIdxOut->valno->def = OldIdxIn->start;
          *OldIdxOut = LiveRange::Segment(OldIdxIn->start, OldIdxOut->end,
                                          OldIdxOut->valno);
          // OldIdxIn and OldIdxVNI are now undef and can be overridden.
          // We Slide [NewIdxIn, OldIdxIn) down one position.
          //    |- X0/NewIdxIn -| ... |- Xn-1 -||- Xn/OldIdxIn -||- OldIdxOut -|
          // => |- undef/NewIdxIn -| |- X0 -| ... |- Xn-1 -| |- Xn/OldIdxOut -|
          std::copy_backward(NewIdxIn, OldIdxIn, OldIdxOut);
          // NewIdxIn is now considered undef so we can reuse it for the moved
          // value.
          LiveRange::iterator NewSegment = NewIdxIn;
          LiveRange::iterator Next = std::next(NewSegment);
          if (SlotIndex::isEarlierInstr(Next->start, NewIdx)) {
            // There is no gap between NewSegment and its predecessor.
            *NewSegment = LiveRange::Segment(Next->start, SplitPos,
                                             Next->valno);

            *Next = LiveRange::Segment(SplitPos, NewDefEndPoint, OldIdxVNI);
            Next->valno->def = SplitPos;
          } else {
            // There is a gap between NewSegment and its predecessor.
            // Value becomes live in.
            *NewSegment = LiveRange::Segment(SplitPos, Next->start, OldIdxVNI);
            NewSegment->valno->def = SplitPos;
          }
        } else {
          // Leave the end point of a live def.
          OldIdxOut->start = NewIdxDef;
          OldIdxVNI->def = NewIdxDef;
          if (OldIdxIn != E && SlotIndex::isEarlierInstr(NewIdx, OldIdxIn->end))
            OldIdxIn->end = NewIdxDef;
        }
      } else if (OldIdxIn != E
                 && SlotIndex::isEarlierInstr(NewIdxOut->start, NewIdx)
                 && SlotIndex::isEarlierInstr(NewIdx, NewIdxOut->end)) {
        // OldIdxVNI is a dead def that has been moved into the middle of
        // another value in LR. That can happen when LR is a whole register,
        // but the dead def is a write to a subreg that is dead at NewIdx.
        // The dead def may have been moved across other values
        // in LR, so move OldIdxOut up to NewIdxOut. Slide [NewIdxOut;OldIdxOut)
        // down one position.
        //    |- X0/NewIdxOut -| ... |- Xn-1 -| |- Xn/OldIdxOut -| |- next - |
        // => |- X0/NewIdxOut -| |- X0 -| ... |- Xn-1 -| |- next -|
        std::copy_backward(NewIdxOut, OldIdxOut, std::next(OldIdxOut));
        // Modify the segment at NewIdxOut and the following segment to meet at
        // the point of the dead def, with the following segment getting
        // OldIdxVNI as its value number.
        *NewIdxOut = LiveRange::Segment(
            NewIdxOut->start, NewIdxDef.getRegSlot(), NewIdxOut->valno);
        *(NewIdxOut + 1) = LiveRange::Segment(
            NewIdxDef.getRegSlot(), (NewIdxOut + 1)->end, OldIdxVNI);
        OldIdxVNI->def = NewIdxDef;
        // Modify subsequent segments to be defined by the moved def OldIdxVNI.
        for (auto Idx = NewIdxOut + 2; Idx <= OldIdxOut; ++Idx)
          Idx->valno = OldIdxVNI;
        // Aggressively remove all dead flags from the former dead definition.
        // Kill/dead flags shouldn't be used while live intervals exist; they
        // will be reinserted by VirtRegRewriter.
        if (MachineInstr *KillMI = LIS.getInstructionFromIndex(NewIdx))
          for (MIBundleOperands MO(*KillMI); MO.isValid(); ++MO)
            if (MO->isReg() && !MO->isUse())
              MO->setIsDead(false);
      } else {
        // OldIdxVNI is a dead def. It may have been moved across other values
        // in LR, so move OldIdxOut up to NewIdxOut. Slide [NewIdxOut;OldIdxOut)
        // down one position.
        //    |- X0/NewIdxOut -| ... |- Xn-1 -| |- Xn/OldIdxOut -| |- next - |
        // => |- undef/NewIdxOut -| |- X0 -| ... |- Xn-1 -| |- next -|
        std::copy_backward(NewIdxOut, OldIdxOut, std::next(OldIdxOut));
        // OldIdxVNI can be reused now to build a new dead def segment.
        LiveRange::iterator NewSegment = NewIdxOut;
        VNInfo *NewSegmentVNI = OldIdxVNI;
        *NewSegment = LiveRange::Segment(NewIdxDef, NewIdxDef.getDeadSlot(),
                                         NewSegmentVNI);
        NewSegmentVNI->def = NewIdxDef;
      }
    }
  }

  void updateRegMaskSlots() {
    SmallVectorImpl<SlotIndex>::iterator RI =
        llvm::lower_bound(LIS.RegMaskSlots, OldIdx);
    assert(RI != LIS.RegMaskSlots.end() && *RI == OldIdx.getRegSlot() &&
           "No RegMask at OldIdx.");
    *RI = NewIdx.getRegSlot();
    assert((RI == LIS.RegMaskSlots.begin() ||
            SlotIndex::isEarlierInstr(*std::prev(RI), *RI)) &&
           "Cannot move regmask instruction above another call");
    assert((std::next(RI) == LIS.RegMaskSlots.end() ||
            SlotIndex::isEarlierInstr(*RI, *std::next(RI))) &&
           "Cannot move regmask instruction below another call");
  }
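
  // findLastUseBefore picks one of two strategies: for a virtual register it
  // scans the register's use list, which is expected to be short, while for
  // a register unit it walks instructions backwards from OldIdx, since
  // scanning the use lists of a physical register could be very expensive.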

  // Return the last use of reg between NewIdx and OldIdx.
  SlotIndex findLastUseBefore(SlotIndex Before, unsigned Reg,
                              LaneBitmask LaneMask) {
    if (Register::isVirtualRegister(Reg)) {
      SlotIndex LastUse = Before;
      for (MachineOperand &MO : MRI.use_nodbg_operands(Reg)) {
        if (MO.isUndef())
          continue;
        unsigned SubReg = MO.getSubReg();
        if (SubReg != 0 && LaneMask.any()
            && (TRI.getSubRegIndexLaneMask(SubReg) & LaneMask).none())
          continue;

        const MachineInstr &MI = *MO.getParent();
        SlotIndex InstSlot = LIS.getSlotIndexes()->getInstructionIndex(MI);
        if (InstSlot > LastUse && InstSlot < OldIdx)
          LastUse = InstSlot.getRegSlot();
      }
      return LastUse;
    }

    // This is a regunit interval, so scanning the use list could be very
    // expensive. Scan upwards from OldIdx instead.
    assert(Before < OldIdx && "Expected upwards move");
    SlotIndexes *Indexes = LIS.getSlotIndexes();
    MachineBasicBlock *MBB = Indexes->getMBBFromIndex(Before);

    // OldIdx may not correspond to an instruction any longer, so set MII to
    // point to the next instruction after OldIdx, or MBB->end().
    MachineBasicBlock::iterator MII = MBB->end();
    if (MachineInstr *MI = Indexes->getInstructionFromIndex(
            Indexes->getNextNonNullIndex(OldIdx)))
      if (MI->getParent() == MBB)
        MII = MI;

    MachineBasicBlock::iterator Begin = MBB->begin();
    while (MII != Begin) {
      if ((--MII)->isDebugInstr())
        continue;
      SlotIndex Idx = Indexes->getInstructionIndex(*MII);

      // Stop searching when Before is reached.
      if (!SlotIndex::isEarlierInstr(Before, Idx))
        return Before;

      // Check if MII uses Reg.
      for (MIBundleOperands MO(*MII); MO.isValid(); ++MO)
        if (MO->isReg() && !MO->isUndef() &&
            Register::isPhysicalRegister(MO->getReg()) &&
            TRI.hasRegUnit(MO->getReg(), Reg))
          return Idx.getRegSlot();
    }
    // Didn't reach Before. It must be the first instruction in the block.
    return Before;
  }
};
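
// A typical call site looks like the following sketch (hypothetical pass
// code, not part of this file): the instruction is moved within its block
// first, and handleMove is called afterwards to repair indexes and ranges.
//   MBB.splice(InsertPt, &MBB, MI.getIterator()); // reorder inside the block
//   LIS->handleMove(MI);                          // then repair liveness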
void LiveIntervals::handleMove(MachineInstr &MI, bool UpdateFlags) {
  // It is fine to move a bundle as a whole, but not an individual instruction
  // inside it.
  assert((!MI.isBundled() || MI.getOpcode() == TargetOpcode::BUNDLE) &&
         "Cannot move instruction in bundle");
  SlotIndex OldIndex = Indexes->getInstructionIndex(MI);
  Indexes->removeMachineInstrFromMaps(MI);
  SlotIndex NewIndex = Indexes->insertMachineInstrInMaps(MI);
  assert(getMBBStartIdx(MI.getParent()) <= OldIndex &&
         OldIndex < getMBBEndIdx(MI.getParent()) &&
         "Cannot handle moves across basic block boundaries.");

  HMEditor HME(*this, *MRI, *TRI, OldIndex, NewIndex, UpdateFlags);
  HME.updateAllRanges(&MI);
}

void LiveIntervals::handleMoveIntoNewBundle(MachineInstr &BundleStart,
                                            bool UpdateFlags) {
  assert((BundleStart.getOpcode() == TargetOpcode::BUNDLE) &&
         "Bundle start is not a bundle");
  SmallVector<SlotIndex, 16> ToProcess;
  const SlotIndex NewIndex = Indexes->insertMachineInstrInMaps(BundleStart);
  auto BundleEnd = getBundleEnd(BundleStart.getIterator());

  auto I = BundleStart.getIterator();
  I++;
  while (I != BundleEnd) {
    if (!Indexes->hasIndex(*I)) {
      // Advance past unindexed instructions; continuing without the
      // increment would loop forever.
      I++;
      continue;
    }
    SlotIndex OldIndex = Indexes->getInstructionIndex(*I, true);
    ToProcess.push_back(OldIndex);
    Indexes->removeMachineInstrFromMaps(*I, true);
    I++;
  }
  for (SlotIndex OldIndex : ToProcess) {
    HMEditor HME(*this, *MRI, *TRI, OldIndex, NewIndex, UpdateFlags);
    HME.updateAllRanges(&BundleStart);
  }

  // Fix up dead defs
  const SlotIndex Index = getInstructionIndex(BundleStart);
  for (unsigned Idx = 0, E = BundleStart.getNumOperands(); Idx != E; ++Idx) {
    MachineOperand &MO = BundleStart.getOperand(Idx);
    if (!MO.isReg())
      continue;
    Register Reg = MO.getReg();
    if (Reg.isVirtual() && hasInterval(Reg) && !MO.isUndef()) {
      LiveInterval &LI = getInterval(Reg);
      LiveQueryResult LRQ = LI.Query(Index);
      if (LRQ.isDeadDef())
        MO.setIsDead();
    }
  }
}
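
// Note the two-phase structure above: every bundled instruction first loses
// its slot index, and only then is each recorded OldIndex replayed against
// the bundle header's NewIndex, so updateAllRanges never observes a
// half-updated index map.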

void LiveIntervals::repairOldRegInRange(const MachineBasicBlock::iterator Begin,
                                        const MachineBasicBlock::iterator End,
                                        const SlotIndex endIdx,
                                        LiveRange &LR, const unsigned Reg,
                                        LaneBitmask LaneMask) {
  LiveInterval::iterator LII = LR.find(endIdx);
  SlotIndex lastUseIdx;
  if (LII == LR.begin()) {
    // This happens when the function is called for a subregister that only
    // occurs _after_ the range that is to be repaired.
    return;
  }
  if (LII != LR.end() && LII->start < endIdx)
    lastUseIdx = LII->end;
  else
    --LII;

  for (MachineBasicBlock::iterator I = End; I != Begin;) {
    --I;
    MachineInstr &MI = *I;
    if (MI.isDebugInstr())
      continue;

    SlotIndex instrIdx = getInstructionIndex(MI);
    bool isStartValid = getInstructionFromIndex(LII->start);
    bool isEndValid = getInstructionFromIndex(LII->end);

    // FIXME: This doesn't currently handle early-clobber or multiple removed
    // defs inside of the region to repair.
    for (MachineInstr::mop_iterator OI = MI.operands_begin(),
                                    OE = MI.operands_end();
         OI != OE; ++OI) {
      const MachineOperand &MO = *OI;
      if (!MO.isReg() || MO.getReg() != Reg)
        continue;

      unsigned SubReg = MO.getSubReg();
      LaneBitmask Mask = TRI->getSubRegIndexLaneMask(SubReg);
      if ((Mask & LaneMask).none())
        continue;

      if (MO.isDef()) {
        if (!isStartValid) {
          if (LII->end.isDead()) {
            SlotIndex prevStart;
            if (LII != LR.begin())
              prevStart = std::prev(LII)->start;

            // FIXME: This could be more efficient if there was a
            // removeSegment method that returned an iterator.
            LR.removeSegment(*LII, true);
            if (prevStart.isValid())
              LII = LR.find(prevStart);
            else
              LII = LR.begin();
          } else {
            LII->start = instrIdx.getRegSlot();
            LII->valno->def = instrIdx.getRegSlot();
            if (MO.getSubReg() && !MO.isUndef())
              lastUseIdx = instrIdx.getRegSlot();
            else
              lastUseIdx = SlotIndex();
            continue;
          }
        }

        if (!lastUseIdx.isValid()) {
          VNInfo *VNI = LR.getNextValue(instrIdx.getRegSlot(), VNInfoAllocator);
          LiveRange::Segment S(instrIdx.getRegSlot(),
                               instrIdx.getDeadSlot(), VNI);
          LII = LR.addSegment(S);
        } else if (LII->start != instrIdx.getRegSlot()) {
          VNInfo *VNI = LR.getNextValue(instrIdx.getRegSlot(), VNInfoAllocator);
          LiveRange::Segment S(instrIdx.getRegSlot(), lastUseIdx, VNI);
          LII = LR.addSegment(S);
        }

        if (MO.getSubReg() && !MO.isUndef())
          lastUseIdx = instrIdx.getRegSlot();
        else
          lastUseIdx = SlotIndex();
      } else if (MO.isUse()) {
        // FIXME: This should probably be handled outside of this branch,
        // either as part of the def case (for defs inside of the region) or
        // after the loop over the region.
        if (!isEndValid && !LII->end.isBlock())
          LII->end = instrIdx.getRegSlot();
        if (!lastUseIdx.isValid())
          lastUseIdx = instrIdx.getRegSlot();
      }
    }
  }
}
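
// repairIntervalsInRange recomputes liveness for OrigRegs after a pass has
// rewritten the instructions in [Begin, End): it widens the range to the
// nearest instructions that still have slot indexes, repairs the index map,
// creates intervals for any new virtual registers, and finally repairs each
// register's main range and subranges.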
void
LiveIntervals::repairIntervalsInRange(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator Begin,
                                      MachineBasicBlock::iterator End,
                                      ArrayRef<Register> OrigRegs) {
  // Find anchor points, which are at the beginning/end of blocks or at
  // instructions that already have indexes.
  while (Begin != MBB->begin() && !Indexes->hasIndex(*Begin))
    --Begin;
  while (End != MBB->end() && !Indexes->hasIndex(*End))
    ++End;

  SlotIndex endIdx;
  if (End == MBB->end())
    endIdx = getMBBEndIdx(MBB).getPrevSlot();
  else
    endIdx = getInstructionIndex(*End);

  Indexes->repairIndexesInRange(MBB, Begin, End);

  for (MachineBasicBlock::iterator I = End; I != Begin;) {
    --I;
    MachineInstr &MI = *I;
    if (MI.isDebugInstr())
      continue;
    for (MachineInstr::const_mop_iterator MOI = MI.operands_begin(),
                                          MOE = MI.operands_end();
         MOI != MOE; ++MOI) {
      if (MOI->isReg() && Register::isVirtualRegister(MOI->getReg()) &&
          !hasInterval(MOI->getReg())) {
        createAndComputeVirtRegInterval(MOI->getReg());
      }
    }
  }

  for (Register Reg : OrigRegs) {
    if (!Reg.isVirtual())
      continue;

    LiveInterval &LI = getInterval(Reg);
    // FIXME: Should we support undefs that gain defs?
    if (!LI.hasAtLeastOneValue())
      continue;

    for (LiveInterval::SubRange &S : LI.subranges())
      repairOldRegInRange(Begin, End, endIdx, S, Reg, S.LaneMask);

    repairOldRegInRange(Begin, End, endIdx, LI, Reg);
  }
}
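
// The two helpers below undo a single definition: they erase the value
// number defined at Pos, from every regunit range of a physical register or
// from the main range and all subranges of a virtual register.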
void LiveIntervals::removePhysRegDefAt(unsigned Reg, SlotIndex Pos) {
  for (MCRegUnitIterator Unit(Reg, TRI); Unit.isValid(); ++Unit) {
    if (LiveRange *LR = getCachedRegUnit(*Unit))
      if (VNInfo *VNI = LR->getVNInfoAt(Pos))
        LR->removeValNo(VNI);
  }
}

void LiveIntervals::removeVRegDefAt(LiveInterval &LI, SlotIndex Pos) {
  // LI may not have the main range computed yet, but its subranges may
  // be present.
  VNInfo *VNI = LI.getVNInfoAt(Pos);
  if (VNI != nullptr) {
    assert(VNI->def.getBaseIndex() == Pos.getBaseIndex());
    LI.removeValNo(VNI);
  }

  // Also remove the value defined in subranges.
  for (LiveInterval::SubRange &S : LI.subranges()) {
    if (VNInfo *SVNI = S.getVNInfoAt(Pos))
      if (SVNI->def.getBaseIndex() == Pos.getBaseIndex())
        S.removeValNo(SVNI);
  }
  LI.removeEmptySubRanges();
}
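
// splitSeparateComponents splits a LiveInterval whose value numbers form
// several disconnected components into one interval per component: every
// extra component gets a fresh virtual register of the same class, and
// Distribute() moves the corresponding segments and value numbers over.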
void LiveIntervals::splitSeparateComponents(LiveInterval &LI,
    SmallVectorImpl<LiveInterval*> &SplitLIs) {
  ConnectedVNInfoEqClasses ConEQ(*this);
  unsigned NumComp = ConEQ.Classify(LI);
  if (NumComp <= 1)
    return;
  LLVM_DEBUG(dbgs() << "  Split " << NumComp << " components: " << LI << '\n');
  unsigned Reg = LI.reg;
  const TargetRegisterClass *RegClass = MRI->getRegClass(Reg);
  for (unsigned I = 1; I < NumComp; ++I) {
    Register NewVReg = MRI->createVirtualRegister(RegClass);
    LiveInterval &NewLI = createEmptyInterval(NewVReg);
    SplitLIs.push_back(&NewLI);
  }
  ConEQ.Distribute(LI, SplitLIs.data(), *MRI);
}
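
// Recompute LI's main range as the union of its subranges. This is useful
// when the subranges are up to date but the main range has been invalidated,
// as in the hole-repair path of HMEditor::updateAllRanges above.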
void LiveIntervals::constructMainRangeFromSubranges(LiveInterval &LI) {
  assert(LICalc && "LICalc not initialized.");
  LICalc->reset(MF, getSlotIndexes(), DomTree, &getVNInfoAllocator());
  LICalc->constructMainRangeFromSubranges(LI);
}