//===- ExecutionDomainFix.cpp - Fix execution domain issues ----*- C++ -*--===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/ExecutionDomainFix.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/Support/Debug.h"

using namespace llvm;

#define DEBUG_TYPE "execution-deps-fix"

iterator_range<SmallVectorImpl<int>::const_iterator>
ExecutionDomainFix::regIndices(unsigned Reg) const {
  assert(Reg < AliasMap.size() && "Invalid register");
  const auto &Entry = AliasMap[Reg];
  return make_range(Entry.begin(), Entry.end());
}
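
// Allocate a DomainValue, reusing an entry from the recycling list when one
// is available, and seed it with the given execution domain (if any).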
DomainValue *ExecutionDomainFix::alloc(int domain) {
  DomainValue *dv = Avail.empty() ? new (Allocator.Allocate()) DomainValue
                                  : Avail.pop_back_val();
  if (domain >= 0)
    dv->addDomain(domain);
  assert(dv->Refs == 0 && "Reference count wasn't cleared");
  assert(!dv->Next && "Chained DomainValue shouldn't have been recycled");
  return dv;
}
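
// Drop a reference to DV. When the last reference goes away, collapse any
// queued instructions into the first available domain and recycle the
// DomainValue, continuing down the merge chain via Next.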
void ExecutionDomainFix::release(DomainValue *DV) {
  while (DV) {
    assert(DV->Refs && "Bad DomainValue");
    if (--DV->Refs)
      return;

    // There are no more DV references. Collapse any contained instructions.
    if (DV->AvailableDomains && !DV->isCollapsed())
      collapse(DV, DV->getFirstDomain());

    DomainValue *Next = DV->Next;
    DV->clear();
    Avail.push_back(DV);
    // Also release the next DomainValue in the chain.
    DV = Next;
  }
}
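
// Follow the chain of merged DomainValues to its end and retarget the
// reference so it points at the live value directly (a simple form of path
// compression).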
DomainValue *ExecutionDomainFix::resolve(DomainValue *&DVRef) {
  DomainValue *DV = DVRef;
  if (!DV || !DV->Next)
    return DV;

  // DV has a chain. Find the end.
  do
    DV = DV->Next;
  while (DV->Next);

  // Update DVRef to point to DV.
  retain(DV);
  release(DVRef);
  DVRef = DV;
  return DV;
}
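
// Set LiveRegs[rx] to dv, adjusting the reference counts of both the old and
// the new DomainValue.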
void ExecutionDomainFix::setLiveReg(int rx, DomainValue *dv) {
  assert(unsigned(rx) < NumRegs && "Invalid index");
  assert(!LiveRegs.empty() && "Must enter basic block first.");

  if (LiveRegs[rx] == dv)
    return;
  if (LiveRegs[rx])
    release(LiveRegs[rx]);
  LiveRegs[rx] = retain(dv);
}
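
// Kill register rx: release its DomainValue and mark it as having no domain.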
void ExecutionDomainFix::kill(int rx) {
  assert(unsigned(rx) < NumRegs && "Invalid index");
  assert(!LiveRegs.empty() && "Must enter basic block first.");
  if (!LiveRegs[rx])
    return;

  release(LiveRegs[rx]);
  LiveRegs[rx] = nullptr;
}
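
// Force register rx into the given domain, collapsing an existing open
// DomainValue if necessary.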
void ExecutionDomainFix::force(int rx, unsigned domain) {
  assert(unsigned(rx) < NumRegs && "Invalid index");
  assert(!LiveRegs.empty() && "Must enter basic block first.");
  if (DomainValue *dv = LiveRegs[rx]) {
    if (dv->isCollapsed())
      dv->addDomain(domain);
    else if (dv->hasDomain(domain))
      collapse(dv, domain);
    else {
      // This is an incompatible open DomainValue. Collapse it to whatever and
      // force the new value into domain. This costs a domain crossing.
      collapse(dv, dv->getFirstDomain());
      assert(LiveRegs[rx] && "Not live after collapse?");
      LiveRegs[rx]->addDomain(domain);
    }
  } else {
    // Set up basic collapsed DomainValue.
    setLiveReg(rx, alloc(domain));
  }
}
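
// Collapse an open DomainValue into the given domain: rewrite all queued
// instructions to that domain and split off any remaining users.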
void ExecutionDomainFix::collapse(DomainValue *dv, unsigned domain) {
  assert(dv->hasDomain(domain) && "Cannot collapse");

  // Collapse all the instructions.
  while (!dv->Instrs.empty())
    TII->setExecutionDomain(*dv->Instrs.pop_back_val(), domain);
  dv->setSingleDomain(domain);

  // If there are multiple users, give them new, unique DomainValues.
  if (!LiveRegs.empty() && dv->Refs > 1)
    for (unsigned rx = 0; rx != NumRegs; ++rx)
      if (LiveRegs[rx] == dv)
        setLiveReg(rx, alloc(domain));
}
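
// Merge the open DomainValues A and B if their available domains intersect.
// On success, B is chained to A so existing references resolve to A.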
bool ExecutionDomainFix::merge(DomainValue *A, DomainValue *B) {
  assert(!A->isCollapsed() && "Cannot merge into collapsed");
  assert(!B->isCollapsed() && "Cannot merge from collapsed");
  if (A == B)
    return true;
  // Restrict to the domains that A and B have in common.
  unsigned common = A->getCommonDomains(B->AvailableDomains);
  if (!common)
    return false;
  A->AvailableDomains = common;
  A->Instrs.append(B->Instrs.begin(), B->Instrs.end());

  // Clear the old DomainValue so we won't try to swizzle instructions twice.
  B->clear();
  // All uses of B now refer to A.
  B->Next = retain(A);

  for (unsigned rx = 0; rx != NumRegs; ++rx) {
    assert(!LiveRegs.empty() && "no space allocated for live registers");
    if (LiveRegs[rx] == B)
      setLiveReg(rx, A);
  }
  return true;
}
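
// Set up LiveRegs at the start of a basic block by merging the live-out
// DomainValues of all already-processed predecessors.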
void ExecutionDomainFix::enterBasicBlock(
    const LoopTraversal::TraversedMBBInfo &TraversedMBB) {

  MachineBasicBlock *MBB = TraversedMBB.MBB;

  // Set up LiveRegs to represent registers entering MBB.
  // Set default domain values to 'no domain' (nullptr).
  if (LiveRegs.empty())
    LiveRegs.assign(NumRegs, nullptr);

  // This is the entry block.
  if (MBB->pred_empty()) {
    LLVM_DEBUG(dbgs() << printMBBReference(*MBB) << ": entry\n");
    return;
  }

  // Try to coalesce live-out registers from predecessors.
  for (MachineBasicBlock *pred : MBB->predecessors()) {
    assert(unsigned(pred->getNumber()) < MBBOutRegsInfos.size() &&
           "Should have pre-allocated MBBInfos for all MBBs");
    LiveRegsDVInfo &Incoming = MBBOutRegsInfos[pred->getNumber()];
    // Incoming is empty if this is a backedge from a BB
    // we haven't processed yet.
    if (Incoming.empty())
      continue;

    for (unsigned rx = 0; rx != NumRegs; ++rx) {
      DomainValue *pdv = resolve(Incoming[rx]);
      if (!pdv)
        continue;
      if (!LiveRegs[rx]) {
        setLiveReg(rx, pdv);
        continue;
      }

      // We have a live DomainValue from more than one predecessor.
      if (LiveRegs[rx]->isCollapsed()) {
        // We are already collapsed, but predecessor is not. Force it.
        unsigned Domain = LiveRegs[rx]->getFirstDomain();
        if (!pdv->isCollapsed() && pdv->hasDomain(Domain))
          collapse(pdv, Domain);
        continue;
      }

      // Currently open, merge in predecessor.
      if (!pdv->isCollapsed())
        merge(LiveRegs[rx], pdv);
      else
        force(rx, pdv->getFirstDomain());
    }
  }
  LLVM_DEBUG(dbgs() << printMBBReference(*MBB)
                    << (!TraversedMBB.IsDone ? ": incomplete\n"
                                             : ": all preds known\n"));
}
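
// Record the live-out DomainValues of the current block so successors can
// pick them up in enterBasicBlock(), then reset the per-block state.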
void ExecutionDomainFix::leaveBasicBlock(
    const LoopTraversal::TraversedMBBInfo &TraversedMBB) {
  assert(!LiveRegs.empty() && "Must enter basic block first.");
  unsigned MBBNumber = TraversedMBB.MBB->getNumber();
  assert(MBBNumber < MBBOutRegsInfos.size() &&
         "Unexpected basic block number.");
  // Save register clearances at end of MBB - used by enterBasicBlock().
  for (DomainValue *OldLiveReg : MBBOutRegsInfos[MBBNumber]) {
    release(OldLiveReg);
  }
  MBBOutRegsInfos[MBBNumber] = LiveRegs;
  LiveRegs.clear();
}
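
// Dispatch an instruction to visitHardInstr() or visitSoftInstr() depending
// on whether it is tied to a single domain or can execute in several.
// Returns true if the instruction has no execution-domain information at all.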
bool ExecutionDomainFix::visitInstr(MachineInstr *MI) {
  // Update instructions with explicit execution domains.
  std::pair<uint16_t, uint16_t> DomP = TII->getExecutionDomain(*MI);
  if (DomP.first) {
    if (DomP.second)
      visitSoftInstr(MI, DomP.second);
    else
      visitHardInstr(MI, DomP.first);
  }
  return !DomP.first;
}
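
// Scan the explicit def operands of MI and, when Kill is set (i.e. MI has no
// execution-domain information), close the DomainValues of the registers it
// redefines.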
void ExecutionDomainFix::processDefs(MachineInstr *MI, bool Kill) {
  assert(!MI->isDebugInstr() && "Won't process debug values");
  const MCInstrDesc &MCID = MI->getDesc();
  for (unsigned i = 0,
                e = MI->isVariadic() ? MI->getNumOperands() : MCID.getNumDefs();
       i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg())
      continue;
    if (MO.isUse())
      continue;
    for (int rx : regIndices(MO.getReg())) {
      // This instruction explicitly defines rx.
      LLVM_DEBUG(dbgs() << printReg(RC->getRegister(rx), TRI) << ":\t" << *MI);

      // Kill off domains redefined by generic instructions.
      if (Kill)
        kill(rx);
    }
  }
}
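
// A hard instruction only works in one domain. Force all uses and defs of
// relevant registers into that domain.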
void ExecutionDomainFix::visitHardInstr(MachineInstr *mi, unsigned domain) {
  // Collapse all uses.
  for (unsigned i = mi->getDesc().getNumDefs(),
                e = mi->getDesc().getNumOperands();
       i != e; ++i) {
    MachineOperand &mo = mi->getOperand(i);
    if (!mo.isReg())
      continue;
    for (int rx : regIndices(mo.getReg())) {
      force(rx, domain);
    }
  }

  // Kill all defs and force them.
  for (unsigned i = 0, e = mi->getDesc().getNumDefs(); i != e; ++i) {
    MachineOperand &mo = mi->getOperand(i);
    if (!mo.isReg())
      continue;
    for (int rx : regIndices(mo.getReg())) {
      kill(rx);
      force(rx, domain);
    }
  }
}
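
// A soft instruction can execute in any of the domains in mask. Narrow the
// choice using the domains of its register operands, then either collapse to
// a single domain immediately or queue the instruction on an open
// DomainValue so the decision can be made later.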
void ExecutionDomainFix::visitSoftInstr(MachineInstr *mi, unsigned mask) {
  // Bitmask of available domains for this instruction after taking collapsed
  // operands into account.
  unsigned available = mask;

  // Scan the explicit use operands for incoming domains.
  SmallVector<int, 4> used;
  if (!LiveRegs.empty())
    for (unsigned i = mi->getDesc().getNumDefs(),
                  e = mi->getDesc().getNumOperands();
         i != e; ++i) {
      MachineOperand &mo = mi->getOperand(i);
      if (!mo.isReg())
        continue;
      for (int rx : regIndices(mo.getReg())) {
        DomainValue *dv = LiveRegs[rx];
        if (dv == nullptr)
          continue;
        // Bitmask of domains that dv and available have in common.
        unsigned common = dv->getCommonDomains(available);
        // Is it possible to use this collapsed register for free?
        if (dv->isCollapsed()) {
          // Restrict available domains to the ones in common with the operand.
          // If there are no common domains, we must pay the cross-domain
          // penalty for this operand.
          if (common)
            available = common;
        } else if (common)
          // Open DomainValue is compatible, save it for merging.
          used.push_back(rx);
        else
          // Open DomainValue is not compatible with instruction. It is useless
          // now.
          kill(rx);
      }
    }

  // If the collapsed operands force a single domain, propagate the collapse.
  if (isPowerOf2_32(available)) {
    unsigned domain = countTrailingZeros(available);
    TII->setExecutionDomain(*mi, domain);
    visitHardInstr(mi, domain);
    return;
  }

  // Kill off any remaining uses that don't match available, and build a list of
  // incoming DomainValues that we want to merge.
  SmallVector<int, 4> Regs;
  for (int rx : used) {
    assert(!LiveRegs.empty() && "no space allocated for live registers");
    DomainValue *&LR = LiveRegs[rx];
    // This useless DomainValue could have been missed above.
    if (!LR->getCommonDomains(available)) {
      kill(rx);
      continue;
    }
    // Sorted insertion.
    // Enables giving priority to the latest domains during merging.
    const int Def = RDA->getReachingDef(mi, RC->getRegister(rx));
    auto I = partition_point(Regs, [&](int I) {
      return RDA->getReachingDef(mi, RC->getRegister(I)) <= Def;
    });
    Regs.insert(I, rx);
  }

  // doms are now sorted in order of appearance. Try to merge them all, giving
  // priority to the latest ones.
  DomainValue *dv = nullptr;
  while (!Regs.empty()) {
    if (!dv) {
      dv = LiveRegs[Regs.pop_back_val()];
      // Force the first dv to match the current instruction.
      dv->AvailableDomains = dv->getCommonDomains(available);
      assert(dv->AvailableDomains && "Domain should have been filtered");
      continue;
    }

    DomainValue *Latest = LiveRegs[Regs.pop_back_val()];
    // Skip already merged values.
    if (Latest == dv || Latest->Next)
      continue;
    if (merge(dv, Latest))
      continue;

    // If latest didn't merge, it is useless now. Kill all registers using it.
    for (int i : used) {
      assert(!LiveRegs.empty() && "no space allocated for live registers");
      if (LiveRegs[i] == Latest)
        kill(i);
    }
  }

  // dv is the DomainValue we are going to use for this instruction.
  if (!dv) {
    dv = alloc();
    dv->AvailableDomains = available;
  }
  dv->Instrs.push_back(mi);

  // Finally set all defs and non-collapsed uses to dv. We must iterate through
  // all the operators, including imp-def ones.
  for (MachineOperand &mo : mi->operands()) {
    if (!mo.isReg())
      continue;
    for (int rx : regIndices(mo.getReg())) {
      if (!LiveRegs[rx] || (mo.isDef() && LiveRegs[rx] != dv)) {
        kill(rx);
        setLiveReg(rx, dv);
      }
    }
  }
}
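
// Process one basic block in traversal order: merge in predecessor state,
// visit every non-debug instruction, and record the live-out state.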
void ExecutionDomainFix::processBasicBlock(
    const LoopTraversal::TraversedMBBInfo &TraversedMBB) {
  enterBasicBlock(TraversedMBB);
  // If this block is not done, it makes little sense to make any decisions
  // based on clearance information. We need to make a second pass anyway,
  // and by then we'll have better information, so we can avoid doing the work
  // to try and break dependencies now.
  for (MachineInstr &MI : *TraversedMBB.MBB) {
    if (!MI.isDebugInstr()) {
      bool Kill = false;
      if (TraversedMBB.PrimaryPass)
        Kill = visitInstr(&MI);
      processDefs(&MI, Kill);
    }
  }
  leaveBasicBlock(TraversedMBB);
}
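
// Entry point of the pass: set up per-function state, walk the blocks in the
// order computed by LoopTraversal, and release all DomainValues afterwards.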
bool ExecutionDomainFix::runOnMachineFunction(MachineFunction &mf) {
  if (skipFunction(mf.getFunction()))
    return false;
  MF = &mf;
  TII = MF->getSubtarget().getInstrInfo();
  TRI = MF->getSubtarget().getRegisterInfo();
  LiveRegs.clear();
  assert(NumRegs == RC->getNumRegs() && "Bad regclass");

  LLVM_DEBUG(dbgs() << "********** FIX EXECUTION DOMAIN: "
                    << TRI->getRegClassName(RC) << " **********\n");

  // If no relevant registers are used in the function, we can skip it
  // completely.
  bool anyregs = false;
  const MachineRegisterInfo &MRI = mf.getRegInfo();
  for (unsigned Reg : *RC) {
    if (MRI.isPhysRegUsed(Reg)) {
      anyregs = true;
      break;
    }
  }
  if (!anyregs)
    return false;

  RDA = &getAnalysis<ReachingDefAnalysis>();

  // Initialize the AliasMap on the first use.
  if (AliasMap.empty()) {
    // Given a PhysReg, AliasMap[PhysReg] returns a list of indices into RC and
    // therefore the LiveRegs array.
    AliasMap.resize(TRI->getNumRegs());
    for (unsigned i = 0, e = RC->getNumRegs(); i != e; ++i)
      for (MCRegAliasIterator AI(RC->getRegister(i), TRI, true); AI.isValid();
           ++AI)
        AliasMap[*AI].push_back(i);
  }

  // Initialize the MBBOutRegsInfos
  MBBOutRegsInfos.resize(mf.getNumBlockIDs());

  // Traverse the basic blocks.
  LoopTraversal Traversal;
  LoopTraversal::TraversalOrder TraversedMBBOrder = Traversal.traverse(mf);
  for (LoopTraversal::TraversedMBBInfo TraversedMBB : TraversedMBBOrder) {
    processBasicBlock(TraversedMBB);
  }

  for (LiveRegsDVInfo OutLiveRegs : MBBOutRegsInfos) {
    for (DomainValue *OutLiveReg : OutLiveRegs) {
      if (OutLiveReg)
        release(OutLiveReg);
    }
  }
  MBBOutRegsInfos.clear();
  Avail.clear();
  Allocator.DestroyAll();

  return false;
}