2017-08-02 07:14:32 +08:00
|
|
|
//===-- SIOptimizeExecMaskingPreRA.cpp ------------------------------------===//
|
|
|
|
//
|
|
|
|
// The LLVM Compiler Infrastructure
|
|
|
|
//
|
|
|
|
// This file is distributed under the University of Illinois Open Source
|
|
|
|
// License. See LICENSE.TXT for details.
|
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
//
|
|
|
|
/// \file
|
2018-05-01 23:54:18 +08:00
|
|
|
/// This pass removes redundant S_OR_B64 instructions enabling lanes in
|
2017-08-02 07:14:32 +08:00
|
|
|
/// the exec. If two SI_END_CF (lowered as S_OR_B64) come together without any
|
|
|
|
/// vector instructions between them we can only keep outer SI_END_CF, given
|
|
|
|
/// that CFG is structured and exec bits of the outer end statement are always
|
|
|
|
/// not less than exec bit of the inner one.
|
|
|
|
///
|
|
|
|
/// This needs to be done before the RA to eliminate saved exec bits registers
|
|
|
|
/// but after register coalescer to have no vector registers copies in between
|
|
|
|
/// of different end cf statements.
|
|
|
|
///
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
#include "AMDGPU.h"
|
|
|
|
#include "AMDGPUSubtarget.h"
|
|
|
|
#include "SIInstrInfo.h"
|
AMDGPU: Remove #include "MCTargetDesc/AMDGPUMCTargetDesc.h" from common headers
Summary:
MCTargetDesc/AMDGPUMCTargetDesc.h contains enums for all the instuction
and register defintions, which are huge so we only want to include
them where needed.
This will also make it easier if we want to split the R600 and GCN
definitions into separate tablegenerated files.
I was unable to remove AMDGPUMCTargetDesc.h from SIMachineFunctionInfo.h
because it uses some enums from the header to initialize default values
for the SIMachineFunction class, so I ended up having to remove includes of
SIMachineFunctionInfo.h from headers too.
Reviewers: arsenm, nhaehnle
Reviewed By: nhaehnle
Subscribers: MatzeB, kzhuravl, wdng, yaxunl, dstuttard, tpr, t-tye, javed.absar, llvm-commits
Differential Revision: https://reviews.llvm.org/D46272
llvm-svn: 332930
2018-05-22 10:03:23 +08:00
|
|
|
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
|
2017-12-13 10:51:04 +08:00
|
|
|
#include "llvm/CodeGen/LiveIntervals.h"
|
2017-08-02 07:14:32 +08:00
|
|
|
#include "llvm/CodeGen/MachineFunctionPass.h"
|
|
|
|
|
|
|
|
using namespace llvm;
|
|
|
|
|
|
|
|
#define DEBUG_TYPE "si-optimize-exec-masking-pre-ra"
|
|
|
|
|
|
|
|
namespace {
|
|
|
|
|
|
|
|
class SIOptimizeExecMaskingPreRA : public MachineFunctionPass {
|
|
|
|
public:
|
|
|
|
static char ID;
|
|
|
|
|
|
|
|
public:
|
|
|
|
SIOptimizeExecMaskingPreRA() : MachineFunctionPass(ID) {
|
|
|
|
initializeSIOptimizeExecMaskingPreRAPass(*PassRegistry::getPassRegistry());
|
|
|
|
}
|
|
|
|
|
|
|
|
bool runOnMachineFunction(MachineFunction &MF) override;
|
|
|
|
|
|
|
|
StringRef getPassName() const override {
|
|
|
|
return "SI optimize exec mask operations pre-RA";
|
|
|
|
}
|
|
|
|
|
|
|
|
void getAnalysisUsage(AnalysisUsage &AU) const override {
|
|
|
|
AU.addRequired<LiveIntervals>();
|
|
|
|
AU.setPreservesAll();
|
|
|
|
MachineFunctionPass::getAnalysisUsage(AU);
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
} // End anonymous namespace.
|
|
|
|
|
|
|
|
// Register the pass with the LLVM PassRegistry and declare its analysis
// dependency on LiveIntervals.
INITIALIZE_PASS_BEGIN(SIOptimizeExecMaskingPreRA, DEBUG_TYPE,
                      "SI optimize exec mask operations pre-RA", false, false)
INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
INITIALIZE_PASS_END(SIOptimizeExecMaskingPreRA, DEBUG_TYPE,
                    "SI optimize exec mask operations pre-RA", false, false)

// Pass identification: the address of ID (not its value) identifies the pass.
char SIOptimizeExecMaskingPreRA::ID = 0;

// Exported reference used by the target pass pipeline to locate this pass.
char &llvm::SIOptimizeExecMaskingPreRAID = SIOptimizeExecMaskingPreRA::ID;

/// Factory used by the AMDGPU target machine to add this pass to the
/// pre-RA pipeline.
FunctionPass *llvm::createSIOptimizeExecMaskingPreRAPass() {
  return new SIOptimizeExecMaskingPreRA();
}
|
|
|
|
|
|
|
|
static bool isEndCF(const MachineInstr& MI, const SIRegisterInfo* TRI) {
|
|
|
|
return MI.getOpcode() == AMDGPU::S_OR_B64 &&
|
|
|
|
MI.modifiesRegister(AMDGPU::EXEC, TRI);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool isFullExecCopy(const MachineInstr& MI) {
|
|
|
|
return MI.isFullCopy() && MI.getOperand(1).getReg() == AMDGPU::EXEC;
|
|
|
|
}
|
|
|
|
|
|
|
|
static unsigned getOrNonExecReg(const MachineInstr &MI,
|
|
|
|
const SIInstrInfo &TII) {
|
|
|
|
auto Op = TII.getNamedOperand(MI, AMDGPU::OpName::src1);
|
|
|
|
if (Op->isReg() && Op->getReg() != AMDGPU::EXEC)
|
|
|
|
return Op->getReg();
|
|
|
|
Op = TII.getNamedOperand(MI, AMDGPU::OpName::src0);
|
|
|
|
if (Op->isReg() && Op->getReg() != AMDGPU::EXEC)
|
|
|
|
return Op->getReg();
|
|
|
|
return AMDGPU::NoRegister;
|
|
|
|
}
|
|
|
|
|
|
|
|
/// For a lowered end-cf S_OR_B64 \p MI, return the unique defining
/// instruction of its saved-exec operand when that definition is a full
/// copy of EXEC; otherwise return nullptr.
static MachineInstr* getOrExecSource(const MachineInstr &MI,
                                     const SIInstrInfo &TII,
                                     const MachineRegisterInfo &MRI) {
  unsigned SavedExecReg = getOrNonExecReg(MI, TII);
  if (SavedExecReg == AMDGPU::NoRegister)
    return nullptr;

  MachineInstr *Def = MRI.getUniqueVRegDef(SavedExecReg);
  if (!Def || !isFullExecCopy(*Def))
    return nullptr;
  return Def;
}
|
|
|
|
|
2018-12-13 11:17:40 +08:00
|
|
|
// Optimize sequence
|
|
|
|
// %sel = V_CNDMASK_B32_e64 0, 1, %cc
|
|
|
|
// %cmp = V_CMP_NE_U32 1, %1
|
|
|
|
// $vcc = S_AND_B64 $exec, %cmp
|
|
|
|
// S_CBRANCH_VCC[N]Z
|
|
|
|
// =>
|
|
|
|
// $vcc = S_ANDN2_B64 $exec, %cc
|
|
|
|
// S_CBRANCH_VCC[N]Z
|
|
|
|
//
|
|
|
|
// It is the negation pattern inserted by DAGCombiner::visitBRCOND() in the
|
|
|
|
// rebuildSetCC(). We start with S_CBRANCH to avoid exhaustive search, but
|
|
|
|
// only 3 first instructions are really needed. S_AND_B64 with exec is a
|
|
|
|
// required part of the pattern since V_CNDMASK_B32 writes zeroes for inactive
|
|
|
|
// lanes.
|
|
|
|
//
|
|
|
|
// Returns %cc register on success.
|
|
|
|
/// Fold the DAGCombiner-inserted negation pattern
///   %sel = V_CNDMASK_B32_e64 0, 1, %cc
///   %cmp = V_CMP_NE_U32 1, %sel
///   $vcc = S_AND_B64 $exec, %cmp
/// into a single $vcc = S_ANDN2_B64 $exec, %cc, then erase the compare and
/// cndmask when they become dead.
///
/// Fix: the new S_ANDN2 source operand was built with
/// .addReg(CCReg, CC->getSubReg()) — MachineInstrBuilder::addReg takes
/// (Reg, flags, SubReg), so the subregister index was being passed as the
/// register flags. Corrected to .addReg(CCReg, 0, CC->getSubReg()).
///
/// \returns the %cc register on success, AMDGPU::NoRegister otherwise.
static unsigned optimizeVcndVcmpPair(MachineBasicBlock &MBB,
                                     const GCNSubtarget &ST,
                                     MachineRegisterInfo &MRI,
                                     LiveIntervals *LIS) {
  const SIRegisterInfo *TRI = ST.getRegisterInfo();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const unsigned AndOpc = AMDGPU::S_AND_B64;
  const unsigned Andn2Opc = AMDGPU::S_ANDN2_B64;
  const unsigned CondReg = AMDGPU::VCC;
  const unsigned ExecReg = AMDGPU::EXEC;

  // Start from the S_CBRANCH_VCC[N]Z terminator to avoid an exhaustive
  // search for the pattern.
  auto I = llvm::find_if(MBB.terminators(), [](const MachineInstr &MI) {
                           unsigned Opc = MI.getOpcode();
                           return Opc == AMDGPU::S_CBRANCH_VCCZ ||
                                  Opc == AMDGPU::S_CBRANCH_VCCNZ; });
  if (I == MBB.terminators().end())
    return AMDGPU::NoRegister;

  // Find the S_AND_B64 that defines VCC reaching the branch.
  auto *And = TRI->findReachingDef(CondReg, AMDGPU::NoSubRegister,
                                   *I, MRI, LIS);
  if (!And || And->getOpcode() != AndOpc ||
      !And->getOperand(1).isReg() || !And->getOperand(2).isReg())
    return AMDGPU::NoRegister;

  // One S_AND_B64 source must be EXEC; the other is the compare result.
  MachineOperand *AndCC = &And->getOperand(1);
  unsigned CmpReg = AndCC->getReg();
  unsigned CmpSubReg = AndCC->getSubReg();
  if (CmpReg == ExecReg) {
    AndCC = &And->getOperand(2);
    CmpReg = AndCC->getReg();
    CmpSubReg = AndCC->getSubReg();
  } else if (And->getOperand(2).getReg() != ExecReg) {
    return AMDGPU::NoRegister;
  }

  // The compare must be a V_CMP_NE_U32 in the same block.
  auto *Cmp = TRI->findReachingDef(CmpReg, CmpSubReg, *And, MRI, LIS);
  if (!Cmp || !(Cmp->getOpcode() == AMDGPU::V_CMP_NE_U32_e32 ||
                Cmp->getOpcode() == AMDGPU::V_CMP_NE_U32_e64) ||
      Cmp->getParent() != And->getParent())
    return AMDGPU::NoRegister;

  // Normalize to (reg, imm); require a compare against the constant 1.
  MachineOperand *Op1 = TII->getNamedOperand(*Cmp, AMDGPU::OpName::src0);
  MachineOperand *Op2 = TII->getNamedOperand(*Cmp, AMDGPU::OpName::src1);
  if (Op1->isImm() && Op2->isReg())
    std::swap(Op1, Op2);
  if (!Op1->isReg() || !Op2->isImm() || Op2->getImm() != 1)
    return AMDGPU::NoRegister;

  // The compared register must come from V_CNDMASK_B32_e64 0, 1, %cc.
  unsigned SelReg = Op1->getReg();
  auto *Sel = TRI->findReachingDef(SelReg, Op1->getSubReg(), *Cmp, MRI, LIS);
  if (!Sel || Sel->getOpcode() != AMDGPU::V_CNDMASK_B32_e64)
    return AMDGPU::NoRegister;

  Op1 = TII->getNamedOperand(*Sel, AMDGPU::OpName::src0);
  Op2 = TII->getNamedOperand(*Sel, AMDGPU::OpName::src1);
  MachineOperand *CC = TII->getNamedOperand(*Sel, AMDGPU::OpName::src2);
  if (!Op1->isImm() || !Op2->isImm() || !CC->isReg() ||
      Op1->getImm() != 0 || Op2->getImm() != 1)
    return AMDGPU::NoRegister;

  LLVM_DEBUG(dbgs() << "Folding sequence:\n\t" << *Sel << '\t'
                    << *Cmp << '\t' << *And);

  // Replace the S_AND_B64 with $vcc = S_ANDN2_B64 $exec, %cc and keep
  // LiveIntervals' instruction maps in sync.
  unsigned CCReg = CC->getReg();
  LIS->RemoveMachineInstrFromMaps(*And);
  MachineInstr *Andn2 = BuildMI(MBB, *And, And->getDebugLoc(),
                                TII->get(Andn2Opc), And->getOperand(0).getReg())
                            .addReg(ExecReg)
                            .addReg(CCReg, 0, CC->getSubReg());
  And->eraseFromParent();
  LIS->InsertMachineInstrInMaps(*Andn2);

  LLVM_DEBUG(dbgs() << "=>\n\t" << *Andn2 << '\n');

  // Try to remove compare. Cmp value should not used in between of cmp
  // and s_and_b64 if VCC or just unused if any other register.
  if ((TargetRegisterInfo::isVirtualRegister(CmpReg) &&
       MRI.use_nodbg_empty(CmpReg)) ||
      (CmpReg == CondReg &&
       std::none_of(std::next(Cmp->getIterator()), Andn2->getIterator(),
                    [&](const MachineInstr &MI) {
                      return MI.readsRegister(CondReg, TRI); }))) {
    LLVM_DEBUG(dbgs() << "Erasing: " << *Cmp << '\n');

    LIS->RemoveMachineInstrFromMaps(*Cmp);
    Cmp->eraseFromParent();

    // Try to remove v_cndmask_b32 as well once the compare is gone.
    if (TargetRegisterInfo::isVirtualRegister(SelReg) &&
        MRI.use_nodbg_empty(SelReg)) {
      LLVM_DEBUG(dbgs() << "Erasing: " << *Sel << '\n');

      LIS->RemoveMachineInstrFromMaps(*Sel);
      Sel->eraseFromParent();
    }
  }

  return CCReg;
}
|
|
|
|
|
2017-08-02 07:14:32 +08:00
|
|
|
/// Run all three pre-RA exec-mask cleanups over the function:
///  1. fold the v_cndmask/v_cmp/s_and negation pattern (optimizeVcndVcmpPair),
///  2. delete side-effect-free instructions that only feed a bare S_ENDPGM,
///  3. collapse adjacent end-cf S_OR_B64s and fold the saved-exec copy.
/// Registers touched by any deletion are collected in RecalcRegs and their
/// live ranges are recomputed at the end.
bool SIOptimizeExecMaskingPreRA::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(MF.getFunction()))
    return false;

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIRegisterInfo *TRI = ST.getRegisterInfo();
  const SIInstrInfo *TII = ST.getInstrInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  LiveIntervals *LIS = &getAnalysis<LiveIntervals>();
  // EXEC halves are always recomputed since every transform here can
  // perturb the exec mask's liveness.
  DenseSet<unsigned> RecalcRegs({AMDGPU::EXEC_LO, AMDGPU::EXEC_HI});
  bool Changed = false;

  for (MachineBasicBlock &MBB : MF) {

    // Transform 1: fold the branch-condition negation pattern.
    if (unsigned Reg = optimizeVcndVcmpPair(MBB, ST, MRI, LIS)) {
      RecalcRegs.insert(Reg);
      RecalcRegs.insert(AMDGPU::VCC_LO);
      RecalcRegs.insert(AMDGPU::VCC_HI);
      RecalcRegs.insert(AMDGPU::SCC);
      Changed = true;
    }

    // Try to remove unneeded instructions before s_endpgm.
    if (MBB.succ_empty()) {
      if (MBB.empty())
        continue;

      // Skip this if the endpgm has any implicit uses, otherwise we would need
      // to be careful to update / remove them.
      MachineInstr &Term = MBB.back();
      if (Term.getOpcode() != AMDGPU::S_ENDPGM ||
          Term.getNumOperands() != 0)
        continue;

      // Worklist of blocks whose trailing instructions feed only S_ENDPGM.
      SmallVector<MachineBasicBlock*, 4> Blocks({&MBB});

      while (!Blocks.empty()) {
        auto CurBB = Blocks.pop_back_val();
        auto I = CurBB->rbegin(), E = CurBB->rend();
        if (I != E) {
          // Step over a trailing fallthrough branch or the endpgm itself;
          // a conditional branch means the tail is not dead.
          if (I->isUnconditionalBranch() || I->getOpcode() == AMDGPU::S_ENDPGM)
            ++I;
          else if (I->isBranch())
            continue;
        }

        // Walk backwards deleting instructions whose results cannot be
        // observed after the program ends.
        while (I != E) {
          if (I->isDebugInstr()) {
            // Debug values are skipped, not deleted.
            I = std::next(I);
            continue;
          }

          // Stop at anything with an observable effect.
          if (I->mayStore() || I->isBarrier() || I->isCall() ||
              I->hasUnmodeledSideEffects() || I->hasOrderedMemoryRef())
            break;

          LLVM_DEBUG(dbgs()
                     << "Removing no effect instruction: " << *I << '\n');

          // Queue every register operand for live-range recomputation.
          for (auto &Op : I->operands()) {
            if (Op.isReg())
              RecalcRegs.insert(Op.getReg());
          }

          // Grab the next (earlier) instruction before erasing this one.
          auto Next = std::next(I);
          LIS->RemoveMachineInstrFromMaps(*I);
          I->eraseFromParent();
          I = Next;

          Changed = true;
        }

        // Only ascend if this whole block was consumed.
        if (I != E)
          continue;

        // Try to ascend predecessors.
        for (auto *Pred : CurBB->predecessors()) {
          // Only predecessors that fall exclusively into this dead tail.
          if (Pred->succ_size() == 1)
            Blocks.push_back(Pred);
        }
      }
      continue;
    }

    // Try to collapse adjacent endifs.
    auto Lead = MBB.begin(), E = MBB.end();
    if (MBB.succ_size() != 1 || Lead == E || !isEndCF(*Lead, TRI))
      continue;

    const MachineBasicBlock* Succ = *MBB.succ_begin();
    if (!MBB.isLayoutSuccessor(Succ))
      continue;

    auto I = std::next(Lead);

    // The inner end-cf is removable only if nothing between it and the
    // outer one is a vector op or reads EXEC.
    for ( ; I != E; ++I)
      if (!TII->isSALU(*I) || I->readsRegister(AMDGPU::EXEC, TRI))
        break;

    if (I != E)
      continue;

    // The successor must start with the outer end-cf whose saved-exec
    // operand is a full copy of EXEC.
    const auto NextLead = Succ->begin();
    if (NextLead == Succ->end() || !isEndCF(*NextLead, TRI) ||
        !getOrExecSource(*NextLead, *TII, MRI))
      continue;

    LLVM_DEBUG(dbgs() << "Redundant EXEC = S_OR_B64 found: " << *Lead << '\n');

    // Capture the saved-exec copy and register before deleting the inner
    // end-cf.
    auto SaveExec = getOrExecSource(*Lead, *TII, MRI);
    unsigned SaveExecReg = getOrNonExecReg(*Lead, *TII);
    for (auto &Op : Lead->operands()) {
      if (Op.isReg())
        RecalcRegs.insert(Op.getReg());
    }

    LIS->RemoveMachineInstrFromMaps(*Lead);
    Lead->eraseFromParent();
    if (SaveExecReg) {
      LIS->removeInterval(SaveExecReg);
      LIS->createAndComputeVirtRegInterval(SaveExecReg);
    }

    Changed = true;

    // If the only use of saved exec in the removed instruction is S_AND_B64
    // fold the copy now.
    if (!SaveExec || !SaveExec->isFullCopy())
      continue;

    unsigned SavedExec = SaveExec->getOperand(0).getReg();
    bool SafeToReplace = true;
    // The copy can only be folded if every use stays within its own block.
    for (auto& U : MRI.use_nodbg_instructions(SavedExec)) {
      if (U.getParent() != SaveExec->getParent()) {
        SafeToReplace = false;
        break;
      }

      LLVM_DEBUG(dbgs() << "Redundant EXEC COPY: " << *SaveExec << '\n');
    }

    if (SafeToReplace) {
      LIS->RemoveMachineInstrFromMaps(*SaveExec);
      SaveExec->eraseFromParent();
      MRI.replaceRegWith(SavedExec, AMDGPU::EXEC);
      LIS->removeInterval(SavedExec);
    }
  }

  // Recompute live ranges for every register touched by a deletion:
  // virtual registers get fresh intervals, physical registers have their
  // register units invalidated so LIS recomputes them lazily.
  if (Changed) {
    for (auto Reg : RecalcRegs) {
      if (TargetRegisterInfo::isVirtualRegister(Reg)) {
        LIS->removeInterval(Reg);
        if (!MRI.reg_empty(Reg))
          LIS->createAndComputeVirtRegInterval(Reg);
      } else {
        for (MCRegUnitIterator U(Reg, TRI); U.isValid(); ++U)
          LIS->removeRegUnit(*U);
      }
    }
  }

  return Changed;
}
|