Finish moving TargetRegisterInfo::isVirtualRegister() and friends to llvm::Register as started by r367614. NFC

llvm-svn: 367633
Daniel Sanders 2019-08-01 23:27:28 +00:00
parent 9debb024d4
commit 2bea69bf65
195 changed files with 1039 additions and 1158 deletions
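The pattern repeated across every hunk below is purely mechanical: call sites that used the helpers on TargetRegisterInfo, whether spelled TargetRegisterInfo::isVirtualRegister(Reg) or TRI->isVirtualRegister(Reg), now call the equivalent statics on llvm::Register, and the forwarding statics that TargetRegisterInfo kept (each just returning the Register:: equivalent) are deleted in one of the header hunks below. A minimal sketch of the call-site change, assuming the LLVM headers of this era; the helper function names here are illustrative and not taken from any file in the diff:

```cpp
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/Register.h"

// Before: TargetRegisterInfo::isVirtualRegister(MO.getReg())
//     or: TRI->isVirtualRegister(MO.getReg())
// After:  Register::isVirtualRegister(MO.getReg())
static bool isVRegOperand(const llvm::MachineOperand &MO) {
  return MO.isReg() && llvm::Register::isVirtualRegister(MO.getReg());
}

// The index conversions move the same way:
// TargetRegisterInfo::virtReg2Index(Reg) -> Register::virtReg2Index(Reg)
static unsigned vregIndex(unsigned Reg) {
  return llvm::Register::virtReg2Index(Reg);
}
```

Because the commit is marked NFC, the Register:: calls are drop-in replacements for the old spellings; no behavior changes.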

View File

@ -98,7 +98,7 @@ bool InstructionSelector::executeMatchTable(
return false;
break;
}
if (TRI.isPhysicalRegister(MO.getReg())) {
if (Register::isPhysicalRegister(MO.getReg())) {
DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
dbgs() << CurrentIdx << ": Is a physical register\n");
if (handleReject() == RejectAndGiveUp)

View File

@ -54,7 +54,7 @@ public:
if (!O->isReg())
continue;
unsigned Reg = O->getReg();
if (!TargetRegisterInfo::isPhysicalRegister(Reg))
if (!Register::isPhysicalRegister(Reg))
continue;
if (O->isDef()) {
// Some architectures (e.g. AArch64 XZR/WZR) have registers that are

View File

@ -490,16 +490,13 @@ inline unsigned getRenamableRegState(bool B) {
/// Get all register state flags from machine operand \p RegOp.
inline unsigned getRegState(const MachineOperand &RegOp) {
assert(RegOp.isReg() && "Not a register operand");
return getDefRegState(RegOp.isDef()) |
getImplRegState(RegOp.isImplicit()) |
getKillRegState(RegOp.isKill()) |
getDeadRegState(RegOp.isDead()) |
getUndefRegState(RegOp.isUndef()) |
getInternalReadRegState(RegOp.isInternalRead()) |
getDebugRegState(RegOp.isDebug()) |
getRenamableRegState(
TargetRegisterInfo::isPhysicalRegister(RegOp.getReg()) &&
RegOp.isRenamable());
return getDefRegState(RegOp.isDef()) | getImplRegState(RegOp.isImplicit()) |
getKillRegState(RegOp.isKill()) | getDeadRegState(RegOp.isDead()) |
getUndefRegState(RegOp.isUndef()) |
getInternalReadRegState(RegOp.isInternalRead()) |
getDebugRegState(RegOp.isDebug()) |
getRenamableRegState(Register::isPhysicalRegister(RegOp.getReg()) &&
RegOp.isRenamable());
}
/// Helper class for constructing bundles of MachineInstrs.

View File

@ -108,13 +108,13 @@ private:
/// getRegUseDefListHead - Return the head pointer for the register use/def
/// list for the specified virtual or physical register.
MachineOperand *&getRegUseDefListHead(unsigned RegNo) {
if (TargetRegisterInfo::isVirtualRegister(RegNo))
if (Register::isVirtualRegister(RegNo))
return VRegInfo[RegNo].second;
return PhysRegUseDefLists[RegNo];
}
MachineOperand *getRegUseDefListHead(unsigned RegNo) const {
if (TargetRegisterInfo::isVirtualRegister(RegNo))
if (Register::isVirtualRegister(RegNo))
return VRegInfo[RegNo].second;
return PhysRegUseDefLists[RegNo];
}
@ -215,7 +215,7 @@ public:
return subRegLivenessEnabled() && RC.HasDisjunctSubRegs;
}
bool shouldTrackSubRegLiveness(unsigned VReg) const {
assert(TargetRegisterInfo::isVirtualRegister(VReg) && "Must pass a VReg");
assert(Register::isVirtualRegister(VReg) && "Must pass a VReg");
return shouldTrackSubRegLiveness(*getRegClass(VReg));
}
bool subRegLivenessEnabled() const {
@ -727,7 +727,7 @@ public:
/// Get the low-level type of \p Reg or LLT{} if Reg is not a generic
/// (target independent) virtual register.
LLT getType(unsigned Reg) const {
if (TargetRegisterInfo::isVirtualRegister(Reg) && VRegToType.inBounds(Reg))
if (Register::isVirtualRegister(Reg) && VRegToType.inBounds(Reg))
return VRegToType[Reg];
return LLT{};
}
@ -760,7 +760,7 @@ public:
/// specified virtual register. This is typically used by target, and in case
/// of an earlier hint it will be overwritten.
void setRegAllocationHint(unsigned VReg, unsigned Type, unsigned PrefReg) {
assert(TargetRegisterInfo::isVirtualRegister(VReg));
assert(Register::isVirtualRegister(VReg));
RegAllocHints[VReg].first = Type;
RegAllocHints[VReg].second.clear();
RegAllocHints[VReg].second.push_back(PrefReg);
@ -769,7 +769,7 @@ public:
/// addRegAllocationHint - Add a register allocation hint to the hints
/// vector for VReg.
void addRegAllocationHint(unsigned VReg, unsigned PrefReg) {
assert(TargetRegisterInfo::isVirtualRegister(VReg));
assert(Register::isVirtualRegister(VReg));
RegAllocHints[VReg].second.push_back(PrefReg);
}
@ -790,7 +790,7 @@ public:
/// one with the greatest weight.
std::pair<unsigned, unsigned>
getRegAllocationHint(unsigned VReg) const {
assert(TargetRegisterInfo::isVirtualRegister(VReg));
assert(Register::isVirtualRegister(VReg));
unsigned BestHint = (RegAllocHints[VReg].second.size() ?
RegAllocHints[VReg].second[0] : 0);
return std::pair<unsigned, unsigned>(RegAllocHints[VReg].first, BestHint);
@ -799,7 +799,7 @@ public:
/// getSimpleHint - same as getRegAllocationHint except it will only return
/// a target independent hint.
unsigned getSimpleHint(unsigned VReg) const {
assert(TargetRegisterInfo::isVirtualRegister(VReg));
assert(Register::isVirtualRegister(VReg));
std::pair<unsigned, unsigned> Hint = getRegAllocationHint(VReg);
return Hint.first ? 0 : Hint.second;
}
@ -808,7 +808,7 @@ public:
/// register allocation hints for VReg.
const std::pair<unsigned, SmallVector<unsigned, 4>>
&getRegAllocationHints(unsigned VReg) const {
assert(TargetRegisterInfo::isVirtualRegister(VReg));
assert(Register::isVirtualRegister(VReg));
return RegAllocHints[VReg];
}
@ -1164,7 +1164,7 @@ public:
PSetIterator(unsigned RegUnit, const MachineRegisterInfo *MRI) {
const TargetRegisterInfo *TRI = MRI->getTargetRegisterInfo();
if (TargetRegisterInfo::isVirtualRegister(RegUnit)) {
if (Register::isVirtualRegister(RegUnit)) {
const TargetRegisterClass *RC = MRI->getRegClass(RegUnit);
PSet = TRI->getRegClassPressureSets(RC);
Weight = TRI->getRegClassWeight(RC).RegWeight;

View File

@ -110,7 +110,7 @@ public:
/// getLastCalleeSavedAlias - Returns the last callee saved register that
/// overlaps PhysReg, or 0 if Reg doesn't overlap a CalleeSavedAliases.
unsigned getLastCalleeSavedAlias(unsigned PhysReg) const {
assert(TargetRegisterInfo::isPhysicalRegister(PhysReg));
assert(Register::isPhysicalRegister(PhysReg));
if (PhysReg < CalleeSavedAliases.size())
return CalleeSavedAliases[PhysReg];
return 0;

View File

@ -273,15 +273,15 @@ private:
unsigned NumRegUnits;
unsigned getSparseIndexFromReg(unsigned Reg) const {
if (TargetRegisterInfo::isVirtualRegister(Reg))
return TargetRegisterInfo::virtReg2Index(Reg) + NumRegUnits;
if (Register::isVirtualRegister(Reg))
return Register::virtReg2Index(Reg) + NumRegUnits;
assert(Reg < NumRegUnits);
return Reg;
}
unsigned getRegFromSparseIndex(unsigned SparseIndex) const {
if (SparseIndex >= NumRegUnits)
return TargetRegisterInfo::index2VirtReg(SparseIndex-NumRegUnits);
return Register::index2VirtReg(SparseIndex-NumRegUnits);
return SparseIndex;
}

View File

@ -57,7 +57,7 @@ namespace llvm {
: VirtReg(VReg), LaneMask(LaneMask), SU(SU) {}
unsigned getSparseSetIndex() const {
return TargetRegisterInfo::virtReg2Index(VirtReg);
return Register::virtReg2Index(VirtReg);
}
};

View File

@ -258,52 +258,6 @@ public:
// Further sentinels can be allocated from the small negative integers.
// DenseMapInfo<unsigned> uses -1u and -2u.
/// isStackSlot - Sometimes it is useful to be able to store a non-negative
/// frame index in a variable that normally holds a register. isStackSlot()
/// returns true if Reg is in the range used for stack slots.
///
/// Note that isVirtualRegister() and isPhysicalRegister() cannot handle stack
/// slots, so if a variable may contain a stack slot, always check
/// isStackSlot() first.
///
static bool isStackSlot(unsigned Reg) {
return Register::isStackSlot(Reg);
}
/// Compute the frame index from a register value representing a stack slot.
static int stackSlot2Index(unsigned Reg) {
return Register::stackSlot2Index(Reg);
}
/// Convert a non-negative frame index to a stack slot register value.
static unsigned index2StackSlot(int FI) {
return Register::index2StackSlot(FI);
}
/// Return true if the specified register number is in
/// the physical register namespace.
static bool isPhysicalRegister(unsigned Reg) {
return Register::isPhysicalRegister(Reg);
}
/// Return true if the specified register number is in
/// the virtual register namespace.
static bool isVirtualRegister(unsigned Reg) {
return Register::isVirtualRegister(Reg);
}
/// Convert a virtual register number to a 0-based index.
/// The first virtual register in a function will get the index 0.
static unsigned virtReg2Index(unsigned Reg) {
return Register::virtReg2Index(Reg);
}
/// Convert a 0-based index to a virtual register number.
/// This is the inverse operation of VirtReg2IndexFunctor below.
static unsigned index2VirtReg(unsigned Index) {
return Register::index2VirtReg(Index);
}
/// Return the size in bits of a register from class RC.
unsigned getRegSizeInBits(const TargetRegisterClass &RC) const {
return getRegClassInfo(RC).RegSize;
@ -416,7 +370,7 @@ public:
/// The registers may be virtual registers.
bool regsOverlap(unsigned regA, unsigned regB) const {
if (regA == regB) return true;
if (isVirtualRegister(regA) || isVirtualRegister(regB))
if (Register::isVirtualRegister(regA) || Register::isVirtualRegister(regB))
return false;
// Regunits are numerically ordered. Find a common unit.
@ -1156,7 +1110,7 @@ public:
struct VirtReg2IndexFunctor {
using argument_type = unsigned;
unsigned operator()(unsigned Reg) const {
return TargetRegisterInfo::virtReg2Index(Reg);
return Register::virtReg2Index(Reg);
}
};

View File

@ -111,7 +111,7 @@ class TargetInstrInfo;
/// clears the specified virtual register's physical
/// register mapping
void clearVirt(unsigned virtReg) {
assert(TargetRegisterInfo::isVirtualRegister(virtReg));
assert(Register::isVirtualRegister(virtReg));
assert(Virt2PhysMap[virtReg] != NO_PHYS_REG &&
"attempt to clear a not assigned virtual register");
Virt2PhysMap[virtReg] = NO_PHYS_REG;
@ -163,7 +163,7 @@ class TargetInstrInfo;
/// returns the stack slot mapped to the specified virtual
/// register
int getStackSlot(unsigned virtReg) const {
assert(TargetRegisterInfo::isVirtualRegister(virtReg));
assert(Register::isVirtualRegister(virtReg));
return Virt2StackSlotMap[virtReg];
}

View File

@ -275,7 +275,7 @@ void llvm::calculateDbgEntityHistory(const MachineFunction *MF,
continue;
// If this is a virtual register, only clobber it since it doesn't
// have aliases.
if (TRI->isVirtualRegister(MO.getReg()))
if (Register::isVirtualRegister(MO.getReg()))
clobberRegisterUses(RegVars, MO.getReg(), DbgValues, LiveEntries,
MI);
// If this is a register def operand, it may end a debug value
@ -296,7 +296,7 @@ void llvm::calculateDbgEntityHistory(const MachineFunction *MF,
// Don't consider SP to be clobbered by register masks.
for (auto It : RegVars) {
unsigned int Reg = It.first;
if (Reg != SP && TRI->isPhysicalRegister(Reg) &&
if (Reg != SP && Register::isPhysicalRegister(Reg) &&
MO.clobbersPhysReg(Reg))
RegsToClobber.push_back(Reg);
}

View File

@ -399,7 +399,7 @@ DIE &DwarfCompileUnit::updateSubprogramScopeDIE(const DISubprogram *SP) {
} else {
const TargetRegisterInfo *RI = Asm->MF->getSubtarget().getRegisterInfo();
MachineLocation Location(RI->getFrameRegister(*Asm->MF));
if (RI->isPhysicalRegister(Location.getReg()))
if (Register::isPhysicalRegister(Location.getReg()))
addAddress(*SPDie, dwarf::DW_AT_frame_base, Location);
}
}

View File

@ -606,7 +606,8 @@ static void collectCallSiteParameters(const MachineInstr *CallMI,
return;
for (const MachineOperand &MO : MI.operands()) {
if (MO.isReg() && MO.isDef() && TRI->isPhysicalRegister(MO.getReg())) {
if (MO.isReg() && MO.isDef() &&
Register::isPhysicalRegister(MO.getReg())) {
for (auto FwdReg : ForwardedRegWorklist) {
if (TRI->regsOverlap(FwdReg, MO.getReg())) {
Defs.push_back(FwdReg);

View File

@ -15,6 +15,7 @@
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/CodeGen/Register.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/Support/ErrorHandling.h"
@ -97,7 +98,7 @@ void DwarfExpression::addAnd(unsigned Mask) {
bool DwarfExpression::addMachineReg(const TargetRegisterInfo &TRI,
unsigned MachineReg, unsigned MaxSize) {
if (!TRI.isPhysicalRegister(MachineReg)) {
if (!llvm::Register::isPhysicalRegister(MachineReg)) {
if (isFrameRegister(TRI, MachineReg)) {
DwarfRegs.push_back({-1, 0, nullptr});
return true;

View File

@ -1843,7 +1843,7 @@ static MachineBasicBlock *findFalseBlock(MachineBasicBlock *BB,
template <class Container>
static void addRegAndItsAliases(unsigned Reg, const TargetRegisterInfo *TRI,
Container &Set) {
if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
if (Register::isPhysicalRegister(Reg)) {
for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
Set.insert(*AI);
} else {
@ -1944,7 +1944,7 @@ MachineBasicBlock::iterator findHoistingInsertPosAndDeps(MachineBasicBlock *MBB,
addRegAndItsAliases(Reg, TRI, Uses);
} else {
if (Uses.erase(Reg)) {
if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
if (Register::isPhysicalRegister(Reg)) {
for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs)
Uses.erase(*SubRegs); // Use sub-registers to be conservative
}
@ -2066,7 +2066,7 @@ bool BranchFolder::HoistCommonCodeInSuccs(MachineBasicBlock *MBB) {
if (!AllDefsSet.count(Reg)) {
continue;
}
if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
if (Register::isPhysicalRegister(Reg)) {
for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
ActiveDefsSet.erase(*AI);
} else {
@ -2079,7 +2079,7 @@ bool BranchFolder::HoistCommonCodeInSuccs(MachineBasicBlock *MBB) {
if (!MO.isReg() || !MO.isDef() || MO.isDead())
continue;
unsigned Reg = MO.getReg();
if (!Reg || TargetRegisterInfo::isVirtualRegister(Reg))
if (!Reg || Register::isVirtualRegister(Reg))
continue;
addRegAndItsAliases(Reg, TRI, ActiveDefsSet);
addRegAndItsAliases(Reg, TRI, AllDefsSet);

View File

@ -40,7 +40,7 @@ void llvm::calculateSpillWeightsAndHints(LiveIntervals &LIS,
MachineRegisterInfo &MRI = MF.getRegInfo();
VirtRegAuxInfo VRAI(MF, LIS, VRM, MLI, MBFI, norm);
for (unsigned i = 0, e = MRI.getNumVirtRegs(); i != e; ++i) {
unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
unsigned Reg = Register::index2VirtReg(i);
if (MRI.reg_nodbg_empty(Reg))
continue;
VRAI.calculateSpillWeightAndHint(LIS.getInterval(Reg));
@ -65,7 +65,7 @@ static unsigned copyHint(const MachineInstr *mi, unsigned reg,
if (!hreg)
return 0;
if (TargetRegisterInfo::isVirtualRegister(hreg))
if (Register::isVirtualRegister(hreg))
return sub == hsub ? hreg : 0;
const TargetRegisterClass *rc = mri.getRegClass(reg);
@ -112,7 +112,7 @@ static bool isRematerializable(const LiveInterval &LI,
// If the original (pre-splitting) registers match this
// copy came from a split.
if (!TargetRegisterInfo::isVirtualRegister(Reg) ||
if (!Register::isVirtualRegister(Reg) ||
VRM->getOriginal(Reg) != Original)
return false;
@ -251,8 +251,9 @@ float VirtRegAuxInfo::weightCalcHelper(LiveInterval &li, SlotIndex *start,
//
// FIXME: we probably shouldn't use floats at all.
volatile float hweight = Hint[hint] += weight;
if (TargetRegisterInfo::isVirtualRegister(hint) || mri.isAllocatable(hint))
CopyHints.insert(CopyHint(hint, hweight, tri.isPhysicalRegister(hint)));
if (Register::isVirtualRegister(hint) || mri.isAllocatable(hint))
CopyHints.insert(
CopyHint(hint, hweight, Register::isPhysicalRegister(hint)));
}
Hint.clear();

View File

@ -76,7 +76,7 @@ bool DeadMachineInstructionElim::isDead(const MachineInstr *MI) const {
const MachineOperand &MO = MI->getOperand(i);
if (MO.isReg() && MO.isDef()) {
unsigned Reg = MO.getReg();
if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
if (Register::isPhysicalRegister(Reg)) {
// Don't delete live physreg defs, or any reserved register defs.
if (LivePhysRegs.test(Reg) || MRI->isReserved(Reg))
return false;
@ -141,7 +141,7 @@ bool DeadMachineInstructionElim::runOnMachineFunction(MachineFunction &MF) {
const MachineOperand &MO = MI->getOperand(i);
if (MO.isReg() && MO.isDef()) {
unsigned Reg = MO.getReg();
if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
if (Register::isPhysicalRegister(Reg)) {
// Check the subreg set, not the alias set, because a def
// of a super-register may still be partially live after
// this def.
@ -160,7 +160,7 @@ bool DeadMachineInstructionElim::runOnMachineFunction(MachineFunction &MF) {
const MachineOperand &MO = MI->getOperand(i);
if (MO.isReg() && MO.isUse()) {
unsigned Reg = MO.getReg();
if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
if (Register::isPhysicalRegister(Reg)) {
for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
LivePhysRegs.set(*AI);
}

View File

@ -195,7 +195,7 @@ void DetectDeadLanes::addUsedLanesOnOperand(const MachineOperand &MO,
if (!MO.readsReg())
return;
unsigned MOReg = MO.getReg();
if (!TargetRegisterInfo::isVirtualRegister(MOReg))
if (!Register::isVirtualRegister(MOReg))
return;
unsigned MOSubReg = MO.getSubReg();
@ -203,7 +203,7 @@ void DetectDeadLanes::addUsedLanesOnOperand(const MachineOperand &MO,
UsedLanes = TRI->composeSubRegIndexLaneMask(MOSubReg, UsedLanes);
UsedLanes &= MRI->getMaxLaneMaskForVReg(MOReg);
unsigned MORegIdx = TargetRegisterInfo::virtReg2Index(MOReg);
unsigned MORegIdx = Register::virtReg2Index(MOReg);
VRegInfo &MORegInfo = VRegInfos[MORegIdx];
LaneBitmask PrevUsedLanes = MORegInfo.UsedLanes;
// Any change at all?
@ -219,7 +219,7 @@ void DetectDeadLanes::addUsedLanesOnOperand(const MachineOperand &MO,
void DetectDeadLanes::transferUsedLanesStep(const MachineInstr &MI,
LaneBitmask UsedLanes) {
for (const MachineOperand &MO : MI.uses()) {
if (!MO.isReg() || !TargetRegisterInfo::isVirtualRegister(MO.getReg()))
if (!MO.isReg() || !Register::isVirtualRegister(MO.getReg()))
continue;
LaneBitmask UsedOnMO = transferUsedLanes(MI, UsedLanes, MO);
addUsedLanesOnOperand(MO, UsedOnMO);
@ -230,8 +230,8 @@ LaneBitmask DetectDeadLanes::transferUsedLanes(const MachineInstr &MI,
LaneBitmask UsedLanes,
const MachineOperand &MO) const {
unsigned OpNum = MI.getOperandNo(&MO);
assert(lowersToCopies(MI) && DefinedByCopy[
TargetRegisterInfo::virtReg2Index(MI.getOperand(0).getReg())]);
assert(lowersToCopies(MI) &&
DefinedByCopy[Register::virtReg2Index(MI.getOperand(0).getReg())]);
switch (MI.getOpcode()) {
case TargetOpcode::COPY:
@ -286,9 +286,9 @@ void DetectDeadLanes::transferDefinedLanesStep(const MachineOperand &Use,
return;
const MachineOperand &Def = *MI.defs().begin();
unsigned DefReg = Def.getReg();
if (!TargetRegisterInfo::isVirtualRegister(DefReg))
if (!Register::isVirtualRegister(DefReg))
return;
unsigned DefRegIdx = TargetRegisterInfo::virtReg2Index(DefReg);
unsigned DefRegIdx = Register::virtReg2Index(DefReg);
if (!DefinedByCopy.test(DefRegIdx))
return;
@ -360,7 +360,7 @@ LaneBitmask DetectDeadLanes::determineInitialDefinedLanes(unsigned Reg) {
if (lowersToCopies(DefMI)) {
// Start optimistically with no used or defined lanes for copy
// instructions. The following dataflow analysis will add more bits.
unsigned RegIdx = TargetRegisterInfo::virtReg2Index(Reg);
unsigned RegIdx = Register::virtReg2Index(Reg);
DefinedByCopy.set(RegIdx);
PutInWorklist(RegIdx);
@ -382,12 +382,12 @@ LaneBitmask DetectDeadLanes::determineInitialDefinedLanes(unsigned Reg) {
continue;
LaneBitmask MODefinedLanes;
if (TargetRegisterInfo::isPhysicalRegister(MOReg)) {
if (Register::isPhysicalRegister(MOReg)) {
MODefinedLanes = LaneBitmask::getAll();
} else if (isCrossCopy(*MRI, DefMI, DefRC, MO)) {
MODefinedLanes = LaneBitmask::getAll();
} else {
assert(TargetRegisterInfo::isVirtualRegister(MOReg));
assert(Register::isVirtualRegister(MOReg));
if (MRI->hasOneDef(MOReg)) {
const MachineOperand &MODef = *MRI->def_begin(MOReg);
const MachineInstr &MODefMI = *MODef.getParent();
@ -431,7 +431,7 @@ LaneBitmask DetectDeadLanes::determineInitialUsedLanes(unsigned Reg) {
unsigned DefReg = Def.getReg();
// The used lanes of COPY-like instruction operands are determined by the
// following dataflow analysis.
if (TargetRegisterInfo::isVirtualRegister(DefReg)) {
if (Register::isVirtualRegister(DefReg)) {
// But ignore copies across incompatible register classes.
bool CrossCopy = false;
if (lowersToCopies(UseMI)) {
@ -471,9 +471,9 @@ bool DetectDeadLanes::isUndefInput(const MachineOperand &MO,
return false;
const MachineOperand &Def = MI.getOperand(0);
unsigned DefReg = Def.getReg();
if (!TargetRegisterInfo::isVirtualRegister(DefReg))
if (!Register::isVirtualRegister(DefReg))
return false;
unsigned DefRegIdx = TargetRegisterInfo::virtReg2Index(DefReg);
unsigned DefRegIdx = Register::virtReg2Index(DefReg);
if (!DefinedByCopy.test(DefRegIdx))
return false;
@ -483,7 +483,7 @@ bool DetectDeadLanes::isUndefInput(const MachineOperand &MO,
return false;
unsigned MOReg = MO.getReg();
if (TargetRegisterInfo::isVirtualRegister(MOReg)) {
if (Register::isVirtualRegister(MOReg)) {
const TargetRegisterClass *DstRC = MRI->getRegClass(DefReg);
*CrossCopy = isCrossCopy(*MRI, MI, DstRC, MO);
}
@ -494,7 +494,7 @@ bool DetectDeadLanes::runOnce(MachineFunction &MF) {
// First pass: Populate defs/uses of vregs with initial values
unsigned NumVirtRegs = MRI->getNumVirtRegs();
for (unsigned RegIdx = 0; RegIdx < NumVirtRegs; ++RegIdx) {
unsigned Reg = TargetRegisterInfo::index2VirtReg(RegIdx);
unsigned Reg = Register::index2VirtReg(RegIdx);
// Determine used/defined lanes and add copy instructions to worklist.
VRegInfo &Info = VRegInfos[RegIdx];
@ -508,7 +508,7 @@ bool DetectDeadLanes::runOnce(MachineFunction &MF) {
Worklist.pop_front();
WorklistMembers.reset(RegIdx);
VRegInfo &Info = VRegInfos[RegIdx];
unsigned Reg = TargetRegisterInfo::index2VirtReg(RegIdx);
unsigned Reg = Register::index2VirtReg(RegIdx);
// Transfer UsedLanes to operands of DefMI (backwards dataflow).
MachineOperand &Def = *MRI->def_begin(Reg);
@ -522,7 +522,7 @@ bool DetectDeadLanes::runOnce(MachineFunction &MF) {
LLVM_DEBUG(dbgs() << "Defined/Used lanes:\n"; for (unsigned RegIdx = 0;
RegIdx < NumVirtRegs;
++RegIdx) {
unsigned Reg = TargetRegisterInfo::index2VirtReg(RegIdx);
unsigned Reg = Register::index2VirtReg(RegIdx);
const VRegInfo &Info = VRegInfos[RegIdx];
dbgs() << printReg(Reg, nullptr)
<< " Used: " << PrintLaneMask(Info.UsedLanes)
@ -537,9 +537,9 @@ bool DetectDeadLanes::runOnce(MachineFunction &MF) {
if (!MO.isReg())
continue;
unsigned Reg = MO.getReg();
if (!TargetRegisterInfo::isVirtualRegister(Reg))
if (!Register::isVirtualRegister(Reg))
continue;
unsigned RegIdx = TargetRegisterInfo::virtReg2Index(Reg);
unsigned RegIdx = Register::virtReg2Index(Reg);
const VRegInfo &RegInfo = VRegInfos[RegIdx];
if (MO.isDef() && !MO.isDead() && RegInfo.UsedLanes.none()) {
LLVM_DEBUG(dbgs()

View File

@ -235,11 +235,11 @@ bool SSAIfConv::canSpeculateInstrs(MachineBasicBlock *MBB) {
unsigned Reg = MO.getReg();
// Remember clobbered regunits.
if (MO.isDef() && TargetRegisterInfo::isPhysicalRegister(Reg))
if (MO.isDef() && Register::isPhysicalRegister(Reg))
for (MCRegUnitIterator Units(Reg, TRI); Units.isValid(); ++Units)
ClobberedRegUnits.set(*Units);
if (!MO.readsReg() || !TargetRegisterInfo::isVirtualRegister(Reg))
if (!MO.readsReg() || !Register::isVirtualRegister(Reg))
continue;
MachineInstr *DefMI = MRI->getVRegDef(Reg);
if (!DefMI || DefMI->getParent() != Head)
@ -289,7 +289,7 @@ bool SSAIfConv::findInsertionPoint() {
if (!MO.isReg())
continue;
unsigned Reg = MO.getReg();
if (!TargetRegisterInfo::isPhysicalRegister(Reg))
if (!Register::isPhysicalRegister(Reg))
continue;
// I clobbers Reg, so it isn't live before I.
if (MO.isDef())
@ -423,8 +423,8 @@ bool SSAIfConv::canConvertIf(MachineBasicBlock *MBB) {
if (PI.PHI->getOperand(i+1).getMBB() == FPred)
PI.FReg = PI.PHI->getOperand(i).getReg();
}
assert(TargetRegisterInfo::isVirtualRegister(PI.TReg) && "Bad PHI");
assert(TargetRegisterInfo::isVirtualRegister(PI.FReg) && "Bad PHI");
assert(Register::isVirtualRegister(PI.TReg) && "Bad PHI");
assert(Register::isVirtualRegister(PI.FReg) && "Bad PHI");
// Get target information.
if (!TII->canInsertSelect(*Head, Cond, PI.TReg, PI.FReg,

View File

@ -87,9 +87,9 @@ bool ExpandPostRA::LowerSubregToReg(MachineInstr *MI) {
assert(SubIdx != 0 && "Invalid index for insert_subreg");
unsigned DstSubReg = TRI->getSubReg(DstReg, SubIdx);
assert(TargetRegisterInfo::isPhysicalRegister(DstReg) &&
assert(Register::isPhysicalRegister(DstReg) &&
"Insert destination must be in a physical register");
assert(TargetRegisterInfo::isPhysicalRegister(InsReg) &&
assert(Register::isPhysicalRegister(InsReg) &&
"Inserted value must be in a physical register");
LLVM_DEBUG(dbgs() << "subreg: CONVERTING: " << *MI);

View File

@ -161,8 +161,8 @@ bool InstructionSelect::runOnMachineFunction(MachineFunction &MF) {
continue;
unsigned SrcReg = MI.getOperand(1).getReg();
unsigned DstReg = MI.getOperand(0).getReg();
if (TargetRegisterInfo::isVirtualRegister(SrcReg) &&
TargetRegisterInfo::isVirtualRegister(DstReg)) {
if (Register::isVirtualRegister(SrcReg) &&
Register::isVirtualRegister(DstReg)) {
auto SrcRC = MRI.getRegClass(SrcReg);
auto DstRC = MRI.getRegClass(DstReg);
if (SrcRC == DstRC) {
@ -179,7 +179,7 @@ bool InstructionSelect::runOnMachineFunction(MachineFunction &MF) {
// that the size of the now-constrained vreg is unchanged and that it has a
// register class.
for (unsigned I = 0, E = MRI.getNumVirtRegs(); I != E; ++I) {
unsigned VReg = TargetRegisterInfo::index2VirtReg(I);
unsigned VReg = Register::index2VirtReg(I);
MachineInstr *MI = nullptr;
if (!MRI.def_empty(VReg))

View File

@ -154,7 +154,7 @@ bool RegBankSelect::repairReg(
std::swap(Src, Dst);
assert((RepairPt.getNumInsertPoints() == 1 ||
TargetRegisterInfo::isPhysicalRegister(Dst)) &&
Register::isPhysicalRegister(Dst)) &&
"We are about to create several defs for Dst");
// Build the instruction used to repair, then clone it at the right
@ -398,7 +398,7 @@ void RegBankSelect::tryAvoidingSplit(
// Check if this is a physical or virtual register.
Register Reg = MO.getReg();
if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
if (Register::isPhysicalRegister(Reg)) {
// We are going to split every outgoing edges.
// Check that this is possible.
// FIXME: The machine representation is currently broken

View File

@ -82,7 +82,7 @@ bool RegisterBankInfo::verify(const TargetRegisterInfo &TRI) const {
const RegisterBank *
RegisterBankInfo::getRegBank(Register Reg, const MachineRegisterInfo &MRI,
const TargetRegisterInfo &TRI) const {
if (TargetRegisterInfo::isPhysicalRegister(Reg))
if (Register::isPhysicalRegister(Reg))
return &getRegBankFromRegClass(getMinimalPhysRegClass(Reg, TRI));
assert(Reg && "NoRegister does not have a register bank");
@ -97,8 +97,7 @@ RegisterBankInfo::getRegBank(Register Reg, const MachineRegisterInfo &MRI,
const TargetRegisterClass &
RegisterBankInfo::getMinimalPhysRegClass(Register Reg,
const TargetRegisterInfo &TRI) const {
assert(TargetRegisterInfo::isPhysicalRegister(Reg) &&
"Reg must be a physreg");
assert(Register::isPhysicalRegister(Reg) && "Reg must be a physreg");
const auto &RegRCIt = PhysRegMinimalRCs.find(Reg);
if (RegRCIt != PhysRegMinimalRCs.end())
return *RegRCIt->second;
@ -489,7 +488,7 @@ void RegisterBankInfo::applyDefaultMapping(const OperandsMapper &OpdMapper) {
unsigned RegisterBankInfo::getSizeInBits(Register Reg,
const MachineRegisterInfo &MRI,
const TargetRegisterInfo &TRI) const {
if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
if (Register::isPhysicalRegister(Reg)) {
// The size is not directly available for physical registers.
// Instead, we need to access a register class that contains Reg and
// get the size of that register class.

View File

@ -45,8 +45,7 @@ unsigned llvm::constrainOperandRegClass(
unsigned OpIdx) {
unsigned Reg = RegMO.getReg();
// Assume physical registers are properly constrained.
assert(TargetRegisterInfo::isVirtualRegister(Reg) &&
"PhysReg not implemented");
assert(Register::isVirtualRegister(Reg) && "PhysReg not implemented");
unsigned ConstrainedReg = constrainRegToClass(MRI, TII, RBI, Reg, RegClass);
// If we created a new virtual register because the class is not compatible
@ -75,8 +74,7 @@ unsigned llvm::constrainOperandRegClass(
const MachineOperand &RegMO, unsigned OpIdx) {
unsigned Reg = RegMO.getReg();
// Assume physical registers are properly constrained.
assert(TargetRegisterInfo::isVirtualRegister(Reg) &&
"PhysReg not implemented");
assert(Register::isVirtualRegister(Reg) && "PhysReg not implemented");
const TargetRegisterClass *RegClass = TII.getRegClass(II, OpIdx, &TRI, MF);
// Some of the target independent instructions, like COPY, may not impose any
@ -132,7 +130,7 @@ bool llvm::constrainSelectedInstRegOperands(MachineInstr &I,
unsigned Reg = MO.getReg();
// Physical registers don't need to be constrained.
if (TRI.isPhysicalRegister(Reg))
if (Register::isPhysicalRegister(Reg))
continue;
// Register operands with a value of 0 (e.g. predicate operands) don't need
@ -171,8 +169,7 @@ bool llvm::isTriviallyDead(const MachineInstr &MI,
continue;
unsigned Reg = MO.getReg();
if (TargetRegisterInfo::isPhysicalRegister(Reg) ||
!MRI.use_nodbg_empty(Reg))
if (Register::isPhysicalRegister(Reg) || !MRI.use_nodbg_empty(Reg))
return false;
}
return true;
@ -235,7 +232,7 @@ Optional<ValueAndVReg> llvm::getConstantVRegValWithLookThrough(
break;
case TargetOpcode::COPY:
VReg = MI->getOperand(1).getReg();
if (TargetRegisterInfo::isPhysicalRegister(VReg))
if (Register::isPhysicalRegister(VReg))
return None;
break;
case TargetOpcode::G_INTTOPTR:

View File

@ -346,8 +346,7 @@ void InlineSpiller::collectRegsToSpill() {
}
bool InlineSpiller::isSibling(unsigned Reg) {
return TargetRegisterInfo::isVirtualRegister(Reg) &&
VRM.getOriginal(Reg) == Original;
return Register::isVirtualRegister(Reg) && VRM.getOriginal(Reg) == Original;
}
/// It is beneficial to spill to earlier place in the same BB in case
@ -846,8 +845,7 @@ foldMemoryOperand(ArrayRef<std::pair<MachineInstr *, unsigned>> Ops,
if (!MO->isReg())
continue;
unsigned Reg = MO->getReg();
if (!Reg || TargetRegisterInfo::isVirtualRegister(Reg) ||
MRI.isReserved(Reg)) {
if (!Reg || Register::isVirtualRegister(Reg) || MRI.isReserved(Reg)) {
continue;
}
// Skip non-Defs, including undef uses and internal reads.
@ -1111,8 +1109,8 @@ void InlineSpiller::spillAll() {
void InlineSpiller::spill(LiveRangeEdit &edit) {
++NumSpilledRanges;
Edit = &edit;
assert(!TargetRegisterInfo::isStackSlot(edit.getReg())
&& "Trying to spill a stack slot.");
assert(!Register::isStackSlot(edit.getReg()) &&
"Trying to spill a stack slot.");
// Share a stack slot among all descendants of Original.
Original = VRM.getOriginal(edit.getReg());
StackSlot = VRM.getStackSlot(Original);
@ -1459,7 +1457,7 @@ void HoistSpillHelper::hoistAllSpills() {
LiveRangeEdit Edit(nullptr, NewVRegs, MF, LIS, &VRM, this);
for (unsigned i = 0, e = MRI.getNumVirtRegs(); i != e; ++i) {
unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
unsigned Reg = Register::index2VirtReg(i);
unsigned Original = VRM.getPreSplitReg(Reg);
if (!MRI.def_empty(Reg))
Virt2SiblingsMap[Original].insert(Reg);

View File

@ -719,7 +719,7 @@ void LiveDebugValues::transferRegisterDef(
// instructions never clobber SP, because some backends (e.g., AArch64)
// never list SP in the regmask.
if (MO.isReg() && MO.isDef() && MO.getReg() &&
TRI->isPhysicalRegister(MO.getReg()) &&
Register::isPhysicalRegister(MO.getReg()) &&
!(MI.isCall() && MO.getReg() == SP)) {
// Remove ranges of all aliased registers.
for (MCRegAliasIterator RAI(MO.getReg(), TRI, true); RAI.isValid(); ++RAI)

View File

@ -554,7 +554,7 @@ void LDVImpl::print(raw_ostream &OS) {
void UserValue::mapVirtRegs(LDVImpl *LDV) {
for (unsigned i = 0, e = locations.size(); i != e; ++i)
if (locations[i].isReg() &&
TargetRegisterInfo::isVirtualRegister(locations[i].getReg()))
Register::isVirtualRegister(locations[i].getReg()))
LDV->mapVirtReg(locations[i].getReg(), this);
}
@ -577,7 +577,7 @@ UserValue *LDVImpl::getUserValue(const DILocalVariable *Var,
}
void LDVImpl::mapVirtReg(unsigned VirtReg, UserValue *EC) {
assert(TargetRegisterInfo::isVirtualRegister(VirtReg) && "Only map VirtRegs");
assert(Register::isVirtualRegister(VirtReg) && "Only map VirtRegs");
UserValue *&Leader = virtRegToEqClass[VirtReg];
Leader = UserValue::merge(Leader, EC);
}
@ -606,7 +606,7 @@ bool LDVImpl::handleDebugValue(MachineInstr &MI, SlotIndex Idx) {
// could be removed or replaced by asserts.
bool Discard = false;
if (MI.getOperand(0).isReg() &&
TargetRegisterInfo::isVirtualRegister(MI.getOperand(0).getReg())) {
Register::isVirtualRegister(MI.getOperand(0).getReg())) {
const unsigned Reg = MI.getOperand(0).getReg();
if (!LIS->hasInterval(Reg)) {
// The DBG_VALUE is described by a virtual register that does not have a
@ -758,7 +758,7 @@ void UserValue::addDefsFromCopies(
if (Kills.empty())
return;
// Don't track copies from physregs, there are too many uses.
if (!TargetRegisterInfo::isVirtualRegister(LI->reg))
if (!Register::isVirtualRegister(LI->reg))
return;
// Collect all the (vreg, valno) pairs that are copies of LI.
@ -774,7 +774,7 @@ void UserValue::addDefsFromCopies(
// arguments, and the argument registers are always call clobbered. We are
// better off in the source register which could be a callee-saved register,
// or it could be spilled.
if (!TargetRegisterInfo::isVirtualRegister(DstReg))
if (!Register::isVirtualRegister(DstReg))
continue;
// Is LocNo extended to reach this copy? If not, another def may be blocking
@ -845,7 +845,7 @@ void UserValue::computeIntervals(MachineRegisterInfo &MRI,
}
// Register locations are constrained to where the register value is live.
if (TargetRegisterInfo::isVirtualRegister(LocMO.getReg())) {
if (Register::isVirtualRegister(LocMO.getReg())) {
LiveInterval *LI = nullptr;
const VNInfo *VNI = nullptr;
if (LIS.hasInterval(LocMO.getReg())) {
@ -1161,10 +1161,10 @@ void UserValue::rewriteLocations(VirtRegMap &VRM, const MachineFunction &MF,
MachineOperand Loc = locations[I];
// Only virtual registers are rewritten.
if (Loc.isReg() && Loc.getReg() &&
TargetRegisterInfo::isVirtualRegister(Loc.getReg())) {
Register::isVirtualRegister(Loc.getReg())) {
unsigned VirtReg = Loc.getReg();
if (VRM.isAssignedReg(VirtReg) &&
TargetRegisterInfo::isPhysicalRegister(VRM.getPhys(VirtReg))) {
Register::isPhysicalRegister(VRM.getPhys(VirtReg))) {
// This can create a %noreg operand in rare cases when the sub-register
// index is no longer available. That means the user value is in a
// non-existent sub-register, and %noreg is exactly what we want.

View File

@ -886,7 +886,7 @@ static void stripValuesNotDefiningMask(unsigned Reg, LiveInterval::SubRange &SR,
const TargetRegisterInfo &TRI) {
// Phys reg should not be tracked at subreg level.
// Same for noreg (Reg == 0).
if (!TargetRegisterInfo::isVirtualRegister(Reg) || !Reg)
if (!Register::isVirtualRegister(Reg) || !Reg)
return;
// Remove the values that don't define those lanes.
SmallVector<VNInfo *, 8> ToBeRemoved;
@ -967,7 +967,7 @@ void LiveInterval::computeSubRangeUndefs(SmallVectorImpl<SlotIndex> &Undefs,
LaneBitmask LaneMask,
const MachineRegisterInfo &MRI,
const SlotIndexes &Indexes) const {
assert(TargetRegisterInfo::isVirtualRegister(reg));
assert(Register::isVirtualRegister(reg));
LaneBitmask VRegMask = MRI.getMaxLaneMaskForVReg(reg);
assert((VRegMask & LaneMask).any());
const TargetRegisterInfo &TRI = *MRI.getTargetRegisterInfo();

View File

@ -108,7 +108,7 @@ LiveIntervals::~LiveIntervals() {
void LiveIntervals::releaseMemory() {
// Free the live intervals themselves.
for (unsigned i = 0, e = VirtRegIntervals.size(); i != e; ++i)
delete VirtRegIntervals[TargetRegisterInfo::index2VirtReg(i)];
delete VirtRegIntervals[Register::index2VirtReg(i)];
VirtRegIntervals.clear();
RegMaskSlots.clear();
RegMaskBits.clear();
@ -161,7 +161,7 @@ void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
// Dump the virtregs.
for (unsigned i = 0, e = MRI->getNumVirtRegs(); i != e; ++i) {
unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
unsigned Reg = Register::index2VirtReg(i);
if (hasInterval(Reg))
OS << getInterval(Reg) << '\n';
}
@ -186,7 +186,7 @@ LLVM_DUMP_METHOD void LiveIntervals::dumpInstrs() const {
#endif
LiveInterval* LiveIntervals::createInterval(unsigned reg) {
float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? huge_valf : 0.0F;
float Weight = Register::isPhysicalRegister(reg) ? huge_valf : 0.0F;
return new LiveInterval(reg, Weight);
}
@ -201,7 +201,7 @@ void LiveIntervals::computeVirtRegInterval(LiveInterval &LI) {
void LiveIntervals::computeVirtRegs() {
for (unsigned i = 0, e = MRI->getNumVirtRegs(); i != e; ++i) {
unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
unsigned Reg = Register::index2VirtReg(i);
if (MRI->reg_nodbg_empty(Reg))
continue;
createAndComputeVirtRegInterval(Reg);
@ -441,8 +441,8 @@ void LiveIntervals::extendSegmentsToUses(LiveRange &Segments,
bool LiveIntervals::shrinkToUses(LiveInterval *li,
SmallVectorImpl<MachineInstr*> *dead) {
LLVM_DEBUG(dbgs() << "Shrink: " << *li << '\n');
assert(TargetRegisterInfo::isVirtualRegister(li->reg)
&& "Can only shrink virtual registers");
assert(Register::isVirtualRegister(li->reg) &&
"Can only shrink virtual registers");
// Shrink subregister live ranges.
bool NeedsCleanup = false;
@ -541,8 +541,8 @@ bool LiveIntervals::computeDeadValues(LiveInterval &LI,
void LiveIntervals::shrinkToUses(LiveInterval::SubRange &SR, unsigned Reg) {
LLVM_DEBUG(dbgs() << "Shrink: " << SR << '\n');
assert(TargetRegisterInfo::isVirtualRegister(Reg)
&& "Can only shrink virtual registers");
assert(Register::isVirtualRegister(Reg) &&
"Can only shrink virtual registers");
// Find all the values used, including PHI kills.
ShrinkToUsesWorkList WorkList;
@ -688,7 +688,7 @@ void LiveIntervals::addKillFlags(const VirtRegMap *VRM) {
LiveRange::const_iterator>, 4> SRs;
for (unsigned i = 0, e = MRI->getNumVirtRegs(); i != e; ++i) {
unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
unsigned Reg = Register::index2VirtReg(i);
if (MRI->reg_nodbg_empty(Reg))
continue;
const LiveInterval &LI = getInterval(Reg);
@ -989,7 +989,7 @@ public:
unsigned Reg = MO.getReg();
if (!Reg)
continue;
if (TargetRegisterInfo::isVirtualRegister(Reg)) {
if (Register::isVirtualRegister(Reg)) {
LiveInterval &LI = LIS.getInterval(Reg);
if (LI.hasSubRanges()) {
unsigned SubReg = MO.getSubReg();
@ -1023,7 +1023,7 @@ private:
return;
LLVM_DEBUG({
dbgs() << " ";
if (TargetRegisterInfo::isVirtualRegister(Reg)) {
if (Register::isVirtualRegister(Reg)) {
dbgs() << printReg(Reg);
if (LaneMask.any())
dbgs() << " L" << PrintLaneMask(LaneMask);
@ -1384,7 +1384,7 @@ private:
// Return the last use of reg between NewIdx and OldIdx.
SlotIndex findLastUseBefore(SlotIndex Before, unsigned Reg,
LaneBitmask LaneMask) {
if (TargetRegisterInfo::isVirtualRegister(Reg)) {
if (Register::isVirtualRegister(Reg)) {
SlotIndex LastUse = Before;
for (MachineOperand &MO : MRI.use_nodbg_operands(Reg)) {
if (MO.isUndef())
@ -1429,7 +1429,7 @@ private:
// Check if MII uses Reg.
for (MIBundleOperands MO(*MII); MO.isValid(); ++MO)
if (MO->isReg() && !MO->isUndef() &&
TargetRegisterInfo::isPhysicalRegister(MO->getReg()) &&
Register::isPhysicalRegister(MO->getReg()) &&
TRI.hasRegUnit(MO->getReg(), Reg))
return Idx.getRegSlot();
}
@ -1585,8 +1585,7 @@ LiveIntervals::repairIntervalsInRange(MachineBasicBlock *MBB,
for (MachineInstr::const_mop_iterator MOI = MI.operands_begin(),
MOE = MI.operands_end();
MOI != MOE; ++MOI) {
if (MOI->isReg() &&
TargetRegisterInfo::isVirtualRegister(MOI->getReg()) &&
if (MOI->isReg() && Register::isVirtualRegister(MOI->getReg()) &&
!hasInterval(MOI->getReg())) {
createAndComputeVirtRegInterval(MOI->getReg());
}
@ -1594,7 +1593,7 @@ LiveIntervals::repairIntervalsInRange(MachineBasicBlock *MBB,
}
for (unsigned Reg : OrigRegs) {
if (!TargetRegisterInfo::isVirtualRegister(Reg))
if (!Register::isVirtualRegister(Reg))
continue;
LiveInterval &LI = getInterval(Reg);

View File

@ -47,7 +47,7 @@ void LivePhysRegs::removeDefs(const MachineInstr &MI) {
if (!O->isDef() || O->isDebug())
continue;
unsigned Reg = O->getReg();
if (!TargetRegisterInfo::isPhysicalRegister(Reg))
if (!Register::isPhysicalRegister(Reg))
continue;
removeReg(Reg);
} else if (O->isRegMask())
@ -61,7 +61,7 @@ void LivePhysRegs::addUses(const MachineInstr &MI) {
if (!O->isReg() || !O->readsReg() || O->isDebug())
continue;
unsigned Reg = O->getReg();
if (!TargetRegisterInfo::isPhysicalRegister(Reg))
if (!Register::isPhysicalRegister(Reg))
continue;
addReg(Reg);
}
@ -87,7 +87,7 @@ void LivePhysRegs::stepForward(const MachineInstr &MI,
for (ConstMIBundleOperands O(MI); O.isValid(); ++O) {
if (O->isReg() && !O->isDebug()) {
unsigned Reg = O->getReg();
if (!TargetRegisterInfo::isPhysicalRegister(Reg))
if (!Register::isPhysicalRegister(Reg))
continue;
if (O->isDef()) {
// Note, dead defs are still recorded. The caller should decide how to
@ -295,7 +295,7 @@ void llvm::recomputeLivenessFlags(MachineBasicBlock &MBB) {
unsigned Reg = MO->getReg();
if (Reg == 0)
continue;
assert(TargetRegisterInfo::isPhysicalRegister(Reg));
assert(Register::isPhysicalRegister(Reg));
bool IsNotLive = LiveRegs.available(MRI, Reg);
MO->setIsDead(IsNotLive);
@ -312,7 +312,7 @@ void llvm::recomputeLivenessFlags(MachineBasicBlock &MBB) {
unsigned Reg = MO->getReg();
if (Reg == 0)
continue;
assert(TargetRegisterInfo::isPhysicalRegister(Reg));
assert(Register::isPhysicalRegister(Reg));
bool IsNotLive = LiveRegs.available(MRI, Reg);
MO->setIsKill(IsNotLive);

View File

@ -372,8 +372,7 @@ bool LiveRangeCalc::findReachingDefs(LiveRange &LR, MachineBasicBlock &UseMBB,
report_fatal_error("Use not jointly dominated by defs.");
}
if (TargetRegisterInfo::isPhysicalRegister(PhysReg) &&
!MBB->isLiveIn(PhysReg)) {
if (Register::isPhysicalRegister(PhysReg) && !MBB->isLiveIn(PhysReg)) {
MBB->getParent()->verify();
const TargetRegisterInfo *TRI = MRI->getTargetRegisterInfo();
errs() << "The register " << printReg(PhysReg, TRI)

View File

@ -114,7 +114,7 @@ bool LiveRangeEdit::allUsesAvailableAt(const MachineInstr *OrigMI,
continue;
// We can't remat physreg uses, unless it is a constant.
if (TargetRegisterInfo::isPhysicalRegister(MO.getReg())) {
if (Register::isPhysicalRegister(MO.getReg())) {
if (MRI.isConstantPhysReg(MO.getReg()))
continue;
return false;
@ -309,7 +309,7 @@ void LiveRangeEdit::eliminateDeadDef(MachineInstr *MI, ToShrinkSet &ToShrink,
if (!MOI->isReg())
continue;
unsigned Reg = MOI->getReg();
if (!TargetRegisterInfo::isVirtualRegister(Reg)) {
if (!Register::isVirtualRegister(Reg)) {
// Check if MI reads any unreserved physregs.
if (Reg && MOI->readsReg() && !MRI.isReserved(Reg))
ReadsPhysRegs = true;
@ -349,7 +349,7 @@ void LiveRangeEdit::eliminateDeadDef(MachineInstr *MI, ToShrinkSet &ToShrink,
// Remove all operands that aren't physregs.
for (unsigned i = MI->getNumOperands(); i; --i) {
const MachineOperand &MO = MI->getOperand(i-1);
if (MO.isReg() && TargetRegisterInfo::isPhysicalRegister(MO.getReg()))
if (MO.isReg() && Register::isPhysicalRegister(MO.getReg()))
continue;
MI->RemoveOperand(i-1);
}

View File

@ -175,7 +175,7 @@ bool LiveRangeShrink::runOnMachineFunction(MachineFunction &MF) {
unsigned Reg = MO.getReg();
// Do not move the instruction if it def/uses a physical register,
// unless it is a constant physical register or a noreg.
if (!TargetRegisterInfo::isVirtualRegister(Reg)) {
if (!Register::isVirtualRegister(Reg)) {
if (!Reg || MRI.isConstantPhysReg(Reg))
continue;
Insert = nullptr;

View File

@ -48,7 +48,7 @@ void LiveRegUnits::stepBackward(const MachineInstr &MI) {
if (!O->isDef() || O->isDebug())
continue;
unsigned Reg = O->getReg();
if (!TargetRegisterInfo::isPhysicalRegister(Reg))
if (!Register::isPhysicalRegister(Reg))
continue;
removeReg(Reg);
} else if (O->isRegMask())
@ -60,7 +60,7 @@ void LiveRegUnits::stepBackward(const MachineInstr &MI) {
if (!O->isReg() || !O->readsReg() || O->isDebug())
continue;
unsigned Reg = O->getReg();
if (!TargetRegisterInfo::isPhysicalRegister(Reg))
if (!Register::isPhysicalRegister(Reg))
continue;
addReg(Reg);
}
@ -71,7 +71,7 @@ void LiveRegUnits::accumulate(const MachineInstr &MI) {
for (ConstMIBundleOperands O(MI); O.isValid(); ++O) {
if (O->isReg()) {
unsigned Reg = O->getReg();
if (!TargetRegisterInfo::isPhysicalRegister(Reg))
if (!Register::isPhysicalRegister(Reg))
continue;
if (!O->isDef() && !O->readsReg())
continue;

View File

@ -58,9 +58,10 @@ LiveStacks::getOrCreateInterval(int Slot, const TargetRegisterClass *RC) {
assert(Slot >= 0 && "Spill slot indice must be >= 0");
SS2IntervalMap::iterator I = S2IMap.find(Slot);
if (I == S2IMap.end()) {
I = S2IMap.emplace(std::piecewise_construct, std::forward_as_tuple(Slot),
std::forward_as_tuple(
TargetRegisterInfo::index2StackSlot(Slot), 0.0F))
I = S2IMap
.emplace(
std::piecewise_construct, std::forward_as_tuple(Slot),
std::forward_as_tuple(Register::index2StackSlot(Slot), 0.0F))
.first;
S2RCMap.insert(std::make_pair(Slot, RC));
} else {

View File

@ -82,7 +82,7 @@ LLVM_DUMP_METHOD void LiveVariables::VarInfo::dump() const {
/// getVarInfo - Get (possibly creating) a VarInfo object for the given vreg.
LiveVariables::VarInfo &LiveVariables::getVarInfo(unsigned RegIdx) {
assert(TargetRegisterInfo::isVirtualRegister(RegIdx) &&
assert(Register::isVirtualRegister(RegIdx) &&
"getVarInfo: not a virtual register!");
VirtRegInfo.grow(RegIdx);
return VirtRegInfo[RegIdx];
@ -521,8 +521,7 @@ void LiveVariables::runOnInstr(MachineInstr &MI,
continue;
unsigned MOReg = MO.getReg();
if (MO.isUse()) {
if (!(TargetRegisterInfo::isPhysicalRegister(MOReg) &&
MRI->isReserved(MOReg)))
if (!(Register::isPhysicalRegister(MOReg) && MRI->isReserved(MOReg)))
MO.setIsKill(false);
if (MO.readsReg())
UseRegs.push_back(MOReg);
@ -530,8 +529,7 @@ void LiveVariables::runOnInstr(MachineInstr &MI,
assert(MO.isDef());
// FIXME: We should not remove any dead flags. However the MIPS RDDSP
// instruction needs it at the moment: http://llvm.org/PR27116.
if (TargetRegisterInfo::isPhysicalRegister(MOReg) &&
!MRI->isReserved(MOReg))
if (Register::isPhysicalRegister(MOReg) && !MRI->isReserved(MOReg))
MO.setIsDead(false);
DefRegs.push_back(MOReg);
}
@ -541,7 +539,7 @@ void LiveVariables::runOnInstr(MachineInstr &MI,
// Process all uses.
for (unsigned i = 0, e = UseRegs.size(); i != e; ++i) {
unsigned MOReg = UseRegs[i];
if (TargetRegisterInfo::isVirtualRegister(MOReg))
if (Register::isVirtualRegister(MOReg))
HandleVirtRegUse(MOReg, MBB, MI);
else if (!MRI->isReserved(MOReg))
HandlePhysRegUse(MOReg, MI);
@ -554,7 +552,7 @@ void LiveVariables::runOnInstr(MachineInstr &MI,
// Process all defs.
for (unsigned i = 0, e = DefRegs.size(); i != e; ++i) {
unsigned MOReg = DefRegs[i];
if (TargetRegisterInfo::isVirtualRegister(MOReg))
if (Register::isVirtualRegister(MOReg))
HandleVirtRegDef(MOReg, MI);
else if (!MRI->isReserved(MOReg))
HandlePhysRegDef(MOReg, &MI, Defs);
@ -566,7 +564,7 @@ void LiveVariables::runOnBlock(MachineBasicBlock *MBB, const unsigned NumRegs) {
// Mark live-in registers as live-in.
SmallVector<unsigned, 4> Defs;
for (const auto &LI : MBB->liveins()) {
assert(TargetRegisterInfo::isPhysicalRegister(LI.PhysReg) &&
assert(Register::isPhysicalRegister(LI.PhysReg) &&
"Cannot have a live-in virtual register!");
HandlePhysRegDef(LI.PhysReg, nullptr, Defs);
}
@ -654,7 +652,7 @@ bool LiveVariables::runOnMachineFunction(MachineFunction &mf) {
// Convert and transfer the dead / killed information we have gathered into
// VirtRegInfo onto MI's.
for (unsigned i = 0, e1 = VirtRegInfo.size(); i != e1; ++i) {
const unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
const unsigned Reg = Register::index2VirtReg(i);
for (unsigned j = 0, e2 = VirtRegInfo[Reg].Kills.size(); j != e2; ++j)
if (VirtRegInfo[Reg].Kills[j] == MRI->getVRegDef(Reg))
VirtRegInfo[Reg].Kills[j]->addRegisterDead(Reg, TRI);
@ -693,7 +691,7 @@ void LiveVariables::removeVirtualRegistersKilled(MachineInstr &MI) {
if (MO.isReg() && MO.isKill()) {
MO.setIsKill(false);
unsigned Reg = MO.getReg();
if (TargetRegisterInfo::isVirtualRegister(Reg)) {
if (Register::isVirtualRegister(Reg)) {
bool removed = getVarInfo(Reg).removeKill(MI);
assert(removed && "kill not in register's VarInfo?");
(void)removed;
@ -783,7 +781,7 @@ void LiveVariables::addNewBlock(MachineBasicBlock *BB,
for (; BBI != BBE; ++BBI) {
for (MachineInstr::mop_iterator I = BBI->operands_begin(),
E = BBI->operands_end(); I != E; ++I) {
if (I->isReg() && TargetRegisterInfo::isVirtualRegister(I->getReg())) {
if (I->isReg() && Register::isVirtualRegister(I->getReg())) {
if (I->isDef())
Defs.insert(I->getReg());
else if (I->isKill())
@ -794,7 +792,7 @@ void LiveVariables::addNewBlock(MachineBasicBlock *BB,
// Update info for all live variables
for (unsigned i = 0, e = MRI->getNumVirtRegs(); i != e; ++i) {
unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
unsigned Reg = Register::index2VirtReg(i);
// If the Defs is defined in the successor it can't be live in BB.
if (Defs.count(Reg))

View File

@ -190,7 +190,7 @@ static bool rescheduleCanonically(unsigned &PseudoIdempotentInstCount,
if (!MO.isReg())
continue;
if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
if (Register::isVirtualRegister(MO.getReg()))
continue;
if (!MO.isDef())
@ -207,7 +207,7 @@ static bool rescheduleCanonically(unsigned &PseudoIdempotentInstCount,
continue;
MachineOperand &MO = II->getOperand(0);
if (!MO.isReg() || !TargetRegisterInfo::isVirtualRegister(MO.getReg()))
if (!MO.isReg() || !Register::isVirtualRegister(MO.getReg()))
continue;
if (!MO.isDef())
continue;
@ -220,7 +220,7 @@ static bool rescheduleCanonically(unsigned &PseudoIdempotentInstCount,
}
if (II->getOperand(i).isReg()) {
if (!TargetRegisterInfo::isVirtualRegister(II->getOperand(i).getReg()))
if (!Register::isVirtualRegister(II->getOperand(i).getReg()))
if (llvm::find(PhysRegDefs, II->getOperand(i).getReg()) ==
PhysRegDefs.end()) {
continue;
@ -343,9 +343,9 @@ static bool propagateLocalCopies(MachineBasicBlock *MBB) {
const unsigned Dst = MI->getOperand(0).getReg();
const unsigned Src = MI->getOperand(1).getReg();
if (!TargetRegisterInfo::isVirtualRegister(Dst))
if (!Register::isVirtualRegister(Dst))
continue;
if (!TargetRegisterInfo::isVirtualRegister(Src))
if (!Register::isVirtualRegister(Src))
continue;
// Not folding COPY instructions if regbankselect has not set the RCs.
// Why are we only considering Register Classes? Because the verifier
@ -387,7 +387,7 @@ static std::vector<MachineInstr *> populateCandidates(MachineBasicBlock *MBB) {
if (MI->getNumOperands() > 0 && MI->getOperand(0).isReg()) {
const unsigned Dst = MI->getOperand(0).getReg();
DoesMISideEffect |= !TargetRegisterInfo::isVirtualRegister(Dst);
DoesMISideEffect |= !Register::isVirtualRegister(Dst);
for (auto UI = MRI.use_begin(Dst); UI != MRI.use_end(); ++UI) {
if (DoesMISideEffect)
@ -428,7 +428,7 @@ static void doCandidateWalk(std::vector<TypedVReg> &VRegs,
assert(TReg.isReg() && "Expected vreg or physreg.");
unsigned Reg = TReg.getReg();
if (TargetRegisterInfo::isVirtualRegister(Reg)) {
if (Register::isVirtualRegister(Reg)) {
LLVM_DEBUG({
dbgs() << "Popping vreg ";
MRI.def_begin(Reg)->dump();
@ -554,7 +554,7 @@ GetVRegRenameMap(const std::vector<TypedVReg> &VRegs,
NVC.incrementVirtualVReg(LastRenameReg % 10);
FirstCandidate = false;
continue;
} else if (!TargetRegisterInfo::isVirtualRegister(vreg.getReg())) {
} else if (!Register::isVirtualRegister(vreg.getReg())) {
unsigned LastRenameReg = NVC.incrementVirtualVReg();
(void)LastRenameReg;
LLVM_DEBUG({
@ -706,7 +706,7 @@ static bool runOnBasicBlock(MachineBasicBlock *MBB,
break;
MachineOperand &MO = candidate->getOperand(i);
if (!(MO.isReg() && TargetRegisterInfo::isVirtualRegister(MO.getReg())))
if (!(MO.isReg() && Register::isVirtualRegister(MO.getReg())))
continue;
LLVM_DEBUG(dbgs() << "Enqueue register"; MO.dump(); dbgs() << "\n";);

View File

@ -1078,7 +1078,7 @@ static const char *printImplicitRegisterFlag(const MachineOperand &MO) {
static std::string getRegisterName(const TargetRegisterInfo *TRI,
unsigned Reg) {
assert(TargetRegisterInfo::isPhysicalRegister(Reg) && "expected phys reg");
assert(Register::isPhysicalRegister(Reg) && "expected phys reg");
return StringRef(TRI->getName(Reg)).lower();
}
@ -1408,11 +1408,11 @@ bool MIParser::parseRegisterOperand(MachineOperand &Dest,
if (Token.is(MIToken::dot)) {
if (parseSubRegisterIndex(SubReg))
return true;
if (!TargetRegisterInfo::isVirtualRegister(Reg))
if (!Register::isVirtualRegister(Reg))
return error("subregister index expects a virtual register");
}
if (Token.is(MIToken::colon)) {
if (!TargetRegisterInfo::isVirtualRegister(Reg))
if (!Register::isVirtualRegister(Reg))
return error("register class specification expects a virtual register");
lex();
if (parseRegisterClassOrBank(*RegInfo))
@ -1441,7 +1441,7 @@ bool MIParser::parseRegisterOperand(MachineOperand &Dest,
}
} else if (consumeIfPresent(MIToken::lparen)) {
// Virtual registers may have a type with GlobalISel.
if (!TargetRegisterInfo::isVirtualRegister(Reg))
if (!Register::isVirtualRegister(Reg))
return error("unexpected type on physical register");
LLT Ty;
@ -1455,7 +1455,7 @@ bool MIParser::parseRegisterOperand(MachineOperand &Dest,
return error("inconsistent type for generic virtual register");
MRI.setType(Reg, Ty);
} else if (TargetRegisterInfo::isVirtualRegister(Reg)) {
} else if (Register::isVirtualRegister(Reg)) {
// Generic virtual registers must have a type.
// If we end up here this means the type hasn't been specified and
// this is bad!

View File

@ -306,7 +306,7 @@ bool MIRParserImpl::parseMachineFunction(Module &M, MachineModuleInfo &MMI) {
static bool isSSA(const MachineFunction &MF) {
const MachineRegisterInfo &MRI = MF.getRegInfo();
for (unsigned I = 0, E = MRI.getNumVirtRegs(); I != E; ++I) {
unsigned Reg = TargetRegisterInfo::index2VirtReg(I);
unsigned Reg = Register::index2VirtReg(I);
if (!MRI.hasOneDef(Reg) && !MRI.def_empty(Reg))
return false;
}

View File

@ -290,7 +290,7 @@ void MIRPrinter::convert(yaml::MachineFunction &MF,
// Print the virtual register definitions.
for (unsigned I = 0, E = RegInfo.getNumVirtRegs(); I < E; ++I) {
unsigned Reg = TargetRegisterInfo::index2VirtReg(I);
unsigned Reg = Register::index2VirtReg(I);
yaml::VirtualRegisterDefinition VReg;
VReg.ID = I;
if (RegInfo.getVRegName(Reg) != "")

View File

@ -486,7 +486,7 @@ void MachineBasicBlock::sortUniqueLiveIns() {
unsigned
MachineBasicBlock::addLiveIn(MCPhysReg PhysReg, const TargetRegisterClass *RC) {
assert(getParent() && "MBB must be inserted in function");
assert(TargetRegisterInfo::isPhysicalRegister(PhysReg) && "Expected physreg");
assert(Register::isPhysicalRegister(PhysReg) && "Expected physreg");
assert(RC && "Register class is required");
assert((isEHPad() || this == &getParent()->front()) &&
"Only the entry block and landing pads can have physreg live ins");
@ -908,7 +908,7 @@ MachineBasicBlock *MachineBasicBlock::SplitCriticalEdge(MachineBasicBlock *Succ,
!OI->isUse() || !OI->isKill() || OI->isUndef())
continue;
unsigned Reg = OI->getReg();
if (TargetRegisterInfo::isPhysicalRegister(Reg) ||
if (Register::isPhysicalRegister(Reg) ||
LV->getVarInfo(Reg).removeKill(*MI)) {
KilledRegs.push_back(Reg);
LLVM_DEBUG(dbgs() << "Removing terminator kill: " << *MI);
@ -1000,7 +1000,7 @@ MachineBasicBlock *MachineBasicBlock::SplitCriticalEdge(MachineBasicBlock *Succ,
for (instr_iterator I = instr_end(), E = instr_begin(); I != E;) {
if (!(--I)->addRegisterKilled(Reg, TRI, /* AddIfNotFound= */ false))
continue;
if (TargetRegisterInfo::isVirtualRegister(Reg))
if (Register::isVirtualRegister(Reg))
LV->getVarInfo(Reg).Kills.push_back(&*I);
LLVM_DEBUG(dbgs() << "Restored terminator kill: " << *I);
break;
@ -1049,7 +1049,7 @@ MachineBasicBlock *MachineBasicBlock::SplitCriticalEdge(MachineBasicBlock *Succ,
MachineRegisterInfo *MRI = &getParent()->getRegInfo();
for (unsigned i = 0, e = MRI->getNumVirtRegs(); i != e; ++i) {
unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
unsigned Reg = Register::index2VirtReg(i);
if (PHISrcRegs.count(Reg) || !LIS->hasInterval(Reg))
continue;

View File

@ -168,14 +168,14 @@ bool MachineCSE::PerformTrivialCopyPropagation(MachineInstr *MI,
if (!MO.isReg() || !MO.isUse())
continue;
unsigned Reg = MO.getReg();
if (!TargetRegisterInfo::isVirtualRegister(Reg))
if (!Register::isVirtualRegister(Reg))
continue;
bool OnlyOneUse = MRI->hasOneNonDBGUse(Reg);
MachineInstr *DefMI = MRI->getVRegDef(Reg);
if (!DefMI->isCopy())
continue;
unsigned SrcReg = DefMI->getOperand(1).getReg();
if (!TargetRegisterInfo::isVirtualRegister(SrcReg))
if (!Register::isVirtualRegister(SrcReg))
continue;
if (DefMI->getOperand(0).getSubReg())
continue;
@ -283,7 +283,7 @@ bool MachineCSE::hasLivePhysRegDefUses(const MachineInstr *MI,
unsigned Reg = MO.getReg();
if (!Reg)
continue;
if (TargetRegisterInfo::isVirtualRegister(Reg))
if (Register::isVirtualRegister(Reg))
continue;
// Reading either caller preserved or constant physregs is ok.
if (!isCallerPreservedOrConstPhysReg(Reg, *MI->getMF(), *TRI))
@ -302,7 +302,7 @@ bool MachineCSE::hasLivePhysRegDefUses(const MachineInstr *MI,
unsigned Reg = MO.getReg();
if (!Reg)
continue;
if (TargetRegisterInfo::isVirtualRegister(Reg))
if (Register::isVirtualRegister(Reg))
continue;
// Check against PhysRefs even if the def is "dead".
if (PhysRefs.count(Reg))
@ -377,7 +377,7 @@ bool MachineCSE::PhysRegDefsReach(MachineInstr *CSMI, MachineInstr *MI,
if (!MO.isReg() || !MO.isDef())
continue;
unsigned MOReg = MO.getReg();
if (TargetRegisterInfo::isVirtualRegister(MOReg))
if (Register::isVirtualRegister(MOReg))
continue;
if (PhysRefs.count(MOReg))
return false;
@ -433,8 +433,7 @@ bool MachineCSE::isProfitableToCSE(unsigned CSReg, unsigned Reg,
// If CSReg is used at all uses of Reg, CSE should not increase register
// pressure of CSReg.
bool MayIncreasePressure = true;
if (TargetRegisterInfo::isVirtualRegister(CSReg) &&
TargetRegisterInfo::isVirtualRegister(Reg)) {
if (Register::isVirtualRegister(CSReg) && Register::isVirtualRegister(Reg)) {
MayIncreasePressure = false;
SmallPtrSet<MachineInstr*, 8> CSUses;
for (MachineInstr &MI : MRI->use_nodbg_instructions(CSReg)) {
@ -462,8 +461,7 @@ bool MachineCSE::isProfitableToCSE(unsigned CSReg, unsigned Reg,
// of the redundant computation are copies, do not cse.
bool HasVRegUse = false;
for (const MachineOperand &MO : MI->operands()) {
if (MO.isReg() && MO.isUse() &&
TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
if (MO.isReg() && MO.isUse() && Register::isVirtualRegister(MO.getReg())) {
HasVRegUse = true;
break;
}
@ -613,8 +611,8 @@ bool MachineCSE::ProcessBlockCSE(MachineBasicBlock *MBB) {
continue;
}
assert(TargetRegisterInfo::isVirtualRegister(OldReg) &&
TargetRegisterInfo::isVirtualRegister(NewReg) &&
assert(Register::isVirtualRegister(OldReg) &&
Register::isVirtualRegister(NewReg) &&
"Do not CSE physical register defs!");
if (!isProfitableToCSE(NewReg, OldReg, CSMI->getParent(), MI)) {
@ -778,11 +776,11 @@ bool MachineCSE::isPRECandidate(MachineInstr *MI) {
return false;
for (auto def : MI->defs())
if (!TRI->isVirtualRegister(def.getReg()))
if (!Register::isVirtualRegister(def.getReg()))
return false;
for (auto use : MI->uses())
if (use.isReg() && !TRI->isVirtualRegister(use.getReg()))
if (use.isReg() && !Register::isVirtualRegister(use.getReg()))
return false;
return true;

@ -137,7 +137,7 @@ void MachineCombiner::getAnalysisUsage(AnalysisUsage &AU) const {
MachineInstr *MachineCombiner::getOperandDef(const MachineOperand &MO) {
MachineInstr *DefInstr = nullptr;
// We need a virtual register definition.
if (MO.isReg() && TargetRegisterInfo::isVirtualRegister(MO.getReg()))
if (MO.isReg() && Register::isVirtualRegister(MO.getReg()))
DefInstr = MRI->getUniqueVRegDef(MO.getReg());
// PHI's have no depth etc.
if (DefInstr && DefInstr->isPHI())
@ -168,7 +168,7 @@ MachineCombiner::getDepth(SmallVectorImpl<MachineInstr *> &InsInstrs,
unsigned IDepth = 0;
for (const MachineOperand &MO : InstrPtr->operands()) {
// Check for virtual register operand.
if (!(MO.isReg() && TargetRegisterInfo::isVirtualRegister(MO.getReg())))
if (!(MO.isReg() && Register::isVirtualRegister(MO.getReg())))
continue;
if (!MO.isUse())
continue;
@ -223,7 +223,7 @@ unsigned MachineCombiner::getLatency(MachineInstr *Root, MachineInstr *NewRoot,
for (const MachineOperand &MO : NewRoot->operands()) {
// Check for virtual register operand.
if (!(MO.isReg() && TargetRegisterInfo::isVirtualRegister(MO.getReg())))
if (!(MO.isReg() && Register::isVirtualRegister(MO.getReg())))
continue;
if (!MO.isDef())
continue;

@ -459,8 +459,8 @@ void MachineCopyPropagation::CopyPropagateBlock(MachineBasicBlock &MBB) {
unsigned Def = MI->getOperand(0).getReg();
unsigned Src = MI->getOperand(1).getReg();
assert(!TargetRegisterInfo::isVirtualRegister(Def) &&
!TargetRegisterInfo::isVirtualRegister(Src) &&
assert(!Register::isVirtualRegister(Def) &&
!Register::isVirtualRegister(Src) &&
"MachineCopyPropagation should be run after register allocation!");
// The two copies cancel out and the source of the first copy
@ -552,7 +552,7 @@ void MachineCopyPropagation::CopyPropagateBlock(MachineBasicBlock &MBB) {
if (!Reg)
continue;
assert(!TargetRegisterInfo::isVirtualRegister(Reg) &&
assert(!Register::isVirtualRegister(Reg) &&
"MachineCopyPropagation should be run after register allocation!");
if (MO.isDef() && !MO.isEarlyClobber()) {

@ -636,8 +636,8 @@ bool MachineInstr::isIdenticalTo(const MachineInstr &Other,
if (Check == IgnoreDefs)
continue;
else if (Check == IgnoreVRegDefs) {
if (!TargetRegisterInfo::isVirtualRegister(MO.getReg()) ||
!TargetRegisterInfo::isVirtualRegister(OMO.getReg()))
if (!Register::isVirtualRegister(MO.getReg()) ||
!Register::isVirtualRegister(OMO.getReg()))
if (!MO.isIdenticalTo(OMO))
return false;
} else {
@ -693,7 +693,7 @@ void MachineInstr::eraseFromParentAndMarkDBGValuesForRemoval() {
if (!MO.isReg() || !MO.isDef())
continue;
unsigned Reg = MO.getReg();
if (!TargetRegisterInfo::isVirtualRegister(Reg))
if (!Register::isVirtualRegister(Reg))
continue;
MRI.markUsesInDebugValueAsUndef(Reg);
}
@ -996,7 +996,7 @@ MachineInstr::readsWritesVirtualRegister(unsigned Reg,
int
MachineInstr::findRegisterDefOperandIdx(unsigned Reg, bool isDead, bool Overlap,
const TargetRegisterInfo *TRI) const {
bool isPhys = TargetRegisterInfo::isPhysicalRegister(Reg);
bool isPhys = Register::isPhysicalRegister(Reg);
for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
const MachineOperand &MO = getOperand(i);
// Accept regmask operands when Overlap is set.
@ -1007,8 +1007,7 @@ MachineInstr::findRegisterDefOperandIdx(unsigned Reg, bool isDead, bool Overlap,
continue;
unsigned MOReg = MO.getReg();
bool Found = (MOReg == Reg);
if (!Found && TRI && isPhys &&
TargetRegisterInfo::isPhysicalRegister(MOReg)) {
if (!Found && TRI && isPhys && Register::isPhysicalRegister(MOReg)) {
if (Overlap)
Found = TRI->regsOverlap(MOReg, Reg);
else
@ -1145,7 +1144,7 @@ void MachineInstr::clearKillInfo() {
void MachineInstr::substituteRegister(unsigned FromReg, unsigned ToReg,
unsigned SubIdx,
const TargetRegisterInfo &RegInfo) {
if (TargetRegisterInfo::isPhysicalRegister(ToReg)) {
if (Register::isPhysicalRegister(ToReg)) {
if (SubIdx)
ToReg = RegInfo.getSubReg(ToReg, SubIdx);
for (MachineOperand &MO : operands()) {
@ -1783,7 +1782,7 @@ void MachineInstr::print(raw_ostream &OS, ModuleSlotTracker &MST,
bool MachineInstr::addRegisterKilled(unsigned IncomingReg,
const TargetRegisterInfo *RegInfo,
bool AddIfNotFound) {
bool isPhysReg = TargetRegisterInfo::isPhysicalRegister(IncomingReg);
bool isPhysReg = Register::isPhysicalRegister(IncomingReg);
bool hasAliases = isPhysReg &&
MCRegAliasIterator(IncomingReg, RegInfo, false).isValid();
bool Found = false;
@ -1814,8 +1813,7 @@ bool MachineInstr::addRegisterKilled(unsigned IncomingReg,
MO.setIsKill();
Found = true;
}
} else if (hasAliases && MO.isKill() &&
TargetRegisterInfo::isPhysicalRegister(Reg)) {
} else if (hasAliases && MO.isKill() && Register::isPhysicalRegister(Reg)) {
// A super-register kill already exists.
if (RegInfo->isSuperRegister(IncomingReg, Reg))
return true;
@ -1849,7 +1847,7 @@ bool MachineInstr::addRegisterKilled(unsigned IncomingReg,
void MachineInstr::clearRegisterKills(unsigned Reg,
const TargetRegisterInfo *RegInfo) {
if (!TargetRegisterInfo::isPhysicalRegister(Reg))
if (!Register::isPhysicalRegister(Reg))
RegInfo = nullptr;
for (MachineOperand &MO : operands()) {
if (!MO.isReg() || !MO.isUse() || !MO.isKill())
@ -1863,7 +1861,7 @@ void MachineInstr::clearRegisterKills(unsigned Reg,
bool MachineInstr::addRegisterDead(unsigned Reg,
const TargetRegisterInfo *RegInfo,
bool AddIfNotFound) {
bool isPhysReg = TargetRegisterInfo::isPhysicalRegister(Reg);
bool isPhysReg = Register::isPhysicalRegister(Reg);
bool hasAliases = isPhysReg &&
MCRegAliasIterator(Reg, RegInfo, false).isValid();
bool Found = false;
@ -1880,7 +1878,7 @@ bool MachineInstr::addRegisterDead(unsigned Reg,
MO.setIsDead();
Found = true;
} else if (hasAliases && MO.isDead() &&
TargetRegisterInfo::isPhysicalRegister(MOReg)) {
Register::isPhysicalRegister(MOReg)) {
// There exists a super-register that's marked dead.
if (RegInfo->isSuperRegister(Reg, MOReg))
return true;
@ -1931,7 +1929,7 @@ void MachineInstr::setRegisterDefReadUndef(unsigned Reg, bool IsUndef) {
void MachineInstr::addRegisterDefined(unsigned Reg,
const TargetRegisterInfo *RegInfo) {
if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
if (Register::isPhysicalRegister(Reg)) {
MachineOperand *MO = findRegisterDefOperand(Reg, false, false, RegInfo);
if (MO)
return;
@ -1957,7 +1955,8 @@ void MachineInstr::setPhysRegsDeadExcept(ArrayRef<unsigned> UsedRegs,
}
if (!MO.isReg() || !MO.isDef()) continue;
unsigned Reg = MO.getReg();
if (!TargetRegisterInfo::isPhysicalRegister(Reg)) continue;
if (!Register::isPhysicalRegister(Reg))
continue;
// If there are no uses, including partial uses, the def is dead.
if (llvm::none_of(UsedRegs,
[&](unsigned Use) { return TRI.regsOverlap(Use, Reg); }))
@ -1979,8 +1978,7 @@ MachineInstrExpressionTrait::getHashValue(const MachineInstr* const &MI) {
HashComponents.reserve(MI->getNumOperands() + 1);
HashComponents.push_back(MI->getOpcode());
for (const MachineOperand &MO : MI->operands()) {
if (MO.isReg() && MO.isDef() &&
TargetRegisterInfo::isVirtualRegister(MO.getReg()))
if (MO.isReg() && MO.isDef() && Register::isVirtualRegister(MO.getReg()))
continue; // Skip virtual register defs.
HashComponents.push_back(hash_value(MO));

@ -194,7 +194,7 @@ void llvm::finalizeBundle(MachineBasicBlock &MBB,
DeadDefSet.erase(Reg);
}
if (!MO.isDead() && TargetRegisterInfo::isPhysicalRegister(Reg)) {
if (!MO.isDead() && Register::isPhysicalRegister(Reg)) {
for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs) {
unsigned SubReg = *SubRegs;
if (LocalDefSet.insert(SubReg).second)
@ -316,7 +316,7 @@ MachineOperandIteratorBase::analyzePhysReg(unsigned Reg,
bool AllDefsDead = true;
PhysRegInfo PRI = {false, false, false, false, false, false, false, false};
assert(TargetRegisterInfo::isPhysicalRegister(Reg) &&
assert(Register::isPhysicalRegister(Reg) &&
"analyzePhysReg not given a physical register!");
for (; isValid(); ++*this) {
MachineOperand &MO = deref();
@ -330,7 +330,7 @@ MachineOperandIteratorBase::analyzePhysReg(unsigned Reg,
continue;
unsigned MOReg = MO.getReg();
if (!MOReg || !TargetRegisterInfo::isPhysicalRegister(MOReg))
if (!MOReg || !Register::isPhysicalRegister(MOReg))
continue;
if (!TRI->regsOverlap(MOReg, Reg))

@ -427,7 +427,7 @@ void MachineLICMBase::ProcessMI(MachineInstr *MI,
unsigned Reg = MO.getReg();
if (!Reg)
continue;
assert(TargetRegisterInfo::isPhysicalRegister(Reg) &&
assert(Register::isPhysicalRegister(Reg) &&
"Not expecting virtual register!");
if (!MO.isDef()) {
@ -853,7 +853,7 @@ MachineLICMBase::calcRegisterCost(const MachineInstr *MI, bool ConsiderSeen,
if (!MO.isReg() || MO.isImplicit())
continue;
unsigned Reg = MO.getReg();
if (!TargetRegisterInfo::isVirtualRegister(Reg))
if (!Register::isVirtualRegister(Reg))
continue;
// FIXME: It seems bad to use RegSeen only for some of these calculations.
@ -925,9 +925,9 @@ static bool isInvariantStore(const MachineInstr &MI,
unsigned Reg = MO.getReg();
// If operand is a virtual register, check if it comes from a copy of a
// physical register.
if (TargetRegisterInfo::isVirtualRegister(Reg))
if (Register::isVirtualRegister(Reg))
Reg = TRI->lookThruCopyLike(MO.getReg(), MRI);
if (TargetRegisterInfo::isVirtualRegister(Reg))
if (Register::isVirtualRegister(Reg))
return false;
if (!TRI->isCallerPreservedPhysReg(Reg, *MI.getMF()))
return false;
@ -956,7 +956,7 @@ static bool isCopyFeedingInvariantStore(const MachineInstr &MI,
const MachineFunction *MF = MI.getMF();
// Check that we are copying a constant physical register.
unsigned CopySrcReg = MI.getOperand(1).getReg();
if (TargetRegisterInfo::isVirtualRegister(CopySrcReg))
if (Register::isVirtualRegister(CopySrcReg))
return false;
if (!TRI->isCallerPreservedPhysReg(CopySrcReg, *MF))
@ -964,8 +964,8 @@ static bool isCopyFeedingInvariantStore(const MachineInstr &MI,
unsigned CopyDstReg = MI.getOperand(0).getReg();
// Check if any of the uses of the copy are invariant stores.
assert (TargetRegisterInfo::isVirtualRegister(CopyDstReg) &&
"copy dst is not a virtual reg");
assert(Register::isVirtualRegister(CopyDstReg) &&
"copy dst is not a virtual reg");
for (MachineInstr &UseMI : MRI->use_instructions(CopyDstReg)) {
if (UseMI.mayStore() && isInvariantStore(UseMI, TRI, MRI))
@ -1014,7 +1014,7 @@ bool MachineLICMBase::IsLoopInvariantInst(MachineInstr &I) {
if (Reg == 0) continue;
// Don't hoist an instruction that uses or defines a physical register.
if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
if (Register::isPhysicalRegister(Reg)) {
if (MO.isUse()) {
// If the physreg has no defs anywhere, it's just an ambient register
// and we can freely move its uses. Alternatively, if it's allocatable,
@ -1062,7 +1062,7 @@ bool MachineLICMBase::HasLoopPHIUse(const MachineInstr *MI) const {
if (!MO.isReg() || !MO.isDef())
continue;
unsigned Reg = MO.getReg();
if (!TargetRegisterInfo::isVirtualRegister(Reg))
if (!Register::isVirtualRegister(Reg))
continue;
for (MachineInstr &UseMI : MRI->use_instructions(Reg)) {
// A PHI may cause a copy to be inserted.
@ -1133,7 +1133,7 @@ bool MachineLICMBase::IsCheapInstruction(MachineInstr &MI) const {
continue;
--NumDefs;
unsigned Reg = DefMO.getReg();
if (TargetRegisterInfo::isPhysicalRegister(Reg))
if (Register::isPhysicalRegister(Reg))
continue;
if (!TII->hasLowDefLatency(SchedModel, MI, i))
@ -1226,7 +1226,7 @@ bool MachineLICMBase::IsProfitableToHoist(MachineInstr &MI) {
if (!MO.isReg() || MO.isImplicit())
continue;
unsigned Reg = MO.getReg();
if (!TargetRegisterInfo::isVirtualRegister(Reg))
if (!Register::isVirtualRegister(Reg))
continue;
if (MO.isDef() && HasHighOperandLatency(MI, i, Reg)) {
LLVM_DEBUG(dbgs() << "Hoist High Latency: " << MI);
@ -1378,12 +1378,12 @@ bool MachineLICMBase::EliminateCSE(MachineInstr *MI,
// Physical registers may not differ here.
assert((!MO.isReg() || MO.getReg() == 0 ||
!TargetRegisterInfo::isPhysicalRegister(MO.getReg()) ||
!Register::isPhysicalRegister(MO.getReg()) ||
MO.getReg() == Dup->getOperand(i).getReg()) &&
"Instructions with different phys regs are not identical!");
if (MO.isReg() && MO.isDef() &&
!TargetRegisterInfo::isPhysicalRegister(MO.getReg()))
!Register::isPhysicalRegister(MO.getReg()))
Defs.push_back(i);
}

@ -73,7 +73,7 @@ void MachineOperand::setReg(unsigned Reg) {
void MachineOperand::substVirtReg(unsigned Reg, unsigned SubIdx,
const TargetRegisterInfo &TRI) {
assert(TargetRegisterInfo::isVirtualRegister(Reg));
assert(Register::isVirtualRegister(Reg));
if (SubIdx && getSubReg())
SubIdx = TRI.composeSubRegIndices(SubIdx, getSubReg());
setReg(Reg);
@ -82,7 +82,7 @@ void MachineOperand::substVirtReg(unsigned Reg, unsigned SubIdx,
}
void MachineOperand::substPhysReg(unsigned Reg, const TargetRegisterInfo &TRI) {
assert(TargetRegisterInfo::isPhysicalRegister(Reg));
assert(Register::isPhysicalRegister(Reg));
if (getSubReg()) {
Reg = TRI.getSubReg(Reg, getSubReg());
// Note that getSubReg() may return 0 if the sub-register doesn't exist.
@ -114,7 +114,7 @@ void MachineOperand::setIsDef(bool Val) {
bool MachineOperand::isRenamable() const {
assert(isReg() && "Wrong MachineOperand accessor");
assert(TargetRegisterInfo::isPhysicalRegister(getReg()) &&
assert(Register::isPhysicalRegister(getReg()) &&
"isRenamable should only be checked on physical registers");
if (!IsRenamable)
return false;
@ -132,7 +132,7 @@ bool MachineOperand::isRenamable() const {
void MachineOperand::setIsRenamable(bool Val) {
assert(isReg() && "Wrong MachineOperand accessor");
assert(TargetRegisterInfo::isPhysicalRegister(getReg()) &&
assert(Register::isPhysicalRegister(getReg()) &&
"setIsRenamable should only be called on physical registers");
IsRenamable = Val;
}
@ -762,13 +762,13 @@ void MachineOperand::print(raw_ostream &OS, ModuleSlotTracker &MST,
OS << "undef ";
if (isEarlyClobber())
OS << "early-clobber ";
if (TargetRegisterInfo::isPhysicalRegister(getReg()) && isRenamable())
if (Register::isPhysicalRegister(getReg()) && isRenamable())
OS << "renamable ";
// isDebug() is exactly true for register operands of a DBG_VALUE. So we
// simply infer it when parsing and do not need to print it.
const MachineRegisterInfo *MRI = nullptr;
if (TargetRegisterInfo::isVirtualRegister(Reg)) {
if (Register::isVirtualRegister(Reg)) {
if (const MachineFunction *MF = getMFIfAvailable(*this)) {
MRI = &MF->getRegInfo();
}
@ -783,7 +783,7 @@ void MachineOperand::print(raw_ostream &OS, ModuleSlotTracker &MST,
OS << ".subreg" << SubReg;
}
// Print the register class / bank.
if (TargetRegisterInfo::isVirtualRegister(Reg)) {
if (Register::isVirtualRegister(Reg)) {
if (const MachineFunction *MF = getMFIfAvailable(*this)) {
const MachineRegisterInfo &MRI = MF->getRegInfo();
if (IsStandalone || !PrintDef || MRI.def_empty(Reg)) {

@ -1515,7 +1515,7 @@ static void computeLiveOuts(MachineFunction &MF, RegPressureTracker &RPTracker,
for (const MachineOperand &MO : MI->operands())
if (MO.isReg() && MO.isUse()) {
unsigned Reg = MO.getReg();
if (TargetRegisterInfo::isVirtualRegister(Reg))
if (Register::isVirtualRegister(Reg))
Uses.insert(Reg);
else if (MRI.isAllocatable(Reg))
for (MCRegUnitIterator Units(Reg, TRI); Units.isValid(); ++Units)
@ -1526,7 +1526,7 @@ static void computeLiveOuts(MachineFunction &MF, RegPressureTracker &RPTracker,
for (const MachineOperand &MO : SU->getInstr()->operands())
if (MO.isReg() && MO.isDef() && !MO.isDead()) {
unsigned Reg = MO.getReg();
if (TargetRegisterInfo::isVirtualRegister(Reg)) {
if (Register::isVirtualRegister(Reg)) {
if (!Uses.count(Reg))
LiveOutRegs.push_back(RegisterMaskPair(Reg,
LaneBitmask::getNone()));
@ -2553,7 +2553,7 @@ void SwingSchedulerDAG::generatePhis(
for (unsigned i = 0, e = BBI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = BBI->getOperand(i);
if (!MO.isReg() || !MO.isDef() ||
!TargetRegisterInfo::isVirtualRegister(MO.getReg()))
!Register::isVirtualRegister(MO.getReg()))
continue;
int StageScheduled = Schedule.stageScheduled(getSUnit(&*BBI));
@ -2658,7 +2658,7 @@ void SwingSchedulerDAG::removeDeadInstructions(MachineBasicBlock *KernelBB,
continue;
unsigned reg = MOI->getReg();
// Assume physical registers are used, unless they are marked dead.
if (TargetRegisterInfo::isPhysicalRegister(reg)) {
if (Register::isPhysicalRegister(reg)) {
used = !MOI->isDead();
if (used)
break;
@ -2813,7 +2813,7 @@ void SwingSchedulerDAG::addBranches(MachineBasicBlock &PreheaderBB,
LCMin = LC;
unsigned numAdded = 0;
if (TargetRegisterInfo::isVirtualRegister(LC)) {
if (Register::isVirtualRegister(LC)) {
Prolog->addSuccessor(Epilog);
numAdded = TII->insertBranch(*Prolog, Epilog, LastPro, Cond, DebugLoc());
} else if (j >= LCMin) {
@ -2962,7 +2962,7 @@ void SwingSchedulerDAG::updateInstruction(MachineInstr *NewMI, bool LastDef,
ValueMapTy *VRMap) {
for (unsigned i = 0, e = NewMI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = NewMI->getOperand(i);
if (!MO.isReg() || !TargetRegisterInfo::isVirtualRegister(MO.getReg()))
if (!MO.isReg() || !Register::isVirtualRegister(MO.getReg()))
continue;
unsigned reg = MO.getReg();
if (MO.isDef()) {
@ -3499,7 +3499,7 @@ void SMSchedule::orderDependence(SwingSchedulerDAG *SSD, SUnit *SU,
++I, ++Pos) {
for (unsigned i = 0, e = MI->getNumOperands(); i < e; ++i) {
MachineOperand &MO = MI->getOperand(i);
if (!MO.isReg() || !TargetRegisterInfo::isVirtualRegister(MO.getReg()))
if (!MO.isReg() || !Register::isVirtualRegister(MO.getReg()))
continue;
unsigned Reg = MO.getReg();
@ -3676,7 +3676,7 @@ bool SMSchedule::isValidSchedule(SwingSchedulerDAG *SSD) {
assert(StageDef != -1 && "Instruction should have been scheduled.");
for (auto &SI : SU.Succs)
if (SI.isAssignedRegDep())
if (ST.getRegisterInfo()->isPhysicalRegister(SI.getReg()))
if (Register::isPhysicalRegister(SI.getReg()))
if (stageScheduled(SI.getSUnit()) != StageDef)
return false;
}

@ -144,7 +144,7 @@ MachineRegisterInfo::recomputeRegClass(unsigned Reg) {
}
unsigned MachineRegisterInfo::createIncompleteVirtualRegister(StringRef Name) {
unsigned Reg = TargetRegisterInfo::index2VirtReg(getNumVirtRegs());
unsigned Reg = Register::index2VirtReg(getNumVirtRegs());
VRegInfo.grow(Reg);
RegAllocHints.grow(Reg);
insertVRegByName(Name, Reg);
@ -202,7 +202,7 @@ void MachineRegisterInfo::clearVirtRegTypes() { VRegToType.clear(); }
void MachineRegisterInfo::clearVirtRegs() {
#ifndef NDEBUG
for (unsigned i = 0, e = getNumVirtRegs(); i != e; ++i) {
unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
unsigned Reg = Register::index2VirtReg(i);
if (!VRegInfo[Reg].second)
continue;
verifyUseList(Reg);
@ -255,7 +255,7 @@ void MachineRegisterInfo::verifyUseList(unsigned Reg) const {
void MachineRegisterInfo::verifyUseLists() const {
#ifndef NDEBUG
for (unsigned i = 0, e = getNumVirtRegs(); i != e; ++i)
verifyUseList(TargetRegisterInfo::index2VirtReg(i));
verifyUseList(Register::index2VirtReg(i));
for (unsigned i = 1, e = getTargetRegisterInfo()->getNumRegs(); i != e; ++i)
verifyUseList(i);
#endif
@ -386,7 +386,7 @@ void MachineRegisterInfo::replaceRegWith(unsigned FromReg, unsigned ToReg) {
for (reg_iterator I = reg_begin(FromReg), E = reg_end(); I != E; ) {
MachineOperand &O = *I;
++I;
if (TargetRegisterInfo::isPhysicalRegister(ToReg)) {
if (Register::isPhysicalRegister(ToReg)) {
O.substPhysReg(ToReg, *TRI);
} else {
O.setReg(ToReg);
@ -498,7 +498,7 @@ MachineRegisterInfo::EmitLiveInCopies(MachineBasicBlock *EntryMBB,
LaneBitmask MachineRegisterInfo::getMaxLaneMaskForVReg(unsigned Reg) const {
// Lane masks are only defined for vregs.
assert(TargetRegisterInfo::isVirtualRegister(Reg));
assert(Register::isVirtualRegister(Reg));
const TargetRegisterClass &TRC = *getRegClass(Reg);
return TRC.getLaneMask();
}
@ -517,7 +517,7 @@ void MachineRegisterInfo::freezeReservedRegs(const MachineFunction &MF) {
}
bool MachineRegisterInfo::isConstantPhysReg(unsigned PhysReg) const {
assert(TargetRegisterInfo::isPhysicalRegister(PhysReg));
assert(Register::isPhysicalRegister(PhysReg));
const TargetRegisterInfo *TRI = getTargetRegisterInfo();
if (TRI->isConstantPhysReg(PhysReg))

@ -934,7 +934,7 @@ void ScheduleDAGMILive::collectVRegUses(SUnit &SU) {
continue;
unsigned Reg = MO.getReg();
if (!TargetRegisterInfo::isVirtualRegister(Reg))
if (!Register::isVirtualRegister(Reg))
continue;
// Ignore re-defs.
@ -1095,7 +1095,7 @@ void ScheduleDAGMILive::updatePressureDiffs(
for (const RegisterMaskPair &P : LiveUses) {
unsigned Reg = P.RegUnit;
/// FIXME: Currently assuming single-use physregs.
if (!TRI->isVirtualRegister(Reg))
if (!Register::isVirtualRegister(Reg))
continue;
if (ShouldTrackLaneMasks) {
@ -1319,8 +1319,8 @@ unsigned ScheduleDAGMILive::computeCyclicCriticalPath() {
// Visit each live out vreg def to find def/use pairs that cross iterations.
for (const RegisterMaskPair &P : RPTracker.getPressure().LiveOutRegs) {
unsigned Reg = P.RegUnit;
if (!TRI->isVirtualRegister(Reg))
continue;
if (!Register::isVirtualRegister(Reg))
continue;
const LiveInterval &LI = LIS->getInterval(Reg);
const VNInfo *DefVNI = LI.getVNInfoBefore(LIS->getMBBEndIdx(BB));
if (!DefVNI)
@ -1688,12 +1688,12 @@ void CopyConstrain::constrainLocalCopy(SUnit *CopySU, ScheduleDAGMILive *DAG) {
// Check for pure vreg copies.
const MachineOperand &SrcOp = Copy->getOperand(1);
unsigned SrcReg = SrcOp.getReg();
if (!TargetRegisterInfo::isVirtualRegister(SrcReg) || !SrcOp.readsReg())
if (!Register::isVirtualRegister(SrcReg) || !SrcOp.readsReg())
return;
const MachineOperand &DstOp = Copy->getOperand(0);
unsigned DstReg = DstOp.getReg();
if (!TargetRegisterInfo::isVirtualRegister(DstReg) || DstOp.isDead())
if (!Register::isVirtualRegister(DstReg) || DstOp.isDead())
return;
// Check if either the dest or source is local. If it's live across a back
@ -2914,14 +2914,12 @@ int biasPhysReg(const SUnit *SU, bool isTop) {
unsigned UnscheduledOper = isTop ? 0 : 1;
// If we have already scheduled the physreg produce/consumer, immediately
// schedule the copy.
if (TargetRegisterInfo::isPhysicalRegister(
MI->getOperand(ScheduledOper).getReg()))
if (Register::isPhysicalRegister(MI->getOperand(ScheduledOper).getReg()))
return 1;
// If the physreg is at the boundary, defer it. Otherwise schedule it
// immediately to free the dependent. We can hoist the copy later.
bool AtBoundary = isTop ? !SU->NumSuccsLeft : !SU->NumPredsLeft;
if (TargetRegisterInfo::isPhysicalRegister(
MI->getOperand(UnscheduledOper).getReg()))
if (Register::isPhysicalRegister(MI->getOperand(UnscheduledOper).getReg()))
return AtBoundary ? -1 : 1;
}
@ -2931,7 +2929,7 @@ int biasPhysReg(const SUnit *SU, bool isTop) {
// physical registers.
bool DoBias = true;
for (const MachineOperand &Op : MI->defs()) {
if (Op.isReg() && !TargetRegisterInfo::isPhysicalRegister(Op.getReg())) {
if (Op.isReg() && !Register::isPhysicalRegister(Op.getReg())) {
DoBias = false;
break;
}
@ -3259,7 +3257,8 @@ void GenericScheduler::reschedulePhysReg(SUnit *SU, bool isTop) {
// Find already scheduled copies with a single physreg dependence and move
// them just above the scheduled instruction.
for (SDep &Dep : Deps) {
if (Dep.getKind() != SDep::Data || !TRI->isPhysicalRegister(Dep.getReg()))
if (Dep.getKind() != SDep::Data ||
!Register::isPhysicalRegister(Dep.getReg()))
continue;
SUnit *DepSU = Dep.getSUnit();
if (isTop ? DepSU->Succs.size() > 1 : DepSU->Preds.size() > 1)

@ -197,9 +197,8 @@ bool MachineSinking::PerformTrivialForwardCoalescing(MachineInstr &MI,
unsigned SrcReg = MI.getOperand(1).getReg();
unsigned DstReg = MI.getOperand(0).getReg();
if (!TargetRegisterInfo::isVirtualRegister(SrcReg) ||
!TargetRegisterInfo::isVirtualRegister(DstReg) ||
!MRI->hasOneNonDBGUse(SrcReg))
if (!Register::isVirtualRegister(SrcReg) ||
!Register::isVirtualRegister(DstReg) || !MRI->hasOneNonDBGUse(SrcReg))
return false;
const TargetRegisterClass *SRC = MRI->getRegClass(SrcReg);
@ -233,8 +232,7 @@ MachineSinking::AllUsesDominatedByBlock(unsigned Reg,
MachineBasicBlock *DefMBB,
bool &BreakPHIEdge,
bool &LocalUse) const {
assert(TargetRegisterInfo::isVirtualRegister(Reg) &&
"Only makes sense for vregs");
assert(Register::isVirtualRegister(Reg) && "Only makes sense for vregs");
// Ignore debug uses because debug info doesn't affect the code.
if (MRI->use_nodbg_empty(Reg))
@ -422,7 +420,7 @@ bool MachineSinking::isWorthBreakingCriticalEdge(MachineInstr &MI,
// We don't move live definitions of physical registers,
// so sinking their uses won't enable any opportunities.
if (TargetRegisterInfo::isPhysicalRegister(Reg))
if (Register::isPhysicalRegister(Reg))
continue;
// If this instruction is the only user of a virtual register,
@ -618,7 +616,7 @@ MachineSinking::FindSuccToSinkTo(MachineInstr &MI, MachineBasicBlock *MBB,
unsigned Reg = MO.getReg();
if (Reg == 0) continue;
if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
if (Register::isPhysicalRegister(Reg)) {
if (MO.isUse()) {
// If the physreg has no defs anywhere, it's just an ambient register
// and we can freely move its uses. Alternatively, if it's allocatable,
@ -818,7 +816,8 @@ bool MachineSinking::SinkInstruction(MachineInstr &MI, bool &SawStore,
const MachineOperand &MO = MI.getOperand(I);
if (!MO.isReg()) continue;
unsigned Reg = MO.getReg();
if (Reg == 0 || !TargetRegisterInfo::isPhysicalRegister(Reg)) continue;
if (Reg == 0 || !Register::isPhysicalRegister(Reg))
continue;
if (SuccToSinkTo->isLiveIn(Reg))
return false;
}
@ -1130,7 +1129,7 @@ bool PostRAMachineSinking::tryToSinkCopy(MachineBasicBlock &CurBB,
// for DBG_VALUEs later, record them when they're encountered.
if (MI->isDebugValue()) {
auto &MO = MI->getOperand(0);
if (MO.isReg() && TRI->isPhysicalRegister(MO.getReg())) {
if (MO.isReg() && Register::isPhysicalRegister(MO.getReg())) {
// Bail if we can already tell the sink would be rejected, rather
// than needlessly accumulating lots of DBG_VALUEs.
if (hasRegisterDependency(MI, UsedOpsInCopy, DefedRegsInCopy,

@ -634,7 +634,7 @@ struct DataDep {
/// Create a DataDep from an SSA form virtual register.
DataDep(const MachineRegisterInfo *MRI, unsigned VirtReg, unsigned UseOp)
: UseOp(UseOp) {
assert(TargetRegisterInfo::isVirtualRegister(VirtReg));
assert(Register::isVirtualRegister(VirtReg));
MachineRegisterInfo::def_iterator DefI = MRI->def_begin(VirtReg);
assert(!DefI.atEnd() && "Register has no defs");
DefMI = DefI->getParent();
@ -663,7 +663,7 @@ static bool getDataDeps(const MachineInstr &UseMI,
unsigned Reg = MO.getReg();
if (!Reg)
continue;
if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
if (Register::isPhysicalRegister(Reg)) {
HasPhysRegs = true;
continue;
}
@ -709,7 +709,7 @@ static void updatePhysDepsDownwards(const MachineInstr *UseMI,
if (!MO.isReg())
continue;
unsigned Reg = MO.getReg();
if (!TargetRegisterInfo::isPhysicalRegister(Reg))
if (!Register::isPhysicalRegister(Reg))
continue;
// Track live defs and kills for updating RegUnits.
if (MO.isDef()) {
@ -765,7 +765,7 @@ computeCrossBlockCriticalPath(const TraceBlockInfo &TBI) {
assert(TBI.HasValidInstrHeights && "Missing height info");
unsigned MaxLen = 0;
for (const LiveInReg &LIR : TBI.LiveIns) {
if (!TargetRegisterInfo::isVirtualRegister(LIR.Reg))
if (!Register::isVirtualRegister(LIR.Reg))
continue;
const MachineInstr *DefMI = MTM.MRI->getVRegDef(LIR.Reg);
// Ignore dependencies outside the current trace.
@ -903,7 +903,7 @@ static unsigned updatePhysDepsUpwards(const MachineInstr &MI, unsigned Height,
if (!MO.isReg())
continue;
unsigned Reg = MO.getReg();
if (!TargetRegisterInfo::isPhysicalRegister(Reg))
if (!Register::isPhysicalRegister(Reg))
continue;
if (MO.readsReg())
ReadOps.push_back(MI.getOperandNo(MOI));
@ -979,7 +979,7 @@ addLiveIns(const MachineInstr *DefMI, unsigned DefOp,
ArrayRef<const MachineBasicBlock*> Trace) {
assert(!Trace.empty() && "Trace should contain at least one block");
unsigned Reg = DefMI->getOperand(DefOp).getReg();
assert(TargetRegisterInfo::isVirtualRegister(Reg));
assert(Register::isVirtualRegister(Reg));
const MachineBasicBlock *DefMBB = DefMI->getParent();
// Reg is live-in to all blocks in Trace that follow DefMBB.
@ -1026,7 +1026,7 @@ computeInstrHeights(const MachineBasicBlock *MBB) {
if (MBB) {
TraceBlockInfo &TBI = BlockInfo[MBB->getNumber()];
for (LiveInReg &LI : TBI.LiveIns) {
if (TargetRegisterInfo::isVirtualRegister(LI.Reg)) {
if (Register::isVirtualRegister(LI.Reg)) {
// For virtual registers, the def latency is included.
unsigned &Height = Heights[MTM.MRI->getVRegDef(LI.Reg)];
if (Height < LI.Height)

@ -122,7 +122,7 @@ namespace {
// Add Reg and any sub-registers to RV
void addRegWithSubRegs(RegVector &RV, unsigned Reg) {
RV.push_back(Reg);
if (TargetRegisterInfo::isPhysicalRegister(Reg))
if (Register::isPhysicalRegister(Reg))
for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs)
RV.push_back(*SubRegs);
}
@ -159,7 +159,7 @@ namespace {
// Add register to vregsPassed if it belongs there. Return true if
// anything changed.
bool addPassed(unsigned Reg) {
if (!TargetRegisterInfo::isVirtualRegister(Reg))
if (!Register::isVirtualRegister(Reg))
return false;
if (regsKilled.count(Reg) || regsLiveOut.count(Reg))
return false;
@ -178,7 +178,7 @@ namespace {
// Add register to vregsRequired if it belongs there. Return true if
// anything changed.
bool addRequired(unsigned Reg) {
if (!TargetRegisterInfo::isVirtualRegister(Reg))
if (!Register::isVirtualRegister(Reg))
return false;
if (regsLiveOut.count(Reg))
return false;
@ -552,7 +552,7 @@ void MachineVerifier::report_context_vreg(unsigned VReg) const {
}
void MachineVerifier::report_context_vreg_regunit(unsigned VRegOrUnit) const {
if (TargetRegisterInfo::isVirtualRegister(VRegOrUnit)) {
if (Register::isVirtualRegister(VRegOrUnit)) {
report_context_vreg(VRegOrUnit);
} else {
errs() << "- regunit: " << printRegUnit(VRegOrUnit, TRI) << '\n';
@ -797,7 +797,7 @@ MachineVerifier::visitMachineBasicBlockBefore(const MachineBasicBlock *MBB) {
regsLive.clear();
if (MRI->tracksLiveness()) {
for (const auto &LI : MBB->liveins()) {
if (!TargetRegisterInfo::isPhysicalRegister(LI.PhysReg)) {
if (!Register::isPhysicalRegister(LI.PhysReg)) {
report("MBB live-in list contains non-physical register", MBB);
continue;
}
@ -957,7 +957,7 @@ void MachineVerifier::verifyPreISelGenericInstruction(const MachineInstr *MI) {
// Generic opcodes must not have physical register operands.
for (unsigned I = 0; I < MI->getNumOperands(); ++I) {
const MachineOperand *MO = &MI->getOperand(I);
if (MO->isReg() && TargetRegisterInfo::isPhysicalRegister(MO->getReg()))
if (MO->isReg() && Register::isPhysicalRegister(MO->getReg()))
report("Generic instruction cannot have physical register", MO, I);
}
@ -1525,11 +1525,11 @@ MachineVerifier::visitMachineOperand(const MachineOperand *MO, unsigned MONum) {
report("Operand should be tied", MO, MONum);
else if (unsigned(TiedTo) != MI->findTiedOperandIdx(MONum))
report("Tied def doesn't match MCInstrDesc", MO, MONum);
else if (TargetRegisterInfo::isPhysicalRegister(MO->getReg())) {
else if (Register::isPhysicalRegister(MO->getReg())) {
const MachineOperand &MOTied = MI->getOperand(TiedTo);
if (!MOTied.isReg())
report("Tied counterpart must be a register", &MOTied, TiedTo);
else if (TargetRegisterInfo::isPhysicalRegister(MOTied.getReg()) &&
else if (Register::isPhysicalRegister(MOTied.getReg()) &&
MO->getReg() != MOTied.getReg())
report("Tied physical registers must match.", &MOTied, TiedTo);
}
@ -1581,7 +1581,7 @@ MachineVerifier::visitMachineOperand(const MachineOperand *MO, unsigned MONum) {
// Check register classes.
unsigned SubIdx = MO->getSubReg();
if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
if (Register::isPhysicalRegister(Reg)) {
if (SubIdx) {
report("Illegal subregister index for physical register", MO, MONum);
return;
@ -1817,7 +1817,7 @@ void MachineVerifier::checkLivenessAtDef(const MachineOperand *MO,
if (MO->isDead()) {
LiveQueryResult LRQ = LR.Query(DefIdx);
if (!LRQ.isDeadDef()) {
assert(TargetRegisterInfo::isVirtualRegister(VRegOrUnit) &&
assert(Register::isVirtualRegister(VRegOrUnit) &&
"Expecting a virtual register.");
// A dead subreg def only tells us that the specific subreg is dead. There
// could be other non-dead defs of other subregs, or we could have other
@ -1845,8 +1845,7 @@ void MachineVerifier::checkLiveness(const MachineOperand *MO, unsigned MONum) {
addRegWithSubRegs(regsKilled, Reg);
// Check that LiveVars knows this kill.
if (LiveVars && TargetRegisterInfo::isVirtualRegister(Reg) &&
MO->isKill()) {
if (LiveVars && Register::isVirtualRegister(Reg) && MO->isKill()) {
LiveVariables::VarInfo &VI = LiveVars->getVarInfo(Reg);
if (!is_contained(VI.Kills, MI))
report("Kill missing from LiveVariables", MO, MONum);
@ -1856,7 +1855,7 @@ void MachineVerifier::checkLiveness(const MachineOperand *MO, unsigned MONum) {
if (LiveInts && !LiveInts->isNotInMIMap(*MI)) {
SlotIndex UseIdx = LiveInts->getInstructionIndex(*MI);
// Check the cached regunit intervals.
if (TargetRegisterInfo::isPhysicalRegister(Reg) && !isReserved(Reg)) {
if (Register::isPhysicalRegister(Reg) && !isReserved(Reg)) {
for (MCRegUnitIterator Units(Reg, TRI); Units.isValid(); ++Units) {
if (MRI->isReservedRegUnit(*Units))
continue;
@ -1865,7 +1864,7 @@ void MachineVerifier::checkLiveness(const MachineOperand *MO, unsigned MONum) {
}
}
if (TargetRegisterInfo::isVirtualRegister(Reg)) {
if (Register::isVirtualRegister(Reg)) {
if (LiveInts->hasInterval(Reg)) {
// This is a virtual register interval.
const LiveInterval &LI = LiveInts->getInterval(Reg);
@ -1900,7 +1899,7 @@ void MachineVerifier::checkLiveness(const MachineOperand *MO, unsigned MONum) {
// Use of a dead register.
if (!regsLive.count(Reg)) {
if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
if (Register::isPhysicalRegister(Reg)) {
// Reserved registers may be used even when 'dead'.
bool Bad = !isReserved(Reg);
// We are fine if just any subregister has a defined value.
@ -1922,7 +1921,7 @@ void MachineVerifier::checkLiveness(const MachineOperand *MO, unsigned MONum) {
if (!MOP.isReg() || !MOP.isImplicit())
continue;
if (!TargetRegisterInfo::isPhysicalRegister(MOP.getReg()))
if (!Register::isPhysicalRegister(MOP.getReg()))
continue;
for (MCSubRegIterator SubRegs(MOP.getReg(), TRI); SubRegs.isValid();
@ -1960,7 +1959,7 @@ void MachineVerifier::checkLiveness(const MachineOperand *MO, unsigned MONum) {
addRegWithSubRegs(regsDefined, Reg);
// Verify SSA form.
if (MRI->isSSA() && TargetRegisterInfo::isVirtualRegister(Reg) &&
if (MRI->isSSA() && Register::isVirtualRegister(Reg) &&
std::next(MRI->def_begin(Reg)) != MRI->def_end())
report("Multiple virtual register defs in SSA form", MO, MONum);
@ -1969,7 +1968,7 @@ void MachineVerifier::checkLiveness(const MachineOperand *MO, unsigned MONum) {
SlotIndex DefIdx = LiveInts->getInstructionIndex(*MI);
DefIdx = DefIdx.getRegSlot(MO->isEarlyClobber());
if (TargetRegisterInfo::isVirtualRegister(Reg)) {
if (Register::isVirtualRegister(Reg)) {
if (LiveInts->hasInterval(Reg)) {
const LiveInterval &LI = LiveInts->getInterval(Reg);
checkLivenessAtDef(MO, MONum, DefIdx, LI, Reg);
@ -2007,7 +2006,7 @@ void MachineVerifier::visitMachineBundleAfter(const MachineInstr *MI) {
while (!regMasks.empty()) {
const uint32_t *Mask = regMasks.pop_back_val();
for (RegSet::iterator I = regsLive.begin(), E = regsLive.end(); I != E; ++I)
if (TargetRegisterInfo::isPhysicalRegister(*I) &&
if (Register::isPhysicalRegister(*I) &&
MachineOperand::clobbersPhysReg(Mask, *I))
regsDead.push_back(*I);
}
@ -2120,7 +2119,7 @@ void MachineVerifier::checkPHIOps(const MachineBasicBlock &MBB) {
MODef.isEarlyClobber() || MODef.isDebug())
report("Unexpected flag on PHI operand", &MODef, 0);
unsigned DefReg = MODef.getReg();
if (!TargetRegisterInfo::isVirtualRegister(DefReg))
if (!Register::isVirtualRegister(DefReg))
report("Expected first PHI operand to be a virtual register", &MODef, 0);
for (unsigned I = 1, E = Phi.getNumOperands(); I != E; I += 2) {
@ -2212,7 +2211,7 @@ void MachineVerifier::visitMachineFunctionAfter() {
void MachineVerifier::verifyLiveVariables() {
assert(LiveVars && "Don't call verifyLiveVariables without LiveVars");
for (unsigned i = 0, e = MRI->getNumVirtRegs(); i != e; ++i) {
unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
unsigned Reg = Register::index2VirtReg(i);
LiveVariables::VarInfo &VI = LiveVars->getVarInfo(Reg);
for (const auto &MBB : *MF) {
BBInfo &MInfo = MBBInfoMap[&MBB];
@ -2238,7 +2237,7 @@ void MachineVerifier::verifyLiveVariables() {
void MachineVerifier::verifyLiveIntervals() {
assert(LiveInts && "Don't call verifyLiveIntervals without LiveInts");
for (unsigned i = 0, e = MRI->getNumVirtRegs(); i != e; ++i) {
unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
unsigned Reg = Register::index2VirtReg(i);
// Spilling and splitting may leave unused registers around. Skip them.
if (MRI->reg_nodbg_empty(Reg))
@ -2315,11 +2314,11 @@ void MachineVerifier::verifyLiveRangeValue(const LiveRange &LR,
for (ConstMIBundleOperands MOI(*MI); MOI.isValid(); ++MOI) {
if (!MOI->isReg() || !MOI->isDef())
continue;
if (TargetRegisterInfo::isVirtualRegister(Reg)) {
if (Register::isVirtualRegister(Reg)) {
if (MOI->getReg() != Reg)
continue;
} else {
if (!TargetRegisterInfo::isPhysicalRegister(MOI->getReg()) ||
if (!Register::isPhysicalRegister(MOI->getReg()) ||
!TRI->hasRegUnit(MOI->getReg(), Reg))
continue;
}
@ -2402,7 +2401,7 @@ void MachineVerifier::verifyLiveRangeSegment(const LiveRange &LR,
return;
// RegUnit intervals are allowed dead phis.
if (!TargetRegisterInfo::isVirtualRegister(Reg) && VNI->isPHIDef() &&
if (!Register::isVirtualRegister(Reg) && VNI->isPHIDef() &&
S.start == VNI->def && S.end == VNI->def.getDeadSlot())
return;
@ -2446,7 +2445,7 @@ void MachineVerifier::verifyLiveRangeSegment(const LiveRange &LR,
// The following checks only apply to virtual registers. Physreg liveness
// is too weird to check.
if (TargetRegisterInfo::isVirtualRegister(Reg)) {
if (Register::isVirtualRegister(Reg)) {
// A live segment can end with either a redefinition, a kill flag on a
// use, or a dead flag on a def.
bool hasRead = false;
@ -2519,8 +2518,7 @@ void MachineVerifier::verifyLiveRangeSegment(const LiveRange &LR,
while (true) {
assert(LiveInts->isLiveInToMBB(LR, &*MFI));
// We don't know how to track physregs into a landing pad.
if (!TargetRegisterInfo::isVirtualRegister(Reg) &&
MFI->isEHPad()) {
if (!Register::isVirtualRegister(Reg) && MFI->isEHPad()) {
if (&*MFI == EndMBB)
break;
++MFI;
@ -2580,7 +2578,7 @@ void MachineVerifier::verifyLiveRange(const LiveRange &LR, unsigned Reg,
void MachineVerifier::verifyLiveInterval(const LiveInterval &LI) {
unsigned Reg = LI.reg;
assert(TargetRegisterInfo::isVirtualRegister(Reg));
assert(Register::isVirtualRegister(Reg));
verifyLiveRange(LI, Reg);
LaneBitmask Mask;

@ -115,10 +115,9 @@ bool OptimizePHIs::IsSingleValuePHICycle(MachineInstr *MI,
MachineInstr *SrcMI = MRI->getVRegDef(SrcReg);
// Skip over register-to-register moves.
if (SrcMI && SrcMI->isCopy() &&
!SrcMI->getOperand(0).getSubReg() &&
if (SrcMI && SrcMI->isCopy() && !SrcMI->getOperand(0).getSubReg() &&
!SrcMI->getOperand(1).getSubReg() &&
TargetRegisterInfo::isVirtualRegister(SrcMI->getOperand(1).getReg())) {
Register::isVirtualRegister(SrcMI->getOperand(1).getReg())) {
SrcReg = SrcMI->getOperand(1).getReg();
SrcMI = MRI->getVRegDef(SrcReg);
}
@ -143,7 +142,7 @@ bool OptimizePHIs::IsSingleValuePHICycle(MachineInstr *MI,
bool OptimizePHIs::IsDeadPHICycle(MachineInstr *MI, InstrSet &PHIsInCycle) {
assert(MI->isPHI() && "IsDeadPHICycle expects a PHI instruction");
unsigned DstReg = MI->getOperand(0).getReg();
assert(TargetRegisterInfo::isVirtualRegister(DstReg) &&
assert(Register::isVirtualRegister(DstReg) &&
"PHI destination is not a virtual register");
// See if we already saw this register.

@ -372,7 +372,7 @@ void PHIElimination::LowerPHINode(MachineBasicBlock &MBB,
unsigned SrcSubReg = MPhi->getOperand(i*2+1).getSubReg();
bool SrcUndef = MPhi->getOperand(i*2+1).isUndef() ||
isImplicitlyDefined(SrcReg, *MRI);
assert(TargetRegisterInfo::isVirtualRegister(SrcReg) &&
assert(Register::isVirtualRegister(SrcReg) &&
"Machine PHI Operands must all be virtual registers!");
// Get the MachineBasicBlock equivalent of the BasicBlock that is the source

@ -418,7 +418,7 @@ namespace {
const MachineRegisterInfo &MRI,
const TargetInstrInfo *TII = nullptr)
: DefSubReg(DefSubReg), Reg(Reg), MRI(MRI), TII(TII) {
if (!TargetRegisterInfo::isPhysicalRegister(Reg)) {
if (!Register::isPhysicalRegister(Reg)) {
Def = MRI.getVRegDef(Reg);
DefIdx = MRI.def_begin(Reg).getOperandNo();
}
@ -460,8 +460,8 @@ optimizeExtInstr(MachineInstr &MI, MachineBasicBlock &MBB,
if (!TII->isCoalescableExtInstr(MI, SrcReg, DstReg, SubIdx))
return false;
if (TargetRegisterInfo::isPhysicalRegister(DstReg) ||
TargetRegisterInfo::isPhysicalRegister(SrcReg))
if (Register::isPhysicalRegister(DstReg) ||
Register::isPhysicalRegister(SrcReg))
return false;
if (MRI->hasOneNonDBGUse(SrcReg))
@ -609,8 +609,8 @@ bool PeepholeOptimizer::optimizeCmpInstr(MachineInstr &MI) {
unsigned SrcReg, SrcReg2;
int CmpMask, CmpValue;
if (!TII->analyzeCompare(MI, SrcReg, SrcReg2, CmpMask, CmpValue) ||
TargetRegisterInfo::isPhysicalRegister(SrcReg) ||
(SrcReg2 != 0 && TargetRegisterInfo::isPhysicalRegister(SrcReg2)))
Register::isPhysicalRegister(SrcReg) ||
(SrcReg2 != 0 && Register::isPhysicalRegister(SrcReg2)))
return false;
// Attempt to optimize the comparison instruction.
@ -663,7 +663,7 @@ bool PeepholeOptimizer::findNextSource(RegSubRegPair RegSubReg,
// Thus, instead of maintaining untested code, we will revisit that if
// that changes at some point.
unsigned Reg = RegSubReg.Reg;
if (TargetRegisterInfo::isPhysicalRegister(Reg))
if (Register::isPhysicalRegister(Reg))
return false;
const TargetRegisterClass *DefRC = MRI->getRegClass(Reg);
@ -675,7 +675,7 @@ bool PeepholeOptimizer::findNextSource(RegSubRegPair RegSubReg,
do {
CurSrcPair = SrcToLook.pop_back_val();
// As explained above, do not handle physical registers
if (TargetRegisterInfo::isPhysicalRegister(CurSrcPair.Reg))
if (Register::isPhysicalRegister(CurSrcPair.Reg))
return false;
ValueTracker ValTracker(CurSrcPair.Reg, CurSrcPair.SubReg, *MRI, TII);
@ -723,7 +723,7 @@ bool PeepholeOptimizer::findNextSource(RegSubRegPair RegSubReg,
// constraints to the register allocator. Moreover, if we want to extend
// the live-range of a physical register, unlike SSA virtual register,
// we will have to check that they aren't redefine before the related use.
if (TargetRegisterInfo::isPhysicalRegister(CurSrcPair.Reg))
if (Register::isPhysicalRegister(CurSrcPair.Reg))
return false;
// Keep following the chain if the value isn't any better yet.
@ -1170,7 +1170,7 @@ bool PeepholeOptimizer::optimizeCoalescableCopy(MachineInstr &MI) {
"Coalescer can understand multiple defs?!");
const MachineOperand &MODef = MI.getOperand(0);
// Do not rewrite physical definitions.
if (TargetRegisterInfo::isPhysicalRegister(MODef.getReg()))
if (Register::isPhysicalRegister(MODef.getReg()))
return false;
bool Changed = false;
@ -1221,7 +1221,7 @@ bool PeepholeOptimizer::optimizeCoalescableCopy(MachineInstr &MI) {
MachineInstr &
PeepholeOptimizer::rewriteSource(MachineInstr &CopyLike,
RegSubRegPair Def, RewriteMapTy &RewriteMap) {
assert(!TargetRegisterInfo::isPhysicalRegister(Def.Reg) &&
assert(!Register::isPhysicalRegister(Def.Reg) &&
"We do not rewrite physical registers");
// Find the new source to use in the COPY rewrite.
@ -1280,7 +1280,7 @@ bool PeepholeOptimizer::optimizeUncoalescableCopy(
while (CpyRewriter.getNextRewritableSource(Src, Def)) {
// If a physical register is here, this is probably for a good reason.
// Do not rewrite that.
if (TargetRegisterInfo::isPhysicalRegister(Def.Reg))
if (Register::isPhysicalRegister(Def.Reg))
return false;
// If we do not know how to rewrite this definition, there is no point
@ -1319,8 +1319,7 @@ bool PeepholeOptimizer::isLoadFoldable(
// To reduce compilation time, we check MRI->hasOneNonDBGUser when inserting
// loads. It should be checked when processing uses of the load, since
// uses can be removed during peephole.
if (!MI.getOperand(0).getSubReg() &&
TargetRegisterInfo::isVirtualRegister(Reg) &&
if (!MI.getOperand(0).getSubReg() && Register::isVirtualRegister(Reg) &&
MRI->hasOneNonDBGUser(Reg)) {
FoldAsLoadDefCandidates.insert(Reg);
return true;
@ -1337,7 +1336,7 @@ bool PeepholeOptimizer::isMoveImmediate(
if (MCID.getNumDefs() != 1)
return false;
unsigned Reg = MI.getOperand(0).getReg();
if (TargetRegisterInfo::isVirtualRegister(Reg)) {
if (Register::isVirtualRegister(Reg)) {
ImmDefMIs.insert(std::make_pair(Reg, &MI));
ImmDefRegs.insert(Reg);
return true;
@ -1360,7 +1359,7 @@ bool PeepholeOptimizer::foldImmediate(MachineInstr &MI,
if (MO.isImplicit() && MO.isDead())
continue;
unsigned Reg = MO.getReg();
if (!TargetRegisterInfo::isVirtualRegister(Reg))
if (!Register::isVirtualRegister(Reg))
continue;
if (ImmDefRegs.count(Reg) == 0)
continue;
@ -1394,11 +1393,11 @@ bool PeepholeOptimizer::foldRedundantCopy(MachineInstr &MI,
assert(MI.isCopy() && "expected a COPY machine instruction");
unsigned SrcReg = MI.getOperand(1).getReg();
if (!TargetRegisterInfo::isVirtualRegister(SrcReg))
if (!Register::isVirtualRegister(SrcReg))
return false;
unsigned DstReg = MI.getOperand(0).getReg();
if (!TargetRegisterInfo::isVirtualRegister(DstReg))
if (!Register::isVirtualRegister(DstReg))
return false;
if (CopySrcRegs.insert(SrcReg).second) {
@ -1433,8 +1432,7 @@ bool PeepholeOptimizer::foldRedundantCopy(MachineInstr &MI,
}
bool PeepholeOptimizer::isNAPhysCopy(unsigned Reg) {
return TargetRegisterInfo::isPhysicalRegister(Reg) &&
!MRI->isAllocatable(Reg);
return Register::isPhysicalRegister(Reg) && !MRI->isAllocatable(Reg);
}
bool PeepholeOptimizer::foldRedundantNAPhysCopy(
@ -1446,7 +1444,7 @@ bool PeepholeOptimizer::foldRedundantNAPhysCopy(
unsigned DstReg = MI.getOperand(0).getReg();
unsigned SrcReg = MI.getOperand(1).getReg();
if (isNAPhysCopy(SrcReg) && TargetRegisterInfo::isVirtualRegister(DstReg)) {
if (isNAPhysCopy(SrcReg) && Register::isVirtualRegister(DstReg)) {
// %vreg = COPY %physreg
// Avoid using a datastructure which can track multiple live non-allocatable
// phys->virt copies since LLVM doesn't seem to do this.
@ -1454,7 +1452,7 @@ bool PeepholeOptimizer::foldRedundantNAPhysCopy(
return false;
}
if (!(TargetRegisterInfo::isVirtualRegister(SrcReg) && isNAPhysCopy(DstReg)))
if (!(Register::isVirtualRegister(SrcReg) && isNAPhysCopy(DstReg)))
return false;
// %physreg = COPY %vreg
@ -1489,7 +1487,7 @@ bool PeepholeOptimizer::foldRedundantNAPhysCopy(
static bool isVirtualRegisterOperand(MachineOperand &MO) {
if (!MO.isReg())
return false;
return TargetRegisterInfo::isVirtualRegister(MO.getReg());
return Register::isVirtualRegister(MO.getReg());
}
bool PeepholeOptimizer::findTargetRecurrence(
@ -2087,7 +2085,7 @@ ValueTrackerResult ValueTracker::getNextSource() {
// If we can still move up in the use-def chain, move to the next
// definition.
if (!TargetRegisterInfo::isPhysicalRegister(Reg) && OneRegSrc) {
if (!Register::isPhysicalRegister(Reg) && OneRegSrc) {
MachineRegisterInfo::def_iterator DI = MRI.def_begin(Reg);
if (DI != MRI.def_end()) {
Def = DI->getParent();

@ -75,7 +75,7 @@ void ProcessImplicitDefs::processImplicitDef(MachineInstr *MI) {
LLVM_DEBUG(dbgs() << "Processing " << *MI);
unsigned Reg = MI->getOperand(0).getReg();
if (TargetRegisterInfo::isVirtualRegister(Reg)) {
if (Register::isVirtualRegister(Reg)) {
// For virtual registers, mark all uses as <undef>, and convert users to
// implicit-def when possible.
for (MachineOperand &MO : MRI->use_nodbg_operands(Reg)) {
@ -101,7 +101,7 @@ void ProcessImplicitDefs::processImplicitDef(MachineInstr *MI) {
if (!MO.isReg())
continue;
unsigned UserReg = MO.getReg();
if (!TargetRegisterInfo::isPhysicalRegister(UserReg) ||
if (!Register::isPhysicalRegister(UserReg) ||
!TRI->regsOverlap(Reg, UserReg))
continue;
// UserMI uses or redefines Reg. Set <undef> flags on all uses.

@ -73,7 +73,7 @@ void RegAllocBase::seedLiveRegs() {
NamedRegionTimer T("seed", "Seed Live Regs", TimerGroupName,
TimerGroupDescription, TimePassesIsEnabled);
for (unsigned i = 0, e = MRI->getNumVirtRegs(); i != e; ++i) {
unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
unsigned Reg = Register::index2VirtReg(i);
if (MRI->reg_nodbg_empty(Reg))
continue;
enqueue(&LIS->getInterval(Reg));
@ -154,7 +154,7 @@ void RegAllocBase::allocatePhysRegs() {
continue;
}
LLVM_DEBUG(dbgs() << "queuing new interval: " << *SplitVirtReg << "\n");
assert(TargetRegisterInfo::isVirtualRegister(SplitVirtReg->reg) &&
assert(Register::isVirtualRegister(SplitVirtReg->reg) &&
"expect split value in virtual register");
enqueue(SplitVirtReg);
++NumNewQueued;

@ -90,7 +90,7 @@ namespace {
explicit LiveReg(unsigned VirtReg) : VirtReg(VirtReg) {}
unsigned getSparseSetIndex() const {
return TargetRegisterInfo::virtReg2Index(VirtReg);
return Register::virtReg2Index(VirtReg);
}
};
@ -200,11 +200,11 @@ namespace {
void assignVirtToPhysReg(LiveReg &, MCPhysReg PhysReg);
LiveRegMap::iterator findLiveVirtReg(unsigned VirtReg) {
return LiveVirtRegs.find(TargetRegisterInfo::virtReg2Index(VirtReg));
return LiveVirtRegs.find(Register::virtReg2Index(VirtReg));
}
LiveRegMap::const_iterator findLiveVirtReg(unsigned VirtReg) const {
return LiveVirtRegs.find(TargetRegisterInfo::virtReg2Index(VirtReg));
return LiveVirtRegs.find(Register::virtReg2Index(VirtReg));
}
void allocVirtReg(MachineInstr &MI, LiveReg &LR, unsigned Hint);
@ -264,7 +264,7 @@ int RegAllocFast::getStackSpaceFor(unsigned VirtReg) {
/// Returns false if \p VirtReg is known to not live out of the current block.
bool RegAllocFast::mayLiveOut(unsigned VirtReg) {
if (MayLiveAcrossBlocks.test(TargetRegisterInfo::virtReg2Index(VirtReg))) {
if (MayLiveAcrossBlocks.test(Register::virtReg2Index(VirtReg))) {
// Cannot be live-out if there are no successors.
return !MBB->succ_empty();
}
@ -272,7 +272,7 @@ bool RegAllocFast::mayLiveOut(unsigned VirtReg) {
// If this block loops back to itself, it would be necessary to check whether
// the use comes after the def.
if (MBB->isSuccessor(MBB)) {
MayLiveAcrossBlocks.set(TargetRegisterInfo::virtReg2Index(VirtReg));
MayLiveAcrossBlocks.set(Register::virtReg2Index(VirtReg));
return true;
}
@ -282,7 +282,7 @@ bool RegAllocFast::mayLiveOut(unsigned VirtReg) {
unsigned C = 0;
for (const MachineInstr &UseInst : MRI->reg_nodbg_instructions(VirtReg)) {
if (UseInst.getParent() != MBB || ++C >= Limit) {
MayLiveAcrossBlocks.set(TargetRegisterInfo::virtReg2Index(VirtReg));
MayLiveAcrossBlocks.set(Register::virtReg2Index(VirtReg));
// Cannot be live-out if there are no successors.
return !MBB->succ_empty();
}
@ -293,7 +293,7 @@ bool RegAllocFast::mayLiveOut(unsigned VirtReg) {
/// Returns false if \p VirtReg is known to not be live into the current block.
bool RegAllocFast::mayLiveIn(unsigned VirtReg) {
if (MayLiveAcrossBlocks.test(TargetRegisterInfo::virtReg2Index(VirtReg)))
if (MayLiveAcrossBlocks.test(Register::virtReg2Index(VirtReg)))
return !MBB->pred_empty();
// See if the first \p Limit def of the register are all in the current block.
@ -301,7 +301,7 @@ bool RegAllocFast::mayLiveIn(unsigned VirtReg) {
unsigned C = 0;
for (const MachineInstr &DefInst : MRI->def_instructions(VirtReg)) {
if (DefInst.getParent() != MBB || ++C >= Limit) {
MayLiveAcrossBlocks.set(TargetRegisterInfo::virtReg2Index(VirtReg));
MayLiveAcrossBlocks.set(Register::virtReg2Index(VirtReg));
return !MBB->pred_empty();
}
}
@ -394,7 +394,7 @@ void RegAllocFast::killVirtReg(LiveReg &LR) {
/// Mark virtreg as no longer available.
void RegAllocFast::killVirtReg(unsigned VirtReg) {
assert(TargetRegisterInfo::isVirtualRegister(VirtReg) &&
assert(Register::isVirtualRegister(VirtReg) &&
"killVirtReg needs a virtual register");
LiveRegMap::iterator LRI = findLiveVirtReg(VirtReg);
if (LRI != LiveVirtRegs.end() && LRI->PhysReg)
@ -405,7 +405,7 @@ void RegAllocFast::killVirtReg(unsigned VirtReg) {
/// stack slot if needed.
void RegAllocFast::spillVirtReg(MachineBasicBlock::iterator MI,
unsigned VirtReg) {
assert(TargetRegisterInfo::isVirtualRegister(VirtReg) &&
assert(Register::isVirtualRegister(VirtReg) &&
"Spilling a physical register is illegal!");
LiveRegMap::iterator LRI = findLiveVirtReg(VirtReg);
assert(LRI != LiveVirtRegs.end() && LRI->PhysReg &&
@ -456,8 +456,7 @@ void RegAllocFast::usePhysReg(MachineOperand &MO) {
return;
unsigned PhysReg = MO.getReg();
assert(TargetRegisterInfo::isPhysicalRegister(PhysReg) &&
"Bad usePhysReg operand");
assert(Register::isPhysicalRegister(PhysReg) && "Bad usePhysReg operand");
markRegUsedInInstr(PhysReg);
switch (PhysRegState[PhysReg]) {
@ -626,9 +625,9 @@ unsigned RegAllocFast::traceCopyChain(unsigned Reg) const {
static const unsigned ChainLengthLimit = 3;
unsigned C = 0;
do {
if (TargetRegisterInfo::isPhysicalRegister(Reg))
if (Register::isPhysicalRegister(Reg))
return Reg;
assert(TargetRegisterInfo::isVirtualRegister(Reg));
assert(Register::isVirtualRegister(Reg));
MachineInstr *VRegDef = MRI->getUniqueVRegDef(Reg);
if (!VRegDef || !isCoalescable(*VRegDef))
@ -662,7 +661,7 @@ unsigned RegAllocFast::traceCopies(unsigned VirtReg) const {
void RegAllocFast::allocVirtReg(MachineInstr &MI, LiveReg &LR, unsigned Hint0) {
const unsigned VirtReg = LR.VirtReg;
assert(TargetRegisterInfo::isVirtualRegister(VirtReg) &&
assert(Register::isVirtualRegister(VirtReg) &&
"Can only allocate virtual registers");
const TargetRegisterClass &RC = *MRI->getRegClass(VirtReg);
@ -671,8 +670,8 @@ void RegAllocFast::allocVirtReg(MachineInstr &MI, LiveReg &LR, unsigned Hint0) {
<< " with hint " << printReg(Hint0, TRI) << '\n');
// Take hint when possible.
if (TargetRegisterInfo::isPhysicalRegister(Hint0) &&
MRI->isAllocatable(Hint0) && RC.contains(Hint0)) {
if (Register::isPhysicalRegister(Hint0) && MRI->isAllocatable(Hint0) &&
RC.contains(Hint0)) {
// Ignore the hint if we would have to spill a dirty register.
unsigned Cost = calcSpillCost(Hint0);
if (Cost < spillDirty) {
@ -692,9 +691,8 @@ void RegAllocFast::allocVirtReg(MachineInstr &MI, LiveReg &LR, unsigned Hint0) {
// Try other hint.
unsigned Hint1 = traceCopies(VirtReg);
if (TargetRegisterInfo::isPhysicalRegister(Hint1) &&
MRI->isAllocatable(Hint1) && RC.contains(Hint1) &&
!isRegUsedInInstr(Hint1)) {
if (Register::isPhysicalRegister(Hint1) && MRI->isAllocatable(Hint1) &&
RC.contains(Hint1) && !isRegUsedInInstr(Hint1)) {
// Ignore the hint if we would have to spill a dirty register.
unsigned Cost = calcSpillCost(Hint1);
if (Cost < spillDirty) {
@ -753,7 +751,7 @@ void RegAllocFast::allocVirtReg(MachineInstr &MI, LiveReg &LR, unsigned Hint0) {
void RegAllocFast::allocVirtRegUndef(MachineOperand &MO) {
assert(MO.isUndef() && "expected undef use");
unsigned VirtReg = MO.getReg();
assert(TargetRegisterInfo::isVirtualRegister(VirtReg) && "Expected virtreg");
assert(Register::isVirtualRegister(VirtReg) && "Expected virtreg");
LiveRegMap::const_iterator LRI = findLiveVirtReg(VirtReg);
MCPhysReg PhysReg;
@ -778,14 +776,13 @@ void RegAllocFast::allocVirtRegUndef(MachineOperand &MO) {
/// Allocates a register for VirtReg and mark it as dirty.
MCPhysReg RegAllocFast::defineVirtReg(MachineInstr &MI, unsigned OpNum,
unsigned VirtReg, unsigned Hint) {
assert(TargetRegisterInfo::isVirtualRegister(VirtReg) &&
"Not a virtual register");
assert(Register::isVirtualRegister(VirtReg) && "Not a virtual register");
LiveRegMap::iterator LRI;
bool New;
std::tie(LRI, New) = LiveVirtRegs.insert(LiveReg(VirtReg));
if (!LRI->PhysReg) {
// If there is no hint, peek at the only use of this register.
if ((!Hint || !TargetRegisterInfo::isPhysicalRegister(Hint)) &&
if ((!Hint || !Register::isPhysicalRegister(Hint)) &&
MRI->hasOneNonDBGUse(VirtReg)) {
const MachineInstr &UseMI = *MRI->use_instr_nodbg_begin(VirtReg);
// It's a copy, use the destination register as a hint.
@ -812,8 +809,7 @@ RegAllocFast::LiveReg &RegAllocFast::reloadVirtReg(MachineInstr &MI,
unsigned OpNum,
unsigned VirtReg,
unsigned Hint) {
assert(TargetRegisterInfo::isVirtualRegister(VirtReg) &&
"Not a virtual register");
assert(Register::isVirtualRegister(VirtReg) && "Not a virtual register");
LiveRegMap::iterator LRI;
bool New;
std::tie(LRI, New) = LiveVirtRegs.insert(LiveReg(VirtReg));
@ -894,7 +890,7 @@ void RegAllocFast::handleThroughOperands(MachineInstr &MI,
for (const MachineOperand &MO : MI.operands()) {
if (!MO.isReg()) continue;
unsigned Reg = MO.getReg();
if (!TargetRegisterInfo::isVirtualRegister(Reg))
if (!Register::isVirtualRegister(Reg))
continue;
if (MO.isEarlyClobber() || (MO.isUse() && MO.isTied()) ||
(MO.getSubReg() && MI.readsVirtualRegister(Reg))) {
@ -909,7 +905,8 @@ void RegAllocFast::handleThroughOperands(MachineInstr &MI,
for (const MachineOperand &MO : MI.operands()) {
if (!MO.isReg() || !MO.isDef()) continue;
unsigned Reg = MO.getReg();
if (!Reg || !TargetRegisterInfo::isPhysicalRegister(Reg)) continue;
if (!Reg || !Register::isPhysicalRegister(Reg))
continue;
markRegUsedInInstr(Reg);
for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI) {
if (ThroughRegs.count(PhysRegState[*AI]))
@ -923,7 +920,8 @@ void RegAllocFast::handleThroughOperands(MachineInstr &MI,
MachineOperand &MO = MI.getOperand(I);
if (!MO.isReg()) continue;
unsigned Reg = MO.getReg();
if (!TargetRegisterInfo::isVirtualRegister(Reg)) continue;
if (!Register::isVirtualRegister(Reg))
continue;
if (MO.isUse()) {
if (!MO.isTied()) continue;
LLVM_DEBUG(dbgs() << "Operand " << I << "(" << MO
@ -948,7 +946,8 @@ void RegAllocFast::handleThroughOperands(MachineInstr &MI,
const MachineOperand &MO = MI.getOperand(I);
if (!MO.isReg()) continue;
unsigned Reg = MO.getReg();
if (!TargetRegisterInfo::isVirtualRegister(Reg)) continue;
if (!Register::isVirtualRegister(Reg))
continue;
if (!MO.isEarlyClobber())
continue;
// Note: defineVirtReg may invalidate MO.
@ -962,7 +961,8 @@ void RegAllocFast::handleThroughOperands(MachineInstr &MI,
for (const MachineOperand &MO : MI.operands()) {
if (!MO.isReg() || (MO.isDef() && !MO.isEarlyClobber())) continue;
unsigned Reg = MO.getReg();
if (!Reg || !TargetRegisterInfo::isPhysicalRegister(Reg)) continue;
if (!Reg || !Register::isPhysicalRegister(Reg))
continue;
LLVM_DEBUG(dbgs() << "\tSetting " << printReg(Reg, TRI)
<< " as used in instr\n");
markRegUsedInInstr(Reg);
@ -1002,10 +1002,8 @@ void RegAllocFast::dumpState() {
e = LiveVirtRegs.end(); i != e; ++i) {
if (!i->PhysReg)
continue;
assert(TargetRegisterInfo::isVirtualRegister(i->VirtReg) &&
"Bad map key");
assert(TargetRegisterInfo::isPhysicalRegister(i->PhysReg) &&
"Bad map value");
assert(Register::isVirtualRegister(i->VirtReg) && "Bad map key");
assert(Register::isPhysicalRegister(i->PhysReg) && "Bad map value");
assert(PhysRegState[i->PhysReg] == i->VirtReg && "Bad inverse map");
}
}
@ -1047,7 +1045,7 @@ void RegAllocFast::allocateInstruction(MachineInstr &MI) {
if (!MO.isReg()) continue;
unsigned Reg = MO.getReg();
if (!Reg) continue;
if (TargetRegisterInfo::isVirtualRegister(Reg)) {
if (Register::isVirtualRegister(Reg)) {
VirtOpEnd = i+1;
if (MO.isUse()) {
hasTiedOps = hasTiedOps ||
@ -1097,7 +1095,8 @@ void RegAllocFast::allocateInstruction(MachineInstr &MI) {
MachineOperand &MO = MI.getOperand(I);
if (!MO.isReg()) continue;
unsigned Reg = MO.getReg();
if (!TargetRegisterInfo::isVirtualRegister(Reg)) continue;
if (!Register::isVirtualRegister(Reg))
continue;
if (MO.isUse()) {
if (MO.isUndef()) {
HasUndefUse = true;
@ -1125,7 +1124,7 @@ void RegAllocFast::allocateInstruction(MachineInstr &MI) {
if (!MO.isReg() || !MO.isUse())
continue;
unsigned Reg = MO.getReg();
if (!TargetRegisterInfo::isVirtualRegister(Reg))
if (!Register::isVirtualRegister(Reg))
continue;
assert(MO.isUndef() && "Should only have undef virtreg uses left");
@ -1140,7 +1139,8 @@ void RegAllocFast::allocateInstruction(MachineInstr &MI) {
for (const MachineOperand &MO : MI.operands()) {
if (!MO.isReg()) continue;
unsigned Reg = MO.getReg();
if (!Reg || !TargetRegisterInfo::isPhysicalRegister(Reg)) continue;
if (!Reg || !Register::isPhysicalRegister(Reg))
continue;
// Look for physreg defs and tied uses.
if (!MO.isDef() && !MO.isTied()) continue;
markRegUsedInInstr(Reg);
@ -1168,8 +1168,7 @@ void RegAllocFast::allocateInstruction(MachineInstr &MI) {
continue;
unsigned Reg = MO.getReg();
if (!Reg || !TargetRegisterInfo::isPhysicalRegister(Reg) ||
!MRI->isAllocatable(Reg))
if (!Reg || !Register::isPhysicalRegister(Reg) || !MRI->isAllocatable(Reg))
continue;
definePhysReg(MI, Reg, MO.isDead() ? regFree : regReserved);
}
@ -1183,7 +1182,7 @@ void RegAllocFast::allocateInstruction(MachineInstr &MI) {
unsigned Reg = MO.getReg();
// We have already dealt with phys regs in the previous scan.
if (TargetRegisterInfo::isPhysicalRegister(Reg))
if (Register::isPhysicalRegister(Reg))
continue;
MCPhysReg PhysReg = defineVirtReg(MI, I, Reg, CopySrcReg);
if (setPhysReg(MI, MI.getOperand(I), PhysReg)) {
@ -1216,7 +1215,7 @@ void RegAllocFast::handleDebugValue(MachineInstr &MI) {
if (!MO.isReg())
return;
unsigned Reg = MO.getReg();
if (!TargetRegisterInfo::isVirtualRegister(Reg))
if (!Register::isVirtualRegister(Reg))
return;
// See if this virtual register has already been allocated to a physical


@ -685,7 +685,7 @@ void RAGreedy::enqueue(PQueue &CurQueue, LiveInterval *LI) {
// The queue holds (size, reg) pairs.
const unsigned Size = LI->getSize();
const unsigned Reg = LI->reg;
assert(TargetRegisterInfo::isVirtualRegister(Reg) &&
assert(Register::isVirtualRegister(Reg) &&
"Can only enqueue virtual registers");
unsigned Prio;
@ -899,7 +899,7 @@ bool RAGreedy::canEvictInterference(LiveInterval &VirtReg, unsigned PhysReg,
// Check if any interfering live range is heavier than MaxWeight.
for (unsigned i = Q.interferingVRegs().size(); i; --i) {
LiveInterval *Intf = Q.interferingVRegs()[i - 1];
assert(TargetRegisterInfo::isVirtualRegister(Intf->reg) &&
assert(Register::isVirtualRegister(Intf->reg) &&
"Only expecting virtual register interference from query");
// Do not allow eviction of a virtual register if we are in the middle
@ -984,7 +984,7 @@ bool RAGreedy::canEvictInterferenceInRange(LiveInterval &VirtReg,
continue;
// Cannot evict non virtual reg interference.
if (!TargetRegisterInfo::isVirtualRegister(Intf->reg))
if (!Register::isVirtualRegister(Intf->reg))
return false;
// Never evict spill products. They cannot split or spill.
if (getStage(*Intf) == RS_Done)
@ -2881,7 +2881,7 @@ void RAGreedy::collectHintInfo(unsigned Reg, HintsInfo &Out) {
continue;
}
// Get the current assignment.
Register OtherPhysReg = TargetRegisterInfo::isPhysicalRegister(OtherReg)
Register OtherPhysReg = Register::isPhysicalRegister(OtherReg)
? OtherReg
: VRM->getPhys(OtherReg);
// Push the collected information.
@ -2932,7 +2932,7 @@ void RAGreedy::tryHintRecoloring(LiveInterval &VirtReg) {
Reg = RecoloringCandidates.pop_back_val();
// We cannot recolor physical register.
if (TargetRegisterInfo::isPhysicalRegister(Reg))
if (Register::isPhysicalRegister(Reg))
continue;
assert(VRM->hasPhys(Reg) && "We have unallocated variable!!");
@ -3021,7 +3021,7 @@ void RAGreedy::tryHintRecoloring(LiveInterval &VirtReg) {
/// getting rid of 2 copies.
void RAGreedy::tryHintsRecoloring() {
for (LiveInterval *LI : SetOfBrokenHints) {
assert(TargetRegisterInfo::isVirtualRegister(LI->reg) &&
assert(Register::isVirtualRegister(LI->reg) &&
"Recoloring is possible only for virtual registers");
// Some dead defs may be around (e.g., because of debug uses).
// Ignore those.


@ -558,7 +558,7 @@ void RegAllocPBQP::findVRegIntervalsToAlloc(const MachineFunction &MF,
// Iterate over all live ranges.
for (unsigned I = 0, E = MRI.getNumVirtRegs(); I != E; ++I) {
unsigned Reg = TargetRegisterInfo::index2VirtReg(I);
unsigned Reg = Register::index2VirtReg(I);
if (MRI.reg_nodbg_empty(Reg))
continue;
VRegsToAlloc.insert(Reg);


@ -406,8 +406,8 @@ bool CoalescerPair::setRegisters(const MachineInstr *MI) {
Partial = SrcSub || DstSub;
// If one register is a physreg, it must be Dst.
if (TargetRegisterInfo::isPhysicalRegister(Src)) {
if (TargetRegisterInfo::isPhysicalRegister(Dst))
if (Register::isPhysicalRegister(Src)) {
if (Register::isPhysicalRegister(Dst))
return false;
std::swap(Src, Dst);
std::swap(SrcSub, DstSub);
@ -416,7 +416,7 @@ bool CoalescerPair::setRegisters(const MachineInstr *MI) {
const MachineRegisterInfo &MRI = MI->getMF()->getRegInfo();
if (TargetRegisterInfo::isPhysicalRegister(Dst)) {
if (Register::isPhysicalRegister(Dst)) {
// Eliminate DstSub on a physreg.
if (DstSub) {
Dst = TRI.getSubReg(Dst, DstSub);
@ -474,8 +474,8 @@ bool CoalescerPair::setRegisters(const MachineInstr *MI) {
CrossClass = NewRC != DstRC || NewRC != SrcRC;
}
// Check our invariants
assert(TargetRegisterInfo::isVirtualRegister(Src) && "Src must be virtual");
assert(!(TargetRegisterInfo::isPhysicalRegister(Dst) && DstSub) &&
assert(Register::isVirtualRegister(Src) && "Src must be virtual");
assert(!(Register::isPhysicalRegister(Dst) && DstSub) &&
"Cannot have a physical SubIdx");
SrcReg = Src;
DstReg = Dst;
@ -483,7 +483,7 @@ bool CoalescerPair::setRegisters(const MachineInstr *MI) {
}
bool CoalescerPair::flip() {
if (TargetRegisterInfo::isPhysicalRegister(DstReg))
if (Register::isPhysicalRegister(DstReg))
return false;
std::swap(SrcReg, DstReg);
std::swap(SrcIdx, DstIdx);
@ -507,8 +507,8 @@ bool CoalescerPair::isCoalescable(const MachineInstr *MI) const {
}
// Now check that Dst matches DstReg.
if (TargetRegisterInfo::isPhysicalRegister(DstReg)) {
if (!TargetRegisterInfo::isPhysicalRegister(Dst))
if (Register::isPhysicalRegister(DstReg)) {
if (!Register::isPhysicalRegister(Dst))
return false;
assert(!DstIdx && !SrcIdx && "Inconsistent CoalescerPair state.");
// DstSub could be set for a physreg from INSERT_SUBREG.
@ -835,8 +835,8 @@ RegisterCoalescer::removeCopyByCommutingDef(const CoalescerPair &CP,
TII->commuteInstruction(*DefMI, false, UseOpIdx, NewDstIdx);
if (!NewMI)
return { false, false };
if (TargetRegisterInfo::isVirtualRegister(IntA.reg) &&
TargetRegisterInfo::isVirtualRegister(IntB.reg) &&
if (Register::isVirtualRegister(IntA.reg) &&
Register::isVirtualRegister(IntB.reg) &&
!MRI->constrainRegClass(IntB.reg, MRI->getRegClass(IntA.reg)))
return { false, false };
if (NewMI != DefMI) {
@ -877,7 +877,7 @@ RegisterCoalescer::removeCopyByCommutingDef(const CoalescerPair &CP,
continue;
// Kill flags are no longer accurate. They are recomputed after RA.
UseMO.setIsKill(false);
if (TargetRegisterInfo::isPhysicalRegister(NewReg))
if (Register::isPhysicalRegister(NewReg))
UseMO.substPhysReg(NewReg, *TRI);
else
UseMO.setReg(NewReg);
@ -1188,7 +1188,7 @@ bool RegisterCoalescer::removePartialRedundancy(const CoalescerPair &CP,
/// Returns true if @p MI defines the full vreg @p Reg, as opposed to just
/// defining a subregister.
static bool definesFullReg(const MachineInstr &MI, unsigned Reg) {
assert(!TargetRegisterInfo::isPhysicalRegister(Reg) &&
assert(!Register::isPhysicalRegister(Reg) &&
"This code cannot handle physreg aliasing");
for (const MachineOperand &Op : MI.operands()) {
if (!Op.isReg() || !Op.isDef() || Op.getReg() != Reg)
@ -1209,7 +1209,7 @@ bool RegisterCoalescer::reMaterializeTrivialDef(const CoalescerPair &CP,
unsigned SrcIdx = CP.isFlipped() ? CP.getDstIdx() : CP.getSrcIdx();
unsigned DstReg = CP.isFlipped() ? CP.getSrcReg() : CP.getDstReg();
unsigned DstIdx = CP.isFlipped() ? CP.getSrcIdx() : CP.getDstIdx();
if (TargetRegisterInfo::isPhysicalRegister(SrcReg))
if (Register::isPhysicalRegister(SrcReg))
return false;
LiveInterval &SrcInt = LIS->getInterval(SrcReg);
@ -1254,7 +1254,7 @@ bool RegisterCoalescer::reMaterializeTrivialDef(const CoalescerPair &CP,
const TargetRegisterClass *DefRC = TII->getRegClass(MCID, 0, TRI, *MF);
if (!DefMI->isImplicitDef()) {
if (TargetRegisterInfo::isPhysicalRegister(DstReg)) {
if (Register::isPhysicalRegister(DstReg)) {
unsigned NewDstReg = DstReg;
unsigned NewDstIdx = TRI->composeSubRegIndices(CP.getSrcIdx(),
@ -1269,7 +1269,7 @@ bool RegisterCoalescer::reMaterializeTrivialDef(const CoalescerPair &CP,
} else {
// Theoretically, some stack frame reference could exist. Just make sure
// it hasn't actually happened.
assert(TargetRegisterInfo::isVirtualRegister(DstReg) &&
assert(Register::isVirtualRegister(DstReg) &&
"Only expect to deal with virtual or physical registers");
}
}
@ -1317,7 +1317,7 @@ bool RegisterCoalescer::reMaterializeTrivialDef(const CoalescerPair &CP,
if (MO.isReg()) {
assert(MO.isImplicit() && "No explicit operands after implicit operands.");
// Discard VReg implicit defs.
if (TargetRegisterInfo::isPhysicalRegister(MO.getReg()))
if (Register::isPhysicalRegister(MO.getReg()))
ImplicitOps.push_back(MO);
}
}
@ -1336,12 +1336,12 @@ bool RegisterCoalescer::reMaterializeTrivialDef(const CoalescerPair &CP,
MachineOperand &MO = NewMI.getOperand(i);
if (MO.isReg() && MO.isDef()) {
assert(MO.isImplicit() && MO.isDead() &&
TargetRegisterInfo::isPhysicalRegister(MO.getReg()));
Register::isPhysicalRegister(MO.getReg()));
NewMIImplDefs.push_back(MO.getReg());
}
}
if (TargetRegisterInfo::isVirtualRegister(DstReg)) {
if (Register::isVirtualRegister(DstReg)) {
unsigned NewIdx = NewMI.getOperand(0).getSubReg();
if (DefRC != nullptr) {
@ -1428,7 +1428,7 @@ bool RegisterCoalescer::reMaterializeTrivialDef(const CoalescerPair &CP,
} else if (NewMI.getOperand(0).getReg() != CopyDstReg) {
// The New instruction may be defining a sub-register of what's actually
// been asked for. If so it must implicitly define the whole thing.
assert(TargetRegisterInfo::isPhysicalRegister(DstReg) &&
assert(Register::isPhysicalRegister(DstReg) &&
"Only expect virtual or physical registers in remat");
NewMI.getOperand(0).setIsDead(true);
NewMI.addOperand(MachineOperand::CreateReg(
@ -1480,7 +1480,7 @@ bool RegisterCoalescer::reMaterializeTrivialDef(const CoalescerPair &CP,
for (MachineOperand &UseMO : MRI->use_operands(SrcReg)) {
MachineInstr *UseMI = UseMO.getParent();
if (UseMI->isDebugValue()) {
if (TargetRegisterInfo::isPhysicalRegister(DstReg))
if (Register::isPhysicalRegister(DstReg))
UseMO.substPhysReg(DstReg, *TRI);
else
UseMO.setReg(DstReg);
@ -1651,7 +1651,7 @@ void RegisterCoalescer::addUndefFlag(const LiveInterval &Int, SlotIndex UseIdx,
void RegisterCoalescer::updateRegDefsUses(unsigned SrcReg,
unsigned DstReg,
unsigned SubIdx) {
bool DstIsPhys = TargetRegisterInfo::isPhysicalRegister(DstReg);
bool DstIsPhys = Register::isPhysicalRegister(DstReg);
LiveInterval *DstInt = DstIsPhys ? nullptr : &LIS->getInterval(DstReg);
if (DstInt && DstInt->hasSubRanges() && DstReg != SrcReg) {
@ -2412,7 +2412,7 @@ std::pair<const VNInfo*, unsigned> JoinVals::followCopyChain(
if (!MI->isFullCopy())
return std::make_pair(VNI, TrackReg);
unsigned SrcReg = MI->getOperand(1).getReg();
if (!TargetRegisterInfo::isVirtualRegister(SrcReg))
if (!Register::isVirtualRegister(SrcReg))
return std::make_pair(VNI, TrackReg);
const LiveInterval &LI = LIS->getInterval(SrcReg);
@ -3190,8 +3190,8 @@ void JoinVals::eraseInstrs(SmallPtrSetImpl<MachineInstr*> &ErasedInstrs,
assert(MI && "No instruction to erase");
if (MI->isCopy()) {
unsigned Reg = MI->getOperand(1).getReg();
if (TargetRegisterInfo::isVirtualRegister(Reg) &&
Reg != CP.getSrcReg() && Reg != CP.getDstReg())
if (Register::isVirtualRegister(Reg) && Reg != CP.getSrcReg() &&
Reg != CP.getDstReg())
ShrinkRegs.push_back(Reg);
}
ErasedInstrs.insert(MI);
@ -3465,8 +3465,8 @@ static bool isLocalCopy(MachineInstr *Copy, const LiveIntervals *LIS) {
unsigned SrcReg = Copy->getOperand(1).getReg();
unsigned DstReg = Copy->getOperand(0).getReg();
if (TargetRegisterInfo::isPhysicalRegister(SrcReg)
|| TargetRegisterInfo::isPhysicalRegister(DstReg))
if (Register::isPhysicalRegister(SrcReg) ||
Register::isPhysicalRegister(DstReg))
return false;
return LIS->intervalIsInOneMBB(LIS->getInterval(SrcReg))
@ -3526,12 +3526,11 @@ bool RegisterCoalescer::applyTerminalRule(const MachineInstr &Copy) const {
if (!isMoveInstr(*TRI, &Copy, SrcReg, DstReg, SrcSubReg, DstSubReg))
return false;
// Check if the destination of this copy has any other affinity.
if (TargetRegisterInfo::isPhysicalRegister(DstReg) ||
if (Register::isPhysicalRegister(DstReg) ||
// If SrcReg is a physical register, the copy won't be coalesced.
// Ignoring it may have other side effect (like missing
// rematerialization). So keep it.
TargetRegisterInfo::isPhysicalRegister(SrcReg) ||
!isTerminalReg(DstReg, Copy, MRI))
Register::isPhysicalRegister(SrcReg) || !isTerminalReg(DstReg, Copy, MRI))
return false;
// DstReg is a terminal node. Check if it interferes with any other
@ -3554,7 +3553,7 @@ bool RegisterCoalescer::applyTerminalRule(const MachineInstr &Copy) const {
if (OtherReg == SrcReg)
OtherReg = OtherSrcReg;
// Check if OtherReg is a non-terminal.
if (TargetRegisterInfo::isPhysicalRegister(OtherReg) ||
if (Register::isPhysicalRegister(OtherReg) ||
isTerminalReg(OtherReg, MI, MRI))
continue;
// Check that OtherReg interfere with DstReg.


@ -219,7 +219,7 @@ void LiveRegSet::clear() {
}
static const LiveRange *getLiveRange(const LiveIntervals &LIS, unsigned Reg) {
if (TargetRegisterInfo::isVirtualRegister(Reg))
if (Register::isVirtualRegister(Reg))
return &LIS.getInterval(Reg);
return LIS.getCachedRegUnit(Reg);
}
@ -345,7 +345,7 @@ void RegPressureTracker::initLiveThru(const RegPressureTracker &RPTracker) {
  assert(isBottomClosed() && "need bottom-up tracking to initialize.");
for (const RegisterMaskPair &Pair : P.LiveOutRegs) {
unsigned RegUnit = Pair.RegUnit;
if (TargetRegisterInfo::isVirtualRegister(RegUnit)
if (Register::isVirtualRegister(RegUnit)
&& !RPTracker.hasUntiedDef(RegUnit))
increaseSetPressure(LiveThruPressure, *MRI, RegUnit,
LaneBitmask::getNone(), Pair.LaneMask);
@ -406,7 +406,7 @@ static LaneBitmask getLanesWithProperty(const LiveIntervals &LIS,
const MachineRegisterInfo &MRI, bool TrackLaneMasks, unsigned RegUnit,
SlotIndex Pos, LaneBitmask SafeDefault,
bool(*Property)(const LiveRange &LR, SlotIndex Pos)) {
if (TargetRegisterInfo::isVirtualRegister(RegUnit)) {
if (Register::isVirtualRegister(RegUnit)) {
const LiveInterval &LI = LIS.getInterval(RegUnit);
LaneBitmask Result;
if (TrackLaneMasks && LI.hasSubRanges()) {
@ -503,7 +503,7 @@ class RegisterOperandsCollector {
void pushReg(unsigned Reg,
SmallVectorImpl<RegisterMaskPair> &RegUnits) const {
if (TargetRegisterInfo::isVirtualRegister(Reg)) {
if (Register::isVirtualRegister(Reg)) {
addRegLanes(RegUnits, RegisterMaskPair(Reg, LaneBitmask::getAll()));
} else if (MRI.isAllocatable(Reg)) {
for (MCRegUnitIterator Units(Reg, &TRI); Units.isValid(); ++Units)
@ -535,7 +535,7 @@ class RegisterOperandsCollector {
void pushRegLanes(unsigned Reg, unsigned SubRegIdx,
SmallVectorImpl<RegisterMaskPair> &RegUnits) const {
if (TargetRegisterInfo::isVirtualRegister(Reg)) {
if (Register::isVirtualRegister(Reg)) {
LaneBitmask LaneMask = SubRegIdx != 0
? TRI.getSubRegIndexLaneMask(SubRegIdx)
: MRI.getMaxLaneMaskForVReg(Reg);
@ -590,7 +590,7 @@ void RegisterOperands::adjustLaneLiveness(const LiveIntervals &LIS,
// If the def is all that is live after the instruction, then in case
// of a subregister def we need a read-undef flag.
unsigned RegUnit = I->RegUnit;
if (TargetRegisterInfo::isVirtualRegister(RegUnit) &&
if (Register::isVirtualRegister(RegUnit) &&
AddFlagsMI != nullptr && (LiveAfter & ~I->LaneMask).none())
AddFlagsMI->setRegisterDefReadUndef(RegUnit);
@ -616,7 +616,7 @@ void RegisterOperands::adjustLaneLiveness(const LiveIntervals &LIS,
if (AddFlagsMI != nullptr) {
for (const RegisterMaskPair &P : DeadDefs) {
unsigned RegUnit = P.RegUnit;
if (!TargetRegisterInfo::isVirtualRegister(RegUnit))
if (!Register::isVirtualRegister(RegUnit))
continue;
LaneBitmask LiveAfter = getLiveLanesAt(LIS, MRI, true, RegUnit,
Pos.getDeadSlot());
@ -825,7 +825,7 @@ void RegPressureTracker::recede(const RegisterOperands &RegOpers,
if (TrackUntiedDefs) {
for (const RegisterMaskPair &Def : RegOpers.Defs) {
unsigned RegUnit = Def.RegUnit;
if (TargetRegisterInfo::isVirtualRegister(RegUnit) &&
if (Register::isVirtualRegister(RegUnit) &&
(LiveRegs.contains(RegUnit) & Def.LaneMask).none())
UntiedDefs.insert(RegUnit);
}


@ -134,7 +134,7 @@ void RegScavenger::determineKillsAndDefs() {
if (!MO.isReg())
continue;
unsigned Reg = MO.getReg();
if (!TargetRegisterInfo::isPhysicalRegister(Reg) || isReserved(Reg))
if (!Register::isPhysicalRegister(Reg) || isReserved(Reg))
continue;
if (MO.isUse()) {
@ -205,7 +205,7 @@ void RegScavenger::forward() {
if (!MO.isReg())
continue;
unsigned Reg = MO.getReg();
if (!TargetRegisterInfo::isPhysicalRegister(Reg) || isReserved(Reg))
if (!Register::isPhysicalRegister(Reg) || isReserved(Reg))
continue;
if (MO.isUse()) {
if (MO.isUndef())
@ -329,7 +329,7 @@ unsigned RegScavenger::findSurvivorReg(MachineBasicBlock::iterator StartMI,
Candidates.clearBitsNotInMask(MO.getRegMask());
if (!MO.isReg() || MO.isUndef() || !MO.getReg())
continue;
if (TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
if (Register::isVirtualRegister(MO.getReg())) {
if (MO.isDef())
isVirtDefInsn = true;
else if (MO.isKill())
@ -430,7 +430,7 @@ findSurvivorBackwards(const MachineRegisterInfo &MRI,
  // be useful for this other vreg as well later.
bool FoundVReg = false;
for (const MachineOperand &MO : MI.operands()) {
if (MO.isReg() && TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
if (MO.isReg() && Register::isVirtualRegister(MO.getReg())) {
FoundVReg = true;
break;
}
@ -542,7 +542,7 @@ unsigned RegScavenger::scavengeRegister(const TargetRegisterClass *RC,
// Exclude all the registers being used by the instruction.
for (const MachineOperand &MO : MI.operands()) {
if (MO.isReg() && MO.getReg() != 0 && !(MO.isUse() && MO.isUndef()) &&
!TargetRegisterInfo::isVirtualRegister(MO.getReg()))
!Register::isVirtualRegister(MO.getReg()))
for (MCRegAliasIterator AI(MO.getReg(), TRI, true); AI.isValid(); ++AI)
Candidates.reset(*AI);
}
@ -698,8 +698,8 @@ static bool scavengeFrameVirtualRegsInBlock(MachineRegisterInfo &MRI,
// We only care about virtual registers and ignore virtual registers
// created by the target callbacks in the process (those will be handled
// in a scavenging round).
if (!TargetRegisterInfo::isVirtualRegister(Reg) ||
TargetRegisterInfo::virtReg2Index(Reg) >= InitialNumVirtRegs)
if (!Register::isVirtualRegister(Reg) ||
Register::virtReg2Index(Reg) >= InitialNumVirtRegs)
continue;
if (!MO.readsReg())
continue;
@ -718,8 +718,8 @@ static bool scavengeFrameVirtualRegsInBlock(MachineRegisterInfo &MRI,
continue;
unsigned Reg = MO.getReg();
// Only vregs, no newly created vregs (see above).
if (!TargetRegisterInfo::isVirtualRegister(Reg) ||
TargetRegisterInfo::virtReg2Index(Reg) >= InitialNumVirtRegs)
if (!Register::isVirtualRegister(Reg) ||
Register::virtReg2Index(Reg) >= InitialNumVirtRegs)
continue;
// We have to look at all operands anyway so we can precalculate here
  // whether there is a reading operand. This allows us to skip the use
@ -737,7 +737,7 @@ static bool scavengeFrameVirtualRegsInBlock(MachineRegisterInfo &MRI,
}
#ifndef NDEBUG
for (const MachineOperand &MO : MBB.front().operands()) {
if (!MO.isReg() || !TargetRegisterInfo::isVirtualRegister(MO.getReg()))
if (!MO.isReg() || !Register::isVirtualRegister(MO.getReg()))
continue;
assert(!MO.isInternalRead() && "Cannot assign inside bundles");
assert((!MO.isUndef() || MO.isDef()) && "Cannot handle undef uses");


@ -390,7 +390,7 @@ bool RenameIndependentSubregs::runOnMachineFunction(MachineFunction &MF) {
// there can't be any further splitting.
bool Changed = false;
for (size_t I = 0, E = MRI->getNumVirtRegs(); I < E; ++I) {
unsigned Reg = TargetRegisterInfo::index2VirtReg(I);
unsigned Reg = Register::index2VirtReg(I);
if (!LIS->hasInterval(Reg))
continue;
LiveInterval &LI = LIS->getInterval(Reg);


@ -206,9 +206,9 @@ void ScheduleDAGInstrs::addSchedBarrierDeps() {
for (const MachineOperand &MO : ExitMI->operands()) {
if (!MO.isReg() || MO.isDef()) continue;
unsigned Reg = MO.getReg();
if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
if (Register::isPhysicalRegister(Reg)) {
Uses.insert(PhysRegSUOper(&ExitSU, -1, Reg));
} else if (TargetRegisterInfo::isVirtualRegister(Reg) && MO.readsReg()) {
} else if (Register::isVirtualRegister(Reg) && MO.readsReg()) {
addVRegUseDeps(&ExitSU, ExitMI->getOperandNo(&MO));
}
}
@ -822,9 +822,9 @@ void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA,
if (!MO.isReg() || !MO.isDef())
continue;
unsigned Reg = MO.getReg();
if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
if (Register::isPhysicalRegister(Reg)) {
addPhysRegDeps(SU, j);
} else if (TargetRegisterInfo::isVirtualRegister(Reg)) {
} else if (Register::isVirtualRegister(Reg)) {
HasVRegDef = true;
addVRegDefDeps(SU, j);
}
@ -839,9 +839,9 @@ void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA,
if (!MO.isReg() || !MO.isUse())
continue;
unsigned Reg = MO.getReg();
if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
if (Register::isPhysicalRegister(Reg)) {
addPhysRegDeps(SU, j);
} else if (TargetRegisterInfo::isVirtualRegister(Reg) && MO.readsReg()) {
} else if (Register::isVirtualRegister(Reg) && MO.readsReg()) {
addVRegUseDeps(SU, j);
}
}


@ -174,7 +174,7 @@ static unsigned findSinkableLocalRegDef(MachineInstr &MI) {
if (RegDef)
return 0;
RegDef = MO.getReg();
} else if (TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
} else if (Register::isVirtualRegister(MO.getReg())) {
// This is another use of a vreg. Don't try to sink it.
return 0;
}
@ -2028,7 +2028,7 @@ unsigned FastISel::createResultReg(const TargetRegisterClass *RC) {
unsigned FastISel::constrainOperandRegClass(const MCInstrDesc &II, unsigned Op,
unsigned OpNum) {
if (TargetRegisterInfo::isVirtualRegister(Op)) {
if (Register::isVirtualRegister(Op)) {
const TargetRegisterClass *RegClass =
TII.getRegClass(II, OpNum, &TRI, *FuncInfo.MF);
if (!MRI.constrainRegClass(Op, RegClass)) {
@ -2236,7 +2236,7 @@ unsigned FastISel::fastEmitInst_i(unsigned MachineInstOpcode,
unsigned FastISel::fastEmitInst_extractsubreg(MVT RetVT, unsigned Op0,
bool Op0IsKill, uint32_t Idx) {
unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
assert(TargetRegisterInfo::isVirtualRegister(Op0) &&
assert(Register::isVirtualRegister(Op0) &&
"Cannot yet extract from physregs");
const TargetRegisterClass *RC = MRI.getRegClass(Op0);
MRI.constrainRegClass(Op0, TRI.getSubClassWithSubReg(RC, Idx));


@ -424,7 +424,7 @@ void FunctionLoweringInfo::ComputePHILiveOutRegInfo(const PHINode *PN) {
unsigned BitWidth = IntVT.getSizeInBits();
unsigned DestReg = ValueMap[PN];
if (!TargetRegisterInfo::isVirtualRegister(DestReg))
if (!Register::isVirtualRegister(DestReg))
return;
LiveOutRegInfo.grow(DestReg);
LiveOutInfo &DestLOI = LiveOutRegInfo[DestReg];
@ -445,7 +445,7 @@ void FunctionLoweringInfo::ComputePHILiveOutRegInfo(const PHINode *PN) {
  assert(ValueMap.count(V) && "V should have been placed in ValueMap when its "
"CopyToReg node was created.");
unsigned SrcReg = ValueMap[V];
if (!TargetRegisterInfo::isVirtualRegister(SrcReg)) {
if (!Register::isVirtualRegister(SrcReg)) {
DestLOI.IsValid = false;
return;
}
@ -480,7 +480,7 @@ void FunctionLoweringInfo::ComputePHILiveOutRegInfo(const PHINode *PN) {
assert(ValueMap.count(V) && "V should have been placed in ValueMap when "
"its CopyToReg node was created.");
unsigned SrcReg = ValueMap[V];
if (!TargetRegisterInfo::isVirtualRegister(SrcReg)) {
if (!Register::isVirtualRegister(SrcReg)) {
DestLOI.IsValid = false;
return;
}


@ -71,7 +71,7 @@ static unsigned countOperands(SDNode *Node, unsigned NumExpUses,
if (isa<RegisterMaskSDNode>(Node->getOperand(I - 1)))
continue;
if (RegisterSDNode *RN = dyn_cast<RegisterSDNode>(Node->getOperand(I - 1)))
if (TargetRegisterInfo::isPhysicalRegister(RN->getReg()))
if (Register::isPhysicalRegister(RN->getReg()))
continue;
NumImpUses = N - I;
break;
@ -86,7 +86,7 @@ void InstrEmitter::
EmitCopyFromReg(SDNode *Node, unsigned ResNo, bool IsClone, bool IsCloned,
unsigned SrcReg, DenseMap<SDValue, unsigned> &VRBaseMap) {
unsigned VRBase = 0;
if (TargetRegisterInfo::isVirtualRegister(SrcReg)) {
if (Register::isVirtualRegister(SrcReg)) {
// Just use the input register directly!
SDValue Op(Node, ResNo);
if (IsClone)
@ -114,7 +114,7 @@ EmitCopyFromReg(SDNode *Node, unsigned ResNo, bool IsClone, bool IsCloned,
User->getOperand(2).getNode() == Node &&
User->getOperand(2).getResNo() == ResNo) {
unsigned DestReg = cast<RegisterSDNode>(User->getOperand(1))->getReg();
if (TargetRegisterInfo::isVirtualRegister(DestReg)) {
if (Register::isVirtualRegister(DestReg)) {
VRBase = DestReg;
Match = false;
} else if (DestReg != SrcReg)
@ -219,7 +219,7 @@ void InstrEmitter::CreateVirtualRegisters(SDNode *Node,
if (II.OpInfo[i].isOptionalDef()) {
// Optional def must be a physical register.
VRBase = cast<RegisterSDNode>(Node->getOperand(i-NumResults))->getReg();
assert(TargetRegisterInfo::isPhysicalRegister(VRBase));
assert(Register::isPhysicalRegister(VRBase));
MIB.addReg(VRBase, RegState::Define);
}
@ -229,7 +229,7 @@ void InstrEmitter::CreateVirtualRegisters(SDNode *Node,
User->getOperand(2).getNode() == Node &&
User->getOperand(2).getResNo() == i) {
unsigned Reg = cast<RegisterSDNode>(User->getOperand(1))->getReg();
if (TargetRegisterInfo::isVirtualRegister(Reg)) {
if (Register::isVirtualRegister(Reg)) {
const TargetRegisterClass *RegRC = MRI->getRegClass(Reg);
if (RegRC == RC) {
VRBase = Reg;
@ -385,8 +385,7 @@ void InstrEmitter::AddOperand(MachineInstrBuilder &MIB,
(IIRC && TRI->isDivergentRegClass(IIRC)))
: nullptr;
if (OpRC && IIRC && OpRC != IIRC &&
TargetRegisterInfo::isVirtualRegister(VReg)) {
if (OpRC && IIRC && OpRC != IIRC && Register::isVirtualRegister(VReg)) {
unsigned NewVReg = MRI->createVirtualRegister(IIRC);
BuildMI(*MBB, InsertPos, Op.getNode()->getDebugLoc(),
TII->get(TargetOpcode::COPY), NewVReg).addReg(VReg);
@ -485,7 +484,7 @@ void InstrEmitter::EmitSubregNode(SDNode *Node,
if (User->getOpcode() == ISD::CopyToReg &&
User->getOperand(2).getNode() == Node) {
unsigned DestReg = cast<RegisterSDNode>(User->getOperand(1))->getReg();
if (TargetRegisterInfo::isVirtualRegister(DestReg)) {
if (Register::isVirtualRegister(DestReg)) {
VRBase = DestReg;
break;
}
@ -503,7 +502,7 @@ void InstrEmitter::EmitSubregNode(SDNode *Node,
unsigned Reg;
MachineInstr *DefMI;
RegisterSDNode *R = dyn_cast<RegisterSDNode>(Node->getOperand(0));
if (R && TargetRegisterInfo::isPhysicalRegister(R->getReg())) {
if (R && Register::isPhysicalRegister(R->getReg())) {
Reg = R->getReg();
DefMI = nullptr;
} else {
@ -529,7 +528,7 @@ void InstrEmitter::EmitSubregNode(SDNode *Node,
// Reg may not support a SubIdx sub-register, and we may need to
// constrain its register class or issue a COPY to a compatible register
// class.
if (TargetRegisterInfo::isVirtualRegister(Reg))
if (Register::isVirtualRegister(Reg))
Reg = ConstrainForSubReg(Reg, SubIdx,
Node->getOperand(0).getSimpleValueType(),
Node->isDivergent(), Node->getDebugLoc());
@ -541,7 +540,7 @@ void InstrEmitter::EmitSubregNode(SDNode *Node,
MachineInstrBuilder CopyMI =
BuildMI(*MBB, InsertPos, Node->getDebugLoc(),
TII->get(TargetOpcode::COPY), VRBase);
if (TargetRegisterInfo::isVirtualRegister(Reg))
if (Register::isVirtualRegister(Reg))
CopyMI.addReg(Reg, 0, SubIdx);
else
CopyMI.addReg(TRI->getSubReg(Reg, SubIdx));
@ -649,7 +648,7 @@ void InstrEmitter::EmitRegSequence(SDNode *Node,
RegisterSDNode *R = dyn_cast<RegisterSDNode>(Node->getOperand(i-1));
// Skip physical registers as they don't have a vreg to get and we'll
// insert copies for them in TwoAddressInstructionPass anyway.
if (!R || !TargetRegisterInfo::isPhysicalRegister(R->getReg())) {
if (!R || !Register::isPhysicalRegister(R->getReg())) {
unsigned SubIdx = cast<ConstantSDNode>(Op)->getZExtValue();
unsigned SubReg = getVR(Node->getOperand(i-1), VRBaseMap);
const TargetRegisterClass *TRC = MRI->getRegClass(SubReg);
@ -961,7 +960,7 @@ EmitMachineNode(SDNode *Node, bool IsClone, bool IsCloned,
for (unsigned i = 0, e = F->getNumOperands(); i != e; ++i)
if (RegisterSDNode *R = dyn_cast<RegisterSDNode>(F->getOperand(i))) {
unsigned Reg = R->getReg();
if (TargetRegisterInfo::isPhysicalRegister(Reg))
if (Register::isPhysicalRegister(Reg))
UsedRegs.push_back(Reg);
}
}
@ -995,8 +994,7 @@ EmitSpecialNode(SDNode *Node, bool IsClone, bool IsCloned,
case ISD::CopyToReg: {
unsigned DestReg = cast<RegisterSDNode>(Node->getOperand(1))->getReg();
SDValue SrcVal = Node->getOperand(2);
if (TargetRegisterInfo::isVirtualRegister(DestReg) &&
SrcVal.isMachineOpcode() &&
if (Register::isVirtualRegister(DestReg) && SrcVal.isMachineOpcode() &&
SrcVal.getMachineOpcode() == TargetOpcode::IMPLICIT_DEF) {
// Instead building a COPY to that vreg destination, build an
// IMPLICIT_DEF instruction instead.
@ -1093,16 +1091,18 @@ EmitSpecialNode(SDNode *Node, bool IsClone, bool IsCloned,
// FIXME: Add dead flags for physical and virtual registers defined.
// For now, mark physical register defs as implicit to help fast
// regalloc. This makes inline asm look a lot like calls.
MIB.addReg(Reg, RegState::Define |
getImplRegState(TargetRegisterInfo::isPhysicalRegister(Reg)));
MIB.addReg(Reg,
RegState::Define |
getImplRegState(Register::isPhysicalRegister(Reg)));
}
break;
case InlineAsm::Kind_RegDefEarlyClobber:
case InlineAsm::Kind_Clobber:
for (unsigned j = 0; j != NumVals; ++j, ++i) {
unsigned Reg = cast<RegisterSDNode>(Node->getOperand(i))->getReg();
MIB.addReg(Reg, RegState::Define | RegState::EarlyClobber |
getImplRegState(TargetRegisterInfo::isPhysicalRegister(Reg)));
MIB.addReg(Reg,
RegState::Define | RegState::EarlyClobber |
getImplRegState(Register::isPhysicalRegister(Reg)));
ECRegs.push_back(Reg);
}
break;


@ -498,7 +498,7 @@ bool ScheduleDAGFast::DelayForLiveRegsBottomUp(SUnit *SU,
// Check for def of register or earlyclobber register.
for (; NumVals; --NumVals, ++i) {
unsigned Reg = cast<RegisterSDNode>(Node->getOperand(i))->getReg();
if (TargetRegisterInfo::isPhysicalRegister(Reg))
if (Register::isPhysicalRegister(Reg))
CheckForLiveRegDef(SU, Reg, LiveRegDefs, RegAdded, LRegs, TRI);
}
} else


@ -1374,7 +1374,7 @@ DelayForLiveRegsBottomUp(SUnit *SU, SmallVectorImpl<unsigned> &LRegs) {
// Check for def of register or earlyclobber register.
for (; NumVals; --NumVals, ++i) {
unsigned Reg = cast<RegisterSDNode>(Node->getOperand(i))->getReg();
if (TargetRegisterInfo::isPhysicalRegister(Reg))
if (Register::isPhysicalRegister(Reg))
CheckForLiveRegDef(SU, Reg, LiveRegDefs.get(), RegAdded, LRegs, TRI);
}
} else
@ -2358,7 +2358,7 @@ static bool hasOnlyLiveInOpers(const SUnit *SU) {
PredSU->getNode()->getOpcode() == ISD::CopyFromReg) {
unsigned Reg =
cast<RegisterSDNode>(PredSU->getNode()->getOperand(1))->getReg();
if (TargetRegisterInfo::isVirtualRegister(Reg)) {
if (Register::isVirtualRegister(Reg)) {
RetVal = true;
continue;
}
@ -2379,7 +2379,7 @@ static bool hasOnlyLiveOutUses(const SUnit *SU) {
if (SuccSU->getNode() && SuccSU->getNode()->getOpcode() == ISD::CopyToReg) {
unsigned Reg =
cast<RegisterSDNode>(SuccSU->getNode()->getOperand(1))->getReg();
if (TargetRegisterInfo::isVirtualRegister(Reg)) {
if (Register::isVirtualRegister(Reg)) {
RetVal = true;
continue;
}
@ -2948,8 +2948,8 @@ void RegReductionPQBase::PrescheduleNodesWithMultipleUses() {
// like other nodes from the perspective of scheduling heuristics.
if (SDNode *N = SU.getNode())
if (N->getOpcode() == ISD::CopyToReg &&
TargetRegisterInfo::isVirtualRegister
(cast<RegisterSDNode>(N->getOperand(1))->getReg()))
Register::isVirtualRegister(
cast<RegisterSDNode>(N->getOperand(1))->getReg()))
continue;
SDNode *PredFrameSetup = nullptr;
@ -2995,8 +2995,8 @@ void RegReductionPQBase::PrescheduleNodesWithMultipleUses() {
// like other nodes from the perspective of scheduling heuristics.
if (SDNode *N = SU.getNode())
if (N->getOpcode() == ISD::CopyFromReg &&
TargetRegisterInfo::isVirtualRegister
(cast<RegisterSDNode>(N->getOperand(1))->getReg()))
Register::isVirtualRegister(
cast<RegisterSDNode>(N->getOperand(1))->getReg()))
continue;
// Perform checks on the successors of PredSU.


@ -115,7 +115,7 @@ static void CheckForPhysRegDependency(SDNode *Def, SDNode *User, unsigned Op,
return;
unsigned Reg = cast<RegisterSDNode>(User->getOperand(1))->getReg();
if (TargetRegisterInfo::isVirtualRegister(Reg))
if (Register::isVirtualRegister(Reg))
return;
unsigned ResNo = User->getOperand(2).getResNo();
@ -656,7 +656,7 @@ void ScheduleDAGSDNodes::computeOperandLatency(SDNode *Def, SDNode *Use,
if (Latency > 1 && Use->getOpcode() == ISD::CopyToReg &&
!BB->succ_empty()) {
unsigned Reg = cast<RegisterSDNode>(Use->getOperand(1))->getReg();
if (TargetRegisterInfo::isVirtualRegister(Reg))
if (Register::isVirtualRegister(Reg))
// This copy is a liveout value. It is likely coalesced, so reduce the
// latency so not to penalize the def.
// FIXME: need target specific adjustment here?


@ -833,7 +833,7 @@ SDValue RegsForValue::getCopyFromRegs(SelectionDAG &DAG,
// If the source register was virtual and if we know something about it,
// add an assert node.
if (!TargetRegisterInfo::isVirtualRegister(Regs[Part+i]) ||
if (!Register::isVirtualRegister(Regs[Part + i]) ||
!RegisterVT.isInteger())
continue;
@ -948,8 +948,7 @@ void RegsForValue::AddInlineAsmOperands(unsigned Code, bool HasMatching,
unsigned Flag = InlineAsm::getFlagWord(Code, Regs.size());
if (HasMatching)
Flag = InlineAsm::getFlagWordForMatchingOp(Flag, MatchingIdx);
else if (!Regs.empty() &&
TargetRegisterInfo::isVirtualRegister(Regs.front())) {
else if (!Regs.empty() && Register::isVirtualRegister(Regs.front())) {
// Put the register class of the virtual registers in the flag word. That
// way, later passes can recompute register class constraints for inline
// assembly as well as normal instructions.
@ -9288,7 +9287,7 @@ SelectionDAGBuilder::CopyValueToVirtualRegister(const Value *V, unsigned Reg) {
assert((Op.getOpcode() != ISD::CopyFromReg ||
cast<RegisterSDNode>(Op.getOperand(1))->getReg() != Reg) &&
"Copy from a reg to the same reg!");
assert(!TargetRegisterInfo::isPhysicalRegister(Reg) && "Is a physreg");
assert(!Register::isPhysicalRegister(Reg) && "Is a physreg");
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
// If this is an InlineAsm we have to match the registers required, not the
@ -9782,7 +9781,7 @@ void SelectionDAGISel::LowerArguments(const Function &F) {
// Update the SwiftErrorVRegDefMap.
if (Res.getOpcode() == ISD::CopyFromReg && isSwiftErrorArg) {
unsigned Reg = cast<RegisterSDNode>(Res.getOperand(1))->getReg();
if (TargetRegisterInfo::isVirtualRegister(Reg))
if (Register::isVirtualRegister(Reg))
SwiftError->setCurrentVReg(FuncInfo->MBB, SwiftError->getFunctionArg(),
Reg);
}
@ -9794,7 +9793,7 @@ void SelectionDAGISel::LowerArguments(const Function &F) {
// FIXME: This isn't very clean... it would be nice to make this more
// general.
unsigned Reg = cast<RegisterSDNode>(Res.getOperand(1))->getReg();
if (TargetRegisterInfo::isVirtualRegister(Reg)) {
if (Register::isVirtualRegister(Reg)) {
FuncInfo->ValueMap[&Arg] = Reg;
continue;
}


@ -524,8 +524,7 @@ bool SelectionDAGISel::runOnMachineFunction(MachineFunction &mf) {
To = J->second;
}
// Make sure the new register has a sufficiently constrained register class.
if (TargetRegisterInfo::isVirtualRegister(From) &&
TargetRegisterInfo::isVirtualRegister(To))
if (Register::isVirtualRegister(From) && Register::isVirtualRegister(To))
MRI.constrainRegClass(To, MRI.getRegClass(From));
// Replace it.
@ -572,7 +571,7 @@ bool SelectionDAGISel::runOnMachineFunction(MachineFunction &mf) {
bool hasFI = MI->getOperand(0).isFI();
Register Reg =
hasFI ? TRI.getFrameRegister(*MF) : MI->getOperand(0).getReg();
if (TargetRegisterInfo::isPhysicalRegister(Reg))
if (Register::isPhysicalRegister(Reg))
EntryMBB->insert(EntryMBB->begin(), MI);
else {
MachineInstr *Def = RegInfo->getVRegDef(Reg);
@ -582,7 +581,7 @@ bool SelectionDAGISel::runOnMachineFunction(MachineFunction &mf) {
Def->getParent()->insert(std::next(InsertPos), MI);
} else
LLVM_DEBUG(dbgs() << "Dropping debug info for dead vreg"
<< TargetRegisterInfo::virtReg2Index(Reg) << "\n");
<< Register::virtReg2Index(Reg) << "\n");
}
// If Reg is live-in then update debug info to track its copy in a vreg.
@ -671,8 +670,7 @@ bool SelectionDAGISel::runOnMachineFunction(MachineFunction &mf) {
To = J->second;
}
// Make sure the new register has a sufficiently constrained register class.
if (TargetRegisterInfo::isVirtualRegister(From) &&
TargetRegisterInfo::isVirtualRegister(To))
if (Register::isVirtualRegister(From) && Register::isVirtualRegister(To))
MRI.constrainRegClass(To, MRI.getRegClass(From));
// Replace it.
@ -760,7 +758,7 @@ void SelectionDAGISel::ComputeLiveOutVRegInfo() {
continue;
unsigned DestReg = cast<RegisterSDNode>(N->getOperand(1))->getReg();
if (!TargetRegisterInfo::isVirtualRegister(DestReg))
if (!Register::isVirtualRegister(DestReg))
continue;
// Ignore non-integer values.
@ -1652,9 +1650,8 @@ static bool MIIsInTerminatorSequence(const MachineInstr &MI) {
// Make sure that the copy dest is not a vreg when the copy source is a
// physical register.
if (!OPI2->isReg() ||
(!TargetRegisterInfo::isPhysicalRegister(OPI->getReg()) &&
TargetRegisterInfo::isPhysicalRegister(OPI2->getReg())))
if (!OPI2->isReg() || (!Register::isPhysicalRegister(OPI->getReg()) &&
Register::isPhysicalRegister(OPI2->getReg())))
return false;
return true;


@ -281,8 +281,7 @@ bool ShrinkWrap::useOrDefCSROrFI(const MachineInstr &MI,
unsigned PhysReg = MO.getReg();
if (!PhysReg)
continue;
assert(TargetRegisterInfo::isPhysicalRegister(PhysReg) &&
"Unallocated register?!");
assert(Register::isPhysicalRegister(PhysReg) && "Unallocated register?!");
// The stack pointer is not normally described as a callee-saved register
// in calling convention definitions, so we need to watch for it
// separately. An SP mentioned by a call instruction, we can ignore,


@ -148,7 +148,7 @@ StackMaps::parseOperand(MachineInstr::const_mop_iterator MOI,
if (MOI->isImplicit())
return ++MOI;
assert(TargetRegisterInfo::isPhysicalRegister(MOI->getReg()) &&
assert(Register::isPhysicalRegister(MOI->getReg()) &&
"Virtreg operands should have been rewritten before now.");
const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(MOI->getReg());
assert(!MOI->getSubReg() && "Physical subreg still around.");


@ -221,7 +221,7 @@ void StackSlotColoring::InitializeSlots() {
for (auto *I : Intervals) {
LiveInterval &li = I->second;
LLVM_DEBUG(li.dump());
int FI = TargetRegisterInfo::stackSlot2Index(li.reg);
int FI = Register::stackSlot2Index(li.reg);
if (MFI->isDeadObjectIndex(FI))
continue;
@ -268,7 +268,7 @@ StackSlotColoring::OverlapWithAssignments(LiveInterval *li, int Color) const {
int StackSlotColoring::ColorSlot(LiveInterval *li) {
int Color = -1;
bool Share = false;
int FI = TargetRegisterInfo::stackSlot2Index(li->reg);
int FI = Register::stackSlot2Index(li->reg);
uint8_t StackID = MFI->getStackID(FI);
if (!DisableSharing) {
@ -330,7 +330,7 @@ bool StackSlotColoring::ColorSlots(MachineFunction &MF) {
bool Changed = false;
for (unsigned i = 0, e = SSIntervals.size(); i != e; ++i) {
LiveInterval *li = SSIntervals[i];
int SS = TargetRegisterInfo::stackSlot2Index(li->reg);
int SS = Register::stackSlot2Index(li->reg);
int NewSS = ColorSlot(li);
assert(NewSS >= 0 && "Stack coloring failed?");
SlotMapping[SS] = NewSS;
@ -343,7 +343,7 @@ bool StackSlotColoring::ColorSlots(MachineFunction &MF) {
LLVM_DEBUG(dbgs() << "\nSpill slots after coloring:\n");
for (unsigned i = 0, e = SSIntervals.size(); i != e; ++i) {
LiveInterval *li = SSIntervals[i];
int SS = TargetRegisterInfo::stackSlot2Index(li->reg);
int SS = Register::stackSlot2Index(li->reg);
li->weight = SlotWeights[SS];
}
// Sort them by new weight.


@ -385,7 +385,7 @@ void TailDuplicator::duplicateInstruction(
if (!MO.isReg())
continue;
unsigned Reg = MO.getReg();
if (!TargetRegisterInfo::isVirtualRegister(Reg))
if (!Register::isVirtualRegister(Reg))
continue;
if (MO.isDef()) {
const TargetRegisterClass *RC = MRI->getRegClass(Reg);


@ -184,10 +184,10 @@ MachineInstr *TargetInstrInfo::commuteInstructionImpl(MachineInstr &MI,
bool Reg2IsInternal = MI.getOperand(Idx2).isInternalRead();
// Avoid calling isRenamable for virtual registers since we assert that
// renamable property is only queried/set for physical registers.
bool Reg1IsRenamable = TargetRegisterInfo::isPhysicalRegister(Reg1)
bool Reg1IsRenamable = Register::isPhysicalRegister(Reg1)
? MI.getOperand(Idx1).isRenamable()
: false;
bool Reg2IsRenamable = TargetRegisterInfo::isPhysicalRegister(Reg2)
bool Reg2IsRenamable = Register::isPhysicalRegister(Reg2)
? MI.getOperand(Idx2).isRenamable()
: false;
// If destination is tied to either of the commuted source register, then
@ -229,9 +229,9 @@ MachineInstr *TargetInstrInfo::commuteInstructionImpl(MachineInstr &MI,
CommutedMI->getOperand(Idx1).setIsInternalRead(Reg2IsInternal);
// Avoid calling setIsRenamable for virtual registers since we assert that
// renamable property is only queried/set for physical registers.
if (TargetRegisterInfo::isPhysicalRegister(Reg1))
if (Register::isPhysicalRegister(Reg1))
CommutedMI->getOperand(Idx2).setIsRenamable(Reg1IsRenamable);
if (TargetRegisterInfo::isPhysicalRegister(Reg2))
if (Register::isPhysicalRegister(Reg2))
CommutedMI->getOperand(Idx1).setIsRenamable(Reg2IsRenamable);
return CommutedMI;
}
@ -446,13 +446,12 @@ static const TargetRegisterClass *canFoldCopy(const MachineInstr &MI,
unsigned FoldReg = FoldOp.getReg();
unsigned LiveReg = LiveOp.getReg();
assert(TargetRegisterInfo::isVirtualRegister(FoldReg) &&
"Cannot fold physregs");
assert(Register::isVirtualRegister(FoldReg) && "Cannot fold physregs");
const MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
const TargetRegisterClass *RC = MRI.getRegClass(FoldReg);
if (TargetRegisterInfo::isPhysicalRegister(LiveOp.getReg()))
if (Register::isPhysicalRegister(LiveOp.getReg()))
return RC->contains(LiveOp.getReg()) ? RC : nullptr;
if (RC->hasSubClassEq(MRI.getRegClass(LiveReg)))
@ -675,9 +674,9 @@ bool TargetInstrInfo::hasReassociableOperands(
// reassociate.
MachineInstr *MI1 = nullptr;
MachineInstr *MI2 = nullptr;
if (Op1.isReg() && TargetRegisterInfo::isVirtualRegister(Op1.getReg()))
if (Op1.isReg() && Register::isVirtualRegister(Op1.getReg()))
MI1 = MRI.getUniqueVRegDef(Op1.getReg());
if (Op2.isReg() && TargetRegisterInfo::isVirtualRegister(Op2.getReg()))
if (Op2.isReg() && Register::isVirtualRegister(Op2.getReg()))
MI2 = MRI.getUniqueVRegDef(Op2.getReg());
// And they need to be in the trace (otherwise, they won't have a depth).
@ -812,15 +811,15 @@ void TargetInstrInfo::reassociateOps(
unsigned RegY = OpY.getReg();
unsigned RegC = OpC.getReg();
if (TargetRegisterInfo::isVirtualRegister(RegA))
if (Register::isVirtualRegister(RegA))
MRI.constrainRegClass(RegA, RC);
if (TargetRegisterInfo::isVirtualRegister(RegB))
if (Register::isVirtualRegister(RegB))
MRI.constrainRegClass(RegB, RC);
if (TargetRegisterInfo::isVirtualRegister(RegX))
if (Register::isVirtualRegister(RegX))
MRI.constrainRegClass(RegX, RC);
if (TargetRegisterInfo::isVirtualRegister(RegY))
if (Register::isVirtualRegister(RegY))
MRI.constrainRegClass(RegY, RC);
if (TargetRegisterInfo::isVirtualRegister(RegC))
if (Register::isVirtualRegister(RegC))
MRI.constrainRegClass(RegC, RC);
// Create a new virtual register for the result of (X op Y) instead of
@ -894,8 +893,8 @@ bool TargetInstrInfo::isReallyTriviallyReMaterializableGeneric(
// doesn't read the other parts of the register. Otherwise it is really a
// read-modify-write operation on the full virtual register which cannot be
// moved safely.
if (TargetRegisterInfo::isVirtualRegister(DefReg) &&
MI.getOperand(0).getSubReg() && MI.readsVirtualRegister(DefReg))
if (Register::isVirtualRegister(DefReg) && MI.getOperand(0).getSubReg() &&
MI.readsVirtualRegister(DefReg))
return false;
// A load from a fixed stack slot can be rematerialized. This may be
@ -930,7 +929,7 @@ bool TargetInstrInfo::isReallyTriviallyReMaterializableGeneric(
continue;
// Check for a well-behaved physical register.
if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
if (Register::isPhysicalRegister(Reg)) {
if (MO.isUse()) {
// If the physreg has no defs anywhere, it's just an ambient register
// and we can freely move its uses. Alternatively, if it's allocatable,


@ -91,17 +91,16 @@ Printable printReg(unsigned Reg, const TargetRegisterInfo *TRI,
return Printable([Reg, TRI, SubIdx, MRI](raw_ostream &OS) {
if (!Reg)
OS << "$noreg";
else if (TargetRegisterInfo::isStackSlot(Reg))
OS << "SS#" << TargetRegisterInfo::stackSlot2Index(Reg);
else if (TargetRegisterInfo::isVirtualRegister(Reg)) {
else if (Register::isStackSlot(Reg))
OS << "SS#" << Register::stackSlot2Index(Reg);
else if (Register::isVirtualRegister(Reg)) {
StringRef Name = MRI ? MRI->getVRegName(Reg) : "";
if (Name != "") {
OS << '%' << Name;
} else {
OS << '%' << TargetRegisterInfo::virtReg2Index(Reg);
OS << '%' << Register::virtReg2Index(Reg);
}
}
else if (!TRI)
} else if (!TRI)
OS << '$' << "physreg" << Reg;
else if (Reg < TRI->getNumRegs()) {
OS << '$';
@ -143,8 +142,8 @@ Printable printRegUnit(unsigned Unit, const TargetRegisterInfo *TRI) {
Printable printVRegOrUnit(unsigned Unit, const TargetRegisterInfo *TRI) {
return Printable([Unit, TRI](raw_ostream &OS) {
if (TRI && TRI->isVirtualRegister(Unit)) {
OS << '%' << TargetRegisterInfo::virtReg2Index(Unit);
if (Register::isVirtualRegister(Unit)) {
OS << '%' << Register::virtReg2Index(Unit);
} else {
OS << printRegUnit(Unit, TRI);
}
@ -189,7 +188,8 @@ TargetRegisterInfo::getAllocatableClass(const TargetRegisterClass *RC) const {
/// the right type that contains this physreg.
const TargetRegisterClass *
TargetRegisterInfo::getMinimalPhysRegClass(unsigned reg, MVT VT) const {
assert(isPhysicalRegister(reg) && "reg must be a physical register");
assert(Register::isPhysicalRegister(reg) &&
"reg must be a physical register");
// Pick the most sub register class of the right type that contains
// this physreg.
@ -409,7 +409,7 @@ TargetRegisterInfo::getRegAllocationHints(unsigned VirtReg,
// Target-independent hints are either a physical or a virtual register.
unsigned Phys = Reg;
if (VRM && isVirtualRegister(Phys))
if (VRM && Register::isVirtualRegister(Phys))
Phys = VRM->getPhys(Phys);
// Don't add the same reg twice (Hints_MRI may contain multiple virtual
@ -417,7 +417,7 @@ TargetRegisterInfo::getRegAllocationHints(unsigned VirtReg,
if (!HintedRegs.insert(Phys).second)
continue;
// Check that Phys is a valid hint in VirtReg's register class.
if (!isPhysicalRegister(Phys))
if (!Register::isPhysicalRegister(Phys))
continue;
if (MRI.isReserved(Phys))
continue;
@ -440,7 +440,8 @@ bool TargetRegisterInfo::isCalleeSavedPhysReg(
const uint32_t *callerPreservedRegs =
getCallPreservedMask(MF, MF.getFunction().getCallingConv());
if (callerPreservedRegs) {
assert(isPhysicalRegister(PhysReg) && "Expected physical register");
assert(Register::isPhysicalRegister(PhysReg) &&
"Expected physical register");
return (callerPreservedRegs[PhysReg / 32] >> PhysReg % 32) & 1;
}
return false;
@ -479,7 +480,7 @@ bool TargetRegisterInfo::regmaskSubsetEqual(const uint32_t *mask0,
unsigned TargetRegisterInfo::getRegSizeInBits(unsigned Reg,
const MachineRegisterInfo &MRI) const {
const TargetRegisterClass *RC{};
if (isPhysicalRegister(Reg)) {
if (Register::isPhysicalRegister(Reg)) {
// The size is not directly available for physical registers.
// Instead, we need to access a register class that contains Reg and
// get the size of that register class.
@ -514,7 +515,7 @@ TargetRegisterInfo::lookThruCopyLike(unsigned SrcReg,
CopySrcReg = MI->getOperand(2).getReg();
}
if (!isVirtualRegister(CopySrcReg))
if (!Register::isVirtualRegister(CopySrcReg))
return CopySrcReg;
SrcReg = CopySrcReg;


@ -418,8 +418,8 @@ static bool isCopyToReg(MachineInstr &MI, const TargetInstrInfo *TII,
} else
return false;
IsSrcPhys = TargetRegisterInfo::isPhysicalRegister(SrcReg);
IsDstPhys = TargetRegisterInfo::isPhysicalRegister(DstReg);
IsSrcPhys = Register::isPhysicalRegister(SrcReg);
IsDstPhys = Register::isPhysicalRegister(DstReg);
return true;
}
@ -427,8 +427,7 @@ static bool isCopyToReg(MachineInstr &MI, const TargetInstrInfo *TII,
/// given instruction, is killed by the given instruction.
static bool isPlainlyKilled(MachineInstr *MI, unsigned Reg,
LiveIntervals *LIS) {
if (LIS && TargetRegisterInfo::isVirtualRegister(Reg) &&
!LIS->isNotInMIMap(*MI)) {
if (LIS && Register::isVirtualRegister(Reg) && !LIS->isNotInMIMap(*MI)) {
// FIXME: Sometimes tryInstructionTransform() will add instructions and
// test whether they can be folded before keeping them. In this case it
// sets a kill before recursively calling tryInstructionTransform() again.
@ -475,12 +474,12 @@ static bool isKilled(MachineInstr &MI, unsigned Reg,
MachineInstr *DefMI = &MI;
while (true) {
// All uses of physical registers are likely to be kills.
if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
if (Register::isPhysicalRegister(Reg) &&
(allowFalsePositives || MRI->hasOneUse(Reg)))
return true;
if (!isPlainlyKilled(DefMI, Reg, LIS))
return false;
if (TargetRegisterInfo::isPhysicalRegister(Reg))
if (Register::isPhysicalRegister(Reg))
return true;
MachineRegisterInfo::def_iterator Begin = MRI->def_begin(Reg);
// If there are multiple defs, we can't do a simple analysis, so just
@ -536,7 +535,7 @@ MachineInstr *findOnlyInterestingUse(unsigned Reg, MachineBasicBlock *MBB,
}
IsDstPhys = false;
if (isTwoAddrUse(UseMI, Reg, DstReg)) {
IsDstPhys = TargetRegisterInfo::isPhysicalRegister(DstReg);
IsDstPhys = Register::isPhysicalRegister(DstReg);
return &UseMI;
}
return nullptr;
@ -546,13 +545,13 @@ MachineInstr *findOnlyInterestingUse(unsigned Reg, MachineBasicBlock *MBB,
/// to.
static unsigned
getMappedReg(unsigned Reg, DenseMap<unsigned, unsigned> &RegMap) {
while (TargetRegisterInfo::isVirtualRegister(Reg)) {
while (Register::isVirtualRegister(Reg)) {
DenseMap<unsigned, unsigned>::iterator SI = RegMap.find(Reg);
if (SI == RegMap.end())
return 0;
Reg = SI->second;
}
if (TargetRegisterInfo::isPhysicalRegister(Reg))
if (Register::isPhysicalRegister(Reg))
return Reg;
return 0;
}
@ -1105,7 +1104,7 @@ rescheduleKillAboveMI(MachineBasicBlock::iterator &mi,
Uses.insert(MOReg);
if (isKill && MOReg != Reg)
Kills.insert(MOReg);
} else if (TargetRegisterInfo::isPhysicalRegister(MOReg)) {
} else if (Register::isPhysicalRegister(MOReg)) {
Defs.insert(MOReg);
if (!MO.isDead())
LiveDefs.insert(MOReg);
@ -1154,8 +1153,7 @@ rescheduleKillAboveMI(MachineBasicBlock::iterator &mi,
unsigned MOReg = OtherDefs[i];
if (Uses.count(MOReg))
return false;
if (TargetRegisterInfo::isPhysicalRegister(MOReg) &&
LiveDefs.count(MOReg))
if (Register::isPhysicalRegister(MOReg) && LiveDefs.count(MOReg))
return false;
// Physical register def is seen.
Defs.erase(MOReg);
@ -1279,11 +1277,11 @@ tryInstructionTransform(MachineBasicBlock::iterator &mi,
unsigned regA = MI.getOperand(DstIdx).getReg();
unsigned regB = MI.getOperand(SrcIdx).getReg();
assert(TargetRegisterInfo::isVirtualRegister(regB) &&
assert(Register::isVirtualRegister(regB) &&
"cannot make instruction into two-address form");
bool regBKilled = isKilled(MI, regB, MRI, TII, LIS, true);
if (TargetRegisterInfo::isVirtualRegister(regA))
if (Register::isVirtualRegister(regA))
scanUses(regA);
bool Commuted = tryInstructionCommute(&MI, DstIdx, SrcIdx, regBKilled, Dist);
@ -1399,8 +1397,7 @@ tryInstructionTransform(MachineBasicBlock::iterator &mi,
if (LV) {
for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI.getOperand(i);
if (MO.isReg() &&
TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
if (MO.isReg() && Register::isVirtualRegister(MO.getReg())) {
if (MO.isUse()) {
if (MO.isKill()) {
if (NewMIs[0]->killsRegister(MO.getReg()))
@ -1485,7 +1482,7 @@ collectTiedOperands(MachineInstr *MI, TiedOperandMap &TiedOperands) {
// Deal with undef uses immediately - simply rewrite the src operand.
if (SrcMO.isUndef() && !DstMO.getSubReg()) {
// Constrain the DstReg register class if required.
if (TargetRegisterInfo::isVirtualRegister(DstReg))
if (Register::isVirtualRegister(DstReg))
if (const TargetRegisterClass *RC = TII->getRegClass(MCID, SrcIdx,
TRI, *MF))
MRI->constrainRegClass(DstReg, RC);
@ -1538,7 +1535,7 @@ TwoAddressInstructionPass::processTiedPairs(MachineInstr *MI,
}
LastCopiedReg = RegA;
assert(TargetRegisterInfo::isVirtualRegister(RegB) &&
assert(Register::isVirtualRegister(RegB) &&
"cannot make instruction into two-address form");
#ifndef NDEBUG
@ -1559,14 +1556,13 @@ TwoAddressInstructionPass::processTiedPairs(MachineInstr *MI,
MIB.addReg(RegB, 0, SubRegB);
const TargetRegisterClass *RC = MRI->getRegClass(RegB);
if (SubRegB) {
if (TargetRegisterInfo::isVirtualRegister(RegA)) {
if (Register::isVirtualRegister(RegA)) {
assert(TRI->getMatchingSuperRegClass(RC, MRI->getRegClass(RegA),
SubRegB) &&
"tied subregister must be a truncation");
// The superreg class will not be used to constrain the subreg class.
RC = nullptr;
}
else {
} else {
assert(TRI->getMatchingSuperReg(RegA, SubRegB, MRI->getRegClass(RegB))
&& "tied subregister must be a truncation");
}
@ -1581,7 +1577,7 @@ TwoAddressInstructionPass::processTiedPairs(MachineInstr *MI,
if (LIS) {
LastCopyIdx = LIS->InsertMachineInstrInMaps(*PrevMI).getRegSlot();
if (TargetRegisterInfo::isVirtualRegister(RegA)) {
if (Register::isVirtualRegister(RegA)) {
LiveInterval &LI = LIS->getInterval(RegA);
VNInfo *VNI = LI.getNextValue(LastCopyIdx, LIS->getVNInfoAllocator());
SlotIndex endIdx =
@ -1601,8 +1597,7 @@ TwoAddressInstructionPass::processTiedPairs(MachineInstr *MI,
}
// Make sure regA is a legal regclass for the SrcIdx operand.
if (TargetRegisterInfo::isVirtualRegister(RegA) &&
TargetRegisterInfo::isVirtualRegister(RegB))
if (Register::isVirtualRegister(RegA) && Register::isVirtualRegister(RegB))
MRI->constrainRegClass(RegA, RC);
MO.setReg(RegA);
// The getMatchingSuper asserts guarantee that the register class projected
@ -1804,8 +1799,7 @@ void TwoAddressInstructionPass::
eliminateRegSequence(MachineBasicBlock::iterator &MBBI) {
MachineInstr &MI = *MBBI;
unsigned DstReg = MI.getOperand(0).getReg();
if (MI.getOperand(0).getSubReg() ||
TargetRegisterInfo::isPhysicalRegister(DstReg) ||
if (MI.getOperand(0).getSubReg() || Register::isPhysicalRegister(DstReg) ||
!(MI.getNumOperands() & 1)) {
LLVM_DEBUG(dbgs() << "Illegal REG_SEQUENCE instruction:" << MI);
llvm_unreachable(nullptr);
@ -1855,7 +1849,7 @@ eliminateRegSequence(MachineBasicBlock::iterator &MBBI) {
DefEmitted = true;
// Update LiveVariables' kill info.
if (LV && isKill && !TargetRegisterInfo::isPhysicalRegister(SrcReg))
if (LV && isKill && !Register::isPhysicalRegister(SrcReg))
LV->replaceKillInstruction(SrcReg, MI, *CopyMI);
LLVM_DEBUG(dbgs() << "Inserted: " << *CopyMI);

@ -81,8 +81,8 @@ void VirtRegMap::grow() {
}
void VirtRegMap::assignVirt2Phys(unsigned virtReg, MCPhysReg physReg) {
assert(TargetRegisterInfo::isVirtualRegister(virtReg) &&
TargetRegisterInfo::isPhysicalRegister(physReg));
assert(Register::isVirtualRegister(virtReg) &&
Register::isPhysicalRegister(physReg));
assert(Virt2PhysMap[virtReg] == NO_PHYS_REG &&
"attempt to assign physical register to already mapped "
"virtual register");
@ -103,22 +103,22 @@ bool VirtRegMap::hasPreferredPhys(unsigned VirtReg) {
unsigned Hint = MRI->getSimpleHint(VirtReg);
if (!Hint)
return false;
if (TargetRegisterInfo::isVirtualRegister(Hint))
if (Register::isVirtualRegister(Hint))
Hint = getPhys(Hint);
return getPhys(VirtReg) == Hint;
}
bool VirtRegMap::hasKnownPreference(unsigned VirtReg) {
std::pair<unsigned, unsigned> Hint = MRI->getRegAllocationHint(VirtReg);
if (TargetRegisterInfo::isPhysicalRegister(Hint.second))
if (Register::isPhysicalRegister(Hint.second))
return true;
if (TargetRegisterInfo::isVirtualRegister(Hint.second))
if (Register::isVirtualRegister(Hint.second))
return hasPhys(Hint.second);
return false;
}
int VirtRegMap::assignVirt2StackSlot(unsigned virtReg) {
assert(TargetRegisterInfo::isVirtualRegister(virtReg));
assert(Register::isVirtualRegister(virtReg));
assert(Virt2StackSlotMap[virtReg] == NO_STACK_SLOT &&
"attempt to assign stack slot to already spilled register");
const TargetRegisterClass* RC = MF->getRegInfo().getRegClass(virtReg);
@ -126,7 +126,7 @@ int VirtRegMap::assignVirt2StackSlot(unsigned virtReg) {
}
void VirtRegMap::assignVirt2StackSlot(unsigned virtReg, int SS) {
assert(TargetRegisterInfo::isVirtualRegister(virtReg));
assert(Register::isVirtualRegister(virtReg));
assert(Virt2StackSlotMap[virtReg] == NO_STACK_SLOT &&
"attempt to assign stack slot to already spilled register");
assert((SS >= 0 ||
@ -138,7 +138,7 @@ void VirtRegMap::assignVirt2StackSlot(unsigned virtReg, int SS) {
void VirtRegMap::print(raw_ostream &OS, const Module*) const {
OS << "********** REGISTER MAP **********\n";
for (unsigned i = 0, e = MRI->getNumVirtRegs(); i != e; ++i) {
unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
unsigned Reg = Register::index2VirtReg(i);
if (Virt2PhysMap[Reg] != (unsigned)VirtRegMap::NO_PHYS_REG) {
OS << '[' << printReg(Reg, TRI) << " -> "
<< printReg(Virt2PhysMap[Reg], TRI) << "] "
@ -147,7 +147,7 @@ void VirtRegMap::print(raw_ostream &OS, const Module*) const {
}
for (unsigned i = 0, e = MRI->getNumVirtRegs(); i != e; ++i) {
unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
unsigned Reg = Register::index2VirtReg(i);
if (Virt2StackSlotMap[Reg] != VirtRegMap::NO_STACK_SLOT) {
OS << '[' << printReg(Reg, TRI) << " -> fi#" << Virt2StackSlotMap[Reg]
<< "] " << TRI->getRegClassName(MRI->getRegClass(Reg)) << "\n";
@ -312,7 +312,7 @@ void VirtRegRewriter::addLiveInsForSubRanges(const LiveInterval &LI,
// assignments.
void VirtRegRewriter::addMBBLiveIns() {
for (unsigned Idx = 0, IdxE = MRI->getNumVirtRegs(); Idx != IdxE; ++Idx) {
unsigned VirtReg = TargetRegisterInfo::index2VirtReg(Idx);
unsigned VirtReg = Register::index2VirtReg(Idx);
if (MRI->reg_nodbg_empty(VirtReg))
continue;
LiveInterval &LI = LIS->getInterval(VirtReg);
@ -513,7 +513,7 @@ void VirtRegRewriter::rewrite() {
if (MO.isRegMask())
MRI->addPhysRegsUsedFromRegMask(MO.getRegMask());
if (!MO.isReg() || !TargetRegisterInfo::isVirtualRegister(MO.getReg()))
if (!MO.isReg() || !Register::isVirtualRegister(MO.getReg()))
continue;
unsigned VirtReg = MO.getReg();
unsigned PhysReg = VRM->getPhys(VirtReg);
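
The VirtRegMap hunks also pick up Register::index2VirtReg() for the printing and live-in loops. What makes the whole rename NFC is that none of these helpers ever consulted TargetRegisterInfo state; they are pure functions of the register number's encoding. The sketch below illustrates that encoding as I understand it (virtual registers carry bit 31, stack slots sit above bit 30); it uses its own names and is not a quotation of Register.h, so verify the details against the tree.

#include <cassert>

// Illustration of the register-number encoding the static predicates rely on.
static bool isStackSlotNum(unsigned Reg) { return int(Reg) >= (1 << 30); }

static bool isPhysicalRegisterNum(unsigned Reg) {
  assert(!isStackSlotNum(Reg) && "not a register");
  return int(Reg) > 0;          // physical registers are small positive numbers
}

static bool isVirtualRegisterNum(unsigned Reg) {
  assert(!isStackSlotNum(Reg) && "not a register");
  return int(Reg) < 0;          // virtual registers have bit 31 set
}

static unsigned index2VirtRegNum(unsigned Index) {
  return Index | (1u << 31);    // dense index 0, 1, 2, ... -> vreg number
}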

@ -105,14 +105,14 @@ static bool isGPR64(unsigned Reg, unsigned SubReg,
const MachineRegisterInfo *MRI) {
if (SubReg)
return false;
if (TargetRegisterInfo::isVirtualRegister(Reg))
if (Register::isVirtualRegister(Reg))
return MRI->getRegClass(Reg)->hasSuperClassEq(&AArch64::GPR64RegClass);
return AArch64::GPR64RegClass.contains(Reg);
}
static bool isFPR64(unsigned Reg, unsigned SubReg,
const MachineRegisterInfo *MRI) {
if (TargetRegisterInfo::isVirtualRegister(Reg))
if (Register::isVirtualRegister(Reg))
return (MRI->getRegClass(Reg)->hasSuperClassEq(&AArch64::FPR64RegClass) &&
SubReg == 0) ||
(MRI->getRegClass(Reg)->hasSuperClassEq(&AArch64::FPR128RegClass) &&

@ -486,7 +486,7 @@ void AArch64AsmPrinter::printOperand(const MachineInstr *MI, unsigned OpNum,
llvm_unreachable("<unknown operand type>");
case MachineOperand::MO_Register: {
unsigned Reg = MO.getReg();
assert(TargetRegisterInfo::isPhysicalRegister(Reg));
assert(Register::isPhysicalRegister(Reg));
assert(!MO.getSubReg() && "Subregs should be eliminated!");
O << AArch64InstPrinter::getRegisterName(Reg);
break;

@ -78,7 +78,7 @@ void AArch64CondBrTuning::getAnalysisUsage(AnalysisUsage &AU) const {
}
MachineInstr *AArch64CondBrTuning::getOperandDef(const MachineOperand &MO) {
if (!TargetRegisterInfo::isVirtualRegister(MO.getReg()))
if (!Register::isVirtualRegister(MO.getReg()))
return nullptr;
return MRI->getUniqueVRegDef(MO.getReg());
}

@ -259,7 +259,7 @@ bool SSACCmpConv::isDeadDef(unsigned DstReg) {
// Writes to the zero register are dead.
if (DstReg == AArch64::WZR || DstReg == AArch64::XZR)
return true;
if (!TargetRegisterInfo::isVirtualRegister(DstReg))
if (!Register::isVirtualRegister(DstReg))
return false;
// A virtual register def without any uses will be marked dead later, and
// eventually replaced by the zero register.

@ -146,7 +146,7 @@ void AArch64DeadRegisterDefinitions::processMachineBasicBlock(
// We should not have any relevant physreg defs that are replacable by
// zero before register allocation. So we just check for dead vreg defs.
unsigned Reg = MO.getReg();
if (!TargetRegisterInfo::isVirtualRegister(Reg) ||
if (!Register::isVirtualRegister(Reg) ||
(!MO.isDead() && !MRI->use_nodbg_empty(Reg)))
continue;
assert(!MO.isImplicit() && "Unexpected implicit def!");

@ -416,7 +416,7 @@ unsigned AArch64InstrInfo::insertBranch(
// Find the original register that VReg is copied from.
static unsigned removeCopies(const MachineRegisterInfo &MRI, unsigned VReg) {
while (TargetRegisterInfo::isVirtualRegister(VReg)) {
while (Register::isVirtualRegister(VReg)) {
const MachineInstr *DefMI = MRI.getVRegDef(VReg);
if (!DefMI->isFullCopy())
return VReg;
@ -431,7 +431,7 @@ static unsigned removeCopies(const MachineRegisterInfo &MRI, unsigned VReg) {
static unsigned canFoldIntoCSel(const MachineRegisterInfo &MRI, unsigned VReg,
unsigned *NewVReg = nullptr) {
VReg = removeCopies(MRI, VReg);
if (!TargetRegisterInfo::isVirtualRegister(VReg))
if (!Register::isVirtualRegister(VReg))
return 0;
bool Is64Bit = AArch64::GPR64allRegClass.hasSubClassEq(MRI.getRegClass(VReg));
@ -1072,7 +1072,7 @@ static bool UpdateOperandRegClass(MachineInstr &Instr) {
"Operand has register constraints without being a register!");
unsigned Reg = MO.getReg();
if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
if (Register::isPhysicalRegister(Reg)) {
if (!OpRegCstraints->contains(Reg))
return false;
} else if (!OpRegCstraints->hasSubClassEq(MRI->getRegClass(Reg)) &&
@ -2350,7 +2350,7 @@ static const MachineInstrBuilder &AddSubReg(const MachineInstrBuilder &MIB,
if (!SubIdx)
return MIB.addReg(Reg, State);
if (TargetRegisterInfo::isPhysicalRegister(Reg))
if (Register::isPhysicalRegister(Reg))
return MIB.addReg(TRI->getSubReg(Reg, SubIdx), State);
return MIB.addReg(Reg, State, SubIdx);
}
@ -2722,7 +2722,7 @@ static void storeRegPairToStackSlot(const TargetRegisterInfo &TRI,
MachineMemOperand *MMO) {
unsigned SrcReg0 = SrcReg;
unsigned SrcReg1 = SrcReg;
if (TargetRegisterInfo::isPhysicalRegister(SrcReg)) {
if (Register::isPhysicalRegister(SrcReg)) {
SrcReg0 = TRI.getSubReg(SrcReg, SubIdx0);
SubIdx0 = 0;
SrcReg1 = TRI.getSubReg(SrcReg, SubIdx1);
@ -2761,7 +2761,7 @@ void AArch64InstrInfo::storeRegToStackSlot(
case 4:
if (AArch64::GPR32allRegClass.hasSubClassEq(RC)) {
Opc = AArch64::STRWui;
if (TargetRegisterInfo::isVirtualRegister(SrcReg))
if (Register::isVirtualRegister(SrcReg))
MF.getRegInfo().constrainRegClass(SrcReg, &AArch64::GPR32RegClass);
else
assert(SrcReg != AArch64::WSP);
@ -2771,7 +2771,7 @@ void AArch64InstrInfo::storeRegToStackSlot(
case 8:
if (AArch64::GPR64allRegClass.hasSubClassEq(RC)) {
Opc = AArch64::STRXui;
if (TargetRegisterInfo::isVirtualRegister(SrcReg))
if (Register::isVirtualRegister(SrcReg))
MF.getRegInfo().constrainRegClass(SrcReg, &AArch64::GPR64RegClass);
else
assert(SrcReg != AArch64::SP);
@ -2852,7 +2852,7 @@ static void loadRegPairFromStackSlot(const TargetRegisterInfo &TRI,
unsigned DestReg0 = DestReg;
unsigned DestReg1 = DestReg;
bool IsUndef = true;
if (TargetRegisterInfo::isPhysicalRegister(DestReg)) {
if (Register::isPhysicalRegister(DestReg)) {
DestReg0 = TRI.getSubReg(DestReg, SubIdx0);
SubIdx0 = 0;
DestReg1 = TRI.getSubReg(DestReg, SubIdx1);
@ -2892,7 +2892,7 @@ void AArch64InstrInfo::loadRegFromStackSlot(
case 4:
if (AArch64::GPR32allRegClass.hasSubClassEq(RC)) {
Opc = AArch64::LDRWui;
if (TargetRegisterInfo::isVirtualRegister(DestReg))
if (Register::isVirtualRegister(DestReg))
MF.getRegInfo().constrainRegClass(DestReg, &AArch64::GPR32RegClass);
else
assert(DestReg != AArch64::WSP);
@ -2902,7 +2902,7 @@ void AArch64InstrInfo::loadRegFromStackSlot(
case 8:
if (AArch64::GPR64allRegClass.hasSubClassEq(RC)) {
Opc = AArch64::LDRXui;
if (TargetRegisterInfo::isVirtualRegister(DestReg))
if (Register::isVirtualRegister(DestReg))
MF.getRegInfo().constrainRegClass(DestReg, &AArch64::GPR64RegClass);
else
assert(DestReg != AArch64::SP);
@ -3081,13 +3081,11 @@ MachineInstr *AArch64InstrInfo::foldMemoryOperandImpl(
if (MI.isFullCopy()) {
unsigned DstReg = MI.getOperand(0).getReg();
unsigned SrcReg = MI.getOperand(1).getReg();
if (SrcReg == AArch64::SP &&
TargetRegisterInfo::isVirtualRegister(DstReg)) {
if (SrcReg == AArch64::SP && Register::isVirtualRegister(DstReg)) {
MF.getRegInfo().constrainRegClass(DstReg, &AArch64::GPR64RegClass);
return nullptr;
}
if (DstReg == AArch64::SP &&
TargetRegisterInfo::isVirtualRegister(SrcReg)) {
if (DstReg == AArch64::SP && Register::isVirtualRegister(SrcReg)) {
MF.getRegInfo().constrainRegClass(SrcReg, &AArch64::GPR64RegClass);
return nullptr;
}
@ -3132,9 +3130,8 @@ MachineInstr *AArch64InstrInfo::foldMemoryOperandImpl(
// This is slightly expensive to compute for physical regs since
// getMinimalPhysRegClass is slow.
auto getRegClass = [&](unsigned Reg) {
return TargetRegisterInfo::isVirtualRegister(Reg)
? MRI.getRegClass(Reg)
: TRI.getMinimalPhysRegClass(Reg);
return Register::isVirtualRegister(Reg) ? MRI.getRegClass(Reg)
: TRI.getMinimalPhysRegClass(Reg);
};
if (DstMO.getSubReg() == 0 && SrcMO.getSubReg() == 0) {
@ -3159,8 +3156,7 @@ MachineInstr *AArch64InstrInfo::foldMemoryOperandImpl(
//
// STRXui %xzr, %stack.0
//
if (IsSpill && DstMO.isUndef() &&
TargetRegisterInfo::isPhysicalRegister(SrcReg)) {
if (IsSpill && DstMO.isUndef() && Register::isPhysicalRegister(SrcReg)) {
assert(SrcMO.getSubReg() == 0 &&
"Unexpected subreg on physical register");
const TargetRegisterClass *SpillRC;
@ -3459,7 +3455,7 @@ static bool canCombine(MachineBasicBlock &MBB, MachineOperand &MO,
MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
MachineInstr *MI = nullptr;
if (MO.isReg() && TargetRegisterInfo::isVirtualRegister(MO.getReg()))
if (MO.isReg() && Register::isVirtualRegister(MO.getReg()))
MI = MRI.getUniqueVRegDef(MO.getReg());
// And it needs to be in the trace (otherwise, it won't have a depth).
if (!MI || MI->getParent() != &MBB || (unsigned)MI->getOpcode() != CombineOpc)
@ -3955,13 +3951,13 @@ genFusedMultiply(MachineFunction &MF, MachineRegisterInfo &MRI,
Src2IsKill = Root.getOperand(IdxOtherOpd).isKill();
}
if (TargetRegisterInfo::isVirtualRegister(ResultReg))
if (Register::isVirtualRegister(ResultReg))
MRI.constrainRegClass(ResultReg, RC);
if (TargetRegisterInfo::isVirtualRegister(SrcReg0))
if (Register::isVirtualRegister(SrcReg0))
MRI.constrainRegClass(SrcReg0, RC);
if (TargetRegisterInfo::isVirtualRegister(SrcReg1))
if (Register::isVirtualRegister(SrcReg1))
MRI.constrainRegClass(SrcReg1, RC);
if (TargetRegisterInfo::isVirtualRegister(SrcReg2))
if (Register::isVirtualRegister(SrcReg2))
MRI.constrainRegClass(SrcReg2, RC);
MachineInstrBuilder MIB;
@ -4021,13 +4017,13 @@ static MachineInstr *genMaddR(MachineFunction &MF, MachineRegisterInfo &MRI,
unsigned SrcReg1 = MUL->getOperand(2).getReg();
bool Src1IsKill = MUL->getOperand(2).isKill();
if (TargetRegisterInfo::isVirtualRegister(ResultReg))
if (Register::isVirtualRegister(ResultReg))
MRI.constrainRegClass(ResultReg, RC);
if (TargetRegisterInfo::isVirtualRegister(SrcReg0))
if (Register::isVirtualRegister(SrcReg0))
MRI.constrainRegClass(SrcReg0, RC);
if (TargetRegisterInfo::isVirtualRegister(SrcReg1))
if (Register::isVirtualRegister(SrcReg1))
MRI.constrainRegClass(SrcReg1, RC);
if (TargetRegisterInfo::isVirtualRegister(VR))
if (Register::isVirtualRegister(VR))
MRI.constrainRegClass(VR, RC);
MachineInstrBuilder MIB =
@ -4618,7 +4614,7 @@ bool AArch64InstrInfo::optimizeCondBranch(MachineInstr &MI) const {
MachineFunction *MF = MBB->getParent();
MachineRegisterInfo *MRI = &MF->getRegInfo();
unsigned VReg = MI.getOperand(0).getReg();
if (!TargetRegisterInfo::isVirtualRegister(VReg))
if (!Register::isVirtualRegister(VReg))
return false;
MachineInstr *DefMI = MRI->getVRegDef(VReg);
@ -4654,7 +4650,7 @@ bool AArch64InstrInfo::optimizeCondBranch(MachineInstr &MI) const {
MachineOperand &MO = DefMI->getOperand(1);
unsigned NewReg = MO.getReg();
if (!TargetRegisterInfo::isVirtualRegister(NewReg))
if (!Register::isVirtualRegister(NewReg))
return false;
assert(!MRI->def_empty(NewReg) && "Register must be defined.");
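
Several AArch64InstrInfo hunks pair the predicate with a register-class lookup: virtual registers are constrained or queried through MachineRegisterInfo, physical ones through TargetRegisterInfo. The sketch below mirrors the getRegClass lambda in foldMemoryOperandImpl under the new spelling; getRegClassFor() is a hypothetical wrapper written for this note, not part of the patch.

#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Register.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"

using namespace llvm;

// Virtual registers have their class recorded in MRI; physical registers fall
// back to the (slower) minimal physical register class query.
static const TargetRegisterClass *getRegClassFor(unsigned Reg,
                                                 const MachineRegisterInfo &MRI,
                                                 const TargetRegisterInfo &TRI) {
  return Register::isVirtualRegister(Reg) ? MRI.getRegClass(Reg)
                                          : TRI.getMinimalPhysRegClass(Reg);
}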

@ -373,7 +373,7 @@ static bool unsupportedBinOp(const MachineInstr &I,
// so, this will need to be taught about that, and we'll need to get the
// bank out of the minimal class for the register.
// Either way, this needs to be documented (and possibly verified).
if (!TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
if (!Register::isVirtualRegister(MO.getReg())) {
LLVM_DEBUG(dbgs() << "Generic inst has physical register operand\n");
return true;
}
@ -518,7 +518,7 @@ static bool isValidCopy(const MachineInstr &I, const RegisterBank &DstBank,
(DstSize == SrcSize ||
// Copies are a mean to setup initial types, the number of
// bits may not exactly match.
(TargetRegisterInfo::isPhysicalRegister(SrcReg) && DstSize <= SrcSize) ||
(Register::isPhysicalRegister(SrcReg) && DstSize <= SrcSize) ||
// Copies are a mean to copy bits around, as long as we are
// on the same register class, that's fine. Otherwise, that
// means we need some SUBREG_TO_REG or AND & co.
@ -555,7 +555,7 @@ static bool selectSubregisterCopy(MachineInstr &I, MachineRegisterInfo &MRI,
// It's possible that the destination register won't be constrained. Make
// sure that happens.
if (!TargetRegisterInfo::isPhysicalRegister(I.getOperand(0).getReg()))
if (!Register::isPhysicalRegister(I.getOperand(0).getReg()))
RBI.constrainGenericRegister(I.getOperand(0).getReg(), *To, MRI);
return true;
@ -623,11 +623,10 @@ static bool selectCopy(MachineInstr &I, const TargetInstrInfo &TII,
// result.
auto CheckCopy = [&]() {
// If we have a bitcast or something, we can't have physical registers.
assert(
(I.isCopy() ||
(!TargetRegisterInfo::isPhysicalRegister(I.getOperand(0).getReg()) &&
!TargetRegisterInfo::isPhysicalRegister(I.getOperand(1).getReg()))) &&
"No phys reg on generic operator!");
assert((I.isCopy() ||
(!Register::isPhysicalRegister(I.getOperand(0).getReg()) &&
!Register::isPhysicalRegister(I.getOperand(1).getReg()))) &&
"No phys reg on generic operator!");
assert(KnownValid || isValidCopy(I, DstRegBank, MRI, TRI, RBI));
(void)KnownValid;
return true;
@ -690,7 +689,7 @@ static bool selectCopy(MachineInstr &I, const TargetInstrInfo &TII,
// If the destination is a physical register, then there's nothing to
// change, so we're done.
if (TargetRegisterInfo::isPhysicalRegister(DstReg))
if (Register::isPhysicalRegister(DstReg))
return CheckCopy();
}
@ -3355,7 +3354,7 @@ bool AArch64InstructionSelector::tryOptSelect(MachineInstr &I) const {
// Can't see past copies from physregs.
if (Opc == TargetOpcode::COPY &&
TargetRegisterInfo::isPhysicalRegister(CondDef->getOperand(1).getReg()))
Register::isPhysicalRegister(CondDef->getOperand(1).getReg()))
return false;
CondDef = MRI.getVRegDef(CondDef->getOperand(1).getReg());

@ -162,11 +162,11 @@ bool A57ChainingConstraint::addIntraChainConstraint(PBQPRAGraph &G, unsigned Rd,
LiveIntervals &LIs = G.getMetadata().LIS;
if (TRI->isPhysicalRegister(Rd) || TRI->isPhysicalRegister(Ra)) {
LLVM_DEBUG(dbgs() << "Rd is a physical reg:" << TRI->isPhysicalRegister(Rd)
<< '\n');
LLVM_DEBUG(dbgs() << "Ra is a physical reg:" << TRI->isPhysicalRegister(Ra)
<< '\n');
if (Register::isPhysicalRegister(Rd) || Register::isPhysicalRegister(Ra)) {
LLVM_DEBUG(dbgs() << "Rd is a physical reg:"
<< Register::isPhysicalRegister(Rd) << '\n');
LLVM_DEBUG(dbgs() << "Ra is a physical reg:"
<< Register::isPhysicalRegister(Ra) << '\n');
return false;
}

@ -566,9 +566,9 @@ AArch64RegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
unsigned DstReg = MI.getOperand(0).getReg();
unsigned SrcReg = MI.getOperand(1).getReg();
// Check if one of the register is not a generic register.
if ((TargetRegisterInfo::isPhysicalRegister(DstReg) ||
if ((Register::isPhysicalRegister(DstReg) ||
!MRI.getType(DstReg).isValid()) ||
(TargetRegisterInfo::isPhysicalRegister(SrcReg) ||
(Register::isPhysicalRegister(SrcReg) ||
!MRI.getType(SrcReg).isValid())) {
const RegisterBank *DstRB = getRegBank(DstReg, MRI, TRI);
const RegisterBank *SrcRB = getRegBank(SrcReg, MRI, TRI);

@ -544,7 +544,7 @@ const TargetRegisterClass *AMDGPUDAGToDAGISel::getOperandRegClass(SDNode *N,
if (!N->isMachineOpcode()) {
if (N->getOpcode() == ISD::CopyToReg) {
unsigned Reg = cast<RegisterSDNode>(N->getOperand(1))->getReg();
if (TargetRegisterInfo::isVirtualRegister(Reg)) {
if (Register::isVirtualRegister(Reg)) {
MachineRegisterInfo &MRI = CurDAG->getMachineFunction().getRegInfo();
return MRI.getRegClass(Reg);
}

@ -62,7 +62,7 @@ AMDGPUInstructionSelector::AMDGPUInstructionSelector(
const char *AMDGPUInstructionSelector::getName() { return DEBUG_TYPE; }
static bool isSCC(Register Reg, const MachineRegisterInfo &MRI) {
if (TargetRegisterInfo::isPhysicalRegister(Reg))
if (Register::isPhysicalRegister(Reg))
return Reg == AMDGPU::SCC;
auto &RegClassOrBank = MRI.getRegClassOrRegBank(Reg);
@ -83,7 +83,7 @@ static bool isSCC(Register Reg, const MachineRegisterInfo &MRI) {
bool AMDGPUInstructionSelector::isVCC(Register Reg,
const MachineRegisterInfo &MRI) const {
if (TargetRegisterInfo::isPhysicalRegister(Reg))
if (Register::isPhysicalRegister(Reg))
return Reg == TRI.getVCC();
auto &RegClassOrBank = MRI.getRegClassOrRegBank(Reg);
@ -157,7 +157,7 @@ bool AMDGPUInstructionSelector::selectCOPY(MachineInstr &I) const {
}
for (const MachineOperand &MO : I.operands()) {
if (TargetRegisterInfo::isPhysicalRegister(MO.getReg()))
if (Register::isPhysicalRegister(MO.getReg()))
continue;
const TargetRegisterClass *RC =
@ -550,7 +550,7 @@ bool AMDGPUInstructionSelector::selectG_INSERT(MachineInstr &I) const {
for (const MachineOperand &MO : Ins->operands()) {
if (!MO.isReg())
continue;
if (TargetRegisterInfo::isPhysicalRegister(MO.getReg()))
if (Register::isPhysicalRegister(MO.getReg()))
continue;
const TargetRegisterClass *RC =

@ -694,7 +694,7 @@ void LinearizedRegion::storeLiveOutReg(MachineBasicBlock *MBB, unsigned Reg,
const MachineRegisterInfo *MRI,
const TargetRegisterInfo *TRI,
PHILinearize &PHIInfo) {
if (TRI->isVirtualRegister(Reg)) {
if (Register::isVirtualRegister(Reg)) {
LLVM_DEBUG(dbgs() << "Considering Register: " << printReg(Reg, TRI)
<< "\n");
// If this is a source register to a PHI we are chaining, it
@ -734,7 +734,7 @@ void LinearizedRegion::storeLiveOutRegRegion(RegionMRT *Region, unsigned Reg,
const MachineRegisterInfo *MRI,
const TargetRegisterInfo *TRI,
PHILinearize &PHIInfo) {
if (TRI->isVirtualRegister(Reg)) {
if (Register::isVirtualRegister(Reg)) {
LLVM_DEBUG(dbgs() << "Considering Register: " << printReg(Reg, TRI)
<< "\n");
for (auto &UI : MRI->use_operands(Reg)) {
@ -949,7 +949,7 @@ void LinearizedRegion::replaceRegister(unsigned Register, unsigned NewRegister,
(IncludeLoopPHI && IsLoopPHI);
if (ShouldReplace) {
if (TargetRegisterInfo::isPhysicalRegister(NewRegister)) {
if (Register::isPhysicalRegister(NewRegister)) {
LLVM_DEBUG(dbgs() << "Trying to substitute physical register: "
<< printReg(NewRegister, MRI->getTargetRegisterInfo())
<< "\n");
@ -1022,7 +1022,7 @@ void LinearizedRegion::removeFalseRegisterKills(MachineRegisterInfo *MRI) {
for (auto &RI : II.uses()) {
if (RI.isReg()) {
unsigned Reg = RI.getReg();
if (TRI->isVirtualRegister(Reg)) {
if (Register::isVirtualRegister(Reg)) {
if (hasNoDef(Reg, MRI))
continue;
if (!MRI->hasOneDef(Reg)) {
@ -2230,7 +2230,7 @@ void AMDGPUMachineCFGStructurizer::replaceRegisterWith(unsigned Register,
I != E;) {
MachineOperand &O = *I;
++I;
if (TargetRegisterInfo::isPhysicalRegister(NewRegister)) {
if (Register::isPhysicalRegister(NewRegister)) {
LLVM_DEBUG(dbgs() << "Trying to substitute physical register: "
<< printReg(NewRegister, MRI->getTargetRegisterInfo())
<< "\n");

@ -174,7 +174,7 @@ GCNNSAReassign::CheckNSA(const MachineInstr &MI, bool Fast) const {
for (unsigned I = 0; I < Info->VAddrDwords; ++I) {
const MachineOperand &Op = MI.getOperand(VAddr0Idx + I);
unsigned Reg = Op.getReg();
if (TargetRegisterInfo::isPhysicalRegister(Reg) || !VRM->isAssignedReg(Reg))
if (Register::isPhysicalRegister(Reg) || !VRM->isAssignedReg(Reg))
return NSA_Status::FIXED;
unsigned PhysReg = VRM->getPhys(Reg);

@ -230,7 +230,7 @@ private:
public:
Printable printReg(unsigned Reg, unsigned SubReg = 0) const {
return Printable([Reg, SubReg, this](raw_ostream &OS) {
if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
if (Register::isPhysicalRegister(Reg)) {
OS << llvm::printReg(Reg, TRI);
return;
}
@ -275,7 +275,7 @@ char GCNRegBankReassign::ID = 0;
char &llvm::GCNRegBankReassignID = GCNRegBankReassign::ID;
unsigned GCNRegBankReassign::getPhysRegBank(unsigned Reg) const {
assert (TargetRegisterInfo::isPhysicalRegister(Reg));
assert(Register::isPhysicalRegister(Reg));
const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
unsigned Size = TRI->getRegSizeInBits(*RC);
@ -293,7 +293,7 @@ unsigned GCNRegBankReassign::getPhysRegBank(unsigned Reg) const {
unsigned GCNRegBankReassign::getRegBankMask(unsigned Reg, unsigned SubReg,
int Bank) {
if (TargetRegisterInfo::isVirtualRegister(Reg)) {
if (Register::isVirtualRegister(Reg)) {
if (!VRM->isAssignedReg(Reg))
return 0;
@ -420,7 +420,7 @@ unsigned GCNRegBankReassign::getOperandGatherWeight(const MachineInstr& MI,
}
bool GCNRegBankReassign::isReassignable(unsigned Reg) const {
if (TargetRegisterInfo::isPhysicalRegister(Reg) || !VRM->isAssignedReg(Reg))
if (Register::isPhysicalRegister(Reg) || !VRM->isAssignedReg(Reg))
return false;
const MachineInstr *Def = MRI->getUniqueVRegDef(Reg);

@ -40,7 +40,7 @@ void llvm::printLivesAt(SlotIndex SI,
<< *LIS.getInstructionFromIndex(SI);
unsigned Num = 0;
for (unsigned I = 0, E = MRI.getNumVirtRegs(); I != E; ++I) {
const unsigned Reg = TargetRegisterInfo::index2VirtReg(I);
const unsigned Reg = Register::index2VirtReg(I);
if (!LIS.hasInterval(Reg))
continue;
const auto &LI = LIS.getInterval(Reg);
@ -84,7 +84,7 @@ bool llvm::isEqual(const GCNRPTracker::LiveRegSet &S1,
unsigned GCNRegPressure::getRegKind(unsigned Reg,
const MachineRegisterInfo &MRI) {
assert(TargetRegisterInfo::isVirtualRegister(Reg));
assert(Register::isVirtualRegister(Reg));
const auto RC = MRI.getRegClass(Reg);
auto STI = static_cast<const SIRegisterInfo*>(MRI.getTargetRegisterInfo());
return STI->isSGPRClass(RC) ?
@ -197,8 +197,7 @@ void GCNRegPressure::print(raw_ostream &OS, const GCNSubtarget *ST) const {
static LaneBitmask getDefRegMask(const MachineOperand &MO,
const MachineRegisterInfo &MRI) {
assert(MO.isDef() && MO.isReg() &&
TargetRegisterInfo::isVirtualRegister(MO.getReg()));
assert(MO.isDef() && MO.isReg() && Register::isVirtualRegister(MO.getReg()));
// We don't rely on read-undef flag because in case of tentative schedule
// tracking it isn't set correctly yet. This works correctly however since
@ -211,8 +210,7 @@ static LaneBitmask getDefRegMask(const MachineOperand &MO,
static LaneBitmask getUsedRegMask(const MachineOperand &MO,
const MachineRegisterInfo &MRI,
const LiveIntervals &LIS) {
assert(MO.isUse() && MO.isReg() &&
TargetRegisterInfo::isVirtualRegister(MO.getReg()));
assert(MO.isUse() && MO.isReg() && Register::isVirtualRegister(MO.getReg()));
if (auto SubReg = MO.getSubReg())
return MRI.getTargetRegisterInfo()->getSubRegIndexLaneMask(SubReg);
@ -233,7 +231,7 @@ collectVirtualRegUses(const MachineInstr &MI, const LiveIntervals &LIS,
const MachineRegisterInfo &MRI) {
SmallVector<RegisterMaskPair, 8> Res;
for (const auto &MO : MI.operands()) {
if (!MO.isReg() || !TargetRegisterInfo::isVirtualRegister(MO.getReg()))
if (!MO.isReg() || !Register::isVirtualRegister(MO.getReg()))
continue;
if (!MO.isUse() || !MO.readsReg())
continue;
@ -279,7 +277,7 @@ GCNRPTracker::LiveRegSet llvm::getLiveRegs(SlotIndex SI,
const MachineRegisterInfo &MRI) {
GCNRPTracker::LiveRegSet LiveRegs;
for (unsigned I = 0, E = MRI.getNumVirtRegs(); I != E; ++I) {
auto Reg = TargetRegisterInfo::index2VirtReg(I);
auto Reg = Register::index2VirtReg(I);
if (!LIS.hasInterval(Reg))
continue;
auto LiveMask = getLiveLaneMask(Reg, SI, LIS, MRI);
@ -330,8 +328,7 @@ void GCNUpwardRPTracker::recede(const MachineInstr &MI) {
MaxPressure = max(AtMIPressure, MaxPressure);
for (const auto &MO : MI.defs()) {
if (!MO.isReg() || !TargetRegisterInfo::isVirtualRegister(MO.getReg()) ||
MO.isDead())
if (!MO.isReg() || !Register::isVirtualRegister(MO.getReg()) || MO.isDead())
continue;
auto Reg = MO.getReg();
@ -410,7 +407,7 @@ void GCNDownwardRPTracker::advanceToNext() {
if (!MO.isReg())
continue;
unsigned Reg = MO.getReg();
if (!TargetRegisterInfo::isVirtualRegister(Reg))
if (!Register::isVirtualRegister(Reg))
continue;
auto &LiveMask = LiveRegs[Reg];
auto PrevMask = LiveMask;
@ -501,7 +498,7 @@ void GCNRPTracker::printLiveRegs(raw_ostream &OS, const LiveRegSet& LiveRegs,
const MachineRegisterInfo &MRI) {
const TargetRegisterInfo *TRI = MRI.getTargetRegisterInfo();
for (unsigned I = 0, E = MRI.getNumVirtRegs(); I != E; ++I) {
unsigned Reg = TargetRegisterInfo::index2VirtReg(I);
unsigned Reg = Register::index2VirtReg(I);
auto It = LiveRegs.find(Reg);
if (It != LiveRegs.end() && It->second.any())
OS << ' ' << printVRegOrUnit(Reg, TRI) << ':'
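
The GCNRegPressure hunks repeat the standard walk over every virtual register in a function, now phrased with Register::index2VirtReg(). A self-contained sketch of that loop follows; countLiveVRegs() is an illustrative name, not a function from the patch.

#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Register.h"

using namespace llvm;

// Count the virtual registers that currently have a live interval, visiting
// them by dense index exactly as the loops in this file do.
static unsigned countLiveVRegs(const MachineRegisterInfo &MRI,
                               const LiveIntervals &LIS) {
  unsigned Count = 0;
  for (unsigned I = 0, E = MRI.getNumVirtRegs(); I != E; ++I) {
    unsigned Reg = Register::index2VirtReg(I); // was TargetRegisterInfo::index2VirtReg
    if (LIS.hasInterval(Reg))
      ++Count;
  }
  return Count;
}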

@ -214,7 +214,7 @@ getLiveRegMap(Range &&R, bool After, LiveIntervals &LIS) {
DenseMap<MachineInstr *, GCNRPTracker::LiveRegSet> LiveRegMap;
SmallVector<SlotIndex, 32> LiveIdxs, SRLiveIdxs;
for (unsigned I = 0, E = MRI.getNumVirtRegs(); I != E; ++I) {
auto Reg = TargetRegisterInfo::index2VirtReg(I);
auto Reg = Register::index2VirtReg(I);
if (!LIS.hasInterval(Reg))
continue;
auto &LI = LIS.getInterval(Reg);

@ -335,7 +335,7 @@ R600TargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
case R600::MASK_WRITE: {
unsigned maskedRegister = MI.getOperand(0).getReg();
assert(TargetRegisterInfo::isVirtualRegister(maskedRegister));
assert(Register::isVirtualRegister(maskedRegister));
MachineInstr * defInstr = MRI.getVRegDef(maskedRegister);
TII->addFlag(*defInstr, 0, MO_FLAG_MASK);
break;

@ -97,8 +97,8 @@ bool R600InstrInfo::isLegalToSplitMBBAt(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI) const {
for (MachineInstr::const_mop_iterator I = MBBI->operands_begin(),
E = MBBI->operands_end(); I != E; ++I) {
if (I->isReg() && !TargetRegisterInfo::isVirtualRegister(I->getReg()) &&
I->isUse() && RI.isPhysRegLiveAcrossClauses(I->getReg()))
if (I->isReg() && !Register::isVirtualRegister(I->getReg()) && I->isUse() &&
RI.isPhysRegLiveAcrossClauses(I->getReg()))
return false;
}
return true;
@ -242,8 +242,7 @@ bool R600InstrInfo::readsLDSSrcReg(const MachineInstr &MI) const {
for (MachineInstr::const_mop_iterator I = MI.operands_begin(),
E = MI.operands_end();
I != E; ++I) {
if (!I->isReg() || !I->isUse() ||
TargetRegisterInfo::isVirtualRegister(I->getReg()))
if (!I->isReg() || !I->isUse() || Register::isVirtualRegister(I->getReg()))
continue;
if (R600::R600_LDS_SRC_REGRegClass.contains(I->getReg()))
@ -1193,8 +1192,7 @@ int R600InstrInfo::getIndirectIndexBegin(const MachineFunction &MF) const {
const TargetRegisterClass *IndirectRC = getIndirectAddrRegClass();
for (std::pair<unsigned, unsigned> LI : MRI.liveins()) {
unsigned Reg = LI.first;
if (TargetRegisterInfo::isVirtualRegister(Reg) ||
!IndirectRC->contains(Reg))
if (Register::isVirtualRegister(Reg) || !IndirectRC->contains(Reg))
continue;
unsigned RegIndex;
