AMDGPU: Use Register in more places

commit 178050c3ba (parent e8dcb6d05e)
llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
@@ -2099,7 +2099,7 @@ void AMDGPUDAGToDAGISel::SelectBRCOND(SDNode *N) {
   bool UseSCCBr = isCBranchSCC(N) && isUniformBr(N);
   unsigned BrOp = UseSCCBr ? AMDGPU::S_CBRANCH_SCC1 : AMDGPU::S_CBRANCH_VCCNZ;
-  unsigned CondReg = UseSCCBr ? (unsigned)AMDGPU::SCC : TRI->getVCC();
+  Register CondReg = UseSCCBr ? AMDGPU::SCC : TRI->getVCC();
   SDLoc SL(N);
 
   if (!UseSCCBr) {
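The pattern in this first hunk repeats throughout the commit: raw `unsigned` register numbers become `llvm::Register` (or `MCRegister` where only physical registers make sense). As a reader's aid, here is a minimal sketch of the idea behind `Register` — not the real class, which lives in llvm/include/llvm/CodeGen/Register.h — using the virtual-bit encoding LLVM actually uses, with everything else simplified:

```cpp
// Sketch only: a thin, trivially copyable wrapper over the raw unsigned
// register number. Implicit conversions to and from unsigned keep old call
// sites compiling during the migration, while member predicates replace
// free-standing helpers.
class RegisterSketch {
  unsigned Reg = 0; // 0 means "no register" (NoRegister is 0 on all targets)

  // As in LLVM, virtual register numbers have the high bit set.
  static constexpr unsigned VirtualBit = 1u << 31;

public:
  constexpr RegisterSketch() = default;
  constexpr RegisterSketch(unsigned R) : Reg(R) {}    // implicit on purpose
  constexpr operator unsigned() const { return Reg; } // interop with old APIs

  constexpr bool isVirtual() const { return (Reg & VirtualBit) != 0; }
  constexpr bool isPhysical() const { return Reg != 0 && !isVirtual(); }
  constexpr bool isValid() const { return Reg != 0; }
};

static_assert(sizeof(RegisterSketch) == sizeof(unsigned), "zero overhead");
```

Because construction from `unsigned` is implicit, the ternary above can produce `CondReg` from either the `AMDGPU::SCC` enum value or `TRI->getVCC()` without the old `(unsigned)` cast.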
llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -10862,8 +10862,8 @@ bool SITargetLowering::isSDNodeSourceOfDivergence(const SDNode * N,
     const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
     const MachineRegisterInfo &MRI = MF->getRegInfo();
     const SIRegisterInfo &TRI = ST.getInstrInfo()->getRegisterInfo();
-    unsigned Reg = R->getReg();
-    if (Register::isPhysicalRegister(Reg))
+    Register Reg = R->getReg();
+    if (Reg.isPhysical())
       return !TRI.isSGPRReg(MRI, Reg);
 
     if (MRI.isLiveIn(Reg)) {
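The second half of this hunk is the other recurring idiom: once the value is typed as `Register`, the static helper `Register::isPhysicalRegister(Reg)` can become the member call `Reg.isPhysical()`. Both spellings exist; a small equivalence sketch (assumes only the LLVM headers, nothing target-specific):

```cpp
#include "llvm/CodeGen/Register.h"
using llvm::Register;

// Equivalent predicates. The static form also accepts a raw unsigned,
// which is what pre-migration call sites passed.
bool physicalOld(unsigned Reg) { return Register::isPhysicalRegister(Reg); }
bool physicalNew(Register Reg) { return Reg.isPhysical(); }
```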
llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -654,7 +654,7 @@ void SIInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
       // Registers in the sequence are allocated contiguously so we can just
       // use register number to pick one of three round-robin temps.
       unsigned RegNo = DestReg % 3;
-      unsigned Tmp = RS.scavengeRegister(&AMDGPU::VGPR_32RegClass, 0);
+      Register Tmp = RS.scavengeRegister(&AMDGPU::VGPR_32RegClass, 0);
       if (!Tmp)
         report_fatal_error("Cannot scavenge VGPR to copy to AGPR");
       RS.setRegUsed(Tmp);
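The unchanged `if (!Tmp)` guard keeps working across the type change: the scavenger returns the raw value 0 (`AMDGPU::NoRegister`) on failure, and a zero-valued `Register` is falsy through its `unsigned` conversion. A hedged sketch of the failure check, with `scavenge()` as a made-up stand-in for `RS.scavengeRegister(...)`:

```cpp
#include "llvm/CodeGen/Register.h"
#include "llvm/Support/ErrorHandling.h"
using llvm::Register;

Register scavenge(); // hypothetical; returns 0 when nothing is free

void copyViaTemp() {
  Register Tmp = scavenge();
  if (!Tmp) // false exactly when Tmp holds 0, i.e. "no register"
    llvm::report_fatal_error("Cannot scavenge VGPR to copy to AGPR");
  // ... use Tmp ...
}
```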
@@ -938,10 +938,10 @@ void SIInstrInfo::insertVectorSelect(MachineBasicBlock &MBB,
   }
 }
 
-unsigned SIInstrInfo::insertEQ(MachineBasicBlock *MBB,
+Register SIInstrInfo::insertEQ(MachineBasicBlock *MBB,
                                MachineBasicBlock::iterator I,
                                const DebugLoc &DL,
-                               unsigned SrcReg, int Value) const {
+                               Register SrcReg, int Value) const {
   MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
   Register Reg = MRI.createVirtualRegister(RI.getBoolRC());
   BuildMI(*MBB, I, DL, get(AMDGPU::V_CMP_EQ_I32_e64), Reg)
@@ -951,10 +951,10 @@ unsigned SIInstrInfo::insertEQ(MachineBasicBlock *MBB,
   return Reg;
 }
 
-unsigned SIInstrInfo::insertNE(MachineBasicBlock *MBB,
+Register SIInstrInfo::insertNE(MachineBasicBlock *MBB,
                                MachineBasicBlock::iterator I,
                                const DebugLoc &DL,
-                               unsigned SrcReg, int Value) const {
+                               Register SrcReg, int Value) const {
   MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
   Register Reg = MRI.createVirtualRegister(RI.getBoolRC());
   BuildMI(*MBB, I, DL, get(AMDGPU::V_CMP_NE_I32_e64), Reg)
@@ -1274,7 +1274,7 @@ void SIInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
     // FIXME: Maybe this should not include a memoperand because it will be
     // lowered to non-memory instructions.
     const MCInstrDesc &OpDesc = get(getSGPRSpillRestoreOpcode(SpillSize));
-    if (Register::isVirtualRegister(DestReg) && SpillSize == 4) {
+    if (DestReg.isVirtual() && SpillSize == 4) {
       MachineRegisterInfo &MRI = MF->getRegInfo();
       MRI.constrainRegClass(DestReg, &AMDGPU::SReg_32_XM0RegClass);
     }
@@ -1315,7 +1315,7 @@ unsigned SIInstrInfo::calculateLDSSpillAddress(
   unsigned WorkGroupSize = MFI->getMaxFlatWorkGroupSize();
   unsigned WavefrontSize = ST.getWavefrontSize();
 
-  unsigned TIDReg = MFI->getTIDReg();
+  Register TIDReg = MFI->getTIDReg();
   if (!MFI->hasCalculatedTID()) {
     MachineBasicBlock &Entry = MBB.getParent()->front();
     MachineBasicBlock::iterator Insert = Entry.front();
@@ -1343,8 +1343,8 @@ unsigned SIInstrInfo::calculateLDSSpillAddress(
 
     RS->enterBasicBlock(Entry);
     // FIXME: Can we scavenge an SReg_64 and access the subregs?
-    unsigned STmp0 = RS->scavengeRegister(&AMDGPU::SGPR_32RegClass, 0);
-    unsigned STmp1 = RS->scavengeRegister(&AMDGPU::SGPR_32RegClass, 0);
+    Register STmp0 = RS->scavengeRegister(&AMDGPU::SGPR_32RegClass, 0);
+    Register STmp1 = RS->scavengeRegister(&AMDGPU::SGPR_32RegClass, 0);
     BuildMI(Entry, Insert, DL, get(AMDGPU::S_LOAD_DWORD_IMM), STmp0)
         .addReg(InputPtrReg)
         .addImm(SI::KernelInputOffsets::NGROUPS_Z);
@@ -2319,7 +2319,7 @@ void SIInstrInfo::insertSelect(MachineBasicBlock &MBB,
 
     I = MIB->getIterator();
 
-    SmallVector<unsigned, 8> Regs;
+    SmallVector<Register, 8> Regs;
    for (int Idx = 0; Idx != NElts; ++Idx) {
      Register DstElt = MRI.createVirtualRegister(EltRC);
      Regs.push_back(DstElt);
@@ -3215,7 +3215,7 @@ bool SIInstrInfo::usesConstantBus(const MachineRegisterInfo &MRI,
   }
 }
 
-static unsigned findImplicitSGPRRead(const MachineInstr &MI) {
+static Register findImplicitSGPRRead(const MachineInstr &MI) {
   for (const MachineOperand &MO : MI.implicit_operands()) {
     // We only care about reads.
     if (MO.isDef())
@@ -3523,8 +3523,8 @@ bool SIInstrInfo::verifyInstruction(const MachineInstr &MI,
     if (AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::imm) != -1)
       ++ConstantBusCount;
 
-    SmallVector<unsigned, 2> SGPRsUsed;
-    unsigned SGPRUsed = findImplicitSGPRRead(MI);
+    SmallVector<Register, 2> SGPRsUsed;
+    Register SGPRUsed = findImplicitSGPRRead(MI);
     if (SGPRUsed != AMDGPU::NoRegister) {
       ++ConstantBusCount;
       SGPRsUsed.push_back(SGPRUsed);
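Note the untouched context line `if (SGPRUsed != AMDGPU::NoRegister)`: a `Register` still compares against a raw enum value through its implicit `unsigned` conversion, which is what lets this migration proceed one function at a time. A hedged illustration of the interchangeable spellings (`0u` stands in for the target's `NoRegister`, which is 0):

```cpp
#include "llvm/CodeGen/Register.h"
using llvm::Register;

// All three agree, since "no register" is the raw value 0.
bool hasReg1(Register R) { return R != 0u; }     // old-style comparison
bool hasReg2(Register R) { return R.isValid(); } // explicit predicate
bool hasReg3(Register R) { return bool(R); }     // boolean test
```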
@@ -4316,7 +4316,7 @@ void SIInstrInfo::legalizeOperandsVOP3(MachineRegisterInfo &MRI,
   }
 }
 
-unsigned SIInstrInfo::readlaneVGPRToSGPR(unsigned SrcReg, MachineInstr &UseMI,
+Register SIInstrInfo::readlaneVGPRToSGPR(Register SrcReg, MachineInstr &UseMI,
                                          MachineRegisterInfo &MRI) const {
   const TargetRegisterClass *VRC = MRI.getRegClass(SrcReg);
   const TargetRegisterClass *SRC = RI.getEquivalentSGPRClass(VRC);
@@ -5722,7 +5722,7 @@ void SIInstrInfo::splitScalar64BitBFE(SetVectorType &Worklist,
 }
 
 void SIInstrInfo::addUsersToMoveToVALUWorklist(
-    unsigned DstReg,
+    Register DstReg,
     MachineRegisterInfo &MRI,
     SetVectorType &Worklist) const {
   for (MachineRegisterInfo::use_iterator I = MRI.use_begin(DstReg),
@@ -5888,7 +5888,7 @@ const TargetRegisterClass *SIInstrInfo::getDestEquivalentVGPRClass(
 }
 
 // Find the one SGPR operand we are allowed to use.
-unsigned SIInstrInfo::findUsedSGPR(const MachineInstr &MI,
+Register SIInstrInfo::findUsedSGPR(const MachineInstr &MI,
                                    int OpIndices[3]) const {
   const MCInstrDesc &Desc = MI.getDesc();
 
@@ -5901,11 +5901,11 @@ unsigned SIInstrInfo::findUsedSGPR(const MachineInstr &MI,
   //
   // If the operand's class is an SGPR, we can never move it.
 
-  unsigned SGPRReg = findImplicitSGPRRead(MI);
+  Register SGPRReg = findImplicitSGPRRead(MI);
   if (SGPRReg != AMDGPU::NoRegister)
     return SGPRReg;
 
-  unsigned UsedSGPRs[3] = { AMDGPU::NoRegister };
+  Register UsedSGPRs[3] = { AMDGPU::NoRegister };
   const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
 
   for (unsigned i = 0; i < 3; ++i) {
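`Register UsedSGPRs[3] = { AMDGPU::NoRegister };` needs no adjustment beyond the element type: the first element is implicitly constructed from the enum's raw value, and the remaining elements are value-initialized to the same "no register" state. A self-contained sketch (using `Register()` in place of the target-specific `AMDGPU::NoRegister`; both have raw value 0):

```cpp
#include "llvm/CodeGen/Register.h"
#include <cassert>
using llvm::Register;

void arrayInit() {
  Register UsedSGPRs[3] = {Register()}; // elements 1 and 2 value-initialized
  for (Register R : UsedSGPRs)
    assert(!R.isValid() && "every slot starts as 'no register'");
}
```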
@@ -6296,7 +6296,7 @@ MachineInstrBuilder
 SIInstrInfo::getAddNoCarry(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator I,
                            const DebugLoc &DL,
-                           unsigned DestReg) const {
+                           Register DestReg) const {
   if (ST.hasAddNoCarry())
     return BuildMI(MBB, I, DL, get(AMDGPU::V_ADD_U32_e64), DestReg);
 
llvm/lib/Target/AMDGPU/SIInstrInfo.h
@@ -119,7 +119,7 @@ private:
                             MachineRegisterInfo &MRI,
                             MachineInstr &Inst) const;
 
-  void addUsersToMoveToVALUWorklist(unsigned Reg, MachineRegisterInfo &MRI,
+  void addUsersToMoveToVALUWorklist(Register Reg, MachineRegisterInfo &MRI,
                                     SetVectorType &Worklist) const;
 
   void addSCCDefUsersToVALUWorklist(MachineOperand &Op,
@@ -132,7 +132,7 @@ private:
   bool checkInstOffsetsDoNotOverlap(const MachineInstr &MIa,
                                     const MachineInstr &MIb) const;
 
-  unsigned findUsedSGPR(const MachineInstr &MI, int OpIndices[3]) const;
+  Register findUsedSGPR(const MachineInstr &MI, int OpIndices[3]) const;
 
 protected:
   bool swapSourceModifiers(MachineInstr &MI,
@@ -211,13 +211,13 @@ public:
   const TargetRegisterClass *getPreferredSelectRegClass(
     unsigned Size) const;
 
-  unsigned insertNE(MachineBasicBlock *MBB,
+  Register insertNE(MachineBasicBlock *MBB,
                     MachineBasicBlock::iterator I, const DebugLoc &DL,
-                    unsigned SrcReg, int Value) const;
+                    Register SrcReg, int Value) const;
 
-  unsigned insertEQ(MachineBasicBlock *MBB,
+  Register insertEQ(MachineBasicBlock *MBB,
                     MachineBasicBlock::iterator I, const DebugLoc &DL,
-                    unsigned SrcReg, int Value) const;
+                    Register SrcReg, int Value) const;
 
   void storeRegToStackSlot(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator MI, Register SrcReg,
@@ -877,7 +877,7 @@ public:
   /// be used when it is know that the value in SrcReg is same across all
   /// threads in the wave.
   /// \returns The SGPR register that \p SrcReg was copied to.
-  unsigned readlaneVGPRToSGPR(unsigned SrcReg, MachineInstr &UseMI,
+  Register readlaneVGPRToSGPR(Register SrcReg, MachineInstr &UseMI,
                               MachineRegisterInfo &MRI) const;
 
   void legalizeOperandsSMRD(MachineRegisterInfo &MRI, MachineInstr &MI) const;
@@ -998,7 +998,7 @@ public:
   MachineInstrBuilder getAddNoCarry(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator I,
                                     const DebugLoc &DL,
-                                    unsigned DestReg) const;
+                                    Register DestReg) const;
 
   MachineInstrBuilder getAddNoCarry(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator I,
llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
@@ -56,7 +56,7 @@ SIRegisterInfo::SIRegisterInfo(const GCNSubtarget &ST)
 }
 
 void SIRegisterInfo::reserveRegisterTuples(BitVector &Reserved,
-                                           unsigned Reg) const {
+                                           MCRegister Reg) const {
   MCRegAliasIterator R(Reg, this, true);
 
   for (; R.isValid(); ++R)
@@ -187,10 +187,10 @@ unsigned SIRegisterInfo::getSubRegFromChannel(unsigned Channel,
   return SubRegFromChannelTable[NumRegIndex][Channel];
 }
 
-unsigned SIRegisterInfo::reservedPrivateSegmentBufferReg(
+MCRegister SIRegisterInfo::reservedPrivateSegmentBufferReg(
   const MachineFunction &MF) const {
   unsigned BaseIdx = alignDown(ST.getMaxNumSGPRs(MF), 4) - 4;
-  unsigned BaseReg(AMDGPU::SGPR_32RegClass.getRegister(BaseIdx));
+  MCRegister BaseReg(AMDGPU::SGPR_32RegClass.getRegister(BaseIdx));
   return getMatchingSuperReg(BaseReg, AMDGPU::sub0, &AMDGPU::SGPR_128RegClass);
 }
 
@@ -284,20 +284,20 @@ BitVector SIRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
   // We have to assume the SP is needed in case there are calls in the function,
   // which is detected after the function is lowered. If we aren't really going
   // to need SP, don't bother reserving it.
-  unsigned StackPtrReg = MFI->getStackPtrOffsetReg();
+  MCRegister StackPtrReg = MFI->getStackPtrOffsetReg();
 
-  if (StackPtrReg != AMDGPU::NoRegister) {
+  if (StackPtrReg) {
     reserveRegisterTuples(Reserved, StackPtrReg);
     assert(!isSubRegister(ScratchRSrcReg, StackPtrReg));
   }
 
-  unsigned FrameReg = MFI->getFrameOffsetReg();
-  if (FrameReg != AMDGPU::NoRegister) {
+  MCRegister FrameReg = MFI->getFrameOffsetReg();
+  if (FrameReg) {
     reserveRegisterTuples(Reserved, FrameReg);
     assert(!isSubRegister(ScratchRSrcReg, FrameReg));
   }
 
-  for (unsigned Reg : MFI->WWMReservedRegs) {
+  for (MCRegister Reg : MFI->WWMReservedRegs) {
     reserveRegisterTuples(Reserved, Reg);
   }
 
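This hunk shows the readability payoff of the wrapper types: `Reg != AMDGPU::NoRegister` collapses to a plain truth test, because "no register" is the zero value and `MCRegister` converts to `unsigned`. A minimal sketch of the pattern, with `getStackPtr` as a hypothetical accessor standing in for the real `MFI` calls:

```cpp
#include "llvm/MC/MCRegister.h"
using llvm::MCRegister;

MCRegister getStackPtr(); // hypothetical; may return "no register" (0)

void reserveIfSet() {
  MCRegister StackPtrReg = getStackPtr();
  if (StackPtrReg) {
    // Reserve it. The old spelling, StackPtrReg != AMDGPU::NoRegister,
    // tested exactly the same condition: raw value != 0.
  }
}
```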
@@ -660,10 +660,10 @@ static bool buildMUBUFOffsetLoadStore(const GCNSubtarget &ST,
 void SIRegisterInfo::buildSpillLoadStore(MachineBasicBlock::iterator MI,
                                          unsigned LoadStoreOp,
                                          int Index,
-                                         unsigned ValueReg,
+                                         Register ValueReg,
                                          bool IsKill,
-                                         unsigned ScratchRsrcReg,
-                                         unsigned ScratchOffsetReg,
+                                         MCRegister ScratchRsrcReg,
+                                         MCRegister ScratchOffsetReg,
                                          int64_t InstOffset,
                                          MachineMemOperand *MMO,
                                          RegScavenger *RS) const {
@@ -677,7 +677,7 @@ void SIRegisterInfo::buildSpillLoadStore(MachineBasicBlock::iterator MI,
   bool IsStore = Desc.mayStore();
 
   bool Scavenged = false;
-  unsigned SOffset = ScratchOffsetReg;
+  MCRegister SOffset = ScratchOffsetReg;
 
   const unsigned EltSize = 4;
   const TargetRegisterClass *RC = getRegClassForReg(MF->getRegInfo(), ValueReg);
@@ -696,7 +696,7 @@ void SIRegisterInfo::buildSpillLoadStore(MachineBasicBlock::iterator MI,
   assert((Offset % EltSize) == 0 && "unexpected VGPR spill offset");
 
   if (!isUInt<12>(Offset + Size - EltSize)) {
-    SOffset = AMDGPU::NoRegister;
+    SOffset = MCRegister();
 
     // We currently only support spilling VGPRs to EltSize boundaries, meaning
     // we can simplify the adjustment of Offset here to just scale with
@@ -708,8 +708,8 @@ void SIRegisterInfo::buildSpillLoadStore(MachineBasicBlock::iterator MI,
     if (RS)
       SOffset = RS->scavengeRegister(&AMDGPU::SGPR_32RegClass, MI, 0, false);
 
-    if (SOffset == AMDGPU::NoRegister) {
-      if (ScratchOffsetReg == AMDGPU::NoRegister) {
+    if (!SOffset) {
+      if (!ScratchOffsetReg) {
         report_fatal_error("could not scavenge SGPR to spill in entry function");
       }
       // There are no free SGPRs, and since we are in the process of spilling
@@ -1276,9 +1276,8 @@ StringRef SIRegisterInfo::getRegAsmName(unsigned Reg) const {
 
 // FIXME: This is very slow. It might be worth creating a map from physreg to
 // register class.
-const TargetRegisterClass *SIRegisterInfo::getPhysRegClass(unsigned Reg) const {
-  assert(!Register::isVirtualRegister(Reg));
-
+const TargetRegisterClass *
+SIRegisterInfo::getPhysRegClass(MCRegister Reg) const {
   static const TargetRegisterClass *const BaseClasses[] = {
     &AMDGPU::VGPR_32RegClass,
     &AMDGPU::SReg_32RegClass,
@@ -1536,15 +1535,15 @@ bool SIRegisterInfo::shouldRewriteCopySrc(
 /// Returns a register that is not used at any point in the function.
 /// If all registers are used, then this function will return
 // AMDGPU::NoRegister.
-unsigned
+MCRegister
 SIRegisterInfo::findUnusedRegister(const MachineRegisterInfo &MRI,
                                    const TargetRegisterClass *RC,
                                    const MachineFunction &MF) const {
 
-  for (unsigned Reg : *RC)
+  for (MCRegister Reg : *RC)
     if (MRI.isAllocatable(Reg) && !MRI.isPhysRegUsed(Reg))
       return Reg;
-  return AMDGPU::NoRegister;
+  return MCRegister();
 }
 
 ArrayRef<int16_t> SIRegisterInfo::getRegSplitParts(const TargetRegisterClass *RC,
@@ -1738,23 +1737,20 @@ ArrayRef<int16_t> SIRegisterInfo::getRegSplitParts(const TargetRegisterClass *RC
 
 const TargetRegisterClass*
 SIRegisterInfo::getRegClassForReg(const MachineRegisterInfo &MRI,
-                                  unsigned Reg) const {
-  if (Register::isVirtualRegister(Reg))
-    return MRI.getRegClass(Reg);
-
-  return getPhysRegClass(Reg);
+                                  Register Reg) const {
+  return Reg.isVirtual() ? MRI.getRegClass(Reg) : getPhysRegClass(Reg);
 }
 
 bool SIRegisterInfo::isVGPR(const MachineRegisterInfo &MRI,
-                            unsigned Reg) const {
-  const TargetRegisterClass * RC = getRegClassForReg(MRI, Reg);
+                            Register Reg) const {
+  const TargetRegisterClass *RC = getRegClassForReg(MRI, Reg);
   assert(RC && "Register class for the reg not found");
   return hasVGPRs(RC);
 }
 
 bool SIRegisterInfo::isAGPR(const MachineRegisterInfo &MRI,
-                            unsigned Reg) const {
-  const TargetRegisterClass * RC = getRegClassForReg(MRI, Reg);
+                            Register Reg) const {
+  const TargetRegisterClass *RC = getRegClassForReg(MRI, Reg);
   assert(RC && "Register class for the reg not found");
   return hasAGPRs(RC);
 }
 
@@ -1821,7 +1817,7 @@ const int *SIRegisterInfo::getRegUnitPressureSets(unsigned RegUnit) const {
   return AMDGPUGenRegisterInfo::getRegUnitPressureSets(RegUnit);
 }
 
-unsigned SIRegisterInfo::getReturnAddressReg(const MachineFunction &MF) const {
+MCRegister SIRegisterInfo::getReturnAddressReg(const MachineFunction &MF) const {
   // Not a callee saved register.
   return AMDGPU::SGPR30_SGPR31;
 }
@@ -1887,7 +1883,7 @@ SIRegisterInfo::getConstrainedRegClassForOperand(const MachineOperand &MO,
   return getAllocatableClass(RC);
 }
 
-unsigned SIRegisterInfo::getVCC() const {
+MCRegister SIRegisterInfo::getVCC() const {
   return isWave32 ? AMDGPU::VCC_LO : AMDGPU::VCC;
 }
 
@@ -1907,7 +1903,7 @@ SIRegisterInfo::getRegClass(unsigned RCID) const {
 }
 
 // Find reaching register definition
-MachineInstr *SIRegisterInfo::findReachingDef(unsigned Reg, unsigned SubReg,
+MachineInstr *SIRegisterInfo::findReachingDef(Register Reg, unsigned SubReg,
                                               MachineInstr &Use,
                                               MachineRegisterInfo &MRI,
                                               LiveIntervals *LIS) const {
@@ -1915,7 +1911,7 @@ MachineInstr *SIRegisterInfo::findReachingDef(unsigned Reg, unsigned SubReg,
   SlotIndex UseIdx = LIS->getInstructionIndex(Use);
   SlotIndex DefIdx;
 
-  if (Register::isVirtualRegister(Reg)) {
+  if (Reg.isVirtual()) {
     if (!LIS->hasInterval(Reg))
       return nullptr;
     LiveInterval &LI = LIS->getInterval(Reg);
llvm/lib/Target/AMDGPU/SIRegisterInfo.h
@@ -34,7 +34,7 @@ private:
   bool isWave32;
   BitVector RegPressureIgnoredUnits;
 
-  void reserveRegisterTuples(BitVector &, unsigned Reg) const;
+  void reserveRegisterTuples(BitVector &, MCRegister Reg) const;
 
 public:
   SIRegisterInfo(const GCNSubtarget &ST);
@@ -49,7 +49,7 @@ public:
 
   /// Return the end register initially reserved for the scratch buffer in case
   /// spilling is needed.
-  unsigned reservedPrivateSegmentBufferReg(const MachineFunction &MF) const;
+  MCRegister reservedPrivateSegmentBufferReg(const MachineFunction &MF) const;
 
   BitVector getReservedRegs(const MachineFunction &MF) const override;
 
@@ -112,13 +112,13 @@ public:
 
   StringRef getRegAsmName(unsigned Reg) const override;
 
-  unsigned getHWRegIndex(unsigned Reg) const {
+  unsigned getHWRegIndex(MCRegister Reg) const {
     return getEncodingValue(Reg) & 0xff;
   }
 
   /// Return the 'base' register class for this register.
   /// e.g. SGPR0 => SReg_32, VGPR => VGPR_32 SGPR0_SGPR1 -> SReg_32, etc.
-  const TargetRegisterClass *getPhysRegClass(unsigned Reg) const;
+  const TargetRegisterClass *getPhysRegClass(MCRegister Reg) const;
 
   /// \returns true if this class contains only SGPR registers
   bool isSGPRClass(const TargetRegisterClass *RC) const {
@@ -191,15 +191,15 @@ public:
   /// -4.0f, -2.0f, -1.0f, -0.5f, 0.0f, 0.5f, 1.0f, 2.0f, 4.0f.
   bool opCanUseInlineConstant(unsigned OpType) const;
 
-  unsigned findUnusedRegister(const MachineRegisterInfo &MRI,
-                              const TargetRegisterClass *RC,
-                              const MachineFunction &MF) const;
+  MCRegister findUnusedRegister(const MachineRegisterInfo &MRI,
+                                const TargetRegisterClass *RC,
+                                const MachineFunction &MF) const;
 
   const TargetRegisterClass *getRegClassForReg(const MachineRegisterInfo &MRI,
-                                               unsigned Reg) const;
-  bool isVGPR(const MachineRegisterInfo &MRI, unsigned Reg) const;
-  bool isAGPR(const MachineRegisterInfo &MRI, unsigned Reg) const;
-  bool isVectorRegister(const MachineRegisterInfo &MRI, unsigned Reg) const {
+                                               Register Reg) const;
+  bool isVGPR(const MachineRegisterInfo &MRI, Register Reg) const;
+  bool isAGPR(const MachineRegisterInfo &MRI, Register Reg) const;
+  bool isVectorRegister(const MachineRegisterInfo &MRI, Register Reg) const {
     return isVGPR(MRI, Reg) || isAGPR(MRI, Reg);
   }
 
@@ -226,7 +226,7 @@ public:
 
   const int *getRegUnitPressureSets(unsigned RegUnit) const override;
 
-  unsigned getReturnAddressReg(const MachineFunction &MF) const;
+  MCRegister getReturnAddressReg(const MachineFunction &MF) const;
 
   const TargetRegisterClass *
   getRegClassForSizeOnBank(unsigned Size,
@@ -254,12 +254,12 @@ public:
              : &AMDGPU::SReg_64_XEXECRegClass;
   }
 
-  unsigned getVCC() const;
+  MCRegister getVCC() const;
 
   const TargetRegisterClass *getRegClass(unsigned RCID) const;
 
   // Find reaching register definition
-  MachineInstr *findReachingDef(unsigned Reg, unsigned SubReg,
+  MachineInstr *findReachingDef(Register Reg, unsigned SubReg,
                                 MachineInstr &Use,
                                 MachineRegisterInfo &MRI,
                                 LiveIntervals *LIS) const;
@@ -292,10 +292,10 @@ private:
   void buildSpillLoadStore(MachineBasicBlock::iterator MI,
                            unsigned LoadStoreOp,
                            int Index,
-                           unsigned ValueReg,
+                           Register ValueReg,
                            bool ValueIsKill,
-                           unsigned ScratchRsrcReg,
-                           unsigned ScratchOffsetReg,
+                           MCRegister ScratchRsrcReg,
+                           MCRegister ScratchOffsetReg,
                            int64_t InstrOffset,
                            MachineMemOperand *MMO,
                            RegScavenger *RS) const;