[aarch64] Apply llvm-prefer-register-over-unsigned from clang-tidy to LLVM

Summary:
This clang-tidy check looks for unsigned integer variables whose initializer
starts with an implicit cast from llvm::Register, and changes the type of the
variable to llvm::Register (dropping the llvm:: where possible).
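
For illustration, a minimal before/after sketch of the rewrite the check
performs (hypothetical snippet; MO stands for any MachineOperand):

    // Before: MachineOperand::getReg() returns an llvm::Register that was
    // being implicitly converted to unsigned at the declaration.
    unsigned Reg = MO.getReg();

    // After: the declared type matches the initializer, so the implicit
    // cast disappears and later uses keep the richer Register interface.
    Register Reg = MO.getReg();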

Manual fixups in:
AArch64InstrInfo.cpp - genFusedMultiply() now takes a Register* instead of unsigned*
AArch64LoadStoreOptimizer.cpp - Ternary operator was ambiguous between Register/MCRegister. Settled on Register (see the sketch below)
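
A simplified sketch of that ternary, taken from the promoteLoadFromStore hunk
further down (the diagnosis that TRI->getMatchingSuperReg() yields an
MCRegister while LdRt is now a Register is an assumption based on the note
above; both types also convert to unsigned, so the conditional has no single
best common type):

    // Ambiguous once LdRt becomes a Register: the arms are MCRegister and
    // Register, and each additionally converts to unsigned.
    unsigned DestReg = IsStoreXReg
                           ? TRI->getMatchingSuperReg(LdRt, AArch64::sub_32,
                                                      &AArch64::GPR64RegClass)
                           : LdRt;

    // Resolved: wrap the MCRegister arm in Register() so both arms agree.
    unsigned DestReg =
        IsStoreXReg ? Register(TRI->getMatchingSuperReg(
                          LdRt, AArch64::sub_32, &AArch64::GPR64RegClass))
                    : LdRt;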

Depends on D65919

Reviewers: aemerson

Subscribers: jholewinski, MatzeB, qcolombet, dschuff, jyknight, dylanmckay, sdardis, nemanjai, jvesely, wdng, nhaehnle, sbc100, jgravelle-google, kristof.beyls, hiraditya, aheejin, kbarton, fedor.sergeev, javed.absar, asb, rbar, johnrusso, simoncook, apazos, sabuasal, niosHD, jrtc27, MaskRay, zzheng, edward-jones, atanasyan, rogfer01, MartinMosbeck, brucehoult, the_o, tpr, PkmX, jocewei, jsji, Petar.Avramovic, asbirlea, Jim, s.egerton, llvm-commits

Tags: #llvm

Differential Revision for full review was: https://reviews.llvm.org/D65962

llvm-svn: 368628
Author: Daniel Sanders
Date:   2019-08-12 22:40:53 +00:00
Commit: 5ae66e56cf (parent 05c145d694)
21 changed files with 155 additions and 155 deletions

llvm/lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp

@@ -552,7 +552,7 @@ bool AArch64A57FPLoadBalancing::colorChain(Chain *G, Color C,
   std::vector<unsigned> ToErase;
   for (auto &U : I.operands()) {
     if (U.isReg() && U.isUse() && Substs.find(U.getReg()) != Substs.end()) {
-      unsigned OrigReg = U.getReg();
+      Register OrigReg = U.getReg();
       U.setReg(Substs[OrigReg]);
       if (U.isKill())
         // Don't erase straight away, because there may be other operands
@@ -611,7 +611,7 @@ void AArch64A57FPLoadBalancing::scanInstruction(
     // Create a new chain. Multiplies don't require forwarding so can go on any
     // unit.
-    unsigned DestReg = MI->getOperand(0).getReg();
+    Register DestReg = MI->getOperand(0).getReg();
     LLVM_DEBUG(dbgs() << "New chain started for register "
                       << printReg(DestReg, TRI) << " at " << *MI);
@@ -624,8 +624,8 @@ void AArch64A57FPLoadBalancing::scanInstruction(
     // It is beneficial to keep MLAs on the same functional unit as their
     // accumulator operand.
-    unsigned DestReg = MI->getOperand(0).getReg();
-    unsigned AccumReg = MI->getOperand(3).getReg();
+    Register DestReg = MI->getOperand(0).getReg();
+    Register AccumReg = MI->getOperand(3).getReg();
     maybeKillChain(MI->getOperand(1), Idx, ActiveChains);
     maybeKillChain(MI->getOperand(2), Idx, ActiveChains);

llvm/lib/Target/AArch64/AArch64AdvSIMDScalarPass.cpp

@@ -201,8 +201,8 @@ bool AArch64AdvSIMDScalar::isProfitableToTransform(
   unsigned NumNewCopies = 3;
   unsigned NumRemovableCopies = 0;
-  unsigned OrigSrc0 = MI.getOperand(1).getReg();
-  unsigned OrigSrc1 = MI.getOperand(2).getReg();
+  Register OrigSrc0 = MI.getOperand(1).getReg();
+  Register OrigSrc1 = MI.getOperand(2).getReg();
   unsigned SubReg0;
   unsigned SubReg1;
   if (!MRI->def_empty(OrigSrc0)) {
@@ -236,7 +236,7 @@ bool AArch64AdvSIMDScalar::isProfitableToTransform(
   // any of the uses is a transformable instruction, it's likely the tranforms
   // will chain, enabling us to save a copy there, too. This is an aggressive
   // heuristic that approximates the graph based cost analysis described above.
-  unsigned Dst = MI.getOperand(0).getReg();
+  Register Dst = MI.getOperand(0).getReg();
   bool AllUsesAreCopies = true;
   for (MachineRegisterInfo::use_instr_nodbg_iterator
            Use = MRI->use_instr_nodbg_begin(Dst),
@@ -293,8 +293,8 @@ void AArch64AdvSIMDScalar::transformInstruction(MachineInstr &MI) {
   assert(OldOpc != NewOpc && "transform an instruction to itself?!");
   // Check if we need a copy for the source registers.
-  unsigned OrigSrc0 = MI.getOperand(1).getReg();
-  unsigned OrigSrc1 = MI.getOperand(2).getReg();
+  Register OrigSrc0 = MI.getOperand(1).getReg();
+  Register OrigSrc1 = MI.getOperand(2).getReg();
   unsigned Src0 = 0, SubReg0;
   unsigned Src1 = 0, SubReg1;
   bool KillSrc0 = false, KillSrc1 = false;
@@ -354,7 +354,7 @@ void AArch64AdvSIMDScalar::transformInstruction(MachineInstr &MI) {
   // Create a vreg for the destination.
   // FIXME: No need to do this if the ultimate user expects an FPR64.
   // Check for that and avoid the copy if possible.
-  unsigned Dst = MRI->createVirtualRegister(&AArch64::FPR64RegClass);
+  Register Dst = MRI->createVirtualRegister(&AArch64::FPR64RegClass);
   // For now, all of the new instructions have the same simple three-register
   // form, so no need to special case based on what instruction we're

llvm/lib/Target/AArch64/AArch64AsmPrinter.cpp

@@ -236,7 +236,7 @@ void AArch64AsmPrinter::EmitSled(const MachineInstr &MI, SledKind Kind)
 }
 void AArch64AsmPrinter::LowerHWASAN_CHECK_MEMACCESS(const MachineInstr &MI) {
-  unsigned Reg = MI.getOperand(0).getReg();
+  Register Reg = MI.getOperand(0).getReg();
   uint32_t AccessInfo = MI.getOperand(1).getImm();
   MCSymbol *&Sym = HwasanMemaccessSymbols[{Reg, AccessInfo}];
   if (!Sym) {
@@ -485,7 +485,7 @@ void AArch64AsmPrinter::printOperand(const MachineInstr *MI, unsigned OpNum,
   default:
     llvm_unreachable("<unknown operand type>");
   case MachineOperand::MO_Register: {
-    unsigned Reg = MO.getReg();
+    Register Reg = MO.getReg();
     assert(Register::isPhysicalRegister(Reg));
     assert(!MO.getSubReg() && "Subregs should be eliminated!");
     O << AArch64InstPrinter::getRegisterName(Reg);
@@ -509,7 +509,7 @@ void AArch64AsmPrinter::printOperand(const MachineInstr *MI, unsigned OpNum,
 bool AArch64AsmPrinter::printAsmMRegister(const MachineOperand &MO, char Mode,
                                           raw_ostream &O) {
-  unsigned Reg = MO.getReg();
+  Register Reg = MO.getReg();
   switch (Mode) {
   default:
     return true; // Unknown mode.
@@ -533,7 +533,7 @@ bool AArch64AsmPrinter::printAsmRegInClass(const MachineOperand &MO,
                                            bool isVector, raw_ostream &O) {
   assert(MO.isReg() && "Should only get here with a register!");
   const TargetRegisterInfo *RI = STI->getRegisterInfo();
-  unsigned Reg = MO.getReg();
+  Register Reg = MO.getReg();
   unsigned RegToPrint = RC->getRegister(RI->getEncodingValue(Reg));
   assert(RI->regsOverlap(RegToPrint, Reg));
   O << AArch64InstPrinter::getRegisterName(
@@ -604,7 +604,7 @@ bool AArch64AsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNum,
     // According to ARM, we should emit x and v registers unless we have a
     // modifier.
     if (MO.isReg()) {
-      unsigned Reg = MO.getReg();
+      Register Reg = MO.getReg();
      // If this is a w or x register, print an x register.
       if (AArch64::GPR32allRegClass.contains(Reg) ||
@@ -724,12 +724,12 @@ void AArch64AsmPrinter::emitJumpTableEntry(const MachineJumpTableInfo *MJTI,
 ///     add xDest, xDest, xScratch, lsl #2
 void AArch64AsmPrinter::LowerJumpTableDestSmall(llvm::MCStreamer &OutStreamer,
                                                 const llvm::MachineInstr &MI) {
-  unsigned DestReg = MI.getOperand(0).getReg();
-  unsigned ScratchReg = MI.getOperand(1).getReg();
-  unsigned ScratchRegW =
+  Register DestReg = MI.getOperand(0).getReg();
+  Register ScratchReg = MI.getOperand(1).getReg();
+  Register ScratchRegW =
       STI->getRegisterInfo()->getSubReg(ScratchReg, AArch64::sub_32);
-  unsigned TableReg = MI.getOperand(2).getReg();
-  unsigned EntryReg = MI.getOperand(3).getReg();
+  Register TableReg = MI.getOperand(2).getReg();
+  Register EntryReg = MI.getOperand(3).getReg();
   int JTIdx = MI.getOperand(4).getIndex();
   bool IsByteEntry = MI.getOpcode() == AArch64::JumpTableDest8;
@@ -799,7 +799,7 @@ void AArch64AsmPrinter::LowerPATCHPOINT(MCStreamer &OutStreamer, StackMaps &SM,
   if (CallTarget) {
     assert((CallTarget & 0xFFFFFFFFFFFF) == CallTarget &&
            "High 16 bits of call target should be zero.");
-    unsigned ScratchReg = MI.getOperand(Opers.getNextScratchIdx()).getReg();
+    Register ScratchReg = MI.getOperand(Opers.getNextScratchIdx()).getReg();
     EncodedBytes = 16;
     // Materialize the jump address:
     EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::MOVZXi)
@@ -829,7 +829,7 @@ void AArch64AsmPrinter::LowerPATCHPOINT(MCStreamer &OutStreamer, StackMaps &SM,
 }
 void AArch64AsmPrinter::EmitFMov0(const MachineInstr &MI) {
-  unsigned DestReg = MI.getOperand(0).getReg();
+  Register DestReg = MI.getOperand(0).getReg();
   if (STI->hasZeroCycleZeroingFP() && !STI->hasZeroCycleZeroingFPWorkaround()) {
     // Convert H/S/D register to corresponding Q register
     if (AArch64::H0 <= DestReg && DestReg <= AArch64::H31)
@@ -893,32 +893,32 @@ void AArch64AsmPrinter::EmitInstruction(const MachineInstr *MI) {
   default:
     break;
   case AArch64::MOVMCSym: {
-    unsigned DestReg = MI->getOperand(0).getReg();
+    Register DestReg = MI->getOperand(0).getReg();
     const MachineOperand &MO_Sym = MI->getOperand(1);
     MachineOperand Hi_MOSym(MO_Sym), Lo_MOSym(MO_Sym);
     MCOperand Hi_MCSym, Lo_MCSym;
     Hi_MOSym.setTargetFlags(AArch64II::MO_G1 | AArch64II::MO_S);
     Lo_MOSym.setTargetFlags(AArch64II::MO_G0 | AArch64II::MO_NC);
     MCInstLowering.lowerOperand(Hi_MOSym, Hi_MCSym);
     MCInstLowering.lowerOperand(Lo_MOSym, Lo_MCSym);
     MCInst MovZ;
     MovZ.setOpcode(AArch64::MOVZXi);
     MovZ.addOperand(MCOperand::createReg(DestReg));
     MovZ.addOperand(Hi_MCSym);
     MovZ.addOperand(MCOperand::createImm(16));
     EmitToStreamer(*OutStreamer, MovZ);
     MCInst MovK;
     MovK.setOpcode(AArch64::MOVKXi);
     MovK.addOperand(MCOperand::createReg(DestReg));
     MovK.addOperand(MCOperand::createReg(DestReg));
     MovK.addOperand(Lo_MCSym);
     MovK.addOperand(MCOperand::createImm(0));
     EmitToStreamer(*OutStreamer, MovK);
     return;
   }
   case AArch64::MOVIv2d_ns:
     // If the target has <rdar://problem/16473581>, lower this

llvm/lib/Target/AArch64/AArch64CondBrTuning.cpp

@@ -98,7 +98,7 @@ MachineInstr *AArch64CondBrTuning::convertToFlagSetting(MachineInstr &MI,
   }
   bool Is64Bit;
   unsigned NewOpc = TII->convertToFlagSettingOpc(MI.getOpcode(), Is64Bit);
-  unsigned NewDestReg = MI.getOperand(0).getReg();
+  Register NewDestReg = MI.getOperand(0).getReg();
   if (MRI->hasOneNonDBGUse(MI.getOperand(0).getReg()))
     NewDestReg = Is64Bit ? AArch64::XZR : AArch64::WZR;

llvm/lib/Target/AArch64/AArch64ConditionalCompares.cpp

@@ -220,7 +220,7 @@ bool SSACCmpConv::trivialTailPHIs() {
     // PHI operands come in (VReg, MBB) pairs.
     for (unsigned oi = 1, oe = I.getNumOperands(); oi != oe; oi += 2) {
       MachineBasicBlock *MBB = I.getOperand(oi + 1).getMBB();
-      unsigned Reg = I.getOperand(oi).getReg();
+      Register Reg = I.getOperand(oi).getReg();
       if (MBB == Head) {
         assert((!HeadReg || HeadReg == Reg) && "Inconsistent PHI operands");
         HeadReg = Reg;
@@ -631,7 +631,7 @@ void SSACCmpConv::convert(SmallVectorImpl<MachineBasicBlock *> &RemovedBlocks) {
   }
   const MCInstrDesc &MCID = TII->get(Opc);
   // Create a dummy virtual register for the SUBS def.
-  unsigned DestReg =
+  Register DestReg =
       MRI->createVirtualRegister(TII->getRegClass(MCID, 0, TRI, *MF));
   // Insert a SUBS Rn, #0 instruction instead of the cbz / cbnz.
   BuildMI(*Head, Head->end(), TermDL, MCID)

llvm/lib/Target/AArch64/AArch64DeadRegisterDefinitionsPass.cpp

@@ -145,7 +145,7 @@ void AArch64DeadRegisterDefinitions::processMachineBasicBlock(
       continue;
     // We should not have any relevant physreg defs that are replacable by
     // zero before register allocation. So we just check for dead vreg defs.
-    unsigned Reg = MO.getReg();
+    Register Reg = MO.getReg();
     if (!Register::isVirtualRegister(Reg) ||
         (!MO.isDead() && !MRI->use_nodbg_empty(Reg)))
       continue;

llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp

@@ -109,7 +109,7 @@ bool AArch64ExpandPseudo::expandMOVImm(MachineBasicBlock &MBB,
                                        MachineBasicBlock::iterator MBBI,
                                        unsigned BitSize) {
   MachineInstr &MI = *MBBI;
-  unsigned DstReg = MI.getOperand(0).getReg();
+  Register DstReg = MI.getOperand(0).getReg();
   uint64_t Imm = MI.getOperand(1).getImm();
   if (DstReg == AArch64::XZR || DstReg == AArch64::WZR) {
@@ -150,7 +150,7 @@ bool AArch64ExpandPseudo::expandMOVImm(MachineBasicBlock &MBB,
     } break;
     case AArch64::MOVKWi:
     case AArch64::MOVKXi: {
-      unsigned DstReg = MI.getOperand(0).getReg();
+      Register DstReg = MI.getOperand(0).getReg();
       bool DstIsDead = MI.getOperand(0).isDead();
       MIBS.push_back(BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(I->Opcode))
                          .addReg(DstReg,
@@ -174,14 +174,14 @@ bool AArch64ExpandPseudo::expandCMP_SWAP(
   MachineInstr &MI = *MBBI;
   DebugLoc DL = MI.getDebugLoc();
   const MachineOperand &Dest = MI.getOperand(0);
-  unsigned StatusReg = MI.getOperand(1).getReg();
+  Register StatusReg = MI.getOperand(1).getReg();
   bool StatusDead = MI.getOperand(1).isDead();
   // Duplicating undef operands into 2 instructions does not guarantee the same
   // value on both; However undef should be replaced by xzr anyway.
   assert(!MI.getOperand(2).isUndef() && "cannot handle undef");
-  unsigned AddrReg = MI.getOperand(2).getReg();
-  unsigned DesiredReg = MI.getOperand(3).getReg();
-  unsigned NewReg = MI.getOperand(4).getReg();
+  Register AddrReg = MI.getOperand(2).getReg();
+  Register DesiredReg = MI.getOperand(3).getReg();
+  Register NewReg = MI.getOperand(4).getReg();
   MachineFunction *MF = MBB.getParent();
   auto LoadCmpBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
@@ -254,16 +254,16 @@ bool AArch64ExpandPseudo::expandCMP_SWAP_128(
   DebugLoc DL = MI.getDebugLoc();
   MachineOperand &DestLo = MI.getOperand(0);
   MachineOperand &DestHi = MI.getOperand(1);
-  unsigned StatusReg = MI.getOperand(2).getReg();
+  Register StatusReg = MI.getOperand(2).getReg();
   bool StatusDead = MI.getOperand(2).isDead();
   // Duplicating undef operands into 2 instructions does not guarantee the same
   // value on both; However undef should be replaced by xzr anyway.
   assert(!MI.getOperand(3).isUndef() && "cannot handle undef");
-  unsigned AddrReg = MI.getOperand(3).getReg();
-  unsigned DesiredLoReg = MI.getOperand(4).getReg();
-  unsigned DesiredHiReg = MI.getOperand(5).getReg();
-  unsigned NewLoReg = MI.getOperand(6).getReg();
-  unsigned NewHiReg = MI.getOperand(7).getReg();
+  Register AddrReg = MI.getOperand(3).getReg();
+  Register DesiredLoReg = MI.getOperand(4).getReg();
+  Register DesiredHiReg = MI.getOperand(5).getReg();
+  Register NewLoReg = MI.getOperand(6).getReg();
+  Register NewHiReg = MI.getOperand(7).getReg();
   MachineFunction *MF = MBB.getParent();
   auto LoadCmpBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
@@ -475,7 +475,7 @@ bool AArch64ExpandPseudo::expandMI(MachineBasicBlock &MBB,
   case AArch64::LOADgot: {
     MachineFunction *MF = MBB.getParent();
-    unsigned DstReg = MI.getOperand(0).getReg();
+    Register DstReg = MI.getOperand(0).getReg();
     const MachineOperand &MO1 = MI.getOperand(1);
     unsigned Flags = MO1.getTargetFlags();
@@ -534,7 +534,7 @@ bool AArch64ExpandPseudo::expandMI(MachineBasicBlock &MBB,
   case AArch64::MOVaddrTLS:
   case AArch64::MOVaddrEXT: {
     // Expand into ADRP + ADD.
-    unsigned DstReg = MI.getOperand(0).getReg();
+    Register DstReg = MI.getOperand(0).getReg();
     MachineInstrBuilder MIB1 =
         BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::ADRP), DstReg)
             .add(MI.getOperand(1));
@@ -578,7 +578,7 @@ bool AArch64ExpandPseudo::expandMI(MachineBasicBlock &MBB,
     return true;
   case AArch64::MOVbaseTLS: {
-    unsigned DstReg = MI.getOperand(0).getReg();
+    Register DstReg = MI.getOperand(0).getReg();
     auto SysReg = AArch64SysReg::TPIDR_EL0;
     MachineFunction *MF = MBB.getParent();
     if (MF->getTarget().getTargetTriple().isOSFuchsia() &&

llvm/lib/Target/AArch64/AArch64FalkorHWPFFix.cpp

@@ -642,7 +642,7 @@ static Optional<LoadInfo> getLoadInfo(const MachineInstr &MI) {
   }
   // Loads from the stack pointer don't get prefetched.
-  unsigned BaseReg = MI.getOperand(BaseRegIdx).getReg();
+  Register BaseReg = MI.getOperand(BaseRegIdx).getReg();
   if (BaseReg == AArch64::SP || BaseReg == AArch64::WSP)
     return None;

llvm/lib/Target/AArch64/AArch64FastISel.cpp

@@ -3434,8 +3434,8 @@ bool AArch64FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
     MFI.setFrameAddressIsTaken(true);
     const AArch64RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
-    unsigned FramePtr = RegInfo->getFrameRegister(*(FuncInfo.MF));
-    unsigned SrcReg = MRI.createVirtualRegister(&AArch64::GPR64RegClass);
+    Register FramePtr = RegInfo->getFrameRegister(*(FuncInfo.MF));
+    Register SrcReg = MRI.createVirtualRegister(&AArch64::GPR64RegClass);
     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
             TII.get(TargetOpcode::COPY), SrcReg).addReg(FramePtr);
     // Recursively load frame address
@@ -3842,7 +3842,7 @@ bool AArch64FastISel::selectRet(const Instruction *I) {
         return false;
       unsigned SrcReg = Reg + VA.getValNo();
-      unsigned DestReg = VA.getLocReg();
+      Register DestReg = VA.getLocReg();
       // Avoid a cross-class copy. This is very unlikely.
       if (!MRI.getRegClass(SrcReg)->contains(DestReg))
         return false;
@@ -3970,7 +3970,7 @@ unsigned AArch64FastISel::emiti1Ext(unsigned SrcReg, MVT DestVT, bool IsZExt) {
     if (DestVT == MVT::i64) {
       // We're ZExt i1 to i64. The ANDWri Wd, Ws, #1 implicitly clears the
       // upper 32 bits. Emit a SUBREG_TO_REG to extend from Wd to Xd.
-      unsigned Reg64 = MRI.createVirtualRegister(&AArch64::GPR64RegClass);
+      Register Reg64 = MRI.createVirtualRegister(&AArch64::GPR64RegClass);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
               TII.get(AArch64::SUBREG_TO_REG), Reg64)
           .addImm(0)
@@ -4123,7 +4123,7 @@ unsigned AArch64FastISel::emitLSL_ri(MVT RetVT, MVT SrcVT, unsigned Op0,
   };
   unsigned Opc = OpcTable[IsZExt][Is64Bit];
   if (SrcVT.SimpleTy <= MVT::i32 && RetVT == MVT::i64) {
-    unsigned TmpReg = MRI.createVirtualRegister(RC);
+    Register TmpReg = MRI.createVirtualRegister(RC);
     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
             TII.get(AArch64::SUBREG_TO_REG), TmpReg)
         .addImm(0)
@@ -4244,7 +4244,7 @@ unsigned AArch64FastISel::emitLSR_ri(MVT RetVT, MVT SrcVT, unsigned Op0,
   };
   unsigned Opc = OpcTable[IsZExt][Is64Bit];
   if (SrcVT.SimpleTy <= MVT::i32 && RetVT == MVT::i64) {
-    unsigned TmpReg = MRI.createVirtualRegister(RC);
+    Register TmpReg = MRI.createVirtualRegister(RC);
     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
             TII.get(AArch64::SUBREG_TO_REG), TmpReg)
         .addImm(0)
@@ -4353,7 +4353,7 @@ unsigned AArch64FastISel::emitASR_ri(MVT RetVT, MVT SrcVT, unsigned Op0,
   };
   unsigned Opc = OpcTable[IsZExt][Is64Bit];
   if (SrcVT.SimpleTy <= MVT::i32 && RetVT == MVT::i64) {
-    unsigned TmpReg = MRI.createVirtualRegister(RC);
+    Register TmpReg = MRI.createVirtualRegister(RC);
     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
             TII.get(AArch64::SUBREG_TO_REG), TmpReg)
         .addImm(0)
@@ -4412,7 +4412,7 @@ unsigned AArch64FastISel::emitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
     if (DestVT == MVT::i8 || DestVT == MVT::i16)
       DestVT = MVT::i32;
     else if (DestVT == MVT::i64) {
-      unsigned Src64 = MRI.createVirtualRegister(&AArch64::GPR64RegClass);
+      Register Src64 = MRI.createVirtualRegister(&AArch64::GPR64RegClass);
       BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
               TII.get(AArch64::SUBREG_TO_REG), Src64)
           .addImm(0)
@@ -4495,7 +4495,7 @@ bool AArch64FastISel::optimizeIntExtLoad(const Instruction *I, MVT RetVT,
   const auto *LoadMI = MI;
   if (LoadMI->getOpcode() == TargetOpcode::COPY &&
       LoadMI->getOperand(1).getSubReg() == AArch64::sub_32) {
-    unsigned LoadReg = MI->getOperand(1).getReg();
+    Register LoadReg = MI->getOperand(1).getReg();
     LoadMI = MRI.getUniqueVRegDef(LoadReg);
     assert(LoadMI && "Expected valid instruction");
   }

llvm/lib/Target/AArch64/AArch64FrameLowering.cpp

@@ -476,8 +476,8 @@ static MachineBasicBlock::iterator InsertSEH(MachineBasicBlock::iterator MBBI,
       Imm = -Imm;
     LLVM_FALLTHROUGH;
   case AArch64::STPXpre: {
-    unsigned Reg0 = MBBI->getOperand(1).getReg();
-    unsigned Reg1 = MBBI->getOperand(2).getReg();
+    Register Reg0 = MBBI->getOperand(1).getReg();
+    Register Reg1 = MBBI->getOperand(2).getReg();
     if (Reg0 == AArch64::FP && Reg1 == AArch64::LR)
       MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveFPLR_X))
                 .addImm(Imm * 8)
@@ -525,8 +525,8 @@ static MachineBasicBlock::iterator InsertSEH(MachineBasicBlock::iterator MBBI,
   }
   case AArch64::STPXi:
   case AArch64::LDPXi: {
-    unsigned Reg0 = MBBI->getOperand(0).getReg();
-    unsigned Reg1 = MBBI->getOperand(1).getReg();
+    Register Reg0 = MBBI->getOperand(0).getReg();
+    Register Reg1 = MBBI->getOperand(1).getReg();
     if (Reg0 == AArch64::FP && Reg1 == AArch64::LR)
       MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveFPLR))
                 .addImm(Imm * 8)
@@ -1137,7 +1137,7 @@ void AArch64FrameLowering::emitPrologue(MachineFunction &MF,
   if (needsFrameMoves) {
     const DataLayout &TD = MF.getDataLayout();
     const int StackGrowth = -TD.getPointerSize(0);
-    unsigned FramePtr = RegInfo->getFrameRegister(MF);
+    Register FramePtr = RegInfo->getFrameRegister(MF);
     // An example of the prologue:
     //
     // .globl __foo

llvm/lib/Target/AArch64/AArch64ISelLowering.cpp

@@ -1316,9 +1316,9 @@ AArch64TargetLowering::EmitF128CSEL(MachineInstr &MI,
   DebugLoc DL = MI.getDebugLoc();
   MachineFunction::iterator It = ++MBB->getIterator();
-  unsigned DestReg = MI.getOperand(0).getReg();
-  unsigned IfTrueReg = MI.getOperand(1).getReg();
-  unsigned IfFalseReg = MI.getOperand(2).getReg();
+  Register DestReg = MI.getOperand(0).getReg();
+  Register IfTrueReg = MI.getOperand(1).getReg();
+  Register IfFalseReg = MI.getOperand(2).getReg();
   unsigned CondCode = MI.getOperand(3).getImm();
   bool NZCVKilled = MI.getOperand(4).isKill();
@@ -3299,8 +3299,8 @@ SDValue AArch64TargetLowering::LowerFormalArguments(
         assert(!FuncInfo->getSRetReturnReg());
         MVT PtrTy = getPointerTy(DAG.getDataLayout());
-        unsigned Reg =
+        Register Reg =
            MF.getRegInfo().createVirtualRegister(getRegClassFor(PtrTy));
         FuncInfo->setSRetReturnReg(Reg);
         SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), DL, Reg, InVals[I]);
@@ -12148,7 +12148,7 @@ void AArch64TargetLowering::insertCopiesSplitCSR(
     else
       llvm_unreachable("Unexpected register class in CSRsViaCopy!");
-    unsigned NewVR = MRI->createVirtualRegister(RC);
+    Register NewVR = MRI->createVirtualRegister(RC);
     // Create copy from CSR to a virtual register.
     // FIXME: this currently does not emit CFI pseudo-instructions, it works
     // fine for CXX_FAST_TLS since the C++-style TLS access functions should be

llvm/lib/Target/AArch64/AArch64InstrInfo.cpp

@@ -575,7 +575,7 @@ void AArch64InstrInfo::insertSelect(MachineBasicBlock &MBB,
     CC = AArch64CC::NE;
     break;
   }
-  unsigned SrcReg = Cond[2].getReg();
+  Register SrcReg = Cond[2].getReg();
   if (Is64Bit) {
     // cmp reg, #0 is actually subs xzr, reg, #0.
     MRI.constrainRegClass(SrcReg, &AArch64::GPR64spRegClass);
@@ -1072,7 +1072,7 @@ static bool UpdateOperandRegClass(MachineInstr &Instr) {
     assert(MO.isReg() &&
            "Operand has register constraints without being a register!");
-    unsigned Reg = MO.getReg();
+    Register Reg = MO.getReg();
     if (Register::isPhysicalRegister(Reg)) {
       if (!OpRegCstraints->contains(Reg))
         return false;
@@ -1498,7 +1498,7 @@ bool AArch64InstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
     return true;
   }
-  unsigned Reg = MI.getOperand(0).getReg();
+  Register Reg = MI.getOperand(0).getReg();
   const GlobalValue *GV =
       cast<GlobalValue>((*MI.memoperands_begin())->getValue());
   const TargetMachine &TM = MBB.getParent()->getTarget();
@@ -1582,7 +1582,7 @@ bool AArch64InstrInfo::isGPRCopy(const MachineInstr &MI) {
     break;
   case TargetOpcode::COPY: {
     // GPR32 copies will by lowered to ORRXrs
-    unsigned DstReg = MI.getOperand(0).getReg();
+    Register DstReg = MI.getOperand(0).getReg();
     return (AArch64::GPR32RegClass.contains(DstReg) ||
             AArch64::GPR64RegClass.contains(DstReg));
   }
@@ -1612,7 +1612,7 @@ bool AArch64InstrInfo::isFPRCopy(const MachineInstr &MI) {
     break;
   case TargetOpcode::COPY: {
     // FPR64 copies will by lowered to ORR.16b
-    unsigned DstReg = MI.getOperand(0).getReg();
+    Register DstReg = MI.getOperand(0).getReg();
     return (AArch64::FPR64RegClass.contains(DstReg) ||
             AArch64::FPR128RegClass.contains(DstReg));
   }
@@ -1918,7 +1918,7 @@ bool AArch64InstrInfo::isCandidateToMergeOrPair(const MachineInstr &MI) const {
   // e.g., ldr x0, [x0]
   // This case will never occur with an FI base.
   if (MI.getOperand(1).isReg()) {
-    unsigned BaseReg = MI.getOperand(1).getReg();
+    Register BaseReg = MI.getOperand(1).getReg();
     const TargetRegisterInfo *TRI = &getRegisterInfo();
     if (MI.modifiesRegister(BaseReg, TRI))
       return false;
@@ -3111,8 +3111,8 @@ MachineInstr *AArch64InstrInfo::foldMemoryOperandImpl(
   //   <rdar://problem/11522048>
   //
   if (MI.isFullCopy()) {
-    unsigned DstReg = MI.getOperand(0).getReg();
-    unsigned SrcReg = MI.getOperand(1).getReg();
+    Register DstReg = MI.getOperand(0).getReg();
+    Register SrcReg = MI.getOperand(1).getReg();
     if (SrcReg == AArch64::SP && Register::isVirtualRegister(DstReg)) {
       MF.getRegInfo().constrainRegClass(DstReg, &AArch64::GPR64RegClass);
       return nullptr;
@@ -3157,8 +3157,8 @@ MachineInstr *AArch64InstrInfo::foldMemoryOperandImpl(
     MachineBasicBlock &MBB = *MI.getParent();
     const MachineOperand &DstMO = MI.getOperand(0);
     const MachineOperand &SrcMO = MI.getOperand(1);
-    unsigned DstReg = DstMO.getReg();
-    unsigned SrcReg = SrcMO.getReg();
+    Register DstReg = DstMO.getReg();
+    Register SrcReg = SrcMO.getReg();
     // This is slightly expensive to compute for physical regs since
     // getMinimalPhysRegClass is slow.
     auto getRegClass = [&](unsigned Reg) {
@@ -3963,15 +3963,15 @@ genFusedMultiply(MachineFunction &MF, MachineRegisterInfo &MRI,
                  SmallVectorImpl<MachineInstr *> &InsInstrs, unsigned IdxMulOpd,
                  unsigned MaddOpc, const TargetRegisterClass *RC,
                  FMAInstKind kind = FMAInstKind::Default,
-                 const unsigned *ReplacedAddend = nullptr) {
+                 const Register *ReplacedAddend = nullptr) {
   assert(IdxMulOpd == 1 || IdxMulOpd == 2);
   unsigned IdxOtherOpd = IdxMulOpd == 1 ? 2 : 1;
   MachineInstr *MUL = MRI.getUniqueVRegDef(Root.getOperand(IdxMulOpd).getReg());
-  unsigned ResultReg = Root.getOperand(0).getReg();
-  unsigned SrcReg0 = MUL->getOperand(1).getReg();
+  Register ResultReg = Root.getOperand(0).getReg();
+  Register SrcReg0 = MUL->getOperand(1).getReg();
   bool Src0IsKill = MUL->getOperand(1).isKill();
-  unsigned SrcReg1 = MUL->getOperand(2).getReg();
+  Register SrcReg1 = MUL->getOperand(2).getReg();
   bool Src1IsKill = MUL->getOperand(2).isKill();
   unsigned SrcReg2;
@@ -4045,10 +4045,10 @@ static MachineInstr *genMaddR(MachineFunction &MF, MachineRegisterInfo &MRI,
   assert(IdxMulOpd == 1 || IdxMulOpd == 2);
   MachineInstr *MUL = MRI.getUniqueVRegDef(Root.getOperand(IdxMulOpd).getReg());
-  unsigned ResultReg = Root.getOperand(0).getReg();
-  unsigned SrcReg0 = MUL->getOperand(1).getReg();
+  Register ResultReg = Root.getOperand(0).getReg();
+  Register SrcReg0 = MUL->getOperand(1).getReg();
   bool Src0IsKill = MUL->getOperand(1).isKill();
-  unsigned SrcReg1 = MUL->getOperand(2).getReg();
+  Register SrcReg1 = MUL->getOperand(2).getReg();
   bool Src1IsKill = MUL->getOperand(2).isKill();
   if (Register::isVirtualRegister(ResultReg))
@@ -4146,7 +4146,7 @@ void AArch64InstrInfo::genAlternativeCodeSequence(
       Opc = AArch64::MADDXrrr;
       RC = &AArch64::GPR64RegClass;
     }
-    unsigned NewVR = MRI.createVirtualRegister(OrrRC);
+    Register NewVR = MRI.createVirtualRegister(OrrRC);
     uint64_t Imm = Root.getOperand(2).getImm();
     if (Root.getOperand(3).isImm()) {
@@ -4188,7 +4188,7 @@ void AArch64InstrInfo::genAlternativeCodeSequence(
       Opc = AArch64::MADDXrrr;
       RC = &AArch64::GPR64RegClass;
     }
-    unsigned NewVR = MRI.createVirtualRegister(SubRC);
+    Register NewVR = MRI.createVirtualRegister(SubRC);
     // SUB NewVR, 0, C
     MachineInstrBuilder MIB1 =
         BuildMI(MF, Root.getDebugLoc(), TII->get(SubOpc), NewVR)
@@ -4238,7 +4238,7 @@ void AArch64InstrInfo::genAlternativeCodeSequence(
       Opc = AArch64::MADDXrrr;
       RC = &AArch64::GPR64RegClass;
     }
-    unsigned NewVR = MRI.createVirtualRegister(OrrRC);
+    Register NewVR = MRI.createVirtualRegister(OrrRC);
     uint64_t Imm = Root.getOperand(2).getImm();
     if (Root.getOperand(3).isImm()) {
       unsigned Val = Root.getOperand(3).getImm();
@@ -4506,7 +4506,7 @@ void AArch64InstrInfo::genAlternativeCodeSequence(
   case MachineCombinerPattern::FMLSv2f32_OP1:
   case MachineCombinerPattern::FMLSv2i32_indexed_OP1: {
     RC = &AArch64::FPR64RegClass;
-    unsigned NewVR = MRI.createVirtualRegister(RC);
+    Register NewVR = MRI.createVirtualRegister(RC);
     MachineInstrBuilder MIB1 =
         BuildMI(MF, Root.getDebugLoc(), TII->get(AArch64::FNEGv2f32), NewVR)
             .add(Root.getOperand(2));
@@ -4526,7 +4526,7 @@ void AArch64InstrInfo::genAlternativeCodeSequence(
   case MachineCombinerPattern::FMLSv4f32_OP1:
   case MachineCombinerPattern::FMLSv4i32_indexed_OP1: {
     RC = &AArch64::FPR128RegClass;
-    unsigned NewVR = MRI.createVirtualRegister(RC);
+    Register NewVR = MRI.createVirtualRegister(RC);
     MachineInstrBuilder MIB1 =
         BuildMI(MF, Root.getDebugLoc(), TII->get(AArch64::FNEGv4f32), NewVR)
             .add(Root.getOperand(2));
@@ -4546,7 +4546,7 @@ void AArch64InstrInfo::genAlternativeCodeSequence(
   case MachineCombinerPattern::FMLSv2f64_OP1:
   case MachineCombinerPattern::FMLSv2i64_indexed_OP1: {
     RC = &AArch64::FPR128RegClass;
-    unsigned NewVR = MRI.createVirtualRegister(RC);
+    Register NewVR = MRI.createVirtualRegister(RC);
     MachineInstrBuilder MIB1 =
         BuildMI(MF, Root.getDebugLoc(), TII->get(AArch64::FNEGv2f64), NewVR)
             .add(Root.getOperand(2));
@@ -4647,7 +4647,7 @@ bool AArch64InstrInfo::optimizeCondBranch(MachineInstr &MI) const {
   MachineBasicBlock *MBB = MI.getParent();
   MachineFunction *MF = MBB->getParent();
   MachineRegisterInfo *MRI = &MF->getRegInfo();
-  unsigned VReg = MI.getOperand(0).getReg();
+  Register VReg = MI.getOperand(0).getReg();
   if (!Register::isVirtualRegister(VReg))
     return false;
@@ -4655,7 +4655,7 @@ bool AArch64InstrInfo::optimizeCondBranch(MachineInstr &MI) const {
   // Look through COPY instructions to find definition.
   while (DefMI->isCopy()) {
-    unsigned CopyVReg = DefMI->getOperand(1).getReg();
+    Register CopyVReg = DefMI->getOperand(1).getReg();
     if (!MRI->hasOneNonDBGUse(CopyVReg))
       return false;
     if (!MRI->hasOneDef(CopyVReg))
@@ -4683,7 +4683,7 @@ bool AArch64InstrInfo::optimizeCondBranch(MachineInstr &MI) const {
       return false;
     MachineOperand &MO = DefMI->getOperand(1);
-    unsigned NewReg = MO.getReg();
+    Register NewReg = MO.getReg();
     if (!Register::isVirtualRegister(NewReg))
       return false;

llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp

@@ -511,8 +511,8 @@ static bool isValidCopy(const MachineInstr &I, const RegisterBank &DstBank,
                         const MachineRegisterInfo &MRI,
                         const TargetRegisterInfo &TRI,
                         const RegisterBankInfo &RBI) {
-  const unsigned DstReg = I.getOperand(0).getReg();
-  const unsigned SrcReg = I.getOperand(1).getReg();
+  const Register DstReg = I.getOperand(0).getReg();
+  const Register SrcReg = I.getOperand(1).getReg();
   const unsigned DstSize = RBI.getSizeInBits(DstReg, MRI, TRI);
   const unsigned SrcSize = RBI.getSizeInBits(SrcReg, MRI, TRI);
@@ -572,8 +572,8 @@ static std::pair<const TargetRegisterClass *, const TargetRegisterClass *>
 getRegClassesForCopy(MachineInstr &I, const TargetInstrInfo &TII,
                      MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI,
                      const RegisterBankInfo &RBI) {
-  unsigned DstReg = I.getOperand(0).getReg();
-  unsigned SrcReg = I.getOperand(1).getReg();
+  Register DstReg = I.getOperand(0).getReg();
+  Register SrcReg = I.getOperand(1).getReg();
   const RegisterBank &DstRegBank = *RBI.getRegBank(DstReg, MRI, TRI);
   const RegisterBank &SrcRegBank = *RBI.getRegBank(SrcReg, MRI, TRI);
   unsigned DstSize = RBI.getSizeInBits(DstReg, MRI, TRI);
@@ -598,8 +598,8 @@ static bool selectCopy(MachineInstr &I, const TargetInstrInfo &TII,
                        MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI,
                        const RegisterBankInfo &RBI) {
-  unsigned DstReg = I.getOperand(0).getReg();
-  unsigned SrcReg = I.getOperand(1).getReg();
+  Register DstReg = I.getOperand(0).getReg();
+  Register SrcReg = I.getOperand(1).getReg();
   const RegisterBank &DstRegBank = *RBI.getRegBank(DstReg, MRI, TRI);
   const RegisterBank &SrcRegBank = *RBI.getRegBank(SrcReg, MRI, TRI);
@@ -675,7 +675,7 @@ static bool selectCopy(MachineInstr &I, const TargetInstrInfo &TII,
         SrcSize == 16) {
       // Special case for FPR16 to GPR32.
       // FIXME: This can probably be generalized like the above case.
-      unsigned PromoteReg =
+      Register PromoteReg =
           MRI.createVirtualRegister(&AArch64::FPR32RegClass);
       BuildMI(*I.getParent(), I, I.getDebugLoc(),
              TII.get(AArch64::SUBREG_TO_REG), PromoteReg)
@@ -1115,8 +1115,8 @@ void AArch64InstructionSelector::preISelLower(MachineInstr &I) const {
     // some reason we receive input GMIR that has an s64 shift amount that's not
     // a G_CONSTANT, insert a truncate so that we can still select the s32
     // register-register variant.
-    unsigned SrcReg = I.getOperand(1).getReg();
-    unsigned ShiftReg = I.getOperand(2).getReg();
+    Register SrcReg = I.getOperand(1).getReg();
+    Register ShiftReg = I.getOperand(2).getReg();
     const LLT ShiftTy = MRI.getType(ShiftReg);
     const LLT SrcTy = MRI.getType(SrcReg);
     if (SrcTy.isVector())
@@ -1767,7 +1767,7 @@ bool AArch64InstructionSelector::select(MachineInstr &I,
       const unsigned Size = MemSizeInBits / 8;
       const unsigned Scale = Log2_32(Size);
       if ((Imm & (Size - 1)) == 0 && Imm >= 0 && Imm < (0x1000 << Scale)) {
-        unsigned Ptr2Reg = PtrMI->getOperand(1).getReg();
+        Register Ptr2Reg = PtrMI->getOperand(1).getReg();
         I.getOperand(1).setReg(Ptr2Reg);
         PtrMI = MRI.getVRegDef(Ptr2Reg);
         Offset = Imm / Size;

llvm/lib/Target/AArch64/AArch64LegalizerInfo.cpp

@@ -705,7 +705,7 @@ bool AArch64LegalizerInfo::legalizeLoadStore(
     auto Bitcast = MIRBuilder.buildBitcast({NewTy}, {ValReg});
     MIRBuilder.buildStore(Bitcast.getReg(0), MI.getOperand(1).getReg(), MMO);
   } else {
-    unsigned NewReg = MRI.createGenericVirtualRegister(NewTy);
+    Register NewReg = MRI.createGenericVirtualRegister(NewTy);
     auto NewLoad = MIRBuilder.buildLoad(NewReg, MI.getOperand(1).getReg(), MMO);
     MIRBuilder.buildBitcast({ValReg}, {NewLoad});
   }

llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp

@@ -808,7 +808,7 @@ AArch64LoadStoreOpt::mergePairedInsns(MachineBasicBlock::iterator I,
       //   STRWui %w1, ...
      //   USE kill %w1 ; need to clear kill flag when moving STRWui downwards
       //   STRW %w0
-      unsigned Reg = getLdStRegOp(*I).getReg();
+      Register Reg = getLdStRegOp(*I).getReg();
       for (MachineInstr &MI : make_range(std::next(I), Paired))
         MI.clearRegisterKills(Reg, TRI);
     }
@@ -837,9 +837,9 @@ AArch64LoadStoreOpt::mergePairedInsns(MachineBasicBlock::iterator I,
     MachineOperand &DstMO = MIB->getOperand(SExtIdx);
     // Right now, DstMO has the extended register, since it comes from an
     // extended opcode.
-    unsigned DstRegX = DstMO.getReg();
+    Register DstRegX = DstMO.getReg();
     // Get the W variant of that register.
-    unsigned DstRegW = TRI->getSubReg(DstRegX, AArch64::sub_32);
+    Register DstRegW = TRI->getSubReg(DstRegX, AArch64::sub_32);
     // Update the result of LDP to use the W instead of the X variant.
     DstMO.setReg(DstRegW);
     LLVM_DEBUG(((MachineInstr *)MIB)->print(dbgs()));
@@ -882,9 +882,9 @@ AArch64LoadStoreOpt::promoteLoadFromStore(MachineBasicBlock::iterator LoadI,
   int LoadSize = getMemScale(*LoadI);
   int StoreSize = getMemScale(*StoreI);
-  unsigned LdRt = getLdStRegOp(*LoadI).getReg();
+  Register LdRt = getLdStRegOp(*LoadI).getReg();
   const MachineOperand &StMO = getLdStRegOp(*StoreI);
-  unsigned StRt = getLdStRegOp(*StoreI).getReg();
+  Register StRt = getLdStRegOp(*StoreI).getReg();
   bool IsStoreXReg = TRI->getRegClass(AArch64::GPR64RegClassID)->contains(StRt);
   assert((IsStoreXReg ||
@@ -933,10 +933,10 @@ AArch64LoadStoreOpt::promoteLoadFromStore(MachineBasicBlock::iterator LoadI,
                       ? getLdStOffsetOp(*StoreI).getImm()
                       : getLdStOffsetOp(*StoreI).getImm() * StoreSize;
   int Width = LoadSize * 8;
-  unsigned DestReg = IsStoreXReg
-                         ? TRI->getMatchingSuperReg(LdRt, AArch64::sub_32,
-                                                    &AArch64::GPR64RegClass)
-                         : LdRt;
+  unsigned DestReg =
+      IsStoreXReg ? Register(TRI->getMatchingSuperReg(
+                        LdRt, AArch64::sub_32, &AArch64::GPR64RegClass))
+                  : LdRt;
   assert((UnscaledLdOffset >= UnscaledStOffset &&
           (UnscaledLdOffset + LoadSize) <= UnscaledStOffset + StoreSize) &&
@@ -1042,7 +1042,7 @@ bool AArch64LoadStoreOpt::findMatchingStore(
   MachineBasicBlock::iterator B = I->getParent()->begin();
   MachineBasicBlock::iterator MBBI = I;
   MachineInstr &LoadMI = *I;
-  unsigned BaseReg = getLdStBaseOp(LoadMI).getReg();
+  Register BaseReg = getLdStBaseOp(LoadMI).getReg();
   // If the load is the first instruction in the block, there's obviously
   // not any matching store.
@@ -1156,8 +1156,8 @@ AArch64LoadStoreOpt::findMatchingInsn(MachineBasicBlock::iterator I,
   bool MayLoad = FirstMI.mayLoad();
   bool IsUnscaled = TII->isUnscaledLdSt(FirstMI);
-  unsigned Reg = getLdStRegOp(FirstMI).getReg();
-  unsigned BaseReg = getLdStBaseOp(FirstMI).getReg();
+  Register Reg = getLdStRegOp(FirstMI).getReg();
+  Register BaseReg = getLdStBaseOp(FirstMI).getReg();
   int Offset = getLdStOffsetOp(FirstMI).getImm();
   int OffsetStride = IsUnscaled ? getMemScale(FirstMI) : 1;
   bool IsPromotableZeroStore = isPromotableZeroStoreInst(FirstMI);
@@ -1188,7 +1188,7 @@ AArch64LoadStoreOpt::findMatchingInsn(MachineBasicBlock::iterator I,
       // check for +1/-1. Make sure to check the new instruction offset is
       // actually an immediate and not a symbolic reference destined for
       // a relocation.
-      unsigned MIBaseReg = getLdStBaseOp(MI).getReg();
+      Register MIBaseReg = getLdStBaseOp(MI).getReg();
       int MIOffset = getLdStOffsetOp(MI).getImm();
       bool MIIsUnscaled = TII->isUnscaledLdSt(MI);
       if (IsUnscaled != MIIsUnscaled) {
@@ -1433,7 +1433,7 @@ MachineBasicBlock::iterator AArch64LoadStoreOpt::findMatchingUpdateInsnForward(
   MachineInstr &MemMI = *I;
   MachineBasicBlock::iterator MBBI = I;
-  unsigned BaseReg = getLdStBaseOp(MemMI).getReg();
+  Register BaseReg = getLdStBaseOp(MemMI).getReg();
   int MIUnscaledOffset = getLdStOffsetOp(MemMI).getImm() * getMemScale(MemMI);
   // Scan forward looking for post-index opportunities. Updating instructions
@@ -1446,7 +1446,7 @@ MachineBasicBlock::iterator AArch64LoadStoreOpt::findMatchingUpdateInsnForward(
   // merge the update.
   bool IsPairedInsn = isPairedLdSt(MemMI);
   for (unsigned i = 0, e = IsPairedInsn ? 2 : 1; i != e; ++i) {
-    unsigned DestReg = getLdStRegOp(MemMI, i).getReg();
+    Register DestReg = getLdStRegOp(MemMI, i).getReg();
     if (DestReg == BaseReg || TRI->isSubRegister(BaseReg, DestReg))
       return E;
   }
@@ -1487,7 +1487,7 @@ MachineBasicBlock::iterator AArch64LoadStoreOpt::findMatchingUpdateInsnBackward(
   MachineInstr &MemMI = *I;
   MachineBasicBlock::iterator MBBI = I;
-  unsigned BaseReg = getLdStBaseOp(MemMI).getReg();
+  Register BaseReg = getLdStBaseOp(MemMI).getReg();
   int Offset = getLdStOffsetOp(MemMI).getImm();
   // If the load/store is the first instruction in the block, there's obviously
@@ -1498,7 +1498,7 @@ MachineBasicBlock::iterator AArch64LoadStoreOpt::findMatchingUpdateInsnBackward(
   // merge the update.
   bool IsPairedInsn = isPairedLdSt(MemMI);
   for (unsigned i = 0, e = IsPairedInsn ? 2 : 1; i != e; ++i) {
-    unsigned DestReg = getLdStRegOp(MemMI, i).getReg();
+    Register DestReg = getLdStRegOp(MemMI, i).getReg();
     if (DestReg == BaseReg || TRI->isSubRegister(BaseReg, DestReg))
       return E;
   }

llvm/lib/Target/AArch64/AArch64PBQPRegAlloc.cpp

@@ -359,8 +359,8 @@ void A57ChainingConstraint::apply(PBQPRAGraph &G) {
       case AArch64::FMADDDrrr:
       case AArch64::FNMSUBDrrr:
      case AArch64::FNMADDDrrr: {
-        unsigned Rd = MI.getOperand(0).getReg();
-        unsigned Ra = MI.getOperand(3).getReg();
+        Register Rd = MI.getOperand(0).getReg();
+        Register Ra = MI.getOperand(3).getReg();
         if (addIntraChainConstraint(G, Rd, Ra))
           addInterChainConstraint(G, Rd, Ra);
@@ -369,7 +369,7 @@ void A57ChainingConstraint::apply(PBQPRAGraph &G) {
       case AArch64::FMLAv2f32:
       case AArch64::FMLSv2f32: {
-        unsigned Rd = MI.getOperand(0).getReg();
+        Register Rd = MI.getOperand(0).getReg();
         addInterChainConstraint(G, Rd, Rd);
         break;
       }

llvm/lib/Target/AArch64/AArch64RegisterBankInfo.cpp

@@ -563,8 +563,8 @@ AArch64RegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
     return getSameKindOfOperandsMapping(MI);
   }
   case TargetOpcode::COPY: {
-    unsigned DstReg = MI.getOperand(0).getReg();
-    unsigned SrcReg = MI.getOperand(1).getReg();
+    Register DstReg = MI.getOperand(0).getReg();
+    Register SrcReg = MI.getOperand(1).getReg();
     // Check if one of the register is not a generic register.
     if ((Register::isPhysicalRegister(DstReg) ||
          !MRI.getType(DstReg).isValid()) ||
@@ -693,7 +693,7 @@ AArch64RegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
   case TargetOpcode::G_STORE:
     // Check if that store is fed by fp instructions.
     if (OpRegBankIdx[0] == PMI_FirstGPR) {
-      unsigned VReg = MI.getOperand(0).getReg();
+      Register VReg = MI.getOperand(0).getReg();
       if (!VReg)
         break;
       MachineInstr *DefMI = MRI.getVRegDef(VReg);
@@ -745,7 +745,7 @@ AArch64RegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
     // This doesn't check the condition, since it's just whatever is in NZCV.
     // This isn't passed explicitly in a register to fcsel/csel.
     for (unsigned Idx = 2; Idx < 4; ++Idx) {
-      unsigned VReg = MI.getOperand(Idx).getReg();
+      Register VReg = MI.getOperand(Idx).getReg();
       MachineInstr *DefMI = MRI.getVRegDef(VReg);
       if (getRegBank(VReg, MRI, TRI) == &AArch64::FPRRegBank ||
           onlyDefinesFP(*DefMI, MRI, TRI))
@@ -813,7 +813,7 @@ AArch64RegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
     // sure that we preserve that.
     if (OpRegBankIdx[1] != PMI_FirstGPR)
       break;
-    unsigned VReg = MI.getOperand(1).getReg();
+    Register VReg = MI.getOperand(1).getReg();
     if (!VReg)
       break;

llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp

@@ -497,7 +497,7 @@ void AArch64RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
   // If we get here, the immediate doesn't fit into the instruction. We folded
   // as much as possible above. Handle the rest, providing a register that is
   // SP+LargeImm.
-  unsigned ScratchReg =
+  Register ScratchReg =
       MF.getRegInfo().createVirtualRegister(&AArch64::GPR64RegClass);
   emitFrameOffset(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg, Offset, TII);
   MI.getOperand(FIOperandNum).ChangeToRegister(ScratchReg, false, false, true);

llvm/lib/Target/AArch64/AArch64SIMDInstrOpt.cpp

@@ -426,16 +426,16 @@ bool AArch64SIMDInstrOpt::optimizeVectElement(MachineInstr &MI) {
   MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
   // Get the operands of the current SIMD arithmetic instruction.
-  unsigned MulDest = MI.getOperand(0).getReg();
-  unsigned SrcReg0 = MI.getOperand(1).getReg();
+  Register MulDest = MI.getOperand(0).getReg();
+  Register SrcReg0 = MI.getOperand(1).getReg();
   unsigned Src0IsKill = getKillRegState(MI.getOperand(1).isKill());
-  unsigned SrcReg1 = MI.getOperand(2).getReg();
+  Register SrcReg1 = MI.getOperand(2).getReg();
   unsigned Src1IsKill = getKillRegState(MI.getOperand(2).isKill());
   unsigned DupDest;
   // Instructions of interest have either 4 or 5 operands.
   if (MI.getNumOperands() == 5) {
-    unsigned SrcReg2 = MI.getOperand(3).getReg();
+    Register SrcReg2 = MI.getOperand(3).getReg();
     unsigned Src2IsKill = getKillRegState(MI.getOperand(3).isKill());
     unsigned LaneNumber = MI.getOperand(4).getImm();
     // Create a new DUP instruction. Note that if an equivalent DUP instruction

llvm/lib/Target/AArch64/AArch64SpeculationHardening.cpp

@@ -521,7 +521,7 @@ bool AArch64SpeculationHardening::slhLoads(MachineBasicBlock &MBB) {
       for (auto Use : MI.uses()) {
         if (!Use.isReg())
           continue;
-        unsigned Reg = Use.getReg();
+        Register Reg = Use.getReg();
         // Some loads of floating point data have implicit defs/uses on a
         // super register of that floating point data. Some examples:
         // $s0 = LDRSui $sp, 22, implicit-def $q0
@@ -561,8 +561,8 @@ bool AArch64SpeculationHardening::expandSpeculationSafeValue(
     // miss-speculation isn't happening because we're already inserting barriers
     // to guarantee that.
     if (!UseControlFlowSpeculationBarrier && !UsesFullSpeculationBarrier) {
-      unsigned DstReg = MI.getOperand(0).getReg();
-      unsigned SrcReg = MI.getOperand(1).getReg();
+      Register DstReg = MI.getOperand(0).getReg();
+      Register SrcReg = MI.getOperand(1).getReg();
       // Mark this register and all its aliasing registers as needing to be
       // value speculation hardened before its next use, by using a CSDB
       // barrier instruction.

llvm/lib/Target/AArch64/AArch64StorePairSuppress.cpp

@@ -151,7 +151,7 @@ bool AArch64StorePairSuppress::runOnMachineFunction(MachineFunction &MF) {
       int64_t Offset;
       if (TII->getMemOperandWithOffset(MI, BaseOp, Offset, TRI) &&
          BaseOp->isReg()) {
-        unsigned BaseReg = BaseOp->getReg();
+        Register BaseReg = BaseOp->getReg();
        if (PrevBaseReg == BaseReg) {
          // If this block can take STPs, skip ahead to the next block.
          if (!SuppressSTP && shouldAddSTPToBlock(MI.getParent()))