CodeGen: Introduce a class for registers

Avoids using a plain unsigned for registers throughout codegen.
Doesn't attempt to change every register use, just something a little
more than the set needed to build after changing the return type of
MachineOperand::getReg().

llvm-svn: 364191
Matt Arsenault 2019-06-24 15:50:29 +00:00
parent 3260ef16bb
commit e3a676e9ad
95 changed files with 553 additions and 487 deletions
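
For orientation, the typical call-site change this commit makes looks like the following sketch (illustrative names, not a hunk from the diff):

    // Before: register numbers were passed around as plain integers.
    unsigned DstReg = MI.getOperand(0).getReg();
    // After: getReg() returns the new Register wrapper. Register converts
    // implicitly to unsigned, so untouched call sites keep compiling.
    Register DstReg = MI.getOperand(0).getReg();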

View File

@ -163,8 +163,8 @@ public:
///
/// \return True if the lowering succeeds, false otherwise.
virtual bool lowerReturn(MachineIRBuilder &MIRBuilder, const Value *Val,
ArrayRef<unsigned> VRegs,
unsigned SwiftErrorVReg) const {
ArrayRef<Register> VRegs,
Register SwiftErrorVReg) const {
if (!supportSwiftError()) {
assert(SwiftErrorVReg == 0 && "attempt to use unsupported swifterror");
return lowerReturn(MIRBuilder, Val, VRegs);
@ -175,7 +175,7 @@ public:
/// This hook behaves as the extended lowerReturn function, but for targets
/// that do not support swifterror value promotion.
virtual bool lowerReturn(MachineIRBuilder &MIRBuilder, const Value *Val,
ArrayRef<unsigned> VRegs) const {
ArrayRef<Register> VRegs) const {
return false;
}
@ -191,7 +191,7 @@ public:
/// \return True if the lowering succeeded, false otherwise.
virtual bool lowerFormalArguments(MachineIRBuilder &MIRBuilder,
const Function &F,
ArrayRef<unsigned> VRegs) const {
ArrayRef<Register> VRegs) const {
return false;
}
@ -216,7 +216,7 @@ public:
virtual bool lowerCall(MachineIRBuilder &MIRBuilder, CallingConv::ID CallConv,
const MachineOperand &Callee, const ArgInfo &OrigRet,
ArrayRef<ArgInfo> OrigArgs,
unsigned SwiftErrorVReg) const {
Register SwiftErrorVReg) const {
if (!supportSwiftError()) {
assert(SwiftErrorVReg == 0 && "trying to use unsupported swifterror");
return lowerCall(MIRBuilder, CallConv, Callee, OrigRet, OrigArgs);
@ -254,8 +254,8 @@ public:
///
/// \return true if the lowering succeeded, false otherwise.
bool lowerCall(MachineIRBuilder &MIRBuilder, ImmutableCallSite CS,
unsigned ResReg, ArrayRef<unsigned> ArgRegs,
unsigned SwiftErrorVReg,
Register ResReg, ArrayRef<Register> ArgRegs,
Register SwiftErrorVReg,
std::function<unsigned()> GetCalleeReg) const;
};

View File

@ -71,7 +71,7 @@ private:
public:
ValueToVRegInfo() = default;
using VRegListT = SmallVector<unsigned, 1>;
using VRegListT = SmallVector<Register, 1>;
using OffsetListT = SmallVector<uint64_t, 1>;
using const_vreg_iterator =
@ -559,9 +559,9 @@ private:
/// Non-aggregate types have just one corresponding VReg and the list can be
/// used as a single "unsigned". Aggregates get flattened. If such VRegs do
/// not exist, they are created.
ArrayRef<unsigned> getOrCreateVRegs(const Value &Val);
ArrayRef<Register> getOrCreateVRegs(const Value &Val);
unsigned getOrCreateVReg(const Value &Val) {
Register getOrCreateVReg(const Value &Val) {
auto Regs = getOrCreateVRegs(Val);
if (Regs.empty())
return 0;

View File

@ -39,11 +39,11 @@ public:
return false;
Builder.setInstr(MI);
unsigned DstReg = MI.getOperand(0).getReg();
unsigned SrcReg = lookThroughCopyInstrs(MI.getOperand(1).getReg());
Register DstReg = MI.getOperand(0).getReg();
Register SrcReg = lookThroughCopyInstrs(MI.getOperand(1).getReg());
// aext(trunc x) - > aext/copy/trunc x
unsigned TruncSrc;
Register TruncSrc;
if (mi_match(SrcReg, MRI, m_GTrunc(m_Reg(TruncSrc)))) {
LLVM_DEBUG(dbgs() << ".. Combine MI: " << MI;);
Builder.buildAnyExtOrTrunc(DstReg, TruncSrc);
@ -52,7 +52,7 @@ public:
}
// aext([asz]ext x) -> [asz]ext x
unsigned ExtSrc;
Register ExtSrc;
MachineInstr *ExtMI;
if (mi_match(SrcReg, MRI,
m_all_of(m_MInstr(ExtMI), m_any_of(m_GAnyExt(m_Reg(ExtSrc)),
@ -89,11 +89,11 @@ public:
return false;
Builder.setInstr(MI);
unsigned DstReg = MI.getOperand(0).getReg();
unsigned SrcReg = lookThroughCopyInstrs(MI.getOperand(1).getReg());
Register DstReg = MI.getOperand(0).getReg();
Register SrcReg = lookThroughCopyInstrs(MI.getOperand(1).getReg());
// zext(trunc x) - > and (aext/copy/trunc x), mask
unsigned TruncSrc;
Register TruncSrc;
if (mi_match(SrcReg, MRI, m_GTrunc(m_Reg(TruncSrc)))) {
LLT DstTy = MRI.getType(DstReg);
if (isInstUnsupported({TargetOpcode::G_AND, {DstTy}}) ||
@ -118,11 +118,11 @@ public:
return false;
Builder.setInstr(MI);
unsigned DstReg = MI.getOperand(0).getReg();
unsigned SrcReg = lookThroughCopyInstrs(MI.getOperand(1).getReg());
Register DstReg = MI.getOperand(0).getReg();
Register SrcReg = lookThroughCopyInstrs(MI.getOperand(1).getReg());
// sext(trunc x) - > ashr (shl (aext/copy/trunc x), c), c
unsigned TruncSrc;
Register TruncSrc;
if (mi_match(SrcReg, MRI, m_GTrunc(m_Reg(TruncSrc)))) {
LLT DstTy = MRI.getType(DstReg);
// Guess on the RHS shift amount type, which should be re-legalized if
@ -156,7 +156,7 @@ public:
if (MachineInstr *DefMI = getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF,
MI.getOperand(1).getReg(), MRI)) {
Builder.setInstr(MI);
unsigned DstReg = MI.getOperand(0).getReg();
Register DstReg = MI.getOperand(0).getReg();
LLT DstTy = MRI.getType(DstReg);
if (Opcode == TargetOpcode::G_ANYEXT) {
@ -224,7 +224,7 @@ public:
const unsigned NewNumDefs = NumDefs / NumMergeRegs;
for (unsigned Idx = 0; Idx < NumMergeRegs; ++Idx) {
SmallVector<unsigned, 2> DstRegs;
SmallVector<Register, 2> DstRegs;
for (unsigned j = 0, DefIdx = Idx * NewNumDefs; j < NewNumDefs;
++j, ++DefIdx)
DstRegs.push_back(MI.getOperand(DefIdx).getReg());
@ -246,7 +246,7 @@ public:
const unsigned NumRegs = NumMergeRegs / NumDefs;
for (unsigned DefIdx = 0; DefIdx < NumDefs; ++DefIdx) {
SmallVector<unsigned, 2> Regs;
SmallVector<Register, 2> Regs;
for (unsigned j = 0, Idx = NumRegs * DefIdx + 1; j < NumRegs;
++j, ++Idx)
Regs.push_back(MergeI->getOperand(Idx).getReg());
@ -457,8 +457,8 @@ private:
/// Looks through copy instructions and returns the actual
/// source register.
unsigned lookThroughCopyInstrs(unsigned Reg) {
unsigned TmpReg;
unsigned lookThroughCopyInstrs(Register Reg) {
Register TmpReg;
while (mi_match(Reg, MRI, m_Copy(m_Reg(TmpReg)))) {
if (MRI.getType(TmpReg).isValid())
Reg = TmpReg;

View File

@ -141,14 +141,14 @@ private:
/// Helper function to split a wide generic register into bitwise blocks with
/// the given Type (which implies the number of blocks needed). The generic
/// registers created are appended to Ops, starting at bit 0 of Reg.
void extractParts(unsigned Reg, LLT Ty, int NumParts,
SmallVectorImpl<unsigned> &VRegs);
void extractParts(Register Reg, LLT Ty, int NumParts,
SmallVectorImpl<Register> &VRegs);
/// Version which handles irregular splits.
bool extractParts(unsigned Reg, LLT RegTy, LLT MainTy,
bool extractParts(Register Reg, LLT RegTy, LLT MainTy,
LLT &LeftoverTy,
SmallVectorImpl<unsigned> &VRegs,
SmallVectorImpl<unsigned> &LeftoverVRegs);
SmallVectorImpl<Register> &VRegs,
SmallVectorImpl<Register> &LeftoverVRegs);
/// Helper function to build a wide generic register \p DstReg of type \p
/// RegTy from smaller parts. This will produce a G_MERGE_VALUES,
@ -159,16 +159,16 @@ private:
///
/// If \p ResultTy does not evenly break into \p PartTy sized pieces, the
/// remainder must be specified with \p LeftoverRegs of type \p LeftoverTy.
void insertParts(unsigned DstReg, LLT ResultTy,
LLT PartTy, ArrayRef<unsigned> PartRegs,
LLT LeftoverTy = LLT(), ArrayRef<unsigned> LeftoverRegs = {});
void insertParts(Register DstReg, LLT ResultTy,
LLT PartTy, ArrayRef<Register> PartRegs,
LLT LeftoverTy = LLT(), ArrayRef<Register> LeftoverRegs = {});
/// Perform generic multiplication of values held in multiple registers.
/// Generated instructions use only types NarrowTy and i1.
/// Destination can be same or two times size of the source.
void multiplyRegisters(SmallVectorImpl<unsigned> &DstRegs,
ArrayRef<unsigned> Src1Regs,
ArrayRef<unsigned> Src2Regs, LLT NarrowTy);
void multiplyRegisters(SmallVectorImpl<Register> &DstRegs,
ArrayRef<Register> Src1Regs,
ArrayRef<Register> Src2Regs, LLT NarrowTy);
LegalizeResult fewerElementsVectorImplicitDef(MachineInstr &MI,
unsigned TypeIdx, LLT NarrowTy);

View File

@ -160,7 +160,7 @@ template <typename Class> struct bind_ty {
}
};
inline bind_ty<unsigned> m_Reg(unsigned &R) { return R; }
inline bind_ty<Register> m_Reg(Register &R) { return R; }
inline bind_ty<MachineInstr *> m_MInstr(MachineInstr *&MI) { return MI; }
inline bind_ty<LLT> m_Type(LLT &Ty) { return Ty; }
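
With this overload, mi_match bindings can capture a Register directly, as the artifact combiner hunks above now do. A minimal sketch reusing names from those hunks:

    Register TruncSrc;
    // Binds TruncSrc to the source of a G_TRUNC feeding SrcReg, if any.
    if (mi_match(SrcReg, MRI, m_GTrunc(m_Reg(TruncSrc))))
      Builder.buildAnyExtOrTrunc(DstReg, TruncSrc);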

View File

@ -66,6 +66,7 @@ class DstOp {
public:
enum class DstType { Ty_LLT, Ty_Reg, Ty_RC };
DstOp(unsigned R) : Reg(R), Ty(DstType::Ty_Reg) {}
DstOp(Register R) : Reg(R), Ty(DstType::Ty_Reg) {}
DstOp(const MachineOperand &Op) : Reg(Op.getReg()), Ty(DstType::Ty_Reg) {}
DstOp(const LLT &T) : LLTTy(T), Ty(DstType::Ty_LLT) {}
DstOp(const TargetRegisterClass *TRC) : RC(TRC), Ty(DstType::Ty_RC) {}
@ -126,6 +127,7 @@ class SrcOp {
public:
enum class SrcType { Ty_Reg, Ty_MIB, Ty_Predicate };
SrcOp(unsigned R) : Reg(R), Ty(SrcType::Ty_Reg) {}
SrcOp(Register R) : Reg(R), Ty(SrcType::Ty_Reg) {}
SrcOp(const MachineOperand &Op) : Reg(Op.getReg()), Ty(SrcType::Ty_Reg) {}
SrcOp(const MachineInstrBuilder &MIB) : SrcMIB(MIB), Ty(SrcType::Ty_MIB) {}
SrcOp(const CmpInst::Predicate P) : Pred(P), Ty(SrcType::Ty_Predicate) {}
@ -401,7 +403,7 @@ public:
/// type as \p Op0 or \p Op0 itself.
///
/// \return a MachineInstrBuilder for the newly created instruction.
Optional<MachineInstrBuilder> materializeGEP(unsigned &Res, unsigned Op0,
Optional<MachineInstrBuilder> materializeGEP(Register &Res, Register Op0,
const LLT &ValueTy,
uint64_t Value);
@ -717,7 +719,7 @@ public:
/// \pre The bits defined by each Op (derived from index and scalar size) must
/// not overlap.
/// \pre \p Indices must be in ascending order of bit position.
void buildSequence(unsigned Res, ArrayRef<unsigned> Ops,
void buildSequence(Register Res, ArrayRef<Register> Ops,
ArrayRef<uint64_t> Indices);
/// Build and insert \p Res = G_MERGE_VALUES \p Op0, ...
@ -731,7 +733,7 @@ public:
/// \pre The type of all \p Ops registers must be identical.
///
/// \return a MachineInstrBuilder for the newly created instruction.
MachineInstrBuilder buildMerge(const DstOp &Res, ArrayRef<unsigned> Ops);
MachineInstrBuilder buildMerge(const DstOp &Res, ArrayRef<Register> Ops);
/// Build and insert \p Res0, ... = G_UNMERGE_VALUES \p Op
///
@ -744,7 +746,7 @@ public:
///
/// \return a MachineInstrBuilder for the newly created instruction.
MachineInstrBuilder buildUnmerge(ArrayRef<LLT> Res, const SrcOp &Op);
MachineInstrBuilder buildUnmerge(ArrayRef<unsigned> Res, const SrcOp &Op);
MachineInstrBuilder buildUnmerge(ArrayRef<Register> Res, const SrcOp &Op);
/// Build and insert an unmerge of \p Res sized pieces to cover \p Op
MachineInstrBuilder buildUnmerge(LLT Res, const SrcOp &Op);
@ -759,7 +761,7 @@ public:
///
/// \return a MachineInstrBuilder for the newly created instruction.
MachineInstrBuilder buildBuildVector(const DstOp &Res,
ArrayRef<unsigned> Ops);
ArrayRef<Register> Ops);
/// Build and insert \p Res = G_BUILD_VECTOR with \p Src replicated to fill
/// the number of elements
@ -780,7 +782,7 @@ public:
///
/// \return a MachineInstrBuilder for the newly created instruction.
MachineInstrBuilder buildBuildVectorTrunc(const DstOp &Res,
ArrayRef<unsigned> Ops);
ArrayRef<Register> Ops);
/// Build and insert \p Res = G_CONCAT_VECTORS \p Op0, ...
///
@ -794,10 +796,10 @@ public:
///
/// \return a MachineInstrBuilder for the newly created instruction.
MachineInstrBuilder buildConcatVectors(const DstOp &Res,
ArrayRef<unsigned> Ops);
ArrayRef<Register> Ops);
MachineInstrBuilder buildInsert(unsigned Res, unsigned Src,
unsigned Op, unsigned Index);
MachineInstrBuilder buildInsert(Register Res, Register Src,
Register Op, unsigned Index);
/// Build and insert either a G_INTRINSIC (if \p HasSideEffects is false) or
/// G_INTRINSIC_W_SIDE_EFFECTS instruction. Its first operand will be the
@ -809,7 +811,7 @@ public:
/// \pre setBasicBlock or setMI must have been called.
///
/// \return a MachineInstrBuilder for the newly created instruction.
MachineInstrBuilder buildIntrinsic(Intrinsic::ID ID, ArrayRef<unsigned> Res,
MachineInstrBuilder buildIntrinsic(Intrinsic::ID ID, ArrayRef<Register> Res,
bool HasSideEffects);
MachineInstrBuilder buildIntrinsic(Intrinsic::ID ID, ArrayRef<DstOp> Res,
bool HasSideEffects);

View File

@ -14,6 +14,7 @@
#define LLVM_CODEGEN_MACHINEOPERAND_H
#include "llvm/ADT/DenseMap.h"
#include "llvm/CodeGen/Register.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/LowLevelTypeImpl.h"
@ -345,9 +346,9 @@ public:
//===--------------------------------------------------------------------===//
/// getReg - Returns the register number.
unsigned getReg() const {
Register getReg() const {
assert(isReg() && "This is not a register operand!");
return SmallContents.RegNo;
return Register(SmallContents.RegNo);
}
unsigned getSubReg() const {

View File

@ -712,12 +712,12 @@ public:
/// createVirtualRegister - Create and return a new virtual register in the
/// function with the specified register class.
unsigned createVirtualRegister(const TargetRegisterClass *RegClass,
Register createVirtualRegister(const TargetRegisterClass *RegClass,
StringRef Name = "");
/// Create and return a new virtual register in the function with the same
/// attributes as the given register.
unsigned cloneVirtualRegister(unsigned VReg, StringRef Name = "");
Register cloneVirtualRegister(Register VReg, StringRef Name = "");
/// Get the low-level type of \p Reg or LLT{} if Reg is not a generic
/// (target independent) virtual register.
@ -732,7 +732,7 @@ public:
/// Create and return a new generic virtual register with low-level
/// type \p Ty.
unsigned createGenericVirtualRegister(LLT Ty, StringRef Name = "");
Register createGenericVirtualRegister(LLT Ty, StringRef Name = "");
/// Remove all types associated to virtual registers (after instruction
/// selection and constraining of all generic virtual registers).

View File

@ -0,0 +1,60 @@
//===-- llvm/CodeGen/Register.h ---------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_REGISTER_H
#define LLVM_CODEGEN_REGISTER_H
#include <cassert>
namespace llvm {
/// Wrapper class representing virtual and physical registers. Should be passed
/// by value.
class Register {
unsigned Reg;
public:
Register(unsigned Val = 0): Reg(Val) {}
/// Return true if the specified register number is in the virtual register
/// namespace.
bool isVirtual() const {
return int(Reg) < 0;
}
/// Return true if the specified register number is in the physical register
/// namespace.
bool isPhysical() const {
return int(Reg) > 0;
}
/// Convert a virtual register number to a 0-based index. The first virtual
/// register in a function will get the index 0.
unsigned virtRegIndex() const {
assert(isVirtual() && "Not a virtual register");
return Reg & ~(1u << 31);
}
/// Convert a 0-based index to a virtual register number.
/// This is the inverse operation of VirtReg2IndexFunctor below.
static Register index2VirtReg(unsigned Index) {
return Register(Index | (1u << 31));
}
operator unsigned() const {
return Reg;
}
bool isValid() const {
return Reg != 0;
}
};
}
#endif
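
A minimal usage sketch for the new class, based only on the definition above (the function name is illustrative):

    #include "llvm/CodeGen/Register.h"
    #include <cassert>
    using llvm::Register;

    void example() {
      Register Invalid;                        // default-constructed: number 0
      assert(!Invalid.isValid());
      Register V = Register::index2VirtReg(0); // first virtual register
      assert(V.isVirtual() && V.virtRegIndex() == 0);
      Register Phys(1);                        // a physical register number
      assert(Phys.isPhysical());
      unsigned Raw = V;  // implicit conversion keeps existing code working
      (void)Raw;
    }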

View File

@ -17,6 +17,7 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/Register.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/DebugLoc.h"
#include <functional>
@ -41,18 +42,18 @@ class SwiftErrorValueTracking {
/// A map from swifterror value in a basic block to the virtual register it is
/// currently represented by.
DenseMap<std::pair<const MachineBasicBlock *, const Value *>, unsigned>
DenseMap<std::pair<const MachineBasicBlock *, const Value *>, Register>
VRegDefMap;
/// A list of upward exposed vreg uses that need to be satisfied by either a
/// copy def or a phi node at the beginning of the basic block representing
/// the predecessor(s) swifterror value.
DenseMap<std::pair<const MachineBasicBlock *, const Value *>, unsigned>
DenseMap<std::pair<const MachineBasicBlock *, const Value *>, Register>
VRegUpwardsUse;
/// A map from instructions that define/use a swifterror value to the virtual
/// register that represents that def/use.
llvm::DenseMap<PointerIntPair<const Instruction *, 1, bool>, unsigned>
llvm::DenseMap<PointerIntPair<const Instruction *, 1, bool>, Register>
VRegDefUses;
/// The swifterror argument of the current function.
@ -80,7 +81,7 @@ public:
/// Set the swifterror virtual register in the VRegDefMap for this
/// basic block.
void setCurrentVReg(const MachineBasicBlock *MBB, const Value *, unsigned);
void setCurrentVReg(const MachineBasicBlock *MBB, const Value *, Register);
/// Get or create the swifterror value virtual register for a def of a
/// swifterror by an instruction.

View File

@ -990,7 +990,7 @@ public:
/// getFrameRegister - This method should return the register used as a base
/// for values allocated in the current stack frame.
virtual unsigned getFrameRegister(const MachineFunction &MF) const = 0;
virtual Register getFrameRegister(const MachineFunction &MF) const = 0;
/// Mark a register and all its aliases as reserved in the given set.
void markSuperRegs(BitVector &RegisterSet, unsigned Reg) const;

View File

@ -97,8 +97,8 @@ class TargetInstrInfo;
/// returns the physical register mapped to the specified
/// virtual register
unsigned getPhys(unsigned virtReg) const {
assert(TargetRegisterInfo::isVirtualRegister(virtReg));
Register getPhys(Register virtReg) const {
assert(virtReg.isVirtual());
return Virt2PhysMap[virtReg];
}

View File

@ -38,12 +38,12 @@ using EntryIndex = DbgValueHistoryMap::EntryIndex;
// If @MI is a DBG_VALUE with debug value described by a
// defined register, returns the number of this register.
// In the other case, returns 0.
static unsigned isDescribedByReg(const MachineInstr &MI) {
static Register isDescribedByReg(const MachineInstr &MI) {
assert(MI.isDebugValue());
assert(MI.getNumOperands() == 4);
// If location of variable is described using a register (directly or
// indirectly), this register is always a first operand.
return MI.getOperand(0).isReg() ? MI.getOperand(0).getReg() : 0;
return MI.getOperand(0).isReg() ? MI.getOperand(0).getReg() : Register();
}
bool DbgValueHistoryMap::startDbgValue(InlinedEntity Var,

View File

@ -27,8 +27,8 @@ using namespace llvm;
void CallLowering::anchor() {}
bool CallLowering::lowerCall(MachineIRBuilder &MIRBuilder, ImmutableCallSite CS,
unsigned ResReg, ArrayRef<unsigned> ArgRegs,
unsigned SwiftErrorVReg,
Register ResReg, ArrayRef<Register> ArgRegs,
Register SwiftErrorVReg,
std::function<unsigned()> GetCalleeReg) const {
auto &DL = CS.getParent()->getParent()->getParent()->getDataLayout();
@ -131,7 +131,7 @@ bool CallLowering::handleAssignments(MachineIRBuilder &MIRBuilder,
if (Handler.assignArg(i, CurVT, CurVT, CCValAssign::Full, Args[i], CCInfo)) {
// Try to use the register type if we couldn't assign the VT.
if (!Handler.isArgumentHandler() || !CurVT.isValid())
return false;
return false;
CurVT = TLI->getRegisterTypeForCallingConv(
F.getContext(), F.getCallingConv(), EVT(CurVT));
if (Handler.assignArg(i, CurVT, CurVT, CCValAssign::Full, Args[i], CCInfo))

View File

@ -169,7 +169,7 @@ IRTranslator::allocateVRegs(const Value &Val) {
return *Regs;
}
ArrayRef<unsigned> IRTranslator::getOrCreateVRegs(const Value &Val) {
ArrayRef<Register> IRTranslator::getOrCreateVRegs(const Value &Val) {
auto VRegsIt = VMap.findVRegs(Val);
if (VRegsIt != VMap.vregs_end())
return *VRegsIt->second;
@ -363,11 +363,11 @@ bool IRTranslator::translateRet(const User &U, MachineIRBuilder &MIRBuilder) {
if (Ret && DL->getTypeStoreSize(Ret->getType()) == 0)
Ret = nullptr;
ArrayRef<unsigned> VRegs;
ArrayRef<Register> VRegs;
if (Ret)
VRegs = getOrCreateVRegs(*Ret);
unsigned SwiftErrorVReg = 0;
Register SwiftErrorVReg = 0;
if (CLI->supportSwiftError() && SwiftError.getFunctionArg()) {
SwiftErrorVReg = SwiftError.getOrCreateVRegUseAt(
&RI, &MIRBuilder.getMBB(), SwiftError.getFunctionArg());
@ -858,7 +858,7 @@ bool IRTranslator::translateLoad(const User &U, MachineIRBuilder &MIRBuilder) {
if (DL->getTypeStoreSize(LI.getType()) == 0)
return true;
ArrayRef<unsigned> Regs = getOrCreateVRegs(LI);
ArrayRef<Register> Regs = getOrCreateVRegs(LI);
ArrayRef<uint64_t> Offsets = *VMap.getOffsets(LI);
unsigned Base = getOrCreateVReg(*LI.getPointerOperand());
@ -875,7 +875,7 @@ bool IRTranslator::translateLoad(const User &U, MachineIRBuilder &MIRBuilder) {
for (unsigned i = 0; i < Regs.size(); ++i) {
unsigned Addr = 0;
Register Addr;
MIRBuilder.materializeGEP(Addr, Base, OffsetTy, Offsets[i] / 8);
MachinePointerInfo Ptr(LI.getPointerOperand(), Offsets[i] / 8);
@ -899,7 +899,7 @@ bool IRTranslator::translateStore(const User &U, MachineIRBuilder &MIRBuilder) {
if (DL->getTypeStoreSize(SI.getValueOperand()->getType()) == 0)
return true;
ArrayRef<unsigned> Vals = getOrCreateVRegs(*SI.getValueOperand());
ArrayRef<Register> Vals = getOrCreateVRegs(*SI.getValueOperand());
ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*SI.getValueOperand());
unsigned Base = getOrCreateVReg(*SI.getPointerOperand());
@ -916,7 +916,7 @@ bool IRTranslator::translateStore(const User &U, MachineIRBuilder &MIRBuilder) {
}
for (unsigned i = 0; i < Vals.size(); ++i) {
unsigned Addr = 0;
Register Addr;
MIRBuilder.materializeGEP(Addr, Base, OffsetTy, Offsets[i] / 8);
MachinePointerInfo Ptr(SI.getPointerOperand(), Offsets[i] / 8);
@ -958,7 +958,7 @@ bool IRTranslator::translateExtractValue(const User &U,
MachineIRBuilder &MIRBuilder) {
const Value *Src = U.getOperand(0);
uint64_t Offset = getOffsetFromIndices(U, *DL);
ArrayRef<unsigned> SrcRegs = getOrCreateVRegs(*Src);
ArrayRef<Register> SrcRegs = getOrCreateVRegs(*Src);
ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*Src);
unsigned Idx = llvm::lower_bound(Offsets, Offset) - Offsets.begin();
auto &DstRegs = allocateVRegs(U);
@ -975,8 +975,8 @@ bool IRTranslator::translateInsertValue(const User &U,
uint64_t Offset = getOffsetFromIndices(U, *DL);
auto &DstRegs = allocateVRegs(U);
ArrayRef<uint64_t> DstOffsets = *VMap.getOffsets(U);
ArrayRef<unsigned> SrcRegs = getOrCreateVRegs(*Src);
ArrayRef<unsigned> InsertedRegs = getOrCreateVRegs(*U.getOperand(1));
ArrayRef<Register> SrcRegs = getOrCreateVRegs(*Src);
ArrayRef<Register> InsertedRegs = getOrCreateVRegs(*U.getOperand(1));
auto InsertedIt = InsertedRegs.begin();
for (unsigned i = 0; i < DstRegs.size(); ++i) {
@ -992,9 +992,9 @@ bool IRTranslator::translateInsertValue(const User &U,
bool IRTranslator::translateSelect(const User &U,
MachineIRBuilder &MIRBuilder) {
unsigned Tst = getOrCreateVReg(*U.getOperand(0));
ArrayRef<unsigned> ResRegs = getOrCreateVRegs(U);
ArrayRef<unsigned> Op0Regs = getOrCreateVRegs(*U.getOperand(1));
ArrayRef<unsigned> Op1Regs = getOrCreateVRegs(*U.getOperand(2));
ArrayRef<Register> ResRegs = getOrCreateVRegs(U);
ArrayRef<Register> Op0Regs = getOrCreateVRegs(*U.getOperand(1));
ArrayRef<Register> Op1Regs = getOrCreateVRegs(*U.getOperand(2));
const SelectInst &SI = cast<SelectInst>(U);
uint16_t Flags = 0;
@ -1186,7 +1186,7 @@ void IRTranslator::getStackGuard(unsigned DstReg,
bool IRTranslator::translateOverflowIntrinsic(const CallInst &CI, unsigned Op,
MachineIRBuilder &MIRBuilder) {
ArrayRef<unsigned> ResRegs = getOrCreateVRegs(CI);
ArrayRef<Register> ResRegs = getOrCreateVRegs(CI);
MIRBuilder.buildInstr(Op)
.addDef(ResRegs[0])
.addDef(ResRegs[1])
@ -1539,7 +1539,7 @@ bool IRTranslator::translateInlineAsm(const CallInst &CI,
unsigned IRTranslator::packRegs(const Value &V,
MachineIRBuilder &MIRBuilder) {
ArrayRef<unsigned> Regs = getOrCreateVRegs(V);
ArrayRef<Register> Regs = getOrCreateVRegs(V);
ArrayRef<uint64_t> Offsets = *VMap.getOffsets(V);
LLT BigTy = getLLTForType(*V.getType(), *DL);
@ -1558,7 +1558,7 @@ unsigned IRTranslator::packRegs(const Value &V,
void IRTranslator::unpackRegs(const Value &V, unsigned Src,
MachineIRBuilder &MIRBuilder) {
ArrayRef<unsigned> Regs = getOrCreateVRegs(V);
ArrayRef<Register> Regs = getOrCreateVRegs(V);
ArrayRef<uint64_t> Offsets = *VMap.getOffsets(V);
for (unsigned i = 0; i < Regs.size(); ++i)
@ -1586,12 +1586,12 @@ bool IRTranslator::translateCall(const User &U, MachineIRBuilder &MIRBuilder) {
if (!F || !F->isIntrinsic() || ID == Intrinsic::not_intrinsic) {
bool IsSplitType = valueIsSplit(CI);
unsigned Res = IsSplitType ? MRI->createGenericVirtualRegister(
Register Res = IsSplitType ? MRI->createGenericVirtualRegister(
getLLTForType(*CI.getType(), *DL))
: getOrCreateVReg(CI);
SmallVector<unsigned, 8> Args;
unsigned SwiftErrorVReg = 0;
SmallVector<Register, 8> Args;
Register SwiftErrorVReg;
for (auto &Arg: CI.arg_operands()) {
if (CLI->supportSwiftError() && isSwiftError(Arg)) {
LLT Ty = getLLTForType(*Arg->getType(), *DL);
@ -1622,7 +1622,7 @@ bool IRTranslator::translateCall(const User &U, MachineIRBuilder &MIRBuilder) {
if (translateKnownIntrinsic(CI, ID, MIRBuilder))
return true;
ArrayRef<unsigned> ResultRegs;
ArrayRef<Register> ResultRegs;
if (!CI.getType()->isVoidTy())
ResultRegs = getOrCreateVRegs(CI);
@ -1690,8 +1690,8 @@ bool IRTranslator::translateInvoke(const User &U,
unsigned Res = 0;
if (!I.getType()->isVoidTy())
Res = MRI->createGenericVirtualRegister(getLLTForType(*I.getType(), *DL));
SmallVector<unsigned, 8> Args;
unsigned SwiftErrorVReg = 0;
SmallVector<Register, 8> Args;
Register SwiftErrorVReg;
for (auto &Arg : I.arg_operands()) {
if (CLI->supportSwiftError() && isSwiftError(Arg)) {
LLT Ty = getLLTForType(*Arg->getType(), *DL);
@ -1776,7 +1776,7 @@ bool IRTranslator::translateLandingPad(const User &U,
return false;
MBB.addLiveIn(ExceptionReg);
ArrayRef<unsigned> ResRegs = getOrCreateVRegs(LP);
ArrayRef<Register> ResRegs = getOrCreateVRegs(LP);
MIRBuilder.buildCopy(ResRegs[0], ExceptionReg);
unsigned SelectorReg = TLI.getExceptionSelectorRegister(PersonalityFn);
@ -2069,7 +2069,7 @@ void IRTranslator::finishPendingPhis() {
SmallSet<const MachineBasicBlock *, 16> SeenPreds;
for (unsigned i = 0; i < PI->getNumIncomingValues(); ++i) {
auto IRPred = PI->getIncomingBlock(i);
ArrayRef<unsigned> ValRegs = getOrCreateVRegs(*PI->getIncomingValue(i));
ArrayRef<Register> ValRegs = getOrCreateVRegs(*PI->getIncomingValue(i));
for (auto Pred : getMachinePredBBs({IRPred, PI->getParent()})) {
if (SeenPreds.count(Pred))
continue;
@ -2136,7 +2136,7 @@ bool IRTranslator::translate(const Constant &C, unsigned Reg) {
// Return the scalar if it is a <1 x Ty> vector.
if (CAZ->getNumElements() == 1)
return translate(*CAZ->getElementValue(0u), Reg);
SmallVector<unsigned, 4> Ops;
SmallVector<Register, 4> Ops;
for (unsigned i = 0; i < CAZ->getNumElements(); ++i) {
Constant &Elt = *CAZ->getElementValue(i);
Ops.push_back(getOrCreateVReg(Elt));
@ -2146,7 +2146,7 @@ bool IRTranslator::translate(const Constant &C, unsigned Reg) {
// Return the scalar if it is a <1 x Ty> vector.
if (CV->getNumElements() == 1)
return translate(*CV->getElementAsConstant(0), Reg);
SmallVector<unsigned, 4> Ops;
SmallVector<Register, 4> Ops;
for (unsigned i = 0; i < CV->getNumElements(); ++i) {
Constant &Elt = *CV->getElementAsConstant(i);
Ops.push_back(getOrCreateVReg(Elt));
@ -2164,7 +2164,7 @@ bool IRTranslator::translate(const Constant &C, unsigned Reg) {
} else if (auto CV = dyn_cast<ConstantVector>(&C)) {
if (CV->getNumOperands() == 1)
return translate(*CV->getOperand(0), Reg);
SmallVector<unsigned, 4> Ops;
SmallVector<Register, 4> Ops;
for (unsigned i = 0; i < CV->getNumOperands(); ++i) {
Ops.push_back(getOrCreateVReg(*CV->getOperand(i)));
}
@ -2274,7 +2274,7 @@ bool IRTranslator::runOnMachineFunction(MachineFunction &CurMF) {
EntryBB->addSuccessor(&getMBB(F.front()));
// Lower the actual args into this basic block.
SmallVector<unsigned, 8> VRegArgs;
SmallVector<Register, 8> VRegArgs;
for (const Argument &Arg: F.args()) {
if (DL->getTypeStoreSize(Arg.getType()) == 0)
continue; // Don't handle zero sized types.

View File

@ -115,17 +115,17 @@ LegalizerHelper::legalizeInstrStep(MachineInstr &MI) {
}
}
void LegalizerHelper::extractParts(unsigned Reg, LLT Ty, int NumParts,
SmallVectorImpl<unsigned> &VRegs) {
void LegalizerHelper::extractParts(Register Reg, LLT Ty, int NumParts,
SmallVectorImpl<Register> &VRegs) {
for (int i = 0; i < NumParts; ++i)
VRegs.push_back(MRI.createGenericVirtualRegister(Ty));
MIRBuilder.buildUnmerge(VRegs, Reg);
}
bool LegalizerHelper::extractParts(unsigned Reg, LLT RegTy,
bool LegalizerHelper::extractParts(Register Reg, LLT RegTy,
LLT MainTy, LLT &LeftoverTy,
SmallVectorImpl<unsigned> &VRegs,
SmallVectorImpl<unsigned> &LeftoverRegs) {
SmallVectorImpl<Register> &VRegs,
SmallVectorImpl<Register> &LeftoverRegs) {
assert(!LeftoverTy.isValid() && "this is an out argument");
unsigned RegSize = RegTy.getSizeInBits();
@ -152,14 +152,14 @@ bool LegalizerHelper::extractParts(unsigned Reg, LLT RegTy,
// For irregular sizes, extract the individual parts.
for (unsigned I = 0; I != NumParts; ++I) {
unsigned NewReg = MRI.createGenericVirtualRegister(MainTy);
Register NewReg = MRI.createGenericVirtualRegister(MainTy);
VRegs.push_back(NewReg);
MIRBuilder.buildExtract(NewReg, Reg, MainSize * I);
}
for (unsigned Offset = MainSize * NumParts; Offset < RegSize;
Offset += LeftoverSize) {
unsigned NewReg = MRI.createGenericVirtualRegister(LeftoverTy);
Register NewReg = MRI.createGenericVirtualRegister(LeftoverTy);
LeftoverRegs.push_back(NewReg);
MIRBuilder.buildExtract(NewReg, Reg, Offset);
}
@ -167,11 +167,11 @@ bool LegalizerHelper::extractParts(unsigned Reg, LLT RegTy,
return true;
}
void LegalizerHelper::insertParts(unsigned DstReg,
void LegalizerHelper::insertParts(Register DstReg,
LLT ResultTy, LLT PartTy,
ArrayRef<unsigned> PartRegs,
ArrayRef<Register> PartRegs,
LLT LeftoverTy,
ArrayRef<unsigned> LeftoverRegs) {
ArrayRef<Register> LeftoverRegs) {
if (!LeftoverTy.isValid()) {
assert(LeftoverRegs.empty());
@ -469,7 +469,7 @@ LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI,
return UnableToLegalize;
int NumParts = SizeOp0 / NarrowSize;
SmallVector<unsigned, 2> DstRegs;
SmallVector<Register, 2> DstRegs;
for (int i = 0; i < NumParts; ++i)
DstRegs.push_back(
MIRBuilder.buildUndef(NarrowTy)->getOperand(0).getReg());
@ -489,7 +489,7 @@ LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI,
unsigned NarrowSize = NarrowTy.getSizeInBits();
int NumParts = TotalSize / NarrowSize;
SmallVector<unsigned, 4> PartRegs;
SmallVector<Register, 4> PartRegs;
for (int I = 0; I != NumParts; ++I) {
unsigned Offset = I * NarrowSize;
auto K = MIRBuilder.buildConstant(NarrowTy,
@ -499,7 +499,7 @@ LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI,
LLT LeftoverTy;
unsigned LeftoverBits = TotalSize - NumParts * NarrowSize;
SmallVector<unsigned, 1> LeftoverRegs;
SmallVector<Register, 1> LeftoverRegs;
if (LeftoverBits != 0) {
LeftoverTy = LLT::scalar(LeftoverBits);
auto K = MIRBuilder.buildConstant(
@ -522,7 +522,7 @@ LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI,
// Expand in terms of carry-setting/consuming G_ADDE instructions.
int NumParts = SizeOp0 / NarrowTy.getSizeInBits();
SmallVector<unsigned, 2> Src1Regs, Src2Regs, DstRegs;
SmallVector<Register, 2> Src1Regs, Src2Regs, DstRegs;
extractParts(MI.getOperand(1).getReg(), NarrowTy, NumParts, Src1Regs);
extractParts(MI.getOperand(2).getReg(), NarrowTy, NumParts, Src2Regs);
@ -555,7 +555,7 @@ LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI,
int NumParts = SizeOp0 / NarrowTy.getSizeInBits();
SmallVector<unsigned, 2> Src1Regs, Src2Regs, DstRegs;
SmallVector<Register, 2> Src1Regs, Src2Regs, DstRegs;
extractParts(MI.getOperand(1).getReg(), NarrowTy, NumParts, Src1Regs);
extractParts(MI.getOperand(2).getReg(), NarrowTy, NumParts, Src2Regs);
@ -761,7 +761,7 @@ void LegalizerHelper::moreElementsVectorSrc(MachineInstr &MI, LLT MoreTy,
// Use concat_vectors if the result is a multiple of the number of elements.
if (NumParts * OldElts == NewElts) {
SmallVector<unsigned, 8> Parts;
SmallVector<Register, 8> Parts;
Parts.push_back(MO.getReg());
unsigned ImpDef = MIRBuilder.buildUndef(OldTy).getReg(0);
@ -785,7 +785,7 @@ LegalizerHelper::widenScalarMergeValues(MachineInstr &MI, unsigned TypeIdx,
if (TypeIdx != 1)
return UnableToLegalize;
unsigned DstReg = MI.getOperand(0).getReg();
Register DstReg = MI.getOperand(0).getReg();
LLT DstTy = MRI.getType(DstReg);
if (!DstTy.isScalar())
return UnableToLegalize;
@ -795,17 +795,17 @@ LegalizerHelper::widenScalarMergeValues(MachineInstr &MI, unsigned TypeIdx,
unsigned PartSize = DstTy.getSizeInBits() / NumSrc;
unsigned Src1 = MI.getOperand(1).getReg();
unsigned ResultReg = MIRBuilder.buildZExt(DstTy, Src1)->getOperand(0).getReg();
Register ResultReg = MIRBuilder.buildZExt(DstTy, Src1)->getOperand(0).getReg();
for (unsigned I = 2; I != NumOps; ++I) {
const unsigned Offset = (I - 1) * PartSize;
unsigned SrcReg = MI.getOperand(I).getReg();
Register SrcReg = MI.getOperand(I).getReg();
assert(MRI.getType(SrcReg) == LLT::scalar(PartSize));
auto ZextInput = MIRBuilder.buildZExt(DstTy, SrcReg);
unsigned NextResult = I + 1 == NumOps ? DstReg :
Register NextResult = I + 1 == NumOps ? DstReg :
MRI.createGenericVirtualRegister(DstTy);
auto ShiftAmt = MIRBuilder.buildConstant(DstTy, Offset);
@ -825,12 +825,12 @@ LegalizerHelper::widenScalarUnmergeValues(MachineInstr &MI, unsigned TypeIdx,
return UnableToLegalize;
unsigned NumDst = MI.getNumOperands() - 1;
unsigned SrcReg = MI.getOperand(NumDst).getReg();
Register SrcReg = MI.getOperand(NumDst).getReg();
LLT SrcTy = MRI.getType(SrcReg);
if (!SrcTy.isScalar())
return UnableToLegalize;
unsigned Dst0Reg = MI.getOperand(0).getReg();
Register Dst0Reg = MI.getOperand(0).getReg();
LLT DstTy = MRI.getType(Dst0Reg);
if (!DstTy.isScalar())
return UnableToLegalize;
@ -861,8 +861,8 @@ LegalizerHelper::widenScalarUnmergeValues(MachineInstr &MI, unsigned TypeIdx,
LegalizerHelper::LegalizeResult
LegalizerHelper::widenScalarExtract(MachineInstr &MI, unsigned TypeIdx,
LLT WideTy) {
unsigned DstReg = MI.getOperand(0).getReg();
unsigned SrcReg = MI.getOperand(1).getReg();
Register DstReg = MI.getOperand(0).getReg();
Register SrcReg = MI.getOperand(1).getReg();
LLT SrcTy = MRI.getType(SrcReg);
LLT DstTy = MRI.getType(DstReg);
@ -1617,7 +1617,7 @@ LegalizerHelper::lower(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
LegalizerHelper::LegalizeResult LegalizerHelper::fewerElementsVectorImplicitDef(
MachineInstr &MI, unsigned TypeIdx, LLT NarrowTy) {
SmallVector<unsigned, 2> DstRegs;
SmallVector<Register, 2> DstRegs;
unsigned NarrowSize = NarrowTy.getSizeInBits();
unsigned DstReg = MI.getOperand(0).getReg();
@ -1702,7 +1702,7 @@ LegalizerHelper::fewerElementsVectorBasic(MachineInstr &MI, unsigned TypeIdx,
return Legalized;
}
SmallVector<unsigned, 2> DstRegs, Src0Regs, Src1Regs, Src2Regs;
SmallVector<Register, 2> DstRegs, Src0Regs, Src1Regs, Src2Regs;
extractParts(MI.getOperand(1).getReg(), NarrowTy, NumParts, Src0Regs);
@ -1773,8 +1773,8 @@ LegalizerHelper::fewerElementsVectorMultiEltType(
SmallVector<MachineInstrBuilder, 4> NewInsts;
SmallVector<unsigned, 4> DstRegs, LeftoverDstRegs;
SmallVector<unsigned, 4> PartRegs, LeftoverRegs;
SmallVector<Register, 4> DstRegs, LeftoverDstRegs;
SmallVector<Register, 4> PartRegs, LeftoverRegs;
for (unsigned I = 1, E = MI.getNumOperands(); I != E; ++I) {
LLT LeftoverTy;
@ -1861,7 +1861,7 @@ LegalizerHelper::fewerElementsVectorCasts(MachineInstr &MI, unsigned TypeIdx,
NarrowTy1 = SrcTy.getElementType();
}
SmallVector<unsigned, 4> SrcRegs, DstRegs;
SmallVector<Register, 4> SrcRegs, DstRegs;
extractParts(SrcReg, NarrowTy1, NumParts, SrcRegs);
for (unsigned I = 0; I < NumParts; ++I) {
@ -1924,7 +1924,7 @@ LegalizerHelper::fewerElementsVectorCmp(MachineInstr &MI, unsigned TypeIdx,
CmpInst::Predicate Pred
= static_cast<CmpInst::Predicate>(MI.getOperand(1).getPredicate());
SmallVector<unsigned, 2> Src1Regs, Src2Regs, DstRegs;
SmallVector<Register, 2> Src1Regs, Src2Regs, DstRegs;
extractParts(MI.getOperand(2).getReg(), NarrowTy1, NumParts, Src1Regs);
extractParts(MI.getOperand(3).getReg(), NarrowTy1, NumParts, Src2Regs);
@ -1953,8 +1953,8 @@ LegalizerHelper::fewerElementsVectorCmp(MachineInstr &MI, unsigned TypeIdx,
LegalizerHelper::LegalizeResult
LegalizerHelper::fewerElementsVectorSelect(MachineInstr &MI, unsigned TypeIdx,
LLT NarrowTy) {
unsigned DstReg = MI.getOperand(0).getReg();
unsigned CondReg = MI.getOperand(1).getReg();
Register DstReg = MI.getOperand(0).getReg();
Register CondReg = MI.getOperand(1).getReg();
unsigned NumParts = 0;
LLT NarrowTy0, NarrowTy1;
@ -1999,7 +1999,7 @@ LegalizerHelper::fewerElementsVectorSelect(MachineInstr &MI, unsigned TypeIdx,
}
}
SmallVector<unsigned, 2> DstRegs, Src0Regs, Src1Regs, Src2Regs;
SmallVector<Register, 2> DstRegs, Src0Regs, Src1Regs, Src2Regs;
if (CondTy.isVector())
extractParts(MI.getOperand(1).getReg(), NarrowTy1, NumParts, Src0Regs);
@ -2007,7 +2007,7 @@ LegalizerHelper::fewerElementsVectorSelect(MachineInstr &MI, unsigned TypeIdx,
extractParts(MI.getOperand(3).getReg(), NarrowTy0, NumParts, Src2Regs);
for (unsigned i = 0; i < NumParts; ++i) {
unsigned DstReg = MRI.createGenericVirtualRegister(NarrowTy0);
Register DstReg = MRI.createGenericVirtualRegister(NarrowTy0);
MIRBuilder.buildSelect(DstReg, CondTy.isVector() ? Src0Regs[i] : CondReg,
Src1Regs[i], Src2Regs[i]);
DstRegs.push_back(DstReg);
@ -2038,7 +2038,7 @@ LegalizerHelper::fewerElementsVectorPhi(MachineInstr &MI, unsigned TypeIdx,
if (NumParts < 0)
return UnableToLegalize;
SmallVector<unsigned, 4> DstRegs, LeftoverDstRegs;
SmallVector<Register, 4> DstRegs, LeftoverDstRegs;
SmallVector<MachineInstrBuilder, 4> NewInsts;
const int TotalNumParts = NumParts + NumLeftover;
@ -2046,7 +2046,7 @@ LegalizerHelper::fewerElementsVectorPhi(MachineInstr &MI, unsigned TypeIdx,
// Insert the new phis in the result block first.
for (int I = 0; I != TotalNumParts; ++I) {
LLT Ty = I < NumParts ? NarrowTy : LeftoverTy;
unsigned PartDstReg = MRI.createGenericVirtualRegister(Ty);
Register PartDstReg = MRI.createGenericVirtualRegister(Ty);
NewInsts.push_back(MIRBuilder.buildInstr(TargetOpcode::G_PHI)
.addDef(PartDstReg));
if (I < NumParts)
@ -2059,7 +2059,7 @@ LegalizerHelper::fewerElementsVectorPhi(MachineInstr &MI, unsigned TypeIdx,
MIRBuilder.setInsertPt(*MBB, MBB->getFirstNonPHI());
insertParts(DstReg, PhiTy, NarrowTy, DstRegs, LeftoverTy, LeftoverDstRegs);
SmallVector<unsigned, 4> PartRegs, LeftoverRegs;
SmallVector<Register, 4> PartRegs, LeftoverRegs;
// Insert code to extract the incoming values in each predecessor block.
for (unsigned I = 1, E = MI.getNumOperands(); I != E; I += 2) {
@ -2105,14 +2105,14 @@ LegalizerHelper::reduceLoadStoreWidth(MachineInstr &MI, unsigned TypeIdx,
return UnableToLegalize;
bool IsLoad = MI.getOpcode() == TargetOpcode::G_LOAD;
unsigned ValReg = MI.getOperand(0).getReg();
unsigned AddrReg = MI.getOperand(1).getReg();
Register ValReg = MI.getOperand(0).getReg();
Register AddrReg = MI.getOperand(1).getReg();
LLT ValTy = MRI.getType(ValReg);
int NumParts = -1;
int NumLeftover = -1;
LLT LeftoverTy;
SmallVector<unsigned, 8> NarrowRegs, NarrowLeftoverRegs;
SmallVector<Register, 8> NarrowRegs, NarrowLeftoverRegs;
if (IsLoad) {
std::tie(NumParts, NumLeftover) = getNarrowTypeBreakDown(ValTy, NarrowTy, LeftoverTy);
} else {
@ -2134,7 +2134,7 @@ LegalizerHelper::reduceLoadStoreWidth(MachineInstr &MI, unsigned TypeIdx,
// is a load, return the new registers in ValRegs. For a store, each elements
// of ValRegs should be PartTy. Returns the next offset that needs to be
// handled.
auto splitTypePieces = [=](LLT PartTy, SmallVectorImpl<unsigned> &ValRegs,
auto splitTypePieces = [=](LLT PartTy, SmallVectorImpl<Register> &ValRegs,
unsigned Offset) -> unsigned {
MachineFunction &MF = MIRBuilder.getMF();
unsigned PartSize = PartTy.getSizeInBits();
@ -2142,7 +2142,7 @@ LegalizerHelper::reduceLoadStoreWidth(MachineInstr &MI, unsigned TypeIdx,
Offset += PartSize, ++Idx) {
unsigned ByteSize = PartSize / 8;
unsigned ByteOffset = Offset / 8;
unsigned NewAddrReg = 0;
Register NewAddrReg;
MIRBuilder.materializeGEP(NewAddrReg, AddrReg, OffsetTy, ByteOffset);
@ -2150,7 +2150,7 @@ LegalizerHelper::reduceLoadStoreWidth(MachineInstr &MI, unsigned TypeIdx,
MF.getMachineMemOperand(MMO, ByteOffset, ByteSize);
if (IsLoad) {
unsigned Dst = MRI.createGenericVirtualRegister(PartTy);
Register Dst = MRI.createGenericVirtualRegister(PartTy);
ValRegs.push_back(Dst);
MIRBuilder.buildLoad(Dst, NewAddrReg, *NewMMO);
} else {
@ -2401,7 +2401,7 @@ LegalizerHelper::narrowScalarShift(MachineInstr &MI, unsigned TypeIdx,
auto IsShort = MIRBuilder.buildICmp(ICmpInst::ICMP_ULT, CondTy, Amt, NewBits);
auto IsZero = MIRBuilder.buildICmp(ICmpInst::ICMP_EQ, CondTy, Amt, Zero);
unsigned ResultRegs[2];
Register ResultRegs[2];
switch (MI.getOpcode()) {
case TargetOpcode::G_SHL: {
// Short: ShAmt < NewBitSize
@ -2556,9 +2556,9 @@ LegalizerHelper::moreElementsVector(MachineInstr &MI, unsigned TypeIdx,
}
}
void LegalizerHelper::multiplyRegisters(SmallVectorImpl<unsigned> &DstRegs,
ArrayRef<unsigned> Src1Regs,
ArrayRef<unsigned> Src2Regs,
void LegalizerHelper::multiplyRegisters(SmallVectorImpl<Register> &DstRegs,
ArrayRef<Register> Src1Regs,
ArrayRef<Register> Src2Regs,
LLT NarrowTy) {
MachineIRBuilder &B = MIRBuilder;
unsigned SrcParts = Src1Regs.size();
@ -2570,7 +2570,7 @@ void LegalizerHelper::multiplyRegisters(SmallVectorImpl<unsigned> &DstRegs,
DstRegs[DstIdx] = FactorSum;
unsigned CarrySumPrevDstIdx;
SmallVector<unsigned, 4> Factors;
SmallVector<Register, 4> Factors;
for (DstIdx = 1; DstIdx < DstParts; DstIdx++) {
// Collect low parts of muls for DstIdx.
@ -2621,9 +2621,9 @@ void LegalizerHelper::multiplyRegisters(SmallVectorImpl<unsigned> &DstRegs,
LegalizerHelper::LegalizeResult
LegalizerHelper::narrowScalarMul(MachineInstr &MI, LLT NarrowTy) {
unsigned DstReg = MI.getOperand(0).getReg();
unsigned Src1 = MI.getOperand(1).getReg();
unsigned Src2 = MI.getOperand(2).getReg();
Register DstReg = MI.getOperand(0).getReg();
Register Src1 = MI.getOperand(1).getReg();
Register Src2 = MI.getOperand(2).getReg();
LLT Ty = MRI.getType(DstReg);
if (Ty.isVector())
@ -2640,14 +2640,14 @@ LegalizerHelper::narrowScalarMul(MachineInstr &MI, LLT NarrowTy) {
bool IsMulHigh = MI.getOpcode() == TargetOpcode::G_UMULH;
unsigned DstTmpParts = NumDstParts * (IsMulHigh ? 2 : 1);
SmallVector<unsigned, 2> Src1Parts, Src2Parts, DstTmpRegs;
SmallVector<Register, 2> Src1Parts, Src2Parts, DstTmpRegs;
extractParts(Src1, NarrowTy, NumSrcParts, Src1Parts);
extractParts(Src2, NarrowTy, NumSrcParts, Src2Parts);
DstTmpRegs.resize(DstTmpParts);
multiplyRegisters(DstTmpRegs, Src1Parts, Src2Parts, NarrowTy);
// Take only high half of registers if this is high mul.
ArrayRef<unsigned> DstRegs(
ArrayRef<Register> DstRegs(
IsMulHigh ? &DstTmpRegs[DstTmpParts / 2] : &DstTmpRegs[0], NumDstParts);
MIRBuilder.buildMerge(DstReg, DstRegs);
MI.eraseFromParent();
@ -2669,7 +2669,7 @@ LegalizerHelper::narrowScalarExtract(MachineInstr &MI, unsigned TypeIdx,
return UnableToLegalize;
int NumParts = SizeOp1 / NarrowSize;
SmallVector<unsigned, 2> SrcRegs, DstRegs;
SmallVector<Register, 2> SrcRegs, DstRegs;
SmallVector<uint64_t, 2> Indexes;
extractParts(MI.getOperand(1).getReg(), NarrowTy, NumParts, SrcRegs);
@ -2736,7 +2736,7 @@ LegalizerHelper::narrowScalarInsert(MachineInstr &MI, unsigned TypeIdx,
int NumParts = SizeOp0 / NarrowSize;
SmallVector<unsigned, 2> SrcRegs, DstRegs;
SmallVector<Register, 2> SrcRegs, DstRegs;
SmallVector<uint64_t, 2> Indexes;
extractParts(MI.getOperand(1).getReg(), NarrowTy, NumParts, SrcRegs);
@ -2802,9 +2802,9 @@ LegalizerHelper::narrowScalarBasic(MachineInstr &MI, unsigned TypeIdx,
assert(MI.getNumOperands() == 3 && TypeIdx == 0);
SmallVector<unsigned, 4> DstRegs, DstLeftoverRegs;
SmallVector<unsigned, 4> Src0Regs, Src0LeftoverRegs;
SmallVector<unsigned, 4> Src1Regs, Src1LeftoverRegs;
SmallVector<Register, 4> DstRegs, DstLeftoverRegs;
SmallVector<Register, 4> Src0Regs, Src0LeftoverRegs;
SmallVector<Register, 4> Src1Regs, Src1LeftoverRegs;
LLT LeftoverTy;
if (!extractParts(MI.getOperand(1).getReg(), DstTy, NarrowTy, LeftoverTy,
Src0Regs, Src0LeftoverRegs))
@ -2849,9 +2849,9 @@ LegalizerHelper::narrowScalarSelect(MachineInstr &MI, unsigned TypeIdx,
unsigned DstReg = MI.getOperand(0).getReg();
LLT DstTy = MRI.getType(DstReg);
SmallVector<unsigned, 4> DstRegs, DstLeftoverRegs;
SmallVector<unsigned, 4> Src1Regs, Src1LeftoverRegs;
SmallVector<unsigned, 4> Src2Regs, Src2LeftoverRegs;
SmallVector<Register, 4> DstRegs, DstLeftoverRegs;
SmallVector<Register, 4> Src1Regs, Src1LeftoverRegs;
SmallVector<Register, 4> Src2Regs, Src2LeftoverRegs;
LLT LeftoverTy;
if (!extractParts(MI.getOperand(2).getReg(), DstTy, NarrowTy, LeftoverTy,
Src1Regs, Src1LeftoverRegs))

View File

@ -210,7 +210,7 @@ MachineInstrBuilder MachineIRBuilder::buildGEP(unsigned Res, unsigned Op0,
}
Optional<MachineInstrBuilder>
MachineIRBuilder::materializeGEP(unsigned &Res, unsigned Op0,
MachineIRBuilder::materializeGEP(Register &Res, Register Op0,
const LLT &ValueTy, uint64_t Value) {
assert(Res == 0 && "Res is a result argument");
assert(ValueTy.isScalar() && "invalid offset type");
@ -506,7 +506,7 @@ MachineInstrBuilder MachineIRBuilder::buildExtract(const DstOp &Dst,
return Extract;
}
void MachineIRBuilder::buildSequence(unsigned Res, ArrayRef<unsigned> Ops,
void MachineIRBuilder::buildSequence(Register Res, ArrayRef<Register> Ops,
ArrayRef<uint64_t> Indices) {
#ifndef NDEBUG
assert(Ops.size() == Indices.size() && "incompatible args");
@ -535,11 +535,11 @@ void MachineIRBuilder::buildSequence(unsigned Res, ArrayRef<unsigned> Ops,
return;
}
unsigned ResIn = getMRI()->createGenericVirtualRegister(ResTy);
Register ResIn = getMRI()->createGenericVirtualRegister(ResTy);
buildUndef(ResIn);
for (unsigned i = 0; i < Ops.size(); ++i) {
unsigned ResOut = i + 1 == Ops.size()
Register ResOut = i + 1 == Ops.size()
? Res
: getMRI()->createGenericVirtualRegister(ResTy);
buildInsert(ResOut, ResIn, Ops[i], Indices[i]);
@ -552,7 +552,7 @@ MachineInstrBuilder MachineIRBuilder::buildUndef(const DstOp &Res) {
}
MachineInstrBuilder MachineIRBuilder::buildMerge(const DstOp &Res,
ArrayRef<unsigned> Ops) {
ArrayRef<Register> Ops) {
// Unfortunately to convert from ArrayRef<LLT> to ArrayRef<SrcOp>,
// we need some temporary storage for the DstOp objects. Here we use a
// sufficiently large SmallVector to not go through the heap.
@ -572,13 +572,13 @@ MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<LLT> Res,
MachineInstrBuilder MachineIRBuilder::buildUnmerge(LLT Res,
const SrcOp &Op) {
unsigned NumReg = Op.getLLTTy(*getMRI()).getSizeInBits() / Res.getSizeInBits();
SmallVector<unsigned, 8> TmpVec;
SmallVector<Register, 8> TmpVec;
for (unsigned I = 0; I != NumReg; ++I)
TmpVec.push_back(getMRI()->createGenericVirtualRegister(Res));
return buildUnmerge(TmpVec, Op);
}
MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<unsigned> Res,
MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<Register> Res,
const SrcOp &Op) {
// Unfortunately to convert from ArrayRef<unsigned> to ArrayRef<DstOp>,
// we need some temporary storage for the DstOp objects. Here we use a
@ -588,7 +588,7 @@ MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<unsigned> Res,
}
MachineInstrBuilder MachineIRBuilder::buildBuildVector(const DstOp &Res,
ArrayRef<unsigned> Ops) {
ArrayRef<Register> Ops) {
// Unfortunately to convert from ArrayRef<unsigned> to ArrayRef<SrcOp>,
// we need some temporary storage for the DstOp objects. Here we use a
// sufficiently large SmallVector to not go through the heap.
@ -604,7 +604,7 @@ MachineInstrBuilder MachineIRBuilder::buildSplatVector(const DstOp &Res,
MachineInstrBuilder
MachineIRBuilder::buildBuildVectorTrunc(const DstOp &Res,
ArrayRef<unsigned> Ops) {
ArrayRef<Register> Ops) {
// Unfortunately to convert from ArrayRef<unsigned> to ArrayRef<SrcOp>,
// we need some temporary storage for the DstOp objects. Here we use a
// sufficiently large SmallVector to not go through the heap.
@ -613,7 +613,7 @@ MachineIRBuilder::buildBuildVectorTrunc(const DstOp &Res,
}
MachineInstrBuilder
MachineIRBuilder::buildConcatVectors(const DstOp &Res, ArrayRef<unsigned> Ops) {
MachineIRBuilder::buildConcatVectors(const DstOp &Res, ArrayRef<Register> Ops) {
// Unfortunately to convert from ArrayRef<unsigned> to ArrayRef<SrcOp>,
// we need some temporary storage for the DstOp objects. Here we use a
// sufficiently large SmallVector to not go through the heap.
@ -621,8 +621,8 @@ MachineIRBuilder::buildConcatVectors(const DstOp &Res, ArrayRef<unsigned> Ops) {
return buildInstr(TargetOpcode::G_CONCAT_VECTORS, Res, TmpVec);
}
MachineInstrBuilder MachineIRBuilder::buildInsert(unsigned Res, unsigned Src,
unsigned Op, unsigned Index) {
MachineInstrBuilder MachineIRBuilder::buildInsert(Register Res, Register Src,
Register Op, unsigned Index) {
assert(Index + getMRI()->getType(Op).getSizeInBits() <=
getMRI()->getType(Res).getSizeInBits() &&
"insertion past the end of a register");
@ -640,7 +640,7 @@ MachineInstrBuilder MachineIRBuilder::buildInsert(unsigned Res, unsigned Src,
}
MachineInstrBuilder MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
ArrayRef<unsigned> ResultRegs,
ArrayRef<Register> ResultRegs,
bool HasSideEffects) {
auto MIB =
buildInstr(HasSideEffects ? TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS

View File

@ -70,12 +70,12 @@ STATISTIC(NumInserted, "Number of DBG_VALUE instructions inserted");
// If @MI is a DBG_VALUE with debug value described by a defined
// register, returns the number of this register. In the other case, returns 0.
static unsigned isDbgValueDescribedByReg(const MachineInstr &MI) {
static Register isDbgValueDescribedByReg(const MachineInstr &MI) {
assert(MI.isDebugValue() && "expected a DBG_VALUE");
assert(MI.getNumOperands() == 4 && "malformed DBG_VALUE");
// If location of variable is described using a register (directly
// or indirectly), this register is always a first operand.
return MI.getOperand(0).isReg() ? MI.getOperand(0).getReg() : 0;
return MI.getOperand(0).isReg() ? MI.getOperand(0).getReg() : Register();
}
namespace {

View File

@ -342,7 +342,7 @@ hash_code llvm::hash_value(const MachineOperand &MO) {
switch (MO.getType()) {
case MachineOperand::MO_Register:
// Register operands don't have target flags.
return hash_combine(MO.getType(), MO.getReg(), MO.getSubReg(), MO.isDef());
return hash_combine(MO.getType(), (unsigned)MO.getReg(), MO.getSubReg(), MO.isDef());
case MachineOperand::MO_Immediate:
return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getImm());
case MachineOperand::MO_CImmediate:

View File

@ -154,7 +154,7 @@ unsigned MachineRegisterInfo::createIncompleteVirtualRegister(StringRef Name) {
/// createVirtualRegister - Create and return a new virtual register in the
/// function with the specified register class.
///
unsigned
Register
MachineRegisterInfo::createVirtualRegister(const TargetRegisterClass *RegClass,
StringRef Name) {
assert(RegClass && "Cannot create register without RegClass!");
@ -169,7 +169,7 @@ MachineRegisterInfo::createVirtualRegister(const TargetRegisterClass *RegClass,
return Reg;
}
unsigned MachineRegisterInfo::cloneVirtualRegister(unsigned VReg,
Register MachineRegisterInfo::cloneVirtualRegister(Register VReg,
StringRef Name) {
unsigned Reg = createIncompleteVirtualRegister(Name);
VRegInfo[Reg].first = VRegInfo[VReg].first;
@ -184,7 +184,7 @@ void MachineRegisterInfo::setType(unsigned VReg, LLT Ty) {
VRegToType[VReg] = Ty;
}
unsigned
Register
MachineRegisterInfo::createGenericVirtualRegister(LLT Ty, StringRef Name) {
// New virtual register number.
unsigned Reg = createIncompleteVirtualRegister(Name);

View File

@ -2874,14 +2874,14 @@ void RAGreedy::collectHintInfo(unsigned Reg, HintsInfo &Out) {
if (!Instr.isFullCopy())
continue;
// Look for the other end of the copy.
unsigned OtherReg = Instr.getOperand(0).getReg();
Register OtherReg = Instr.getOperand(0).getReg();
if (OtherReg == Reg) {
OtherReg = Instr.getOperand(1).getReg();
if (OtherReg == Reg)
continue;
}
// Get the current assignment.
unsigned OtherPhysReg = TargetRegisterInfo::isPhysicalRegister(OtherReg)
Register OtherPhysReg = TargetRegisterInfo::isPhysicalRegister(OtherReg)
? OtherReg
: VRM->getPhys(OtherReg);
// Push the collected information.

View File

@ -7879,7 +7879,7 @@ static void GetRegistersForValue(SelectionDAG &DAG, const SDLoc &DL,
for (; NumRegs; --NumRegs, ++I) {
assert(I != RC->end() && "Ran out of registers to allocate!");
auto R = (AssignedReg) ? *I : RegInfo.createVirtualRegister(RC);
Register R = AssignedReg ? Register(*I) : RegInfo.createVirtualRegister(RC);
Regs.push_back(R);
}

View File

@ -570,7 +570,7 @@ bool SelectionDAGISel::runOnMachineFunction(MachineFunction &mf) {
for (unsigned i = 0, e = FuncInfo->ArgDbgValues.size(); i != e; ++i) {
MachineInstr *MI = FuncInfo->ArgDbgValues[e-i-1];
bool hasFI = MI->getOperand(0).isFI();
unsigned Reg =
Register Reg =
hasFI ? TRI.getFrameRegister(*MF) : MI->getOperand(0).getReg();
if (TargetRegisterInfo::isPhysicalRegister(Reg))
EntryMBB->insert(EntryMBB->begin(), MI);

View File

@ -42,7 +42,7 @@ unsigned SwiftErrorValueTracking::getOrCreateVReg(const MachineBasicBlock *MBB,
}
void SwiftErrorValueTracking::setCurrentVReg(const MachineBasicBlock *MBB,
const Value *Val, unsigned VReg) {
const Value *Val, Register VReg) {
VRegDefMap[std::make_pair(MBB, Val)] = VReg;
}
@ -161,7 +161,7 @@ void SwiftErrorValueTracking::propagateVRegs() {
auto UUseIt = VRegUpwardsUse.find(Key);
auto VRegDefIt = VRegDefMap.find(Key);
bool UpwardsUse = UUseIt != VRegUpwardsUse.end();
unsigned UUseVReg = UpwardsUse ? UUseIt->second : 0;
Register UUseVReg = UpwardsUse ? UUseIt->second : Register();
bool DownwardDef = VRegDefIt != VRegDefMap.end();
assert(!(UpwardsUse && !DownwardDef) &&
"We can't have an upwards use but no downwards def");
@ -238,7 +238,7 @@ void SwiftErrorValueTracking::propagateVRegs() {
// destination virtual register number otherwise we generate a new one.
auto &DL = MF->getDataLayout();
auto const *RC = TLI->getRegClassFor(TLI->getPointerTy(DL));
unsigned PHIVReg =
Register PHIVReg =
UpwardsUse ? UUseVReg : MF->getRegInfo().createVirtualRegister(RC);
MachineInstrBuilder PHI =
BuildMI(*MBB, MBB->getFirstNonPHI(), DLoc,

View File

@ -163,9 +163,9 @@ MachineInstr *TargetInstrInfo::commuteInstructionImpl(MachineInstr &MI,
assert(MI.getOperand(Idx1).isReg() && MI.getOperand(Idx2).isReg() &&
"This only knows how to commute register operands so far");
unsigned Reg0 = HasDef ? MI.getOperand(0).getReg() : 0;
unsigned Reg1 = MI.getOperand(Idx1).getReg();
unsigned Reg2 = MI.getOperand(Idx2).getReg();
Register Reg0 = HasDef ? MI.getOperand(0).getReg() : Register();
Register Reg1 = MI.getOperand(Idx1).getReg();
Register Reg2 = MI.getOperand(Idx2).getReg();
unsigned SubReg0 = HasDef ? MI.getOperand(0).getSubReg() : 0;
unsigned SubReg1 = MI.getOperand(Idx1).getSubReg();
unsigned SubReg2 = MI.getOperand(Idx2).getSubReg();

View File

@ -232,8 +232,8 @@ void AArch64CallLowering::splitToValueTypes(
bool AArch64CallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
const Value *Val,
ArrayRef<unsigned> VRegs,
unsigned SwiftErrorVReg) const {
ArrayRef<Register> VRegs,
Register SwiftErrorVReg) const {
auto MIB = MIRBuilder.buildInstrNoInsert(AArch64::RET_ReallyLR);
assert(((Val && !VRegs.empty()) || (!Val && VRegs.empty())) &&
"Return value without a vreg");
@ -352,7 +352,7 @@ bool AArch64CallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
bool AArch64CallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder,
const Function &F,
ArrayRef<unsigned> VRegs) const {
ArrayRef<Register> VRegs) const {
MachineFunction &MF = MIRBuilder.getMF();
MachineBasicBlock &MBB = MIRBuilder.getMBB();
MachineRegisterInfo &MRI = MF.getRegInfo();
@ -427,7 +427,7 @@ bool AArch64CallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
const MachineOperand &Callee,
const ArgInfo &OrigRet,
ArrayRef<ArgInfo> OrigArgs,
unsigned SwiftErrorVReg) const {
Register SwiftErrorVReg) const {
MachineFunction &MF = MIRBuilder.getMF();
const Function &F = MF.getFunction();
MachineRegisterInfo &MRI = MF.getRegInfo();
@ -495,7 +495,7 @@ bool AArch64CallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
SplitArgs.clear();
SmallVector<uint64_t, 8> RegOffsets;
SmallVector<unsigned, 8> SplitRegs;
SmallVector<Register, 8> SplitRegs;
splitToValueTypes(OrigRet, SplitArgs, DL, MRI, F.getCallingConv(),
[&](unsigned Reg, uint64_t Offset) {
RegOffsets.push_back(Offset);

View File

@ -34,16 +34,16 @@ public:
AArch64CallLowering(const AArch64TargetLowering &TLI);
bool lowerReturn(MachineIRBuilder &MIRBuilder, const Value *Val,
ArrayRef<unsigned> VRegs,
unsigned SwiftErrorVReg) const override;
ArrayRef<Register> VRegs,
Register SwiftErrorVReg) const override;
bool lowerFormalArguments(MachineIRBuilder &MIRBuilder, const Function &F,
ArrayRef<unsigned> VRegs) const override;
ArrayRef<Register> VRegs) const override;
bool lowerCall(MachineIRBuilder &MIRBuilder, CallingConv::ID CallConv,
const MachineOperand &Callee, const ArgInfo &OrigRet,
ArrayRef<ArgInfo> OrigArgs,
unsigned SwiftErrorVReg) const override;
Register SwiftErrorVReg) const override;
bool lowerCall(MachineIRBuilder &MIRBuilder, CallingConv::ID CallConv,
const MachineOperand &Callee, const ArgInfo &OrigRet,

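A consequence visible in this header: the `ArrayRef<unsigned>` parameters must flip to `ArrayRef<Register>` in the base class and in every override within the same commit, since `ArrayRef` performs no element-type conversion and a stale signature would silently become a new overload instead of an override. The `override` keyword turns that mistake into a compile error. A hedged sketch (the `Base` and `Derived` names are hypothetical; llvm::ArrayRef and llvm::Register are the real types):

    #include "llvm/ADT/ArrayRef.h"
    #include "llvm/CodeGen/Register.h"
    using llvm::ArrayRef;
    using llvm::Register;

    struct Base {
      virtual ~Base() = default;
      virtual bool lower(ArrayRef<Register> VRegs) { return false; }
    };
    struct Derived : Base {
      // Keeping ArrayRef<unsigned> here would make `override` a hard
      // error, which is exactly what catches a missed update.
      bool lower(ArrayRef<Register> VRegs) override { return !VRegs.empty(); }
    };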

@ -212,8 +212,8 @@ private:
struct LoadInfo {
LoadInfo() = default;
unsigned DestReg = 0;
unsigned BaseReg = 0;
Register DestReg;
Register BaseReg;
int BaseRegIdx = -1;
const MachineOperand *OffsetOpnd = nullptr;
bool IsPrePost = false;
@ -647,7 +647,7 @@ static Optional<LoadInfo> getLoadInfo(const MachineInstr &MI) {
return None;
LoadInfo LI;
LI.DestReg = DestRegIdx == -1 ? 0 : MI.getOperand(DestRegIdx).getReg();
LI.DestReg = DestRegIdx == -1 ? Register() : MI.getOperand(DestRegIdx).getReg();
LI.BaseReg = BaseReg;
LI.BaseRegIdx = BaseRegIdx;
LI.OffsetOpnd = OffsetIdx == -1 ? nullptr : &MI.getOperand(OffsetIdx);
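The `LoadInfo` hunk applies the same idea to data members: the explicit `= 0` initializers disappear because a default-constructed `Register` is already the invalid register, and the ternary spells "no register" as `Register()` rather than a bare literal. A hedged usage sketch (`hasDestination`, `HasDest`, and `SomeReg` are hypothetical):

    // Assumes LoadInfo as defined above and a Register that
    // default-constructs to the null register.
    bool hasDestination(bool HasDest, Register SomeReg) {
      LoadInfo LI;                       // DestReg and BaseReg start invalid
      LI.DestReg = HasDest ? SomeReg : Register();
      return LI.DestReg != 0;            // comparisons against 0 still work
    }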


@ -1018,9 +1018,9 @@ void AArch64InstructionSelector::materializeLargeCMVal(
MovZ->addOperand(MF, MachineOperand::CreateImm(0));
constrainSelectedInstRegOperands(*MovZ, TII, TRI, RBI);
auto BuildMovK = [&](unsigned SrcReg, unsigned char Flags, unsigned Offset,
unsigned ForceDstReg) {
unsigned DstReg = ForceDstReg
auto BuildMovK = [&](Register SrcReg, unsigned char Flags, unsigned Offset,
Register ForceDstReg) {
Register DstReg = ForceDstReg
? ForceDstReg
: MRI.createVirtualRegister(&AArch64::GPR64RegClass);
auto MovI = MIB.buildInstr(AArch64::MOVKXi).addDef(DstReg).addUse(SrcReg);


@ -379,8 +379,8 @@ bool AArch64RedundantCopyElimination::optimizeBlock(MachineBasicBlock *MBB) {
bool IsCopy = MI->isCopy();
bool IsMoveImm = MI->isMoveImmediate();
if (IsCopy || IsMoveImm) {
MCPhysReg DefReg = MI->getOperand(0).getReg();
MCPhysReg SrcReg = IsCopy ? MI->getOperand(1).getReg() : 0;
Register DefReg = MI->getOperand(0).getReg();
Register SrcReg = IsCopy ? MI->getOperand(1).getReg() : Register();
int64_t SrcImm = IsMoveImm ? MI->getOperand(1).getImm() : 0;
if (!MRI->isReserved(DefReg) &&
((IsCopy && (SrcReg == AArch64::XZR || SrcReg == AArch64::WZR)) ||


@ -279,7 +279,7 @@ bool AArch64RegisterInfo::hasBasePointer(const MachineFunction &MF) const {
return false;
}
unsigned
Register
AArch64RegisterInfo::getFrameRegister(const MachineFunction &MF) const {
const AArch64FrameLowering *TFI = getFrameLowering(MF);
return TFI->hasFP(MF) ? AArch64::FP : AArch64::SP;


@ -113,7 +113,7 @@ public:
unsigned getBaseRegister() const;
// Debug information queries.
unsigned getFrameRegister(const MachineFunction &MF) const override;
Register getFrameRegister(const MachineFunction &MF) const override;
unsigned getRegPressureLimit(const TargetRegisterClass *RC,
MachineFunction &MF) const override;


@ -69,7 +69,7 @@ AMDGPUCallLowering::AMDGPUCallLowering(const AMDGPUTargetLowering &TLI)
bool AMDGPUCallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
const Value *Val,
ArrayRef<unsigned> VRegs) const {
ArrayRef<Register> VRegs) const {
MachineFunction &MF = MIRBuilder.getMF();
MachineRegisterInfo &MRI = MF.getRegInfo();
@ -81,7 +81,7 @@ bool AMDGPUCallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
return true;
}
unsigned VReg = VRegs[0];
Register VReg = VRegs[0];
const Function &F = MF.getFunction();
auto &DL = F.getParent()->getDataLayout();
@ -138,14 +138,14 @@ unsigned AMDGPUCallLowering::lowerParameterPtr(MachineIRBuilder &MIRBuilder,
void AMDGPUCallLowering::lowerParameter(MachineIRBuilder &MIRBuilder,
Type *ParamTy, uint64_t Offset,
unsigned Align,
unsigned DstReg) const {
Register DstReg) const {
MachineFunction &MF = MIRBuilder.getMF();
const Function &F = MF.getFunction();
const DataLayout &DL = F.getParent()->getDataLayout();
PointerType *PtrTy = PointerType::get(ParamTy, AMDGPUAS::CONSTANT_ADDRESS);
MachinePointerInfo PtrInfo(UndefValue::get(PtrTy));
unsigned TypeSize = DL.getTypeStoreSize(ParamTy);
unsigned PtrReg = lowerParameterPtr(MIRBuilder, ParamTy, Offset);
Register PtrReg = lowerParameterPtr(MIRBuilder, ParamTy, Offset);
MachineMemOperand *MMO =
MF.getMachineMemOperand(PtrInfo, MachineMemOperand::MOLoad |
@ -195,7 +195,7 @@ static void allocateSystemSGPRs(CCState &CCInfo,
bool AMDGPUCallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder,
const Function &F,
ArrayRef<unsigned> VRegs) const {
ArrayRef<Register> VRegs) const {
// AMDGPU_GS and AMDGPU_HS are not supported yet.
if (F.getCallingConv() == CallingConv::AMDGPU_GS ||
F.getCallingConv() == CallingConv::AMDGPU_HS)


@ -27,15 +27,15 @@ class AMDGPUCallLowering: public CallLowering {
void lowerParameter(MachineIRBuilder &MIRBuilder, Type *ParamTy,
uint64_t Offset, unsigned Align,
unsigned DstReg) const;
Register DstReg) const;
public:
AMDGPUCallLowering(const AMDGPUTargetLowering &TLI);
bool lowerReturn(MachineIRBuilder &MIRBuilder, const Value *Val,
ArrayRef<unsigned> VRegs) const override;
ArrayRef<Register> VRegs) const override;
bool lowerFormalArguments(MachineIRBuilder &MIRBuilder, const Function &F,
ArrayRef<unsigned> VRegs) const override;
ArrayRef<Register> VRegs) const override;
static CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool IsVarArg);
static CCAssignFn *CCAssignFnForReturn(CallingConv::ID CC, bool IsVarArg);
};


@ -791,8 +791,8 @@ unsigned AMDGPULegalizerInfo::getSegmentAperture(
4,
MinAlign(64, StructOffset));
unsigned LoadResult = MRI.createGenericVirtualRegister(S32);
unsigned LoadAddr = AMDGPU::NoRegister;
Register LoadResult = MRI.createGenericVirtualRegister(S32);
Register LoadAddr;
MIRBuilder.materializeGEP(LoadAddr, QueuePtr, LLT::scalar(64), StructOffset);
MIRBuilder.buildLoad(LoadResult, LoadAddr, *MMO);
@ -806,8 +806,8 @@ bool AMDGPULegalizerInfo::legalizeAddrSpaceCast(
MIRBuilder.setInstr(MI);
unsigned Dst = MI.getOperand(0).getReg();
unsigned Src = MI.getOperand(1).getReg();
Register Dst = MI.getOperand(0).getReg();
Register Src = MI.getOperand(1).getReg();
LLT DstTy = MRI.getType(Dst);
LLT SrcTy = MRI.getType(Src);


@ -375,7 +375,7 @@ AMDGPURegisterBankInfo::getInstrAlternativeMappings(
void AMDGPURegisterBankInfo::split64BitValueForMapping(
MachineIRBuilder &B,
SmallVector<unsigned, 2> &Regs,
SmallVector<Register, 2> &Regs,
LLT HalfTy,
unsigned Reg) const {
assert(HalfTy.getSizeInBits() == 32);
@ -396,7 +396,7 @@ void AMDGPURegisterBankInfo::split64BitValueForMapping(
}
/// Replace the current type each register in \p Regs has with \p NewTy
static void setRegsToType(MachineRegisterInfo &MRI, ArrayRef<unsigned> Regs,
static void setRegsToType(MachineRegisterInfo &MRI, ArrayRef<Register> Regs,
LLT NewTy) {
for (unsigned Reg : Regs) {
assert(MRI.getType(Reg).getSizeInBits() == NewTy.getSizeInBits());
@ -445,7 +445,7 @@ void AMDGPURegisterBankInfo::executeInWaterfallLoop(
// Use a set to avoid extra readfirstlanes in the case where multiple operands
// are the same register.
SmallSet<unsigned, 4> SGPROperandRegs;
SmallSet<Register, 4> SGPROperandRegs;
for (unsigned Op : OpIndices) {
assert(MI.getOperand(Op).isUse());
unsigned Reg = MI.getOperand(Op).getReg();
@ -459,9 +459,9 @@ void AMDGPURegisterBankInfo::executeInWaterfallLoop(
return;
MachineIRBuilder B(MI);
SmallVector<unsigned, 4> ResultRegs;
SmallVector<unsigned, 4> InitResultRegs;
SmallVector<unsigned, 4> PhiRegs;
SmallVector<Register, 4> ResultRegs;
SmallVector<Register, 4> InitResultRegs;
SmallVector<Register, 4> PhiRegs;
for (MachineOperand &Def : MI.defs()) {
LLT ResTy = MRI.getType(Def.getReg());
const RegisterBank *DefBank = getRegBank(Def.getReg(), MRI, *TRI);
@ -575,7 +575,7 @@ void AMDGPURegisterBankInfo::executeInWaterfallLoop(
}
} else {
LLT S32 = LLT::scalar(32);
SmallVector<unsigned, 8> ReadlanePieces;
SmallVector<Register, 8> ReadlanePieces;
// The compares can be done as 64-bit, but the extract needs to be done
// in 32-bit pieces.
@ -732,10 +732,10 @@ void AMDGPURegisterBankInfo::applyMappingImpl(
LLT HalfTy = getHalfSizedType(DstTy);
SmallVector<unsigned, 2> DefRegs(OpdMapper.getVRegs(0));
SmallVector<unsigned, 1> Src0Regs(OpdMapper.getVRegs(1));
SmallVector<unsigned, 2> Src1Regs(OpdMapper.getVRegs(2));
SmallVector<unsigned, 2> Src2Regs(OpdMapper.getVRegs(3));
SmallVector<Register, 2> DefRegs(OpdMapper.getVRegs(0));
SmallVector<Register, 1> Src0Regs(OpdMapper.getVRegs(1));
SmallVector<Register, 2> Src1Regs(OpdMapper.getVRegs(2));
SmallVector<Register, 2> Src2Regs(OpdMapper.getVRegs(3));
// All inputs are SGPRs, nothing special to do.
if (DefRegs.empty()) {
@ -781,9 +781,9 @@ void AMDGPURegisterBankInfo::applyMappingImpl(
break;
LLT HalfTy = getHalfSizedType(DstTy);
SmallVector<unsigned, 2> DefRegs(OpdMapper.getVRegs(0));
SmallVector<unsigned, 2> Src0Regs(OpdMapper.getVRegs(1));
SmallVector<unsigned, 2> Src1Regs(OpdMapper.getVRegs(2));
SmallVector<Register, 2> DefRegs(OpdMapper.getVRegs(0));
SmallVector<Register, 2> Src0Regs(OpdMapper.getVRegs(1));
SmallVector<Register, 2> Src1Regs(OpdMapper.getVRegs(2));
// All inputs are SGPRs, nothing special to do.
if (DefRegs.empty()) {


@ -13,6 +13,7 @@
#ifndef LLVM_LIB_TARGET_AMDGPU_AMDGPUREGISTERBANKINFO_H
#define LLVM_LIB_TARGET_AMDGPU_AMDGPUREGISTERBANKINFO_H
#include "llvm/CodeGen/Register.h"
#include "llvm/CodeGen/GlobalISel/RegisterBankInfo.h"
#define GET_REGBANK_DECLARATIONS
@ -54,7 +55,7 @@ class AMDGPURegisterBankInfo : public AMDGPUGenRegisterBankInfo {
/// Split 64-bit value \p Reg into two 32-bit halves and populate them into \p
/// Regs. This appropriately sets the regbank of the new registers.
void split64BitValueForMapping(MachineIRBuilder &B,
SmallVector<unsigned, 2> &Regs,
SmallVector<Register, 2> &Regs,
LLT HalfTy,
unsigned Reg) const;


@ -82,7 +82,7 @@ const uint32_t *SIRegisterInfo::getCallPreservedMask(const MachineFunction &MF,
}
}
unsigned SIRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
Register SIRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
const SIFrameLowering *TFI =
MF.getSubtarget<GCNSubtarget>().getFrameLowering();
const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();


@ -186,8 +186,8 @@ public:
// Does MII and MIJ share the same pred_sel ?
int OpI = TII->getOperandIdx(MII->getOpcode(), R600::OpName::pred_sel),
OpJ = TII->getOperandIdx(MIJ->getOpcode(), R600::OpName::pred_sel);
unsigned PredI = (OpI > -1)?MII->getOperand(OpI).getReg():0,
PredJ = (OpJ > -1)?MIJ->getOperand(OpJ).getReg():0;
Register PredI = (OpI > -1)?MII->getOperand(OpI).getReg() : Register(),
PredJ = (OpJ > -1)?MIJ->getOperand(OpJ).getReg() : Register();
if (PredI != PredJ)
return false;
if (SUJ->isSucc(SUI)) {


@ -67,7 +67,7 @@ const MCPhysReg *R600RegisterInfo::getCalleeSavedRegs(
return &CalleeSavedReg;
}
unsigned R600RegisterInfo::getFrameRegister(const MachineFunction &MF) const {
Register R600RegisterInfo::getFrameRegister(const MachineFunction &MF) const {
return R600::NoRegister;
}


@ -26,7 +26,7 @@ struct R600RegisterInfo final : public R600GenRegisterInfo {
BitVector getReservedRegs(const MachineFunction &MF) const override;
const MCPhysReg *getCalleeSavedRegs(const MachineFunction *MF) const override;
unsigned getFrameRegister(const MachineFunction &MF) const override;
Register getFrameRegister(const MachineFunction &MF) const override;
/// get the HW encoding for a register's channel.
unsigned getHWRegChan(unsigned reg) const;


@ -185,7 +185,7 @@ void SILowerControlFlow::emitIf(MachineInstr &MI) {
assert(SaveExec.getSubReg() == AMDGPU::NoSubRegister &&
Cond.getSubReg() == AMDGPU::NoSubRegister);
unsigned SaveExecReg = SaveExec.getReg();
Register SaveExecReg = SaveExec.getReg();
MachineOperand &ImpDefSCC = MI.getOperand(4);
assert(ImpDefSCC.getReg() == AMDGPU::SCC && ImpDefSCC.isDef());
@ -197,7 +197,7 @@ void SILowerControlFlow::emitIf(MachineInstr &MI) {
// Add an implicit def of exec to discourage scheduling VALU after this which
// will interfere with trying to form s_and_saveexec_b64 later.
unsigned CopyReg = SimpleIf ? SaveExecReg
Register CopyReg = SimpleIf ? SaveExecReg
: MRI->createVirtualRegister(BoolRC);
MachineInstr *CopyExec =
BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), CopyReg)
@ -266,7 +266,7 @@ void SILowerControlFlow::emitElse(MachineInstr &MI) {
MachineBasicBlock &MBB = *MI.getParent();
const DebugLoc &DL = MI.getDebugLoc();
unsigned DstReg = MI.getOperand(0).getReg();
Register DstReg = MI.getOperand(0).getReg();
assert(MI.getOperand(0).getSubReg() == AMDGPU::NoSubRegister);
bool ExecModified = MI.getOperand(3).getImm() != 0;
@ -275,14 +275,14 @@ void SILowerControlFlow::emitElse(MachineInstr &MI) {
// We are running before TwoAddressInstructions, and si_else's operands are
// tied. In order to correctly tie the registers, split this into a copy of
// the src like it does.
unsigned CopyReg = MRI->createVirtualRegister(BoolRC);
Register CopyReg = MRI->createVirtualRegister(BoolRC);
MachineInstr *CopyExec =
BuildMI(MBB, Start, DL, TII->get(AMDGPU::COPY), CopyReg)
.add(MI.getOperand(1)); // Saved EXEC
// This must be inserted before phis and any spill code inserted before the
// else.
unsigned SaveReg = ExecModified ?
Register SaveReg = ExecModified ?
MRI->createVirtualRegister(BoolRC) : DstReg;
MachineInstr *OrSaveExec =
BuildMI(MBB, Start, DL, TII->get(OrSaveExecOpc), SaveReg)


@ -721,7 +721,7 @@ bool SIRegisterInfo::spillSGPR(MachineBasicBlock::iterator MI,
if (SpillToSMEM && OnlyToVGPR)
return false;
unsigned FrameReg = getFrameRegister(*MF);
Register FrameReg = getFrameRegister(*MF);
assert(SpillToVGPR || (SuperReg != MFI->getStackPtrOffsetReg() &&
SuperReg != MFI->getFrameOffsetReg() &&
@ -914,7 +914,7 @@ bool SIRegisterInfo::restoreSGPR(MachineBasicBlock::iterator MI,
unsigned EltSize = 4;
unsigned ScalarLoadOp;
unsigned FrameReg = getFrameRegister(*MF);
Register FrameReg = getFrameRegister(*MF);
const TargetRegisterClass *RC = getPhysRegClass(SuperReg);
if (SpillToSMEM && isSGPRClass(RC)) {
@ -1063,7 +1063,7 @@ void SIRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
MachineOperand &FIOp = MI->getOperand(FIOperandNum);
int Index = MI->getOperand(FIOperandNum).getIndex();
unsigned FrameReg = getFrameRegister(*MF);
Register FrameReg = getFrameRegister(*MF);
switch (MI->getOpcode()) {
// SGPR register spill
@ -1154,7 +1154,7 @@ void SIRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
= MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
bool IsCopy = MI->getOpcode() == AMDGPU::V_MOV_B32_e32;
unsigned ResultReg = IsCopy ?
Register ResultReg = IsCopy ?
MI->getOperand(0).getReg() :
MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);


@ -70,7 +70,7 @@ public:
return 100;
}
unsigned getFrameRegister(const MachineFunction &MF) const override;
Register getFrameRegister(const MachineFunction &MF) const override;
bool canRealignStack(const MachineFunction &MF) const override;
bool requiresRegisterScavenging(const MachineFunction &Fn) const override;


@ -424,7 +424,7 @@ bool ARCOptAddrMode::canSinkLoadStoreTo(MachineInstr *Ldst, MachineInstr *To) {
bool IsStore = Ldst->mayStore();
bool IsLoad = Ldst->mayLoad();
unsigned ValReg = IsLoad ? Ldst->getOperand(0).getReg() : 0;
Register ValReg = IsLoad ? Ldst->getOperand(0).getReg() : Register();
for (; MI != ME && MI != End; ++MI) {
if (MI->isDebugValue())
continue;


@ -186,7 +186,7 @@ void ARCRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
// Special handling of DBG_VALUE instructions.
if (MI.isDebugValue()) {
unsigned FrameReg = getFrameRegister(MF);
Register FrameReg = getFrameRegister(MF);
MI.getOperand(FIOperandNum).ChangeToRegister(FrameReg, false /*isDef*/);
MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
return;
@ -219,7 +219,7 @@ void ARCRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
ObjSize, RS, SPAdj);
}
unsigned ARCRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
Register ARCRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
const ARCFrameLowering *TFI = getFrameLowering(MF);
return TFI->hasFP(MF) ? ARC::FP : ARC::SP;
}


@ -46,7 +46,7 @@ public:
CallingConv::ID CC) const override;
// Debug information queries.
unsigned getFrameRegister(const MachineFunction &MF) const override;
Register getFrameRegister(const MachineFunction &MF) const override;
//! Return whether to emit frame moves
static bool needsFrameMoves(const MachineFunction &MF);


@ -426,7 +426,7 @@ cannotEliminateFrame(const MachineFunction &MF) const {
|| needsStackRealignment(MF);
}
unsigned
Register
ARMBaseRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
const ARMFrameLowering *TFI = getFrameLowering(MF);
@ -786,7 +786,7 @@ ARMBaseRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
int PIdx = MI.findFirstPredOperandIdx();
ARMCC::CondCodes Pred = (PIdx == -1)
? ARMCC::AL : (ARMCC::CondCodes)MI.getOperand(PIdx).getImm();
unsigned PredReg = (PIdx == -1) ? 0 : MI.getOperand(PIdx+1).getReg();
Register PredReg = (PIdx == -1) ? Register() : MI.getOperand(PIdx+1).getReg();
if (Offset == 0)
// Must be addrmode4/6.
MI.getOperand(FIOperandNum).ChangeToRegister(FrameReg, false, false, false);


@ -173,7 +173,7 @@ public:
bool cannotEliminateFrame(const MachineFunction &MF) const;
// Debug information queries.
unsigned getFrameRegister(const MachineFunction &MF) const override;
Register getFrameRegister(const MachineFunction &MF) const override;
unsigned getBaseRegister() const { return BasePtr; }
bool isLowRegister(unsigned Reg) const;


@ -151,7 +151,7 @@ struct OutgoingValueHandler : public CallLowering::ValueHandler {
assert(VA.isRegLoc() && "Value should be in reg");
assert(NextVA.isRegLoc() && "Value should be in reg");
unsigned NewRegs[] = {MRI.createGenericVirtualRegister(LLT::scalar(32)),
Register NewRegs[] = {MRI.createGenericVirtualRegister(LLT::scalar(32)),
MRI.createGenericVirtualRegister(LLT::scalar(32))};
MIRBuilder.buildUnmerge(NewRegs, Arg.Reg);
@ -232,7 +232,7 @@ void ARMCallLowering::splitToValueTypes(
/// Lower the return value for the already existing \p Ret. This assumes that
/// \p MIRBuilder's insertion point is correct.
bool ARMCallLowering::lowerReturnVal(MachineIRBuilder &MIRBuilder,
const Value *Val, ArrayRef<unsigned> VRegs,
const Value *Val, ArrayRef<Register> VRegs,
MachineInstrBuilder &Ret) const {
if (!Val)
// Nothing to do here.
@ -257,9 +257,9 @@ bool ARMCallLowering::lowerReturnVal(MachineIRBuilder &MIRBuilder,
ArgInfo CurArgInfo(VRegs[i], SplitEVTs[i].getTypeForEVT(Ctx));
setArgFlags(CurArgInfo, AttributeList::ReturnIndex, DL, F);
SmallVector<unsigned, 4> Regs;
SmallVector<Register, 4> Regs;
splitToValueTypes(CurArgInfo, SplitVTs, MF,
[&](unsigned Reg) { Regs.push_back(Reg); });
[&](Register Reg) { Regs.push_back(Reg); });
if (Regs.size() > 1)
MIRBuilder.buildUnmerge(Regs, VRegs[i]);
}
@ -273,7 +273,7 @@ bool ARMCallLowering::lowerReturnVal(MachineIRBuilder &MIRBuilder,
bool ARMCallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
const Value *Val,
ArrayRef<unsigned> VRegs) const {
ArrayRef<Register> VRegs) const {
assert(!Val == VRegs.empty() && "Return value without a vreg");
auto const &ST = MIRBuilder.getMF().getSubtarget<ARMSubtarget>();
@ -386,7 +386,7 @@ struct IncomingValueHandler : public CallLowering::ValueHandler {
assert(VA.isRegLoc() && "Value should be in reg");
assert(NextVA.isRegLoc() && "Value should be in reg");
unsigned NewRegs[] = {MRI.createGenericVirtualRegister(LLT::scalar(32)),
Register NewRegs[] = {MRI.createGenericVirtualRegister(LLT::scalar(32)),
MRI.createGenericVirtualRegister(LLT::scalar(32))};
assignValueToReg(NewRegs[0], VA.getLocReg(), VA);
@ -421,7 +421,7 @@ struct FormalArgHandler : public IncomingValueHandler {
bool ARMCallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder,
const Function &F,
ArrayRef<unsigned> VRegs) const {
ArrayRef<Register> VRegs) const {
auto &TLI = *getTLI<ARMTargetLowering>();
auto Subtarget = TLI.getSubtarget();
@ -453,7 +453,7 @@ bool ARMCallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder,
AssignFn);
SmallVector<ArgInfo, 8> ArgInfos;
SmallVector<unsigned, 4> SplitRegs;
SmallVector<Register, 4> SplitRegs;
unsigned Idx = 0;
for (auto &Arg : F.args()) {
ArgInfo AInfo(VRegs[Idx], Arg.getType());
@ -462,7 +462,7 @@ bool ARMCallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder,
SplitRegs.clear();
splitToValueTypes(AInfo, ArgInfos, MF,
[&](unsigned Reg) { SplitRegs.push_back(Reg); });
[&](Register Reg) { SplitRegs.push_back(Reg); });
if (!SplitRegs.empty())
MIRBuilder.buildMerge(VRegs[Idx], SplitRegs);
@ -568,7 +568,7 @@ bool ARMCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
if (Arg.Flags.isByVal())
return false;
SmallVector<unsigned, 8> Regs;
SmallVector<Register, 8> Regs;
splitToValueTypes(Arg, ArgInfos, MF,
[&](unsigned Reg) { Regs.push_back(Reg); });
@ -589,9 +589,9 @@ bool ARMCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
return false;
ArgInfos.clear();
SmallVector<unsigned, 8> SplitRegs;
SmallVector<Register, 8> SplitRegs;
splitToValueTypes(OrigRet, ArgInfos, MF,
[&](unsigned Reg) { SplitRegs.push_back(Reg); });
[&](Register Reg) { SplitRegs.push_back(Reg); });
auto RetAssignFn = TLI.CCAssignFnForReturn(CallConv, IsVarArg);
CallReturnHandler RetHandler(MIRBuilder, MRI, MIB, RetAssignFn);


@ -33,10 +33,10 @@ public:
ARMCallLowering(const ARMTargetLowering &TLI);
bool lowerReturn(MachineIRBuilder &MIRBuilder, const Value *Val,
ArrayRef<unsigned> VRegs) const override;
ArrayRef<Register> VRegs) const override;
bool lowerFormalArguments(MachineIRBuilder &MIRBuilder, const Function &F,
ArrayRef<unsigned> VRegs) const override;
ArrayRef<Register> VRegs) const override;
bool lowerCall(MachineIRBuilder &MIRBuilder, CallingConv::ID CallConv,
const MachineOperand &Callee, const ArgInfo &OrigRet,
@ -44,7 +44,7 @@ public:
private:
bool lowerReturnVal(MachineIRBuilder &MIRBuilder, const Value *Val,
ArrayRef<unsigned> VRegs,
ArrayRef<Register> VRegs,
MachineInstrBuilder &Ret) const;
using SplitArgTy = std::function<void(unsigned Reg)>;
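One subtlety here: `SplitArgTy` stays `std::function<void(unsigned Reg)>` even though the callbacks in the implementation above are now written as `[&](Register Reg) { ... }`. That is well-formed because `std::function` only requires the callable to accept an `unsigned` argument, which the implicit unsigned-to-Register conversion supplies. A hedged stand-alone illustration (`forEachReg` and `demo` are hypothetical):

    #include <functional>
    #include "llvm/CodeGen/Register.h"
    using llvm::Register;

    // The wrapper's implicit construction from unsigned lets a
    // Register-taking lambda satisfy std::function<void(unsigned)>.
    static void forEachReg(const std::function<void(unsigned)> &Cb) {
      Cb(42u);  // the call site still traffics in plain unsigned
    }

    void demo() {
      forEachReg([](Register Reg) { (void)Reg; });  // unsigned -> Register
    }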


@ -121,6 +121,6 @@ void BPFRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
}
}
unsigned BPFRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
Register BPFRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
return BPF::R10;
}


@ -32,7 +32,7 @@ struct BPFRegisterInfo : public BPFGenRegisterInfo {
unsigned FIOperandNum,
RegScavenger *RS = nullptr) const override;
unsigned getFrameRegister(const MachineFunction &MF) const override;
Register getFrameRegister(const MachineFunction &MF) const override;
};
}


@ -254,8 +254,8 @@ static bool isUnsafeToMoveAcross(MachineInstr &MI, unsigned UseReg,
MI.isMetaInstruction();
}
static unsigned UseReg(const MachineOperand& MO) {
return MO.isReg() ? MO.getReg() : 0;
static Register UseReg(const MachineOperand& MO) {
return MO.isReg() ? MO.getReg() : Register();
}
/// isSafeToMoveTogether - Returns true if it is safe to move I1 next to I2 such


@ -303,8 +303,8 @@ bool HexagonGenMux::genMuxInBlock(MachineBasicBlock &B) {
std::advance(It2, MaxX);
MachineInstr &Def1 = *It1, &Def2 = *It2;
MachineOperand *Src1 = &Def1.getOperand(2), *Src2 = &Def2.getOperand(2);
unsigned SR1 = Src1->isReg() ? Src1->getReg() : 0;
unsigned SR2 = Src2->isReg() ? Src2->getReg() : 0;
Register SR1 = Src1->isReg() ? Src1->getReg() : Register();
Register SR2 = Src2->isReg() ? Src2->getReg() : Register();
bool Failure = false, CanUp = true, CanDown = true;
for (unsigned X = MinX+1; X < MaxX; X++) {
const DefUseInfo &DU = DUM.lookup(X);


@ -51,6 +51,7 @@ namespace {
RegisterSubReg(unsigned r = 0, unsigned s = 0) : R(r), S(s) {}
RegisterSubReg(const MachineOperand &MO) : R(MO.getReg()), S(MO.getSubReg()) {}
RegisterSubReg(const Register &Reg) : R(Reg), S(0) {}
bool operator== (const RegisterSubReg &Reg) const {
return R == Reg.R && S == Reg.S;


@ -286,7 +286,7 @@ unsigned HexagonRegisterInfo::getRARegister() const {
}
unsigned HexagonRegisterInfo::getFrameRegister(const MachineFunction
Register HexagonRegisterInfo::getFrameRegister(const MachineFunction
&MF) const {
const HexagonFrameLowering *TFI = getFrameLowering(MF);
if (TFI->hasFP(MF))


@ -66,7 +66,7 @@ public:
// Debug information queries.
unsigned getRARegister() const;
unsigned getFrameRegister(const MachineFunction &MF) const override;
Register getFrameRegister(const MachineFunction &MF) const override;
unsigned getFrameRegister() const;
unsigned getStackRegister() const;


@ -258,12 +258,12 @@ bool LanaiRegisterInfo::hasBasePointer(const MachineFunction &MF) const {
unsigned LanaiRegisterInfo::getRARegister() const { return Lanai::RCA; }
unsigned
Register
LanaiRegisterInfo::getFrameRegister(const MachineFunction & /*MF*/) const {
return Lanai::FP;
}
unsigned LanaiRegisterInfo::getBaseRegister() const { return Lanai::R14; }
Register LanaiRegisterInfo::getBaseRegister() const { return Lanai::R14; }
const uint32_t *
LanaiRegisterInfo::getCallPreservedMask(const MachineFunction & /*MF*/,


@ -42,8 +42,8 @@ struct LanaiRegisterInfo : public LanaiGenRegisterInfo {
// Debug information queries.
unsigned getRARegister() const;
unsigned getFrameRegister(const MachineFunction &MF) const override;
unsigned getBaseRegister() const;
Register getFrameRegister(const MachineFunction &MF) const override;
Register getBaseRegister() const;
bool hasBasePointer(const MachineFunction &MF) const;
int getDwarfRegNum(unsigned RegNum, bool IsEH) const;


@ -154,7 +154,7 @@ MSP430RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
}
unsigned MSP430RegisterInfo::getFrameRegister(const MachineFunction &MF) const {
Register MSP430RegisterInfo::getFrameRegister(const MachineFunction &MF) const {
const MSP430FrameLowering *TFI = getFrameLowering(MF);
return TFI->hasFP(MF) ? MSP430::FP : MSP430::SP;
}


@ -37,7 +37,7 @@ public:
RegScavenger *RS = nullptr) const override;
// Debug information queries.
unsigned getFrameRegister(const MachineFunction &MF) const override;
Register getFrameRegister(const MachineFunction &MF) const override;
};
} // end namespace llvm


@ -24,7 +24,7 @@ using namespace llvm;
MipsCallLowering::MipsCallLowering(const MipsTargetLowering &TLI)
: CallLowering(&TLI) {}
bool MipsCallLowering::MipsHandler::assign(unsigned VReg, const CCValAssign &VA,
bool MipsCallLowering::MipsHandler::assign(Register VReg, const CCValAssign &VA,
const EVT &VT) {
if (VA.isRegLoc()) {
assignValueToReg(VReg, VA, VT);
@ -36,7 +36,7 @@ bool MipsCallLowering::MipsHandler::assign(unsigned VReg, const CCValAssign &VA,
return true;
}
bool MipsCallLowering::MipsHandler::assignVRegs(ArrayRef<unsigned> VRegs,
bool MipsCallLowering::MipsHandler::assignVRegs(ArrayRef<Register> VRegs,
ArrayRef<CCValAssign> ArgLocs,
unsigned ArgLocsStartIndex,
const EVT &VT) {
@ -47,14 +47,14 @@ bool MipsCallLowering::MipsHandler::assignVRegs(ArrayRef<unsigned> VRegs,
}
void MipsCallLowering::MipsHandler::setLeastSignificantFirst(
SmallVectorImpl<unsigned> &VRegs) {
SmallVectorImpl<Register> &VRegs) {
if (!MIRBuilder.getMF().getDataLayout().isLittleEndian())
std::reverse(VRegs.begin(), VRegs.end());
}
bool MipsCallLowering::MipsHandler::handle(
ArrayRef<CCValAssign> ArgLocs, ArrayRef<CallLowering::ArgInfo> Args) {
SmallVector<unsigned, 4> VRegs;
SmallVector<Register, 4> VRegs;
unsigned SplitLength;
const Function &F = MIRBuilder.getMF().getFunction();
const DataLayout &DL = F.getParent()->getDataLayout();
@ -90,17 +90,17 @@ public:
: MipsHandler(MIRBuilder, MRI) {}
private:
void assignValueToReg(unsigned ValVReg, const CCValAssign &VA,
void assignValueToReg(Register ValVReg, const CCValAssign &VA,
const EVT &VT) override;
unsigned getStackAddress(const CCValAssign &VA,
MachineMemOperand *&MMO) override;
void assignValueToAddress(unsigned ValVReg, const CCValAssign &VA) override;
void assignValueToAddress(Register ValVReg, const CCValAssign &VA) override;
bool handleSplit(SmallVectorImpl<unsigned> &VRegs,
bool handleSplit(SmallVectorImpl<Register> &VRegs,
ArrayRef<CCValAssign> ArgLocs, unsigned ArgLocsStartIndex,
unsigned ArgsReg, const EVT &VT) override;
Register ArgsReg, const EVT &VT) override;
virtual void markPhysRegUsed(unsigned PhysReg) {
MIRBuilder.getMBB().addLiveIn(PhysReg);
@ -129,7 +129,7 @@ private:
} // end anonymous namespace
void IncomingValueHandler::assignValueToReg(unsigned ValVReg,
void IncomingValueHandler::assignValueToReg(Register ValVReg,
const CCValAssign &VA,
const EVT &VT) {
const MipsSubtarget &STI =
@ -194,22 +194,22 @@ unsigned IncomingValueHandler::getStackAddress(const CCValAssign &VA,
return AddrReg;
}
void IncomingValueHandler::assignValueToAddress(unsigned ValVReg,
void IncomingValueHandler::assignValueToAddress(Register ValVReg,
const CCValAssign &VA) {
if (VA.getLocInfo() == CCValAssign::SExt ||
VA.getLocInfo() == CCValAssign::ZExt ||
VA.getLocInfo() == CCValAssign::AExt) {
unsigned LoadReg = MRI.createGenericVirtualRegister(LLT::scalar(32));
Register LoadReg = MRI.createGenericVirtualRegister(LLT::scalar(32));
buildLoad(LoadReg, VA);
MIRBuilder.buildTrunc(ValVReg, LoadReg);
} else
buildLoad(ValVReg, VA);
}
bool IncomingValueHandler::handleSplit(SmallVectorImpl<unsigned> &VRegs,
bool IncomingValueHandler::handleSplit(SmallVectorImpl<Register> &VRegs,
ArrayRef<CCValAssign> ArgLocs,
unsigned ArgLocsStartIndex,
unsigned ArgsReg, const EVT &VT) {
Register ArgsReg, const EVT &VT) {
if (!assignVRegs(VRegs, ArgLocs, ArgLocsStartIndex, VT))
return false;
setLeastSignificantFirst(VRegs);
@ -225,28 +225,28 @@ public:
: MipsHandler(MIRBuilder, MRI), MIB(MIB) {}
private:
void assignValueToReg(unsigned ValVReg, const CCValAssign &VA,
void assignValueToReg(Register ValVReg, const CCValAssign &VA,
const EVT &VT) override;
unsigned getStackAddress(const CCValAssign &VA,
MachineMemOperand *&MMO) override;
void assignValueToAddress(unsigned ValVReg, const CCValAssign &VA) override;
void assignValueToAddress(Register ValVReg, const CCValAssign &VA) override;
bool handleSplit(SmallVectorImpl<unsigned> &VRegs,
bool handleSplit(SmallVectorImpl<Register> &VRegs,
ArrayRef<CCValAssign> ArgLocs, unsigned ArgLocsStartIndex,
unsigned ArgsReg, const EVT &VT) override;
Register ArgsReg, const EVT &VT) override;
unsigned extendRegister(unsigned ValReg, const CCValAssign &VA);
unsigned extendRegister(Register ValReg, const CCValAssign &VA);
MachineInstrBuilder &MIB;
};
} // end anonymous namespace
void OutgoingValueHandler::assignValueToReg(unsigned ValVReg,
void OutgoingValueHandler::assignValueToReg(Register ValVReg,
const CCValAssign &VA,
const EVT &VT) {
unsigned PhysReg = VA.getLocReg();
Register PhysReg = VA.getLocReg();
const MipsSubtarget &STI =
static_cast<const MipsSubtarget &>(MIRBuilder.getMF().getSubtarget());
@ -287,14 +287,14 @@ unsigned OutgoingValueHandler::getStackAddress(const CCValAssign &VA,
LLT p0 = LLT::pointer(0, 32);
LLT s32 = LLT::scalar(32);
unsigned SPReg = MRI.createGenericVirtualRegister(p0);
Register SPReg = MRI.createGenericVirtualRegister(p0);
MIRBuilder.buildCopy(SPReg, Mips::SP);
unsigned OffsetReg = MRI.createGenericVirtualRegister(s32);
Register OffsetReg = MRI.createGenericVirtualRegister(s32);
unsigned Offset = VA.getLocMemOffset();
MIRBuilder.buildConstant(OffsetReg, Offset);
unsigned AddrReg = MRI.createGenericVirtualRegister(p0);
Register AddrReg = MRI.createGenericVirtualRegister(p0);
MIRBuilder.buildGEP(AddrReg, SPReg, OffsetReg);
MachinePointerInfo MPO =
@ -306,30 +306,30 @@ unsigned OutgoingValueHandler::getStackAddress(const CCValAssign &VA,
return AddrReg;
}
void OutgoingValueHandler::assignValueToAddress(unsigned ValVReg,
void OutgoingValueHandler::assignValueToAddress(Register ValVReg,
const CCValAssign &VA) {
MachineMemOperand *MMO;
unsigned Addr = getStackAddress(VA, MMO);
Register Addr = getStackAddress(VA, MMO);
unsigned ExtReg = extendRegister(ValVReg, VA);
MIRBuilder.buildStore(ExtReg, Addr, *MMO);
}
unsigned OutgoingValueHandler::extendRegister(unsigned ValReg,
unsigned OutgoingValueHandler::extendRegister(Register ValReg,
const CCValAssign &VA) {
LLT LocTy{VA.getLocVT()};
switch (VA.getLocInfo()) {
case CCValAssign::SExt: {
unsigned ExtReg = MRI.createGenericVirtualRegister(LocTy);
Register ExtReg = MRI.createGenericVirtualRegister(LocTy);
MIRBuilder.buildSExt(ExtReg, ValReg);
return ExtReg;
}
case CCValAssign::ZExt: {
unsigned ExtReg = MRI.createGenericVirtualRegister(LocTy);
Register ExtReg = MRI.createGenericVirtualRegister(LocTy);
MIRBuilder.buildZExt(ExtReg, ValReg);
return ExtReg;
}
case CCValAssign::AExt: {
unsigned ExtReg = MRI.createGenericVirtualRegister(LocTy);
Register ExtReg = MRI.createGenericVirtualRegister(LocTy);
MIRBuilder.buildAnyExt(ExtReg, ValReg);
return ExtReg;
}
@ -342,10 +342,10 @@ unsigned OutgoingValueHandler::extendRegister(unsigned ValReg,
llvm_unreachable("unable to extend register");
}
bool OutgoingValueHandler::handleSplit(SmallVectorImpl<unsigned> &VRegs,
bool OutgoingValueHandler::handleSplit(SmallVectorImpl<Register> &VRegs,
ArrayRef<CCValAssign> ArgLocs,
unsigned ArgLocsStartIndex,
unsigned ArgsReg, const EVT &VT) {
Register ArgsReg, const EVT &VT) {
MIRBuilder.buildUnmerge(VRegs, ArgsReg);
setLeastSignificantFirst(VRegs);
if (!assignVRegs(VRegs, ArgLocs, ArgLocsStartIndex, VT))
@ -396,7 +396,7 @@ static void setLocInfo(SmallVectorImpl<CCValAssign> &ArgLocs,
bool MipsCallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
const Value *Val,
ArrayRef<unsigned> VRegs) const {
ArrayRef<Register> VRegs) const {
MachineInstrBuilder Ret = MIRBuilder.buildInstrNoInsert(Mips::RetRA);
@ -444,7 +444,7 @@ bool MipsCallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
bool MipsCallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder,
const Function &F,
ArrayRef<unsigned> VRegs) const {
ArrayRef<Register> VRegs) const {
// Quick exit if there aren't any args.
if (F.arg_empty())


@ -34,39 +34,39 @@ public:
ArrayRef<CallLowering::ArgInfo> Args);
protected:
bool assignVRegs(ArrayRef<unsigned> VRegs, ArrayRef<CCValAssign> ArgLocs,
bool assignVRegs(ArrayRef<Register> VRegs, ArrayRef<CCValAssign> ArgLocs,
unsigned ArgLocsStartIndex, const EVT &VT);
void setLeastSignificantFirst(SmallVectorImpl<unsigned> &VRegs);
void setLeastSignificantFirst(SmallVectorImpl<Register> &VRegs);
MachineIRBuilder &MIRBuilder;
MachineRegisterInfo &MRI;
private:
bool assign(unsigned VReg, const CCValAssign &VA, const EVT &VT);
bool assign(Register VReg, const CCValAssign &VA, const EVT &VT);
virtual unsigned getStackAddress(const CCValAssign &VA,
MachineMemOperand *&MMO) = 0;
virtual void assignValueToReg(unsigned ValVReg, const CCValAssign &VA,
virtual void assignValueToReg(Register ValVReg, const CCValAssign &VA,
const EVT &VT) = 0;
virtual void assignValueToAddress(unsigned ValVReg,
virtual void assignValueToAddress(Register ValVReg,
const CCValAssign &VA) = 0;
virtual bool handleSplit(SmallVectorImpl<unsigned> &VRegs,
virtual bool handleSplit(SmallVectorImpl<Register> &VRegs,
ArrayRef<CCValAssign> ArgLocs,
unsigned ArgLocsStartIndex, unsigned ArgsReg,
unsigned ArgLocsStartIndex, Register ArgsReg,
const EVT &VT) = 0;
};
MipsCallLowering(const MipsTargetLowering &TLI);
bool lowerReturn(MachineIRBuilder &MIRBuilder, const Value *Val,
ArrayRef<unsigned> VRegs) const override;
ArrayRef<Register> VRegs) const override;
bool lowerFormalArguments(MachineIRBuilder &MIRBuilder, const Function &F,
ArrayRef<unsigned> VRegs) const override;
ArrayRef<Register> VRegs) const override;
bool lowerCall(MachineIRBuilder &MIRBuilder, CallingConv::ID CallConv,
const MachineOperand &Callee, const ArgInfo &OrigRet,

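The Mips handlers also show why containers change in lockstep with interfaces: a `SmallVector<unsigned, 4>` cannot bind to a `SmallVectorImpl<Register> &` parameter, even though the element types convert to one another. A hedged sketch (`reverseIfBigEndian` and `demo` are hypothetical):

    #include <algorithm>
    #include "llvm/ADT/SmallVector.h"
    #include "llvm/CodeGen/Register.h"
    using llvm::Register;
    using llvm::SmallVector;
    using llvm::SmallVectorImpl;

    static void reverseIfBigEndian(SmallVectorImpl<Register> &VRegs) {
      std::reverse(VRegs.begin(), VRegs.end());  // stand-in for the real logic
    }

    void demo() {
      SmallVector<Register, 4> VRegs = {Register(1), Register(2)};
      reverseIfBigEndian(VRegs);                 // exact element type: OK
      // SmallVector<unsigned, 4> Old = {1, 2};
      // reverseIfBigEndian(Old);                // would not compile
    }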

@ -277,7 +277,7 @@ eliminateFrameIndex(MachineBasicBlock::iterator II, int SPAdj,
eliminateFI(MI, FIOperandNum, FrameIndex, stackSize, spOffset);
}
unsigned MipsRegisterInfo::
Register MipsRegisterInfo::
getFrameRegister(const MachineFunction &MF) const {
const MipsSubtarget &Subtarget = MF.getSubtarget<MipsSubtarget>();
const TargetFrameLowering *TFI = Subtarget.getFrameLowering();


@ -69,7 +69,7 @@ public:
bool canRealignStack(const MachineFunction &MF) const override;
/// Debug information queries.
unsigned getFrameRegister(const MachineFunction &MF) const override;
Register getFrameRegister(const MachineFunction &MF) const override;
/// Return GPR register class.
virtual const TargetRegisterClass *intRegClass(unsigned Size) const = 0;


@ -3763,8 +3763,8 @@ MipsSETargetLowering::emitFPEXTEND_PSEUDO(MachineInstr &MI,
const TargetInstrInfo *TII = Subtarget.getInstrInfo();
DebugLoc DL = MI.getDebugLoc();
unsigned Fd = MI.getOperand(0).getReg();
unsigned Ws = MI.getOperand(1).getReg();
Register Fd = MI.getOperand(0).getReg();
Register Ws = MI.getOperand(1).getReg();
MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo();
const TargetRegisterClass *GPRRC =
@ -3772,10 +3772,10 @@ MipsSETargetLowering::emitFPEXTEND_PSEUDO(MachineInstr &MI,
unsigned MTC1Opc = IsFGR64onMips64
? Mips::DMTC1
: (IsFGR64onMips32 ? Mips::MTC1_D64 : Mips::MTC1);
unsigned COPYOpc = IsFGR64onMips64 ? Mips::COPY_S_D : Mips::COPY_S_W;
Register COPYOpc = IsFGR64onMips64 ? Mips::COPY_S_D : Mips::COPY_S_W;
unsigned Wtemp = RegInfo.createVirtualRegister(&Mips::MSA128WRegClass);
unsigned WPHI = Wtemp;
Register Wtemp = RegInfo.createVirtualRegister(&Mips::MSA128WRegClass);
Register WPHI = Wtemp;
BuildMI(*BB, MI, DL, TII->get(Mips::FEXUPR_W), Wtemp).addReg(Ws);
if (IsFGR64) {
@ -3784,15 +3784,15 @@ MipsSETargetLowering::emitFPEXTEND_PSEUDO(MachineInstr &MI,
}
// Perform the safety regclass copy mentioned above.
unsigned Rtemp = RegInfo.createVirtualRegister(GPRRC);
unsigned FPRPHI = IsFGR64onMips32
Register Rtemp = RegInfo.createVirtualRegister(GPRRC);
Register FPRPHI = IsFGR64onMips32
? RegInfo.createVirtualRegister(&Mips::FGR64RegClass)
: Fd;
BuildMI(*BB, MI, DL, TII->get(COPYOpc), Rtemp).addReg(WPHI).addImm(0);
BuildMI(*BB, MI, DL, TII->get(MTC1Opc), FPRPHI).addReg(Rtemp);
if (IsFGR64onMips32) {
unsigned Rtemp2 = RegInfo.createVirtualRegister(GPRRC);
Register Rtemp2 = RegInfo.createVirtualRegister(GPRRC);
BuildMI(*BB, MI, DL, TII->get(Mips::COPY_S_W), Rtemp2)
.addReg(WPHI)
.addImm(1);


@ -126,6 +126,6 @@ void NVPTXRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
}
unsigned NVPTXRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
Register NVPTXRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
return NVPTX::VRFrame;
}


@ -42,7 +42,7 @@ public:
unsigned FIOperandNum,
RegScavenger *RS = nullptr) const override;
unsigned getFrameRegister(const MachineFunction &MF) const override;
Register getFrameRegister(const MachineFunction &MF) const override;
ManagedStringPool *getStrPool() const {
return const_cast<ManagedStringPool *>(&ManagedStrPool);


@ -2448,7 +2448,7 @@ bool PPCTargetLowering::SelectAddressRegRegOnly(SDValue N, SDValue &Base,
/// Returns true if we should use a direct load into vector instruction
/// (such as lxsd or lfd), instead of a load into gpr + direct move sequence.
static bool usePartialVectorLoads(SDNode *N, const PPCSubtarget& ST) {
// If there are any other uses other than scalar to vector, then we should
// keep it as a scalar load -> direct move pattern to prevent multiple
// loads.
@ -5109,7 +5109,7 @@ PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag, SDValue &Chain,
// We cannot add R2/X2 as an operand here for PATCHPOINT, because there is
// no way to mark dependencies as implicit here.
// We will add the R2/X2 dependency in EmitInstrWithCustomInserter.
if (!isPatchPoint)
if (!isPatchPoint)
Ops.push_back(DAG.getRegister(isPPC64 ? PPC::X2
: PPC::R2, PtrVT));
}
@ -7087,7 +7087,7 @@ SDValue PPCTargetLowering::LowerTRUNCATEVector(SDValue Op,
// undefined):
// < MSB1|LSB1, MSB2|LSB2, uu, uu, uu, uu, uu, uu> to
// < LSB1, LSB2, u, u, u, u, u, u, u, u, u, u, u, u, u, u>
//
//
// The same operation in little-endian ordering will be:
// <uu, uu, uu, uu, uu, uu, LSB2|MSB2, LSB1|MSB1> to
// <u, u, u, u, u, u, u, u, u, u, u, u, u, u, LSB2, LSB1>
@ -9839,7 +9839,7 @@ SDValue PPCTargetLowering::LowerABS(SDValue Op, SelectionDAG &DAG) const {
BifID = Intrinsic::ppc_altivec_vmaxsh;
else if (VT == MVT::v16i8)
BifID = Intrinsic::ppc_altivec_vmaxsb;
return BuildIntrinsicOp(BifID, X, Y, DAG, dl, VT);
}
@ -10119,10 +10119,10 @@ PPCTargetLowering::EmitAtomicBinary(MachineInstr &MI, MachineBasicBlock *BB,
MachineFunction *F = BB->getParent();
MachineFunction::iterator It = ++BB->getIterator();
unsigned dest = MI.getOperand(0).getReg();
unsigned ptrA = MI.getOperand(1).getReg();
unsigned ptrB = MI.getOperand(2).getReg();
unsigned incr = MI.getOperand(3).getReg();
Register dest = MI.getOperand(0).getReg();
Register ptrA = MI.getOperand(1).getReg();
Register ptrB = MI.getOperand(2).getReg();
Register incr = MI.getOperand(3).getReg();
DebugLoc dl = MI.getDebugLoc();
MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB);
@ -10138,7 +10138,7 @@ PPCTargetLowering::EmitAtomicBinary(MachineInstr &MI, MachineBasicBlock *BB,
exitMBB->transferSuccessorsAndUpdatePHIs(BB);
MachineRegisterInfo &RegInfo = F->getRegInfo();
unsigned TmpReg = (!BinOpcode) ? incr :
Register TmpReg = (!BinOpcode) ? incr :
RegInfo.createVirtualRegister( AtomicSize == 8 ? &PPC::G8RCRegClass
: &PPC::GPRCRegClass);
@ -10246,20 +10246,20 @@ MachineBasicBlock *PPCTargetLowering::EmitPartwordAtomicBinary(
is64bit ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
unsigned PtrReg = RegInfo.createVirtualRegister(RC);
unsigned Shift1Reg = RegInfo.createVirtualRegister(GPRC);
unsigned ShiftReg =
Register PtrReg = RegInfo.createVirtualRegister(RC);
Register Shift1Reg = RegInfo.createVirtualRegister(GPRC);
Register ShiftReg =
isLittleEndian ? Shift1Reg : RegInfo.createVirtualRegister(GPRC);
unsigned Incr2Reg = RegInfo.createVirtualRegister(GPRC);
unsigned MaskReg = RegInfo.createVirtualRegister(GPRC);
unsigned Mask2Reg = RegInfo.createVirtualRegister(GPRC);
unsigned Mask3Reg = RegInfo.createVirtualRegister(GPRC);
unsigned Tmp2Reg = RegInfo.createVirtualRegister(GPRC);
unsigned Tmp3Reg = RegInfo.createVirtualRegister(GPRC);
unsigned Tmp4Reg = RegInfo.createVirtualRegister(GPRC);
unsigned TmpDestReg = RegInfo.createVirtualRegister(GPRC);
unsigned Ptr1Reg;
unsigned TmpReg =
Register Incr2Reg = RegInfo.createVirtualRegister(GPRC);
Register MaskReg = RegInfo.createVirtualRegister(GPRC);
Register Mask2Reg = RegInfo.createVirtualRegister(GPRC);
Register Mask3Reg = RegInfo.createVirtualRegister(GPRC);
Register Tmp2Reg = RegInfo.createVirtualRegister(GPRC);
Register Tmp3Reg = RegInfo.createVirtualRegister(GPRC);
Register Tmp4Reg = RegInfo.createVirtualRegister(GPRC);
Register TmpDestReg = RegInfo.createVirtualRegister(GPRC);
Register Ptr1Reg;
Register TmpReg =
(!BinOpcode) ? Incr2Reg : RegInfo.createVirtualRegister(GPRC);
// thisMBB:
@ -11061,23 +11061,23 @@ PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
is64bit ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
unsigned PtrReg = RegInfo.createVirtualRegister(RC);
unsigned Shift1Reg = RegInfo.createVirtualRegister(GPRC);
unsigned ShiftReg =
Register PtrReg = RegInfo.createVirtualRegister(RC);
Register Shift1Reg = RegInfo.createVirtualRegister(GPRC);
Register ShiftReg =
isLittleEndian ? Shift1Reg : RegInfo.createVirtualRegister(GPRC);
unsigned NewVal2Reg = RegInfo.createVirtualRegister(GPRC);
unsigned NewVal3Reg = RegInfo.createVirtualRegister(GPRC);
unsigned OldVal2Reg = RegInfo.createVirtualRegister(GPRC);
unsigned OldVal3Reg = RegInfo.createVirtualRegister(GPRC);
unsigned MaskReg = RegInfo.createVirtualRegister(GPRC);
unsigned Mask2Reg = RegInfo.createVirtualRegister(GPRC);
unsigned Mask3Reg = RegInfo.createVirtualRegister(GPRC);
unsigned Tmp2Reg = RegInfo.createVirtualRegister(GPRC);
unsigned Tmp4Reg = RegInfo.createVirtualRegister(GPRC);
unsigned TmpDestReg = RegInfo.createVirtualRegister(GPRC);
unsigned Ptr1Reg;
unsigned TmpReg = RegInfo.createVirtualRegister(GPRC);
unsigned ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO;
Register NewVal2Reg = RegInfo.createVirtualRegister(GPRC);
Register NewVal3Reg = RegInfo.createVirtualRegister(GPRC);
Register OldVal2Reg = RegInfo.createVirtualRegister(GPRC);
Register OldVal3Reg = RegInfo.createVirtualRegister(GPRC);
Register MaskReg = RegInfo.createVirtualRegister(GPRC);
Register Mask2Reg = RegInfo.createVirtualRegister(GPRC);
Register Mask3Reg = RegInfo.createVirtualRegister(GPRC);
Register Tmp2Reg = RegInfo.createVirtualRegister(GPRC);
Register Tmp4Reg = RegInfo.createVirtualRegister(GPRC);
Register TmpDestReg = RegInfo.createVirtualRegister(GPRC);
Register Ptr1Reg;
Register TmpReg = RegInfo.createVirtualRegister(GPRC);
Register ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO;
// thisMBB:
// ...
// fallthrough --> loopMBB
@ -11273,7 +11273,7 @@ PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
// Save FPSCR value.
BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), OldFPSCRReg);
// The floating point rounding mode is in the bits 62:63 of FPCSR, and has
// The floating point rounding mode is in the bits 62:63 of FPCSR, and has
// the following settings:
// 00 Round to nearest
// 01 Round to 0
@ -11293,7 +11293,7 @@ PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
// Copy register from F8RCRegClass::SrcReg to G8RCRegClass::DestReg
// or copy register from G8RCRegClass::SrcReg to F8RCRegClass::DestReg.
// If the target doesn't have DirectMove, we should use stack to do the
// If the target doesn't have DirectMove, we should use stack to do the
// conversion, because the target doesn't have the instructions like mtvsrd
// or mfvsrd to do this conversion directly.
auto copyRegFromG8RCOrF8RC = [&] (unsigned DestReg, unsigned SrcReg) {
@ -11339,8 +11339,8 @@ PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
MachineMemOperand::MOLoad, MFI.getObjectSize(FrameIdx),
MFI.getObjectAlignment(FrameIdx));
// Load from the stack where SrcReg is stored, and save to DestReg,
// so we have done the RegClass conversion from RegClass::SrcReg to
// Load from the stack where SrcReg is stored, and save to DestReg,
// so we have done the RegClass conversion from RegClass::SrcReg to
// RegClass::DestReg.
BuildMI(*BB, MI, dl, TII->get(LoadOp), DestReg)
.addImm(0)
@ -11350,14 +11350,14 @@ PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
};
unsigned OldFPSCRReg = MI.getOperand(0).getReg();
// Save FPSCR value.
BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), OldFPSCRReg);
// When the operand is gprc register, use two least significant bits of the
// register and mtfsf instruction to set the bits 62:63 of FPSCR.
//
// copy OldFPSCRTmpReg, OldFPSCRReg
// register and mtfsf instruction to set the bits 62:63 of FPSCR.
//
// copy OldFPSCRTmpReg, OldFPSCRReg
// (INSERT_SUBREG ExtSrcReg, (IMPLICIT_DEF ImDefReg), SrcOp, 1)
// rldimi NewFPSCRTmpReg, ExtSrcReg, OldFPSCRReg, 0, 62
// copy NewFPSCRReg, NewFPSCRTmpReg
@ -11367,7 +11367,7 @@ PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
unsigned OldFPSCRTmpReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
copyRegFromG8RCOrF8RC(OldFPSCRTmpReg, OldFPSCRReg);
unsigned ImDefReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
unsigned ExtSrcReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
@ -13791,9 +13791,9 @@ SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
}
case ISD::BUILD_VECTOR:
return DAGCombineBuildVector(N, DCI);
case ISD::ABS:
case ISD::ABS:
return combineABS(N, DCI);
case ISD::VSELECT:
case ISD::VSELECT:
return combineVSelect(N, DCI);
}
@ -13891,10 +13891,10 @@ unsigned PPCTargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
if (!DisableInnermostLoopAlign32) {
// If the nested loop is an innermost loop, prefer to a 32-byte alignment,
// so that we can decrease cache misses and branch-prediction misses.
// so that we can decrease cache misses and branch-prediction misses.
// Actual alignment of the loop will depend on the hotness check and other
// logic in alignBlocks.
if (ML->getLoopDepth() > 1 && ML->getSubLoops().empty())
if (ML->getLoopDepth() > 1 && ML->getSubLoops().empty())
return 5;
}
@ -14310,7 +14310,7 @@ bool PPCTargetLowering::isAccessedAsGotIndirect(SDValue GA) const {
if (CModel == CodeModel::Small || CModel == CodeModel::Large)
return true;
// JumpTable and BlockAddress are accessed as got-indirect.
// JumpTable and BlockAddress are accessed as got-indirect.
if (isa<JumpTableSDNode>(GA) || isa<BlockAddressSDNode>(GA))
return true;
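
A quiet improvement in the partword-atomic hunks above: declarations such as `Register Ptr1Reg;` replace `unsigned Ptr1Reg;`. The plain unsigned stayed uninitialized until a later branch assigned it, while the class form runs its default constructor, so the variable deterministically holds the null register until set (assuming the default constructor yields 0, as the `Register()` uses elsewhere in the commit suggest). A hedged sketch of the difference:

    void sketch() {
      unsigned RawReg;       // indeterminate until assigned; reading it is UB
      Register WrappedReg;   // well-defined: starts as the null register
      if (WrappedReg == 0)   // safe to test before any assignment
        WrappedReg = Register(1);  // hypothetical assignment
      (void)RawReg;          // silences the unused-variable warning
    }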


@ -391,9 +391,9 @@ MachineInstr *PPCInstrInfo::commuteInstructionImpl(MachineInstr &MI, bool NewMI,
// Swap op1/op2
assert(((OpIdx1 == 1 && OpIdx2 == 2) || (OpIdx1 == 2 && OpIdx2 == 1)) &&
"Only the operands 1 and 2 can be swapped in RLSIMI/RLWIMIo.");
unsigned Reg0 = MI.getOperand(0).getReg();
unsigned Reg1 = MI.getOperand(1).getReg();
unsigned Reg2 = MI.getOperand(2).getReg();
Register Reg0 = MI.getOperand(0).getReg();
Register Reg1 = MI.getOperand(1).getReg();
Register Reg2 = MI.getOperand(2).getReg();
unsigned SubReg1 = MI.getOperand(1).getSubReg();
unsigned SubReg2 = MI.getOperand(2).getSubReg();
bool Reg1IsKill = MI.getOperand(1).isKill();
@ -421,7 +421,7 @@ MachineInstr *PPCInstrInfo::commuteInstructionImpl(MachineInstr &MI, bool NewMI,
if (NewMI) {
// Create a new instruction.
unsigned Reg0 = ChangeReg0 ? Reg2 : MI.getOperand(0).getReg();
Register Reg0 = ChangeReg0 ? Reg2 : MI.getOperand(0).getReg();
bool Reg0IsDead = MI.getOperand(0).isDead();
return BuildMI(MF, MI.getDebugLoc(), MI.getDesc())
.addReg(Reg0, RegState::Define | getDeadRegState(Reg0IsDead))
@ -2400,7 +2400,7 @@ MachineInstr *PPCInstrInfo::getForwardingDefMI(
return &*It;
}
break;
} else if (It->readsRegister(Reg, &getRegisterInfo()))
} else if (It->readsRegister(Reg, &getRegisterInfo()))
// If we see another use of this reg between the def and the MI,
// we want to flat it so the def isn't deleted.
SeenIntermediateUse = true;
@ -3218,7 +3218,7 @@ static void swapMIOperands(MachineInstr &MI, unsigned Op1, unsigned Op2) {
}
}
// Check if the 'MI' that has the index OpNoForForwarding
// Check if the 'MI' that has the index OpNoForForwarding
// meets the requirement described in the ImmInstrInfo.
bool PPCInstrInfo::isUseMIElgibleForForwarding(MachineInstr &MI,
const ImmInstrInfo &III,
@ -3264,7 +3264,7 @@ bool PPCInstrInfo::isDefMIElgibleForForwarding(MachineInstr &DefMI,
MachineOperand *&RegMO) const {
unsigned Opc = DefMI.getOpcode();
if (Opc != PPC::ADDItocL && Opc != PPC::ADDI && Opc != PPC::ADDI8)
return false;
return false;
assert(DefMI.getNumOperands() >= 3 &&
"Add inst must have at least three operands");
@ -3436,7 +3436,7 @@ bool PPCInstrInfo::transformToImmFormFedByAdd(
// Otherwise, it is Constant Pool Index(CPI) or Global,
// which is relocation in fact. We need to replace the special zero
// register with ImmMO.
// Before that, we need to fixup the target flags for imm.
// Before that, we need to fixup the target flags for imm.
// For some reason, we miss to set the flag for the ImmMO if it is CPI.
if (DefMI.getOpcode() == PPC::ADDItocL)
ImmMO->setTargetFlags(PPCII::MO_TOC_LO);


@ -1114,7 +1114,7 @@ PPCRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
MI.getOperand(OperandBase + 1).ChangeToRegister(SReg, false, false, true);
}
unsigned PPCRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
Register PPCRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
const PPCFrameLowering *TFI = getFrameLowering(MF);
if (!TM.isPPC64())
@ -1123,7 +1123,7 @@ unsigned PPCRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
return TFI->hasFP(MF) ? PPC::X31 : PPC::X1;
}
unsigned PPCRegisterInfo::getBaseRegister(const MachineFunction &MF) const {
Register PPCRegisterInfo::getBaseRegister(const MachineFunction &MF) const {
const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
if (!hasBasePointer(MF))
return getFrameRegister(MF);


@ -132,10 +132,10 @@ public:
int64_t Offset) const override;
// Debug information queries.
unsigned getFrameRegister(const MachineFunction &MF) const override;
Register getFrameRegister(const MachineFunction &MF) const override;
// Base pointer (stack realignment) support.
unsigned getBaseRegister(const MachineFunction &MF) const;
Register getBaseRegister(const MachineFunction &MF) const;
bool hasBasePointer(const MachineFunction &MF) const;
/// stripRegisterPrefix - This method strips the character prefix from a


@ -124,7 +124,7 @@ void RISCVRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
}
unsigned RISCVRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
Register RISCVRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
const TargetFrameLowering *TFI = getFrameLowering(MF);
return TFI->hasFP(MF) ? RISCV::X8 : RISCV::X2;
}


@ -39,7 +39,7 @@ struct RISCVRegisterInfo : public RISCVGenRegisterInfo {
unsigned FIOperandNum,
RegScavenger *RS = nullptr) const override;
unsigned getFrameRegister(const MachineFunction &MF) const override;
Register getFrameRegister(const MachineFunction &MF) const override;
bool requiresRegisterScavenging(const MachineFunction &MF) const override {
return true;


@ -212,7 +212,7 @@ SparcRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
}
unsigned SparcRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
Register SparcRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
return SP::I6;
}


@ -38,7 +38,7 @@ struct SparcRegisterInfo : public SparcGenRegisterInfo {
int SPAdj, unsigned FIOperandNum,
RegScavenger *RS = nullptr) const override;
unsigned getFrameRegister(const MachineFunction &MF) const override;
Register getFrameRegister(const MachineFunction &MF) const override;
bool canRealignStack(const MachineFunction &MF) const override;


@ -525,9 +525,9 @@ bool SystemZElimCompare::fuseCompareOperations(
// SrcReg2 is the register if the source operand is a register,
// 0 if the source operand is immediate, and the base register
// if the source operand is memory (index is not supported).
unsigned SrcReg = Compare.getOperand(0).getReg();
unsigned SrcReg2 =
Compare.getOperand(1).isReg() ? Compare.getOperand(1).getReg() : 0;
Register SrcReg = Compare.getOperand(0).getReg();
Register SrcReg2 =
Compare.getOperand(1).isReg() ? Compare.getOperand(1).getReg() : Register();
MachineBasicBlock::iterator MBBI = Compare, MBBE = Branch;
for (++MBBI; MBBI != MBBE; ++MBBI)
if (MBBI->modifiesRegister(SrcReg, TRI) ||


@ -6249,7 +6249,7 @@ static MachineBasicBlock *splitBlockBefore(MachineBasicBlock::iterator MI,
}
// Force base value Base into a register before MI. Return the register.
static unsigned forceReg(MachineInstr &MI, MachineOperand &Base,
static Register forceReg(MachineInstr &MI, MachineOperand &Base,
const SystemZInstrInfo *TII) {
if (Base.isReg())
return Base.getReg();
@ -6258,7 +6258,7 @@ static unsigned forceReg(MachineInstr &MI, MachineOperand &Base,
MachineFunction &MF = *MBB->getParent();
MachineRegisterInfo &MRI = MF.getRegInfo();
unsigned Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
Register Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
BuildMI(*MBB, MI, MI.getDebugLoc(), TII->get(SystemZ::LA), Reg)
.add(Base)
.addImm(0)
@ -6542,8 +6542,8 @@ MachineBasicBlock *SystemZTargetLowering::emitAtomicLoadBinary(
MachineOperand Base = earlyUseOperand(MI.getOperand(1));
int64_t Disp = MI.getOperand(2).getImm();
MachineOperand Src2 = earlyUseOperand(MI.getOperand(3));
unsigned BitShift = (IsSubWord ? MI.getOperand(4).getReg() : 0);
unsigned NegBitShift = (IsSubWord ? MI.getOperand(5).getReg() : 0);
Register BitShift = IsSubWord ? MI.getOperand(4).getReg() : Register();
Register NegBitShift = IsSubWord ? MI.getOperand(5).getReg() : Register();
DebugLoc DL = MI.getDebugLoc();
if (IsSubWord)
BitSize = MI.getOperand(6).getImm();
@@ -6561,12 +6561,12 @@ MachineBasicBlock *SystemZTargetLowering::emitAtomicLoadBinary(
   assert(LOpcode && CSOpcode && "Displacement out of range");
 
   // Create virtual registers for temporary results.
-  unsigned OrigVal = MRI.createVirtualRegister(RC);
-  unsigned OldVal = MRI.createVirtualRegister(RC);
-  unsigned NewVal = (BinOpcode || IsSubWord ?
+  Register OrigVal = MRI.createVirtualRegister(RC);
+  Register OldVal = MRI.createVirtualRegister(RC);
+  Register NewVal = (BinOpcode || IsSubWord ?
                      MRI.createVirtualRegister(RC) : Src2.getReg());
-  unsigned RotatedOldVal = (IsSubWord ? MRI.createVirtualRegister(RC) : OldVal);
-  unsigned RotatedNewVal = (IsSubWord ? MRI.createVirtualRegister(RC) : NewVal);
+  Register RotatedOldVal = (IsSubWord ? MRI.createVirtualRegister(RC) : OldVal);
+  Register RotatedNewVal = (IsSubWord ? MRI.createVirtualRegister(RC) : NewVal);
 
   // Insert a basic block for the main loop.
   MachineBasicBlock *StartMBB = MBB;
@@ -6659,9 +6659,9 @@ MachineBasicBlock *SystemZTargetLowering::emitAtomicLoadMinMax(
   unsigned Dest = MI.getOperand(0).getReg();
   MachineOperand Base = earlyUseOperand(MI.getOperand(1));
   int64_t Disp = MI.getOperand(2).getImm();
-  unsigned Src2 = MI.getOperand(3).getReg();
-  unsigned BitShift = (IsSubWord ? MI.getOperand(4).getReg() : 0);
-  unsigned NegBitShift = (IsSubWord ? MI.getOperand(5).getReg() : 0);
+  Register Src2 = MI.getOperand(3).getReg();
+  Register BitShift = (IsSubWord ? MI.getOperand(4).getReg() : Register());
+  Register NegBitShift = (IsSubWord ? MI.getOperand(5).getReg() : Register());
   DebugLoc DL = MI.getDebugLoc();
   if (IsSubWord)
     BitSize = MI.getOperand(6).getImm();
@@ -6679,12 +6679,12 @@ MachineBasicBlock *SystemZTargetLowering::emitAtomicLoadMinMax(
   assert(LOpcode && CSOpcode && "Displacement out of range");
 
   // Create virtual registers for temporary results.
-  unsigned OrigVal = MRI.createVirtualRegister(RC);
-  unsigned OldVal = MRI.createVirtualRegister(RC);
-  unsigned NewVal = MRI.createVirtualRegister(RC);
-  unsigned RotatedOldVal = (IsSubWord ? MRI.createVirtualRegister(RC) : OldVal);
-  unsigned RotatedAltVal = (IsSubWord ? MRI.createVirtualRegister(RC) : Src2);
-  unsigned RotatedNewVal = (IsSubWord ? MRI.createVirtualRegister(RC) : NewVal);
+  Register OrigVal = MRI.createVirtualRegister(RC);
+  Register OldVal = MRI.createVirtualRegister(RC);
+  Register NewVal = MRI.createVirtualRegister(RC);
+  Register RotatedOldVal = (IsSubWord ? MRI.createVirtualRegister(RC) : OldVal);
+  Register RotatedAltVal = (IsSubWord ? MRI.createVirtualRegister(RC) : Src2);
+  Register RotatedNewVal = (IsSubWord ? MRI.createVirtualRegister(RC) : NewVal);
 
   // Insert 3 basic blocks for the loop.
   MachineBasicBlock *StartMBB = MBB;
@@ -6967,22 +6967,22 @@ MachineBasicBlock *SystemZTargetLowering::emitMemMemWrapper(
   if (MI.getNumExplicitOperands() > 5) {
     bool HaveSingleBase = DestBase.isIdenticalTo(SrcBase);
 
-    uint64_t StartCountReg = MI.getOperand(5).getReg();
-    uint64_t StartSrcReg = forceReg(MI, SrcBase, TII);
-    uint64_t StartDestReg = (HaveSingleBase ? StartSrcReg :
+    Register StartCountReg = MI.getOperand(5).getReg();
+    Register StartSrcReg = forceReg(MI, SrcBase, TII);
+    Register StartDestReg = (HaveSingleBase ? StartSrcReg :
                              forceReg(MI, DestBase, TII));
 
     const TargetRegisterClass *RC = &SystemZ::ADDR64BitRegClass;
-    uint64_t ThisSrcReg = MRI.createVirtualRegister(RC);
-    uint64_t ThisDestReg = (HaveSingleBase ? ThisSrcReg :
+    Register ThisSrcReg = MRI.createVirtualRegister(RC);
+    Register ThisDestReg = (HaveSingleBase ? ThisSrcReg :
                             MRI.createVirtualRegister(RC));
-    uint64_t NextSrcReg = MRI.createVirtualRegister(RC);
-    uint64_t NextDestReg = (HaveSingleBase ? NextSrcReg :
+    Register NextSrcReg = MRI.createVirtualRegister(RC);
+    Register NextDestReg = (HaveSingleBase ? NextSrcReg :
                             MRI.createVirtualRegister(RC));
     RC = &SystemZ::GR64BitRegClass;
-    uint64_t ThisCountReg = MRI.createVirtualRegister(RC);
-    uint64_t NextCountReg = MRI.createVirtualRegister(RC);
+    Register ThisCountReg = MRI.createVirtualRegister(RC);
+    Register NextCountReg = MRI.createVirtualRegister(RC);
 
     MachineBasicBlock *StartMBB = MBB;
     MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB);

View File

@@ -1179,13 +1179,13 @@ MachineInstr *SystemZInstrInfo::foldMemoryOperandImpl(
       MemOpcode = -1;
     else {
       assert(NumOps == 3 && "Expected two source registers.");
-      unsigned DstReg = MI.getOperand(0).getReg();
-      unsigned DstPhys =
+      Register DstReg = MI.getOperand(0).getReg();
+      Register DstPhys =
         (TRI->isVirtualRegister(DstReg) ? VRM->getPhys(DstReg) : DstReg);
-      unsigned SrcReg = (OpNum == 2 ? MI.getOperand(1).getReg()
+      Register SrcReg = (OpNum == 2 ? MI.getOperand(1).getReg()
                          : ((OpNum == 1 && MI.isCommutable())
                             ? MI.getOperand(2).getReg()
-                            : 0));
+                            : Register()));
       if (DstPhys && !SystemZ::GRH32BitRegClass.contains(DstPhys) && SrcReg &&
           TRI->isVirtualRegister(SrcReg) && DstPhys == VRM->getPhys(SrcReg))
         NeedsCommute = (OpNum == 1);
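The fold above mixes virtual and physical registers (TRI->isVirtualRegister, VRM->getPhys), which relies on how LLVM partitions the register-number space: 0 means no register, small positive values name physical registers, and values with the top bit set are virtual registers whose index is recovered by masking that bit off. A rough sketch of that partition; treat the constant as illustrative rather than a stable guarantee:

#include <cassert>
#include <cstdint>

constexpr uint32_t VirtRegFlag = 1u << 31; // assumed tag bit for vregs

constexpr bool isVirtualRegister(uint32_t Reg) {
  return (Reg & VirtRegFlag) != 0;
}
constexpr uint32_t virtReg2Index(uint32_t Reg) {
  return Reg & ~VirtRegFlag;
}

int main() {
  uint32_t VReg = VirtRegFlag | 7; // the eighth virtual register
  assert(isVirtualRegister(VReg) && virtReg2Index(VReg) == 7);
  assert(!isVirtualRegister(42));  // a physical register number
  assert(!isVirtualRegister(0));   // "no register"
  return 0;
}

Folding these predicates into the Register class itself is a natural follow-up once the type exists.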

View File

@@ -164,8 +164,8 @@ SystemZRegisterInfo::getRegAllocationHints(unsigned VirtReg,
         continue;
 
       auto tryAddHint = [&](const MachineOperand *MO) -> void {
-        unsigned Reg = MO->getReg();
-        unsigned PhysReg = isPhysicalRegister(Reg) ? Reg : VRM->getPhys(Reg);
+        Register Reg = MO->getReg();
+        Register PhysReg = isPhysicalRegister(Reg) ? Reg : VRM->getPhys(Reg);
         if (PhysReg) {
           if (MO->getSubReg())
             PhysReg = getSubReg(PhysReg, MO->getSubReg());
@@ -399,7 +399,7 @@ bool SystemZRegisterInfo::shouldCoalesce(MachineInstr *MI,
   return true;
 }
 
-unsigned
+Register
 SystemZRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
   const SystemZFrameLowering *TFI = getFrameLowering(MF);
   return TFI->hasFP(MF) ? SystemZ::R11D : SystemZ::R15D;

View File

@@ -83,7 +83,7 @@ public:
                       const TargetRegisterClass *NewRC,
                       LiveIntervals &LIS) const override;
 
-  unsigned getFrameRegister(const MachineFunction &MF) const override;
+  Register getFrameRegister(const MachineFunction &MF) const override;
 };
 
 } // end namespace llvm

View File

@@ -66,7 +66,7 @@ void WebAssemblyRegisterInfo::eliminateFrameIndex(
   assert(MFI.getObjectSize(FrameIndex) != 0 &&
          "We assume that variable-sized objects have already been lowered, "
          "and don't use FrameIndex operands.");
-  unsigned FrameRegister = getFrameRegister(MF);
+  Register FrameRegister = getFrameRegister(MF);
 
   // If this is the address operand of a load or store, make it relative to SP
   // and fold the frame offset directly in.
@@ -130,7 +130,7 @@ void WebAssemblyRegisterInfo::eliminateFrameIndex(
   MI.getOperand(FIOperandNum).ChangeToRegister(FIRegOperand, /*IsDef=*/false);
 }
 
-unsigned
+Register
 WebAssemblyRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
   static const unsigned Regs[2][2] = {
       /* !isArch64Bit       isArch64Bit      */

View File

@@ -39,7 +39,7 @@ public:
                            RegScavenger *RS = nullptr) const override;
 
   // Debug information queries.
-  unsigned getFrameRegister(const MachineFunction &MF) const override;
+  Register getFrameRegister(const MachineFunction &MF) const override;
 
   const TargetRegisterClass *
   getPointerRegClass(const MachineFunction &MF,

View File

@@ -75,7 +75,7 @@ bool X86CallLowering::splitToValueTypes(const ArgInfo &OrigArg,
     return true;
   }
 
-  SmallVector<unsigned, 8> SplitRegs;
+  SmallVector<Register, 8> SplitRegs;
 
   EVT PartVT = TLI.getRegisterType(Context, VT);
   Type *PartTy = PartVT.getTypeForEVT(Context);
@@ -182,7 +182,7 @@ protected:
 
 bool X86CallLowering::lowerReturn(
     MachineIRBuilder &MIRBuilder, const Value *Val,
-    ArrayRef<unsigned> VRegs) const {
+    ArrayRef<Register> VRegs) const {
   assert(((Val && !VRegs.empty()) || (!Val && VRegs.empty())) &&
          "Return value without a vreg");
   auto MIB = MIRBuilder.buildInstrNoInsert(X86::RET).addImm(0);
@@ -205,7 +205,7 @@ bool X86CallLowering::lowerReturn(
       ArgInfo CurArgInfo = ArgInfo{VRegs[i], SplitEVTs[i].getTypeForEVT(Ctx)};
       setArgFlags(CurArgInfo, AttributeList::ReturnIndex, DL, F);
       if (!splitToValueTypes(CurArgInfo, SplitArgs, DL, MRI,
-                             [&](ArrayRef<unsigned> Regs) {
+                             [&](ArrayRef<Register> Regs) {
                                MIRBuilder.buildUnmerge(Regs, VRegs[i]);
                              }))
         return false;
@@ -321,7 +321,7 @@ protected:
 
 bool X86CallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder,
                                            const Function &F,
-                                           ArrayRef<unsigned> VRegs) const {
+                                           ArrayRef<Register> VRegs) const {
   if (F.arg_empty())
     return true;
@@ -349,7 +349,7 @@ bool X86CallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder,
     ArgInfo OrigArg(VRegs[Idx], Arg.getType());
     setArgFlags(OrigArg, Idx + AttributeList::FirstArgIndex, DL, F);
     if (!splitToValueTypes(OrigArg, SplitArgs, DL, MRI,
-                           [&](ArrayRef<unsigned> Regs) {
+                           [&](ArrayRef<Register> Regs) {
                              MIRBuilder.buildMerge(VRegs[Idx], Regs);
                            }))
       return false;
@@ -409,7 +409,7 @@ bool X86CallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
       return false;
 
     if (!splitToValueTypes(OrigArg, SplitArgs, DL, MRI,
-                           [&](ArrayRef<unsigned> Regs) {
+                           [&](ArrayRef<Register> Regs) {
                              MIRBuilder.buildUnmerge(Regs, OrigArg.Reg);
                            }))
       return false;
@@ -452,10 +452,10 @@ bool X86CallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
   if (OrigRet.Reg) {
     SplitArgs.clear();
 
-    SmallVector<unsigned, 8> NewRegs;
+    SmallVector<Register, 8> NewRegs;
 
     if (!splitToValueTypes(OrigRet, SplitArgs, DL, MRI,
-                           [&](ArrayRef<unsigned> Regs) {
+                           [&](ArrayRef<Register> Regs) {
                              NewRegs.assign(Regs.begin(), Regs.end());
                            }))
       return false;

View File

@@ -29,10 +29,10 @@ public:
   X86CallLowering(const X86TargetLowering &TLI);
 
   bool lowerReturn(MachineIRBuilder &MIRBuilder, const Value *Val,
-                   ArrayRef<unsigned> VRegs) const override;
+                   ArrayRef<Register> VRegs) const override;
 
   bool lowerFormalArguments(MachineIRBuilder &MIRBuilder, const Function &F,
-                            ArrayRef<unsigned> VRegs) const override;
+                            ArrayRef<Register> VRegs) const override;
 
   bool lowerCall(MachineIRBuilder &MIRBuilder, CallingConv::ID CallConv,
                  const MachineOperand &Callee, const ArgInfo &OrigRet,
@@ -40,7 +40,7 @@ public:
 
 private:
   /// A function of this type is used to perform value split action.
-  using SplitArgTy = std::function<void(ArrayRef<unsigned>)>;
+  using SplitArgTy = std::function<void(ArrayRef<Register>)>;
 
   bool splitToValueTypes(const ArgInfo &OrigArgInfo,
                          SmallVectorImpl<ArgInfo> &SplitArgs,
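The header and the .cpp change in lockstep for a reason worth spelling out: an ArrayRef<Register> cannot view a SmallVector<unsigned>, even though the element types have identical layout, so every container feeding these callbacks must switch at the same time as the signatures. A reduced demonstration, with std::vector and a hand-rolled view standing in for the LLVM types:

#include <cstddef>
#include <vector>

struct Register { unsigned Reg; }; // minimal stand-in

// Hand-rolled stand-in for ArrayRef<T>: a non-owning view of contiguous T.
template <typename T> struct ArrayRefLike {
  const T *Data;
  std::size_t Size;
  ArrayRefLike(const std::vector<T> &V) : Data(V.data()), Size(V.size()) {}
};

void useRegs(ArrayRefLike<Register>) {}

int main() {
  std::vector<Register> VRegs = {{1}, {2}};
  useRegs(VRegs);       // fine: element types match exactly
  // std::vector<unsigned> Old = {1, 2};
  // useRegs(Old);      // error: no Register view over unsigned storage
  return 0;
}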

View File

@@ -584,23 +584,23 @@ void X86FrameLowering::emitStackProbeInline(MachineFunction &MF,
   // registers. For the prolog expansion we use RAX, RCX and RDX.
   MachineRegisterInfo &MRI = MF.getRegInfo();
   const TargetRegisterClass *RegClass = &X86::GR64RegClass;
-  const unsigned SizeReg = InProlog ? (unsigned)X86::RAX
+  const Register SizeReg = InProlog ? X86::RAX
                                     : MRI.createVirtualRegister(RegClass),
-                 ZeroReg = InProlog ? (unsigned)X86::RCX
+                 ZeroReg = InProlog ? X86::RCX
                                     : MRI.createVirtualRegister(RegClass),
-                 CopyReg = InProlog ? (unsigned)X86::RDX
+                 CopyReg = InProlog ? X86::RDX
                                     : MRI.createVirtualRegister(RegClass),
-                 TestReg = InProlog ? (unsigned)X86::RDX
+                 TestReg = InProlog ? X86::RDX
                                     : MRI.createVirtualRegister(RegClass),
-                 FinalReg = InProlog ? (unsigned)X86::RDX
+                 FinalReg = InProlog ? X86::RDX
                                     : MRI.createVirtualRegister(RegClass),
-                 RoundedReg = InProlog ? (unsigned)X86::RDX
+                 RoundedReg = InProlog ? X86::RDX
                                     : MRI.createVirtualRegister(RegClass),
-                 LimitReg = InProlog ? (unsigned)X86::RCX
+                 LimitReg = InProlog ? X86::RCX
                                     : MRI.createVirtualRegister(RegClass),
-                 JoinReg = InProlog ? (unsigned)X86::RCX
+                 JoinReg = InProlog ? X86::RCX
                                     : MRI.createVirtualRegister(RegClass),
-                 ProbeReg = InProlog ? (unsigned)X86::RCX
+                 ProbeReg = InProlog ? X86::RCX
                                     : MRI.createVirtualRegister(RegClass);
 
   // SP-relative offsets where we can save RCX and RDX.
@@ -874,7 +874,7 @@ void X86FrameLowering::BuildStackAlignAND(MachineBasicBlock &MBB,
 bool X86FrameLowering::has128ByteRedZone(const MachineFunction& MF) const {
   // x86-64 (non Win64) has a 128 byte red zone which is guaranteed not to be
-  // clobbered by any interrupt handler. 
+  // clobbered by any interrupt handler.
   assert(&STI == &MF.getSubtarget<X86Subtarget>() &&
          "MF used frame lowering for wrong subtarget");
   const Function &Fn = MF.getFunction();

View File

@@ -4748,9 +4748,9 @@ MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
     unsigned CommuteOpIdx1 = OpNum, CommuteOpIdx2 = CommuteAnyOperandIndex;
     if (findCommutedOpIndices(MI, CommuteOpIdx1, CommuteOpIdx2)) {
       bool HasDef = MI.getDesc().getNumDefs();
-      unsigned Reg0 = HasDef ? MI.getOperand(0).getReg() : 0;
-      unsigned Reg1 = MI.getOperand(CommuteOpIdx1).getReg();
-      unsigned Reg2 = MI.getOperand(CommuteOpIdx2).getReg();
+      Register Reg0 = HasDef ? MI.getOperand(0).getReg() : Register();
+      Register Reg1 = MI.getOperand(CommuteOpIdx1).getReg();
+      Register Reg2 = MI.getOperand(CommuteOpIdx2).getReg();
       bool Tied1 =
           0 == MI.getDesc().getOperandConstraint(CommuteOpIdx1, MCOI::TIED_TO);
       bool Tied2 =

View File

@@ -765,7 +765,7 @@ X86RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
   }
 }
 
-unsigned X86RegisterInfo::getFrameRegister(const MachineFunction &MF) const {
+Register X86RegisterInfo::getFrameRegister(const MachineFunction &MF) const {
   const X86FrameLowering *TFI = getFrameLowering(MF);
   return TFI->hasFP(MF) ? FramePtr : StackPtr;
 }

View File

@@ -133,7 +133,7 @@ public:
                            RegScavenger *RS = nullptr) const override;
 
   // Debug information queries.
-  unsigned getFrameRegister(const MachineFunction &MF) const override;
+  Register getFrameRegister(const MachineFunction &MF) const override;
   unsigned getPtrSizedFrameRegister(const MachineFunction &MF) const;
   unsigned getPtrSizedStackRegister(const MachineFunction &MF) const;
   unsigned getStackRegister() const { return StackPtr; }

View File

@@ -283,7 +283,7 @@ XCoreRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
   Offset += StackSize;
 
-  unsigned FrameReg = getFrameRegister(MF);
+  Register FrameReg = getFrameRegister(MF);
 
   // Special handling of DBG_VALUE instructions.
   if (MI.isDebugValue()) {
@@ -321,7 +321,7 @@ XCoreRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
 }
 
-unsigned XCoreRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
+Register XCoreRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
   const XCoreFrameLowering *TFI = getFrameLowering(MF);
   return TFI->hasFP(MF) ? XCore::R10 : XCore::SP;

View File

@@ -43,7 +43,7 @@ public:
                            RegScavenger *RS = nullptr) const override;
 
   // Debug information queries.
-  unsigned getFrameRegister(const MachineFunction &MF) const override;
+  Register getFrameRegister(const MachineFunction &MF) const override;
 
   //! Return whether to emit frame moves
   static bool needsFrameMoves(const MachineFunction &MF);

View File

@@ -124,7 +124,7 @@ static MachineFunction *getMFFromMMI(const Module *M,
   return MF;
 }
 
-static void collectCopies(SmallVectorImpl<unsigned> &Copies,
+static void collectCopies(SmallVectorImpl<Register> &Copies,
                           MachineFunction *MF) {
   for (auto &MBB : *MF)
     for (MachineInstr &MI : MBB) {
@@ -152,7 +152,7 @@ protected:
   MachineFunction *MF;
   std::pair<std::unique_ptr<Module>, std::unique_ptr<MachineModuleInfo>>
       ModuleMMIPair;
-  SmallVector<unsigned, 4> Copies;
+  SmallVector<Register, 4> Copies;
   MachineBasicBlock *EntryMBB;
   MachineIRBuilder B;
   MachineRegisterInfo *MRI;

View File

@@ -75,7 +75,7 @@ TEST_F(GISelMITest, DstOpSrcOp) {
   if (!TM)
     return;
 
-  SmallVector<unsigned, 4> Copies;
+  SmallVector<Register, 4> Copies;
   collectCopies(Copies, MF);
 
   LLT s64 = LLT::scalar(64);
@@ -100,7 +100,7 @@ TEST_F(GISelMITest, BuildUnmerge) {
   if (!TM)
     return;
 
-  SmallVector<unsigned, 4> Copies;
+  SmallVector<Register, 4> Copies;
   collectCopies(Copies, MF);
   B.buildUnmerge(LLT::scalar(32), Copies[0]);
   B.buildUnmerge(LLT::scalar(16), Copies[1]);
@@ -120,7 +120,7 @@ TEST_F(GISelMITest, TestBuildFPInsts) {
   if (!TM)
     return;
 
-  SmallVector<unsigned, 4> Copies;
+  SmallVector<Register, 4> Copies;
   collectCopies(Copies, MF);
 
   LLT S64 = LLT::scalar(64);
@@ -152,7 +152,7 @@ TEST_F(GISelMITest, BuildIntrinsic) {
     return;
 
   LLT S64 = LLT::scalar(64);
-  SmallVector<unsigned, 4> Copies;
+  SmallVector<Register, 4> Copies;
   collectCopies(Copies, MF);
 
   // Make sure DstOp version works. sqrt is just a placeholder intrinsic.
@@ -160,7 +160,7 @@ TEST_F(GISelMITest, BuildIntrinsic) {
       .addUse(Copies[0]);
 
   // Make sure register version works
-  SmallVector<unsigned, 1> Results;
+  SmallVector<Register, 1> Results;
   Results.push_back(MRI->createGenericVirtualRegister(S64));
   B.buildIntrinsic(Intrinsic::sqrt, Results, false)
       .addUse(Copies[1]);
@@ -181,7 +181,7 @@ TEST_F(GISelMITest, BuildXor) {
   LLT S64 = LLT::scalar(64);
   LLT S128 = LLT::scalar(128);
-  SmallVector<unsigned, 4> Copies;
+  SmallVector<Register, 4> Copies;
   collectCopies(Copies, MF);
   B.buildXor(S64, Copies[0], Copies[1]);
   B.buildNot(S64, Copies[0]);
@@ -208,7 +208,7 @@ TEST_F(GISelMITest, BuildBitCounts) {
     return;
 
   LLT S32 = LLT::scalar(32);
-  SmallVector<unsigned, 4> Copies;
+  SmallVector<Register, 4> Copies;
   collectCopies(Copies, MF);
 
   B.buildCTPOP(S32, Copies[0]);
@@ -235,7 +235,7 @@ TEST_F(GISelMITest, BuildCasts) {
     return;
 
   LLT S32 = LLT::scalar(32);
-  SmallVector<unsigned, 4> Copies;
+  SmallVector<Register, 4> Copies;
   collectCopies(Copies, MF);
 
   B.buildUITOFP(S32, Copies[0]);
@@ -259,7 +259,7 @@ TEST_F(GISelMITest, BuildMinMax) {
     return;
 
   LLT S64 = LLT::scalar(64);
-  SmallVector<unsigned, 4> Copies;
+  SmallVector<Register, 4> Copies;
   collectCopies(Copies, MF);
 
   B.buildSMin(S64, Copies[0], Copies[1]);
View File

@@ -161,7 +161,7 @@ TEST(PatternMatchInstr, MatchBinaryOp) {
   bool match =
       mi_match(MIBAdd->getOperand(0).getReg(), MRI, m_GAdd(m_Reg(), m_Reg()));
   EXPECT_TRUE(match);
-  unsigned Src0, Src1, Src2;
+  Register Src0, Src1, Src2;
   match = mi_match(MIBAdd->getOperand(0).getReg(), MRI,
                    m_GAdd(m_Reg(Src0), m_Reg(Src1)));
   EXPECT_TRUE(match);
@@ -292,7 +292,7 @@ TEST(PatternMatchInstr, MatchFPUnaryOp) {
   bool match = mi_match(MIBFabs->getOperand(0).getReg(), MRI, m_GFabs(m_Reg()));
   EXPECT_TRUE(match);
 
-  unsigned Src;
+  Register Src;
   auto MIBFNeg = B.buildInstr(TargetOpcode::G_FNEG, {s32}, {Copy0s32});
   match = mi_match(MIBFNeg->getOperand(0).getReg(), MRI, m_GFNeg(m_Reg(Src)));
   EXPECT_TRUE(match);
@@ -360,7 +360,7 @@ TEST(PatternMatchInstr, MatchExtendsTrunc) {
   auto MIBAExt = B.buildAnyExt(s64, MIBTrunc);
   auto MIBZExt = B.buildZExt(s64, MIBTrunc);
   auto MIBSExt = B.buildSExt(s64, MIBTrunc);
-  unsigned Src0;
+  Register Src0;
   bool match =
       mi_match(MIBTrunc->getOperand(0).getReg(), MRI, m_GTrunc(m_Reg(Src0)));
   EXPECT_TRUE(match);
@@ -433,7 +433,7 @@ TEST(PatternMatchInstr, MatchSpecificType) {
   LLT PtrTy = LLT::pointer(0, 64);
   auto MIBIntToPtr = B.buildCast(PtrTy, Copies[0]);
   auto MIBPtrToInt = B.buildCast(s64, MIBIntToPtr);
-  unsigned Src0;
+  Register Src0;
 
   // match the ptrtoint(inttoptr reg)
   bool match = mi_match(MIBPtrToInt->getOperand(0).getReg(), MRI,
@@ -459,7 +459,7 @@ TEST(PatternMatchInstr, MatchCombinators) {
   LLT s64 = LLT::scalar(64);
   LLT s32 = LLT::scalar(32);
   auto MIBAdd = B.buildAdd(s64, Copies[0], Copies[1]);
-  unsigned Src0, Src1;
+  Register Src0, Src1;
   bool match =
       mi_match(MIBAdd->getOperand(0).getReg(), MRI,
                m_all_of(m_SpecificType(s64), m_GAdd(m_Reg(Src0), m_Reg(Src1))));
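The test updates are the consumer-side view of the same change: m_Reg(Src) writes the matched operand's register into Src, so the bound variables naturally follow MachineOperand::getReg() to the new type. A toy version of such a binder, greatly reduced from GlobalISel's MIPatternMatch machinery:

#include <cassert>

struct Register {
  unsigned Reg = 0;
  Register() = default;
  Register(unsigned R) : Reg(R) {}
  operator unsigned() const { return Reg; }
};

// Toy binder: on a successful match, stores the register into the caller's
// variable, the way m_Reg(Src0) does in the tests above.
struct RegBinder {
  Register &Slot;
  bool match(Register R) const { Slot = R; return true; }
};

inline RegBinder m_Reg(Register &R) { return RegBinder{R}; }

int main() {
  Register Src;
  assert(m_Reg(Src).match(Register(7)));
  assert(Src == 7u); // the comparison goes through the unsigned conversion
  return 0;
}

Binding into an unsigned would still have compiled through the implicit conversion; moving the tests to Register keeps the type information instead of stripping it at the first assignment.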