Make all pointers to TargetRegisterClass const since they are all pointers to static data that should not be modified.
llvm-svn: 151134
This commit is contained in: commit 760b134ffa (parent 91d5bb1ee5)
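The change is a pure const-correctness pass: every API that hands out or accepts a TargetRegisterClass pointer now uses a pointer-to-const, because the pointees are TableGen-generated static tables. A minimal, self-contained sketch of the idea (the struct here is a stand-in, not the real LLVM class):

    #include <cassert>

    // Stand-in for TargetRegisterClass; the real one is TableGen-generated,
    // immutable static data.
    struct TargetRegisterClass { unsigned ID; };

    static const TargetRegisterClass GPR = { 0 }; // static table entry

    // After this commit: accessors return pointer-to-const, so callers can
    // still read the class but the compiler rejects writes through it.
    const TargetRegisterClass *getRegClassFor() { return &GPR; }

    int main() {
      const TargetRegisterClass *RC = getRegClassFor();
      assert(RC->ID == 0); // reads are fine
      // RC->ID = 1;       // would not compile: read-only object
      return 0;
    }
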
@@ -201,9 +201,9 @@ public:
 
   /// getRegClassFor - Return the register class that should be used for the
   /// specified value type.
-  virtual TargetRegisterClass *getRegClassFor(EVT VT) const {
+  virtual const TargetRegisterClass *getRegClassFor(EVT VT) const {
     assert(VT.isSimple() && "getRegClassFor called on illegal type!");
-    TargetRegisterClass *RC = RegClassForVT[VT.getSimpleVT().SimpleTy];
+    const TargetRegisterClass *RC = RegClassForVT[VT.getSimpleVT().SimpleTy];
     assert(RC && "This value type is not natively supported!");
     return RC;
   }

@@ -1043,7 +1043,7 @@ protected:
   /// addRegisterClass - Add the specified register class as an available
   /// regclass for the specified value type. This indicates the selector can
   /// handle values of that class natively.
-  void addRegisterClass(EVT VT, TargetRegisterClass *RC) {
+  void addRegisterClass(EVT VT, const TargetRegisterClass *RC) {
     assert((unsigned)VT.getSimpleVT().SimpleTy < array_lengthof(RegClassForVT));
     AvailableRegClasses.push_back(std::make_pair(VT, RC));
     RegClassForVT[VT.getSimpleVT().SimpleTy] = RC;

@@ -1760,7 +1760,7 @@ private:
 
   /// RegClassForVT - This indicates the default register class to use for
   /// each ValueType the target supports natively.
-  TargetRegisterClass *RegClassForVT[MVT::LAST_VALUETYPE];
+  const TargetRegisterClass *RegClassForVT[MVT::LAST_VALUETYPE];
   unsigned char NumRegistersForVT[MVT::LAST_VALUETYPE];
   EVT RegisterTypeForVT[MVT::LAST_VALUETYPE];
 

@@ -1934,7 +1934,7 @@ private:
     return LegalizeKind(TypeSplitVector, NVT);
   }
 
-  std::vector<std::pair<EVT, TargetRegisterClass*> > AvailableRegClasses;
+  std::vector<std::pair<EVT, const TargetRegisterClass*> > AvailableRegClasses;
 
   /// TargetDAGCombineArray - Targets can specify ISD nodes that they would
   /// like PerformDAGCombine callbacks for by calling setTargetDAGCombine(),

@@ -39,7 +39,7 @@ public:
   // AntiDepBreakMode - Type of anti-dependence breaking that should
   // be performed before post-RA scheduling.
   typedef enum { ANTIDEP_NONE, ANTIDEP_CRITICAL, ANTIDEP_ALL } AntiDepBreakMode;
-  typedef SmallVectorImpl<TargetRegisterClass*> RegClassVector;
+  typedef SmallVectorImpl<const TargetRegisterClass*> RegClassVector;
 
   virtual ~TargetSubtargetInfo();
 

@@ -134,7 +134,7 @@ namespace {
       MachineFunction &MF, MachineLoopInfo &MLI, MachineDominatorTree &MDT,
       AliasAnalysis *AA, const RegisterClassInfo&,
       TargetSubtargetInfo::AntiDepBreakMode AntiDepMode,
-      SmallVectorImpl<TargetRegisterClass*> &CriticalPathRCs);
+      SmallVectorImpl<const TargetRegisterClass*> &CriticalPathRCs);
 
     ~SchedulePostRATDList();
 

@@ -184,7 +184,7 @@ SchedulePostRATDList::SchedulePostRATDList(
   MachineFunction &MF, MachineLoopInfo &MLI, MachineDominatorTree &MDT,
   AliasAnalysis *AA, const RegisterClassInfo &RCI,
   TargetSubtargetInfo::AntiDepBreakMode AntiDepMode,
-  SmallVectorImpl<TargetRegisterClass*> &CriticalPathRCs)
+  SmallVectorImpl<const TargetRegisterClass*> &CriticalPathRCs)
   : ScheduleDAGInstrs(MF, MLI, MDT, /*IsPostRA=*/true), Topo(SUnits), AA(AA),
     KillIndices(TRI->getNumRegs())
 {

@@ -216,7 +216,7 @@ bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) {
   // Check for explicit enable/disable of post-ra scheduling.
   TargetSubtargetInfo::AntiDepBreakMode AntiDepMode =
     TargetSubtargetInfo::ANTIDEP_NONE;
-  SmallVector<TargetRegisterClass*, 4> CriticalPathRCs;
+  SmallVector<const TargetRegisterClass*, 4> CriticalPathRCs;
   if (EnablePostRAScheduler.getPosition() > 0) {
     if (!EnablePostRAScheduler)
       return false;

@@ -725,8 +725,8 @@ bool FastISel::SelectBitCast(const User *I) {
   // First, try to perform the bitcast by inserting a reg-reg copy.
   unsigned ResultReg = 0;
   if (SrcVT.getSimpleVT() == DstVT.getSimpleVT()) {
-    TargetRegisterClass* SrcClass = TLI.getRegClassFor(SrcVT);
-    TargetRegisterClass* DstClass = TLI.getRegClassFor(DstVT);
+    const TargetRegisterClass* SrcClass = TLI.getRegClassFor(SrcVT);
+    const TargetRegisterClass* DstClass = TLI.getRegClassFor(DstVT);
     // Don't attempt a cross-class copy. It will likely fail.
     if (SrcClass == DstClass) {
       ResultReg = createResultReg(DstClass);

@@ -728,7 +728,7 @@ unsigned ARMFastISel::TargetMaterializeAlloca(const AllocaInst *AI) {
   // This will get lowered later into the correct offsets and registers
   // via rewriteXFrameIndex.
   if (SI != FuncInfo.StaticAllocaMap.end()) {
-    TargetRegisterClass* RC = TLI.getRegClassFor(VT);
+    const TargetRegisterClass* RC = TLI.getRegClassFor(VT);
     unsigned ResultReg = createResultReg(RC);
     unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri;
     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,

@@ -911,8 +911,8 @@ void ARMFastISel::ARMSimplifyAddress(Address &Addr, EVT VT, bool useAM3) {
   // put the alloca address into a register, set the base type back to
   // register and continue. This should almost never happen.
   if (needsLowering && Addr.BaseType == Address::FrameIndexBase) {
-    TargetRegisterClass *RC = isThumb2 ? ARM::tGPRRegisterClass :
-                              ARM::GPRRegisterClass;
+    const TargetRegisterClass *RC = isThumb2 ? ARM::tGPRRegisterClass
+                                             : ARM::GPRRegisterClass;
     unsigned ResultReg = createResultReg(RC);
     unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri;
     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,

@@ -987,7 +987,7 @@ bool ARMFastISel::ARMEmitLoad(EVT VT, unsigned &ResultReg, Address &Addr,
   unsigned Opc;
   bool useAM3 = false;
   bool needVMOV = false;
-  TargetRegisterClass *RC;
+  const TargetRegisterClass *RC;
   switch (VT.getSimpleVT().SimpleTy) {
     // This is mostly going to be Neon/vector support.
     default: return false;

@@ -1490,7 +1490,7 @@ bool ARMFastISel::SelectCmp(const Instruction *I) {
   // Now set a register based on the comparison. Explicitly set the predicates
   // here.
   unsigned MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi;
-  TargetRegisterClass *RC = isThumb2 ? ARM::rGPRRegisterClass
+  const TargetRegisterClass *RC = isThumb2 ? ARM::rGPRRegisterClass
                                            : ARM::GPRRegisterClass;
   unsigned DestReg = createResultReg(RC);
   Constant *Zero = ConstantInt::get(Type::getInt32Ty(*Context), 0);

@@ -1955,7 +1955,7 @@ bool ARMFastISel::FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
     // For this move we copy into two registers and then move into the
     // double fp reg we want.
     EVT DestVT = RVLocs[0].getValVT();
-    TargetRegisterClass* DstRC = TLI.getRegClassFor(DestVT);
+    const TargetRegisterClass* DstRC = TLI.getRegClassFor(DestVT);
     unsigned ResultReg = createResultReg(DstRC);
     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                             TII.get(ARM::VMOVDRR), ResultReg)

@@ -1975,7 +1975,7 @@ bool ARMFastISel::FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
     if (RetVT == MVT::i1 || RetVT == MVT::i8 || RetVT == MVT::i16)
       CopyVT = MVT::i32;
 
-    TargetRegisterClass* DstRC = TLI.getRegClassFor(CopyVT);
+    const TargetRegisterClass* DstRC = TLI.getRegClassFor(CopyVT);
 
     unsigned ResultReg = createResultReg(DstRC);
     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),

@@ -1011,7 +1011,7 @@ EVT ARMTargetLowering::getSetCCResultType(EVT VT) const {
 
 /// getRegClassFor - Return the register class that should be used for the
 /// specified value type.
-TargetRegisterClass *ARMTargetLowering::getRegClassFor(EVT VT) const {
+const TargetRegisterClass *ARMTargetLowering::getRegClassFor(EVT VT) const {
   // Map v4i64 to QQ registers but do not make the type legal. Similarly map
   // v8i64 to QQQQ registers. v4i64 and v8i64 are only used for REG_SEQUENCE to
   // load / store 4 to 8 consecutive D registers.

@@ -2422,7 +2422,7 @@ ARMTargetLowering::GetF64FormalArgument(CCValAssign &VA, CCValAssign &NextVA,
   MachineFunction &MF = DAG.getMachineFunction();
   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
 
-  TargetRegisterClass *RC;
+  const TargetRegisterClass *RC;
   if (AFI->isThumb1OnlyFunction())
     RC = ARM::tGPRRegisterClass;
   else

@@ -2508,7 +2508,7 @@ ARMTargetLowering::VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG,
 
     SmallVector<SDValue, 4> MemOps;
     for (; firstRegToSaveIndex < 4; ++firstRegToSaveIndex) {
-      TargetRegisterClass *RC;
+      const TargetRegisterClass *RC;
       if (AFI->isThumb1OnlyFunction())
         RC = ARM::tGPRRegisterClass;
       else

@@ -2591,7 +2591,7 @@ ARMTargetLowering::LowerFormalArguments(SDValue Chain,
         ArgValue = GetF64FormalArgument(VA, ArgLocs[++i], Chain, DAG, dl);
 
       } else {
-        TargetRegisterClass *RC;
+        const TargetRegisterClass *RC;
 
         if (RegVT == MVT::f32)
           RC = ARM::SPRRegisterClass;

@@ -5299,7 +5299,7 @@ ARMTargetLowering::EmitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB,
                   BB->end());
   exitMBB->transferSuccessorsAndUpdatePHIs(BB);
 
-  TargetRegisterClass *TRC =
+  const TargetRegisterClass *TRC =
     isThumb2 ? ARM::tGPRRegisterClass : ARM::GPRRegisterClass;
   unsigned scratch = MRI.createVirtualRegister(TRC);
   unsigned scratch2 = (!BinOpcode) ? incr : MRI.createVirtualRegister(TRC);

@@ -5409,7 +5409,7 @@ ARMTargetLowering::EmitAtomicBinaryMinMax(MachineInstr *MI,
                   BB->end());
   exitMBB->transferSuccessorsAndUpdatePHIs(BB);
 
-  TargetRegisterClass *TRC =
+  const TargetRegisterClass *TRC =
     isThumb2 ? ARM::tGPRRegisterClass : ARM::GPRRegisterClass;
   unsigned scratch = MRI.createVirtualRegister(TRC);
   unsigned scratch2 = MRI.createVirtualRegister(TRC);

@@ -5519,7 +5519,7 @@ ARMTargetLowering::EmitAtomicBinary64(MachineInstr *MI, MachineBasicBlock *BB,
                   BB->end());
   exitMBB->transferSuccessorsAndUpdatePHIs(BB);
 
-  TargetRegisterClass *TRC =
+  const TargetRegisterClass *TRC =
     isThumb2 ? ARM::tGPRRegisterClass : ARM::GPRRegisterClass;
   unsigned storesuccess = MRI.createVirtualRegister(TRC);
 

@@ -345,7 +345,7 @@ namespace llvm {
 
     /// getRegClassFor - Return the register class that should be used for the
     /// specified value type.
-    virtual TargetRegisterClass *getRegClassFor(EVT VT) const;
+    virtual const TargetRegisterClass *getRegClassFor(EVT VT) const;
 
     /// getMaximalGlobalOffset - Returns the maximal possible offset which can
     /// be used for loads / stores from the global.

@@ -896,7 +896,7 @@ LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
     if (VA.isRegLoc()) {
       MVT RegVT = VA.getLocVT();
       ArgRegEnd = VA.getLocReg();
-      TargetRegisterClass *RC = 0;
+      const TargetRegisterClass *RC;
 
       if (RegVT == MVT::i32)
        RC = MBlaze::GPRRegisterClass;

@@ -964,7 +964,7 @@ LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
     StackPtr = DAG.getRegister(StackReg, getPointerTy());
 
     // The last register argument that must be saved is MBlaze::R10
-    TargetRegisterClass *RC = MBlaze::GPRRegisterClass;
+    const TargetRegisterClass *RC = MBlaze::GPRRegisterClass;
 
     unsigned Begin = getMBlazeRegisterNumbering(MBlaze::R5);
     unsigned Start = getMBlazeRegisterNumbering(ArgRegEnd+1);

@@ -710,7 +710,7 @@ LowerOperation(SDValue Op, SelectionDAG &DAG) const
 // MachineFunction as a live in value. It also creates a corresponding
 // virtual register for it.
 static unsigned
-AddLiveIn(MachineFunction &MF, unsigned PReg, TargetRegisterClass *RC)
+AddLiveIn(MachineFunction &MF, unsigned PReg, const TargetRegisterClass *RC)
 {
   assert(RC->contains(PReg) && "Not the correct regclass!");
   unsigned VReg = MF.getRegInfo().createVirtualRegister(RC);

@@ -2601,7 +2601,7 @@ MipsTargetLowering::LowerFormalArguments(SDValue Chain,
     if (IsRegLoc) {
       EVT RegVT = VA.getLocVT();
       unsigned ArgReg = VA.getLocReg();
-      TargetRegisterClass *RC = 0;
+      const TargetRegisterClass *RC;
 
       if (RegVT == MVT::i32)
         RC = Mips::CPURegsRegisterClass;

@@ -2684,7 +2684,7 @@ MipsTargetLowering::LowerFormalArguments(SDValue Chain,
   const unsigned *ArgRegs = IsO32 ? O32IntRegs : Mips64IntRegs;
   unsigned Idx = CCInfo.getFirstUnallocated(ArgRegs, NumOfRegs);
   int FirstRegSlotOffset = IsO32 ? 0 : -64 ; // offset of $a0's slot.
-  TargetRegisterClass *RC
+  const TargetRegisterClass *RC
     = IsO32 ? Mips::CPURegsRegisterClass : Mips::CPU64RegsRegisterClass;
   unsigned RegSize = RC->getSize();
   int RegSlotOffset = FirstRegSlotOffset + Idx * RegSize;

@@ -241,31 +241,24 @@ SDValue PTXTargetLowering::
   else {
     for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
       EVT RegVT = Ins[i].VT;
-      TargetRegisterClass* TRC = getRegClassFor(RegVT);
+      const TargetRegisterClass* TRC = getRegClassFor(RegVT);
       unsigned RegType;
 
       // Determine which register class we need
-      if (RegVT == MVT::i1) {
+      if (RegVT == MVT::i1)
         RegType = PTXRegisterType::Pred;
-      }
-      else if (RegVT == MVT::i16) {
+      else if (RegVT == MVT::i16)
         RegType = PTXRegisterType::B16;
-      }
-      else if (RegVT == MVT::i32) {
+      else if (RegVT == MVT::i32)
         RegType = PTXRegisterType::B32;
-      }
-      else if (RegVT == MVT::i64) {
+      else if (RegVT == MVT::i64)
         RegType = PTXRegisterType::B64;
-      }
-      else if (RegVT == MVT::f32) {
+      else if (RegVT == MVT::f32)
         RegType = PTXRegisterType::F32;
-      }
-      else if (RegVT == MVT::f64) {
+      else if (RegVT == MVT::f64)
         RegType = PTXRegisterType::F64;
-      }
-      else {
+      else
         llvm_unreachable("Unknown parameter type");
-      }
 
       // Use a unique index in the instruction to prevent instruction folding.
       // Yes, this is a hack.

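Aside: beyond the const change, the hunk above also drops the now-redundant braces around the single-statement if/else-if branches, which is why the hunk header shrinks from 31 lines to 24.
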
@@ -326,7 +319,7 @@ SDValue PTXTargetLowering::
   } else {
     for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
       EVT RegVT = Outs[i].VT;
-      TargetRegisterClass* TRC = 0;
+      const TargetRegisterClass* TRC;
       unsigned RegType;
 
       // Determine which register class we need

@@ -1698,7 +1698,7 @@ PPCTargetLowering::LowerFormalArguments_SVR4(
 
     // Arguments stored in registers.
     if (VA.isRegLoc()) {
-      TargetRegisterClass *RC;
+      const TargetRegisterClass *RC;
       EVT ValVT = VA.getValVT();
 
       switch (ValVT.getSimpleVT().SimpleTy) {

@@ -2104,7 +2104,7 @@ unsigned X86FastISel::TargetMaterializeAlloca(const AllocaInst *C) {
   if (!X86SelectAddress(C, AM))
     return 0;
   unsigned Opc = Subtarget->is64Bit() ? X86::LEA64r : X86::LEA32r;
-  TargetRegisterClass* RC = TLI.getRegClassFor(TLI.getPointerTy());
+  const TargetRegisterClass* RC = TLI.getRegClassFor(TLI.getPointerTy());
   unsigned ResultReg = createResultReg(RC);
   addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(Opc), ResultReg), AM);

@@ -1829,7 +1829,7 @@ X86TargetLowering::LowerFormalArguments(SDValue Chain,
 
     if (VA.isRegLoc()) {
       EVT RegVT = VA.getLocVT();
-      TargetRegisterClass *RC = NULL;
+      const TargetRegisterClass *RC;
       if (RegVT == MVT::i32)
         RC = X86::GR32RegisterClass;
       else if (Is64Bit && RegVT == MVT::i64)

@@ -11209,7 +11209,7 @@ X86TargetLowering::EmitAtomicBitwiseWithCustomInserter(MachineInstr *bInstr,
                                       unsigned CXchgOpc,
                                       unsigned notOpc,
                                       unsigned EAXreg,
-                                      TargetRegisterClass *RC,
+                                      const TargetRegisterClass *RC,
                                       bool invSrc) const {
   // For the atomic bitwise operator, we generate
   // thisMBB:

@@ -832,7 +832,7 @@ namespace llvm {
                                       unsigned cxchgOpc,
                                       unsigned notOpc,
                                       unsigned EAXreg,
-                                      TargetRegisterClass *RC,
+                                      const TargetRegisterClass *RC,
                                       bool invSrc = false) const;
 
     MachineBasicBlock *EmitAtomicBit6432WithCustomInserter(

@@ -1835,7 +1835,7 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
   case X86::ADD32rr_DB: {
     assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
     unsigned Opc;
-    TargetRegisterClass *RC;
+    const TargetRegisterClass *RC;
     if (MIOpc == X86::ADD64rr || MIOpc == X86::ADD64rr_DB) {
       Opc = X86::LEA64r;
       RC = X86::GR64_NOSPRegisterClass;

@@ -145,7 +145,7 @@ bool VZeroUpperInserter::runOnMachineFunction(MachineFunction &MF) {
   // to insert any VZEROUPPER instructions. This is constant-time, so it is
   // cheap in the common case of no ymm use.
   bool YMMUsed = false;
-  TargetRegisterClass *RC = X86::VR256RegisterClass;
+  const TargetRegisterClass *RC = X86::VR256RegisterClass;
   for (TargetRegisterClass::iterator i = RC->begin(), e = RC->end();
        i != e; i++) {
     if (MRI.isPhysRegUsed(*i)) {

@@ -482,8 +482,8 @@ RegisterInfoEmitter::runTargetHeader(raw_ostream &OS, CodeGenTarget &Target,
     // Output the extern for the instance.
     OS << "  extern " << Name << "Class\t" << Name << "RegClass;\n";
     // Output the extern for the pointer to the instance (should remove).
-    OS << "  static TargetRegisterClass * const "<< Name <<"RegisterClass = &"
-       << Name << "RegClass;\n";
+    OS << "  static const TargetRegisterClass * const " << Name
+       << "RegisterClass = &" << Name << "RegClass;\n";
   }
   OS << "} // end of namespace " << TargetName << "\n\n";
 }
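
The TableGen hunk above changes what the emitter prints into each generated target header. For a hypothetical register class GPR in a hypothetical target Foo (names illustrative; the output shape is reconstructed from the OS << statements in the hunk), the generated code after this commit would look roughly like:

    namespace Foo {
      extern GPRClass	GPRRegClass;
      static const TargetRegisterClass * const GPRRegisterClass = &GPRRegClass;
    } // end of namespace Foo

This keeps the generated convenience pointers consistent with the new const-qualified APIs, so every target picks up the change without hand edits.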