Don't cache the instruction info and register info objects.
These objects are internal to the TargetMachine object and may change. llvm-svn: 183485
commit 496dc33b9f
parent 60711600da
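The change replaces pointers that were cached at construction time (TII, Subtarget, RegInfo) with lookups through the TargetMachine at each point of use, since the TargetMachine owns those objects and may replace them. Below is a minimal standalone sketch of the before/after pattern; the TargetMachine, InstrInfo, CachingPass and QueryingPass types are simplified illustrative stand-ins, not the real LLVM classes.

// A minimal sketch of the caching problem, using simplified stand-in types.
#include <cassert>
#include <memory>

struct InstrInfo { int Version; };

struct TargetMachine {
  std::unique_ptr<InstrInfo> II{new InstrInfo{1}};
  const InstrInfo *getInstrInfo() const { return II.get(); }
  // The TargetMachine owns its instruction info and may replace it; any
  // pointer that was cached earlier is now stale.
  void replaceInstrInfo(int NewVersion) { II.reset(new InstrInfo{NewVersion}); }
};

// Before: the pointer is cached at construction time.
struct CachingPass {
  const InstrInfo *TII;
  explicit CachingPass(const TargetMachine &TM) : TII(TM.getInstrInfo()) {}
};

// After: only the TargetMachine is kept, and the info is queried per use.
struct QueryingPass {
  const TargetMachine &TM;
  explicit QueryingPass(const TargetMachine &TM) : TM(TM) {}
  int infoVersion() const { return TM.getInstrInfo()->Version; }
};

int main() {
  TargetMachine TM;
  CachingPass Cached(TM);   // Cached.TII points at the original object
  QueryingPass Queried(TM);
  TM.replaceInstrInfo(2);   // the owned object is swapped out
  assert(Queried.infoVersion() == 2); // the per-use query sees the replacement
  // Cached.TII now dangles; dereferencing it would be undefined behaviour.
  (void)Cached;
  return 0;
}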
@@ -33,7 +33,6 @@ namespace {
 class AArch64DAGToDAGISel : public SelectionDAGISel {
   AArch64TargetMachine &TM;
-  const AArch64InstrInfo *TII;
 
   /// Keep a pointer to the AArch64Subtarget around so that we can
   /// make the right decision when generating code for different targets.
@@ -43,7 +42,6 @@ public:
   explicit AArch64DAGToDAGISel(AArch64TargetMachine &tm,
                                CodeGenOpt::Level OptLevel)
     : SelectionDAGISel(tm, OptLevel), TM(tm),
-      TII(static_cast<const AArch64InstrInfo*>(TM.getInstrInfo())),
       Subtarget(&TM.getSubtarget<AArch64Subtarget>()) {
   }
@@ -243,12 +241,12 @@ SDNode *AArch64DAGToDAGISel::TrySelectToMoveImm(SDNode *Node) {
 SDValue
 AArch64DAGToDAGISel::getConstantPoolItemAddress(SDLoc DL,
                                                 const Constant *CV) {
-  EVT PtrVT = TLI->getPointerTy();
+  EVT PtrVT = getTargetLowering()->getPointerTy();
 
-  switch (TLI->getTargetMachine().getCodeModel()) {
+  switch (getTargetLowering()->getTargetMachine().getCodeModel()) {
   case CodeModel::Small: {
     unsigned Alignment =
-      TLI->getDataLayout()->getABITypeAlignment(CV->getType());
+      getTargetLowering()->getDataLayout()->getABITypeAlignment(CV->getType());
     return CurDAG->getNode(
         AArch64ISD::WrapperSmall, DL, PtrVT,
         CurDAG->getTargetConstantPool(CV, PtrVT, 0, 0, AArch64II::MO_NO_FLAG),
@@ -312,7 +310,8 @@ SDNode *AArch64DAGToDAGISel::SelectToLitPool(SDNode *Node) {
                                  MemType.getSizeInBits()),
                         UnsignedVal);
   SDValue PoolAddr = getConstantPoolItemAddress(DL, CV);
-  unsigned Alignment = TLI->getDataLayout()->getABITypeAlignment(CV->getType());
+  unsigned Alignment =
+    getTargetLowering()->getDataLayout()->getABITypeAlignment(CV->getType());
 
   return CurDAG->getExtLoad(Extension, DL, DestType, CurDAG->getEntryNode(),
                             PoolAddr,
@@ -327,7 +326,8 @@ SDNode *AArch64DAGToDAGISel::LowerToFPLitPool(SDNode *Node) {
   const ConstantFP *FV = cast<ConstantFPSDNode>(Node)->getConstantFPValue();
   EVT DestType = Node->getValueType(0);
 
-  unsigned Alignment = TLI->getDataLayout()->getABITypeAlignment(FV->getType());
+  unsigned Alignment =
+    getTargetLowering()->getDataLayout()->getABITypeAlignment(FV->getType());
   SDValue PoolAddr = getConstantPoolItemAddress(DL, FV);
 
   return CurDAG->getLoad(DestType, DL, CurDAG->getEntryNode(), PoolAddr,
@@ -473,7 +473,7 @@ SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) {
                                     AArch64::ATOMIC_CMP_SWAP_I64);
   case ISD::FrameIndex: {
     int FI = cast<FrameIndexSDNode>(Node)->getIndex();
-    EVT PtrTy = TLI->getPointerTy();
+    EVT PtrTy = getTargetLowering()->getPointerTy();
     SDValue TFI = CurDAG->getTargetFrameIndex(FI, PtrTy);
     return CurDAG->SelectNodeTo(Node, AArch64::ADDxxi_lsl0_s, PtrTy,
                                 TFI, CurDAG->getTargetConstant(0, PtrTy));
@@ -39,12 +39,8 @@ static TargetLoweringObjectFile *createTLOF(AArch64TargetMachine &TM) {
   llvm_unreachable("unknown subtarget type");
 }
 
 AArch64TargetLowering::AArch64TargetLowering(AArch64TargetMachine &TM)
-  : TargetLowering(TM, createTLOF(TM)),
-    Subtarget(&TM.getSubtarget<AArch64Subtarget>()),
-    RegInfo(TM.getRegisterInfo()),
-    Itins(TM.getInstrItineraryData()) {
+  : TargetLowering(TM, createTLOF(TM)), Itins(TM.getInstrItineraryData()) {
 
   // SIMD compares set the entire lane's bits to 1
   setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
@@ -1928,7 +1924,7 @@ AArch64TargetLowering::LowerGlobalAddressELFSmall(SDValue Op,
   }
 
   unsigned char HiFixup, LoFixup;
-  bool UseGOT = Subtarget->GVIsIndirectSymbol(GV, RelocM);
+  bool UseGOT = getSubtarget()->GVIsIndirectSymbol(GV, RelocM);
 
   if (UseGOT) {
     HiFixup = AArch64II::MO_GOT;
@@ -2024,7 +2020,7 @@ SDValue AArch64TargetLowering::LowerTLSDescCall(SDValue SymAddr,
 SDValue
 AArch64TargetLowering::LowerGlobalTLSAddress(SDValue Op,
                                              SelectionDAG &DAG) const {
-  assert(Subtarget->isTargetELF() &&
+  assert(getSubtarget()->isTargetELF() &&
          "TLS not implemented for non-ELF targets");
   assert(getTargetMachine().getCodeModel() == CodeModel::Small
          && "TLS only supported in small memory model");
@@ -2799,7 +2795,7 @@ AArch64TargetLowering::PerformDAGCombine(SDNode *N,
   switch (N->getOpcode()) {
   default: break;
   case ISD::AND: return PerformANDCombine(N, DCI);
-  case ISD::OR: return PerformORCombine(N, DCI, Subtarget);
+  case ISD::OR: return PerformORCombine(N, DCI, getSubtarget());
   case ISD::SRA: return PerformSRACombine(N, DCI);
   }
   return SDValue();
@@ -247,9 +247,11 @@ public:
   std::pair<unsigned, const TargetRegisterClass*>
   getRegForInlineAsmConstraint(const std::string &Constraint, EVT VT) const;
 private:
-  const AArch64Subtarget *Subtarget;
-  const TargetRegisterInfo *RegInfo;
   const InstrItineraryData *Itins;
+
+  const AArch64Subtarget *getSubtarget() const {
+    return &getTargetMachine().getSubtarget<AArch64Subtarget>();
+  }
 };
 } // namespace llvm
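In the lowering header, the cached Subtarget and RegInfo members give way to a private accessor that re-derives the subtarget from the TargetMachine on every call. A short sketch of that accessor pattern follows, again with simplified stand-in types rather than the real LLVM API.

// Sketch of the accessor pattern, assuming simplified stand-in types.
#include <iostream>

struct Subtarget { const char *Name; };

struct TargetMachine {
  Subtarget ST{"generic"};
  const Subtarget &getSubtarget() const { return ST; }
};

class TargetLowering {
  const TargetMachine &TM;

  // No stored Subtarget pointer: every query goes back to the owner, so the
  // lowering code always sees whatever the TargetMachine currently holds.
  const Subtarget *getSubtarget() const { return &TM.getSubtarget(); }

public:
  explicit TargetLowering(const TargetMachine &TM) : TM(TM) {}
  void lower() const { std::cout << getSubtarget()->Name << "\n"; }
};

int main() {
  TargetMachine TM;
  TargetLowering TL(TM);
  TL.lower();               // prints "generic"
  TM.ST.Name = "cortex-a53";
  TL.lower();               // the next query reflects the change
  return 0;
}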
@@ -36,7 +36,7 @@ using namespace llvm;
 
 AArch64InstrInfo::AArch64InstrInfo(const AArch64Subtarget &STI)
   : AArch64GenInstrInfo(AArch64::ADJCALLSTACKDOWN, AArch64::ADJCALLSTACKUP),
-    RI(*this, STI), Subtarget(STI) {}
+    Subtarget(STI) {}
 
 void AArch64InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator I, DebugLoc DL,
@@ -29,9 +29,8 @@
 
 using namespace llvm;
 
-AArch64RegisterInfo::AArch64RegisterInfo(const AArch64InstrInfo &tii,
-                                         const AArch64Subtarget &sti)
-  : AArch64GenRegisterInfo(AArch64::X30), TII(tii) {
+AArch64RegisterInfo::AArch64RegisterInfo()
+  : AArch64GenRegisterInfo(AArch64::X30) {
 }
 
 const uint16_t *
@@ -122,6 +121,8 @@ AArch64RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MBBI,
     return;
   }
 
+  const AArch64InstrInfo &TII =
+    *static_cast<const AArch64InstrInfo*>(MF.getTarget().getInstrInfo());
   int MinOffset, MaxOffset, OffsetScale;
   if (MI.getOpcode() == AArch64::ADDxxi_lsl0_s) {
     MinOffset = 0;
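The register-info change follows the same idea one level down: instead of a reference injected into the constructor, eliminateFrameIndex now fetches the instruction info from the function being compiled at the moment it needs it. A self-contained sketch of that point-of-use lookup, using simplified stand-ins for MachineFunction, TargetMachine and the instruction info:

// Sketch of a point-of-use lookup, assuming simplified stand-in types.
#include <cassert>

struct InstrInfo { int addOpcode() const { return 42; } };

struct TargetMachine {
  InstrInfo II;
  const InstrInfo *getInstrInfo() const { return &II; }
};

struct MachineFunction {
  const TargetMachine &TM;
  const TargetMachine &getTarget() const { return TM; }
};

struct RegisterInfo {
  // No constructor-injected InstrInfo reference: the dependency is pulled
  // from the function's target machine right where it is used.
  void eliminateFrameIndex(const MachineFunction &MF) const {
    const InstrInfo &TII = *MF.getTarget().getInstrInfo();
    assert(TII.addOpcode() == 42);
  }
};

int main() {
  TargetMachine TM;
  MachineFunction MF{TM};
  RegisterInfo().eliminateFrameIndex(MF);
  return 0;
}

This also removes the construction-order coupling visible in the old code, where the instruction info passed itself into the register info's constructor (RI(*this, STI)).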
@@ -25,12 +25,7 @@ class AArch64InstrInfo;
 class AArch64Subtarget;
 
 struct AArch64RegisterInfo : public AArch64GenRegisterInfo {
-private:
-  const AArch64InstrInfo &TII;
-
-public:
-  AArch64RegisterInfo(const AArch64InstrInfo &tii,
-                      const AArch64Subtarget &sti);
+  AArch64RegisterInfo();
 
   const uint16_t *getCalleeSavedRegs(const MachineFunction *MF = 0) const;
   const uint32_t *getCallPreservedMask(CallingConv::ID) const;