AMDGPU: Move subtarget specific code out of AMDGPUInstrInfo.cpp
Summary: Also delete all the stub functions that are identical to the
implementations in TargetInstrInfo.cpp.

Reviewers: arsenm

Subscribers: arsenm, llvm-commits

Differential Revision: http://reviews.llvm.org/D16609

llvm-svn: 259054
parent 7a2e2bed67
commit 2ff726272a
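The cleanup pattern is worth spelling out: most of the deleted AMDGPUInstrInfo methods were stub overrides whose bodies matched what the TargetInstrInfo base class already does, so removing them is behavior-preserving. A minimal standalone sketch of that idea, using hypothetical stand-in classes rather than the real LLVM ones:

#include <iostream>

// Hypothetical stand-ins for illustration; these are NOT the real
// llvm::TargetInstrInfo / llvm::AMDGPUInstrInfo classes.
struct MachineInstrStub {};

struct BaseTII {
  virtual ~BaseTII() = default;
  // Base-class default: 0 means "not a load from a stack slot".
  virtual unsigned isLoadFromStackSlot(const MachineInstrStub &,
                                       int &FrameIndex) const {
    (void)FrameIndex;
    return 0;
  }
};

// Before the commit: a stub override restating the default verbatim.
struct TIIWithStub : BaseTII {
  unsigned isLoadFromStackSlot(const MachineInstrStub &,
                               int &FrameIndex) const override {
    (void)FrameIndex;
    return 0; // identical to the inherited default
  }
};

// After the commit: no override; callers get the same answer from the base.
struct TIIWithoutStub : BaseTII {};

int main() {
  MachineInstrStub MI;
  int FI = -1;
  std::cout << TIIWithStub().isLoadFromStackSlot(MI, FI) << ' '
            << TIIWithoutStub().isLoadFromStackSlot(MI, FI) << '\n'; // 0 0
}

Deleting the stubs also lets the compiler, rather than dead code, document which hooks the target actually customizes.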
llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.cpp
@@ -37,157 +37,6 @@ const AMDGPURegisterInfo &AMDGPUInstrInfo::getRegisterInfo() const {
   return RI;
 }
 
-bool AMDGPUInstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
-                                            unsigned &SrcReg, unsigned &DstReg,
-                                            unsigned &SubIdx) const {
-  // TODO: Implement this function
-  return false;
-}
-
-unsigned AMDGPUInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
-                                              int &FrameIndex) const {
-  // TODO: Implement this function
-  return 0;
-}
-
-unsigned AMDGPUInstrInfo::isLoadFromStackSlotPostFE(const MachineInstr *MI,
-                                                    int &FrameIndex) const {
-  // TODO: Implement this function
-  return 0;
-}
-
-bool AMDGPUInstrInfo::hasLoadFromStackSlot(const MachineInstr *MI,
-                                           const MachineMemOperand *&MMO,
-                                           int &FrameIndex) const {
-  // TODO: Implement this function
-  return false;
-}
-unsigned AMDGPUInstrInfo::isStoreFromStackSlot(const MachineInstr *MI,
-                                               int &FrameIndex) const {
-  // TODO: Implement this function
-  return 0;
-}
-unsigned AMDGPUInstrInfo::isStoreFromStackSlotPostFE(const MachineInstr *MI,
-                                                     int &FrameIndex) const {
-  // TODO: Implement this function
-  return 0;
-}
-bool AMDGPUInstrInfo::hasStoreFromStackSlot(const MachineInstr *MI,
-                                            const MachineMemOperand *&MMO,
-                                            int &FrameIndex) const {
-  // TODO: Implement this function
-  return false;
-}
-
-MachineInstr *
-AMDGPUInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
-                                       MachineBasicBlock::iterator &MBBI,
-                                       LiveVariables *LV) const {
-  // TODO: Implement this function
-  return nullptr;
-}
-
-void
-AMDGPUInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
-                                     MachineBasicBlock::iterator MI,
-                                     unsigned SrcReg, bool isKill,
-                                     int FrameIndex,
-                                     const TargetRegisterClass *RC,
-                                     const TargetRegisterInfo *TRI) const {
-  llvm_unreachable("Not Implemented");
-}
-
-void
-AMDGPUInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
-                                      MachineBasicBlock::iterator MI,
-                                      unsigned DestReg, int FrameIndex,
-                                      const TargetRegisterClass *RC,
-                                      const TargetRegisterInfo *TRI) const {
-  llvm_unreachable("Not Implemented");
-}
-
-bool AMDGPUInstrInfo::expandPostRAPseudo (MachineBasicBlock::iterator MI) const {
-  MachineBasicBlock *MBB = MI->getParent();
-  int OffsetOpIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
-                                               AMDGPU::OpName::addr);
-  // addr is a custom operand with multiple MI operands, and only the
-  // first MI operand is given a name.
-  int RegOpIdx = OffsetOpIdx + 1;
-  int ChanOpIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
-                                             AMDGPU::OpName::chan);
-  if (isRegisterLoad(*MI)) {
-    int DstOpIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
-                                              AMDGPU::OpName::dst);
-    unsigned RegIndex = MI->getOperand(RegOpIdx).getImm();
-    unsigned Channel = MI->getOperand(ChanOpIdx).getImm();
-    unsigned Address = calculateIndirectAddress(RegIndex, Channel);
-    unsigned OffsetReg = MI->getOperand(OffsetOpIdx).getReg();
-    if (OffsetReg == AMDGPU::INDIRECT_BASE_ADDR) {
-      buildMovInstr(MBB, MI, MI->getOperand(DstOpIdx).getReg(),
-                    getIndirectAddrRegClass()->getRegister(Address));
-    } else {
-      buildIndirectRead(MBB, MI, MI->getOperand(DstOpIdx).getReg(),
-                        Address, OffsetReg);
-    }
-  } else if (isRegisterStore(*MI)) {
-    int ValOpIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
-                                              AMDGPU::OpName::val);
-    unsigned RegIndex = MI->getOperand(RegOpIdx).getImm();
-    unsigned Channel = MI->getOperand(ChanOpIdx).getImm();
-    unsigned Address = calculateIndirectAddress(RegIndex, Channel);
-    unsigned OffsetReg = MI->getOperand(OffsetOpIdx).getReg();
-    if (OffsetReg == AMDGPU::INDIRECT_BASE_ADDR) {
-      buildMovInstr(MBB, MI, getIndirectAddrRegClass()->getRegister(Address),
-                    MI->getOperand(ValOpIdx).getReg());
-    } else {
-      buildIndirectWrite(MBB, MI, MI->getOperand(ValOpIdx).getReg(),
-                         calculateIndirectAddress(RegIndex, Channel),
-                         OffsetReg);
-    }
-  } else {
-    return false;
-  }
-
-  MBB->erase(MI);
-  return true;
-}
-
-MachineInstr *AMDGPUInstrInfo::foldMemoryOperandImpl(
-    MachineFunction &MF, MachineInstr *MI, ArrayRef<unsigned> Ops,
-    MachineBasicBlock::iterator InsertPt, int FrameIndex) const {
-  // TODO: Implement this function
-  return nullptr;
-}
-MachineInstr *AMDGPUInstrInfo::foldMemoryOperandImpl(
-    MachineFunction &MF, MachineInstr *MI, ArrayRef<unsigned> Ops,
-    MachineBasicBlock::iterator InsertPt, MachineInstr *LoadMI) const {
-  // TODO: Implement this function
-  return nullptr;
-}
-bool
-AMDGPUInstrInfo::unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
-                                     unsigned Reg, bool UnfoldLoad,
-                                     bool UnfoldStore,
-                                     SmallVectorImpl<MachineInstr*> &NewMIs) const {
-  // TODO: Implement this function
-  return false;
-}
-
-bool
-AMDGPUInstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
-                                     SmallVectorImpl<SDNode*> &NewNodes) const {
-  // TODO: Implement this function
-  return false;
-}
-
-unsigned
-AMDGPUInstrInfo::getOpcodeAfterMemoryUnfold(unsigned Opc,
-                                            bool UnfoldLoad, bool UnfoldStore,
-                                            unsigned *LoadRegIndex) const {
-  // TODO: Implement this function
-  return 0;
-}
-
 bool AMDGPUInstrInfo::enableClusterLoads() const {
   return true;
 }
@@ -214,53 +63,6 @@ bool AMDGPUInstrInfo::shouldScheduleLoadsNear(SDNode *Load0, SDNode *Load1,
   return (NumLoads <= 16 && (Offset1 - Offset0) < 64);
 }
 
-bool
-AMDGPUInstrInfo::ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond)
-    const {
-  // TODO: Implement this function
-  return true;
-}
-void AMDGPUInstrInfo::insertNoop(MachineBasicBlock &MBB,
-                                 MachineBasicBlock::iterator MI) const {
-  // TODO: Implement this function
-}
-
-bool AMDGPUInstrInfo::isPredicated(const MachineInstr *MI) const {
-  // TODO: Implement this function
-  return false;
-}
-
-bool AMDGPUInstrInfo::SubsumesPredicate(ArrayRef<MachineOperand> Pred1,
-                                        ArrayRef<MachineOperand> Pred2) const {
-  // TODO: Implement this function
-  return false;
-}
-
-bool AMDGPUInstrInfo::DefinesPredicate(MachineInstr *MI,
-                                       std::vector<MachineOperand> &Pred) const {
-  // TODO: Implement this function
-  return false;
-}
-
-bool AMDGPUInstrInfo::isPredicable(MachineInstr *MI) const {
-  // TODO: Implement this function
-  return MI->getDesc().isPredicable();
-}
-
-bool
-AMDGPUInstrInfo::isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const {
-  // TODO: Implement this function
-  return true;
-}
-
-bool AMDGPUInstrInfo::isRegisterStore(const MachineInstr &MI) const {
-  return get(MI.getOpcode()).TSFlags & AMDGPU_FLAG_REGISTER_STORE;
-}
-
-bool AMDGPUInstrInfo::isRegisterLoad(const MachineInstr &MI) const {
-  return get(MI.getOpcode()).TSFlags & AMDGPU_FLAG_REGISTER_LOAD;
-}
 
 int AMDGPUInstrInfo::getIndirectIndexBegin(const MachineFunction &MF) const {
   const MachineRegisterInfo &MRI = MF.getRegInfo();
   const MachineFrameInfo *MFI = MF.getFrameInfo();
@@ -364,14 +166,3 @@ int AMDGPUInstrInfo::pseudoToMCOpcode(int Opcode) const {
 
   return MCOp;
 }
-
-ArrayRef<std::pair<int, const char *>>
-AMDGPUInstrInfo::getSerializableTargetIndices() const {
-  static const std::pair<int, const char *> TargetIndices[] = {
-      {AMDGPU::TI_CONSTDATA_START, "amdgpu-constdata-start"},
-      {AMDGPU::TI_SCRATCH_RSRC_DWORD0, "amdgpu-scratch-rsrc-dword0"},
-      {AMDGPU::TI_SCRATCH_RSRC_DWORD1, "amdgpu-scratch-rsrc-dword1"},
-      {AMDGPU::TI_SCRATCH_RSRC_DWORD2, "amdgpu-scratch-rsrc-dword2"},
-      {AMDGPU::TI_SCRATCH_RSRC_DWORD3, "amdgpu-scratch-rsrc-dword3"}};
-  return makeArrayRef(TargetIndices);
-}
llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.h
@@ -48,52 +48,6 @@ public:
 
   virtual const AMDGPURegisterInfo &getRegisterInfo() const = 0;
 
-  bool isCoalescableExtInstr(const MachineInstr &MI, unsigned &SrcReg,
-                             unsigned &DstReg, unsigned &SubIdx) const override;
-
-  unsigned isLoadFromStackSlot(const MachineInstr *MI,
-                               int &FrameIndex) const override;
-  unsigned isLoadFromStackSlotPostFE(const MachineInstr *MI,
-                                     int &FrameIndex) const override;
-  bool hasLoadFromStackSlot(const MachineInstr *MI,
-                            const MachineMemOperand *&MMO,
-                            int &FrameIndex) const override;
-  unsigned isStoreFromStackSlot(const MachineInstr *MI, int &FrameIndex) const;
-  unsigned isStoreFromStackSlotPostFE(const MachineInstr *MI,
-                                      int &FrameIndex) const;
-  bool hasStoreFromStackSlot(const MachineInstr *MI,
-                             const MachineMemOperand *&MMO,
-                             int &FrameIndex) const;
-
-  MachineInstr *
-  convertToThreeAddress(MachineFunction::iterator &MFI,
-                        MachineBasicBlock::iterator &MBBI,
-                        LiveVariables *LV) const override;
-
-
-  bool expandPostRAPseudo(MachineBasicBlock::iterator MI) const override;
-
-  void storeRegToStackSlot(MachineBasicBlock &MBB,
-                           MachineBasicBlock::iterator MI,
-                           unsigned SrcReg, bool isKill, int FrameIndex,
-                           const TargetRegisterClass *RC,
-                           const TargetRegisterInfo *TRI) const override;
-  void loadRegFromStackSlot(MachineBasicBlock &MBB,
-                            MachineBasicBlock::iterator MI,
-                            unsigned DestReg, int FrameIndex,
-                            const TargetRegisterClass *RC,
-                            const TargetRegisterInfo *TRI) const override;
-
-protected:
-  MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
-                                      ArrayRef<unsigned> Ops,
-                                      MachineBasicBlock::iterator InsertPt,
-                                      int FrameIndex) const override;
-  MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
-                                      ArrayRef<unsigned> Ops,
-                                      MachineBasicBlock::iterator InsertPt,
-                                      MachineInstr *LoadMI) const override;
-
 public:
   /// \returns the smallest register index that will be accessed by an indirect
   /// read or write or -1 if indirect addressing is not used by this program.
@@ -103,57 +57,22 @@ public:
   /// read or write or -1 if indirect addressing is not used by this program.
   int getIndirectIndexEnd(const MachineFunction &MF) const;
 
-  bool unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
-                           unsigned Reg, bool UnfoldLoad, bool UnfoldStore,
-                           SmallVectorImpl<MachineInstr *> &NewMIs) const override;
-  bool unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
-                           SmallVectorImpl<SDNode *> &NewNodes) const override;
-  unsigned getOpcodeAfterMemoryUnfold(unsigned Opc,
-                                      bool UnfoldLoad, bool UnfoldStore,
-                                      unsigned *LoadRegIndex = nullptr) const override;
-
   bool enableClusterLoads() const override;
 
   bool shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
                                int64_t Offset1, int64_t Offset2,
                                unsigned NumLoads) const override;
 
-  bool
-  ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const override;
-  void insertNoop(MachineBasicBlock &MBB,
-                  MachineBasicBlock::iterator MI) const override;
-  bool isPredicated(const MachineInstr *MI) const override;
-  bool SubsumesPredicate(ArrayRef<MachineOperand> Pred1,
-                         ArrayRef<MachineOperand> Pred2) const override;
-  bool DefinesPredicate(MachineInstr *MI,
-                        std::vector<MachineOperand> &Pred) const override;
-  bool isPredicable(MachineInstr *MI) const override;
-  bool isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const override;
-
-  // Helper functions that check the opcode for status information
-  bool isRegisterStore(const MachineInstr &MI) const;
-  bool isRegisterLoad(const MachineInstr &MI) const;
-
   /// \brief Return a target-specific opcode if Opcode is a pseudo instruction.
   /// Return -1 if the target-specific opcode for the pseudo instruction does
   /// not exist. If Opcode is not a pseudo instruction, this is identity.
   int pseudoToMCOpcode(int Opcode) const;
 
-  /// \brief Return the descriptor of the target-specific machine instruction
-  /// that corresponds to the specified pseudo or native opcode.
-  const MCInstrDesc &getMCOpcodeFromPseudo(unsigned Opcode) const {
-    return get(pseudoToMCOpcode(Opcode));
-  }
-
-  ArrayRef<std::pair<int, const char *>>
-  getSerializableTargetIndices() const override;
-
   //===---------------------------------------------------------------------===//
   // Pure virtual funtions to be implemented by sub-classes.
   //===---------------------------------------------------------------------===//
 
   virtual bool isMov(unsigned opcode) const = 0;
 
   /// \brief Calculate the "Indirect Address" for the given \p RegIndex and
   /// \p Channel
   ///
@@ -184,11 +103,6 @@ public:
                                      unsigned ValueReg, unsigned Address,
                                      unsigned OffsetReg) const = 0;
 
-  /// \brief Build a MOV instruction.
-  virtual MachineInstr *buildMovInstr(MachineBasicBlock *MBB,
-                                      MachineBasicBlock::iterator I,
-                                      unsigned DstReg, unsigned SrcReg) const = 0;
-
   /// \brief Given a MIMG \p Opcode that writes all 4 channels, return the
   /// equivalent opcode that writes \p Channels Channels.
   int getMaskedMIMGOp(uint16_t Opcode, unsigned Channels) const;
llvm/lib/Target/AMDGPU/R600InstrInfo.cpp
@@ -1050,7 +1050,51 @@ unsigned int R600InstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
 bool R600InstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
 
   switch(MI->getOpcode()) {
-  default: return AMDGPUInstrInfo::expandPostRAPseudo(MI);
+  default: {
+    MachineBasicBlock *MBB = MI->getParent();
+    int OffsetOpIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
+                                                 AMDGPU::OpName::addr);
+    // addr is a custom operand with multiple MI operands, and only the
+    // first MI operand is given a name.
+    int RegOpIdx = OffsetOpIdx + 1;
+    int ChanOpIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
+                                               AMDGPU::OpName::chan);
+    if (isRegisterLoad(*MI)) {
+      int DstOpIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
+                                                AMDGPU::OpName::dst);
+      unsigned RegIndex = MI->getOperand(RegOpIdx).getImm();
+      unsigned Channel = MI->getOperand(ChanOpIdx).getImm();
+      unsigned Address = calculateIndirectAddress(RegIndex, Channel);
+      unsigned OffsetReg = MI->getOperand(OffsetOpIdx).getReg();
+      if (OffsetReg == AMDGPU::INDIRECT_BASE_ADDR) {
+        buildMovInstr(MBB, MI, MI->getOperand(DstOpIdx).getReg(),
+                      getIndirectAddrRegClass()->getRegister(Address));
+      } else {
+        buildIndirectRead(MBB, MI, MI->getOperand(DstOpIdx).getReg(),
+                          Address, OffsetReg);
+      }
+    } else if (isRegisterStore(*MI)) {
+      int ValOpIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
+                                                AMDGPU::OpName::val);
+      unsigned RegIndex = MI->getOperand(RegOpIdx).getImm();
+      unsigned Channel = MI->getOperand(ChanOpIdx).getImm();
+      unsigned Address = calculateIndirectAddress(RegIndex, Channel);
+      unsigned OffsetReg = MI->getOperand(OffsetOpIdx).getReg();
+      if (OffsetReg == AMDGPU::INDIRECT_BASE_ADDR) {
+        buildMovInstr(MBB, MI, getIndirectAddrRegClass()->getRegister(Address),
+                      MI->getOperand(ValOpIdx).getReg());
+      } else {
+        buildIndirectWrite(MBB, MI, MI->getOperand(ValOpIdx).getReg(),
+                           calculateIndirectAddress(RegIndex, Channel),
+                           OffsetReg);
+      }
+    } else {
+      return false;
+    }
+
+    MBB->erase(MI);
+    return true;
+  }
   case AMDGPU::R600_EXTRACT_ELT_V2:
   case AMDGPU::R600_EXTRACT_ELT_V4:
     buildIndirectRead(MI->getParent(), MI, MI->getOperand(0).getReg(),
@@ -1428,3 +1472,11 @@ void R600InstrInfo::clearFlag(MachineInstr *MI, unsigned Operand,
     FlagOp.setImm(InstFlags);
   }
 }
+
+bool R600InstrInfo::isRegisterStore(const MachineInstr &MI) const {
+  return get(MI.getOpcode()).TSFlags & AMDGPU_FLAG_REGISTER_STORE;
+}
+
+bool R600InstrInfo::isRegisterLoad(const MachineInstr &MI) const {
+  return get(MI.getOpcode()).TSFlags & AMDGPU_FLAG_REGISTER_LOAD;
+}
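The two helpers that land in R600InstrInfo test target-private bits in the per-opcode TSFlags bitfield of the instruction descriptor. A small self-contained illustration of that pattern; the flag constants and descriptor type below are invented stand-ins, while the real ones come from AMDGPUInstrInfo.h and the generated instruction tables:

#include <cassert>
#include <cstdint>

// Invented stand-ins for MCInstrDesc and the AMDGPU_FLAG_* masks.
constexpr uint64_t FLAG_REGISTER_LOAD  = UINT64_C(1) << 63;
constexpr uint64_t FLAG_REGISTER_STORE = UINT64_C(1) << 62;

struct InstrDescStub {
  uint64_t TSFlags; // target-specific flag bits for one opcode
};

// Mirrors the shape of R600InstrInfo::isRegisterStore above.
bool isRegisterStoreLike(const InstrDescStub &Desc) {
  return (Desc.TSFlags & FLAG_REGISTER_STORE) != 0;
}

int main() {
  InstrDescStub Store{FLAG_REGISTER_STORE};
  InstrDescStub Other{0};
  assert(isRegisterStoreLike(Store) && !isRegisterStoreLike(Other));
}

Because the flags are a property of the opcode's descriptor, the check is a single table lookup plus a bit test, which is why it can move freely between the base class and the R600 subclass.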
llvm/lib/Target/AMDGPU/R600InstrInfo.h
@@ -152,7 +152,7 @@ namespace llvm {
   /// instruction slots within an instruction group.
   bool isVector(const MachineInstr &MI) const;
 
-  bool isMov(unsigned Opcode) const override;
+  bool isMov(unsigned Opcode) const;
 
   DFAPacketizer *
   CreateTargetScheduleState(const TargetSubtargetInfo &) const override;
@@ -257,7 +257,7 @@ namespace llvm {
 
   MachineInstr *buildMovInstr(MachineBasicBlock *MBB,
                               MachineBasicBlock::iterator I,
-                              unsigned DstReg, unsigned SrcReg) const override;
+                              unsigned DstReg, unsigned SrcReg) const;
 
   /// \brief Get the index of Op in the MachineInstr.
   ///
@@ -290,6 +290,11 @@ namespace llvm {
 
   /// \brief Clear the specified flag on the instruction.
   void clearFlag(MachineInstr *MI, unsigned Operand, unsigned Flag) const;
+
+  // Helper functions that check the opcode for status information
+  bool isRegisterStore(const MachineInstr &MI) const;
+  bool isRegisterLoad(const MachineInstr &MI) const;
+
 };
 
 namespace AMDGPU {
llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -1011,25 +1011,6 @@ bool SIInstrInfo::findCommutedOpIndices(MachineInstr *MI,
   return fixCommutedOpIndices(SrcOpIdx0, SrcOpIdx1, Src0Idx, Src1Idx);
 }
 
-MachineInstr *SIInstrInfo::buildMovInstr(MachineBasicBlock *MBB,
-                                         MachineBasicBlock::iterator I,
-                                         unsigned DstReg,
-                                         unsigned SrcReg) const {
-  return BuildMI(*MBB, I, MBB->findDebugLoc(I), get(AMDGPU::V_MOV_B32_e32),
-                 DstReg) .addReg(SrcReg);
-}
-
-bool SIInstrInfo::isMov(unsigned Opcode) const {
-  switch(Opcode) {
-  default: return false;
-  case AMDGPU::S_MOV_B32:
-  case AMDGPU::S_MOV_B64:
-  case AMDGPU::V_MOV_B32_e32:
-  case AMDGPU::V_MOV_B32_e64:
-    return true;
-  }
-}
-
 static void removeModOperands(MachineInstr &MI) {
   unsigned Opc = MI.getOpcode();
   int Src0ModIdx = AMDGPU::getNamedOperandIdx(Opc,
@@ -3091,3 +3072,14 @@ bool SIInstrInfo::isHighLatencyInstruction(const MachineInstr *MI) const {
 
   return isMUBUF(Opc) || isMTBUF(Opc) || isMIMG(Opc);
 }
+
+ArrayRef<std::pair<int, const char *>>
+SIInstrInfo::getSerializableTargetIndices() const {
+  static const std::pair<int, const char *> TargetIndices[] = {
+      {AMDGPU::TI_CONSTDATA_START, "amdgpu-constdata-start"},
+      {AMDGPU::TI_SCRATCH_RSRC_DWORD0, "amdgpu-scratch-rsrc-dword0"},
+      {AMDGPU::TI_SCRATCH_RSRC_DWORD1, "amdgpu-scratch-rsrc-dword1"},
+      {AMDGPU::TI_SCRATCH_RSRC_DWORD2, "amdgpu-scratch-rsrc-dword2"},
+      {AMDGPU::TI_SCRATCH_RSRC_DWORD3, "amdgpu-scratch-rsrc-dword3"}};
+  return makeArrayRef(TargetIndices);
+}
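getSerializableTargetIndices exists so the MIR serializer can print and re-parse target-index operands by name, which is also why the table moves to SIInstrInfo together with the SI-specific TI_* entries. A rough sketch of how such a static name table is typically consumed; the lookup helpers and index values are hypothetical, not LLVM API:

#include <cstdio>
#include <cstring>
#include <utility>

// Stand-in index values; the real keys are AMDGPU::TI_* enumerators.
static const std::pair<int, const char *> TargetIndices[] = {
    {0, "amdgpu-constdata-start"},
    {1, "amdgpu-scratch-rsrc-dword0"},
};

// Hypothetical lookup helper for the printer direction...
const char *indexToName(int Index) {
  for (const auto &Entry : TargetIndices)
    if (Entry.first == Index)
      return Entry.second;
  return nullptr; // unknown index
}

// ...and for the parser direction.
int nameToIndex(const char *Name) {
  for (const auto &Entry : TargetIndices)
    if (std::strcmp(Entry.second, Name) == 0)
      return Entry.first;
  return -1; // unknown name
}

int main() {
  std::printf("%s <-> %d\n", indexToName(0),
              nameToIndex("amdgpu-constdata-start"));
}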
llvm/lib/Target/AMDGPU/SIInstrInfo.h
@@ -137,11 +137,6 @@ public:
                                    MachineInstr *MIa, MachineInstr *MIb,
                                    AliasAnalysis *AA = nullptr) const override;
 
-  MachineInstr *buildMovInstr(MachineBasicBlock *MBB,
-                              MachineBasicBlock::iterator I,
-                              unsigned DstReg, unsigned SrcReg) const override;
-  bool isMov(unsigned Opcode) const override;
-
   bool FoldImmediate(MachineInstr *UseMI, MachineInstr *DefMI,
                      unsigned Reg, MachineRegisterInfo *MRI) const final;
 
@@ -465,6 +460,16 @@ public:
 
   bool isLowLatencyInstruction(const MachineInstr *MI) const;
   bool isHighLatencyInstruction(const MachineInstr *MI) const;
+
+  /// \brief Return the descriptor of the target-specific machine instruction
+  /// that corresponds to the specified pseudo or native opcode.
+  const MCInstrDesc &getMCOpcodeFromPseudo(unsigned Opcode) const {
+    return get(pseudoToMCOpcode(Opcode));
+  }
+
+  ArrayRef<std::pair<int, const char *>>
+  getSerializableTargetIndices() const override;
+
 };
 
 namespace AMDGPU {