Remove TargetInstrInfo::canFoldMemoryOperand
canFoldMemoryOperand is not actually used anywhere in the codebase - all existing users instead call foldMemoryOperand directly when they wish to fold, and can correctly deduce what they need from its return value.

This patch removes the canFoldMemoryOperand base function and the target implementations; only x86 had a real (bit-rotted) implementation, although AMDGPU had a preparatory stub that had never needed to be completed.

Differential Revision: http://reviews.llvm.org/D11331

llvm-svn: 242638
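To illustrate the caller-side pattern the message describes, here is a minimal sketch (not part of this commit): callers simply attempt the fold and branch on whether foldMemoryOperand returned a new instruction. The helper name tryFoldIntoStackSlot is hypothetical, and the frame-index overload of foldMemoryOperand is assumed to have the shape it had around this revision.

// A minimal caller-side sketch, not part of this commit.  The helper name is
// hypothetical; the foldMemoryOperand overload used here is the frame-index
// one as it existed around this revision.
#include "llvm/ADT/ArrayRef.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/Target/TargetInstrInfo.h"

using namespace llvm;

// Try to fold the register operands listed in Ops into a load/store of the
// given stack slot.  There is no separate "can fold?" query: foldMemoryOperand
// returns the newly created folded instruction on success and nullptr on
// failure, which is all the caller needs to know.
static bool tryFoldIntoStackSlot(const TargetInstrInfo &TII, MachineInstr *MI,
                                 ArrayRef<unsigned> Ops, int FrameIndex) {
  if (MachineInstr *FoldedMI = TII.foldMemoryOperand(MI, Ops, FrameIndex)) {
    // Folding succeeded; callers typically erase the original MI and continue
    // working with FoldedMI from here on.
    (void)FoldedMI;
    return true;
  }
  // Folding failed; fall back to an explicit spill/reload around MI.
  return false;
}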
commit ba51d116c4
parent 17b906058e
Changed files:
  llvm/include/llvm/Target/TargetInstrInfo.h
  llvm/lib/CodeGen/TargetInstrInfo.cpp
  llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.cpp
  llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.h
  llvm/lib/Target/X86/X86InstrInfo.cpp
  llvm/lib/Target/X86/X86InstrInfo.h
llvm/include/llvm/Target/TargetInstrInfo.h
@@ -819,10 +819,6 @@ protected:
   }
 
 public:
-  /// Returns true for the specified load / store if folding is possible.
-  virtual bool canFoldMemoryOperand(const MachineInstr *MI,
-                                    ArrayRef<unsigned> Ops) const;
-
   /// unfoldMemoryOperand - Separate a single instruction which folded a load or
   /// a store or a load and a store into two or more instruction. If this is
   /// possible, returns true as well as the new instructions by reference.
llvm/lib/CodeGen/TargetInstrInfo.cpp
@@ -384,11 +384,6 @@ void TargetInstrInfo::getNoopForMachoTarget(MCInst &NopInst) const {
   llvm_unreachable("Not a MachO target");
 }
 
-bool TargetInstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
-                                           ArrayRef<unsigned> Ops) const {
-  return MI->isCopy() && Ops.size() == 1 && canFoldCopy(MI, Ops[0]);
-}
-
 static MachineInstr *foldPatchpoint(MachineFunction &MF, MachineInstr *MI,
                                     ArrayRef<unsigned> Ops, int FrameIndex,
                                     const TargetInstrInfo &TII) {
llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.cpp
@@ -164,11 +164,6 @@ MachineInstr *AMDGPUInstrInfo::foldMemoryOperandImpl(
   // TODO: Implement this function
   return nullptr;
 }
-bool AMDGPUInstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
-                                           ArrayRef<unsigned> Ops) const {
-  // TODO: Implement this function
-  return false;
-}
 bool
 AMDGPUInstrInfo::unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
                                      unsigned Reg, bool UnfoldLoad,
llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.h
@@ -103,8 +103,6 @@ public:
   /// read or write or -1 if indirect addressing is not used by this program.
   int getIndirectIndexEnd(const MachineFunction &MF) const;
 
-  bool canFoldMemoryOperand(const MachineInstr *MI,
-                            ArrayRef<unsigned> Ops) const override;
   bool unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
                            unsigned Reg, bool UnfoldLoad, bool UnfoldStore,
                            SmallVectorImpl<MachineInstr *> &NewMIs) const override;
llvm/lib/Target/X86/X86InstrInfo.cpp
@@ -5469,62 +5469,6 @@ MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
                                /*Size=*/0, Alignment, /*AllowCommute=*/true);
 }
 
-bool X86InstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
-                                        ArrayRef<unsigned> Ops) const {
-  // Check switch flag
-  if (NoFusing) return 0;
-
-  if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
-    switch (MI->getOpcode()) {
-    default: return false;
-    case X86::TEST8rr:
-    case X86::TEST16rr:
-    case X86::TEST32rr:
-    case X86::TEST64rr:
-      return true;
-    case X86::ADD32ri:
-      // FIXME: AsmPrinter doesn't know how to handle
-      // X86II::MO_GOT_ABSOLUTE_ADDRESS after folding.
-      if (MI->getOperand(2).getTargetFlags() == X86II::MO_GOT_ABSOLUTE_ADDRESS)
-        return false;
-      break;
-    }
-  }
-
-  if (Ops.size() != 1)
-    return false;
-
-  unsigned OpNum = Ops[0];
-  unsigned Opc = MI->getOpcode();
-  unsigned NumOps = MI->getDesc().getNumOperands();
-  bool isTwoAddr = NumOps > 1 &&
-    MI->getDesc().getOperandConstraint(1, MCOI::TIED_TO) != -1;
-
-  // Folding a memory location into the two-address part of a two-address
-  // instruction is different than folding it other places. It requires
-  // replacing the *two* registers with the memory location.
-  const DenseMap<unsigned,
-                 std::pair<unsigned,unsigned> > *OpcodeTablePtr = nullptr;
-  if (isTwoAddr && NumOps >= 2 && OpNum < 2) {
-    OpcodeTablePtr = &RegOp2MemOpTable2Addr;
-  } else if (OpNum == 0) {
-    if (Opc == X86::MOV32r0)
-      return true;
-
-    OpcodeTablePtr = &RegOp2MemOpTable0;
-  } else if (OpNum == 1) {
-    OpcodeTablePtr = &RegOp2MemOpTable1;
-  } else if (OpNum == 2) {
-    OpcodeTablePtr = &RegOp2MemOpTable2;
-  } else if (OpNum == 3) {
-    OpcodeTablePtr = &RegOp2MemOpTable3;
-  }
-
-  if (OpcodeTablePtr && OpcodeTablePtr->count(Opc))
-    return true;
-  return TargetInstrInfo::canFoldMemoryOperand(MI, Ops);
-}
-
 bool X86InstrInfo::unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
                                        unsigned Reg, bool UnfoldLoad, bool UnfoldStore,
                                        SmallVectorImpl<MachineInstr*> &NewMIs) const {
llvm/lib/Target/X86/X86InstrInfo.h
@@ -342,11 +342,6 @@ public:
                                       MachineBasicBlock::iterator InsertPt,
                                       MachineInstr *LoadMI) const override;
 
-  /// canFoldMemoryOperand - Returns true if the specified load / store is
-  /// folding is possible.
-  bool canFoldMemoryOperand(const MachineInstr *,
-                            ArrayRef<unsigned>) const override;
-
   /// unfoldMemoryOperand - Separate a single instruction which folded a load or
   /// a store or a load and a store into two or more instruction. If this is
  /// possible, returns true as well as the new instructions by reference.