replace MachineCombinerPattern namespace and enum with enum class; NFCI
Also, remove an enum hack where enum values were used as indexes into an array. We may want to make this a real class to allow pattern-based queries/customization (D13417).

llvm-svn: 252196
commit 387e66e79f (parent 2cd0d6d625)
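To make the shape of the change concrete before the diff, here is a minimal, self-contained sketch (not the LLVM sources themselves). It shows a scoped enum class in place of the old namespace-plus-plain-enum, and an explicit switch that maps a pattern to a table row where the old code indexed the array with the enumerator value directly. The rowForPattern helper, the main driver, and the operand-index values are illustrative assumptions, not code from the tree.

// Sketch only: a standalone analogue of the enum-class conversion in this commit.
#include <cassert>
#include <cstdio>

// New style: a scoped enumeration; enumerators need no MC_ prefix and do not
// convert implicitly to int.
enum class MachineCombinerPattern {
  REASSOC_AX_BY,
  REASSOC_AX_YB,
  REASSOC_XA_BY,
  REASSOC_XA_YB,
};

// The old code indexed an array directly with the enum value (OpIdx[Pattern][i]).
// A scoped enum removes that implicit conversion, so the row is chosen with an
// explicit switch, mirroring the reassociateOps() change in the diff below.
static int rowForPattern(MachineCombinerPattern Pattern) {
  switch (Pattern) {
  case MachineCombinerPattern::REASSOC_AX_BY: return 0;
  case MachineCombinerPattern::REASSOC_AX_YB: return 1;
  case MachineCombinerPattern::REASSOC_XA_BY: return 2;
  case MachineCombinerPattern::REASSOC_XA_YB: return 3;
  }
  assert(false && "unexpected MachineCombinerPattern");
  return -1;
}

int main() {
  // Illustrative operand-index table; the values are placeholders, not the real OpIdx.
  static const int OpIdx[4][4] = {
      {1, 1, 2, 2}, {1, 2, 2, 1}, {2, 1, 1, 2}, {2, 2, 1, 1}};
  MachineCombinerPattern P = MachineCombinerPattern::REASSOC_XA_BY;
  int Row = rowForPattern(P);
  std::printf("row %d -> operands %d %d %d %d\n", Row, OpIdx[Row][0],
              OpIdx[Row][1], OpIdx[Row][2], OpIdx[Row][3]);
  return 0;
}

The design point is that a scoped enumeration no longer converts implicitly to an integer, which is exactly what breaks the "enum value as array index" hack and forces the explicit pattern-to-row mapping seen in TargetInstrInfo::reassociateOps below.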
@@ -17,35 +17,30 @@
 
 namespace llvm {
 
-/// Enumeration of instruction pattern supported by machine combiner
-///
-///
-namespace MachineCombinerPattern {
-// Forward declaration
-enum MC_PATTERN : int {
+/// These are instruction patterns matched by the machine combiner pass.
+enum class MachineCombinerPattern {
   // These are commutative variants for reassociating a computation chain. See
   // the comments before getMachineCombinerPatterns() in TargetInstrInfo.cpp.
-  MC_REASSOC_AX_BY = 0,
-  MC_REASSOC_AX_YB = 1,
-  MC_REASSOC_XA_BY = 2,
-  MC_REASSOC_XA_YB = 3,
+  REASSOC_AX_BY,
+  REASSOC_AX_YB,
+  REASSOC_XA_BY,
+  REASSOC_XA_YB,
 
-  /// Enumeration of instruction pattern supported by AArch64 machine combiner
-  MC_NONE,
-  MC_MULADDW_OP1,
-  MC_MULADDW_OP2,
-  MC_MULSUBW_OP1,
-  MC_MULSUBW_OP2,
-  MC_MULADDWI_OP1,
-  MC_MULSUBWI_OP1,
-  MC_MULADDX_OP1,
-  MC_MULADDX_OP2,
-  MC_MULSUBX_OP1,
-  MC_MULSUBX_OP2,
-  MC_MULADDXI_OP1,
-  MC_MULSUBXI_OP1
+  // These are multiply-add patterns matched by the AArch64 machine combiner.
+  MULADDW_OP1,
+  MULADDW_OP2,
+  MULSUBW_OP1,
+  MULSUBW_OP2,
+  MULADDWI_OP1,
+  MULSUBWI_OP1,
+  MULADDX_OP1,
+  MULADDX_OP2,
+  MULSUBX_OP1,
+  MULSUBX_OP2,
+  MULADDXI_OP1,
+  MULSUBXI_OP1
 };
-} // end namespace MachineCombinerPattern
+
 } // end namespace llvm
 
 #endif
@@ -800,7 +800,7 @@ public:
   /// \param Patterns - Vector of possible combination patterns
   virtual bool getMachineCombinerPatterns(
       MachineInstr &Root,
-      SmallVectorImpl<MachineCombinerPattern::MC_PATTERN> &Patterns) const;
+      SmallVectorImpl<MachineCombinerPattern> &Patterns) const;
 
   /// Return true if the input \P Inst is part of a chain of dependent ops
   /// that are suitable for reassociation, otherwise return false.
@@ -832,7 +832,7 @@ public:
   /// \param InstrIdxForVirtReg - map of virtual register to instruction in
   /// InsInstr that defines it
   virtual void genAlternativeCodeSequence(
-      MachineInstr &Root, MachineCombinerPattern::MC_PATTERN Pattern,
+      MachineInstr &Root, MachineCombinerPattern Pattern,
       SmallVectorImpl<MachineInstr *> &InsInstrs,
       SmallVectorImpl<MachineInstr *> &DelInstrs,
       DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const;
@@ -840,7 +840,7 @@ public:
   /// Attempt to reassociate \P Root and \P Prev according to \P Pattern to
   /// reduce critical path length.
   void reassociateOps(MachineInstr &Root, MachineInstr &Prev,
-                      MachineCombinerPattern::MC_PATTERN Pattern,
+                      MachineCombinerPattern Pattern,
                       SmallVectorImpl<MachineInstr *> &InsInstrs,
                       SmallVectorImpl<MachineInstr *> &DelInstrs,
                       DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const;
@@ -336,7 +336,7 @@ bool MachineCombiner::combineInstructions(MachineBasicBlock *MBB) {
     auto &MI = *BlockIter++;
 
     DEBUG(dbgs() << "INSTR "; MI.dump(); dbgs() << "\n";);
-    SmallVector<MachineCombinerPattern::MC_PATTERN, 16> Patterns;
+    SmallVector<MachineCombinerPattern, 16> Patterns;
     // The motivating example is:
     //
     //     MUL  Other        MUL_op1 MUL_op2  Other
@@ -636,7 +636,7 @@ bool TargetInstrInfo::isReassociationCandidate(const MachineInstr &Inst,
 // that pattern.
 bool TargetInstrInfo::getMachineCombinerPatterns(
     MachineInstr &Root,
-    SmallVectorImpl<MachineCombinerPattern::MC_PATTERN> &Patterns) const {
+    SmallVectorImpl<MachineCombinerPattern> &Patterns) const {
 
   bool Commute;
   if (isReassociationCandidate(Root, Commute)) {
@@ -645,11 +645,11 @@ bool TargetInstrInfo::getMachineCombinerPatterns(
     // possibility for the Prev instruction in the sequence and let the
     // machine combiner decide if changing the operands is worthwhile.
    if (Commute) {
-      Patterns.push_back(MachineCombinerPattern::MC_REASSOC_AX_YB);
-      Patterns.push_back(MachineCombinerPattern::MC_REASSOC_XA_YB);
+      Patterns.push_back(MachineCombinerPattern::REASSOC_AX_YB);
+      Patterns.push_back(MachineCombinerPattern::REASSOC_XA_YB);
     } else {
-      Patterns.push_back(MachineCombinerPattern::MC_REASSOC_AX_BY);
-      Patterns.push_back(MachineCombinerPattern::MC_REASSOC_XA_BY);
+      Patterns.push_back(MachineCombinerPattern::REASSOC_AX_BY);
+      Patterns.push_back(MachineCombinerPattern::REASSOC_XA_BY);
     }
     return true;
   }
@@ -661,7 +661,7 @@ bool TargetInstrInfo::getMachineCombinerPatterns(
 /// See the above comments before getMachineCombinerPatterns().
 void TargetInstrInfo::reassociateOps(
     MachineInstr &Root, MachineInstr &Prev,
-    MachineCombinerPattern::MC_PATTERN Pattern,
+    MachineCombinerPattern Pattern,
     SmallVectorImpl<MachineInstr *> &InsInstrs,
     SmallVectorImpl<MachineInstr *> &DelInstrs,
     DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const {
@@ -681,10 +681,19 @@ void TargetInstrInfo::reassociateOps(
     { 2, 2, 1, 1 }
   };
 
-  MachineOperand &OpA = Prev.getOperand(OpIdx[Pattern][0]);
-  MachineOperand &OpB = Root.getOperand(OpIdx[Pattern][1]);
-  MachineOperand &OpX = Prev.getOperand(OpIdx[Pattern][2]);
-  MachineOperand &OpY = Root.getOperand(OpIdx[Pattern][3]);
+  int Row;
+  switch (Pattern) {
+  case MachineCombinerPattern::REASSOC_AX_BY: Row = 0; break;
+  case MachineCombinerPattern::REASSOC_AX_YB: Row = 1; break;
+  case MachineCombinerPattern::REASSOC_XA_BY: Row = 2; break;
+  case MachineCombinerPattern::REASSOC_XA_YB: Row = 3; break;
+  default: llvm_unreachable("unexpected MachineCombinerPattern");
+  }
+
+  MachineOperand &OpA = Prev.getOperand(OpIdx[Row][0]);
+  MachineOperand &OpB = Root.getOperand(OpIdx[Row][1]);
+  MachineOperand &OpX = Prev.getOperand(OpIdx[Row][2]);
+  MachineOperand &OpY = Root.getOperand(OpIdx[Row][3]);
   MachineOperand &OpC = Root.getOperand(0);
 
   unsigned RegA = OpA.getReg();
@@ -735,7 +744,7 @@ void TargetInstrInfo::reassociateOps(
 }
 
 void TargetInstrInfo::genAlternativeCodeSequence(
-    MachineInstr &Root, MachineCombinerPattern::MC_PATTERN Pattern,
+    MachineInstr &Root, MachineCombinerPattern Pattern,
     SmallVectorImpl<MachineInstr *> &InsInstrs,
     SmallVectorImpl<MachineInstr *> &DelInstrs,
     DenseMap<unsigned, unsigned> &InstIdxForVirtReg) const {
@@ -744,12 +753,12 @@ void TargetInstrInfo::genAlternativeCodeSequence(
   // Select the previous instruction in the sequence based on the input pattern.
   MachineInstr *Prev = nullptr;
   switch (Pattern) {
-  case MachineCombinerPattern::MC_REASSOC_AX_BY:
-  case MachineCombinerPattern::MC_REASSOC_XA_BY:
+  case MachineCombinerPattern::REASSOC_AX_BY:
+  case MachineCombinerPattern::REASSOC_XA_BY:
     Prev = MRI.getUniqueVRegDef(Root.getOperand(1).getReg());
     break;
-  case MachineCombinerPattern::MC_REASSOC_AX_YB:
-  case MachineCombinerPattern::MC_REASSOC_XA_YB:
+  case MachineCombinerPattern::REASSOC_AX_YB:
+  case MachineCombinerPattern::REASSOC_XA_YB:
     Prev = MRI.getUniqueVRegDef(Root.getOperand(2).getReg());
     break;
   default:
@@ -2495,7 +2495,7 @@ static bool canCombineWithMUL(MachineBasicBlock &MBB, MachineOperand &MO,
 
 bool AArch64InstrInfo::getMachineCombinerPatterns(
     MachineInstr &Root,
-    SmallVectorImpl<MachineCombinerPattern::MC_PATTERN> &Patterns) const {
+    SmallVectorImpl<MachineCombinerPattern> &Patterns) const {
   unsigned Opc = Root.getOpcode();
   MachineBasicBlock &MBB = *Root.getParent();
   bool Found = false;
@@ -2523,76 +2523,76 @@ bool AArch64InstrInfo::getMachineCombinerPatterns(
            "ADDWrr does not have register operands");
     if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDWrrr,
                           AArch64::WZR)) {
-      Patterns.push_back(MachineCombinerPattern::MC_MULADDW_OP1);
+      Patterns.push_back(MachineCombinerPattern::MULADDW_OP1);
       Found = true;
     }
     if (canCombineWithMUL(MBB, Root.getOperand(2), AArch64::MADDWrrr,
                           AArch64::WZR)) {
-      Patterns.push_back(MachineCombinerPattern::MC_MULADDW_OP2);
+      Patterns.push_back(MachineCombinerPattern::MULADDW_OP2);
       Found = true;
     }
     break;
   case AArch64::ADDXrr:
     if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDXrrr,
                           AArch64::XZR)) {
-      Patterns.push_back(MachineCombinerPattern::MC_MULADDX_OP1);
+      Patterns.push_back(MachineCombinerPattern::MULADDX_OP1);
       Found = true;
     }
     if (canCombineWithMUL(MBB, Root.getOperand(2), AArch64::MADDXrrr,
                           AArch64::XZR)) {
-      Patterns.push_back(MachineCombinerPattern::MC_MULADDX_OP2);
+      Patterns.push_back(MachineCombinerPattern::MULADDX_OP2);
       Found = true;
     }
     break;
   case AArch64::SUBWrr:
     if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDWrrr,
                           AArch64::WZR)) {
-      Patterns.push_back(MachineCombinerPattern::MC_MULSUBW_OP1);
+      Patterns.push_back(MachineCombinerPattern::MULSUBW_OP1);
       Found = true;
     }
     if (canCombineWithMUL(MBB, Root.getOperand(2), AArch64::MADDWrrr,
                           AArch64::WZR)) {
-      Patterns.push_back(MachineCombinerPattern::MC_MULSUBW_OP2);
+      Patterns.push_back(MachineCombinerPattern::MULSUBW_OP2);
       Found = true;
     }
     break;
   case AArch64::SUBXrr:
     if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDXrrr,
                           AArch64::XZR)) {
-      Patterns.push_back(MachineCombinerPattern::MC_MULSUBX_OP1);
+      Patterns.push_back(MachineCombinerPattern::MULSUBX_OP1);
       Found = true;
     }
     if (canCombineWithMUL(MBB, Root.getOperand(2), AArch64::MADDXrrr,
                           AArch64::XZR)) {
-      Patterns.push_back(MachineCombinerPattern::MC_MULSUBX_OP2);
+      Patterns.push_back(MachineCombinerPattern::MULSUBX_OP2);
       Found = true;
     }
     break;
   case AArch64::ADDWri:
     if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDWrrr,
                           AArch64::WZR)) {
-      Patterns.push_back(MachineCombinerPattern::MC_MULADDWI_OP1);
+      Patterns.push_back(MachineCombinerPattern::MULADDWI_OP1);
       Found = true;
     }
     break;
   case AArch64::ADDXri:
     if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDXrrr,
                           AArch64::XZR)) {
-      Patterns.push_back(MachineCombinerPattern::MC_MULADDXI_OP1);
+      Patterns.push_back(MachineCombinerPattern::MULADDXI_OP1);
       Found = true;
     }
     break;
   case AArch64::SUBWri:
     if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDWrrr,
                           AArch64::WZR)) {
-      Patterns.push_back(MachineCombinerPattern::MC_MULSUBWI_OP1);
+      Patterns.push_back(MachineCombinerPattern::MULSUBWI_OP1);
       Found = true;
     }
     break;
   case AArch64::SUBXri:
     if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDXrrr,
                           AArch64::XZR)) {
-      Patterns.push_back(MachineCombinerPattern::MC_MULSUBXI_OP1);
+      Patterns.push_back(MachineCombinerPattern::MULSUBXI_OP1);
       Found = true;
     }
     break;
@@ -2699,7 +2699,7 @@ static MachineInstr *genMaddR(MachineFunction &MF, MachineRegisterInfo &MRI,
 /// this function generates the instructions that could replace the
 /// original code sequence
 void AArch64InstrInfo::genAlternativeCodeSequence(
-    MachineInstr &Root, MachineCombinerPattern::MC_PATTERN Pattern,
+    MachineInstr &Root, MachineCombinerPattern Pattern,
     SmallVectorImpl<MachineInstr *> &InsInstrs,
     SmallVectorImpl<MachineInstr *> &DelInstrs,
     DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const {
@@ -2715,13 +2715,13 @@ void AArch64InstrInfo::genAlternativeCodeSequence(
   default:
     // signal error.
     break;
-  case MachineCombinerPattern::MC_MULADDW_OP1:
-  case MachineCombinerPattern::MC_MULADDX_OP1:
+  case MachineCombinerPattern::MULADDW_OP1:
+  case MachineCombinerPattern::MULADDX_OP1:
     // MUL I=A,B,0
     // ADD R,I,C
     // ==> MADD R,A,B,C
     // --- Create(MADD);
-    if (Pattern == MachineCombinerPattern::MC_MULADDW_OP1) {
+    if (Pattern == MachineCombinerPattern::MULADDW_OP1) {
       Opc = AArch64::MADDWrrr;
       RC = &AArch64::GPR32RegClass;
     } else {
@@ -2730,13 +2730,13 @@ void AArch64InstrInfo::genAlternativeCodeSequence(
     }
     MUL = genMadd(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC);
     break;
-  case MachineCombinerPattern::MC_MULADDW_OP2:
-  case MachineCombinerPattern::MC_MULADDX_OP2:
+  case MachineCombinerPattern::MULADDW_OP2:
+  case MachineCombinerPattern::MULADDX_OP2:
     // MUL I=A,B,0
     // ADD R,C,I
     // ==> MADD R,A,B,C
     // --- Create(MADD);
-    if (Pattern == MachineCombinerPattern::MC_MULADDW_OP2) {
+    if (Pattern == MachineCombinerPattern::MULADDW_OP2) {
       Opc = AArch64::MADDWrrr;
       RC = &AArch64::GPR32RegClass;
     } else {
@@ -2745,8 +2745,8 @@ void AArch64InstrInfo::genAlternativeCodeSequence(
     }
     MUL = genMadd(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
     break;
-  case MachineCombinerPattern::MC_MULADDWI_OP1:
-  case MachineCombinerPattern::MC_MULADDXI_OP1: {
+  case MachineCombinerPattern::MULADDWI_OP1:
+  case MachineCombinerPattern::MULADDXI_OP1: {
     // MUL I=A,B,0
     // ADD R,I,Imm
     // ==> ORR V, ZR, Imm
@@ -2754,7 +2754,7 @@ void AArch64InstrInfo::genAlternativeCodeSequence(
     // --- Create(MADD);
     const TargetRegisterClass *OrrRC;
     unsigned BitSize, OrrOpc, ZeroReg;
-    if (Pattern == MachineCombinerPattern::MC_MULADDWI_OP1) {
+    if (Pattern == MachineCombinerPattern::MULADDWI_OP1) {
       OrrOpc = AArch64::ORRWri;
       OrrRC = &AArch64::GPR32spRegClass;
       BitSize = 32;
@@ -2789,8 +2789,8 @@ void AArch64InstrInfo::genAlternativeCodeSequence(
     }
     break;
   }
-  case MachineCombinerPattern::MC_MULSUBW_OP1:
-  case MachineCombinerPattern::MC_MULSUBX_OP1: {
+  case MachineCombinerPattern::MULSUBW_OP1:
+  case MachineCombinerPattern::MULSUBX_OP1: {
     // MUL I=A,B,0
     // SUB R,I, C
     // ==> SUB V, 0, C
@@ -2798,7 +2798,7 @@ void AArch64InstrInfo::genAlternativeCodeSequence(
     // --- Create(MADD);
     const TargetRegisterClass *SubRC;
     unsigned SubOpc, ZeroReg;
-    if (Pattern == MachineCombinerPattern::MC_MULSUBW_OP1) {
+    if (Pattern == MachineCombinerPattern::MULSUBW_OP1) {
       SubOpc = AArch64::SUBWrr;
       SubRC = &AArch64::GPR32spRegClass;
       ZeroReg = AArch64::WZR;
@@ -2822,13 +2822,13 @@ void AArch64InstrInfo::genAlternativeCodeSequence(
     MUL = genMaddR(MF, MRI, TII, Root, InsInstrs, 1, Opc, NewVR, RC);
     break;
   }
-  case MachineCombinerPattern::MC_MULSUBW_OP2:
-  case MachineCombinerPattern::MC_MULSUBX_OP2:
+  case MachineCombinerPattern::MULSUBW_OP2:
+  case MachineCombinerPattern::MULSUBX_OP2:
     // MUL I=A,B,0
     // SUB R,C,I
     // ==> MSUB R,A,B,C (computes C - A*B)
     // --- Create(MSUB);
-    if (Pattern == MachineCombinerPattern::MC_MULSUBW_OP2) {
+    if (Pattern == MachineCombinerPattern::MULSUBW_OP2) {
       Opc = AArch64::MSUBWrrr;
       RC = &AArch64::GPR32RegClass;
     } else {
@@ -2837,8 +2837,8 @@ void AArch64InstrInfo::genAlternativeCodeSequence(
     }
     MUL = genMadd(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
     break;
-  case MachineCombinerPattern::MC_MULSUBWI_OP1:
-  case MachineCombinerPattern::MC_MULSUBXI_OP1: {
+  case MachineCombinerPattern::MULSUBWI_OP1:
+  case MachineCombinerPattern::MULSUBXI_OP1: {
     // MUL I=A,B,0
     // SUB R,I, Imm
     // ==> ORR V, ZR, -Imm
@@ -2846,7 +2846,7 @@ void AArch64InstrInfo::genAlternativeCodeSequence(
     // --- Create(MADD);
     const TargetRegisterClass *OrrRC;
     unsigned BitSize, OrrOpc, ZeroReg;
-    if (Pattern == MachineCombinerPattern::MC_MULSUBWI_OP1) {
+    if (Pattern == MachineCombinerPattern::MULSUBWI_OP1) {
      OrrOpc = AArch64::ORRWri;
       OrrRC = &AArch64::GPR32spRegClass;
       BitSize = 32;
@@ -167,13 +167,13 @@ public:
   /// for an instruction chain ending in <Root>. All potential patterns are
   /// listed in the <Patterns> array.
   bool getMachineCombinerPatterns(MachineInstr &Root,
-                  SmallVectorImpl<MachineCombinerPattern::MC_PATTERN> &Patterns)
+                  SmallVectorImpl<MachineCombinerPattern> &Patterns)
       const override;
 
   /// When getMachineCombinerPatterns() finds patterns, this function generates
   /// the instructions that could replace the original code sequence
   void genAlternativeCodeSequence(
-      MachineInstr &Root, MachineCombinerPattern::MC_PATTERN Pattern,
+      MachineInstr &Root, MachineCombinerPattern Pattern,
       SmallVectorImpl<MachineInstr *> &InsInstrs,
       SmallVectorImpl<MachineInstr *> &DelInstrs,
       DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const override;
@@ -230,7 +230,7 @@ bool PPCInstrInfo::isAssociativeAndCommutative(const MachineInstr &Inst) const {
 
 bool PPCInstrInfo::getMachineCombinerPatterns(
     MachineInstr &Root,
-    SmallVectorImpl<MachineCombinerPattern::MC_PATTERN> &Patterns) const {
+    SmallVectorImpl<MachineCombinerPattern> &Patterns) const {
   // Using the machine combiner in this way is potentially expensive, so
   // restrict to when aggressive optimizations are desired.
   if (Subtarget.getTargetMachine().getOptLevel() != CodeGenOpt::Aggressive)
@@ -145,7 +145,7 @@ public:
   /// output in the <Pattern> array.
   bool getMachineCombinerPatterns(
       MachineInstr &Root,
-      SmallVectorImpl<MachineCombinerPattern::MC_PATTERN> &P) const override;
+      SmallVectorImpl<MachineCombinerPattern> &P) const override;
 
   bool isAssociativeAndCommutative(const MachineInstr &Inst) const override;
 