From 387e66e79fc368760418f7394a6e93eeeb3f0ec8 Mon Sep 17 00:00:00 2001
From: Sanjay Patel
Date: Thu, 5 Nov 2015 19:34:57 +0000
Subject: [PATCH] replace MachineCombinerPattern namespace and enum with enum class; NFCI

Also, remove an enum hack where enum values were used as indexes into an array.

We may want to make this a real class to allow pattern-based queries/customization (D13417).

llvm-svn: 252196
---
 .../llvm/CodeGen/MachineCombinerPattern.h     | 45 ++++++-------
 llvm/include/llvm/Target/TargetInstrInfo.h    |  6 +-
 llvm/lib/CodeGen/MachineCombiner.cpp          |  2 +-
 llvm/lib/CodeGen/TargetInstrInfo.cpp          | 39 ++++++-----
 llvm/lib/Target/AArch64/AArch64InstrInfo.cpp  | 64 +++++++++----------
 llvm/lib/Target/AArch64/AArch64InstrInfo.h    |  4 +-
 llvm/lib/Target/PowerPC/PPCInstrInfo.cpp      |  2 +-
 llvm/lib/Target/PowerPC/PPCInstrInfo.h        |  2 +-
 8 files changed, 84 insertions(+), 80 deletions(-)

diff --git a/llvm/include/llvm/CodeGen/MachineCombinerPattern.h b/llvm/include/llvm/CodeGen/MachineCombinerPattern.h
index d8462888ec77..f3891227746f 100644
--- a/llvm/include/llvm/CodeGen/MachineCombinerPattern.h
+++ b/llvm/include/llvm/CodeGen/MachineCombinerPattern.h
@@ -17,35 +17,30 @@
 namespace llvm {
 
-/// Enumeration of instruction pattern supported by machine combiner
-///
-///
-namespace MachineCombinerPattern {
-// Forward declaration
-enum MC_PATTERN : int {
+/// These are instruction patterns matched by the machine combiner pass.
+enum class MachineCombinerPattern {
   // These are commutative variants for reassociating a computation chain. See
   // the comments before getMachineCombinerPatterns() in TargetInstrInfo.cpp.
-  MC_REASSOC_AX_BY = 0,
-  MC_REASSOC_AX_YB = 1,
-  MC_REASSOC_XA_BY = 2,
-  MC_REASSOC_XA_YB = 3,
+  REASSOC_AX_BY,
+  REASSOC_AX_YB,
+  REASSOC_XA_BY,
+  REASSOC_XA_YB,
 
-  /// Enumeration of instruction pattern supported by AArch64 machine combiner
-  MC_NONE,
-  MC_MULADDW_OP1,
-  MC_MULADDW_OP2,
-  MC_MULSUBW_OP1,
-  MC_MULSUBW_OP2,
-  MC_MULADDWI_OP1,
-  MC_MULSUBWI_OP1,
-  MC_MULADDX_OP1,
-  MC_MULADDX_OP2,
-  MC_MULSUBX_OP1,
-  MC_MULSUBX_OP2,
-  MC_MULADDXI_OP1,
-  MC_MULSUBXI_OP1
+  // These are multiply-add patterns matched by the AArch64 machine combiner.
+  MULADDW_OP1,
+  MULADDW_OP2,
+  MULSUBW_OP1,
+  MULSUBW_OP2,
+  MULADDWI_OP1,
+  MULSUBWI_OP1,
+  MULADDX_OP1,
+  MULADDX_OP2,
+  MULSUBX_OP1,
+  MULSUBX_OP2,
+  MULADDXI_OP1,
+  MULSUBXI_OP1
 };
-} // end namespace MachineCombinerPattern
+
 
 } // end namespace llvm
 
 #endif
diff --git a/llvm/include/llvm/Target/TargetInstrInfo.h b/llvm/include/llvm/Target/TargetInstrInfo.h
index 0a35d4178d9c..0cebcf1c6b5d 100644
--- a/llvm/include/llvm/Target/TargetInstrInfo.h
+++ b/llvm/include/llvm/Target/TargetInstrInfo.h
@@ -800,7 +800,7 @@ public:
   /// \param Patterns - Vector of possible combination patterns
   virtual bool getMachineCombinerPatterns(
       MachineInstr &Root,
-      SmallVectorImpl<MachineCombinerPattern::MC_PATTERN> &Patterns) const;
+      SmallVectorImpl<MachineCombinerPattern> &Patterns) const;
 
   /// Return true if the input \P Inst is part of a chain of dependent ops
   /// that are suitable for reassociation, otherwise return false.
@@ -832,7 +832,7 @@ public:
   /// \param InstrIdxForVirtReg - map of virtual register to instruction in
   /// InsInstr that defines it
   virtual void genAlternativeCodeSequence(
-      MachineInstr &Root, MachineCombinerPattern::MC_PATTERN Pattern,
+      MachineInstr &Root, MachineCombinerPattern Pattern,
       SmallVectorImpl<MachineInstr *> &InsInstrs,
       SmallVectorImpl<MachineInstr *> &DelInstrs,
       DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const;
@@ -840,7 +840,7 @@ public:
   /// Attempt to reassociate \P Root and \P Prev according to \P Pattern to
   /// reduce critical path length.
   void reassociateOps(MachineInstr &Root, MachineInstr &Prev,
-                      MachineCombinerPattern::MC_PATTERN Pattern,
+                      MachineCombinerPattern Pattern,
                       SmallVectorImpl<MachineInstr *> &InsInstrs,
                       SmallVectorImpl<MachineInstr *> &DelInstrs,
                       DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const;
diff --git a/llvm/lib/CodeGen/MachineCombiner.cpp b/llvm/lib/CodeGen/MachineCombiner.cpp
index bfa5a6afc5f4..11b6a79a8b1d 100644
--- a/llvm/lib/CodeGen/MachineCombiner.cpp
+++ b/llvm/lib/CodeGen/MachineCombiner.cpp
@@ -336,7 +336,7 @@ bool MachineCombiner::combineInstructions(MachineBasicBlock *MBB) {
     auto &MI = *BlockIter++;
 
     DEBUG(dbgs() << "INSTR "; MI.dump(); dbgs() << "\n";);
-    SmallVector<MachineCombinerPattern::MC_PATTERN, 16> Patterns;
+    SmallVector<MachineCombinerPattern, 16> Patterns;
     // The motivating example is:
     //
     //     MUL  Other        MUL_op1  MUL_op2  Other
diff --git a/llvm/lib/CodeGen/TargetInstrInfo.cpp b/llvm/lib/CodeGen/TargetInstrInfo.cpp
index 44d3b61c9f16..6eaf991ac700 100644
--- a/llvm/lib/CodeGen/TargetInstrInfo.cpp
+++ b/llvm/lib/CodeGen/TargetInstrInfo.cpp
@@ -636,7 +636,7 @@ bool TargetInstrInfo::isReassociationCandidate(const MachineInstr &Inst,
 // that pattern.
 bool TargetInstrInfo::getMachineCombinerPatterns(
     MachineInstr &Root,
-    SmallVectorImpl<MachineCombinerPattern::MC_PATTERN> &Patterns) const {
+    SmallVectorImpl<MachineCombinerPattern> &Patterns) const {
   bool Commute;
   if (isReassociationCandidate(Root, Commute)) {
@@ -645,11 +645,11 @@ bool TargetInstrInfo::getMachineCombinerPatterns(
     // possibility for the Prev instruction in the sequence and let the
     // machine combiner decide if changing the operands is worthwhile.
     if (Commute) {
-      Patterns.push_back(MachineCombinerPattern::MC_REASSOC_AX_YB);
-      Patterns.push_back(MachineCombinerPattern::MC_REASSOC_XA_YB);
+      Patterns.push_back(MachineCombinerPattern::REASSOC_AX_YB);
+      Patterns.push_back(MachineCombinerPattern::REASSOC_XA_YB);
     } else {
-      Patterns.push_back(MachineCombinerPattern::MC_REASSOC_AX_BY);
-      Patterns.push_back(MachineCombinerPattern::MC_REASSOC_XA_BY);
+      Patterns.push_back(MachineCombinerPattern::REASSOC_AX_BY);
+      Patterns.push_back(MachineCombinerPattern::REASSOC_XA_BY);
     }
     return true;
   }
@@ -661,7 +661,7 @@ bool TargetInstrInfo::getMachineCombinerPatterns(
 /// See the above comments before getMachineCombinerPatterns().
 void TargetInstrInfo::reassociateOps(
     MachineInstr &Root, MachineInstr &Prev,
-    MachineCombinerPattern::MC_PATTERN Pattern,
+    MachineCombinerPattern Pattern,
     SmallVectorImpl<MachineInstr *> &InsInstrs,
     SmallVectorImpl<MachineInstr *> &DelInstrs,
     DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const {
@@ -681,10 +681,19 @@ void TargetInstrInfo::reassociateOps(
     { 2, 2, 1, 1 }
   };
 
-  MachineOperand &OpA = Prev.getOperand(OpIdx[Pattern][0]);
-  MachineOperand &OpB = Root.getOperand(OpIdx[Pattern][1]);
-  MachineOperand &OpX = Prev.getOperand(OpIdx[Pattern][2]);
-  MachineOperand &OpY = Root.getOperand(OpIdx[Pattern][3]);
+  int Row;
+  switch (Pattern) {
+  case MachineCombinerPattern::REASSOC_AX_BY: Row = 0; break;
+  case MachineCombinerPattern::REASSOC_AX_YB: Row = 1; break;
+  case MachineCombinerPattern::REASSOC_XA_BY: Row = 2; break;
+  case MachineCombinerPattern::REASSOC_XA_YB: Row = 3; break;
+  default: llvm_unreachable("unexpected MachineCombinerPattern");
+  }
+
+  MachineOperand &OpA = Prev.getOperand(OpIdx[Row][0]);
+  MachineOperand &OpB = Root.getOperand(OpIdx[Row][1]);
+  MachineOperand &OpX = Prev.getOperand(OpIdx[Row][2]);
+  MachineOperand &OpY = Root.getOperand(OpIdx[Row][3]);
   MachineOperand &OpC = Root.getOperand(0);
 
   unsigned RegA = OpA.getReg();
@@ -735,7 +744,7 @@ void TargetInstrInfo::reassociateOps(
 }
 
 void TargetInstrInfo::genAlternativeCodeSequence(
-    MachineInstr &Root, MachineCombinerPattern::MC_PATTERN Pattern,
+    MachineInstr &Root, MachineCombinerPattern Pattern,
     SmallVectorImpl<MachineInstr *> &InsInstrs,
     SmallVectorImpl<MachineInstr *> &DelInstrs,
     DenseMap<unsigned, unsigned> &InstIdxForVirtReg) const {
@@ -744,12 +753,12 @@ void TargetInstrInfo::genAlternativeCodeSequence(
   // Select the previous instruction in the sequence based on the input pattern.
   MachineInstr *Prev = nullptr;
   switch (Pattern) {
-  case MachineCombinerPattern::MC_REASSOC_AX_BY:
-  case MachineCombinerPattern::MC_REASSOC_XA_BY:
+  case MachineCombinerPattern::REASSOC_AX_BY:
+  case MachineCombinerPattern::REASSOC_XA_BY:
     Prev = MRI.getUniqueVRegDef(Root.getOperand(1).getReg());
     break;
-  case MachineCombinerPattern::MC_REASSOC_AX_YB:
-  case MachineCombinerPattern::MC_REASSOC_XA_YB:
+  case MachineCombinerPattern::REASSOC_AX_YB:
+  case MachineCombinerPattern::REASSOC_XA_YB:
     Prev = MRI.getUniqueVRegDef(Root.getOperand(2).getReg());
     break;
   default:
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
index fdae8d18ada3..3ef3c8b840cb 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
@@ -2495,7 +2495,7 @@ static bool canCombineWithMUL(MachineBasicBlock &MBB, MachineOperand &MO,
 bool AArch64InstrInfo::getMachineCombinerPatterns(
     MachineInstr &Root,
-    SmallVectorImpl<MachineCombinerPattern::MC_PATTERN> &Patterns) const {
+    SmallVectorImpl<MachineCombinerPattern> &Patterns) const {
   unsigned Opc = Root.getOpcode();
   MachineBasicBlock &MBB = *Root.getParent();
   bool Found = false;
@@ -2523,76 +2523,76 @@ bool AArch64InstrInfo::getMachineCombinerPatterns(
            "ADDWrr does not have register operands");
     if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDWrrr,
                           AArch64::WZR)) {
-      Patterns.push_back(MachineCombinerPattern::MC_MULADDW_OP1);
+      Patterns.push_back(MachineCombinerPattern::MULADDW_OP1);
       Found = true;
     }
     if (canCombineWithMUL(MBB, Root.getOperand(2), AArch64::MADDWrrr,
                           AArch64::WZR)) {
-      Patterns.push_back(MachineCombinerPattern::MC_MULADDW_OP2);
+      Patterns.push_back(MachineCombinerPattern::MULADDW_OP2);
       Found = true;
     }
     break;
   case AArch64::ADDXrr:
     if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDXrrr,
                           AArch64::XZR)) {
-      Patterns.push_back(MachineCombinerPattern::MC_MULADDX_OP1);
+      Patterns.push_back(MachineCombinerPattern::MULADDX_OP1);
       Found = true;
     }
     if (canCombineWithMUL(MBB, Root.getOperand(2), AArch64::MADDXrrr,
                           AArch64::XZR)) {
-      Patterns.push_back(MachineCombinerPattern::MC_MULADDX_OP2);
+      Patterns.push_back(MachineCombinerPattern::MULADDX_OP2);
       Found = true;
     }
     break;
   case AArch64::SUBWrr:
     if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDWrrr,
                           AArch64::WZR)) {
-      Patterns.push_back(MachineCombinerPattern::MC_MULSUBW_OP1);
+      Patterns.push_back(MachineCombinerPattern::MULSUBW_OP1);
       Found = true;
     }
     if (canCombineWithMUL(MBB, Root.getOperand(2), AArch64::MADDWrrr,
                           AArch64::WZR)) {
-      Patterns.push_back(MachineCombinerPattern::MC_MULSUBW_OP2);
+      Patterns.push_back(MachineCombinerPattern::MULSUBW_OP2);
       Found = true;
     }
     break;
   case AArch64::SUBXrr:
     if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDXrrr,
                           AArch64::XZR)) {
-      Patterns.push_back(MachineCombinerPattern::MC_MULSUBX_OP1);
+      Patterns.push_back(MachineCombinerPattern::MULSUBX_OP1);
       Found = true;
     }
     if (canCombineWithMUL(MBB, Root.getOperand(2), AArch64::MADDXrrr,
                           AArch64::XZR)) {
-      Patterns.push_back(MachineCombinerPattern::MC_MULSUBX_OP2);
+      Patterns.push_back(MachineCombinerPattern::MULSUBX_OP2);
       Found = true;
     }
     break;
   case AArch64::ADDWri:
     if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDWrrr,
                           AArch64::WZR)) {
-      Patterns.push_back(MachineCombinerPattern::MC_MULADDWI_OP1);
+      Patterns.push_back(MachineCombinerPattern::MULADDWI_OP1);
       Found = true;
     }
     break;
   case AArch64::ADDXri:
     if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDXrrr,
                           AArch64::XZR)) {
-      Patterns.push_back(MachineCombinerPattern::MC_MULADDXI_OP1);
+      Patterns.push_back(MachineCombinerPattern::MULADDXI_OP1);
       Found = true;
     }
     break;
   case AArch64::SUBWri:
     if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDWrrr,
                           AArch64::WZR)) {
-      Patterns.push_back(MachineCombinerPattern::MC_MULSUBWI_OP1);
+      Patterns.push_back(MachineCombinerPattern::MULSUBWI_OP1);
       Found = true;
     }
     break;
   case AArch64::SUBXri:
     if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDXrrr,
                           AArch64::XZR)) {
-      Patterns.push_back(MachineCombinerPattern::MC_MULSUBXI_OP1);
+      Patterns.push_back(MachineCombinerPattern::MULSUBXI_OP1);
       Found = true;
     }
     break;
@@ -2699,7 +2699,7 @@ static MachineInstr *genMaddR(MachineFunction &MF, MachineRegisterInfo &MRI,
 /// this function generates the instructions that could replace the
 /// original code sequence
 void AArch64InstrInfo::genAlternativeCodeSequence(
-    MachineInstr &Root, MachineCombinerPattern::MC_PATTERN Pattern,
+    MachineInstr &Root, MachineCombinerPattern Pattern,
     SmallVectorImpl<MachineInstr *> &InsInstrs,
     SmallVectorImpl<MachineInstr *> &DelInstrs,
     DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const {
@@ -2715,13 +2715,13 @@ void AArch64InstrInfo::genAlternativeCodeSequence(
   default:
     // signal error.
     break;
-  case MachineCombinerPattern::MC_MULADDW_OP1:
-  case MachineCombinerPattern::MC_MULADDX_OP1:
+  case MachineCombinerPattern::MULADDW_OP1:
+  case MachineCombinerPattern::MULADDX_OP1:
     // MUL I=A,B,0
     // ADD R,I,C
     // ==> MADD R,A,B,C
     // --- Create(MADD);
-    if (Pattern == MachineCombinerPattern::MC_MULADDW_OP1) {
+    if (Pattern == MachineCombinerPattern::MULADDW_OP1) {
       Opc = AArch64::MADDWrrr;
       RC = &AArch64::GPR32RegClass;
     } else {
@@ -2730,13 +2730,13 @@ void AArch64InstrInfo::genAlternativeCodeSequence(
     }
     MUL = genMadd(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC);
     break;
-  case MachineCombinerPattern::MC_MULADDW_OP2:
-  case MachineCombinerPattern::MC_MULADDX_OP2:
+  case MachineCombinerPattern::MULADDW_OP2:
+  case MachineCombinerPattern::MULADDX_OP2:
     // MUL I=A,B,0
     // ADD R,C,I
     // ==> MADD R,A,B,C
     // --- Create(MADD);
-    if (Pattern == MachineCombinerPattern::MC_MULADDW_OP2) {
+    if (Pattern == MachineCombinerPattern::MULADDW_OP2) {
       Opc = AArch64::MADDWrrr;
       RC = &AArch64::GPR32RegClass;
     } else {
@@ -2745,8 +2745,8 @@ void AArch64InstrInfo::genAlternativeCodeSequence(
     }
     MUL = genMadd(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
     break;
-  case MachineCombinerPattern::MC_MULADDWI_OP1:
-  case MachineCombinerPattern::MC_MULADDXI_OP1: {
+  case MachineCombinerPattern::MULADDWI_OP1:
+  case MachineCombinerPattern::MULADDXI_OP1: {
     // MUL I=A,B,0
     // ADD R,I,Imm
     // ==> ORR V, ZR, Imm
     // MADD R,A,B,V
     // --- Create(MADD);
     const TargetRegisterClass *OrrRC;
     unsigned BitSize, OrrOpc, ZeroReg;
-    if (Pattern == MachineCombinerPattern::MC_MULADDWI_OP1) {
+    if (Pattern == MachineCombinerPattern::MULADDWI_OP1) {
       OrrOpc = AArch64::ORRWri;
       OrrRC = &AArch64::GPR32spRegClass;
       BitSize = 32;
@@ -2789,8 +2789,8 @@ void AArch64InstrInfo::genAlternativeCodeSequence(
     }
     break;
   }
-  case MachineCombinerPattern::MC_MULSUBW_OP1:
-  case MachineCombinerPattern::MC_MULSUBX_OP1: {
+  case MachineCombinerPattern::MULSUBW_OP1:
+  case MachineCombinerPattern::MULSUBX_OP1: {
     // MUL I=A,B,0
     // SUB R,I, C
     // ==> SUB V, 0, C
     // MADD R,A,B,V
     // --- Create(MADD);
     const TargetRegisterClass *SubRC;
     unsigned SubOpc, ZeroReg;
-    if (Pattern == MachineCombinerPattern::MC_MULSUBW_OP1) {
+    if (Pattern == MachineCombinerPattern::MULSUBW_OP1) {
       SubOpc = AArch64::SUBWrr;
       SubRC = &AArch64::GPR32spRegClass;
       ZeroReg = AArch64::WZR;
@@ -2822,13 +2822,13 @@ void AArch64InstrInfo::genAlternativeCodeSequence(
     MUL = genMaddR(MF, MRI, TII, Root, InsInstrs, 1, Opc, NewVR, RC);
     break;
   }
-  case MachineCombinerPattern::MC_MULSUBW_OP2:
-  case MachineCombinerPattern::MC_MULSUBX_OP2:
+  case MachineCombinerPattern::MULSUBW_OP2:
+  case MachineCombinerPattern::MULSUBX_OP2:
     // MUL I=A,B,0
     // SUB R,C,I
     // ==> MSUB R,A,B,C (computes C - A*B)
     // --- Create(MSUB);
-    if (Pattern == MachineCombinerPattern::MC_MULSUBW_OP2) {
+    if (Pattern == MachineCombinerPattern::MULSUBW_OP2) {
       Opc = AArch64::MSUBWrrr;
       RC = &AArch64::GPR32RegClass;
     } else {
@@ -2837,8 +2837,8 @@ void AArch64InstrInfo::genAlternativeCodeSequence(
     }
     MUL = genMadd(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
     break;
-  case MachineCombinerPattern::MC_MULSUBWI_OP1:
-  case MachineCombinerPattern::MC_MULSUBXI_OP1: {
+  case MachineCombinerPattern::MULSUBWI_OP1:
+  case MachineCombinerPattern::MULSUBXI_OP1: {
     // MUL I=A,B,0
     // SUB R,I, Imm
     // ==> ORR V, ZR, -Imm
     // MADD R,A,B,V
     // --- Create(MADD);
     const TargetRegisterClass *OrrRC;
     unsigned BitSize, OrrOpc, ZeroReg;
-    if (Pattern == MachineCombinerPattern::MC_MULSUBWI_OP1) {
+    if (Pattern == MachineCombinerPattern::MULSUBWI_OP1) {
       OrrOpc = AArch64::ORRWri;
       OrrRC = &AArch64::GPR32spRegClass;
       BitSize = 32;
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.h b/llvm/lib/Target/AArch64/AArch64InstrInfo.h
index ce7a3058ef98..ae02822a32e6 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.h
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.h
@@ -167,13 +167,13 @@ public:
   /// for an instruction chain ending in <Root>. All potential patterns are
   /// listed in the <Patterns> array.
   bool getMachineCombinerPatterns(MachineInstr &Root,
-                                  SmallVectorImpl<MachineCombinerPattern::MC_PATTERN> &Patterns)
+                                  SmallVectorImpl<MachineCombinerPattern> &Patterns)
       const override;
 
   /// When getMachineCombinerPatterns() finds patterns, this function generates
   /// the instructions that could replace the original code sequence
   void genAlternativeCodeSequence(
-      MachineInstr &Root, MachineCombinerPattern::MC_PATTERN Pattern,
+      MachineInstr &Root, MachineCombinerPattern Pattern,
       SmallVectorImpl<MachineInstr *> &InsInstrs,
       SmallVectorImpl<MachineInstr *> &DelInstrs,
       DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const override;
diff --git a/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp b/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
index ae1b07bcd12a..51c85f6ea673 100644
--- a/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
+++ b/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
@@ -230,7 +230,7 @@ bool PPCInstrInfo::isAssociativeAndCommutative(const MachineInstr &Inst) const {
 bool PPCInstrInfo::getMachineCombinerPatterns(
     MachineInstr &Root,
-    SmallVectorImpl<MachineCombinerPattern::MC_PATTERN> &Patterns) const {
+    SmallVectorImpl<MachineCombinerPattern> &Patterns) const {
   // Using the machine combiner in this way is potentially expensive, so
   // restrict to when aggressive optimizations are desired.
   if (Subtarget.getTargetMachine().getOptLevel() != CodeGenOpt::Aggressive)
diff --git a/llvm/lib/Target/PowerPC/PPCInstrInfo.h b/llvm/lib/Target/PowerPC/PPCInstrInfo.h
index da4585f2df48..c3c3a480a6aa 100644
--- a/llvm/lib/Target/PowerPC/PPCInstrInfo.h
+++ b/llvm/lib/Target/PowerPC/PPCInstrInfo.h
@@ -145,7 +145,7 @@ public:
   /// output in the array.
   bool getMachineCombinerPatterns(
       MachineInstr &Root,
-      SmallVectorImpl<MachineCombinerPattern::MC_PATTERN> &P) const override;
+      SmallVectorImpl<MachineCombinerPattern> &P) const override;
 
   bool isAssociativeAndCommutative(const MachineInstr &Inst) const override;
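
Reviewer note (not part of the patch): the switch over Pattern added to TargetInstrInfo::reassociateOps() exists because a scoped enum no longer converts implicitly to an integer, so its values can no longer index the OpIdx table directly. The following is a minimal standalone sketch of that difference; it uses hypothetical names (OldPattern, NewPattern, rowFor) rather than the LLVM types, and is only an illustration of the language rule, not the pass itself.

// Sketch: unscoped enum values double as array indexes; enum class values do not.
#include <cassert>

enum OldPattern : int { OLD_AX_BY = 0, OLD_AX_YB = 1 }; // unscoped: converts to int
enum class NewPattern { AX_BY, AX_YB };                 // scoped: no implicit conversion

// With a scoped enum, an explicit mapping replaces direct indexing.
static int rowFor(NewPattern P) {
  switch (P) {
  case NewPattern::AX_BY: return 0;
  case NewPattern::AX_YB: return 1;
  }
  return -1; // not reachable for valid enumerators
}

int main() {
  unsigned OpIdx[2][4] = {{1, 1, 2, 2}, {1, 2, 2, 1}};

  // Old hack: the enumerator itself is the row index.
  assert(OpIdx[OLD_AX_YB][1] == 2);

  // New style: OpIdx[NewPattern::AX_YB] would not compile; go through the switch.
  assert(OpIdx[rowFor(NewPattern::AX_YB)][1] == 2);
  return 0;
}

This mirrors the Row switch introduced above and is the "enum hack" removal mentioned in the commit message.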