2015-11-11 05:38:26 +08:00
|
|
|
//===-------------- PPCMIPeephole.cpp - MI Peephole Cleanups -------------===//
|
|
|
|
//
|
2019-01-19 16:50:56 +08:00
|
|
|
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
|
|
|
|
// See https://llvm.org/LICENSE.txt for license information.
|
|
|
|
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
|
2015-11-11 05:38:26 +08:00
|
|
|
//
|
|
|
|
//===---------------------------------------------------------------------===//
|
|
|
|
//
|
|
|
|
// This pass performs peephole optimizations to clean up ugly code
|
|
|
|
// sequences at the MachineInstruction layer. It runs at the end of
|
|
|
|
// the SSA phases, following VSX swap removal. A pass of dead code
|
|
|
|
// elimination follows this one for quick clean-up of any dead
|
|
|
|
// instructions introduced here. Although we could do this as callbacks
|
|
|
|
// from the generic peephole pass, this would have a couple of bad
|
|
|
|
// effects: it might remove optimization opportunities for VSX swap
|
|
|
|
// removal, and it would miss cleanups made possible following VSX
|
|
|
|
// swap removal.
|
|
|
|
//
|
|
|
|
//===---------------------------------------------------------------------===//
|
|
|
|
|
Sink all InitializePasses.h includes
This file lists every pass in LLVM, and is included by Pass.h, which is
very popular. Every time we add, remove, or rename a pass in LLVM, it
caused lots of recompilation.
I found this fact by looking at this table, which is sorted by the
number of times a file was changed over the last 100,000 git commits
multiplied by the number of object files that depend on it in the
current checkout:
recompiles touches affected_files header
342380 95 3604 llvm/include/llvm/ADT/STLExtras.h
314730 234 1345 llvm/include/llvm/InitializePasses.h
307036 118 2602 llvm/include/llvm/ADT/APInt.h
213049 59 3611 llvm/include/llvm/Support/MathExtras.h
170422 47 3626 llvm/include/llvm/Support/Compiler.h
162225 45 3605 llvm/include/llvm/ADT/Optional.h
158319 63 2513 llvm/include/llvm/ADT/Triple.h
140322 39 3598 llvm/include/llvm/ADT/StringRef.h
137647 59 2333 llvm/include/llvm/Support/Error.h
131619 73 1803 llvm/include/llvm/Support/FileSystem.h
Before this change, touching InitializePasses.h would cause 1345 files
to recompile. After this change, touching it only causes 550 compiles in
an incremental rebuild.
Reviewers: bkramer, asbirlea, bollu, jdoerfert
Differential Revision: https://reviews.llvm.org/D70211
2019-11-14 05:15:01 +08:00
|
|
|
#include "MCTargetDesc/PPCPredicates.h"
|
2015-11-11 05:38:26 +08:00
|
|
|
#include "PPC.h"
|
|
|
|
#include "PPCInstrBuilder.h"
|
2017-06-06 19:49:48 +08:00
|
|
|
#include "PPCInstrInfo.h"
|
2019-07-06 02:38:09 +08:00
|
|
|
#include "PPCMachineFunctionInfo.h"
|
2015-11-11 05:38:26 +08:00
|
|
|
#include "PPCTargetMachine.h"
|
2017-09-20 00:14:37 +08:00
|
|
|
#include "llvm/ADT/Statistic.h"
|
2019-07-06 02:38:09 +08:00
|
|
|
#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
|
2017-09-20 00:14:37 +08:00
|
|
|
#include "llvm/CodeGen/MachineDominators.h"
|
2015-11-11 05:38:26 +08:00
|
|
|
#include "llvm/CodeGen/MachineFunctionPass.h"
|
|
|
|
#include "llvm/CodeGen/MachineInstrBuilder.h"
|
Sink all InitializePasses.h includes
This file lists every pass in LLVM, and is included by Pass.h, which is
very popular. Every time we add, remove, or rename a pass in LLVM, it
caused lots of recompilation.
I found this fact by looking at this table, which is sorted by the
number of times a file was changed over the last 100,000 git commits
multiplied by the number of object files that depend on it in the
current checkout:
recompiles touches affected_files header
342380 95 3604 llvm/include/llvm/ADT/STLExtras.h
314730 234 1345 llvm/include/llvm/InitializePasses.h
307036 118 2602 llvm/include/llvm/ADT/APInt.h
213049 59 3611 llvm/include/llvm/Support/MathExtras.h
170422 47 3626 llvm/include/llvm/Support/Compiler.h
162225 45 3605 llvm/include/llvm/ADT/Optional.h
158319 63 2513 llvm/include/llvm/ADT/Triple.h
140322 39 3598 llvm/include/llvm/ADT/StringRef.h
137647 59 2333 llvm/include/llvm/Support/Error.h
131619 73 1803 llvm/include/llvm/Support/FileSystem.h
Before this change, touching InitializePasses.h would cause 1345 files
to recompile. After this change, touching it only causes 550 compiles in
an incremental rebuild.
Reviewers: bkramer, asbirlea, bollu, jdoerfert
Differential Revision: https://reviews.llvm.org/D70211
2019-11-14 05:15:01 +08:00
|
|
|
#include "llvm/CodeGen/MachinePostDominators.h"
|
2015-11-11 05:38:26 +08:00
|
|
|
#include "llvm/CodeGen/MachineRegisterInfo.h"
|
Sink all InitializePasses.h includes
This file lists every pass in LLVM, and is included by Pass.h, which is
very popular. Every time we add, remove, or rename a pass in LLVM, it
caused lots of recompilation.
I found this fact by looking at this table, which is sorted by the
number of times a file was changed over the last 100,000 git commits
multiplied by the number of object files that depend on it in the
current checkout:
recompiles touches affected_files header
342380 95 3604 llvm/include/llvm/ADT/STLExtras.h
314730 234 1345 llvm/include/llvm/InitializePasses.h
307036 118 2602 llvm/include/llvm/ADT/APInt.h
213049 59 3611 llvm/include/llvm/Support/MathExtras.h
170422 47 3626 llvm/include/llvm/Support/Compiler.h
162225 45 3605 llvm/include/llvm/ADT/Optional.h
158319 63 2513 llvm/include/llvm/ADT/Triple.h
140322 39 3598 llvm/include/llvm/ADT/StringRef.h
137647 59 2333 llvm/include/llvm/Support/Error.h
131619 73 1803 llvm/include/llvm/Support/FileSystem.h
Before this change, touching InitializePasses.h would cause 1345 files
to recompile. After this change, touching it only causes 550 compiles in
an incremental rebuild.
Reviewers: bkramer, asbirlea, bollu, jdoerfert
Differential Revision: https://reviews.llvm.org/D70211
2019-11-14 05:15:01 +08:00
|
|
|
#include "llvm/InitializePasses.h"
|
2015-11-11 05:38:26 +08:00
|
|
|
#include "llvm/Support/Debug.h"
|
|
|
|
|
|
|
|
using namespace llvm;
|
|
|
|
|
|
|
|
#define DEBUG_TYPE "ppc-mi-peepholes"
|
|
|
|
|
2017-11-28 04:26:36 +08:00
|
|
|
// Pass counters, reported under -stats. Each STATISTIC records how often a
// particular peephole in this pass fired (or, for the "functions" counters,
// how many functions were processed).
STATISTIC(RemoveTOCSave, "Number of TOC saves removed");
STATISTIC(MultiTOCSaves,
          "Number of functions with multiple TOC saves that must be kept");
STATISTIC(NumTOCSavesInPrologue, "Number of TOC saves placed in the prologue");
STATISTIC(NumEliminatedSExt, "Number of eliminated sign-extensions");
STATISTIC(NumEliminatedZExt, "Number of eliminated zero-extensions");
STATISTIC(NumOptADDLIs, "Number of optimized ADD instruction fed by LI");
STATISTIC(NumConvertedToImmediateForm,
          "Number of instructions converted to their immediate form");
STATISTIC(NumFunctionsEnteredInMIPeephole,
          "Number of functions entered in PPC MI Peepholes");
// Counts iterations of the reg+reg -> reg+imm fixed-point loop in
// simplifyCode(), not individual conversions (see
// NumConvertedToImmediateForm for those).
STATISTIC(NumFixedPointIterations,
          "Number of fixed-point iterations converting reg-reg instructions "
          "to reg-imm ones");
STATISTIC(NumRotatesCollapsed,
          "Number of pairs of rotate left, clear left/right collapsed");
STATISTIC(NumEXTSWAndSLDICombined,
          "Number of pairs of EXTSW and SLDI combined as EXTSWSLI");
|
2017-12-15 15:27:53 +08:00
|
|
|
|
|
|
|
// When set (default true), iterate the reg+reg -> reg+imm conversion loop
// until no instruction changes (a fixed point) rather than doing one sweep.
static cl::opt<bool>
FixedPointRegToImm("ppc-reg-to-imm-fixed-point", cl::Hidden, cl::init(true),
                   cl::desc("Iterate to a fixed point when attempting to "
                            "convert reg-reg instructions to reg-imm"));

// Master switch (default true) for converting eligible reg+reg instructions
// fed by load-immediate into their reg+imm forms.
static cl::opt<bool>
ConvertRegReg("ppc-convert-rr-to-ri", cl::Hidden, cl::init(true),
              cl::desc("Convert eligible reg+reg instructions to reg+imm"));

// Off by default: enables removal of sign-extension instructions whose
// input is already known to be sign-extended.
static cl::opt<bool>
EnableSExtElimination("ppc-eliminate-signext",
                      cl::desc("enable elimination of sign-extensions"),
                      cl::init(false), cl::Hidden);

// Off by default: enables removal of zero-extension instructions whose
// input is already known to be zero-extended.
static cl::opt<bool>
EnableZExtElimination("ppc-eliminate-zeroext",
                      cl::desc("enable elimination of zero-extensions"),
                      cl::init(false), cl::Hidden);
|
2017-10-16 12:12:57 +08:00
|
|
|
|
2015-11-11 05:38:26 +08:00
|
|
|
namespace {
|
|
|
|
|
|
|
|
// MachineFunction pass that performs late SSA-level peephole cleanups for
// PowerPC (splat/swap simplification, TOC-save deduplication, reg+reg to
// reg+imm conversion, etc.). Runs after VSX swap removal; a DCE pass is
// expected to follow to sweep up instructions left dead here.
struct PPCMIPeephole : public MachineFunctionPass {

  static char ID;
  const PPCInstrInfo *TII;  // Target instruction info, set in initialize().
  MachineFunction *MF;      // Function currently being processed.
  MachineRegisterInfo *MRI; // Virtual-register def/use information for MF.

  PPCMIPeephole() : MachineFunctionPass(ID) {
    initializePPCMIPeepholePass(*PassRegistry::getPassRegistry());
  }

private:
  MachineDominatorTree *MDT;        // Used to order TOC saves by dominance.
  MachinePostDominatorTree *MPDT;   // Used to decide on prologue TOC saves.
  MachineBlockFrequencyInfo *MBFI;  // Block frequencies vs. entry frequency.
  uint64_t EntryFreq;               // Cached frequency of the entry block.

  // Initialize class variables.
  void initialize(MachineFunction &MFParm);

  // Perform peepholes.
  bool simplifyCode(void);

  // Merge pairs of compare + conditional-branch that test the same value.
  bool eliminateRedundantCompare(void);
  // Remove TOC saves marked redundant in the TOCSaves map.
  bool eliminateRedundantTOCSaves(std::map<MachineInstr *, bool> &TOCSaves);
  // Fold an EXTSW feeding an SLDI into a single EXTSWSLI.
  bool combineSEXTAndSLDI(MachineInstr &MI, MachineInstr *&ToErase);
  bool combineSEXTAndSHL(MachineInstr &MI, MachineInstr *&ToErase);
  // Emit RLDIC for jump-table lowering patterns.
  bool emitRLDICWhenLoweringJumpTables(MachineInstr &MI);
  // Record a TOC-save instruction and mark dominated saves redundant.
  void UpdateTOCSaves(std::map<MachineInstr *, bool> &TOCSaves,
                      MachineInstr *MI);

public:

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<MachineDominatorTree>();
    AU.addRequired<MachinePostDominatorTree>();
    AU.addRequired<MachineBlockFrequencyInfo>();
    AU.addPreserved<MachineDominatorTree>();
    AU.addPreserved<MachinePostDominatorTree>();
    AU.addPreserved<MachineBlockFrequencyInfo>();
    MachineFunctionPass::getAnalysisUsage(AU);
  }

  // Main entry point for this pass.
  bool runOnMachineFunction(MachineFunction &MF) override {
    if (skipFunction(MF.getFunction()))
      return false;
    initialize(MF);
    return simplifyCode();
  }
};
|
|
|
|
|
|
|
|
// Initialize class variables.
|
|
|
|
void PPCMIPeephole::initialize(MachineFunction &MFParm) {
|
|
|
|
MF = &MFParm;
|
|
|
|
MRI = &MF->getRegInfo();
|
2017-09-20 00:14:37 +08:00
|
|
|
MDT = &getAnalysis<MachineDominatorTree>();
|
2019-07-06 02:38:09 +08:00
|
|
|
MPDT = &getAnalysis<MachinePostDominatorTree>();
|
|
|
|
MBFI = &getAnalysis<MachineBlockFrequencyInfo>();
|
|
|
|
EntryFreq = MBFI->getEntryFreq();
|
2015-11-11 05:38:26 +08:00
|
|
|
TII = MF->getSubtarget<PPCSubtarget>().getInstrInfo();
|
2018-05-14 20:53:11 +08:00
|
|
|
LLVM_DEBUG(dbgs() << "*** PowerPC MI peephole pass ***\n\n");
|
|
|
|
LLVM_DEBUG(MF->dump());
|
2015-11-11 05:38:26 +08:00
|
|
|
}
|
|
|
|
|
2017-09-20 00:14:37 +08:00
|
|
|
// Return the instruction that defines the virtual register referenced by
// operand \p Op, or nullptr when \p Op is not a register operand or names a
// physical register.
static MachineInstr *getVRegDefOrNull(MachineOperand *Op,
                                      MachineRegisterInfo *MRI) {
  assert(Op && "Invalid Operand!");
  if (!Op->isReg())
    return nullptr;

  Register R = Op->getReg();
  // Physical registers have no single SSA def we can chase.
  return Register::isVirtualRegister(R) ? MRI->getVRegDef(R) : nullptr;
}
|
|
|
|
|
2017-10-16 12:12:57 +08:00
|
|
|
// This function returns number of known zero bits in output of MI
|
|
|
|
// starting from the most significant bit.
|
|
|
|
static unsigned
|
|
|
|
getKnownLeadingZeroCount(MachineInstr *MI, const PPCInstrInfo *TII) {
|
|
|
|
unsigned Opcode = MI->getOpcode();
|
|
|
|
if (Opcode == PPC::RLDICL || Opcode == PPC::RLDICLo ||
|
|
|
|
Opcode == PPC::RLDCL || Opcode == PPC::RLDCLo)
|
|
|
|
return MI->getOperand(3).getImm();
|
|
|
|
|
|
|
|
if ((Opcode == PPC::RLDIC || Opcode == PPC::RLDICo) &&
|
|
|
|
MI->getOperand(3).getImm() <= 63 - MI->getOperand(2).getImm())
|
|
|
|
return MI->getOperand(3).getImm();
|
|
|
|
|
|
|
|
if ((Opcode == PPC::RLWINM || Opcode == PPC::RLWINMo ||
|
|
|
|
Opcode == PPC::RLWNM || Opcode == PPC::RLWNMo ||
|
|
|
|
Opcode == PPC::RLWINM8 || Opcode == PPC::RLWNM8) &&
|
|
|
|
MI->getOperand(3).getImm() <= MI->getOperand(4).getImm())
|
|
|
|
return 32 + MI->getOperand(3).getImm();
|
|
|
|
|
|
|
|
if (Opcode == PPC::ANDIo) {
|
|
|
|
uint16_t Imm = MI->getOperand(2).getImm();
|
|
|
|
return 48 + countLeadingZeros(Imm);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (Opcode == PPC::CNTLZW || Opcode == PPC::CNTLZWo ||
|
|
|
|
Opcode == PPC::CNTTZW || Opcode == PPC::CNTTZWo ||
|
|
|
|
Opcode == PPC::CNTLZW8 || Opcode == PPC::CNTTZW8)
|
|
|
|
// The result ranges from 0 to 32.
|
|
|
|
return 58;
|
|
|
|
|
|
|
|
if (Opcode == PPC::CNTLZD || Opcode == PPC::CNTLZDo ||
|
|
|
|
Opcode == PPC::CNTTZD || Opcode == PPC::CNTTZDo)
|
|
|
|
// The result ranges from 0 to 64.
|
|
|
|
return 57;
|
|
|
|
|
|
|
|
if (Opcode == PPC::LHZ || Opcode == PPC::LHZX ||
|
|
|
|
Opcode == PPC::LHZ8 || Opcode == PPC::LHZX8 ||
|
|
|
|
Opcode == PPC::LHZU || Opcode == PPC::LHZUX ||
|
|
|
|
Opcode == PPC::LHZU8 || Opcode == PPC::LHZUX8)
|
|
|
|
return 48;
|
|
|
|
|
|
|
|
if (Opcode == PPC::LBZ || Opcode == PPC::LBZX ||
|
|
|
|
Opcode == PPC::LBZ8 || Opcode == PPC::LBZX8 ||
|
|
|
|
Opcode == PPC::LBZU || Opcode == PPC::LBZUX ||
|
|
|
|
Opcode == PPC::LBZU8 || Opcode == PPC::LBZUX8)
|
|
|
|
return 56;
|
|
|
|
|
|
|
|
if (TII->isZeroExtended(*MI))
|
|
|
|
return 32;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2017-11-28 04:26:36 +08:00
|
|
|
// This function maintains a map for the pairs <TOC Save Instr, Keep>
|
2018-06-13 16:54:13 +08:00
|
|
|
// Each time a new TOC save is encountered, it checks if any of the existing
|
|
|
|
// ones are dominated by the new one. If so, it marks the existing one as
|
2017-11-28 04:26:36 +08:00
|
|
|
// redundant by setting it's entry in the map as false. It then adds the new
|
|
|
|
// instruction to the map with either true or false depending on if any
|
2018-06-13 16:54:13 +08:00
|
|
|
// existing instructions dominated the new one.
|
2017-11-28 04:26:36 +08:00
|
|
|
void PPCMIPeephole::UpdateTOCSaves(
|
|
|
|
std::map<MachineInstr *, bool> &TOCSaves, MachineInstr *MI) {
|
|
|
|
assert(TII->isTOCSaveMI(*MI) && "Expecting a TOC save instruction here");
|
2019-07-06 02:38:09 +08:00
|
|
|
assert(MF->getSubtarget<PPCSubtarget>().isELFv2ABI() &&
|
|
|
|
"TOC-save removal only supported on ELFv2");
|
|
|
|
PPCFunctionInfo *FI = MF->getInfo<PPCFunctionInfo>();
|
|
|
|
|
|
|
|
MachineBasicBlock *Entry = &MF->front();
|
|
|
|
uint64_t CurrBlockFreq = MBFI->getBlockFreq(MI->getParent()).getFrequency();
|
|
|
|
|
|
|
|
// If the block in which the TOC save resides is in a block that
|
|
|
|
// post-dominates Entry, or a block that is hotter than entry (keep in mind
|
|
|
|
// that early MachineLICM has already run so the TOC save won't be hoisted)
|
|
|
|
// we can just do the save in the prologue.
|
|
|
|
if (CurrBlockFreq > EntryFreq || MPDT->dominates(MI->getParent(), Entry))
|
|
|
|
FI->setMustSaveTOC(true);
|
|
|
|
|
|
|
|
// If we are saving the TOC in the prologue, all the TOC saves can be removed
|
|
|
|
// from the code.
|
|
|
|
if (FI->mustSaveTOC()) {
|
|
|
|
for (auto &TOCSave : TOCSaves)
|
|
|
|
TOCSave.second = false;
|
|
|
|
// Add new instruction to map.
|
|
|
|
TOCSaves[MI] = false;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2017-11-28 04:26:36 +08:00
|
|
|
bool Keep = true;
|
|
|
|
for (auto It = TOCSaves.begin(); It != TOCSaves.end(); It++ ) {
|
|
|
|
MachineInstr *CurrInst = It->first;
|
2018-06-13 16:54:13 +08:00
|
|
|
// If new instruction dominates an existing one, mark existing one as
|
2017-11-28 04:26:36 +08:00
|
|
|
// redundant.
|
|
|
|
if (It->second && MDT->dominates(MI, CurrInst))
|
|
|
|
It->second = false;
|
|
|
|
// Check if the new instruction is redundant.
|
|
|
|
if (MDT->dominates(CurrInst, MI)) {
|
|
|
|
Keep = false;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
// Add new instruction to map.
|
|
|
|
TOCSaves[MI] = Keep;
|
|
|
|
}
|
|
|
|
|
2015-11-11 05:38:26 +08:00
|
|
|
// Perform peephole optimizations.
|
|
|
|
bool PPCMIPeephole::simplifyCode(void) {
|
|
|
|
bool Simplified = false;
|
|
|
|
MachineInstr* ToErase = nullptr;
|
2017-11-28 04:26:36 +08:00
|
|
|
std::map<MachineInstr *, bool> TOCSaves;
|
2018-03-23 23:28:15 +08:00
|
|
|
const TargetRegisterInfo *TRI = &TII->getRegisterInfo();
|
2017-12-15 15:27:53 +08:00
|
|
|
NumFunctionsEnteredInMIPeephole++;
|
|
|
|
if (ConvertRegReg) {
|
|
|
|
// Fixed-point conversion of reg/reg instructions fed by load-immediate
|
|
|
|
// into reg/imm instructions. FIXME: This is expensive, control it with
|
|
|
|
// an option.
|
|
|
|
bool SomethingChanged = false;
|
|
|
|
do {
|
|
|
|
NumFixedPointIterations++;
|
|
|
|
SomethingChanged = false;
|
|
|
|
for (MachineBasicBlock &MBB : *MF) {
|
|
|
|
for (MachineInstr &MI : MBB) {
|
2018-05-09 10:42:00 +08:00
|
|
|
if (MI.isDebugInstr())
|
2017-12-15 15:27:53 +08:00
|
|
|
continue;
|
|
|
|
|
|
|
|
if (TII->convertToImmediateForm(MI)) {
|
|
|
|
// We don't erase anything in case the def has other uses. Let DCE
|
|
|
|
// remove it if it can be removed.
|
2018-05-14 20:53:11 +08:00
|
|
|
LLVM_DEBUG(dbgs() << "Converted instruction to imm form: ");
|
|
|
|
LLVM_DEBUG(MI.dump());
|
2017-12-15 15:27:53 +08:00
|
|
|
NumConvertedToImmediateForm++;
|
|
|
|
SomethingChanged = true;
|
|
|
|
Simplified = true;
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
} while (SomethingChanged && FixedPointRegToImm);
|
|
|
|
}
|
|
|
|
|
2015-11-11 05:38:26 +08:00
|
|
|
for (MachineBasicBlock &MBB : *MF) {
|
|
|
|
for (MachineInstr &MI : MBB) {
|
|
|
|
|
|
|
|
// If the previous instruction was marked for elimination,
|
|
|
|
// remove it now.
|
|
|
|
if (ToErase) {
|
|
|
|
ToErase->eraseFromParent();
|
|
|
|
ToErase = nullptr;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ignore debug instructions.
|
2018-05-09 10:42:00 +08:00
|
|
|
if (MI.isDebugInstr())
|
2015-11-11 05:38:26 +08:00
|
|
|
continue;
|
|
|
|
|
|
|
|
// Per-opcode peepholes.
|
|
|
|
switch (MI.getOpcode()) {
|
|
|
|
|
|
|
|
default:
|
|
|
|
break;
|
|
|
|
|
2017-11-28 04:26:36 +08:00
|
|
|
case PPC::STD: {
|
|
|
|
MachineFrameInfo &MFI = MF->getFrameInfo();
|
|
|
|
if (MFI.hasVarSizedObjects() ||
|
|
|
|
!MF->getSubtarget<PPCSubtarget>().isELFv2ABI())
|
|
|
|
break;
|
|
|
|
// When encountering a TOC save instruction, call UpdateTOCSaves
|
2018-06-13 16:54:13 +08:00
|
|
|
// to add it to the TOCSaves map and mark any existing TOC saves
|
2017-11-28 04:26:36 +08:00
|
|
|
// it dominates as redundant.
|
|
|
|
if (TII->isTOCSaveMI(MI))
|
|
|
|
UpdateTOCSaves(TOCSaves, &MI);
|
|
|
|
break;
|
|
|
|
}
|
2015-11-11 05:38:26 +08:00
|
|
|
case PPC::XXPERMDI: {
|
|
|
|
// Perform simplifications of 2x64 vector swaps and splats.
|
|
|
|
// A swap is identified by an immediate value of 2, and a splat
|
|
|
|
// is identified by an immediate value of 0 or 3.
|
|
|
|
int Immed = MI.getOperand(3).getImm();
|
|
|
|
|
|
|
|
if (Immed != 1) {
|
|
|
|
|
|
|
|
// For each of these simplifications, we need the two source
|
|
|
|
// regs to match. Unfortunately, MachineCSE ignores COPY and
|
|
|
|
// SUBREG_TO_REG, so for example we can see
|
|
|
|
// XXPERMDI t, SUBREG_TO_REG(s), SUBREG_TO_REG(s), immed.
|
|
|
|
// We have to look through chains of COPY and SUBREG_TO_REG
|
|
|
|
// to find the real source values for comparison.
|
2017-12-15 15:27:53 +08:00
|
|
|
unsigned TrueReg1 =
|
2018-03-23 23:28:15 +08:00
|
|
|
TRI->lookThruCopyLike(MI.getOperand(1).getReg(), MRI);
|
2017-12-15 15:27:53 +08:00
|
|
|
unsigned TrueReg2 =
|
2018-03-23 23:28:15 +08:00
|
|
|
TRI->lookThruCopyLike(MI.getOperand(2).getReg(), MRI);
|
2015-11-11 05:38:26 +08:00
|
|
|
|
2019-08-02 07:27:28 +08:00
|
|
|
if (TrueReg1 == TrueReg2 && Register::isVirtualRegister(TrueReg1)) {
|
2015-11-11 05:38:26 +08:00
|
|
|
MachineInstr *DefMI = MRI->getVRegDef(TrueReg1);
|
2016-12-06 19:47:14 +08:00
|
|
|
unsigned DefOpc = DefMI ? DefMI->getOpcode() : 0;
|
|
|
|
|
|
|
|
// If this is a splat fed by a splatting load, the splat is
|
|
|
|
// redundant. Replace with a copy. This doesn't happen directly due
|
|
|
|
// to code in PPCDAGToDAGISel.cpp, but it can happen when converting
|
|
|
|
// a load of a double to a vector of 64-bit integers.
|
|
|
|
auto isConversionOfLoadAndSplat = [=]() -> bool {
|
|
|
|
if (DefOpc != PPC::XVCVDPSXDS && DefOpc != PPC::XVCVDPUXDS)
|
|
|
|
return false;
|
2017-12-15 15:27:53 +08:00
|
|
|
unsigned DefReg =
|
2018-03-23 23:28:15 +08:00
|
|
|
TRI->lookThruCopyLike(DefMI->getOperand(1).getReg(), MRI);
|
2019-08-02 07:27:28 +08:00
|
|
|
if (Register::isVirtualRegister(DefReg)) {
|
2016-12-06 19:47:14 +08:00
|
|
|
MachineInstr *LoadMI = MRI->getVRegDef(DefReg);
|
|
|
|
if (LoadMI && LoadMI->getOpcode() == PPC::LXVDSX)
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
return false;
|
|
|
|
};
|
|
|
|
if (DefMI && (Immed == 0 || Immed == 3)) {
|
|
|
|
if (DefOpc == PPC::LXVDSX || isConversionOfLoadAndSplat()) {
|
2018-05-14 20:53:11 +08:00
|
|
|
LLVM_DEBUG(dbgs() << "Optimizing load-and-splat/splat "
|
|
|
|
"to load-and-splat/copy: ");
|
|
|
|
LLVM_DEBUG(MI.dump());
|
2017-01-13 17:58:52 +08:00
|
|
|
BuildMI(MBB, &MI, MI.getDebugLoc(), TII->get(PPC::COPY),
|
|
|
|
MI.getOperand(0).getReg())
|
|
|
|
.add(MI.getOperand(1));
|
2016-12-06 19:47:14 +08:00
|
|
|
ToErase = &MI;
|
|
|
|
Simplified = true;
|
|
|
|
}
|
|
|
|
}
|
2015-11-11 05:38:26 +08:00
|
|
|
|
|
|
|
// If this is a splat or a swap fed by another splat, we
|
|
|
|
// can replace it with a copy.
|
2016-12-06 19:47:14 +08:00
|
|
|
if (DefOpc == PPC::XXPERMDI) {
|
2015-11-11 05:38:26 +08:00
|
|
|
unsigned FeedImmed = DefMI->getOperand(3).getImm();
|
2017-12-15 15:27:53 +08:00
|
|
|
unsigned FeedReg1 =
|
2018-03-23 23:28:15 +08:00
|
|
|
TRI->lookThruCopyLike(DefMI->getOperand(1).getReg(), MRI);
|
2017-12-15 15:27:53 +08:00
|
|
|
unsigned FeedReg2 =
|
2018-03-23 23:28:15 +08:00
|
|
|
TRI->lookThruCopyLike(DefMI->getOperand(2).getReg(), MRI);
|
2015-11-11 05:38:26 +08:00
|
|
|
|
|
|
|
if ((FeedImmed == 0 || FeedImmed == 3) && FeedReg1 == FeedReg2) {
|
2018-05-14 20:53:11 +08:00
|
|
|
LLVM_DEBUG(dbgs() << "Optimizing splat/swap or splat/splat "
|
|
|
|
"to splat/copy: ");
|
|
|
|
LLVM_DEBUG(MI.dump());
|
2017-01-13 17:58:52 +08:00
|
|
|
BuildMI(MBB, &MI, MI.getDebugLoc(), TII->get(PPC::COPY),
|
|
|
|
MI.getOperand(0).getReg())
|
|
|
|
.add(MI.getOperand(1));
|
2015-11-11 05:38:26 +08:00
|
|
|
ToErase = &MI;
|
|
|
|
Simplified = true;
|
|
|
|
}
|
|
|
|
|
|
|
|
// If this is a splat fed by a swap, we can simplify modify
|
|
|
|
// the splat to splat the other value from the swap's input
|
|
|
|
// parameter.
|
|
|
|
else if ((Immed == 0 || Immed == 3)
|
|
|
|
&& FeedImmed == 2 && FeedReg1 == FeedReg2) {
|
2018-05-14 20:53:11 +08:00
|
|
|
LLVM_DEBUG(dbgs() << "Optimizing swap/splat => splat: ");
|
|
|
|
LLVM_DEBUG(MI.dump());
|
2015-11-11 05:38:26 +08:00
|
|
|
MI.getOperand(1).setReg(DefMI->getOperand(1).getReg());
|
|
|
|
MI.getOperand(2).setReg(DefMI->getOperand(2).getReg());
|
|
|
|
MI.getOperand(3).setImm(3 - Immed);
|
|
|
|
Simplified = true;
|
|
|
|
}
|
|
|
|
|
|
|
|
// If this is a swap fed by a swap, we can replace it
|
|
|
|
// with a copy from the first swap's input.
|
|
|
|
else if (Immed == 2 && FeedImmed == 2 && FeedReg1 == FeedReg2) {
|
2018-05-14 20:53:11 +08:00
|
|
|
LLVM_DEBUG(dbgs() << "Optimizing swap/swap => copy: ");
|
|
|
|
LLVM_DEBUG(MI.dump());
|
2017-01-13 17:58:52 +08:00
|
|
|
BuildMI(MBB, &MI, MI.getDebugLoc(), TII->get(PPC::COPY),
|
|
|
|
MI.getOperand(0).getReg())
|
|
|
|
.add(DefMI->getOperand(1));
|
2015-11-11 05:38:26 +08:00
|
|
|
ToErase = &MI;
|
|
|
|
Simplified = true;
|
|
|
|
}
|
2016-12-06 19:47:14 +08:00
|
|
|
} else if ((Immed == 0 || Immed == 3) && DefOpc == PPC::XXPERMDIs &&
|
|
|
|
(DefMI->getOperand(2).getImm() == 0 ||
|
|
|
|
DefMI->getOperand(2).getImm() == 3)) {
|
2016-10-04 14:59:23 +08:00
|
|
|
// Splat fed by another splat - switch the output of the first
|
|
|
|
// and remove the second.
|
|
|
|
DefMI->getOperand(0).setReg(MI.getOperand(0).getReg());
|
|
|
|
ToErase = &MI;
|
|
|
|
Simplified = true;
|
2018-05-14 20:53:11 +08:00
|
|
|
LLVM_DEBUG(dbgs() << "Removing redundant splat: ");
|
|
|
|
LLVM_DEBUG(MI.dump());
|
2015-11-11 05:38:26 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
}
|
2016-10-04 14:59:23 +08:00
|
|
|
case PPC::VSPLTB:
|
|
|
|
case PPC::VSPLTH:
|
|
|
|
case PPC::XXSPLTW: {
|
|
|
|
unsigned MyOpcode = MI.getOpcode();
|
|
|
|
unsigned OpNo = MyOpcode == PPC::XXSPLTW ? 1 : 2;
|
2017-12-15 15:27:53 +08:00
|
|
|
unsigned TrueReg =
|
2018-03-23 23:28:15 +08:00
|
|
|
TRI->lookThruCopyLike(MI.getOperand(OpNo).getReg(), MRI);
|
2019-08-02 07:27:28 +08:00
|
|
|
if (!Register::isVirtualRegister(TrueReg))
|
2016-12-06 19:47:14 +08:00
|
|
|
break;
|
2016-10-04 14:59:23 +08:00
|
|
|
MachineInstr *DefMI = MRI->getVRegDef(TrueReg);
|
|
|
|
if (!DefMI)
|
|
|
|
break;
|
|
|
|
unsigned DefOpcode = DefMI->getOpcode();
|
2016-12-06 19:47:14 +08:00
|
|
|
auto isConvertOfSplat = [=]() -> bool {
|
|
|
|
if (DefOpcode != PPC::XVCVSPSXWS && DefOpcode != PPC::XVCVSPUXWS)
|
|
|
|
return false;
|
Apply llvm-prefer-register-over-unsigned from clang-tidy to LLVM
Summary:
This clang-tidy check is looking for unsigned integer variables whose initializer
starts with an implicit cast from llvm::Register and changes the type of the
variable to llvm::Register (dropping the llvm:: where possible).
Partial reverts in:
X86FrameLowering.cpp - Some functions return unsigned and arguably should be MCRegister
X86FixupLEAs.cpp - Some functions return unsigned and arguably should be MCRegister
X86FrameLowering.cpp - Some functions return unsigned and arguably should be MCRegister
HexagonBitSimplify.cpp - Function takes BitTracker::RegisterRef which appears to be unsigned&
MachineVerifier.cpp - Ambiguous operator==() given MCRegister and const Register
PPCFastISel.cpp - No Register::operator-=()
PeepholeOptimizer.cpp - TargetInstrInfo::optimizeLoadInstr() takes an unsigned&
MachineTraceMetrics.cpp - MachineTraceMetrics lacks a suitable constructor
Manual fixups in:
ARMFastISel.cpp - ARMEmitLoad() now takes a Register& instead of unsigned&
HexagonSplitDouble.cpp - Ternary operator was ambiguous between unsigned/Register
HexagonConstExtenders.cpp - Has a local class named Register, used llvm::Register instead of Register.
PPCFastISel.cpp - PPCEmitLoad() now takes a Register& instead of unsigned&
Depends on D65919
Reviewers: arsenm, bogner, craig.topper, RKSimon
Reviewed By: arsenm
Subscribers: RKSimon, craig.topper, lenary, aemerson, wuzish, jholewinski, MatzeB, qcolombet, dschuff, jyknight, dylanmckay, sdardis, nemanjai, jvesely, wdng, nhaehnle, sbc100, jgravelle-google, kristof.beyls, hiraditya, aheejin, kbarton, fedor.sergeev, javed.absar, asb, rbar, johnrusso, simoncook, apazos, sabuasal, niosHD, jrtc27, MaskRay, zzheng, edward-jones, atanasyan, rogfer01, MartinMosbeck, brucehoult, the_o, tpr, PkmX, jocewei, jsji, Petar.Avramovic, asbirlea, Jim, s.egerton, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D65962
llvm-svn: 369041
2019-08-16 03:22:08 +08:00
|
|
|
Register ConvReg = DefMI->getOperand(1).getReg();
|
2019-08-02 07:27:28 +08:00
|
|
|
if (!Register::isVirtualRegister(ConvReg))
|
2016-12-06 19:47:14 +08:00
|
|
|
return false;
|
|
|
|
MachineInstr *Splt = MRI->getVRegDef(ConvReg);
|
|
|
|
return Splt && (Splt->getOpcode() == PPC::LXVWSX ||
|
|
|
|
Splt->getOpcode() == PPC::XXSPLTW);
|
|
|
|
};
|
|
|
|
bool AlreadySplat = (MyOpcode == DefOpcode) ||
|
2016-10-04 14:59:23 +08:00
|
|
|
(MyOpcode == PPC::VSPLTB && DefOpcode == PPC::VSPLTBs) ||
|
|
|
|
(MyOpcode == PPC::VSPLTH && DefOpcode == PPC::VSPLTHs) ||
|
2016-12-06 19:47:14 +08:00
|
|
|
(MyOpcode == PPC::XXSPLTW && DefOpcode == PPC::XXSPLTWs) ||
|
|
|
|
(MyOpcode == PPC::XXSPLTW && DefOpcode == PPC::LXVWSX) ||
|
|
|
|
(MyOpcode == PPC::XXSPLTW && DefOpcode == PPC::MTVSRWS)||
|
|
|
|
(MyOpcode == PPC::XXSPLTW && isConvertOfSplat());
|
|
|
|
// If the instruction[s] that feed this splat have already splat
|
|
|
|
// the value, this splat is redundant.
|
|
|
|
if (AlreadySplat) {
|
2018-05-14 20:53:11 +08:00
|
|
|
LLVM_DEBUG(dbgs() << "Changing redundant splat to a copy: ");
|
|
|
|
LLVM_DEBUG(MI.dump());
|
2016-10-12 08:48:25 +08:00
|
|
|
BuildMI(MBB, &MI, MI.getDebugLoc(), TII->get(PPC::COPY),
|
|
|
|
MI.getOperand(0).getReg())
|
2017-01-13 17:58:52 +08:00
|
|
|
.add(MI.getOperand(OpNo));
|
2016-10-04 14:59:23 +08:00
|
|
|
ToErase = &MI;
|
|
|
|
Simplified = true;
|
|
|
|
}
|
|
|
|
// Splat fed by a shift. Usually when we align value to splat into
|
|
|
|
// vector element zero.
|
|
|
|
if (DefOpcode == PPC::XXSLDWI) {
|
Apply llvm-prefer-register-over-unsigned from clang-tidy to LLVM
Summary:
This clang-tidy check is looking for unsigned integer variables whose initializer
starts with an implicit cast from llvm::Register and changes the type of the
variable to llvm::Register (dropping the llvm:: where possible).
Partial reverts in:
X86FrameLowering.cpp - Some functions return unsigned and arguably should be MCRegister
X86FixupLEAs.cpp - Some functions return unsigned and arguably should be MCRegister
X86FrameLowering.cpp - Some functions return unsigned and arguably should be MCRegister
HexagonBitSimplify.cpp - Function takes BitTracker::RegisterRef which appears to be unsigned&
MachineVerifier.cpp - Ambiguous operator==() given MCRegister and const Register
PPCFastISel.cpp - No Register::operator-=()
PeepholeOptimizer.cpp - TargetInstrInfo::optimizeLoadInstr() takes an unsigned&
MachineTraceMetrics.cpp - MachineTraceMetrics lacks a suitable constructor
Manual fixups in:
ARMFastISel.cpp - ARMEmitLoad() now takes a Register& instead of unsigned&
HexagonSplitDouble.cpp - Ternary operator was ambiguous between unsigned/Register
HexagonConstExtenders.cpp - Has a local class named Register, used llvm::Register instead of Register.
PPCFastISel.cpp - PPCEmitLoad() now takes a Register& instead of unsigned&
Depends on D65919
Reviewers: arsenm, bogner, craig.topper, RKSimon
Reviewed By: arsenm
Subscribers: RKSimon, craig.topper, lenary, aemerson, wuzish, jholewinski, MatzeB, qcolombet, dschuff, jyknight, dylanmckay, sdardis, nemanjai, jvesely, wdng, nhaehnle, sbc100, jgravelle-google, kristof.beyls, hiraditya, aheejin, kbarton, fedor.sergeev, javed.absar, asb, rbar, johnrusso, simoncook, apazos, sabuasal, niosHD, jrtc27, MaskRay, zzheng, edward-jones, atanasyan, rogfer01, MartinMosbeck, brucehoult, the_o, tpr, PkmX, jocewei, jsji, Petar.Avramovic, asbirlea, Jim, s.egerton, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D65962
llvm-svn: 369041
2019-08-16 03:22:08 +08:00
|
|
|
Register ShiftRes = DefMI->getOperand(0).getReg();
|
|
|
|
Register ShiftOp1 = DefMI->getOperand(1).getReg();
|
|
|
|
Register ShiftOp2 = DefMI->getOperand(2).getReg();
|
2016-10-04 14:59:23 +08:00
|
|
|
unsigned ShiftImm = DefMI->getOperand(3).getImm();
|
|
|
|
unsigned SplatImm = MI.getOperand(2).getImm();
|
|
|
|
if (ShiftOp1 == ShiftOp2) {
|
|
|
|
unsigned NewElem = (SplatImm + ShiftImm) & 0x3;
|
|
|
|
if (MRI->hasOneNonDBGUse(ShiftRes)) {
|
2018-05-14 20:53:11 +08:00
|
|
|
LLVM_DEBUG(dbgs() << "Removing redundant shift: ");
|
|
|
|
LLVM_DEBUG(DefMI->dump());
|
2016-10-04 14:59:23 +08:00
|
|
|
ToErase = DefMI;
|
|
|
|
}
|
|
|
|
Simplified = true;
|
2018-05-14 20:53:11 +08:00
|
|
|
LLVM_DEBUG(dbgs() << "Changing splat immediate from " << SplatImm
|
|
|
|
<< " to " << NewElem << " in instruction: ");
|
|
|
|
LLVM_DEBUG(MI.dump());
|
2016-10-04 14:59:23 +08:00
|
|
|
MI.getOperand(1).setReg(ShiftOp1);
|
|
|
|
MI.getOperand(2).setImm(NewElem);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
}
|
2016-12-06 19:47:14 +08:00
|
|
|
case PPC::XVCVDPSP: {
|
|
|
|
// If this is a DP->SP conversion fed by an FRSP, the FRSP is redundant.
|
2017-12-15 15:27:53 +08:00
|
|
|
unsigned TrueReg =
|
2018-03-23 23:28:15 +08:00
|
|
|
TRI->lookThruCopyLike(MI.getOperand(1).getReg(), MRI);
|
2019-08-02 07:27:28 +08:00
|
|
|
if (!Register::isVirtualRegister(TrueReg))
|
2016-12-06 19:47:14 +08:00
|
|
|
break;
|
|
|
|
MachineInstr *DefMI = MRI->getVRegDef(TrueReg);
|
|
|
|
|
|
|
|
// This can occur when building a vector of single precision or integer
|
|
|
|
// values.
|
|
|
|
if (DefMI && DefMI->getOpcode() == PPC::XXPERMDI) {
|
2017-12-15 15:27:53 +08:00
|
|
|
unsigned DefsReg1 =
|
2018-03-23 23:28:15 +08:00
|
|
|
TRI->lookThruCopyLike(DefMI->getOperand(1).getReg(), MRI);
|
2017-12-15 15:27:53 +08:00
|
|
|
unsigned DefsReg2 =
|
2018-03-23 23:28:15 +08:00
|
|
|
TRI->lookThruCopyLike(DefMI->getOperand(2).getReg(), MRI);
|
2019-08-02 07:27:28 +08:00
|
|
|
if (!Register::isVirtualRegister(DefsReg1) ||
|
|
|
|
!Register::isVirtualRegister(DefsReg2))
|
2016-12-06 19:47:14 +08:00
|
|
|
break;
|
|
|
|
MachineInstr *P1 = MRI->getVRegDef(DefsReg1);
|
|
|
|
MachineInstr *P2 = MRI->getVRegDef(DefsReg2);
|
|
|
|
|
|
|
|
if (!P1 || !P2)
|
|
|
|
break;
|
|
|
|
|
|
|
|
// Remove the passed FRSP instruction if it only feeds this MI and
|
|
|
|
// set any uses of that FRSP (in this MI) to the source of the FRSP.
|
|
|
|
auto removeFRSPIfPossible = [&](MachineInstr *RoundInstr) {
|
|
|
|
if (RoundInstr->getOpcode() == PPC::FRSP &&
|
|
|
|
MRI->hasOneNonDBGUse(RoundInstr->getOperand(0).getReg())) {
|
|
|
|
Simplified = true;
|
Apply llvm-prefer-register-over-unsigned from clang-tidy to LLVM
Summary:
This clang-tidy check is looking for unsigned integer variables whose initializer
starts with an implicit cast from llvm::Register and changes the type of the
variable to llvm::Register (dropping the llvm:: where possible).
Partial reverts in:
X86FrameLowering.cpp - Some functions return unsigned and arguably should be MCRegister
X86FixupLEAs.cpp - Some functions return unsigned and arguably should be MCRegister
X86FrameLowering.cpp - Some functions return unsigned and arguably should be MCRegister
HexagonBitSimplify.cpp - Function takes BitTracker::RegisterRef which appears to be unsigned&
MachineVerifier.cpp - Ambiguous operator==() given MCRegister and const Register
PPCFastISel.cpp - No Register::operator-=()
PeepholeOptimizer.cpp - TargetInstrInfo::optimizeLoadInstr() takes an unsigned&
MachineTraceMetrics.cpp - MachineTraceMetrics lacks a suitable constructor
Manual fixups in:
ARMFastISel.cpp - ARMEmitLoad() now takes a Register& instead of unsigned&
HexagonSplitDouble.cpp - Ternary operator was ambiguous between unsigned/Register
HexagonConstExtenders.cpp - Has a local class named Register, used llvm::Register instead of Register.
PPCFastISel.cpp - PPCEmitLoad() now takes a Register& instead of unsigned&
Depends on D65919
Reviewers: arsenm, bogner, craig.topper, RKSimon
Reviewed By: arsenm
Subscribers: RKSimon, craig.topper, lenary, aemerson, wuzish, jholewinski, MatzeB, qcolombet, dschuff, jyknight, dylanmckay, sdardis, nemanjai, jvesely, wdng, nhaehnle, sbc100, jgravelle-google, kristof.beyls, hiraditya, aheejin, kbarton, fedor.sergeev, javed.absar, asb, rbar, johnrusso, simoncook, apazos, sabuasal, niosHD, jrtc27, MaskRay, zzheng, edward-jones, atanasyan, rogfer01, MartinMosbeck, brucehoult, the_o, tpr, PkmX, jocewei, jsji, Petar.Avramovic, asbirlea, Jim, s.egerton, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D65962
llvm-svn: 369041
2019-08-16 03:22:08 +08:00
|
|
|
Register ConvReg1 = RoundInstr->getOperand(1).getReg();
|
|
|
|
Register FRSPDefines = RoundInstr->getOperand(0).getReg();
|
2016-12-06 19:47:14 +08:00
|
|
|
MachineInstr &Use = *(MRI->use_instr_begin(FRSPDefines));
|
|
|
|
for (int i = 0, e = Use.getNumOperands(); i < e; ++i)
|
|
|
|
if (Use.getOperand(i).isReg() &&
|
|
|
|
Use.getOperand(i).getReg() == FRSPDefines)
|
|
|
|
Use.getOperand(i).setReg(ConvReg1);
|
2018-05-14 20:53:11 +08:00
|
|
|
LLVM_DEBUG(dbgs() << "Removing redundant FRSP:\n");
|
|
|
|
LLVM_DEBUG(RoundInstr->dump());
|
|
|
|
LLVM_DEBUG(dbgs() << "As it feeds instruction:\n");
|
|
|
|
LLVM_DEBUG(MI.dump());
|
|
|
|
LLVM_DEBUG(dbgs() << "Through instruction:\n");
|
|
|
|
LLVM_DEBUG(DefMI->dump());
|
2016-12-06 19:47:14 +08:00
|
|
|
RoundInstr->eraseFromParent();
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
// If the input to XVCVDPSP is a vector that was built (even
|
|
|
|
// partially) out of FRSP's, the FRSP(s) can safely be removed
|
|
|
|
// since this instruction performs the same operation.
|
|
|
|
if (P1 != P2) {
|
|
|
|
removeFRSPIfPossible(P1);
|
|
|
|
removeFRSPIfPossible(P2);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
removeFRSPIfPossible(P1);
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
}
|
2017-10-16 12:12:57 +08:00
|
|
|
case PPC::EXTSH:
|
|
|
|
case PPC::EXTSH8:
|
|
|
|
case PPC::EXTSH8_32_64: {
|
|
|
|
if (!EnableSExtElimination) break;
|
Apply llvm-prefer-register-over-unsigned from clang-tidy to LLVM
Summary:
This clang-tidy check is looking for unsigned integer variables whose initializer
starts with an implicit cast from llvm::Register and changes the type of the
variable to llvm::Register (dropping the llvm:: where possible).
Partial reverts in:
X86FrameLowering.cpp - Some functions return unsigned and arguably should be MCRegister
X86FixupLEAs.cpp - Some functions return unsigned and arguably should be MCRegister
X86FrameLowering.cpp - Some functions return unsigned and arguably should be MCRegister
HexagonBitSimplify.cpp - Function takes BitTracker::RegisterRef which appears to be unsigned&
MachineVerifier.cpp - Ambiguous operator==() given MCRegister and const Register
PPCFastISel.cpp - No Register::operator-=()
PeepholeOptimizer.cpp - TargetInstrInfo::optimizeLoadInstr() takes an unsigned&
MachineTraceMetrics.cpp - MachineTraceMetrics lacks a suitable constructor
Manual fixups in:
ARMFastISel.cpp - ARMEmitLoad() now takes a Register& instead of unsigned&
HexagonSplitDouble.cpp - Ternary operator was ambiguous between unsigned/Register
HexagonConstExtenders.cpp - Has a local class named Register, used llvm::Register instead of Register.
PPCFastISel.cpp - PPCEmitLoad() now takes a Register& instead of unsigned&
Depends on D65919
Reviewers: arsenm, bogner, craig.topper, RKSimon
Reviewed By: arsenm
Subscribers: RKSimon, craig.topper, lenary, aemerson, wuzish, jholewinski, MatzeB, qcolombet, dschuff, jyknight, dylanmckay, sdardis, nemanjai, jvesely, wdng, nhaehnle, sbc100, jgravelle-google, kristof.beyls, hiraditya, aheejin, kbarton, fedor.sergeev, javed.absar, asb, rbar, johnrusso, simoncook, apazos, sabuasal, niosHD, jrtc27, MaskRay, zzheng, edward-jones, atanasyan, rogfer01, MartinMosbeck, brucehoult, the_o, tpr, PkmX, jocewei, jsji, Petar.Avramovic, asbirlea, Jim, s.egerton, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D65962
llvm-svn: 369041
2019-08-16 03:22:08 +08:00
|
|
|
Register NarrowReg = MI.getOperand(1).getReg();
|
2019-08-02 07:27:28 +08:00
|
|
|
if (!Register::isVirtualRegister(NarrowReg))
|
2017-10-16 12:12:57 +08:00
|
|
|
break;
|
|
|
|
|
|
|
|
MachineInstr *SrcMI = MRI->getVRegDef(NarrowReg);
|
|
|
|
// If we've used a zero-extending load that we will sign-extend,
|
|
|
|
// just do a sign-extending load.
|
|
|
|
if (SrcMI->getOpcode() == PPC::LHZ ||
|
|
|
|
SrcMI->getOpcode() == PPC::LHZX) {
|
|
|
|
if (!MRI->hasOneNonDBGUse(SrcMI->getOperand(0).getReg()))
|
|
|
|
break;
|
|
|
|
auto is64Bit = [] (unsigned Opcode) {
|
|
|
|
return Opcode == PPC::EXTSH8;
|
|
|
|
};
|
|
|
|
auto isXForm = [] (unsigned Opcode) {
|
|
|
|
return Opcode == PPC::LHZX;
|
|
|
|
};
|
|
|
|
auto getSextLoadOp = [] (bool is64Bit, bool isXForm) {
|
|
|
|
if (is64Bit)
|
|
|
|
if (isXForm) return PPC::LHAX8;
|
|
|
|
else return PPC::LHA8;
|
|
|
|
else
|
|
|
|
if (isXForm) return PPC::LHAX;
|
|
|
|
else return PPC::LHA;
|
|
|
|
};
|
|
|
|
unsigned Opc = getSextLoadOp(is64Bit(MI.getOpcode()),
|
|
|
|
isXForm(SrcMI->getOpcode()));
|
2018-05-14 20:53:11 +08:00
|
|
|
LLVM_DEBUG(dbgs() << "Zero-extending load\n");
|
|
|
|
LLVM_DEBUG(SrcMI->dump());
|
|
|
|
LLVM_DEBUG(dbgs() << "and sign-extension\n");
|
|
|
|
LLVM_DEBUG(MI.dump());
|
|
|
|
LLVM_DEBUG(dbgs() << "are merged into sign-extending load\n");
|
2017-10-16 12:12:57 +08:00
|
|
|
SrcMI->setDesc(TII->get(Opc));
|
|
|
|
SrcMI->getOperand(0).setReg(MI.getOperand(0).getReg());
|
|
|
|
ToErase = &MI;
|
|
|
|
Simplified = true;
|
|
|
|
NumEliminatedSExt++;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
case PPC::EXTSW:
|
|
|
|
case PPC::EXTSW_32:
|
|
|
|
case PPC::EXTSW_32_64: {
|
|
|
|
if (!EnableSExtElimination) break;
|
Apply llvm-prefer-register-over-unsigned from clang-tidy to LLVM
Summary:
This clang-tidy check is looking for unsigned integer variables whose initializer
starts with an implicit cast from llvm::Register and changes the type of the
variable to llvm::Register (dropping the llvm:: where possible).
Partial reverts in:
X86FrameLowering.cpp - Some functions return unsigned and arguably should be MCRegister
X86FixupLEAs.cpp - Some functions return unsigned and arguably should be MCRegister
X86FrameLowering.cpp - Some functions return unsigned and arguably should be MCRegister
HexagonBitSimplify.cpp - Function takes BitTracker::RegisterRef which appears to be unsigned&
MachineVerifier.cpp - Ambiguous operator==() given MCRegister and const Register
PPCFastISel.cpp - No Register::operator-=()
PeepholeOptimizer.cpp - TargetInstrInfo::optimizeLoadInstr() takes an unsigned&
MachineTraceMetrics.cpp - MachineTraceMetrics lacks a suitable constructor
Manual fixups in:
ARMFastISel.cpp - ARMEmitLoad() now takes a Register& instead of unsigned&
HexagonSplitDouble.cpp - Ternary operator was ambiguous between unsigned/Register
HexagonConstExtenders.cpp - Has a local class named Register, used llvm::Register instead of Register.
PPCFastISel.cpp - PPCEmitLoad() now takes a Register& instead of unsigned&
Depends on D65919
Reviewers: arsenm, bogner, craig.topper, RKSimon
Reviewed By: arsenm
Subscribers: RKSimon, craig.topper, lenary, aemerson, wuzish, jholewinski, MatzeB, qcolombet, dschuff, jyknight, dylanmckay, sdardis, nemanjai, jvesely, wdng, nhaehnle, sbc100, jgravelle-google, kristof.beyls, hiraditya, aheejin, kbarton, fedor.sergeev, javed.absar, asb, rbar, johnrusso, simoncook, apazos, sabuasal, niosHD, jrtc27, MaskRay, zzheng, edward-jones, atanasyan, rogfer01, MartinMosbeck, brucehoult, the_o, tpr, PkmX, jocewei, jsji, Petar.Avramovic, asbirlea, Jim, s.egerton, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D65962
llvm-svn: 369041
2019-08-16 03:22:08 +08:00
|
|
|
Register NarrowReg = MI.getOperand(1).getReg();
|
2019-08-02 07:27:28 +08:00
|
|
|
if (!Register::isVirtualRegister(NarrowReg))
|
2017-10-16 12:12:57 +08:00
|
|
|
break;
|
|
|
|
|
|
|
|
MachineInstr *SrcMI = MRI->getVRegDef(NarrowReg);
|
|
|
|
// If we've used a zero-extending load that we will sign-extend,
|
|
|
|
// just do a sign-extending load.
|
|
|
|
if (SrcMI->getOpcode() == PPC::LWZ ||
|
|
|
|
SrcMI->getOpcode() == PPC::LWZX) {
|
|
|
|
if (!MRI->hasOneNonDBGUse(SrcMI->getOperand(0).getReg()))
|
|
|
|
break;
|
|
|
|
auto is64Bit = [] (unsigned Opcode) {
|
|
|
|
return Opcode == PPC::EXTSW || Opcode == PPC::EXTSW_32_64;
|
|
|
|
};
|
|
|
|
auto isXForm = [] (unsigned Opcode) {
|
|
|
|
return Opcode == PPC::LWZX;
|
|
|
|
};
|
|
|
|
auto getSextLoadOp = [] (bool is64Bit, bool isXForm) {
|
|
|
|
if (is64Bit)
|
|
|
|
if (isXForm) return PPC::LWAX;
|
|
|
|
else return PPC::LWA;
|
|
|
|
else
|
|
|
|
if (isXForm) return PPC::LWAX_32;
|
|
|
|
else return PPC::LWA_32;
|
|
|
|
};
|
|
|
|
unsigned Opc = getSextLoadOp(is64Bit(MI.getOpcode()),
|
|
|
|
isXForm(SrcMI->getOpcode()));
|
2018-05-14 20:53:11 +08:00
|
|
|
LLVM_DEBUG(dbgs() << "Zero-extending load\n");
|
|
|
|
LLVM_DEBUG(SrcMI->dump());
|
|
|
|
LLVM_DEBUG(dbgs() << "and sign-extension\n");
|
|
|
|
LLVM_DEBUG(MI.dump());
|
|
|
|
LLVM_DEBUG(dbgs() << "are merged into sign-extending load\n");
|
2017-10-16 12:12:57 +08:00
|
|
|
SrcMI->setDesc(TII->get(Opc));
|
|
|
|
SrcMI->getOperand(0).setReg(MI.getOperand(0).getReg());
|
|
|
|
ToErase = &MI;
|
|
|
|
Simplified = true;
|
|
|
|
NumEliminatedSExt++;
|
|
|
|
} else if (MI.getOpcode() == PPC::EXTSW_32_64 &&
|
|
|
|
TII->isSignExtended(*SrcMI)) {
|
|
|
|
// We can eliminate EXTSW if the input is known to be already
|
|
|
|
// sign-extended.
|
2018-05-14 20:53:11 +08:00
|
|
|
LLVM_DEBUG(dbgs() << "Removing redundant sign-extension\n");
|
Apply llvm-prefer-register-over-unsigned from clang-tidy to LLVM
Summary:
This clang-tidy check is looking for unsigned integer variables whose initializer
starts with an implicit cast from llvm::Register and changes the type of the
variable to llvm::Register (dropping the llvm:: where possible).
Partial reverts in:
X86FrameLowering.cpp - Some functions return unsigned and arguably should be MCRegister
X86FixupLEAs.cpp - Some functions return unsigned and arguably should be MCRegister
X86FrameLowering.cpp - Some functions return unsigned and arguably should be MCRegister
HexagonBitSimplify.cpp - Function takes BitTracker::RegisterRef which appears to be unsigned&
MachineVerifier.cpp - Ambiguous operator==() given MCRegister and const Register
PPCFastISel.cpp - No Register::operator-=()
PeepholeOptimizer.cpp - TargetInstrInfo::optimizeLoadInstr() takes an unsigned&
MachineTraceMetrics.cpp - MachineTraceMetrics lacks a suitable constructor
Manual fixups in:
ARMFastISel.cpp - ARMEmitLoad() now takes a Register& instead of unsigned&
HexagonSplitDouble.cpp - Ternary operator was ambiguous between unsigned/Register
HexagonConstExtenders.cpp - Has a local class named Register, used llvm::Register instead of Register.
PPCFastISel.cpp - PPCEmitLoad() now takes a Register& instead of unsigned&
Depends on D65919
Reviewers: arsenm, bogner, craig.topper, RKSimon
Reviewed By: arsenm
Subscribers: RKSimon, craig.topper, lenary, aemerson, wuzish, jholewinski, MatzeB, qcolombet, dschuff, jyknight, dylanmckay, sdardis, nemanjai, jvesely, wdng, nhaehnle, sbc100, jgravelle-google, kristof.beyls, hiraditya, aheejin, kbarton, fedor.sergeev, javed.absar, asb, rbar, johnrusso, simoncook, apazos, sabuasal, niosHD, jrtc27, MaskRay, zzheng, edward-jones, atanasyan, rogfer01, MartinMosbeck, brucehoult, the_o, tpr, PkmX, jocewei, jsji, Petar.Avramovic, asbirlea, Jim, s.egerton, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D65962
llvm-svn: 369041
2019-08-16 03:22:08 +08:00
|
|
|
Register TmpReg =
|
|
|
|
MF->getRegInfo().createVirtualRegister(&PPC::G8RCRegClass);
|
2017-10-16 12:12:57 +08:00
|
|
|
BuildMI(MBB, &MI, MI.getDebugLoc(), TII->get(PPC::IMPLICIT_DEF),
|
|
|
|
TmpReg);
|
|
|
|
BuildMI(MBB, &MI, MI.getDebugLoc(), TII->get(PPC::INSERT_SUBREG),
|
|
|
|
MI.getOperand(0).getReg())
|
|
|
|
.addReg(TmpReg)
|
|
|
|
.addReg(NarrowReg)
|
|
|
|
.addImm(PPC::sub_32);
|
|
|
|
ToErase = &MI;
|
|
|
|
Simplified = true;
|
|
|
|
NumEliminatedSExt++;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
case PPC::RLDICL: {
|
|
|
|
// We can eliminate RLDICL (e.g. for zero-extension)
|
|
|
|
// if all bits to clear are already zero in the input.
|
|
|
|
// This code assume following code sequence for zero-extension.
|
2017-12-07 18:40:31 +08:00
|
|
|
// %6 = COPY %5:sub_32; (optional)
|
|
|
|
// %8 = IMPLICIT_DEF;
|
2017-11-30 20:12:19 +08:00
|
|
|
// %7<def,tied1> = INSERT_SUBREG %8<tied0>, %6, sub_32;
|
2017-10-16 12:12:57 +08:00
|
|
|
if (!EnableZExtElimination) break;
|
|
|
|
|
|
|
|
if (MI.getOperand(2).getImm() != 0)
|
|
|
|
break;
|
|
|
|
|
Apply llvm-prefer-register-over-unsigned from clang-tidy to LLVM
Summary:
This clang-tidy check is looking for unsigned integer variables whose initializer
starts with an implicit cast from llvm::Register and changes the type of the
variable to llvm::Register (dropping the llvm:: where possible).
Partial reverts in:
X86FrameLowering.cpp - Some functions return unsigned and arguably should be MCRegister
X86FixupLEAs.cpp - Some functions return unsigned and arguably should be MCRegister
X86FrameLowering.cpp - Some functions return unsigned and arguably should be MCRegister
HexagonBitSimplify.cpp - Function takes BitTracker::RegisterRef which appears to be unsigned&
MachineVerifier.cpp - Ambiguous operator==() given MCRegister and const Register
PPCFastISel.cpp - No Register::operator-=()
PeepholeOptimizer.cpp - TargetInstrInfo::optimizeLoadInstr() takes an unsigned&
MachineTraceMetrics.cpp - MachineTraceMetrics lacks a suitable constructor
Manual fixups in:
ARMFastISel.cpp - ARMEmitLoad() now takes a Register& instead of unsigned&
HexagonSplitDouble.cpp - Ternary operator was ambiguous between unsigned/Register
HexagonConstExtenders.cpp - Has a local class named Register, used llvm::Register instead of Register.
PPCFastISel.cpp - PPCEmitLoad() now takes a Register& instead of unsigned&
Depends on D65919
Reviewers: arsenm, bogner, craig.topper, RKSimon
Reviewed By: arsenm
Subscribers: RKSimon, craig.topper, lenary, aemerson, wuzish, jholewinski, MatzeB, qcolombet, dschuff, jyknight, dylanmckay, sdardis, nemanjai, jvesely, wdng, nhaehnle, sbc100, jgravelle-google, kristof.beyls, hiraditya, aheejin, kbarton, fedor.sergeev, javed.absar, asb, rbar, johnrusso, simoncook, apazos, sabuasal, niosHD, jrtc27, MaskRay, zzheng, edward-jones, atanasyan, rogfer01, MartinMosbeck, brucehoult, the_o, tpr, PkmX, jocewei, jsji, Petar.Avramovic, asbirlea, Jim, s.egerton, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D65962
llvm-svn: 369041
2019-08-16 03:22:08 +08:00
|
|
|
Register SrcReg = MI.getOperand(1).getReg();
|
2019-08-02 07:27:28 +08:00
|
|
|
if (!Register::isVirtualRegister(SrcReg))
|
2017-10-16 12:12:57 +08:00
|
|
|
break;
|
|
|
|
|
|
|
|
MachineInstr *SrcMI = MRI->getVRegDef(SrcReg);
|
|
|
|
if (!(SrcMI && SrcMI->getOpcode() == PPC::INSERT_SUBREG &&
|
|
|
|
SrcMI->getOperand(0).isReg() && SrcMI->getOperand(1).isReg()))
|
|
|
|
break;
|
|
|
|
|
|
|
|
MachineInstr *ImpDefMI, *SubRegMI;
|
|
|
|
ImpDefMI = MRI->getVRegDef(SrcMI->getOperand(1).getReg());
|
|
|
|
SubRegMI = MRI->getVRegDef(SrcMI->getOperand(2).getReg());
|
|
|
|
if (ImpDefMI->getOpcode() != PPC::IMPLICIT_DEF) break;
|
|
|
|
|
|
|
|
SrcMI = SubRegMI;
|
|
|
|
if (SubRegMI->getOpcode() == PPC::COPY) {
|
Apply llvm-prefer-register-over-unsigned from clang-tidy to LLVM
Summary:
This clang-tidy check is looking for unsigned integer variables whose initializer
starts with an implicit cast from llvm::Register and changes the type of the
variable to llvm::Register (dropping the llvm:: where possible).
Partial reverts in:
X86FrameLowering.cpp - Some functions return unsigned and arguably should be MCRegister
X86FixupLEAs.cpp - Some functions return unsigned and arguably should be MCRegister
X86FrameLowering.cpp - Some functions return unsigned and arguably should be MCRegister
HexagonBitSimplify.cpp - Function takes BitTracker::RegisterRef which appears to be unsigned&
MachineVerifier.cpp - Ambiguous operator==() given MCRegister and const Register
PPCFastISel.cpp - No Register::operator-=()
PeepholeOptimizer.cpp - TargetInstrInfo::optimizeLoadInstr() takes an unsigned&
MachineTraceMetrics.cpp - MachineTraceMetrics lacks a suitable constructor
Manual fixups in:
ARMFastISel.cpp - ARMEmitLoad() now takes a Register& instead of unsigned&
HexagonSplitDouble.cpp - Ternary operator was ambiguous between unsigned/Register
HexagonConstExtenders.cpp - Has a local class named Register, used llvm::Register instead of Register.
PPCFastISel.cpp - PPCEmitLoad() now takes a Register& instead of unsigned&
Depends on D65919
Reviewers: arsenm, bogner, craig.topper, RKSimon
Reviewed By: arsenm
Subscribers: RKSimon, craig.topper, lenary, aemerson, wuzish, jholewinski, MatzeB, qcolombet, dschuff, jyknight, dylanmckay, sdardis, nemanjai, jvesely, wdng, nhaehnle, sbc100, jgravelle-google, kristof.beyls, hiraditya, aheejin, kbarton, fedor.sergeev, javed.absar, asb, rbar, johnrusso, simoncook, apazos, sabuasal, niosHD, jrtc27, MaskRay, zzheng, edward-jones, atanasyan, rogfer01, MartinMosbeck, brucehoult, the_o, tpr, PkmX, jocewei, jsji, Petar.Avramovic, asbirlea, Jim, s.egerton, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D65962
llvm-svn: 369041
2019-08-16 03:22:08 +08:00
|
|
|
Register CopyReg = SubRegMI->getOperand(1).getReg();
|
2019-08-02 07:27:28 +08:00
|
|
|
if (Register::isVirtualRegister(CopyReg))
|
2017-10-16 12:12:57 +08:00
|
|
|
SrcMI = MRI->getVRegDef(CopyReg);
|
|
|
|
}
|
|
|
|
|
|
|
|
unsigned KnownZeroCount = getKnownLeadingZeroCount(SrcMI, TII);
|
|
|
|
if (MI.getOperand(3).getImm() <= KnownZeroCount) {
|
2018-05-14 20:53:11 +08:00
|
|
|
LLVM_DEBUG(dbgs() << "Removing redundant zero-extension\n");
|
2017-10-16 12:12:57 +08:00
|
|
|
BuildMI(MBB, &MI, MI.getDebugLoc(), TII->get(PPC::COPY),
|
|
|
|
MI.getOperand(0).getReg())
|
|
|
|
.addReg(SrcReg);
|
|
|
|
ToErase = &MI;
|
|
|
|
Simplified = true;
|
|
|
|
NumEliminatedZExt++;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
}
|
2017-09-20 00:14:37 +08:00
|
|
|
|
|
|
|
// TODO: Any instruction that has an immediate form fed only by a PHI
|
|
|
|
// whose operands are all load immediate can be folded away. We currently
|
|
|
|
// do this for ADD instructions, but should expand it to arithmetic and
|
|
|
|
// binary instructions with immediate forms in the future.
|
|
|
|
case PPC::ADD4:
|
|
|
|
case PPC::ADD8: {
|
|
|
|
auto isSingleUsePHI = [&](MachineOperand *PhiOp) {
|
|
|
|
assert(PhiOp && "Invalid Operand!");
|
|
|
|
MachineInstr *DefPhiMI = getVRegDefOrNull(PhiOp, MRI);
|
|
|
|
|
|
|
|
return DefPhiMI && (DefPhiMI->getOpcode() == PPC::PHI) &&
|
|
|
|
MRI->hasOneNonDBGUse(DefPhiMI->getOperand(0).getReg());
|
|
|
|
};
|
|
|
|
|
|
|
|
auto dominatesAllSingleUseLIs = [&](MachineOperand *DominatorOp,
|
|
|
|
MachineOperand *PhiOp) {
|
|
|
|
assert(PhiOp && "Invalid Operand!");
|
|
|
|
assert(DominatorOp && "Invalid Operand!");
|
|
|
|
MachineInstr *DefPhiMI = getVRegDefOrNull(PhiOp, MRI);
|
|
|
|
MachineInstr *DefDomMI = getVRegDefOrNull(DominatorOp, MRI);
|
|
|
|
|
|
|
|
// Note: the vregs only show up at odd indices position of PHI Node,
|
|
|
|
// the even indices position save the BB info.
|
|
|
|
for (unsigned i = 1; i < DefPhiMI->getNumOperands(); i += 2) {
|
|
|
|
MachineInstr *LiMI =
|
|
|
|
getVRegDefOrNull(&DefPhiMI->getOperand(i), MRI);
|
2017-10-10 16:46:10 +08:00
|
|
|
if (!LiMI ||
|
|
|
|
(LiMI->getOpcode() != PPC::LI && LiMI->getOpcode() != PPC::LI8)
|
|
|
|
|| !MRI->hasOneNonDBGUse(LiMI->getOperand(0).getReg()) ||
|
|
|
|
!MDT->dominates(DefDomMI, LiMI))
|
2017-09-20 00:14:37 +08:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
return true;
|
|
|
|
};
|
|
|
|
|
|
|
|
MachineOperand Op1 = MI.getOperand(1);
|
|
|
|
MachineOperand Op2 = MI.getOperand(2);
|
|
|
|
if (isSingleUsePHI(&Op2) && dominatesAllSingleUseLIs(&Op1, &Op2))
|
|
|
|
std::swap(Op1, Op2);
|
|
|
|
else if (!isSingleUsePHI(&Op1) || !dominatesAllSingleUseLIs(&Op2, &Op1))
|
|
|
|
break; // We don't have an ADD fed by LI's that can be transformed
|
|
|
|
|
|
|
|
// Now we know that Op1 is the PHI node and Op2 is the dominator
|
Apply llvm-prefer-register-over-unsigned from clang-tidy to LLVM
Summary:
This clang-tidy check is looking for unsigned integer variables whose initializer
starts with an implicit cast from llvm::Register and changes the type of the
variable to llvm::Register (dropping the llvm:: where possible).
Partial reverts in:
X86FrameLowering.cpp - Some functions return unsigned and arguably should be MCRegister
X86FixupLEAs.cpp - Some functions return unsigned and arguably should be MCRegister
X86FrameLowering.cpp - Some functions return unsigned and arguably should be MCRegister
HexagonBitSimplify.cpp - Function takes BitTracker::RegisterRef which appears to be unsigned&
MachineVerifier.cpp - Ambiguous operator==() given MCRegister and const Register
PPCFastISel.cpp - No Register::operator-=()
PeepholeOptimizer.cpp - TargetInstrInfo::optimizeLoadInstr() takes an unsigned&
MachineTraceMetrics.cpp - MachineTraceMetrics lacks a suitable constructor
Manual fixups in:
ARMFastISel.cpp - ARMEmitLoad() now takes a Register& instead of unsigned&
HexagonSplitDouble.cpp - Ternary operator was ambiguous between unsigned/Register
HexagonConstExtenders.cpp - Has a local class named Register, used llvm::Register instead of Register.
PPCFastISel.cpp - PPCEmitLoad() now takes a Register& instead of unsigned&
Depends on D65919
Reviewers: arsenm, bogner, craig.topper, RKSimon
Reviewed By: arsenm
Subscribers: RKSimon, craig.topper, lenary, aemerson, wuzish, jholewinski, MatzeB, qcolombet, dschuff, jyknight, dylanmckay, sdardis, nemanjai, jvesely, wdng, nhaehnle, sbc100, jgravelle-google, kristof.beyls, hiraditya, aheejin, kbarton, fedor.sergeev, javed.absar, asb, rbar, johnrusso, simoncook, apazos, sabuasal, niosHD, jrtc27, MaskRay, zzheng, edward-jones, atanasyan, rogfer01, MartinMosbeck, brucehoult, the_o, tpr, PkmX, jocewei, jsji, Petar.Avramovic, asbirlea, Jim, s.egerton, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D65962
llvm-svn: 369041
2019-08-16 03:22:08 +08:00
|
|
|
Register DominatorReg = Op2.getReg();
|
2017-09-20 00:14:37 +08:00
|
|
|
|
|
|
|
const TargetRegisterClass *TRC = MI.getOpcode() == PPC::ADD8
|
|
|
|
? &PPC::G8RC_and_G8RC_NOX0RegClass
|
|
|
|
: &PPC::GPRC_and_GPRC_NOR0RegClass;
|
|
|
|
MRI->setRegClass(DominatorReg, TRC);
|
|
|
|
|
|
|
|
// replace LIs with ADDIs
|
|
|
|
MachineInstr *DefPhiMI = getVRegDefOrNull(&Op1, MRI);
|
|
|
|
for (unsigned i = 1; i < DefPhiMI->getNumOperands(); i += 2) {
|
|
|
|
MachineInstr *LiMI = getVRegDefOrNull(&DefPhiMI->getOperand(i), MRI);
|
2018-05-14 20:53:11 +08:00
|
|
|
LLVM_DEBUG(dbgs() << "Optimizing LI to ADDI: ");
|
|
|
|
LLVM_DEBUG(LiMI->dump());
|
2017-09-20 00:14:37 +08:00
|
|
|
|
2017-12-07 18:40:31 +08:00
|
|
|
// There could be repeated registers in the PHI, e.g: %1 =
|
2017-12-05 01:18:51 +08:00
|
|
|
// PHI %6, <%bb.2>, %8, <%bb.3>, %8, <%bb.6>; So if we've
|
2017-09-20 00:14:37 +08:00
|
|
|
// already replaced the def instruction, skip.
|
|
|
|
if (LiMI->getOpcode() == PPC::ADDI || LiMI->getOpcode() == PPC::ADDI8)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
assert((LiMI->getOpcode() == PPC::LI ||
|
|
|
|
LiMI->getOpcode() == PPC::LI8) &&
|
|
|
|
"Invalid Opcode!");
|
|
|
|
auto LiImm = LiMI->getOperand(1).getImm(); // save the imm of LI
|
|
|
|
LiMI->RemoveOperand(1); // remove the imm of LI
|
|
|
|
LiMI->setDesc(TII->get(LiMI->getOpcode() == PPC::LI ? PPC::ADDI
|
|
|
|
: PPC::ADDI8));
|
|
|
|
MachineInstrBuilder(*LiMI->getParent()->getParent(), *LiMI)
|
|
|
|
.addReg(DominatorReg)
|
|
|
|
.addImm(LiImm); // restore the imm of LI
|
2018-05-14 20:53:11 +08:00
|
|
|
LLVM_DEBUG(LiMI->dump());
|
2017-09-20 00:14:37 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
// Replace ADD with COPY
|
2018-05-14 20:53:11 +08:00
|
|
|
LLVM_DEBUG(dbgs() << "Optimizing ADD to COPY: ");
|
|
|
|
LLVM_DEBUG(MI.dump());
|
2017-09-20 00:14:37 +08:00
|
|
|
BuildMI(MBB, &MI, MI.getDebugLoc(), TII->get(PPC::COPY),
|
|
|
|
MI.getOperand(0).getReg())
|
|
|
|
.add(Op1);
|
|
|
|
ToErase = &MI;
|
|
|
|
Simplified = true;
|
|
|
|
NumOptADDLIs++;
|
|
|
|
break;
|
|
|
|
}
|
2019-06-05 10:36:40 +08:00
|
|
|
case PPC::RLDICR: {
|
2019-07-09 10:55:08 +08:00
|
|
|
Simplified |= emitRLDICWhenLoweringJumpTables(MI) ||
|
|
|
|
combineSEXTAndSHL(MI, ToErase);
|
2019-06-05 10:36:40 +08:00
|
|
|
break;
|
|
|
|
}
|
2015-11-11 05:38:26 +08:00
|
|
|
}
|
|
|
|
}
|
2017-09-20 00:14:37 +08:00
|
|
|
|
2015-11-11 05:38:26 +08:00
|
|
|
// If the last instruction was marked for elimination,
|
|
|
|
// remove it now.
|
|
|
|
if (ToErase) {
|
|
|
|
ToErase->eraseFromParent();
|
|
|
|
ToErase = nullptr;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-11-28 04:26:36 +08:00
|
|
|
// Eliminate all the TOC save instructions which are redundant.
|
|
|
|
Simplified |= eliminateRedundantTOCSaves(TOCSaves);
|
2019-07-06 02:38:09 +08:00
|
|
|
PPCFunctionInfo *FI = MF->getInfo<PPCFunctionInfo>();
|
|
|
|
if (FI->mustSaveTOC())
|
|
|
|
NumTOCSavesInPrologue++;
|
|
|
|
|
[PowerPC] eliminate redundant compare instruction
If multiple conditional branches are executed based on the same comparison, we can execute multiple conditional branches based on the result of one comparison on PPC. For example,
if (a == 0) { ... }
else if (a < 0) { ... }
can be executed by one compare and two conditional branches instead of two pairs of a compare and a conditional branch.
This patch identifies a code sequence of the two pairs of a compare and a conditional branch and merge the compares if possible.
To maximize the opportunity, we do canonicalization of code sequence before merging compares.
For the above example, the input for this pass looks like:
cmplwi r3, 0
beq 0, .LBB0_3
cmpwi r3, -1
bgt 0, .LBB0_4
So, before merging two compares, we canonicalize it as
cmpwi r3, 0 ; cmplwi and cmpwi yield same result for beq
beq 0, .LBB0_3
cmpwi r3, 0 ; greater than -1 means greater or equal to 0
bge 0, .LBB0_4
The generated code should be
cmpwi r3, 0
beq 0, .LBB0_3
bge 0, .LBB0_4
Differential Revision: https://reviews.llvm.org/D37211
llvm-svn: 312514
2017-09-05 12:15:17 +08:00
|
|
|
// We try to eliminate redundant compare instruction.
|
2017-12-15 19:47:48 +08:00
|
|
|
Simplified |= eliminateRedundantCompare();
|
[PowerPC] eliminate redundant compare instruction
If multiple conditional branches are executed based on the same comparison, we can execute multiple conditional branches based on the result of one comparison on PPC. For example,
if (a == 0) { ... }
else if (a < 0) { ... }
can be executed by one compare and two conditional branches instead of two pairs of a compare and a conditional branch.
This patch identifies a code sequence of the two pairs of a compare and a conditional branch and merge the compares if possible.
To maximize the opportunity, we do canonicalization of code sequence before merging compares.
For the above example, the input for this pass looks like:
cmplwi r3, 0
beq 0, .LBB0_3
cmpwi r3, -1
bgt 0, .LBB0_4
So, before merging two compares, we canonicalize it as
cmpwi r3, 0 ; cmplwi and cmpwi yield same result for beq
beq 0, .LBB0_3
cmpwi r3, 0 ; greather than -1 means greater or equal to 0
bge 0, .LBB0_4
The generated code should be
cmpwi r3, 0
beq 0, .LBB0_3
bge 0, .LBB0_4
Differential Revision: https://reviews.llvm.org/D37211
llvm-svn: 312514
2017-09-05 12:15:17 +08:00
|
|
|
|
|
|
|
return Simplified;
|
|
|
|
}
|
|
|
|
|
|
|
|
// helper functions for eliminateRedundantCompare
|
|
|
|
// Return true if the conditional branch BI tests for equality or
// inequality, i.e. its predicate condition field is EQ or NE.
static bool isEqOrNe(MachineInstr *BI) {
  auto Cond = PPC::getPredicateCondition(
      static_cast<PPC::Predicate>(BI->getOperand(0).getImm()));
  return Cond == PPC::PRED_EQ || Cond == PPC::PRED_NE;
}
|
|
|
|
|
|
|
|
static bool isSupportedCmpOp(unsigned opCode) {
|
|
|
|
return (opCode == PPC::CMPLD || opCode == PPC::CMPD ||
|
|
|
|
opCode == PPC::CMPLW || opCode == PPC::CMPW ||
|
|
|
|
opCode == PPC::CMPLDI || opCode == PPC::CMPDI ||
|
|
|
|
opCode == PPC::CMPLWI || opCode == PPC::CMPWI);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool is64bitCmpOp(unsigned opCode) {
|
|
|
|
return (opCode == PPC::CMPLD || opCode == PPC::CMPD ||
|
|
|
|
opCode == PPC::CMPLDI || opCode == PPC::CMPDI);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool isSignedCmpOp(unsigned opCode) {
|
|
|
|
return (opCode == PPC::CMPD || opCode == PPC::CMPW ||
|
|
|
|
opCode == PPC::CMPDI || opCode == PPC::CMPWI);
|
|
|
|
}
|
|
|
|
|
|
|
|
static unsigned getSignedCmpOpCode(unsigned opCode) {
|
|
|
|
if (opCode == PPC::CMPLD) return PPC::CMPD;
|
|
|
|
if (opCode == PPC::CMPLW) return PPC::CMPW;
|
|
|
|
if (opCode == PPC::CMPLDI) return PPC::CMPDI;
|
|
|
|
if (opCode == PPC::CMPLWI) return PPC::CMPWI;
|
|
|
|
return opCode;
|
|
|
|
}
|
|
|
|
|
|
|
|
// We can decrement immediate x in (GE x) by changing it to (GT x-1) or
// (LT x) to (LE x-1).
// Returns the adjusted predicate (with the original branch hint preserved),
// or 0 if no such adjustment is possible.
static unsigned getPredicateToDecImm(MachineInstr *BI, MachineInstr *CMPI) {
  uint64_t Imm = CMPI->getOperand(2).getImm();
  bool SignedCmp = isSignedCmpOp(CMPI->getOpcode());
  // Bail out if the immediate is already at the boundary value of the
  // 16-bit immediate field (0 for unsigned compares, 0x8000 for signed
  // ones) — decrementing would wrap around.
  if ((!SignedCmp && Imm == 0) || (SignedCmp && Imm == 0x8000))
    return 0;

  PPC::Predicate Pred = (PPC::Predicate)BI->getOperand(0).getImm();
  unsigned PredCond = PPC::getPredicateCondition(Pred);
  unsigned PredHint = PPC::getPredicateHint(Pred);
  // Only GE and LT can absorb a decremented immediate:
  //   (x GE imm)  <=>  (x GT imm-1)   and   (x LT imm)  <=>  (x LE imm-1).
  // The branch-prediction hint bits are carried over unchanged.
  if (PredCond == PPC::PRED_GE)
    return PPC::getPredicate(PPC::PRED_GT, PredHint);
  if (PredCond == PPC::PRED_LT)
    return PPC::getPredicate(PPC::PRED_LE, PredHint);

  // Any other predicate: 0 signals "no usable predicate" to the caller.
  return 0;
}
|
|
|
|
|
|
|
|
// We can increment immediate x in (GT x) by changing it to (GE x+1) or
// (LE x) to (LT x+1).
// Returns the adjusted predicate (with the original branch hint preserved),
// or 0 if no such adjustment is possible.
static unsigned getPredicateToIncImm(MachineInstr *BI, MachineInstr *CMPI) {
  uint64_t Imm = CMPI->getOperand(2).getImm();
  bool SignedCmp = isSignedCmpOp(CMPI->getOpcode());
  // Bail out if the immediate is already at the boundary value of the
  // 16-bit immediate field (0xFFFF for unsigned compares, 0x7FFF for
  // signed ones) — incrementing would wrap around.
  if ((!SignedCmp && Imm == 0xFFFF) || (SignedCmp && Imm == 0x7FFF))
    return 0;

  PPC::Predicate Pred = (PPC::Predicate)BI->getOperand(0).getImm();
  unsigned PredCond = PPC::getPredicateCondition(Pred);
  unsigned PredHint = PPC::getPredicateHint(Pred);
  // Only GT and LE can absorb an incremented immediate:
  //   (x GT imm)  <=>  (x GE imm+1)   and   (x LE imm)  <=>  (x LT imm+1).
  // The branch-prediction hint bits are carried over unchanged.
  if (PredCond == PPC::PRED_GT)
    return PPC::getPredicate(PPC::PRED_GE, PredHint);
  if (PredCond == PPC::PRED_LE)
    return PPC::getPredicate(PPC::PRED_LT, PredHint);

  // Any other predicate: 0 signals "no usable predicate" to the caller.
  return 0;
}
|
|
|
|
|
2018-06-13 16:54:13 +08:00
|
|
|
// This takes a Phi node and returns a register value for the specified BB.
// Aborts (llvm_unreachable) if MBB is not an incoming block of the PHI.
static unsigned getIncomingRegForBlock(MachineInstr *Phi,
                                       MachineBasicBlock *MBB) {
  // PHI operands are laid out as (def, val1, bb1, val2, bb2, ...), so the
  // incoming-block operands sit at even indices 2, 4, 6, ...  NumOperands
  // is odd (1 + 2 * NumIncoming), hence E = NumOperands + 1 is the first
  // even index past the last block operand and `I != E` terminates cleanly.
  for (unsigned I = 2, E = Phi->getNumOperands() + 1; I != E; I += 2) {
    MachineOperand &MO = Phi->getOperand(I);
    if (MO.getMBB() == MBB)
      // The value register paired with this block is the preceding operand.
      return Phi->getOperand(I-1).getReg();
  }
  llvm_unreachable("invalid src basic block for this Phi node\n");
  return 0;
}
|
|
|
|
|
|
|
|
// This function tracks the source of the register through register copy.
// If BB1 and BB2 are non-NULL, we also track PHI instruction in BB2
// assuming that the control comes from BB1 into BB2.
// Returns the earliest virtual register found on the copy/PHI chain.
static unsigned getSrcVReg(unsigned Reg, MachineBasicBlock *BB1,
                           MachineBasicBlock *BB2, MachineRegisterInfo *MRI) {
  unsigned SrcReg = Reg;
  while (1) {
    unsigned NextReg = SrcReg;
    MachineInstr *Inst = MRI->getVRegDef(SrcReg);
    // Step through a PHI in BB2, taking the value flowing in from BB1.
    if (BB1 && Inst->getOpcode() == PPC::PHI && Inst->getParent() == BB2) {
      NextReg = getIncomingRegForBlock(Inst, BB1);
      // We track through PHI only once to avoid infinite loop.
      BB1 = nullptr;
    }
    // Step through a full register copy to its source operand.
    else if (Inst->isFullCopy())
      NextReg = Inst->getOperand(1).getReg();
    // Stop when the chain makes no progress or leaves virtual registers
    // (a physical register has no single traceable definition here).
    if (NextReg == SrcReg || !Register::isVirtualRegister(NextReg))
      break;
    SrcReg = NextReg;
  }
  return SrcReg;
}
|
|
|
|
|
[PowerPC] eliminate redundant compare instruction
If multiple conditional branches are executed based on the same comparison, we can execute multiple conditional branches based on the result of one comparison on PPC. For example,
if (a == 0) { ... }
else if (a < 0) { ... }
can be executed by one compare and two conditional branches instead of two pairs of a compare and a conditional branch.
This patch identifies a code sequence of the two pairs of a compare and a conditional branch and merge the compares if possible.
To maximize the opportunity, we do canonicalization of code sequence before merging compares.
For the above example, the input for this pass looks like:
cmplwi r3, 0
beq 0, .LBB0_3
cmpwi r3, -1
bgt 0, .LBB0_4
So, before merging two compares, we canonicalize it as
cmpwi r3, 0 ; cmplwi and cmpwi yield same result for beq
beq 0, .LBB0_3
cmpwi r3, 0 ; greather than -1 means greater or equal to 0
bge 0, .LBB0_4
The generated code should be
cmpwi r3, 0
beq 0, .LBB0_3
bge 0, .LBB0_4
Differential Revision: https://reviews.llvm.org/D37211
llvm-svn: 312514
2017-09-05 12:15:17 +08:00
|
|
|
static bool eligibleForCompareElimination(MachineBasicBlock &MBB,
|
2017-09-28 16:38:19 +08:00
|
|
|
MachineBasicBlock *&PredMBB,
|
|
|
|
MachineBasicBlock *&MBBtoMoveCmp,
|
[PowerPC] eliminate redundant compare instruction
If multiple conditional branches are executed based on the same comparison, we can execute multiple conditional branches based on the result of one comparison on PPC. For example,
if (a == 0) { ... }
else if (a < 0) { ... }
can be executed by one compare and two conditional branches instead of two pairs of a compare and a conditional branch.
This patch identifies a code sequence of the two pairs of a compare and a conditional branch and merge the compares if possible.
To maximize the opportunity, we do canonicalization of code sequence before merging compares.
For the above example, the input for this pass looks like:
cmplwi r3, 0
beq 0, .LBB0_3
cmpwi r3, -1
bgt 0, .LBB0_4
So, before merging two compares, we canonicalize it as
cmpwi r3, 0 ; cmplwi and cmpwi yield same result for beq
beq 0, .LBB0_3
cmpwi r3, 0 ; greather than -1 means greater or equal to 0
bge 0, .LBB0_4
The generated code should be
cmpwi r3, 0
beq 0, .LBB0_3
bge 0, .LBB0_4
Differential Revision: https://reviews.llvm.org/D37211
llvm-svn: 312514
2017-09-05 12:15:17 +08:00
|
|
|
MachineRegisterInfo *MRI) {
|
|
|
|
|
|
|
|
auto isEligibleBB = [&](MachineBasicBlock &BB) {
|
|
|
|
auto BII = BB.getFirstInstrTerminator();
|
|
|
|
// We optimize BBs ending with a conditional branch.
|
|
|
|
// We check only for BCC here, not BCCLR, because BCCLR
|
2018-07-31 03:41:25 +08:00
|
|
|
// will be formed only later in the pipeline.
|
[PowerPC] eliminate redundant compare instruction
If multiple conditional branches are executed based on the same comparison, we can execute multiple conditional branches based on the result of one comparison on PPC. For example,
if (a == 0) { ... }
else if (a < 0) { ... }
can be executed by one compare and two conditional branches instead of two pairs of a compare and a conditional branch.
This patch identifies a code sequence of the two pairs of a compare and a conditional branch and merge the compares if possible.
To maximize the opportunity, we do canonicalization of code sequence before merging compares.
For the above example, the input for this pass looks like:
cmplwi r3, 0
beq 0, .LBB0_3
cmpwi r3, -1
bgt 0, .LBB0_4
So, before merging two compares, we canonicalize it as
cmpwi r3, 0 ; cmplwi and cmpwi yield same result for beq
beq 0, .LBB0_3
cmpwi r3, 0 ; greather than -1 means greater or equal to 0
bge 0, .LBB0_4
The generated code should be
cmpwi r3, 0
beq 0, .LBB0_3
bge 0, .LBB0_4
Differential Revision: https://reviews.llvm.org/D37211
llvm-svn: 312514
2017-09-05 12:15:17 +08:00
|
|
|
if (BB.succ_size() == 2 &&
|
|
|
|
BII != BB.instr_end() &&
|
|
|
|
(*BII).getOpcode() == PPC::BCC &&
|
|
|
|
(*BII).getOperand(1).isReg()) {
|
|
|
|
// We optimize only if the condition code is used only by one BCC.
|
Apply llvm-prefer-register-over-unsigned from clang-tidy to LLVM
Summary:
This clang-tidy check is looking for unsigned integer variables whose initializer
starts with an implicit cast from llvm::Register and changes the type of the
variable to llvm::Register (dropping the llvm:: where possible).
Partial reverts in:
X86FrameLowering.cpp - Some functions return unsigned and arguably should be MCRegister
X86FixupLEAs.cpp - Some functions return unsigned and arguably should be MCRegister
X86FrameLowering.cpp - Some functions return unsigned and arguably should be MCRegister
HexagonBitSimplify.cpp - Function takes BitTracker::RegisterRef which appears to be unsigned&
MachineVerifier.cpp - Ambiguous operator==() given MCRegister and const Register
PPCFastISel.cpp - No Register::operator-=()
PeepholeOptimizer.cpp - TargetInstrInfo::optimizeLoadInstr() takes an unsigned&
MachineTraceMetrics.cpp - MachineTraceMetrics lacks a suitable constructor
Manual fixups in:
ARMFastISel.cpp - ARMEmitLoad() now takes a Register& instead of unsigned&
HexagonSplitDouble.cpp - Ternary operator was ambiguous between unsigned/Register
HexagonConstExtenders.cpp - Has a local class named Register, used llvm::Register instead of Register.
PPCFastISel.cpp - PPCEmitLoad() now takes a Register& instead of unsigned&
Depends on D65919
Reviewers: arsenm, bogner, craig.topper, RKSimon
Reviewed By: arsenm
Subscribers: RKSimon, craig.topper, lenary, aemerson, wuzish, jholewinski, MatzeB, qcolombet, dschuff, jyknight, dylanmckay, sdardis, nemanjai, jvesely, wdng, nhaehnle, sbc100, jgravelle-google, kristof.beyls, hiraditya, aheejin, kbarton, fedor.sergeev, javed.absar, asb, rbar, johnrusso, simoncook, apazos, sabuasal, niosHD, jrtc27, MaskRay, zzheng, edward-jones, atanasyan, rogfer01, MartinMosbeck, brucehoult, the_o, tpr, PkmX, jocewei, jsji, Petar.Avramovic, asbirlea, Jim, s.egerton, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D65962
llvm-svn: 369041
2019-08-16 03:22:08 +08:00
|
|
|
Register CndReg = (*BII).getOperand(1).getReg();
|
2019-08-02 07:27:28 +08:00
|
|
|
if (!Register::isVirtualRegister(CndReg) || !MRI->hasOneNonDBGUse(CndReg))
|
[PowerPC] eliminate redundant compare instruction
If multiple conditional branches are executed based on the same comparison, we can execute multiple conditional branches based on the result of one comparison on PPC. For example,
if (a == 0) { ... }
else if (a < 0) { ... }
can be executed by one compare and two conditional branches instead of two pairs of a compare and a conditional branch.
This patch identifies a code sequence of the two pairs of a compare and a conditional branch and merge the compares if possible.
To maximize the opportunity, we do canonicalization of code sequence before merging compares.
For the above example, the input for this pass looks like:
cmplwi r3, 0
beq 0, .LBB0_3
cmpwi r3, -1
bgt 0, .LBB0_4
So, before merging two compares, we canonicalize it as
cmpwi r3, 0 ; cmplwi and cmpwi yield same result for beq
beq 0, .LBB0_3
cmpwi r3, 0 ; greather than -1 means greater or equal to 0
bge 0, .LBB0_4
The generated code should be
cmpwi r3, 0
beq 0, .LBB0_3
bge 0, .LBB0_4
Differential Revision: https://reviews.llvm.org/D37211
llvm-svn: 312514
2017-09-05 12:15:17 +08:00
|
|
|
return false;
|
|
|
|
|
|
|
|
MachineInstr *CMPI = MRI->getVRegDef(CndReg);
|
2017-11-15 12:23:26 +08:00
|
|
|
// We assume compare and branch are in the same BB for ease of analysis.
|
|
|
|
if (CMPI->getParent() != &BB)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
// We skip this BB if a physical register is used in comparison.
|
[PowerPC] eliminate redundant compare instruction
If multiple conditional branches are executed based on the same comparison, we can execute multiple conditional branches based on the result of one comparison on PPC. For example,
if (a == 0) { ... }
else if (a < 0) { ... }
can be executed by one compare and two conditional branches instead of two pairs of a compare and a conditional branch.
This patch identifies a code sequence of the two pairs of a compare and a conditional branch and merge the compares if possible.
To maximize the opportunity, we do canonicalization of code sequence before merging compares.
For the above example, the input for this pass looks like:
cmplwi r3, 0
beq 0, .LBB0_3
cmpwi r3, -1
bgt 0, .LBB0_4
So, before merging two compares, we canonicalize it as
cmpwi r3, 0 ; cmplwi and cmpwi yield same result for beq
beq 0, .LBB0_3
cmpwi r3, 0 ; greather than -1 means greater or equal to 0
bge 0, .LBB0_4
The generated code should be
cmpwi r3, 0
beq 0, .LBB0_3
bge 0, .LBB0_4
Differential Revision: https://reviews.llvm.org/D37211
llvm-svn: 312514
2017-09-05 12:15:17 +08:00
|
|
|
for (MachineOperand &MO : CMPI->operands())
|
2019-08-02 07:27:28 +08:00
|
|
|
if (MO.isReg() && !Register::isVirtualRegister(MO.getReg()))
|
[PowerPC] eliminate redundant compare instruction
If multiple conditional branches are executed based on the same comparison, we can execute multiple conditional branches based on the result of one comparison on PPC. For example,
if (a == 0) { ... }
else if (a < 0) { ... }
can be executed by one compare and two conditional branches instead of two pairs of a compare and a conditional branch.
This patch identifies a code sequence of the two pairs of a compare and a conditional branch and merge the compares if possible.
To maximize the opportunity, we do canonicalization of code sequence before merging compares.
For the above example, the input for this pass looks like:
cmplwi r3, 0
beq 0, .LBB0_3
cmpwi r3, -1
bgt 0, .LBB0_4
So, before merging two compares, we canonicalize it as
cmpwi r3, 0 ; cmplwi and cmpwi yield same result for beq
beq 0, .LBB0_3
cmpwi r3, 0 ; greather than -1 means greater or equal to 0
bge 0, .LBB0_4
The generated code should be
cmpwi r3, 0
beq 0, .LBB0_3
bge 0, .LBB0_4
Differential Revision: https://reviews.llvm.org/D37211
llvm-svn: 312514
2017-09-05 12:15:17 +08:00
|
|
|
return false;
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
return false;
|
|
|
|
};
|
|
|
|
|
2017-09-28 16:38:19 +08:00
|
|
|
// If this BB has more than one successor, we can create a new BB and
|
|
|
|
// move the compare instruction in the new BB.
|
|
|
|
// So far, we do not move compare instruction to a BB having multiple
|
|
|
|
// successors to avoid potentially increasing code size.
|
|
|
|
auto isEligibleForMoveCmp = [](MachineBasicBlock &BB) {
|
|
|
|
return BB.succ_size() == 1;
|
|
|
|
};
|
|
|
|
|
|
|
|
if (!isEligibleBB(MBB))
|
[PowerPC] eliminate redundant compare instruction
If multiple conditional branches are executed based on the same comparison, we can execute multiple conditional branches based on the result of one comparison on PPC. For example,
if (a == 0) { ... }
else if (a < 0) { ... }
can be executed by one compare and two conditional branches instead of two pairs of a compare and a conditional branch.
This patch identifies a code sequence of the two pairs of a compare and a conditional branch and merge the compares if possible.
To maximize the opportunity, we do canonicalization of code sequence before merging compares.
For the above example, the input for this pass looks like:
cmplwi r3, 0
beq 0, .LBB0_3
cmpwi r3, -1
bgt 0, .LBB0_4
So, before merging two compares, we canonicalize it as
cmpwi r3, 0 ; cmplwi and cmpwi yield same result for beq
beq 0, .LBB0_3
cmpwi r3, 0 ; greather than -1 means greater or equal to 0
bge 0, .LBB0_4
The generated code should be
cmpwi r3, 0
beq 0, .LBB0_3
bge 0, .LBB0_4
Differential Revision: https://reviews.llvm.org/D37211
llvm-svn: 312514
2017-09-05 12:15:17 +08:00
|
|
|
return false;
|
|
|
|
|
2017-09-28 16:38:19 +08:00
|
|
|
unsigned NumPredBBs = MBB.pred_size();
|
|
|
|
if (NumPredBBs == 1) {
|
|
|
|
MachineBasicBlock *TmpMBB = *MBB.pred_begin();
|
|
|
|
if (isEligibleBB(*TmpMBB)) {
|
|
|
|
PredMBB = TmpMBB;
|
|
|
|
MBBtoMoveCmp = nullptr;
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
else if (NumPredBBs == 2) {
|
|
|
|
// We check for partially redundant case.
|
|
|
|
// So far, we support cases with only two predecessors
|
|
|
|
// to avoid increasing the number of instructions.
|
|
|
|
MachineBasicBlock::pred_iterator PI = MBB.pred_begin();
|
|
|
|
MachineBasicBlock *Pred1MBB = *PI;
|
|
|
|
MachineBasicBlock *Pred2MBB = *(PI+1);
|
|
|
|
|
|
|
|
if (isEligibleBB(*Pred1MBB) && isEligibleForMoveCmp(*Pred2MBB)) {
|
|
|
|
// We assume Pred1MBB is the BB containing the compare to be merged and
|
|
|
|
// Pred2MBB is the BB to which we will append a compare instruction.
|
|
|
|
// Hence we can proceed as is.
|
|
|
|
}
|
|
|
|
else if (isEligibleBB(*Pred2MBB) && isEligibleForMoveCmp(*Pred1MBB)) {
|
|
|
|
// We need to swap Pred1MBB and Pred2MBB to canonicalize.
|
|
|
|
std::swap(Pred1MBB, Pred2MBB);
|
|
|
|
}
|
|
|
|
else return false;
|
|
|
|
|
|
|
|
// Here, Pred2MBB is the BB to which we need to append a compare inst.
|
|
|
|
// We cannot move the compare instruction if operands are not available
|
|
|
|
// in Pred2MBB (i.e. defined in MBB by an instruction other than PHI).
|
|
|
|
MachineInstr *BI = &*MBB.getFirstInstrTerminator();
|
|
|
|
MachineInstr *CMPI = MRI->getVRegDef(BI->getOperand(1).getReg());
|
|
|
|
for (int I = 1; I <= 2; I++)
|
|
|
|
if (CMPI->getOperand(I).isReg()) {
|
|
|
|
MachineInstr *Inst = MRI->getVRegDef(CMPI->getOperand(I).getReg());
|
|
|
|
if (Inst->getParent() == &MBB && Inst->getOpcode() != PPC::PHI)
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
PredMBB = Pred1MBB;
|
|
|
|
MBBtoMoveCmp = Pred2MBB;
|
[PowerPC] eliminate redundant compare instruction
If multiple conditional branches are executed based on the same comparison, we can execute multiple conditional branches based on the result of one comparison on PPC. For example,
if (a == 0) { ... }
else if (a < 0) { ... }
can be executed by one compare and two conditional branches instead of two pairs of a compare and a conditional branch.
This patch identifies a code sequence of the two pairs of a compare and a conditional branch and merge the compares if possible.
To maximize the opportunity, we do canonicalization of code sequence before merging compares.
For the above example, the input for this pass looks like:
cmplwi r3, 0
beq 0, .LBB0_3
cmpwi r3, -1
bgt 0, .LBB0_4
So, before merging two compares, we canonicalize it as
cmpwi r3, 0 ; cmplwi and cmpwi yield same result for beq
beq 0, .LBB0_3
cmpwi r3, 0 ; greather than -1 means greater or equal to 0
bge 0, .LBB0_4
The generated code should be
cmpwi r3, 0
beq 0, .LBB0_3
bge 0, .LBB0_4
Differential Revision: https://reviews.llvm.org/D37211
llvm-svn: 312514
2017-09-05 12:15:17 +08:00
|
|
|
return true;
|
2017-09-28 16:38:19 +08:00
|
|
|
}
|
[PowerPC] eliminate redundant compare instruction
If multiple conditional branches are executed based on the same comparison, we can execute multiple conditional branches based on the result of one comparison on PPC. For example,
if (a == 0) { ... }
else if (a < 0) { ... }
can be executed by one compare and two conditional branches instead of two pairs of a compare and a conditional branch.
This patch identifies a code sequence of the two pairs of a compare and a conditional branch and merge the compares if possible.
To maximize the opportunity, we do canonicalization of code sequence before merging compares.
For the above example, the input for this pass looks like:
cmplwi r3, 0
beq 0, .LBB0_3
cmpwi r3, -1
bgt 0, .LBB0_4
So, before merging two compares, we canonicalize it as
cmpwi r3, 0 ; cmplwi and cmpwi yield same result for beq
beq 0, .LBB0_3
cmpwi r3, 0 ; greather than -1 means greater or equal to 0
bge 0, .LBB0_4
The generated code should be
cmpwi r3, 0
beq 0, .LBB0_3
bge 0, .LBB0_4
Differential Revision: https://reviews.llvm.org/D37211
llvm-svn: 312514
2017-09-05 12:15:17 +08:00
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2017-11-28 04:26:36 +08:00
|
|
|
// This function will iterate over the input map containing a pair of TOC save
|
2018-03-13 23:49:05 +08:00
|
|
|
// instruction and a flag. The flag will be set to false if the TOC save is
|
|
|
|
// proven redundant. This function will erase from the basic block all the TOC
|
|
|
|
// saves marked as redundant.
|
2017-11-28 04:26:36 +08:00
|
|
|
bool PPCMIPeephole::eliminateRedundantTOCSaves(
|
|
|
|
std::map<MachineInstr *, bool> &TOCSaves) {
|
|
|
|
bool Simplified = false;
|
|
|
|
int NumKept = 0;
|
|
|
|
for (auto TOCSave : TOCSaves) {
|
|
|
|
if (!TOCSave.second) {
|
|
|
|
TOCSave.first->eraseFromParent();
|
|
|
|
RemoveTOCSave++;
|
|
|
|
Simplified = true;
|
|
|
|
} else {
|
|
|
|
NumKept++;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (NumKept > 1)
|
|
|
|
MultiTOCSaves++;
|
|
|
|
|
|
|
|
return Simplified;
|
|
|
|
}
|
|
|
|
|
[PowerPC] eliminate redundant compare instruction
If multiple conditional branches are executed based on the same comparison, we can execute multiple conditional branches based on the result of one comparison on PPC. For example,
if (a == 0) { ... }
else if (a < 0) { ... }
can be executed by one compare and two conditional branches instead of two pairs of a compare and a conditional branch.
This patch identifies a code sequence of the two pairs of a compare and a conditional branch and merge the compares if possible.
To maximize the opportunity, we do canonicalization of code sequence before merging compares.
For the above example, the input for this pass looks like:
cmplwi r3, 0
beq 0, .LBB0_3
cmpwi r3, -1
bgt 0, .LBB0_4
So, before merging two compares, we canonicalize it as
cmpwi r3, 0 ; cmplwi and cmpwi yield same result for beq
beq 0, .LBB0_3
cmpwi r3, 0 ; greather than -1 means greater or equal to 0
bge 0, .LBB0_4
The generated code should be
cmpwi r3, 0
beq 0, .LBB0_3
bge 0, .LBB0_4
Differential Revision: https://reviews.llvm.org/D37211
llvm-svn: 312514
2017-09-05 12:15:17 +08:00
|
|
|
// If multiple conditional branches are executed based on the (essentially)
|
|
|
|
// same comparison, we merge compare instructions into one and make multiple
|
|
|
|
// conditional branches on this comparison.
|
|
|
|
// For example,
|
|
|
|
// if (a == 0) { ... }
|
|
|
|
// else if (a < 0) { ... }
|
|
|
|
// can be executed by one compare and two conditional branches instead of
|
|
|
|
// two pairs of a compare and a conditional branch.
|
|
|
|
//
|
|
|
|
// This method merges two compare instructions in two MBBs and modifies the
|
|
|
|
// compare and conditional branch instructions if needed.
|
|
|
|
// For the above example, the input for this pass looks like:
|
|
|
|
// cmplwi r3, 0
|
|
|
|
// beq 0, .LBB0_3
|
|
|
|
// cmpwi r3, -1
|
|
|
|
// bgt 0, .LBB0_4
|
|
|
|
// So, before merging two compares, we need to modify these instructions as
|
|
|
|
// cmpwi r3, 0 ; cmplwi and cmpwi yield same result for beq
|
|
|
|
// beq 0, .LBB0_3
|
|
|
|
// cmpwi r3, 0 ; greater than -1 means greater or equal to 0
|
|
|
|
// bge 0, .LBB0_4
|
|
|
|
|
|
|
|
bool PPCMIPeephole::eliminateRedundantCompare(void) {
|
|
|
|
bool Simplified = false;
|
|
|
|
|
|
|
|
for (MachineBasicBlock &MBB2 : *MF) {
|
2017-09-28 16:38:19 +08:00
|
|
|
MachineBasicBlock *MBB1 = nullptr, *MBBtoMoveCmp = nullptr;
|
|
|
|
|
|
|
|
// For fully redundant case, we select two basic blocks MBB1 and MBB2
|
|
|
|
// as an optimization target if
|
[PowerPC] eliminate redundant compare instruction
If multiple conditional branches are executed based on the same comparison, we can execute multiple conditional branches based on the result of one comparison on PPC. For example,
if (a == 0) { ... }
else if (a < 0) { ... }
can be executed by one compare and two conditional branches instead of two pairs of a compare and a conditional branch.
This patch identifies a code sequence of the two pairs of a compare and a conditional branch and merge the compares if possible.
To maximize the opportunity, we do canonicalization of code sequence before merging compares.
For the above example, the input for this pass looks like:
cmplwi r3, 0
beq 0, .LBB0_3
cmpwi r3, -1
bgt 0, .LBB0_4
So, before merging two compares, we canonicalize it as
cmpwi r3, 0 ; cmplwi and cmpwi yield same result for beq
beq 0, .LBB0_3
cmpwi r3, 0 ; greather than -1 means greater or equal to 0
bge 0, .LBB0_4
The generated code should be
cmpwi r3, 0
beq 0, .LBB0_3
bge 0, .LBB0_4
Differential Revision: https://reviews.llvm.org/D37211
llvm-svn: 312514
2017-09-05 12:15:17 +08:00
|
|
|
// - both MBBs end with a conditional branch,
|
|
|
|
// - MBB1 is the only predecessor of MBB2, and
|
|
|
|
// - compare does not take a physical register as a operand in both MBBs.
|
2017-09-28 16:38:19 +08:00
|
|
|
// In this case, eligibleForCompareElimination sets MBBtoMoveCmp nullptr.
|
|
|
|
//
|
|
|
|
// As partially redundant case, we additionally handle if MBB2 has one
|
|
|
|
// additional predecessor, which has only one successor (MBB2).
|
2017-11-15 12:23:26 +08:00
|
|
|
// In this case, we move the compare instruction originally in MBB2 into
|
2017-09-28 16:38:19 +08:00
|
|
|
// MBBtoMoveCmp. This partially redundant case is typically appear by
|
|
|
|
// compiling a while loop; here, MBBtoMoveCmp is the loop preheader.
|
|
|
|
//
|
|
|
|
// Overview of CFG of related basic blocks
|
|
|
|
// Fully redundant case Partially redundant case
|
|
|
|
// -------- ---------------- --------
|
|
|
|
// | MBB1 | (w/ 2 succ) | MBBtoMoveCmp | | MBB1 | (w/ 2 succ)
|
|
|
|
// -------- ---------------- --------
|
|
|
|
// | \ (w/ 1 succ) \ | \
|
|
|
|
// | \ \ | \
|
|
|
|
// | \ |
|
|
|
|
// -------- --------
|
|
|
|
// | MBB2 | (w/ 1 pred | MBB2 | (w/ 2 pred
|
|
|
|
// -------- and 2 succ) -------- and 2 succ)
|
|
|
|
// | \ | \
|
|
|
|
// | \ | \
|
|
|
|
//
|
|
|
|
if (!eligibleForCompareElimination(MBB2, MBB1, MBBtoMoveCmp, MRI))
|
[PowerPC] eliminate redundant compare instruction
If multiple conditional branches are executed based on the same comparison, we can execute multiple conditional branches based on the result of one comparison on PPC. For example,
if (a == 0) { ... }
else if (a < 0) { ... }
can be executed by one compare and two conditional branches instead of two pairs of a compare and a conditional branch.
This patch identifies a code sequence of the two pairs of a compare and a conditional branch and merge the compares if possible.
To maximize the opportunity, we do canonicalization of code sequence before merging compares.
For the above example, the input for this pass looks like:
cmplwi r3, 0
beq 0, .LBB0_3
cmpwi r3, -1
bgt 0, .LBB0_4
So, before merging two compares, we canonicalize it as
cmpwi r3, 0 ; cmplwi and cmpwi yield same result for beq
beq 0, .LBB0_3
cmpwi r3, 0 ; greather than -1 means greater or equal to 0
bge 0, .LBB0_4
The generated code should be
cmpwi r3, 0
beq 0, .LBB0_3
bge 0, .LBB0_4
Differential Revision: https://reviews.llvm.org/D37211
llvm-svn: 312514
2017-09-05 12:15:17 +08:00
|
|
|
continue;
|
|
|
|
|
|
|
|
MachineInstr *BI1 = &*MBB1->getFirstInstrTerminator();
|
|
|
|
MachineInstr *CMPI1 = MRI->getVRegDef(BI1->getOperand(1).getReg());
|
|
|
|
|
|
|
|
MachineInstr *BI2 = &*MBB2.getFirstInstrTerminator();
|
|
|
|
MachineInstr *CMPI2 = MRI->getVRegDef(BI2->getOperand(1).getReg());
|
2017-09-28 16:38:19 +08:00
|
|
|
bool IsPartiallyRedundant = (MBBtoMoveCmp != nullptr);
|
[PowerPC] eliminate redundant compare instruction
If multiple conditional branches are executed based on the same comparison, we can execute multiple conditional branches based on the result of one comparison on PPC. For example,
if (a == 0) { ... }
else if (a < 0) { ... }
can be executed by one compare and two conditional branches instead of two pairs of a compare and a conditional branch.
This patch identifies a code sequence of the two pairs of a compare and a conditional branch and merge the compares if possible.
To maximize the opportunity, we do canonicalization of code sequence before merging compares.
For the above example, the input for this pass looks like:
cmplwi r3, 0
beq 0, .LBB0_3
cmpwi r3, -1
bgt 0, .LBB0_4
So, before merging two compares, we canonicalize it as
cmpwi r3, 0 ; cmplwi and cmpwi yield same result for beq
beq 0, .LBB0_3
cmpwi r3, 0 ; greather than -1 means greater or equal to 0
bge 0, .LBB0_4
The generated code should be
cmpwi r3, 0
beq 0, .LBB0_3
bge 0, .LBB0_4
Differential Revision: https://reviews.llvm.org/D37211
llvm-svn: 312514
2017-09-05 12:15:17 +08:00
|
|
|
|
|
|
|
// We cannot optimize an unsupported compare opcode or
|
|
|
|
// a mix of 32-bit and 64-bit comparisons
|
|
|
|
if (!isSupportedCmpOp(CMPI1->getOpcode()) ||
|
|
|
|
!isSupportedCmpOp(CMPI2->getOpcode()) ||
|
|
|
|
is64bitCmpOp(CMPI1->getOpcode()) != is64bitCmpOp(CMPI2->getOpcode()))
|
|
|
|
continue;
|
|
|
|
|
|
|
|
unsigned NewOpCode = 0;
|
|
|
|
unsigned NewPredicate1 = 0, NewPredicate2 = 0;
|
|
|
|
int16_t Imm1 = 0, NewImm1 = 0, Imm2 = 0, NewImm2 = 0;
|
2017-09-28 16:38:19 +08:00
|
|
|
bool SwapOperands = false;
|
[PowerPC] eliminate redundant compare instruction
If multiple conditional branches are executed based on the same comparison, we can execute multiple conditional branches based on the result of one comparison on PPC. For example,
if (a == 0) { ... }
else if (a < 0) { ... }
can be executed by one compare and two conditional branches instead of two pairs of a compare and a conditional branch.
This patch identifies a code sequence of the two pairs of a compare and a conditional branch and merge the compares if possible.
To maximize the opportunity, we do canonicalization of code sequence before merging compares.
For the above example, the input for this pass looks like:
cmplwi r3, 0
beq 0, .LBB0_3
cmpwi r3, -1
bgt 0, .LBB0_4
So, before merging two compares, we canonicalize it as
cmpwi r3, 0 ; cmplwi and cmpwi yield same result for beq
beq 0, .LBB0_3
cmpwi r3, 0 ; greather than -1 means greater or equal to 0
bge 0, .LBB0_4
The generated code should be
cmpwi r3, 0
beq 0, .LBB0_3
bge 0, .LBB0_4
Differential Revision: https://reviews.llvm.org/D37211
llvm-svn: 312514
2017-09-05 12:15:17 +08:00
|
|
|
|
|
|
|
if (CMPI1->getOpcode() != CMPI2->getOpcode()) {
|
|
|
|
// Typically, unsigned comparison is used for equality check, but
|
|
|
|
// we replace it with a signed comparison if the comparison
|
|
|
|
// to be merged is a signed comparison.
|
|
|
|
// In other cases of opcode mismatch, we cannot optimize this.
|
2017-12-20 13:18:19 +08:00
|
|
|
|
|
|
|
// We cannot change opcode when comparing against an immediate
|
|
|
|
// if the most significant bit of the immediate is one
|
|
|
|
// due to the difference in sign extension.
|
|
|
|
auto CmpAgainstImmWithSignBit = [](MachineInstr *I) {
|
|
|
|
if (!I->getOperand(2).isImm())
|
|
|
|
return false;
|
|
|
|
int16_t Imm = (int16_t)I->getOperand(2).getImm();
|
|
|
|
return Imm < 0;
|
|
|
|
};
|
|
|
|
|
|
|
|
if (isEqOrNe(BI2) && !CmpAgainstImmWithSignBit(CMPI2) &&
|
[PowerPC] eliminate redundant compare instruction
If multiple conditional branches are executed based on the same comparison, we can execute multiple conditional branches based on the result of one comparison on PPC. For example,
if (a == 0) { ... }
else if (a < 0) { ... }
can be executed by one compare and two conditional branches instead of two pairs of a compare and a conditional branch.
This patch identifies a code sequence of the two pairs of a compare and a conditional branch and merge the compares if possible.
To maximize the opportunity, we do canonicalization of code sequence before merging compares.
For the above example, the input for this pass looks like:
cmplwi r3, 0
beq 0, .LBB0_3
cmpwi r3, -1
bgt 0, .LBB0_4
So, before merging two compares, we canonicalize it as
cmpwi r3, 0 ; cmplwi and cmpwi yield same result for beq
beq 0, .LBB0_3
cmpwi r3, 0 ; greather than -1 means greater or equal to 0
bge 0, .LBB0_4
The generated code should be
cmpwi r3, 0
beq 0, .LBB0_3
bge 0, .LBB0_4
Differential Revision: https://reviews.llvm.org/D37211
llvm-svn: 312514
2017-09-05 12:15:17 +08:00
|
|
|
CMPI1->getOpcode() == getSignedCmpOpCode(CMPI2->getOpcode()))
|
|
|
|
NewOpCode = CMPI1->getOpcode();
|
2017-12-20 13:18:19 +08:00
|
|
|
else if (isEqOrNe(BI1) && !CmpAgainstImmWithSignBit(CMPI1) &&
|
[PowerPC] eliminate redundant compare instruction
If multiple conditional branches are executed based on the same comparison, we can execute multiple conditional branches based on the result of one comparison on PPC. For example,
if (a == 0) { ... }
else if (a < 0) { ... }
can be executed by one compare and two conditional branches instead of two pairs of a compare and a conditional branch.
This patch identifies a code sequence of the two pairs of a compare and a conditional branch and merge the compares if possible.
To maximize the opportunity, we do canonicalization of code sequence before merging compares.
For the above example, the input for this pass looks like:
cmplwi r3, 0
beq 0, .LBB0_3
cmpwi r3, -1
bgt 0, .LBB0_4
So, before merging two compares, we canonicalize it as
cmpwi r3, 0 ; cmplwi and cmpwi yield same result for beq
beq 0, .LBB0_3
cmpwi r3, 0 ; greather than -1 means greater or equal to 0
bge 0, .LBB0_4
The generated code should be
cmpwi r3, 0
beq 0, .LBB0_3
bge 0, .LBB0_4
Differential Revision: https://reviews.llvm.org/D37211
llvm-svn: 312514
2017-09-05 12:15:17 +08:00
|
|
|
getSignedCmpOpCode(CMPI1->getOpcode()) == CMPI2->getOpcode())
|
|
|
|
NewOpCode = CMPI2->getOpcode();
|
|
|
|
else continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (CMPI1->getOperand(2).isReg() && CMPI2->getOperand(2).isReg()) {
|
|
|
|
// In case of comparisons between two registers, these two registers
|
|
|
|
// must be same to merge two comparisons.
|
2017-09-28 16:38:19 +08:00
|
|
|
unsigned Cmp1Operand1 = getSrcVReg(CMPI1->getOperand(1).getReg(),
|
|
|
|
nullptr, nullptr, MRI);
|
|
|
|
unsigned Cmp1Operand2 = getSrcVReg(CMPI1->getOperand(2).getReg(),
|
|
|
|
nullptr, nullptr, MRI);
|
|
|
|
unsigned Cmp2Operand1 = getSrcVReg(CMPI2->getOperand(1).getReg(),
|
|
|
|
MBB1, &MBB2, MRI);
|
|
|
|
unsigned Cmp2Operand2 = getSrcVReg(CMPI2->getOperand(2).getReg(),
|
|
|
|
MBB1, &MBB2, MRI);
|
|
|
|
|
[PowerPC] eliminate redundant compare instruction
If multiple conditional branches are executed based on the same comparison, we can execute multiple conditional branches based on the result of one comparison on PPC. For example,
if (a == 0) { ... }
else if (a < 0) { ... }
can be executed by one compare and two conditional branches instead of two pairs of a compare and a conditional branch.
This patch identifies a code sequence of the two pairs of a compare and a conditional branch and merge the compares if possible.
To maximize the opportunity, we do canonicalization of code sequence before merging compares.
For the above example, the input for this pass looks like:
cmplwi r3, 0
beq 0, .LBB0_3
cmpwi r3, -1
bgt 0, .LBB0_4
So, before merging two compares, we canonicalize it as
cmpwi r3, 0 ; cmplwi and cmpwi yield same result for beq
beq 0, .LBB0_3
cmpwi r3, 0 ; greather than -1 means greater or equal to 0
bge 0, .LBB0_4
The generated code should be
cmpwi r3, 0
beq 0, .LBB0_3
bge 0, .LBB0_4
Differential Revision: https://reviews.llvm.org/D37211
llvm-svn: 312514
2017-09-05 12:15:17 +08:00
|
|
|
if (Cmp1Operand1 == Cmp2Operand1 && Cmp1Operand2 == Cmp2Operand2) {
|
|
|
|
// Same pair of registers in the same order; ready to merge as is.
|
|
|
|
}
|
|
|
|
else if (Cmp1Operand1 == Cmp2Operand2 && Cmp1Operand2 == Cmp2Operand1) {
|
|
|
|
// Same pair of registers in different order.
|
|
|
|
// We reverse the predicate to merge compare instructions.
|
|
|
|
PPC::Predicate Pred = (PPC::Predicate)BI2->getOperand(0).getImm();
|
|
|
|
NewPredicate2 = (unsigned)PPC::getSwappedPredicate(Pred);
|
2017-09-28 16:38:19 +08:00
|
|
|
// In case of partial redundancy, we need to swap operands
|
|
|
|
// in another compare instruction.
|
|
|
|
SwapOperands = true;
|
[PowerPC] eliminate redundant compare instruction
If multiple conditional branches are executed based on the same comparison, we can execute multiple conditional branches based on the result of one comparison on PPC. For example,
if (a == 0) { ... }
else if (a < 0) { ... }
can be executed by one compare and two conditional branches instead of two pairs of a compare and a conditional branch.
This patch identifies a code sequence of the two pairs of a compare and a conditional branch and merge the compares if possible.
To maximize the opportunity, we do canonicalization of code sequence before merging compares.
For the above example, the input for this pass looks like:
cmplwi r3, 0
beq 0, .LBB0_3
cmpwi r3, -1
bgt 0, .LBB0_4
So, before merging two compares, we canonicalize it as
cmpwi r3, 0 ; cmplwi and cmpwi yield same result for beq
beq 0, .LBB0_3
cmpwi r3, 0 ; greather than -1 means greater or equal to 0
bge 0, .LBB0_4
The generated code should be
cmpwi r3, 0
beq 0, .LBB0_3
bge 0, .LBB0_4
Differential Revision: https://reviews.llvm.org/D37211
llvm-svn: 312514
2017-09-05 12:15:17 +08:00
|
|
|
}
|
|
|
|
else continue;
|
|
|
|
}
|
2017-10-03 15:28:58 +08:00
|
|
|
else if (CMPI1->getOperand(2).isImm() && CMPI2->getOperand(2).isImm()) {
|
[PowerPC] eliminate redundant compare instruction
If multiple conditional branches are executed based on the same comparison, we can execute multiple conditional branches based on the result of one comparison on PPC. For example,
if (a == 0) { ... }
else if (a < 0) { ... }
can be executed by one compare and two conditional branches instead of two pairs of a compare and a conditional branch.
This patch identifies a code sequence of the two pairs of a compare and a conditional branch and merge the compares if possible.
To maximize the opportunity, we do canonicalization of code sequence before merging compares.
For the above example, the input for this pass looks like:
cmplwi r3, 0
beq 0, .LBB0_3
cmpwi r3, -1
bgt 0, .LBB0_4
So, before merging two compares, we canonicalize it as
cmpwi r3, 0 ; cmplwi and cmpwi yield same result for beq
beq 0, .LBB0_3
cmpwi r3, 0 ; greather than -1 means greater or equal to 0
bge 0, .LBB0_4
The generated code should be
cmpwi r3, 0
beq 0, .LBB0_3
bge 0, .LBB0_4
Differential Revision: https://reviews.llvm.org/D37211
llvm-svn: 312514
2017-09-05 12:15:17 +08:00
|
|
|
// In case of comparisons between a register and an immediate,
|
|
|
|
// the operand register must be same for two compare instructions.
|
2017-09-28 16:38:19 +08:00
|
|
|
unsigned Cmp1Operand1 = getSrcVReg(CMPI1->getOperand(1).getReg(),
|
|
|
|
nullptr, nullptr, MRI);
|
|
|
|
unsigned Cmp2Operand1 = getSrcVReg(CMPI2->getOperand(1).getReg(),
|
|
|
|
MBB1, &MBB2, MRI);
|
|
|
|
if (Cmp1Operand1 != Cmp2Operand1)
|
[PowerPC] eliminate redundant compare instruction
If multiple conditional branches are executed based on the same comparison, we can execute multiple conditional branches based on the result of one comparison on PPC. For example,
if (a == 0) { ... }
else if (a < 0) { ... }
can be executed by one compare and two conditional branches instead of two pairs of a compare and a conditional branch.
This patch identifies a code sequence of the two pairs of a compare and a conditional branch and merge the compares if possible.
To maximize the opportunity, we do canonicalization of code sequence before merging compares.
For the above example, the input for this pass looks like:
cmplwi r3, 0
beq 0, .LBB0_3
cmpwi r3, -1
bgt 0, .LBB0_4
So, before merging two compares, we canonicalize it as
cmpwi r3, 0 ; cmplwi and cmpwi yield same result for beq
beq 0, .LBB0_3
cmpwi r3, 0 ; greather than -1 means greater or equal to 0
bge 0, .LBB0_4
The generated code should be
cmpwi r3, 0
beq 0, .LBB0_3
bge 0, .LBB0_4
Differential Revision: https://reviews.llvm.org/D37211
llvm-svn: 312514
2017-09-05 12:15:17 +08:00
|
|
|
continue;
|
|
|
|
|
|
|
|
NewImm1 = Imm1 = (int16_t)CMPI1->getOperand(2).getImm();
|
|
|
|
NewImm2 = Imm2 = (int16_t)CMPI2->getOperand(2).getImm();
|
|
|
|
|
|
|
|
// If immediate are not same, we try to adjust by changing predicate;
|
|
|
|
// e.g. GT imm means GE (imm+1).
|
|
|
|
if (Imm1 != Imm2 && (!isEqOrNe(BI2) || !isEqOrNe(BI1))) {
|
|
|
|
int Diff = Imm1 - Imm2;
|
|
|
|
if (Diff < -2 || Diff > 2)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
unsigned PredToInc1 = getPredicateToIncImm(BI1, CMPI1);
|
|
|
|
unsigned PredToDec1 = getPredicateToDecImm(BI1, CMPI1);
|
|
|
|
unsigned PredToInc2 = getPredicateToIncImm(BI2, CMPI2);
|
|
|
|
unsigned PredToDec2 = getPredicateToDecImm(BI2, CMPI2);
|
|
|
|
if (Diff == 2) {
|
|
|
|
if (PredToInc2 && PredToDec1) {
|
|
|
|
NewPredicate2 = PredToInc2;
|
|
|
|
NewPredicate1 = PredToDec1;
|
|
|
|
NewImm2++;
|
|
|
|
NewImm1--;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
else if (Diff == 1) {
|
|
|
|
if (PredToInc2) {
|
|
|
|
NewImm2++;
|
|
|
|
NewPredicate2 = PredToInc2;
|
|
|
|
}
|
|
|
|
else if (PredToDec1) {
|
|
|
|
NewImm1--;
|
|
|
|
NewPredicate1 = PredToDec1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
else if (Diff == -1) {
|
|
|
|
if (PredToDec2) {
|
|
|
|
NewImm2--;
|
|
|
|
NewPredicate2 = PredToDec2;
|
|
|
|
}
|
|
|
|
else if (PredToInc1) {
|
|
|
|
NewImm1++;
|
|
|
|
NewPredicate1 = PredToInc1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
else if (Diff == -2) {
|
|
|
|
if (PredToDec2 && PredToInc1) {
|
|
|
|
NewPredicate2 = PredToDec2;
|
|
|
|
NewPredicate1 = PredToInc1;
|
|
|
|
NewImm2--;
|
|
|
|
NewImm1++;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-06-13 16:54:13 +08:00
|
|
|
// We cannot merge two compares if the immediates are not same.
|
[PowerPC] eliminate redundant compare instruction
If multiple conditional branches are executed based on the same comparison, we can execute multiple conditional branches based on the result of one comparison on PPC. For example,
if (a == 0) { ... }
else if (a < 0) { ... }
can be executed by one compare and two conditional branches instead of two pairs of a compare and a conditional branch.
This patch identifies a code sequence of the two pairs of a compare and a conditional branch and merge the compares if possible.
To maximize the opportunity, we do canonicalization of code sequence before merging compares.
For the above example, the input for this pass looks like:
cmplwi r3, 0
beq 0, .LBB0_3
cmpwi r3, -1
bgt 0, .LBB0_4
So, before merging two compares, we canonicalize it as
cmpwi r3, 0 ; cmplwi and cmpwi yield same result for beq
beq 0, .LBB0_3
cmpwi r3, 0 ; greather than -1 means greater or equal to 0
bge 0, .LBB0_4
The generated code should be
cmpwi r3, 0
beq 0, .LBB0_3
bge 0, .LBB0_4
Differential Revision: https://reviews.llvm.org/D37211
llvm-svn: 312514
2017-09-05 12:15:17 +08:00
|
|
|
if (NewImm2 != NewImm1)
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2018-05-14 20:53:11 +08:00
|
|
|
LLVM_DEBUG(dbgs() << "Optimize two pairs of compare and branch:\n");
|
|
|
|
LLVM_DEBUG(CMPI1->dump());
|
|
|
|
LLVM_DEBUG(BI1->dump());
|
|
|
|
LLVM_DEBUG(CMPI2->dump());
|
|
|
|
LLVM_DEBUG(BI2->dump());
|
[PowerPC] eliminate redundant compare instruction
If multiple conditional branches are executed based on the same comparison, we can execute multiple conditional branches based on the result of one comparison on PPC. For example,
if (a == 0) { ... }
else if (a < 0) { ... }
can be executed by one compare and two conditional branches instead of two pairs of a compare and a conditional branch.
This patch identifies a code sequence of the two pairs of a compare and a conditional branch and merge the compares if possible.
To maximize the opportunity, we do canonicalization of code sequence before merging compares.
For the above example, the input for this pass looks like:
cmplwi r3, 0
beq 0, .LBB0_3
cmpwi r3, -1
bgt 0, .LBB0_4
So, before merging two compares, we canonicalize it as
cmpwi r3, 0 ; cmplwi and cmpwi yield same result for beq
beq 0, .LBB0_3
cmpwi r3, 0 ; greather than -1 means greater or equal to 0
bge 0, .LBB0_4
The generated code should be
cmpwi r3, 0
beq 0, .LBB0_3
bge 0, .LBB0_4
Differential Revision: https://reviews.llvm.org/D37211
llvm-svn: 312514
2017-09-05 12:15:17 +08:00
|
|
|
|
|
|
|
// We adjust opcode, predicates and immediate as we determined above.
|
|
|
|
if (NewOpCode != 0 && NewOpCode != CMPI1->getOpcode()) {
|
|
|
|
CMPI1->setDesc(TII->get(NewOpCode));
|
|
|
|
}
|
|
|
|
if (NewPredicate1) {
|
|
|
|
BI1->getOperand(0).setImm(NewPredicate1);
|
|
|
|
}
|
|
|
|
if (NewPredicate2) {
|
|
|
|
BI2->getOperand(0).setImm(NewPredicate2);
|
|
|
|
}
|
|
|
|
if (NewImm1 != Imm1) {
|
|
|
|
CMPI1->getOperand(2).setImm(NewImm1);
|
|
|
|
}
|
|
|
|
|
2017-09-28 16:38:19 +08:00
|
|
|
if (IsPartiallyRedundant) {
|
|
|
|
// We touch up the compare instruction in MBB2 and move it to
|
|
|
|
// a previous BB to handle partially redundant case.
|
|
|
|
if (SwapOperands) {
|
Apply llvm-prefer-register-over-unsigned from clang-tidy to LLVM
Summary:
This clang-tidy check is looking for unsigned integer variables whose initializer
starts with an implicit cast from llvm::Register and changes the type of the
variable to llvm::Register (dropping the llvm:: where possible).
Partial reverts in:
X86FrameLowering.cpp - Some functions return unsigned and arguably should be MCRegister
X86FixupLEAs.cpp - Some functions return unsigned and arguably should be MCRegister
X86FrameLowering.cpp - Some functions return unsigned and arguably should be MCRegister
HexagonBitSimplify.cpp - Function takes BitTracker::RegisterRef which appears to be unsigned&
MachineVerifier.cpp - Ambiguous operator==() given MCRegister and const Register
PPCFastISel.cpp - No Register::operator-=()
PeepholeOptimizer.cpp - TargetInstrInfo::optimizeLoadInstr() takes an unsigned&
MachineTraceMetrics.cpp - MachineTraceMetrics lacks a suitable constructor
Manual fixups in:
ARMFastISel.cpp - ARMEmitLoad() now takes a Register& instead of unsigned&
HexagonSplitDouble.cpp - Ternary operator was ambiguous between unsigned/Register
HexagonConstExtenders.cpp - Has a local class named Register, used llvm::Register instead of Register.
PPCFastISel.cpp - PPCEmitLoad() now takes a Register& instead of unsigned&
Depends on D65919
Reviewers: arsenm, bogner, craig.topper, RKSimon
Reviewed By: arsenm
Subscribers: RKSimon, craig.topper, lenary, aemerson, wuzish, jholewinski, MatzeB, qcolombet, dschuff, jyknight, dylanmckay, sdardis, nemanjai, jvesely, wdng, nhaehnle, sbc100, jgravelle-google, kristof.beyls, hiraditya, aheejin, kbarton, fedor.sergeev, javed.absar, asb, rbar, johnrusso, simoncook, apazos, sabuasal, niosHD, jrtc27, MaskRay, zzheng, edward-jones, atanasyan, rogfer01, MartinMosbeck, brucehoult, the_o, tpr, PkmX, jocewei, jsji, Petar.Avramovic, asbirlea, Jim, s.egerton, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D65962
llvm-svn: 369041
2019-08-16 03:22:08 +08:00
|
|
|
Register Op1 = CMPI2->getOperand(1).getReg();
|
|
|
|
Register Op2 = CMPI2->getOperand(2).getReg();
|
2017-09-28 16:38:19 +08:00
|
|
|
CMPI2->getOperand(1).setReg(Op2);
|
|
|
|
CMPI2->getOperand(2).setReg(Op1);
|
|
|
|
}
|
|
|
|
if (NewImm2 != Imm2)
|
|
|
|
CMPI2->getOperand(2).setImm(NewImm2);
|
|
|
|
|
|
|
|
for (int I = 1; I <= 2; I++) {
|
|
|
|
if (CMPI2->getOperand(I).isReg()) {
|
|
|
|
MachineInstr *Inst = MRI->getVRegDef(CMPI2->getOperand(I).getReg());
|
|
|
|
if (Inst->getParent() != &MBB2)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
assert(Inst->getOpcode() == PPC::PHI &&
|
|
|
|
"We cannot support if an operand comes from this BB.");
|
|
|
|
unsigned SrcReg = getIncomingRegForBlock(Inst, MBBtoMoveCmp);
|
|
|
|
CMPI2->getOperand(I).setReg(SrcReg);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
auto I = MachineBasicBlock::iterator(MBBtoMoveCmp->getFirstTerminator());
|
|
|
|
MBBtoMoveCmp->splice(I, &MBB2, MachineBasicBlock::iterator(CMPI2));
|
|
|
|
|
|
|
|
DebugLoc DL = CMPI2->getDebugLoc();
|
Apply llvm-prefer-register-over-unsigned from clang-tidy to LLVM
Summary:
This clang-tidy check is looking for unsigned integer variables whose initializer
starts with an implicit cast from llvm::Register and changes the type of the
variable to llvm::Register (dropping the llvm:: where possible).
Partial reverts in:
X86FrameLowering.cpp - Some functions return unsigned and arguably should be MCRegister
X86FixupLEAs.cpp - Some functions return unsigned and arguably should be MCRegister
X86FrameLowering.cpp - Some functions return unsigned and arguably should be MCRegister
HexagonBitSimplify.cpp - Function takes BitTracker::RegisterRef which appears to be unsigned&
MachineVerifier.cpp - Ambiguous operator==() given MCRegister and const Register
PPCFastISel.cpp - No Register::operator-=()
PeepholeOptimizer.cpp - TargetInstrInfo::optimizeLoadInstr() takes an unsigned&
MachineTraceMetrics.cpp - MachineTraceMetrics lacks a suitable constructor
Manual fixups in:
ARMFastISel.cpp - ARMEmitLoad() now takes a Register& instead of unsigned&
HexagonSplitDouble.cpp - Ternary operator was ambiguous between unsigned/Register
HexagonConstExtenders.cpp - Has a local class named Register, used llvm::Register instead of Register.
PPCFastISel.cpp - PPCEmitLoad() now takes a Register& instead of unsigned&
Depends on D65919
Reviewers: arsenm, bogner, craig.topper, RKSimon
Reviewed By: arsenm
Subscribers: RKSimon, craig.topper, lenary, aemerson, wuzish, jholewinski, MatzeB, qcolombet, dschuff, jyknight, dylanmckay, sdardis, nemanjai, jvesely, wdng, nhaehnle, sbc100, jgravelle-google, kristof.beyls, hiraditya, aheejin, kbarton, fedor.sergeev, javed.absar, asb, rbar, johnrusso, simoncook, apazos, sabuasal, niosHD, jrtc27, MaskRay, zzheng, edward-jones, atanasyan, rogfer01, MartinMosbeck, brucehoult, the_o, tpr, PkmX, jocewei, jsji, Petar.Avramovic, asbirlea, Jim, s.egerton, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D65962
llvm-svn: 369041
2019-08-16 03:22:08 +08:00
|
|
|
Register NewVReg = MRI->createVirtualRegister(&PPC::CRRCRegClass);
|
2017-09-28 16:38:19 +08:00
|
|
|
BuildMI(MBB2, MBB2.begin(), DL,
|
|
|
|
TII->get(PPC::PHI), NewVReg)
|
|
|
|
.addReg(BI1->getOperand(1).getReg()).addMBB(MBB1)
|
|
|
|
.addReg(BI2->getOperand(1).getReg()).addMBB(MBBtoMoveCmp);
|
|
|
|
BI2->getOperand(1).setReg(NewVReg);
|
|
|
|
}
|
|
|
|
else {
|
|
|
|
// We finally eliminate compare instruction in MBB2.
|
|
|
|
BI2->getOperand(1).setReg(BI1->getOperand(1).getReg());
|
|
|
|
CMPI2->eraseFromParent();
|
|
|
|
}
|
[PowerPC] eliminate redundant compare instruction
If multiple conditional branches are executed based on the same comparison, we can execute multiple conditional branches based on the result of one comparison on PPC. For example,
if (a == 0) { ... }
else if (a < 0) { ... }
can be executed by one compare and two conditional branches instead of two pairs of a compare and a conditional branch.
This patch identifies a code sequence of the two pairs of a compare and a conditional branch and merge the compares if possible.
To maximize the opportunity, we do canonicalization of code sequence before merging compares.
For the above example, the input for this pass looks like:
cmplwi r3, 0
beq 0, .LBB0_3
cmpwi r3, -1
bgt 0, .LBB0_4
So, before merging two compares, we canonicalize it as
cmpwi r3, 0 ; cmplwi and cmpwi yield same result for beq
beq 0, .LBB0_3
cmpwi r3, 0 ; greather than -1 means greater or equal to 0
bge 0, .LBB0_4
The generated code should be
cmpwi r3, 0
beq 0, .LBB0_3
bge 0, .LBB0_4
Differential Revision: https://reviews.llvm.org/D37211
llvm-svn: 312514
2017-09-05 12:15:17 +08:00
|
|
|
BI2->getOperand(1).setIsKill(true);
|
|
|
|
BI1->getOperand(1).setIsKill(false);
|
|
|
|
|
2018-05-14 20:53:11 +08:00
|
|
|
LLVM_DEBUG(dbgs() << "into a compare and two branches:\n");
|
|
|
|
LLVM_DEBUG(CMPI1->dump());
|
|
|
|
LLVM_DEBUG(BI1->dump());
|
|
|
|
LLVM_DEBUG(BI2->dump());
|
2017-09-28 16:38:19 +08:00
|
|
|
if (IsPartiallyRedundant) {
|
2018-05-14 20:53:11 +08:00
|
|
|
LLVM_DEBUG(dbgs() << "The following compare is moved into "
|
|
|
|
<< printMBBReference(*MBBtoMoveCmp)
|
|
|
|
<< " to handle partial redundancy.\n");
|
|
|
|
LLVM_DEBUG(CMPI2->dump());
|
2017-09-28 16:38:19 +08:00
|
|
|
}
|
[PowerPC] eliminate redundant compare instruction
If multiple conditional branches are executed based on the same comparison, we can execute multiple conditional branches based on the result of one comparison on PPC. For example,
if (a == 0) { ... }
else if (a < 0) { ... }
can be executed by one compare and two conditional branches instead of two pairs of a compare and a conditional branch.
This patch identifies a code sequence of the two pairs of a compare and a conditional branch and merge the compares if possible.
To maximize the opportunity, we do canonicalization of code sequence before merging compares.
For the above example, the input for this pass looks like:
cmplwi r3, 0
beq 0, .LBB0_3
cmpwi r3, -1
bgt 0, .LBB0_4
So, before merging two compares, we canonicalize it as
cmpwi r3, 0 ; cmplwi and cmpwi yield same result for beq
beq 0, .LBB0_3
cmpwi r3, 0 ; greather than -1 means greater or equal to 0
bge 0, .LBB0_4
The generated code should be
cmpwi r3, 0
beq 0, .LBB0_3
bge 0, .LBB0_4
Differential Revision: https://reviews.llvm.org/D37211
llvm-svn: 312514
2017-09-05 12:15:17 +08:00
|
|
|
|
|
|
|
Simplified = true;
|
|
|
|
}
|
|
|
|
|
2015-11-11 05:38:26 +08:00
|
|
|
return Simplified;
|
|
|
|
}
|
|
|
|
|
2019-06-26 09:34:37 +08:00
|
|
|
// We miss the opportunity to emit an RLDIC when lowering jump tables
|
|
|
|
// since ISEL sees only a single basic block. When selecting, the clear
|
|
|
|
// and shift left will be in different blocks.
|
2019-06-26 13:25:16 +08:00
|
|
|
bool PPCMIPeephole::emitRLDICWhenLoweringJumpTables(MachineInstr &MI) {
|
2019-06-26 09:34:37 +08:00
|
|
|
if (MI.getOpcode() != PPC::RLDICR)
|
2019-06-26 13:25:16 +08:00
|
|
|
return false;
|
2019-06-26 09:34:37 +08:00
|
|
|
|
Apply llvm-prefer-register-over-unsigned from clang-tidy to LLVM
Summary:
This clang-tidy check is looking for unsigned integer variables whose initializer
starts with an implicit cast from llvm::Register and changes the type of the
variable to llvm::Register (dropping the llvm:: where possible).
Partial reverts in:
X86FrameLowering.cpp - Some functions return unsigned and arguably should be MCRegister
X86FixupLEAs.cpp - Some functions return unsigned and arguably should be MCRegister
X86FrameLowering.cpp - Some functions return unsigned and arguably should be MCRegister
HexagonBitSimplify.cpp - Function takes BitTracker::RegisterRef which appears to be unsigned&
MachineVerifier.cpp - Ambiguous operator==() given MCRegister and const Register
PPCFastISel.cpp - No Register::operator-=()
PeepholeOptimizer.cpp - TargetInstrInfo::optimizeLoadInstr() takes an unsigned&
MachineTraceMetrics.cpp - MachineTraceMetrics lacks a suitable constructor
Manual fixups in:
ARMFastISel.cpp - ARMEmitLoad() now takes a Register& instead of unsigned&
HexagonSplitDouble.cpp - Ternary operator was ambiguous between unsigned/Register
HexagonConstExtenders.cpp - Has a local class named Register, used llvm::Register instead of Register.
PPCFastISel.cpp - PPCEmitLoad() now takes a Register& instead of unsigned&
Depends on D65919
Reviewers: arsenm, bogner, craig.topper, RKSimon
Reviewed By: arsenm
Subscribers: RKSimon, craig.topper, lenary, aemerson, wuzish, jholewinski, MatzeB, qcolombet, dschuff, jyknight, dylanmckay, sdardis, nemanjai, jvesely, wdng, nhaehnle, sbc100, jgravelle-google, kristof.beyls, hiraditya, aheejin, kbarton, fedor.sergeev, javed.absar, asb, rbar, johnrusso, simoncook, apazos, sabuasal, niosHD, jrtc27, MaskRay, zzheng, edward-jones, atanasyan, rogfer01, MartinMosbeck, brucehoult, the_o, tpr, PkmX, jocewei, jsji, Petar.Avramovic, asbirlea, Jim, s.egerton, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D65962
llvm-svn: 369041
2019-08-16 03:22:08 +08:00
|
|
|
Register SrcReg = MI.getOperand(1).getReg();
|
2019-08-02 07:27:28 +08:00
|
|
|
if (!Register::isVirtualRegister(SrcReg))
|
2019-06-26 13:25:16 +08:00
|
|
|
return false;
|
2019-06-26 09:34:37 +08:00
|
|
|
|
|
|
|
MachineInstr *SrcMI = MRI->getVRegDef(SrcReg);
|
|
|
|
if (SrcMI->getOpcode() != PPC::RLDICL)
|
2019-06-26 13:25:16 +08:00
|
|
|
return false;
|
2019-06-26 09:34:37 +08:00
|
|
|
|
|
|
|
MachineOperand MOpSHSrc = SrcMI->getOperand(2);
|
|
|
|
MachineOperand MOpMBSrc = SrcMI->getOperand(3);
|
|
|
|
MachineOperand MOpSHMI = MI.getOperand(2);
|
|
|
|
MachineOperand MOpMEMI = MI.getOperand(3);
|
|
|
|
if (!(MOpSHSrc.isImm() && MOpMBSrc.isImm() && MOpSHMI.isImm() &&
|
|
|
|
MOpMEMI.isImm()))
|
2019-06-26 13:25:16 +08:00
|
|
|
return false;
|
2019-06-26 09:34:37 +08:00
|
|
|
|
|
|
|
uint64_t SHSrc = MOpSHSrc.getImm();
|
|
|
|
uint64_t MBSrc = MOpMBSrc.getImm();
|
|
|
|
uint64_t SHMI = MOpSHMI.getImm();
|
|
|
|
uint64_t MEMI = MOpMEMI.getImm();
|
|
|
|
uint64_t NewSH = SHSrc + SHMI;
|
|
|
|
uint64_t NewMB = MBSrc - SHMI;
|
|
|
|
if (NewMB > 63 || NewSH > 63)
|
2019-06-26 13:25:16 +08:00
|
|
|
return false;
|
2019-06-26 09:34:37 +08:00
|
|
|
|
|
|
|
// The bits cleared with RLDICL are [0, MBSrc).
|
|
|
|
// The bits cleared with RLDICR are (MEMI, 63].
|
|
|
|
// After the sequence, the bits cleared are:
|
|
|
|
// [0, MBSrc-SHMI) and (MEMI, 63).
|
|
|
|
//
|
|
|
|
// The bits cleared with RLDIC are [0, NewMB) and (63-NewSH, 63].
|
|
|
|
if ((63 - NewSH) != MEMI)
|
2019-06-26 13:25:16 +08:00
|
|
|
return false;
|
2019-06-26 09:34:37 +08:00
|
|
|
|
|
|
|
LLVM_DEBUG(dbgs() << "Converting pair: ");
|
|
|
|
LLVM_DEBUG(SrcMI->dump());
|
|
|
|
LLVM_DEBUG(MI.dump());
|
|
|
|
|
|
|
|
MI.setDesc(TII->get(PPC::RLDIC));
|
|
|
|
MI.getOperand(1).setReg(SrcMI->getOperand(1).getReg());
|
|
|
|
MI.getOperand(2).setImm(NewSH);
|
|
|
|
MI.getOperand(3).setImm(NewMB);
|
|
|
|
|
|
|
|
LLVM_DEBUG(dbgs() << "To: ");
|
|
|
|
LLVM_DEBUG(MI.dump());
|
|
|
|
NumRotatesCollapsed++;
|
2019-06-26 13:25:16 +08:00
|
|
|
return true;
|
2019-06-26 09:34:37 +08:00
|
|
|
}
|
|
|
|
|
2019-07-09 10:55:08 +08:00
|
|
|
// For case in LLVM IR
// entry:
//   %iconv = sext i32 %index to i64
//   br i1 undef label %true, label %false
// true:
//   %ptr = getelementptr inbounds i32, i32* null, i64 %iconv
//   ...
// PPCISelLowering::combineSHL fails to combine, because sext and shl are in
// different BBs when conducting instruction selection. We can do a peephole
// optimization to combine these two instructions into extswsli after
// instruction selection.
//
// \p MI is the candidate RLDICR encoding the shift-left. On success a new
// EXTSWSLI (or EXTSWSLI_32_64) is built before \p MI reusing its destination
// register, \p ToErase is set to \p MI (the caller erases it), the feeding
// EXTSW is erased here, and true is returned.
bool PPCMIPeephole::combineSEXTAndSHL(MachineInstr &MI,
                                      MachineInstr *&ToErase) {
  // Only an RLDICR can encode the "shl by immediate" half of the pattern.
  if (MI.getOpcode() != PPC::RLDICR)
    return false;

  // The fused extswsli instruction only exists from ISA 3.0 onwards.
  if (!MF->getSubtarget<PPCSubtarget>().isISA3_0())
    return false;

  assert(MI.getNumOperands() == 4 && "RLDICR should have 4 operands");

  // Note: operands are copied by value here; MOpSHMI is reused below when
  // building the replacement, after MI has been scheduled for erasure.
  MachineOperand MOpSHMI = MI.getOperand(2);
  MachineOperand MOpMEMI = MI.getOperand(3);
  if (!(MOpSHMI.isImm() && MOpMEMI.isImm()))
    return false;

  // RLDICR(x, SH, 63 - SH) is exactly "shl x, SH"; reject any other mask.
  uint64_t SHMI = MOpSHMI.getImm();
  uint64_t MEMI = MOpMEMI.getImm();
  if (SHMI + MEMI != 63)
    return false;

  Register SrcReg = MI.getOperand(1).getReg();
  if (!Register::isVirtualRegister(SrcReg))
    return false;

  // The shifted value must come directly from a sign-extension.
  MachineInstr *SrcMI = MRI->getVRegDef(SrcReg);
  if (SrcMI->getOpcode() != PPC::EXTSW &&
      SrcMI->getOpcode() != PPC::EXTSW_32_64)
    return false;

  // If the register defined by extsw has more than one use, combination is not
  // needed (the extsw must stay anyway, so fusing saves nothing).
  if (!MRI->hasOneNonDBGUse(SrcReg))
    return false;

  assert(SrcMI->getNumOperands() == 2 && "EXTSW should have 2 operands");
  assert(SrcMI->getOperand(1).isReg() &&
         "EXTSW's second operand should be a register");
  if (!Register::isVirtualRegister(SrcMI->getOperand(1).getReg()))
    return false;

  LLVM_DEBUG(dbgs() << "Combining pair: ");
  LLVM_DEBUG(SrcMI->dump());
  LLVM_DEBUG(MI.dump());

  // Build the replacement right before MI, reusing MI's destination register
  // so none of MI's users need rewriting. Adding SrcMI's use operand wholesale
  // also transfers its flags, which is correct since SrcMI is erased below.
  MachineInstr *NewInstr =
      BuildMI(*MI.getParent(), &MI, MI.getDebugLoc(),
              SrcMI->getOpcode() == PPC::EXTSW ? TII->get(PPC::EXTSWSLI)
                                               : TII->get(PPC::EXTSWSLI_32_64),
              MI.getOperand(0).getReg())
          .add(SrcMI->getOperand(1))
          .add(MOpSHMI);
  // Silences unused-variable warnings in builds where LLVM_DEBUG compiles out.
  (void)NewInstr;

  LLVM_DEBUG(dbgs() << "TO: ");
  LLVM_DEBUG(NewInstr->dump());
  ++NumEXTSWAndSLDICombined;
  ToErase = &MI;
  // SrcMI, which is extsw, is of no use now, erase it.
  SrcMI->eraseFromParent();
  return true;
}
|
|
|
|
|
2015-11-11 05:38:26 +08:00
|
|
|
} // end default namespace
|
|
|
|
|
|
|
|
// Register the pass with the legacy pass manager, together with the machine
// analyses it declares as dependencies (block frequency, dominators,
// post-dominators).
INITIALIZE_PASS_BEGIN(PPCMIPeephole, DEBUG_TYPE,
                      "PowerPC MI Peephole Optimization", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineBlockFrequencyInfo)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_DEPENDENCY(MachinePostDominatorTree)
INITIALIZE_PASS_END(PPCMIPeephole, DEBUG_TYPE,
                    "PowerPC MI Peephole Optimization", false, false)
|
|
|
|
|
|
|
|
// Pass identification: the address of ID uniquely identifies this pass to
// the legacy pass framework.
char PPCMIPeephole::ID = 0;
|
|
|
|
/// Factory function used by the PPC target to instantiate this pass.
FunctionPass *llvm::createPPCMIPeepholePass() {
  return new PPCMIPeephole();
}
|
|
|
|
|