2012-02-18 20:03:15 +08:00
|
|
|
//===-- PPCInstrInfo.cpp - PowerPC Instruction Information ----------------===//
|
2005-04-22 07:30:14 +08:00
|
|
|
//
|
2019-01-19 16:50:56 +08:00
|
|
|
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
|
|
|
|
// See https://llvm.org/LICENSE.txt for license information.
|
|
|
|
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
|
2005-04-22 07:30:14 +08:00
|
|
|
//
|
2004-08-17 12:55:41 +08:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
//
|
|
|
|
// This file contains the PowerPC implementation of the TargetInstrInfo class.
|
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2005-10-15 07:59:06 +08:00
|
|
|
#include "PPCInstrInfo.h"
|
2012-12-04 00:50:05 +08:00
|
|
|
#include "MCTargetDesc/PPCPredicates.h"
|
2011-07-11 11:57:24 +08:00
|
|
|
#include "PPC.h"
|
2012-12-04 00:50:05 +08:00
|
|
|
#include "PPCHazardRecognizers.h"
|
2008-01-02 05:11:32 +08:00
|
|
|
#include "PPCInstrBuilder.h"
|
2008-03-04 06:19:16 +08:00
|
|
|
#include "PPCMachineFunctionInfo.h"
|
2006-06-17 08:01:04 +08:00
|
|
|
#include "PPCTargetMachine.h"
|
2012-12-04 00:50:05 +08:00
|
|
|
#include "llvm/ADT/STLExtras.h"
|
2014-01-07 19:48:04 +08:00
|
|
|
#include "llvm/ADT/Statistic.h"
|
2017-12-13 10:51:04 +08:00
|
|
|
#include "llvm/CodeGen/LiveIntervals.h"
|
2010-07-17 02:22:00 +08:00
|
|
|
#include "llvm/CodeGen/MachineFrameInfo.h"
|
2013-04-09 00:24:03 +08:00
|
|
|
#include "llvm/CodeGen/MachineFunctionPass.h"
|
2004-08-17 12:55:41 +08:00
|
|
|
#include "llvm/CodeGen/MachineInstrBuilder.h"
|
2010-07-17 02:22:00 +08:00
|
|
|
#include "llvm/CodeGen/MachineMemOperand.h"
|
2010-02-27 05:09:24 +08:00
|
|
|
#include "llvm/CodeGen/MachineRegisterInfo.h"
|
2012-04-02 03:22:40 +08:00
|
|
|
#include "llvm/CodeGen/PseudoSourceValue.h"
|
2014-06-13 05:48:52 +08:00
|
|
|
#include "llvm/CodeGen/ScheduleDAG.h"
|
[PowerPC] Select between VSX A-type and M-type FMA instructions just before RA
The VSX instruction set has two types of FMA instructions: A-type (where the
addend is taken from the output register) and M-type (where one of the product
operands is taken from the output register). This adds a small pass that runs
just after MI scheduling (and, thus, just before register allocation) that
mutates A-type instructions (that are created during isel) into M-type
instructions when:
1. This will eliminate an otherwise-necessary copy of the addend
2. One of the product operands is killed by the instruction
The "right" moment to make this decision is in between scheduling and register
allocation, because only there do we know whether or not one of the product
operands is killed by any particular instruction. Unfortunately, this also
makes the implementation somewhat complicated, because the MIs are not in SSA
form and we need to preserve the LiveIntervals analysis.
As a simple example, if we have:
%vreg5<def> = COPY %vreg9; VSLRC:%vreg5,%vreg9
%vreg5<def,tied1> = XSMADDADP %vreg5<tied0>, %vreg17, %vreg16,
%RM<imp-use>; VSLRC:%vreg5,%vreg17,%vreg16
...
%vreg9<def,tied1> = XSMADDADP %vreg9<tied0>, %vreg17, %vreg19,
%RM<imp-use>; VSLRC:%vreg9,%vreg17,%vreg19
...
We can eliminate the copy by changing from the A-type to the
M-type instruction. This means:
%vreg5<def,tied1> = XSMADDADP %vreg5<tied0>, %vreg17, %vreg16,
%RM<imp-use>; VSLRC:%vreg5,%vreg17,%vreg16
is replaced by:
%vreg16<def,tied1> = XSMADDMDP %vreg16<tied0>, %vreg18, %vreg9,
%RM<imp-use>; VSLRC:%vreg16,%vreg18,%vreg9
and we remove: %vreg5<def> = COPY %vreg9; VSLRC:%vreg5,%vreg9
llvm-svn: 204768
2014-03-26 07:29:21 +08:00
|
|
|
#include "llvm/CodeGen/SlotIndexes.h"
|
Revert "r225811 - Revert "r225808 - [PowerPC] Add StackMap/PatchPoint support""
This re-applies r225808, fixed to avoid problems with SDAG dependencies along
with the preceding fix to ScheduleDAGSDNodes::RegDefIter::InitNodeNumDefs.
These problems caused the original regression tests to assert/segfault on many
(but not all) systems.
Original commit message:
This commit does two things:
1. Refactors PPCFastISel to use more of the common infrastructure for call
lowering (this lets us take advantage of this common code for lowering some
common intrinsics, stackmap/patchpoint among them).
2. Adds support for stackmap/patchpoint lowering. For the most part, this is
very similar to the support in the AArch64 target, with the obvious differences
(different registers, NOP instructions, etc.). The test cases are adapted
from the AArch64 test cases.
One difference of note is that the patchpoint call sequence takes 24 bytes, so
you can't use less than that (on AArch64 you can go down to 16). Also, as noted
in the docs, we take the patchpoint address to be the actual code address
(assuming the call is local in the TOC-sharing sense), which should yield
higher performance than generating the full cross-DSO indirect-call sequence
and is likely just as useful for JITed code (if not, we'll change it).
StackMaps and Patchpoints are still marked as experimental, and so this support
is doubly experimental. So go ahead and experiment!
llvm-svn: 225909
2015-01-14 09:07:51 +08:00
|
|
|
#include "llvm/CodeGen/StackMaps.h"
|
2011-07-11 11:57:24 +08:00
|
|
|
#include "llvm/MC/MCAsmInfo.h"
|
2015-05-16 05:58:42 +08:00
|
|
|
#include "llvm/MC/MCInst.h"
|
2008-03-05 07:13:51 +08:00
|
|
|
#include "llvm/Support/CommandLine.h"
|
[PowerPC] Select between VSX A-type and M-type FMA instructions just before RA
The VSX instruction set has two types of FMA instructions: A-type (where the
addend is taken from the output register) and M-type (where one of the product
operands is taken from the output register). This adds a small pass that runs
just after MI scheduling (and, thus, just before register allocation) that
mutates A-type instructions (that are created during isel) into M-type
instructions when:
1. This will eliminate an otherwise-necessary copy of the addend
2. One of the product operands is killed by the instruction
The "right" moment to make this decision is in between scheduling and register
allocation, because only there do we know whether or not one of the product
operands is killed by any particular instruction. Unfortunately, this also
makes the implementation somewhat complicated, because the MIs are not in SSA
form and we need to preserve the LiveIntervals analysis.
As a simple example, if we have:
%vreg5<def> = COPY %vreg9; VSLRC:%vreg5,%vreg9
%vreg5<def,tied1> = XSMADDADP %vreg5<tied0>, %vreg17, %vreg16,
%RM<imp-use>; VSLRC:%vreg5,%vreg17,%vreg16
...
%vreg9<def,tied1> = XSMADDADP %vreg9<tied0>, %vreg17, %vreg19,
%RM<imp-use>; VSLRC:%vreg9,%vreg17,%vreg19
...
We can eliminate the copy by changing from the A-type to the
M-type instruction. This means:
%vreg5<def,tied1> = XSMADDADP %vreg5<tied0>, %vreg17, %vreg16,
%RM<imp-use>; VSLRC:%vreg5,%vreg17,%vreg16
is replaced by:
%vreg16<def,tied1> = XSMADDMDP %vreg16<tied0>, %vreg18, %vreg9,
%RM<imp-use>; VSLRC:%vreg16,%vreg18,%vreg9
and we remove: %vreg5<def> = COPY %vreg9; VSLRC:%vreg5,%vreg9
llvm-svn: 204768
2014-03-26 07:29:21 +08:00
|
|
|
#include "llvm/Support/Debug.h"
|
2009-07-09 04:53:28 +08:00
|
|
|
#include "llvm/Support/ErrorHandling.h"
|
2011-08-25 02:08:43 +08:00
|
|
|
#include "llvm/Support/TargetRegistry.h"
|
2009-07-09 04:53:28 +08:00
|
|
|
#include "llvm/Support/raw_ostream.h"
|
2004-08-17 12:55:41 +08:00
|
|
|
|
2010-04-16 01:20:57 +08:00
|
|
|
using namespace llvm;
|
2008-03-05 07:13:51 +08:00
|
|
|
|
2014-04-22 06:55:11 +08:00
|
|
|
#define DEBUG_TYPE "ppc-instr-info"
|
|
|
|
|
2014-04-22 10:03:14 +08:00
|
|
|
#define GET_INSTRMAP_INFO
|
|
|
|
#define GET_INSTRINFO_CTOR_DTOR
|
|
|
|
#include "PPCGenInstrInfo.inc"
|
|
|
|
|
2017-09-22 00:12:33 +08:00
|
|
|
// Counters for spills of the SPILLVSRRC class, which can be stored to the
// stack either as a vector or as a GPR value.
STATISTIC(NumStoreSPILLVSRRCAsVec,
          "Number of spillvsrrc spilled to stack as vec");
STATISTIC(NumStoreSPILLVSRRCAsGpr,
          "Number of spillvsrrc spilled to stack as gpr");
STATISTIC(NumGPRtoVSRSpill, "Number of gpr spills to spillvsrrc");
// Counters for the compare/immediate conversion optimizations.
STATISTIC(CmpIselsConverted,
          "Number of ISELs that depend on comparison of constants converted");
STATISTIC(MissedConvertibleImmediateInstrs,
          "Number of compare-immediate instructions fed by constants");
STATISTIC(NumRcRotatesConvertedToRcAnd,
          "Number of record-form rotates converted to record-form andi");
|
2017-09-22 00:12:33 +08:00
|
|
|
|
2012-06-08 23:38:25 +08:00
|
|
|
// When set, disables the analysis that decides whether loops can use the CTR
// register (hardware count-register loops).
static cl::opt<bool> DisableCTRLoopAnal("disable-ppc-ctrloop-analysis",
    cl::Hidden, cl::desc("Disable analysis for CTR loops"));
|
2012-06-08 23:38:25 +08:00
|
|
|
|
2013-04-20 06:08:38 +08:00
|
|
|
// Escape hatch to turn off the PPC compare-instruction optimization.
static cl::opt<bool> DisableCmpOpt("disable-ppc-cmp-opt",
    cl::desc("Disable compare instruction optimization"), cl::Hidden);
|
|
|
|
|
2014-03-28 06:46:28 +08:00
|
|
|
// Debugging aid: instead of quietly emitting a nop for a VSX self-copy,
// crash so the codepath that produced the self-copy can be found.
static cl::opt<bool> VSXSelfCopyCrash("crash-on-ppc-vsx-self-copy",
    cl::desc("Causes the backend to crash instead of generating a nop VSX copy"),
    cl::Hidden);
|
|
|
|
|
[PowerPC] Fix the PPCInstrInfo::getInstrLatency implementation
PowerPC uses itineraries to describe processor pipelines (and dispatch-group
restrictions for P7/P8 cores). Unfortunately, the target-independent
implementation of TII.getInstrLatency calls ItinData->getStageLatency, and that
looks for the largest cycle count in the pipeline for any given instruction.
This, however, yields the wrong answer for the PPC itineraries, because we
don't encode the full pipeline. Because the functional units are fully
pipelined, we only model the initial stages (there are no relevant hazards in
the later stages to model), and so the technique employed by getStageLatency
does not really work. Instead, we should take the maximum output operand
latency, and that's what PPCInstrInfo::getInstrLatency now does.
This caused some test-case churn, including two unfortunate side effects.
First, the new arrangement of copies we get from function parameters now
sometimes blocks VSX FMA mutation (a FIXME has been added to the code and the
test cases), and we have one significant test-suite regression:
SingleSource/Benchmarks/BenchmarkGame/spectral-norm
56.4185% +/- 18.9398%
In this benchmark we have a loop with a vectorized FP divide, and it with the
new scheduling both divides end up in the same dispatch group (which in this
case seems to cause a problem, although why is not exactly clear). The grouping
structure is hard to predict from the bottom of the loop, and there may not be
much we can do to fix this.
Very few other test-suite performance effects were really significant, but
almost all weakly favor this change. However, in light of the issues
highlighted above, I've left the old behavior available via a
command-line flag.
llvm-svn: 242188
2015-07-15 04:02:02 +08:00
|
|
|
// Kept for comparison purposes: falls back to the generic stage-based latency
// computation, which is known to give wrong answers for the PPC itineraries
// (see PPCInstrInfo::getInstrLatency).
static cl::opt<bool>
UseOldLatencyCalc("ppc-old-latency-calc", cl::Hidden,
    cl::desc("Use the old (incorrect) instruction latency calculation"));
|
|
|
|
|
2018-03-27 01:39:18 +08:00
|
|
|
// Index into the OpcodesForSpill array.
// Each key selects the load/store opcode used to reload or spill a value of
// the corresponding register class (see getLoadOpcodesForSpillArray and its
// users, e.g. isLoadFromStackSlot).
enum SpillOpcodeKey {
  SOK_Int4Spill,
  SOK_Int8Spill,
  SOK_Float8Spill,
  SOK_Float4Spill,
  SOK_CRSpill,
  SOK_CRBitSpill,
  SOK_VRVectorSpill,
  SOK_VSXVectorSpill,
  SOK_VectorFloat8Spill,
  SOK_VectorFloat4Spill,
  SOK_VRSaveSpill,
  SOK_QuadFloat8Spill,
  SOK_QuadFloat4Spill,
  SOK_QuadBitSpill,
  SOK_SpillToVSR,
  SOK_SPESpill,
  SOK_SPE4Spill,
  SOK_LastOpcodeSpill // This must be last on the enum.
};
|
|
|
|
|
2013-11-19 08:57:56 +08:00
|
|
|
// Pin the vtable to this file. (Out-of-line virtual method so the vtable is
// emitted in exactly one translation unit.)
void PPCInstrInfo::anchor() {}
|
|
|
|
|
2014-06-13 05:48:52 +08:00
|
|
|
// Constructor: registers the call-frame setup/destroy pseudo opcodes and the
// return opcode (BLR8 on 64-bit targets, BLR on 32-bit) with the
// TableGen'erated base class. PPC has no catch-return opcode, hence -1.
PPCInstrInfo::PPCInstrInfo(PPCSubtarget &STI)
    : PPCGenInstrInfo(PPC::ADJCALLSTACKDOWN, PPC::ADJCALLSTACKUP,
                      /* CatchRetOpcode */ -1,
                      STI.isPPC64() ? PPC::BLR8 : PPC::BLR),
      Subtarget(STI), RI(STI.getTargetMachine()) {}
|
2006-06-17 08:01:04 +08:00
|
|
|
|
Various bits of framework needed for precise machine-level selection
DAG scheduling during isel. Most new functionality is currently
guarded by -enable-sched-cycles and -enable-sched-hazard.
Added InstrItineraryData::IssueWidth field, currently derived from
ARM itineraries, but could be initialized differently on other targets.
Added ScheduleHazardRecognizer::MaxLookAhead to indicate whether it is
active, and if so how many cycles of state it holds.
Added SchedulingPriorityQueue::HasReadyFilter to allowing gating entry
into the scheduler's available queue.
ScoreboardHazardRecognizer now accesses the ScheduleDAG in order to
get information about it's SUnits, provides RecedeCycle for bottom-up
scheduling, correctly computes scoreboard depth, tracks IssueCount, and
considers potential stall cycles when checking for hazards.
ScheduleDAGRRList now models machine cycles and hazards (under
flags). It tracks MinAvailableCycle, drives the hazard recognizer and
priority queue's ready filter, manages a new PendingQueue, properly
accounts for stall cycles, etc.
llvm-svn: 122541
2010-12-24 13:03:26 +08:00
|
|
|
/// CreateTargetHazardRecognizer - Return the hazard recognizer to use for
|
|
|
|
/// this target when scheduling the DAG.
|
2014-06-14 06:38:52 +08:00
|
|
|
ScheduleHazardRecognizer *
|
|
|
|
PPCInstrInfo::CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI,
|
|
|
|
const ScheduleDAG *DAG) const {
|
|
|
|
unsigned Directive =
|
|
|
|
static_cast<const PPCSubtarget *>(STI)->getDarwinDirective();
|
2012-08-29 00:12:39 +08:00
|
|
|
if (Directive == PPC::DIR_440 || Directive == PPC::DIR_A2 ||
|
|
|
|
Directive == PPC::DIR_E500mc || Directive == PPC::DIR_E5500) {
|
2014-06-14 06:38:52 +08:00
|
|
|
const InstrItineraryData *II =
|
2014-08-05 05:25:23 +08:00
|
|
|
static_cast<const PPCSubtarget *>(STI)->getInstrItineraryData();
|
2013-12-03 07:52:46 +08:00
|
|
|
return new ScoreboardHazardRecognizer(II, DAG);
|
2011-10-17 12:03:49 +08:00
|
|
|
}
|
2011-12-02 12:58:02 +08:00
|
|
|
|
2014-06-14 06:38:52 +08:00
|
|
|
return TargetInstrInfo::CreateTargetHazardRecognizer(STI, DAG);
|
Various bits of framework needed for precise machine-level selection
DAG scheduling during isel. Most new functionality is currently
guarded by -enable-sched-cycles and -enable-sched-hazard.
Added InstrItineraryData::IssueWidth field, currently derived from
ARM itineraries, but could be initialized differently on other targets.
Added ScheduleHazardRecognizer::MaxLookAhead to indicate whether it is
active, and if so how many cycles of state it holds.
Added SchedulingPriorityQueue::HasReadyFilter to allowing gating entry
into the scheduler's available queue.
ScoreboardHazardRecognizer now accesses the ScheduleDAG in order to
get information about it's SUnits, provides RecedeCycle for bottom-up
scheduling, correctly computes scoreboard depth, tracks IssueCount, and
considers potential stall cycles when checking for hazards.
ScheduleDAGRRList now models machine cycles and hazards (under
flags). It tracks MinAvailableCycle, drives the hazard recognizer and
priority queue's ready filter, manages a new PendingQueue, properly
accounts for stall cycles, etc.
llvm-svn: 122541
2010-12-24 13:03:26 +08:00
|
|
|
}
|
|
|
|
|
2011-12-02 12:58:02 +08:00
|
|
|
/// CreateTargetPostRAHazardRecognizer - Return the postRA hazard recognizer
|
|
|
|
/// to use for this target when scheduling the DAG.
|
2015-01-31 06:02:31 +08:00
|
|
|
ScheduleHazardRecognizer *
|
|
|
|
PPCInstrInfo::CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
|
|
|
|
const ScheduleDAG *DAG) const {
|
2014-06-13 05:48:52 +08:00
|
|
|
unsigned Directive =
|
2015-01-31 06:02:31 +08:00
|
|
|
DAG->MF.getSubtarget<PPCSubtarget>().getDarwinDirective();
|
2011-12-02 12:58:02 +08:00
|
|
|
|
2016-05-10 02:54:58 +08:00
|
|
|
// FIXME: Leaving this as-is until we have POWER9 scheduling info
|
2014-06-26 21:36:19 +08:00
|
|
|
if (Directive == PPC::DIR_PWR7 || Directive == PPC::DIR_PWR8)
|
2013-12-12 08:19:11 +08:00
|
|
|
return new PPCDispatchGroupSBHazardRecognizer(II, DAG);
|
|
|
|
|
2011-12-02 12:58:02 +08:00
|
|
|
// Most subtargets use a PPC970 recognizer.
|
2012-08-29 00:12:39 +08:00
|
|
|
if (Directive != PPC::DIR_440 && Directive != PPC::DIR_A2 &&
|
|
|
|
Directive != PPC::DIR_E500mc && Directive != PPC::DIR_E5500) {
|
2014-06-13 05:48:52 +08:00
|
|
|
assert(DAG->TII && "No InstrInfo?");
|
2011-12-02 12:58:02 +08:00
|
|
|
|
2014-06-13 05:48:52 +08:00
|
|
|
return new PPCHazardRecognizer970(*DAG);
|
2011-12-02 12:58:02 +08:00
|
|
|
}
|
|
|
|
|
2013-12-03 07:52:46 +08:00
|
|
|
return new ScoreboardHazardRecognizer(II, DAG);
|
2011-12-02 12:58:02 +08:00
|
|
|
}
|
2012-06-20 05:14:34 +08:00
|
|
|
|
[PowerPC] Fix the PPCInstrInfo::getInstrLatency implementation
PowerPC uses itineraries to describe processor pipelines (and dispatch-group
restrictions for P7/P8 cores). Unfortunately, the target-independent
implementation of TII.getInstrLatency calls ItinData->getStageLatency, and that
looks for the largest cycle count in the pipeline for any given instruction.
This, however, yields the wrong answer for the PPC itineraries, because we
don't encode the full pipeline. Because the functional units are fully
pipelined, we only model the initial stages (there are no relevant hazards in
the later stages to model), and so the technique employed by getStageLatency
does not really work. Instead, we should take the maximum output operand
latency, and that's what PPCInstrInfo::getInstrLatency now does.
This caused some test-case churn, including two unfortunate side effects.
First, the new arrangement of copies we get from function parameters now
sometimes blocks VSX FMA mutation (a FIXME has been added to the code and the
test cases), and we have one significant test-suite regression:
SingleSource/Benchmarks/BenchmarkGame/spectral-norm
56.4185% +/- 18.9398%
In this benchmark we have a loop with a vectorized FP divide, and it with the
new scheduling both divides end up in the same dispatch group (which in this
case seems to cause a problem, although why is not exactly clear). The grouping
structure is hard to predict from the bottom of the loop, and there may not be
much we can do to fix this.
Very few other test-suite performance effects were really significant, but
almost all weakly favor this change. However, in light of the issues
highlighted above, I've left the old behavior available via a
command-line flag.
llvm-svn: 242188
2015-07-15 04:02:02 +08:00
|
|
|
unsigned PPCInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
|
2016-06-30 08:01:54 +08:00
|
|
|
const MachineInstr &MI,
|
[PowerPC] Fix the PPCInstrInfo::getInstrLatency implementation
PowerPC uses itineraries to describe processor pipelines (and dispatch-group
restrictions for P7/P8 cores). Unfortunately, the target-independent
implementation of TII.getInstrLatency calls ItinData->getStageLatency, and that
looks for the largest cycle count in the pipeline for any given instruction.
This, however, yields the wrong answer for the PPC itineraries, because we
don't encode the full pipeline. Because the functional units are fully
pipelined, we only model the initial stages (there are no relevant hazards in
the later stages to model), and so the technique employed by getStageLatency
does not really work. Instead, we should take the maximum output operand
latency, and that's what PPCInstrInfo::getInstrLatency now does.
This caused some test-case churn, including two unfortunate side effects.
First, the new arrangement of copies we get from function parameters now
sometimes blocks VSX FMA mutation (a FIXME has been added to the code and the
test cases), and we have one significant test-suite regression:
SingleSource/Benchmarks/BenchmarkGame/spectral-norm
56.4185% +/- 18.9398%
In this benchmark we have a loop with a vectorized FP divide, and it with the
new scheduling both divides end up in the same dispatch group (which in this
case seems to cause a problem, although why is not exactly clear). The grouping
structure is hard to predict from the bottom of the loop, and there may not be
much we can do to fix this.
Very few other test-suite performance effects were really significant, but
almost all weakly favor this change. However, in light of the issues
highlighted above, I've left the old behavior available via a
command-line flag.
llvm-svn: 242188
2015-07-15 04:02:02 +08:00
|
|
|
unsigned *PredCost) const {
|
|
|
|
if (!ItinData || UseOldLatencyCalc)
|
|
|
|
return PPCGenInstrInfo::getInstrLatency(ItinData, MI, PredCost);
|
|
|
|
|
|
|
|
// The default implementation of getInstrLatency calls getStageLatency, but
|
|
|
|
// getStageLatency does not do the right thing for us. While we have
|
|
|
|
// itinerary, most cores are fully pipelined, and so the itineraries only
|
|
|
|
// express the first part of the pipeline, not every stage. Instead, we need
|
|
|
|
// to use the listed output operand cycle number (using operand 0 here, which
|
|
|
|
// is an output).
|
|
|
|
|
|
|
|
unsigned Latency = 1;
|
2016-06-30 08:01:54 +08:00
|
|
|
unsigned DefClass = MI.getDesc().getSchedClass();
|
|
|
|
for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
|
|
|
|
const MachineOperand &MO = MI.getOperand(i);
|
[PowerPC] Fix the PPCInstrInfo::getInstrLatency implementation
PowerPC uses itineraries to describe processor pipelines (and dispatch-group
restrictions for P7/P8 cores). Unfortunately, the target-independent
implementation of TII.getInstrLatency calls ItinData->getStageLatency, and that
looks for the largest cycle count in the pipeline for any given instruction.
This, however, yields the wrong answer for the PPC itineraries, because we
don't encode the full pipeline. Because the functional units are fully
pipelined, we only model the initial stages (there are no relevant hazards in
the later stages to model), and so the technique employed by getStageLatency
does not really work. Instead, we should take the maximum output operand
latency, and that's what PPCInstrInfo::getInstrLatency now does.
This caused some test-case churn, including two unfortunate side effects.
First, the new arrangement of copies we get from function parameters now
sometimes blocks VSX FMA mutation (a FIXME has been added to the code and the
test cases), and we have one significant test-suite regression:
SingleSource/Benchmarks/BenchmarkGame/spectral-norm
56.4185% +/- 18.9398%
In this benchmark we have a loop with a vectorized FP divide, and it with the
new scheduling both divides end up in the same dispatch group (which in this
case seems to cause a problem, although why is not exactly clear). The grouping
structure is hard to predict from the bottom of the loop, and there may not be
much we can do to fix this.
Very few other test-suite performance effects were really significant, but
almost all weakly favor this change. However, in light of the issues
highlighted above, I've left the old behavior available via a
command-line flag.
llvm-svn: 242188
2015-07-15 04:02:02 +08:00
|
|
|
if (!MO.isReg() || !MO.isDef() || MO.isImplicit())
|
|
|
|
continue;
|
|
|
|
|
|
|
|
int Cycle = ItinData->getOperandCycle(DefClass, i);
|
|
|
|
if (Cycle < 0)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
Latency = std::max(Latency, (unsigned) Cycle);
|
|
|
|
}
|
|
|
|
|
|
|
|
return Latency;
|
|
|
|
}
|
2013-12-12 08:19:11 +08:00
|
|
|
|
|
|
|
int PPCInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
|
2016-06-30 08:01:54 +08:00
|
|
|
const MachineInstr &DefMI, unsigned DefIdx,
|
|
|
|
const MachineInstr &UseMI,
|
2013-12-12 08:19:11 +08:00
|
|
|
unsigned UseIdx) const {
|
|
|
|
int Latency = PPCGenInstrInfo::getOperandLatency(ItinData, DefMI, DefIdx,
|
|
|
|
UseMI, UseIdx);
|
|
|
|
|
2016-06-30 08:01:54 +08:00
|
|
|
if (!DefMI.getParent())
|
2015-07-15 16:23:05 +08:00
|
|
|
return Latency;
|
|
|
|
|
2016-06-30 08:01:54 +08:00
|
|
|
const MachineOperand &DefMO = DefMI.getOperand(DefIdx);
|
Apply llvm-prefer-register-over-unsigned from clang-tidy to LLVM
Summary:
This clang-tidy check is looking for unsigned integer variables whose initializer
starts with an implicit cast from llvm::Register and changes the type of the
variable to llvm::Register (dropping the llvm:: where possible).
Partial reverts in:
X86FrameLowering.cpp - Some functions return unsigned and arguably should be MCRegister
X86FixupLEAs.cpp - Some functions return unsigned and arguably should be MCRegister
X86FrameLowering.cpp - Some functions return unsigned and arguably should be MCRegister
HexagonBitSimplify.cpp - Function takes BitTracker::RegisterRef which appears to be unsigned&
MachineVerifier.cpp - Ambiguous operator==() given MCRegister and const Register
PPCFastISel.cpp - No Register::operator-=()
PeepholeOptimizer.cpp - TargetInstrInfo::optimizeLoadInstr() takes an unsigned&
MachineTraceMetrics.cpp - MachineTraceMetrics lacks a suitable constructor
Manual fixups in:
ARMFastISel.cpp - ARMEmitLoad() now takes a Register& instead of unsigned&
HexagonSplitDouble.cpp - Ternary operator was ambiguous between unsigned/Register
HexagonConstExtenders.cpp - Has a local class named Register, used llvm::Register instead of Register.
PPCFastISel.cpp - PPCEmitLoad() now takes a Register& instead of unsigned&
Depends on D65919
Reviewers: arsenm, bogner, craig.topper, RKSimon
Reviewed By: arsenm
Subscribers: RKSimon, craig.topper, lenary, aemerson, wuzish, jholewinski, MatzeB, qcolombet, dschuff, jyknight, dylanmckay, sdardis, nemanjai, jvesely, wdng, nhaehnle, sbc100, jgravelle-google, kristof.beyls, hiraditya, aheejin, kbarton, fedor.sergeev, javed.absar, asb, rbar, johnrusso, simoncook, apazos, sabuasal, niosHD, jrtc27, MaskRay, zzheng, edward-jones, atanasyan, rogfer01, MartinMosbeck, brucehoult, the_o, tpr, PkmX, jocewei, jsji, Petar.Avramovic, asbirlea, Jim, s.egerton, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D65962
llvm-svn: 369041
2019-08-16 03:22:08 +08:00
|
|
|
Register Reg = DefMO.getReg();
|
2013-12-12 08:19:11 +08:00
|
|
|
|
|
|
|
bool IsRegCR;
|
2019-08-02 07:27:28 +08:00
|
|
|
if (Register::isVirtualRegister(Reg)) {
|
2013-12-12 08:19:11 +08:00
|
|
|
const MachineRegisterInfo *MRI =
|
2016-06-30 08:01:54 +08:00
|
|
|
&DefMI.getParent()->getParent()->getRegInfo();
|
2013-12-12 08:19:11 +08:00
|
|
|
IsRegCR = MRI->getRegClass(Reg)->hasSuperClassEq(&PPC::CRRCRegClass) ||
|
|
|
|
MRI->getRegClass(Reg)->hasSuperClassEq(&PPC::CRBITRCRegClass);
|
|
|
|
} else {
|
|
|
|
IsRegCR = PPC::CRRCRegClass.contains(Reg) ||
|
|
|
|
PPC::CRBITRCRegClass.contains(Reg);
|
|
|
|
}
|
|
|
|
|
2016-06-30 08:01:54 +08:00
|
|
|
if (UseMI.isBranch() && IsRegCR) {
|
2013-12-12 08:19:11 +08:00
|
|
|
if (Latency < 0)
|
|
|
|
Latency = getInstrLatency(ItinData, DefMI);
|
|
|
|
|
|
|
|
// On some cores, there is an additional delay between writing to a condition
|
|
|
|
// register, and using it from a branch.
|
2014-06-13 05:48:52 +08:00
|
|
|
unsigned Directive = Subtarget.getDarwinDirective();
|
2013-12-12 08:19:11 +08:00
|
|
|
switch (Directive) {
|
|
|
|
default: break;
|
|
|
|
case PPC::DIR_7400:
|
|
|
|
case PPC::DIR_750:
|
|
|
|
case PPC::DIR_970:
|
|
|
|
case PPC::DIR_E5500:
|
|
|
|
case PPC::DIR_PWR4:
|
|
|
|
case PPC::DIR_PWR5:
|
|
|
|
case PPC::DIR_PWR5X:
|
|
|
|
case PPC::DIR_PWR6:
|
|
|
|
case PPC::DIR_PWR6X:
|
|
|
|
case PPC::DIR_PWR7:
|
2014-06-26 21:36:19 +08:00
|
|
|
case PPC::DIR_PWR8:
|
2016-05-10 02:54:58 +08:00
|
|
|
// FIXME: Is this needed for POWER9?
|
2013-12-12 08:19:11 +08:00
|
|
|
Latency += 2;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return Latency;
|
|
|
|
}
|
|
|
|
|
2015-07-15 16:23:05 +08:00
|
|
|
// This function does not list all associative and commutative operations, but
// only those worth feeding through the machine combiner in an attempt to
// reduce the critical path. Mostly, this means floating-point operations,
// because they have high latencies (compared to other operations, such as
// and/or, which are also associative and commutative, but have low latencies).
bool PPCInstrInfo::isAssociativeAndCommutative(const MachineInstr &Inst) const {
  switch (Inst.getOpcode()) {
  // FP Add:
  case PPC::FADD:
  case PPC::FADDS:
  // FP Multiply:
  case PPC::FMUL:
  case PPC::FMULS:
  // Altivec Add:
  case PPC::VADDFP:
  // VSX Add:
  case PPC::XSADDDP:
  case PPC::XVADDDP:
  case PPC::XVADDSP:
  case PPC::XSADDSP:
  // VSX Multiply:
  case PPC::XSMULDP:
  case PPC::XVMULDP:
  case PPC::XVMULSP:
  case PPC::XSMULSP:
  // QPX Add:
  case PPC::QVFADD:
  case PPC::QVFADDS:
  case PPC::QVFADDSs:
  // QPX Multiply:
  case PPC::QVFMUL:
  case PPC::QVFMULS:
  case PPC::QVFMULSs:
    return true;
  default:
    return false;
  }
}
|
|
|
|
|
2015-09-21 23:09:11 +08:00
|
|
|
bool PPCInstrInfo::getMachineCombinerPatterns(
|
|
|
|
MachineInstr &Root,
|
2015-11-06 03:34:57 +08:00
|
|
|
SmallVectorImpl<MachineCombinerPattern> &Patterns) const {
|
2015-07-15 16:23:05 +08:00
|
|
|
// Using the machine combiner in this way is potentially expensive, so
|
|
|
|
// restrict to when aggressive optimizations are desired.
|
|
|
|
if (Subtarget.getTargetMachine().getOptLevel() != CodeGenOpt::Aggressive)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
// FP reassociation is only legal when we don't need strict IEEE semantics.
|
|
|
|
if (!Root.getParent()->getParent()->getTarget().Options.UnsafeFPMath)
|
|
|
|
return false;
|
|
|
|
|
2015-09-21 23:09:11 +08:00
|
|
|
return TargetInstrInfo::getMachineCombinerPatterns(Root, Patterns);
|
2015-07-15 16:23:05 +08:00
|
|
|
}
|
|
|
|
|
2012-06-20 05:14:34 +08:00
|
|
|
// Detect 32 -> 64-bit extensions where we may reuse the low sub-register.
|
|
|
|
bool PPCInstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
|
|
|
|
unsigned &SrcReg, unsigned &DstReg,
|
|
|
|
unsigned &SubIdx) const {
|
|
|
|
switch (MI.getOpcode()) {
|
|
|
|
default: return false;
|
|
|
|
case PPC::EXTSW:
|
2017-10-16 12:12:57 +08:00
|
|
|
case PPC::EXTSW_32:
|
2012-06-20 05:14:34 +08:00
|
|
|
case PPC::EXTSW_32_64:
|
|
|
|
SrcReg = MI.getOperand(1).getReg();
|
|
|
|
DstReg = MI.getOperand(0).getReg();
|
|
|
|
SubIdx = PPC::sub_32;
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-06-30 08:01:54 +08:00
|
|
|
unsigned PPCInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
|
2006-03-17 06:24:02 +08:00
|
|
|
int &FrameIndex) const {
|
2018-03-27 01:39:18 +08:00
|
|
|
unsigned Opcode = MI.getOpcode();
|
|
|
|
const unsigned *OpcodesForSpill = getLoadOpcodesForSpillArray();
|
|
|
|
const unsigned *End = OpcodesForSpill + SOK_LastOpcodeSpill;
|
|
|
|
|
|
|
|
if (End != std::find(OpcodesForSpill, End, Opcode)) {
|
2013-03-28 05:21:15 +08:00
|
|
|
// Check for the operands added by addFrameReference (the immediate is the
|
|
|
|
// offset which defaults to 0).
|
2016-06-30 08:01:54 +08:00
|
|
|
if (MI.getOperand(1).isImm() && !MI.getOperand(1).getImm() &&
|
|
|
|
MI.getOperand(2).isFI()) {
|
|
|
|
FrameIndex = MI.getOperand(2).getIndex();
|
|
|
|
return MI.getOperand(0).getReg();
|
2006-02-03 04:12:32 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
return 0;
|
2006-02-03 04:16:12 +08:00
|
|
|
}
|
2006-02-03 04:12:32 +08:00
|
|
|
|
2017-06-22 01:17:56 +08:00
|
|
|
// For opcodes with the ReMaterializable flag set, this function is called to
|
2018-07-31 03:41:25 +08:00
|
|
|
// verify the instruction is really rematable.
|
2017-06-22 01:17:56 +08:00
|
|
|
bool PPCInstrInfo::isReallyTriviallyReMaterializable(const MachineInstr &MI,
|
|
|
|
AliasAnalysis *AA) const {
|
|
|
|
switch (MI.getOpcode()) {
|
2018-07-31 03:41:25 +08:00
|
|
|
default:
|
2017-06-22 01:17:56 +08:00
|
|
|
// This function should only be called for opcodes with the ReMaterializable
|
|
|
|
// flag set.
|
|
|
|
llvm_unreachable("Unknown rematerializable operation!");
|
|
|
|
break;
|
|
|
|
case PPC::LI:
|
|
|
|
case PPC::LI8:
|
|
|
|
case PPC::LIS:
|
|
|
|
case PPC::LIS8:
|
|
|
|
case PPC::QVGPCI:
|
2019-07-23 03:55:33 +08:00
|
|
|
case PPC::ADDIStocHA8:
|
2017-06-22 01:17:56 +08:00
|
|
|
case PPC::ADDItocL:
|
|
|
|
case PPC::LOAD_STACK_GUARD:
|
2019-03-13 02:27:09 +08:00
|
|
|
case PPC::XXLXORz:
|
|
|
|
case PPC::XXLXORspz:
|
|
|
|
case PPC::XXLXORdpz:
|
2019-08-15 22:32:51 +08:00
|
|
|
case PPC::XXLEQVOnes:
|
2019-03-13 02:27:09 +08:00
|
|
|
case PPC::V_SET0B:
|
|
|
|
case PPC::V_SET0H:
|
|
|
|
case PPC::V_SET0:
|
|
|
|
case PPC::V_SETALLONESB:
|
|
|
|
case PPC::V_SETALLONESH:
|
|
|
|
case PPC::V_SETALLONES:
|
[PowerPC] Remove CRBits Copy Of Unset/set CBit
For the situation, where we generate the following code:
crxor 8, 8, 8
< Some instructions>
.LBB0_1:
< Some instructions>
cror 1, 8, 8
cror (COPY of CRbit) depends on the result of the crxor instruction.
CR8 is known to be zero as crxor is equivalent to CRUNSET. We can simply use
crxor 1, 1, 1 instead to zero out CR1, which does not have any dependency on
any previous instruction.
This patch will optimize it to:
< Some instructions>
.LBB0_1:
< Some instructions>
cror 1, 1, 1
Patch By: Victor Huang (NeHuang)
Differential Revision: https://reviews.llvm.org/D62044
llvm-svn: 361632
2019-05-24 20:05:37 +08:00
|
|
|
case PPC::CRSET:
|
|
|
|
case PPC::CRUNSET:
|
2017-06-22 01:17:56 +08:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2016-06-30 08:01:54 +08:00
|
|
|
unsigned PPCInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
|
2006-02-03 04:16:12 +08:00
|
|
|
int &FrameIndex) const {
|
2018-03-27 01:39:18 +08:00
|
|
|
unsigned Opcode = MI.getOpcode();
|
|
|
|
const unsigned *OpcodesForSpill = getStoreOpcodesForSpillArray();
|
|
|
|
const unsigned *End = OpcodesForSpill + SOK_LastOpcodeSpill;
|
|
|
|
|
|
|
|
if (End != std::find(OpcodesForSpill, End, Opcode)) {
|
2016-06-30 08:01:54 +08:00
|
|
|
if (MI.getOperand(1).isImm() && !MI.getOperand(1).getImm() &&
|
|
|
|
MI.getOperand(2).isFI()) {
|
|
|
|
FrameIndex = MI.getOperand(2).getIndex();
|
|
|
|
return MI.getOperand(0).getReg();
|
2006-02-03 04:16:12 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
2006-02-03 04:12:32 +08:00
|
|
|
|
2016-06-30 08:01:54 +08:00
|
|
|
// Commute operands OpIdx1 and OpIdx2 of \p MI. Most instructions are handled
// by the generic implementation; RLWIMI (rotate-left-word-immediate-then-
// mask-insert) needs special care because swapping its register operands
// also requires complementing the mask. Returns the commuted instruction
// (a new one if \p NewMI is set, otherwise \p MI updated in place), or
// nullptr if the instruction cannot be commuted.
MachineInstr *PPCInstrInfo::commuteInstructionImpl(MachineInstr &MI, bool NewMI,
                                                   unsigned OpIdx1,
                                                   unsigned OpIdx2) const {
  MachineFunction &MF = *MI.getParent()->getParent();

  // Normal instructions can be commuted the obvious way.
  if (MI.getOpcode() != PPC::RLWIMI && MI.getOpcode() != PPC::RLWIMIo)
    return TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
  // Note that RLWIMI can be commuted as a 32-bit instruction, but not as a
  // 64-bit instruction (so we don't handle PPC::RLWIMI8 here), because
  // changing the relative order of the mask operands might change what happens
  // to the high-bits of the mask (and, thus, the result).

  // Cannot commute if it has a non-zero rotate count.
  if (MI.getOperand(3).getImm() != 0)
    return nullptr;

  // If we have a zero rotate count, we have:
  //   M = mask(MB,ME)
  //   Op0 = (Op1 & ~M) | (Op2 & M)
  // Change this to:
  //   M = mask((ME+1)&31, (MB-1)&31)
  //   Op0 = (Op2 & ~M) | (Op1 & M)

  // Swap op1/op2
  assert(((OpIdx1 == 1 && OpIdx2 == 2) || (OpIdx1 == 2 && OpIdx2 == 1)) &&
         "Only the operands 1 and 2 can be swapped in RLWIMI/RLWIMIo.");
  Register Reg0 = MI.getOperand(0).getReg();
  Register Reg1 = MI.getOperand(1).getReg();
  Register Reg2 = MI.getOperand(2).getReg();
  unsigned SubReg1 = MI.getOperand(1).getSubReg();
  unsigned SubReg2 = MI.getOperand(2).getSubReg();
  bool Reg1IsKill = MI.getOperand(1).isKill();
  bool Reg2IsKill = MI.getOperand(2).isKill();
  bool ChangeReg0 = false;
  // If machine instrs are no longer in two-address forms, update
  // destination register as well.
  if (Reg0 == Reg1) {
    // Must be two address instruction!
    assert(MI.getDesc().getOperandConstraint(0, MCOI::TIED_TO) &&
           "Expecting a two-address instruction!");
    assert(MI.getOperand(0).getSubReg() == SubReg1 && "Tied subreg mismatch");
    // The new tied source (Reg2) will also be the destination, so it must
    // not carry a kill flag.
    Reg2IsKill = false;
    ChangeReg0 = true;
  }

  // Masks.
  unsigned MB = MI.getOperand(4).getImm();
  unsigned ME = MI.getOperand(5).getImm();

  // We can't commute a trivial mask (there is no way to represent an all-zero
  // mask).
  if (MB == 0 && ME == 31)
    return nullptr;

  if (NewMI) {
    // Create a new instruction.
    Register Reg0 = ChangeReg0 ? Reg2 : MI.getOperand(0).getReg();
    bool Reg0IsDead = MI.getOperand(0).isDead();
    return BuildMI(MF, MI.getDebugLoc(), MI.getDesc())
        .addReg(Reg0, RegState::Define | getDeadRegState(Reg0IsDead))
        .addReg(Reg2, getKillRegState(Reg2IsKill))
        .addReg(Reg1, getKillRegState(Reg1IsKill))
        .addImm((ME + 1) & 31)
        .addImm((MB - 1) & 31);
  }

  // Update MI in place: swap the source operands (registers, subregisters,
  // and kill flags), retargeting the tied destination if needed.
  if (ChangeReg0) {
    MI.getOperand(0).setReg(Reg2);
    MI.getOperand(0).setSubReg(SubReg2);
  }
  MI.getOperand(2).setReg(Reg1);
  MI.getOperand(1).setReg(Reg2);
  MI.getOperand(2).setSubReg(SubReg1);
  MI.getOperand(1).setSubReg(SubReg2);
  MI.getOperand(2).setIsKill(Reg1IsKill);
  MI.getOperand(1).setIsKill(Reg2IsKill);

  // Swap the mask around.
  MI.getOperand(4).setImm((ME + 1) & 31);
  MI.getOperand(5).setImm((MB - 1) & 31);
  return &MI;
}
|
2006-03-06 07:49:55 +08:00
|
|
|
|
2016-06-30 08:01:54 +08:00
|
|
|
// Report which operand indices of \p MI may be commuted.
bool PPCInstrInfo::findCommutedOpIndices(MachineInstr &MI, unsigned &SrcOpIdx1,
                                         unsigned &SrcOpIdx2) const {
  // For VSX A-type FMA instructions the first two encoded operands commute,
  // but because the non-encoded tied input operand is listed first, the
  // operands to swap are actually the second and third.
  const int AltFMAOpc = PPC::getAltVSXFMAOpcode(MI.getOpcode());
  if (AltFMAOpc != -1)
    // Return the commutable indices 2 and 3 via SrcOpIdx1/SrcOpIdx2.
    return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 2, 3);

  // Not a VSX A-type FMA; defer to the generic implementation.
  return TargetInstrInfo::findCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2);
}
|
|
|
|
|
2010-12-24 12:28:06 +08:00
|
|
|
void PPCInstrInfo::insertNoop(MachineBasicBlock &MBB,
|
2006-03-06 07:49:55 +08:00
|
|
|
MachineBasicBlock::iterator MI) const {
|
2013-12-12 08:19:11 +08:00
|
|
|
// This function is used for scheduling, and the nop wanted here is the type
|
|
|
|
// that terminates dispatch groups on the POWER cores.
|
2014-06-13 05:48:52 +08:00
|
|
|
unsigned Directive = Subtarget.getDarwinDirective();
|
2013-12-12 08:19:11 +08:00
|
|
|
unsigned Opcode;
|
|
|
|
switch (Directive) {
|
|
|
|
default: Opcode = PPC::NOP; break;
|
|
|
|
case PPC::DIR_PWR6: Opcode = PPC::NOP_GT_PWR6; break;
|
|
|
|
case PPC::DIR_PWR7: Opcode = PPC::NOP_GT_PWR7; break;
|
2014-06-26 21:36:19 +08:00
|
|
|
case PPC::DIR_PWR8: Opcode = PPC::NOP_GT_PWR7; break; /* FIXME: Update when P8 InstrScheduling model is ready */
|
2016-05-10 02:54:58 +08:00
|
|
|
// FIXME: Update when POWER9 scheduling model is ready.
|
|
|
|
case PPC::DIR_PWR9: Opcode = PPC::NOP_GT_PWR7; break;
|
2013-12-12 08:19:11 +08:00
|
|
|
}
|
|
|
|
|
2010-04-03 04:16:16 +08:00
|
|
|
DebugLoc DL;
|
2013-12-12 08:19:11 +08:00
|
|
|
BuildMI(MBB, MI, DL, get(Opcode));
|
2006-03-06 07:49:55 +08:00
|
|
|
}
|
2006-10-14 05:21:17 +08:00
|
|
|
|
2017-04-22 05:48:41 +08:00
|
|
|
/// Return the noop instruction to use for a noop.
|
|
|
|
void PPCInstrInfo::getNoop(MCInst &NopInst) const {
|
2014-08-09 03:13:23 +08:00
|
|
|
NopInst.setOpcode(PPC::NOP);
|
|
|
|
}
|
|
|
|
|
2006-10-14 05:21:17 +08:00
|
|
|
// Branch analysis.
|
2012-06-08 23:38:21 +08:00
|
|
|
// Note: If the condition register is set to CTR or CTR8 then this is a
|
|
|
|
// BDNZ (imm == 1) or BDZ (imm == 0) branch.
|
2016-07-15 22:41:04 +08:00
|
|
|
bool PPCInstrInfo::analyzeBranch(MachineBasicBlock &MBB,
|
|
|
|
MachineBasicBlock *&TBB,
|
2006-10-14 05:21:17 +08:00
|
|
|
MachineBasicBlock *&FBB,
|
2009-02-09 15:14:22 +08:00
|
|
|
SmallVectorImpl<MachineOperand> &Cond,
|
|
|
|
bool AllowModify) const {
|
2014-06-13 05:48:52 +08:00
|
|
|
bool isPPC64 = Subtarget.isPPC64();
|
2012-06-08 23:38:21 +08:00
|
|
|
|
2006-10-14 05:21:17 +08:00
|
|
|
// If the block has no terminators, it just falls into the block after it.
|
2015-06-25 21:39:03 +08:00
|
|
|
MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
|
|
|
|
if (I == MBB.end())
|
2010-04-02 09:38:09 +08:00
|
|
|
return false;
|
2015-06-25 21:39:03 +08:00
|
|
|
|
2016-02-23 10:46:52 +08:00
|
|
|
if (!isUnpredicatedTerminator(*I))
|
2006-10-14 05:21:17 +08:00
|
|
|
return false;
|
|
|
|
|
2017-09-27 18:33:02 +08:00
|
|
|
if (AllowModify) {
|
|
|
|
// If the BB ends with an unconditional branch to the fallthrough BB,
|
|
|
|
// we eliminate the branch instruction.
|
|
|
|
if (I->getOpcode() == PPC::B &&
|
|
|
|
MBB.isLayoutSuccessor(I->getOperand(0).getMBB())) {
|
|
|
|
I->eraseFromParent();
|
|
|
|
|
|
|
|
// We update iterator after deleting the last branch.
|
|
|
|
I = MBB.getLastNonDebugInstr();
|
|
|
|
if (I == MBB.end() || !isUnpredicatedTerminator(*I))
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2006-10-14 05:21:17 +08:00
|
|
|
// Get the last instruction in the block.
|
2016-07-27 21:24:16 +08:00
|
|
|
MachineInstr &LastInst = *I;
|
2010-12-24 12:28:06 +08:00
|
|
|
|
2006-10-14 05:21:17 +08:00
|
|
|
// If there is only one terminator instruction, process it.
|
2016-02-23 10:46:52 +08:00
|
|
|
if (I == MBB.begin() || !isUnpredicatedTerminator(*--I)) {
|
2016-07-27 21:24:16 +08:00
|
|
|
if (LastInst.getOpcode() == PPC::B) {
|
|
|
|
if (!LastInst.getOperand(0).isMBB())
|
2009-05-09 07:09:25 +08:00
|
|
|
return true;
|
2016-07-27 21:24:16 +08:00
|
|
|
TBB = LastInst.getOperand(0).getMBB();
|
2006-10-14 05:21:17 +08:00
|
|
|
return false;
|
2016-07-27 21:24:16 +08:00
|
|
|
} else if (LastInst.getOpcode() == PPC::BCC) {
|
|
|
|
if (!LastInst.getOperand(2).isMBB())
|
2009-05-09 07:09:25 +08:00
|
|
|
return true;
|
2006-10-14 05:21:17 +08:00
|
|
|
// Block ends with fall-through condbranch.
|
2016-07-27 21:24:16 +08:00
|
|
|
TBB = LastInst.getOperand(2).getMBB();
|
|
|
|
Cond.push_back(LastInst.getOperand(0));
|
|
|
|
Cond.push_back(LastInst.getOperand(1));
|
2006-10-21 14:03:11 +08:00
|
|
|
return false;
|
2016-07-27 21:24:16 +08:00
|
|
|
} else if (LastInst.getOpcode() == PPC::BC) {
|
|
|
|
if (!LastInst.getOperand(1).isMBB())
|
Add CR-bit tracking to the PowerPC backend for i1 values
This change enables tracking i1 values in the PowerPC backend using the
condition register bits. These bits can be treated on PowerPC as separate
registers; individual bit operations (and, or, xor, etc.) are supported.
Tracking booleans in CR bits has several advantages:
- Reduction in register pressure (because we no longer need GPRs to store
boolean values).
- Logical operations on booleans can be handled more efficiently; we used to
have to move all results from comparisons into GPRs, perform promoted
logical operations in GPRs, and then move the result back into condition
register bits to be used by conditional branches. This can be very
inefficient, because the throughput of these CR <-> GPR moves have high
latency and low throughput (especially when other associated instructions
are accounted for).
- On the POWER7 and similar cores, we can increase total throughput by using
the CR bits. CR bit operations have a dedicated functional unit.
Most of this is more-or-less mechanical: Adjustments were needed in the
calling-convention code, support was added for spilling/restoring individual
condition-register bits, and conditional branch instruction definitions taking
specific CR bits were added (plus patterns and code for generating bit-level
operations).
This is enabled by default when running at -O2 and higher. For -O0 and -O1,
where the ability to debug is more important, this feature is disabled by
default. Individual CR bits do not have assigned DWARF register numbers,
and storing values in CR bits makes them invisible to the debugger.
It is critical, however, that we don't move i1 values that have been promoted
to larger values (such as those passed as function arguments) into bit
registers only to quickly turn around and move the values back into GPRs (such
as happens when values are returned by functions). A pair of target-specific
DAG combines are added to remove the trunc/extends in:
trunc(binary-ops(binary-ops(zext(x), zext(y)), ...)
and:
zext(binary-ops(binary-ops(trunc(x), trunc(y)), ...)
In short, we only want to use CR bits where some of the i1 values come from
comparisons or are used by conditional branches or selects. To put it another
way, if we can do the entire i1 computation in GPRs, then we probably should
(on the POWER7, the GPR-operation throughput is higher, and for all cores, the
CR <-> GPR moves are expensive).
POWER7 test-suite performance results (from 10 runs in each configuration):
SingleSource/Benchmarks/Misc/mandel-2: 35% speedup
MultiSource/Benchmarks/Prolangs-C++/city/city: 21% speedup
MultiSource/Benchmarks/MiBench/automotive-susan: 23% speedup
SingleSource/Benchmarks/CoyoteBench/huffbench: 13% speedup
SingleSource/Benchmarks/Misc-C++/Large/sphereflake: 13% speedup
SingleSource/Benchmarks/Misc-C++/mandel-text: 10% speedup
SingleSource/Benchmarks/Misc-C++-EH/spirit: 10% slowdown
MultiSource/Applications/lemon/lemon: 8% slowdown
llvm-svn: 202451
2014-02-28 08:27:01 +08:00
|
|
|
return true;
|
|
|
|
// Block ends with fall-through condbranch.
|
2016-07-27 21:24:16 +08:00
|
|
|
TBB = LastInst.getOperand(1).getMBB();
|
Add CR-bit tracking to the PowerPC backend for i1 values
This change enables tracking i1 values in the PowerPC backend using the
condition register bits. These bits can be treated on PowerPC as separate
registers; individual bit operations (and, or, xor, etc.) are supported.
Tracking booleans in CR bits has several advantages:
- Reduction in register pressure (because we no longer need GPRs to store
boolean values).
- Logical operations on booleans can be handled more efficiently; we used to
have to move all results from comparisons into GPRs, perform promoted
logical operations in GPRs, and then move the result back into condition
register bits to be used by conditional branches. This can be very
inefficient, because the throughput of these CR <-> GPR moves have high
latency and low throughput (especially when other associated instructions
are accounted for).
- On the POWER7 and similar cores, we can increase total throughput by using
the CR bits. CR bit operations have a dedicated functional unit.
Most of this is more-or-less mechanical: Adjustments were needed in the
calling-convention code, support was added for spilling/restoring individual
condition-register bits, and conditional branch instruction definitions taking
specific CR bits were added (plus patterns and code for generating bit-level
operations).
This is enabled by default when running at -O2 and higher. For -O0 and -O1,
where the ability to debug is more important, this feature is disabled by
default. Individual CR bits do not have assigned DWARF register numbers,
and storing values in CR bits makes them invisible to the debugger.
It is critical, however, that we don't move i1 values that have been promoted
to larger values (such as those passed as function arguments) into bit
registers only to quickly turn around and move the values back into GPRs (such
as happens when values are returned by functions). A pair of target-specific
DAG combines are added to remove the trunc/extends in:
trunc(binary-ops(binary-ops(zext(x), zext(y)), ...)
and:
zext(binary-ops(binary-ops(trunc(x), trunc(y)), ...)
In short, we only want to use CR bits where some of the i1 values come from
comparisons or are used by conditional branches or selects. To put it another
way, if we can do the entire i1 computation in GPRs, then we probably should
(on the POWER7, the GPR-operation throughput is higher, and for all cores, the
CR <-> GPR moves are expensive).
POWER7 test-suite performance results (from 10 runs in each configuration):
SingleSource/Benchmarks/Misc/mandel-2: 35% speedup
MultiSource/Benchmarks/Prolangs-C++/city/city: 21% speedup
MultiSource/Benchmarks/MiBench/automotive-susan: 23% speedup
SingleSource/Benchmarks/CoyoteBench/huffbench: 13% speedup
SingleSource/Benchmarks/Misc-C++/Large/sphereflake: 13% speedup
SingleSource/Benchmarks/Misc-C++/mandel-text: 10% speedup
SingleSource/Benchmarks/Misc-C++-EH/spirit: 10% slowdown
MultiSource/Applications/lemon/lemon: 8% slowdown
llvm-svn: 202451
2014-02-28 08:27:01 +08:00
|
|
|
Cond.push_back(MachineOperand::CreateImm(PPC::PRED_BIT_SET));
|
2016-07-27 21:24:16 +08:00
|
|
|
Cond.push_back(LastInst.getOperand(0));
|
Add CR-bit tracking to the PowerPC backend for i1 values
This change enables tracking i1 values in the PowerPC backend using the
condition register bits. These bits can be treated on PowerPC as separate
registers; individual bit operations (and, or, xor, etc.) are supported.
Tracking booleans in CR bits has several advantages:
- Reduction in register pressure (because we no longer need GPRs to store
boolean values).
- Logical operations on booleans can be handled more efficiently; we used to
have to move all results from comparisons into GPRs, perform promoted
logical operations in GPRs, and then move the result back into condition
register bits to be used by conditional branches. This can be very
inefficient, because the throughput of these CR <-> GPR moves have high
latency and low throughput (especially when other associated instructions
are accounted for).
- On the POWER7 and similar cores, we can increase total throughput by using
the CR bits. CR bit operations have a dedicated functional unit.
Most of this is more-or-less mechanical: Adjustments were needed in the
calling-convention code, support was added for spilling/restoring individual
condition-register bits, and conditional branch instruction definitions taking
specific CR bits were added (plus patterns and code for generating bit-level
operations).
This is enabled by default when running at -O2 and higher. For -O0 and -O1,
where the ability to debug is more important, this feature is disabled by
default. Individual CR bits do not have assigned DWARF register numbers,
and storing values in CR bits makes them invisible to the debugger.
It is critical, however, that we don't move i1 values that have been promoted
to larger values (such as those passed as function arguments) into bit
registers only to quickly turn around and move the values back into GPRs (such
as happens when values are returned by functions). A pair of target-specific
DAG combines are added to remove the trunc/extends in:
trunc(binary-ops(binary-ops(zext(x), zext(y)), ...)
and:
zext(binary-ops(binary-ops(trunc(x), trunc(y)), ...)
In short, we only want to use CR bits where some of the i1 values come from
comparisons or are used by conditional branches or selects. To put it another
way, if we can do the entire i1 computation in GPRs, then we probably should
(on the POWER7, the GPR-operation throughput is higher, and for all cores, the
CR <-> GPR moves are expensive).
POWER7 test-suite performance results (from 10 runs in each configuration):
SingleSource/Benchmarks/Misc/mandel-2: 35% speedup
MultiSource/Benchmarks/Prolangs-C++/city/city: 21% speedup
MultiSource/Benchmarks/MiBench/automotive-susan: 23% speedup
SingleSource/Benchmarks/CoyoteBench/huffbench: 13% speedup
SingleSource/Benchmarks/Misc-C++/Large/sphereflake: 13% speedup
SingleSource/Benchmarks/Misc-C++/mandel-text: 10% speedup
SingleSource/Benchmarks/Misc-C++-EH/spirit: 10% slowdown
MultiSource/Applications/lemon/lemon: 8% slowdown
llvm-svn: 202451
2014-02-28 08:27:01 +08:00
|
|
|
return false;
|
2016-07-27 21:24:16 +08:00
|
|
|
} else if (LastInst.getOpcode() == PPC::BCn) {
|
|
|
|
if (!LastInst.getOperand(1).isMBB())
|
Add CR-bit tracking to the PowerPC backend for i1 values
This change enables tracking i1 values in the PowerPC backend using the
condition register bits. These bits can be treated on PowerPC as separate
registers; individual bit operations (and, or, xor, etc.) are supported.
Tracking booleans in CR bits has several advantages:
- Reduction in register pressure (because we no longer need GPRs to store
boolean values).
- Logical operations on booleans can be handled more efficiently; we used to
have to move all results from comparisons into GPRs, perform promoted
logical operations in GPRs, and then move the result back into condition
register bits to be used by conditional branches. This can be very
inefficient, because the throughput of these CR <-> GPR moves have high
latency and low throughput (especially when other associated instructions
are accounted for).
- On the POWER7 and similar cores, we can increase total throughput by using
the CR bits. CR bit operations have a dedicated functional unit.
Most of this is more-or-less mechanical: Adjustments were needed in the
calling-convention code, support was added for spilling/restoring individual
condition-register bits, and conditional branch instruction definitions taking
specific CR bits were added (plus patterns and code for generating bit-level
operations).
This is enabled by default when running at -O2 and higher. For -O0 and -O1,
where the ability to debug is more important, this feature is disabled by
default. Individual CR bits do not have assigned DWARF register numbers,
and storing values in CR bits makes them invisible to the debugger.
It is critical, however, that we don't move i1 values that have been promoted
to larger values (such as those passed as function arguments) into bit
registers only to quickly turn around and move the values back into GPRs (such
as happens when values are returned by functions). A pair of target-specific
DAG combines are added to remove the trunc/extends in:
trunc(binary-ops(binary-ops(zext(x), zext(y)), ...)
and:
zext(binary-ops(binary-ops(trunc(x), trunc(y)), ...)
In short, we only want to use CR bits where some of the i1 values come from
comparisons or are used by conditional branches or selects. To put it another
way, if we can do the entire i1 computation in GPRs, then we probably should
(on the POWER7, the GPR-operation throughput is higher, and for all cores, the
CR <-> GPR moves are expensive).
POWER7 test-suite performance results (from 10 runs in each configuration):
SingleSource/Benchmarks/Misc/mandel-2: 35% speedup
MultiSource/Benchmarks/Prolangs-C++/city/city: 21% speedup
MultiSource/Benchmarks/MiBench/automotive-susan: 23% speedup
SingleSource/Benchmarks/CoyoteBench/huffbench: 13% speedup
SingleSource/Benchmarks/Misc-C++/Large/sphereflake: 13% speedup
SingleSource/Benchmarks/Misc-C++/mandel-text: 10% speedup
SingleSource/Benchmarks/Misc-C++-EH/spirit: 10% slowdown
MultiSource/Applications/lemon/lemon: 8% slowdown
llvm-svn: 202451
2014-02-28 08:27:01 +08:00
|
|
|
return true;
|
|
|
|
// Block ends with fall-through condbranch.
|
2016-07-27 21:24:16 +08:00
|
|
|
TBB = LastInst.getOperand(1).getMBB();
|
Add CR-bit tracking to the PowerPC backend for i1 values
This change enables tracking i1 values in the PowerPC backend using the
condition register bits. These bits can be treated on PowerPC as separate
registers; individual bit operations (and, or, xor, etc.) are supported.
Tracking booleans in CR bits has several advantages:
- Reduction in register pressure (because we no longer need GPRs to store
boolean values).
- Logical operations on booleans can be handled more efficiently; we used to
have to move all results from comparisons into GPRs, perform promoted
logical operations in GPRs, and then move the result back into condition
register bits to be used by conditional branches. This can be very
inefficient, because the throughput of these CR <-> GPR moves have high
latency and low throughput (especially when other associated instructions
are accounted for).
- On the POWER7 and similar cores, we can increase total throughput by using
the CR bits. CR bit operations have a dedicated functional unit.
Most of this is more-or-less mechanical: Adjustments were needed in the
calling-convention code, support was added for spilling/restoring individual
condition-register bits, and conditional branch instruction definitions taking
specific CR bits were added (plus patterns and code for generating bit-level
operations).
This is enabled by default when running at -O2 and higher. For -O0 and -O1,
where the ability to debug is more important, this feature is disabled by
default. Individual CR bits do not have assigned DWARF register numbers,
and storing values in CR bits makes them invisible to the debugger.
It is critical, however, that we don't move i1 values that have been promoted
to larger values (such as those passed as function arguments) into bit
registers only to quickly turn around and move the values back into GPRs (such
as happens when values are returned by functions). A pair of target-specific
DAG combines are added to remove the trunc/extends in:
trunc(binary-ops(binary-ops(zext(x), zext(y)), ...)
and:
zext(binary-ops(binary-ops(trunc(x), trunc(y)), ...)
In short, we only want to use CR bits where some of the i1 values come from
comparisons or are used by conditional branches or selects. To put it another
way, if we can do the entire i1 computation in GPRs, then we probably should
(on the POWER7, the GPR-operation throughput is higher, and for all cores, the
CR <-> GPR moves are expensive).
POWER7 test-suite performance results (from 10 runs in each configuration):
SingleSource/Benchmarks/Misc/mandel-2: 35% speedup
MultiSource/Benchmarks/Prolangs-C++/city/city: 21% speedup
MultiSource/Benchmarks/MiBench/automotive-susan: 23% speedup
SingleSource/Benchmarks/CoyoteBench/huffbench: 13% speedup
SingleSource/Benchmarks/Misc-C++/Large/sphereflake: 13% speedup
SingleSource/Benchmarks/Misc-C++/mandel-text: 10% speedup
SingleSource/Benchmarks/Misc-C++-EH/spirit: 10% slowdown
MultiSource/Applications/lemon/lemon: 8% slowdown
llvm-svn: 202451
2014-02-28 08:27:01 +08:00
|
|
|
Cond.push_back(MachineOperand::CreateImm(PPC::PRED_BIT_UNSET));
|
2016-07-27 21:24:16 +08:00
|
|
|
Cond.push_back(LastInst.getOperand(0));
|
Add CR-bit tracking to the PowerPC backend for i1 values
This change enables tracking i1 values in the PowerPC backend using the
condition register bits. These bits can be treated on PowerPC as separate
registers; individual bit operations (and, or, xor, etc.) are supported.
Tracking booleans in CR bits has several advantages:
- Reduction in register pressure (because we no longer need GPRs to store
boolean values).
- Logical operations on booleans can be handled more efficiently; we used to
have to move all results from comparisons into GPRs, perform promoted
logical operations in GPRs, and then move the result back into condition
register bits to be used by conditional branches. This can be very
inefficient, because the throughput of these CR <-> GPR moves have high
latency and low throughput (especially when other associated instructions
are accounted for).
- On the POWER7 and similar cores, we can increase total throughput by using
the CR bits. CR bit operations have a dedicated functional unit.
Most of this is more-or-less mechanical: Adjustments were needed in the
calling-convention code, support was added for spilling/restoring individual
condition-register bits, and conditional branch instruction definitions taking
specific CR bits were added (plus patterns and code for generating bit-level
operations).
This is enabled by default when running at -O2 and higher. For -O0 and -O1,
where the ability to debug is more important, this feature is disabled by
default. Individual CR bits do not have assigned DWARF register numbers,
and storing values in CR bits makes them invisible to the debugger.
It is critical, however, that we don't move i1 values that have been promoted
to larger values (such as those passed as function arguments) into bit
registers only to quickly turn around and move the values back into GPRs (such
as happens when values are returned by functions). A pair of target-specific
DAG combines are added to remove the trunc/extends in:
trunc(binary-ops(binary-ops(zext(x), zext(y)), ...)
and:
zext(binary-ops(binary-ops(trunc(x), trunc(y)), ...)
In short, we only want to use CR bits where some of the i1 values come from
comparisons or are used by conditional branches or selects. To put it another
way, if we can do the entire i1 computation in GPRs, then we probably should
(on the POWER7, the GPR-operation throughput is higher, and for all cores, the
CR <-> GPR moves are expensive).
POWER7 test-suite performance results (from 10 runs in each configuration):
SingleSource/Benchmarks/Misc/mandel-2: 35% speedup
MultiSource/Benchmarks/Prolangs-C++/city/city: 21% speedup
MultiSource/Benchmarks/MiBench/automotive-susan: 23% speedup
SingleSource/Benchmarks/CoyoteBench/huffbench: 13% speedup
SingleSource/Benchmarks/Misc-C++/Large/sphereflake: 13% speedup
SingleSource/Benchmarks/Misc-C++/mandel-text: 10% speedup
SingleSource/Benchmarks/Misc-C++-EH/spirit: 10% slowdown
MultiSource/Applications/lemon/lemon: 8% slowdown
llvm-svn: 202451
2014-02-28 08:27:01 +08:00
|
|
|
return false;
|
2016-07-27 21:24:16 +08:00
|
|
|
} else if (LastInst.getOpcode() == PPC::BDNZ8 ||
|
|
|
|
LastInst.getOpcode() == PPC::BDNZ) {
|
|
|
|
if (!LastInst.getOperand(0).isMBB())
|
2012-06-08 23:38:21 +08:00
|
|
|
return true;
|
2012-06-09 03:19:53 +08:00
|
|
|
if (DisableCTRLoopAnal)
|
2012-06-08 23:38:25 +08:00
|
|
|
return true;
|
2016-07-27 21:24:16 +08:00
|
|
|
TBB = LastInst.getOperand(0).getMBB();
|
2012-06-08 23:38:21 +08:00
|
|
|
Cond.push_back(MachineOperand::CreateImm(1));
|
|
|
|
Cond.push_back(MachineOperand::CreateReg(isPPC64 ? PPC::CTR8 : PPC::CTR,
|
|
|
|
true));
|
|
|
|
return false;
|
2016-07-27 21:24:16 +08:00
|
|
|
} else if (LastInst.getOpcode() == PPC::BDZ8 ||
|
|
|
|
LastInst.getOpcode() == PPC::BDZ) {
|
|
|
|
if (!LastInst.getOperand(0).isMBB())
|
2012-06-08 23:38:21 +08:00
|
|
|
return true;
|
2012-06-09 03:19:53 +08:00
|
|
|
if (DisableCTRLoopAnal)
|
2012-06-08 23:38:25 +08:00
|
|
|
return true;
|
2016-07-27 21:24:16 +08:00
|
|
|
TBB = LastInst.getOperand(0).getMBB();
|
2012-06-08 23:38:21 +08:00
|
|
|
Cond.push_back(MachineOperand::CreateImm(0));
|
|
|
|
Cond.push_back(MachineOperand::CreateReg(isPPC64 ? PPC::CTR8 : PPC::CTR,
|
|
|
|
true));
|
|
|
|
return false;
|
2006-10-14 05:21:17 +08:00
|
|
|
}
|
2012-06-08 23:38:21 +08:00
|
|
|
|
2006-10-14 05:21:17 +08:00
|
|
|
// Otherwise, don't know what this is.
|
|
|
|
return true;
|
|
|
|
}
|
2010-12-24 12:28:06 +08:00
|
|
|
|
2006-10-14 05:21:17 +08:00
|
|
|
// Get the instruction before it if it's a terminator.
|
2016-07-27 21:24:16 +08:00
|
|
|
MachineInstr &SecondLastInst = *I;
|
2006-10-14 05:21:17 +08:00
|
|
|
|
|
|
|
// If there are three terminators, we don't know what sort of block this is.
|
2016-07-27 21:24:16 +08:00
|
|
|
if (I != MBB.begin() && isUnpredicatedTerminator(*--I))
|
2006-10-14 05:21:17 +08:00
|
|
|
return true;
|
2010-12-24 12:28:06 +08:00
|
|
|
|
2006-11-18 06:14:47 +08:00
|
|
|
// If the block ends with PPC::B and PPC:BCC, handle it.
|
2016-07-27 21:24:16 +08:00
|
|
|
if (SecondLastInst.getOpcode() == PPC::BCC &&
|
|
|
|
LastInst.getOpcode() == PPC::B) {
|
|
|
|
if (!SecondLastInst.getOperand(2).isMBB() ||
|
|
|
|
!LastInst.getOperand(0).isMBB())
|
2009-05-09 07:09:25 +08:00
|
|
|
return true;
|
2016-07-27 21:24:16 +08:00
|
|
|
TBB = SecondLastInst.getOperand(2).getMBB();
|
|
|
|
Cond.push_back(SecondLastInst.getOperand(0));
|
|
|
|
Cond.push_back(SecondLastInst.getOperand(1));
|
|
|
|
FBB = LastInst.getOperand(0).getMBB();
|
2006-10-14 05:21:17 +08:00
|
|
|
return false;
|
2016-07-27 21:24:16 +08:00
|
|
|
} else if (SecondLastInst.getOpcode() == PPC::BC &&
|
|
|
|
LastInst.getOpcode() == PPC::B) {
|
|
|
|
if (!SecondLastInst.getOperand(1).isMBB() ||
|
|
|
|
!LastInst.getOperand(0).isMBB())
|
Add CR-bit tracking to the PowerPC backend for i1 values
This change enables tracking i1 values in the PowerPC backend using the
condition register bits. These bits can be treated on PowerPC as separate
registers; individual bit operations (and, or, xor, etc.) are supported.
Tracking booleans in CR bits has several advantages:
- Reduction in register pressure (because we no longer need GPRs to store
boolean values).
- Logical operations on booleans can be handled more efficiently; we used to
have to move all results from comparisons into GPRs, perform promoted
logical operations in GPRs, and then move the result back into condition
register bits to be used by conditional branches. This can be very
inefficient, because the throughput of these CR <-> GPR moves have high
latency and low throughput (especially when other associated instructions
are accounted for).
- On the POWER7 and similar cores, we can increase total throughput by using
the CR bits. CR bit operations have a dedicated functional unit.
Most of this is more-or-less mechanical: Adjustments were needed in the
calling-convention code, support was added for spilling/restoring individual
condition-register bits, and conditional branch instruction definitions taking
specific CR bits were added (plus patterns and code for generating bit-level
operations).
This is enabled by default when running at -O2 and higher. For -O0 and -O1,
where the ability to debug is more important, this feature is disabled by
default. Individual CR bits do not have assigned DWARF register numbers,
and storing values in CR bits makes them invisible to the debugger.
It is critical, however, that we don't move i1 values that have been promoted
to larger values (such as those passed as function arguments) into bit
registers only to quickly turn around and move the values back into GPRs (such
as happens when values are returned by functions). A pair of target-specific
DAG combines are added to remove the trunc/extends in:
trunc(binary-ops(binary-ops(zext(x), zext(y)), ...)
and:
zext(binary-ops(binary-ops(trunc(x), trunc(y)), ...)
In short, we only want to use CR bits where some of the i1 values come from
comparisons or are used by conditional branches or selects. To put it another
way, if we can do the entire i1 computation in GPRs, then we probably should
(on the POWER7, the GPR-operation throughput is higher, and for all cores, the
CR <-> GPR moves are expensive).
POWER7 test-suite performance results (from 10 runs in each configuration):
SingleSource/Benchmarks/Misc/mandel-2: 35% speedup
MultiSource/Benchmarks/Prolangs-C++/city/city: 21% speedup
MultiSource/Benchmarks/MiBench/automotive-susan: 23% speedup
SingleSource/Benchmarks/CoyoteBench/huffbench: 13% speedup
SingleSource/Benchmarks/Misc-C++/Large/sphereflake: 13% speedup
SingleSource/Benchmarks/Misc-C++/mandel-text: 10% speedup
SingleSource/Benchmarks/Misc-C++-EH/spirit: 10% slowdown
MultiSource/Applications/lemon/lemon: 8% slowdown
llvm-svn: 202451
2014-02-28 08:27:01 +08:00
|
|
|
return true;
|
2016-07-27 21:24:16 +08:00
|
|
|
TBB = SecondLastInst.getOperand(1).getMBB();
|
Add CR-bit tracking to the PowerPC backend for i1 values
This change enables tracking i1 values in the PowerPC backend using the
condition register bits. These bits can be treated on PowerPC as separate
registers; individual bit operations (and, or, xor, etc.) are supported.
Tracking booleans in CR bits has several advantages:
- Reduction in register pressure (because we no longer need GPRs to store
boolean values).
- Logical operations on booleans can be handled more efficiently; we used to
have to move all results from comparisons into GPRs, perform promoted
logical operations in GPRs, and then move the result back into condition
register bits to be used by conditional branches. This can be very
inefficient, because the throughput of these CR <-> GPR moves have high
latency and low throughput (especially when other associated instructions
are accounted for).
- On the POWER7 and similar cores, we can increase total throughput by using
the CR bits. CR bit operations have a dedicated functional unit.
Most of this is more-or-less mechanical: Adjustments were needed in the
calling-convention code, support was added for spilling/restoring individual
condition-register bits, and conditional branch instruction definitions taking
specific CR bits were added (plus patterns and code for generating bit-level
operations).
This is enabled by default when running at -O2 and higher. For -O0 and -O1,
where the ability to debug is more important, this feature is disabled by
default. Individual CR bits do not have assigned DWARF register numbers,
and storing values in CR bits makes them invisible to the debugger.
It is critical, however, that we don't move i1 values that have been promoted
to larger values (such as those passed as function arguments) into bit
registers only to quickly turn around and move the values back into GPRs (such
as happens when values are returned by functions). A pair of target-specific
DAG combines are added to remove the trunc/extends in:
trunc(binary-ops(binary-ops(zext(x), zext(y)), ...)
and:
zext(binary-ops(binary-ops(trunc(x), trunc(y)), ...)
In short, we only want to use CR bits where some of the i1 values come from
comparisons or are used by conditional branches or selects. To put it another
way, if we can do the entire i1 computation in GPRs, then we probably should
(on the POWER7, the GPR-operation throughput is higher, and for all cores, the
CR <-> GPR moves are expensive).
POWER7 test-suite performance results (from 10 runs in each configuration):
SingleSource/Benchmarks/Misc/mandel-2: 35% speedup
MultiSource/Benchmarks/Prolangs-C++/city/city: 21% speedup
MultiSource/Benchmarks/MiBench/automotive-susan: 23% speedup
SingleSource/Benchmarks/CoyoteBench/huffbench: 13% speedup
SingleSource/Benchmarks/Misc-C++/Large/sphereflake: 13% speedup
SingleSource/Benchmarks/Misc-C++/mandel-text: 10% speedup
SingleSource/Benchmarks/Misc-C++-EH/spirit: 10% slowdown
MultiSource/Applications/lemon/lemon: 8% slowdown
llvm-svn: 202451
2014-02-28 08:27:01 +08:00
|
|
|
Cond.push_back(MachineOperand::CreateImm(PPC::PRED_BIT_SET));
|
2016-07-27 21:24:16 +08:00
|
|
|
Cond.push_back(SecondLastInst.getOperand(0));
|
|
|
|
FBB = LastInst.getOperand(0).getMBB();
|
Add CR-bit tracking to the PowerPC backend for i1 values
This change enables tracking i1 values in the PowerPC backend using the
condition register bits. These bits can be treated on PowerPC as separate
registers; individual bit operations (and, or, xor, etc.) are supported.
Tracking booleans in CR bits has several advantages:
- Reduction in register pressure (because we no longer need GPRs to store
boolean values).
- Logical operations on booleans can be handled more efficiently; we used to
have to move all results from comparisons into GPRs, perform promoted
logical operations in GPRs, and then move the result back into condition
register bits to be used by conditional branches. This can be very
inefficient, because the throughput of these CR <-> GPR moves have high
latency and low throughput (especially when other associated instructions
are accounted for).
- On the POWER7 and similar cores, we can increase total throughput by using
the CR bits. CR bit operations have a dedicated functional unit.
Most of this is more-or-less mechanical: Adjustments were needed in the
calling-convention code, support was added for spilling/restoring individual
condition-register bits, and conditional branch instruction definitions taking
specific CR bits were added (plus patterns and code for generating bit-level
operations).
This is enabled by default when running at -O2 and higher. For -O0 and -O1,
where the ability to debug is more important, this feature is disabled by
default. Individual CR bits do not have assigned DWARF register numbers,
and storing values in CR bits makes them invisible to the debugger.
It is critical, however, that we don't move i1 values that have been promoted
to larger values (such as those passed as function arguments) into bit
registers only to quickly turn around and move the values back into GPRs (such
as happens when values are returned by functions). A pair of target-specific
DAG combines are added to remove the trunc/extends in:
trunc(binary-ops(binary-ops(zext(x), zext(y)), ...)
and:
zext(binary-ops(binary-ops(trunc(x), trunc(y)), ...)
In short, we only want to use CR bits where some of the i1 values come from
comparisons or are used by conditional branches or selects. To put it another
way, if we can do the entire i1 computation in GPRs, then we probably should
(on the POWER7, the GPR-operation throughput is higher, and for all cores, the
CR <-> GPR moves are expensive).
POWER7 test-suite performance results (from 10 runs in each configuration):
SingleSource/Benchmarks/Misc/mandel-2: 35% speedup
MultiSource/Benchmarks/Prolangs-C++/city/city: 21% speedup
MultiSource/Benchmarks/MiBench/automotive-susan: 23% speedup
SingleSource/Benchmarks/CoyoteBench/huffbench: 13% speedup
SingleSource/Benchmarks/Misc-C++/Large/sphereflake: 13% speedup
SingleSource/Benchmarks/Misc-C++/mandel-text: 10% speedup
SingleSource/Benchmarks/Misc-C++-EH/spirit: 10% slowdown
MultiSource/Applications/lemon/lemon: 8% slowdown
llvm-svn: 202451
2014-02-28 08:27:01 +08:00
|
|
|
return false;
|
2016-07-27 21:24:16 +08:00
|
|
|
} else if (SecondLastInst.getOpcode() == PPC::BCn &&
|
|
|
|
LastInst.getOpcode() == PPC::B) {
|
|
|
|
if (!SecondLastInst.getOperand(1).isMBB() ||
|
|
|
|
!LastInst.getOperand(0).isMBB())
|
Add CR-bit tracking to the PowerPC backend for i1 values
This change enables tracking i1 values in the PowerPC backend using the
condition register bits. These bits can be treated on PowerPC as separate
registers; individual bit operations (and, or, xor, etc.) are supported.
Tracking booleans in CR bits has several advantages:
- Reduction in register pressure (because we no longer need GPRs to store
boolean values).
- Logical operations on booleans can be handled more efficiently; we used to
have to move all results from comparisons into GPRs, perform promoted
logical operations in GPRs, and then move the result back into condition
register bits to be used by conditional branches. This can be very
inefficient, because the throughput of these CR <-> GPR moves have high
latency and low throughput (especially when other associated instructions
are accounted for).
- On the POWER7 and similar cores, we can increase total throughput by using
the CR bits. CR bit operations have a dedicated functional unit.
Most of this is more-or-less mechanical: Adjustments were needed in the
calling-convention code, support was added for spilling/restoring individual
condition-register bits, and conditional branch instruction definitions taking
specific CR bits were added (plus patterns and code for generating bit-level
operations).
This is enabled by default when running at -O2 and higher. For -O0 and -O1,
where the ability to debug is more important, this feature is disabled by
default. Individual CR bits do not have assigned DWARF register numbers,
and storing values in CR bits makes them invisible to the debugger.
It is critical, however, that we don't move i1 values that have been promoted
to larger values (such as those passed as function arguments) into bit
registers only to quickly turn around and move the values back into GPRs (such
as happens when values are returned by functions). A pair of target-specific
DAG combines are added to remove the trunc/extends in:
trunc(binary-ops(binary-ops(zext(x), zext(y)), ...)
and:
zext(binary-ops(binary-ops(trunc(x), trunc(y)), ...)
In short, we only want to use CR bits where some of the i1 values come from
comparisons or are used by conditional branches or selects. To put it another
way, if we can do the entire i1 computation in GPRs, then we probably should
(on the POWER7, the GPR-operation throughput is higher, and for all cores, the
CR <-> GPR moves are expensive).
POWER7 test-suite performance results (from 10 runs in each configuration):
SingleSource/Benchmarks/Misc/mandel-2: 35% speedup
MultiSource/Benchmarks/Prolangs-C++/city/city: 21% speedup
MultiSource/Benchmarks/MiBench/automotive-susan: 23% speedup
SingleSource/Benchmarks/CoyoteBench/huffbench: 13% speedup
SingleSource/Benchmarks/Misc-C++/Large/sphereflake: 13% speedup
SingleSource/Benchmarks/Misc-C++/mandel-text: 10% speedup
SingleSource/Benchmarks/Misc-C++-EH/spirit: 10% slowdown
MultiSource/Applications/lemon/lemon: 8% slowdown
llvm-svn: 202451
2014-02-28 08:27:01 +08:00
|
|
|
return true;
|
2016-07-27 21:24:16 +08:00
|
|
|
TBB = SecondLastInst.getOperand(1).getMBB();
|
Add CR-bit tracking to the PowerPC backend for i1 values
This change enables tracking i1 values in the PowerPC backend using the
condition register bits. These bits can be treated on PowerPC as separate
registers; individual bit operations (and, or, xor, etc.) are supported.
Tracking booleans in CR bits has several advantages:
- Reduction in register pressure (because we no longer need GPRs to store
boolean values).
- Logical operations on booleans can be handled more efficiently; we used to
have to move all results from comparisons into GPRs, perform promoted
logical operations in GPRs, and then move the result back into condition
register bits to be used by conditional branches. This can be very
inefficient, because the throughput of these CR <-> GPR moves have high
latency and low throughput (especially when other associated instructions
are accounted for).
- On the POWER7 and similar cores, we can increase total throughput by using
the CR bits. CR bit operations have a dedicated functional unit.
Most of this is more-or-less mechanical: Adjustments were needed in the
calling-convention code, support was added for spilling/restoring individual
condition-register bits, and conditional branch instruction definitions taking
specific CR bits were added (plus patterns and code for generating bit-level
operations).
This is enabled by default when running at -O2 and higher. For -O0 and -O1,
where the ability to debug is more important, this feature is disabled by
default. Individual CR bits do not have assigned DWARF register numbers,
and storing values in CR bits makes them invisible to the debugger.
It is critical, however, that we don't move i1 values that have been promoted
to larger values (such as those passed as function arguments) into bit
registers only to quickly turn around and move the values back into GPRs (such
as happens when values are returned by functions). A pair of target-specific
DAG combines are added to remove the trunc/extends in:
trunc(binary-ops(binary-ops(zext(x), zext(y)), ...)
and:
zext(binary-ops(binary-ops(trunc(x), trunc(y)), ...)
In short, we only want to use CR bits where some of the i1 values come from
comparisons or are used by conditional branches or selects. To put it another
way, if we can do the entire i1 computation in GPRs, then we probably should
(on the POWER7, the GPR-operation throughput is higher, and for all cores, the
CR <-> GPR moves are expensive).
POWER7 test-suite performance results (from 10 runs in each configuration):
SingleSource/Benchmarks/Misc/mandel-2: 35% speedup
MultiSource/Benchmarks/Prolangs-C++/city/city: 21% speedup
MultiSource/Benchmarks/MiBench/automotive-susan: 23% speedup
SingleSource/Benchmarks/CoyoteBench/huffbench: 13% speedup
SingleSource/Benchmarks/Misc-C++/Large/sphereflake: 13% speedup
SingleSource/Benchmarks/Misc-C++/mandel-text: 10% speedup
SingleSource/Benchmarks/Misc-C++-EH/spirit: 10% slowdown
MultiSource/Applications/lemon/lemon: 8% slowdown
llvm-svn: 202451
2014-02-28 08:27:01 +08:00
|
|
|
Cond.push_back(MachineOperand::CreateImm(PPC::PRED_BIT_UNSET));
|
2016-07-27 21:24:16 +08:00
|
|
|
Cond.push_back(SecondLastInst.getOperand(0));
|
|
|
|
FBB = LastInst.getOperand(0).getMBB();
|
Add CR-bit tracking to the PowerPC backend for i1 values
This change enables tracking i1 values in the PowerPC backend using the
condition register bits. These bits can be treated on PowerPC as separate
registers; individual bit operations (and, or, xor, etc.) are supported.
Tracking booleans in CR bits has several advantages:
- Reduction in register pressure (because we no longer need GPRs to store
boolean values).
- Logical operations on booleans can be handled more efficiently; we used to
have to move all results from comparisons into GPRs, perform promoted
logical operations in GPRs, and then move the result back into condition
register bits to be used by conditional branches. This can be very
inefficient, because the throughput of these CR <-> GPR moves have high
latency and low throughput (especially when other associated instructions
are accounted for).
- On the POWER7 and similar cores, we can increase total throughput by using
the CR bits. CR bit operations have a dedicated functional unit.
Most of this is more-or-less mechanical: Adjustments were needed in the
calling-convention code, support was added for spilling/restoring individual
condition-register bits, and conditional branch instruction definitions taking
specific CR bits were added (plus patterns and code for generating bit-level
operations).
This is enabled by default when running at -O2 and higher. For -O0 and -O1,
where the ability to debug is more important, this feature is disabled by
default. Individual CR bits do not have assigned DWARF register numbers,
and storing values in CR bits makes them invisible to the debugger.
It is critical, however, that we don't move i1 values that have been promoted
to larger values (such as those passed as function arguments) into bit
registers only to quickly turn around and move the values back into GPRs (such
as happens when values are returned by functions). A pair of target-specific
DAG combines are added to remove the trunc/extends in:
trunc(binary-ops(binary-ops(zext(x), zext(y)), ...)
and:
zext(binary-ops(binary-ops(trunc(x), trunc(y)), ...)
In short, we only want to use CR bits where some of the i1 values come from
comparisons or are used by conditional branches or selects. To put it another
way, if we can do the entire i1 computation in GPRs, then we probably should
(on the POWER7, the GPR-operation throughput is higher, and for all cores, the
CR <-> GPR moves are expensive).
POWER7 test-suite performance results (from 10 runs in each configuration):
SingleSource/Benchmarks/Misc/mandel-2: 35% speedup
MultiSource/Benchmarks/Prolangs-C++/city/city: 21% speedup
MultiSource/Benchmarks/MiBench/automotive-susan: 23% speedup
SingleSource/Benchmarks/CoyoteBench/huffbench: 13% speedup
SingleSource/Benchmarks/Misc-C++/Large/sphereflake: 13% speedup
SingleSource/Benchmarks/Misc-C++/mandel-text: 10% speedup
SingleSource/Benchmarks/Misc-C++-EH/spirit: 10% slowdown
MultiSource/Applications/lemon/lemon: 8% slowdown
llvm-svn: 202451
2014-02-28 08:27:01 +08:00
|
|
|
return false;
|
2016-07-27 21:24:16 +08:00
|
|
|
} else if ((SecondLastInst.getOpcode() == PPC::BDNZ8 ||
|
|
|
|
SecondLastInst.getOpcode() == PPC::BDNZ) &&
|
|
|
|
LastInst.getOpcode() == PPC::B) {
|
|
|
|
if (!SecondLastInst.getOperand(0).isMBB() ||
|
|
|
|
!LastInst.getOperand(0).isMBB())
|
2012-06-08 23:38:21 +08:00
|
|
|
return true;
|
2012-06-09 03:19:53 +08:00
|
|
|
if (DisableCTRLoopAnal)
|
2012-06-08 23:38:25 +08:00
|
|
|
return true;
|
2016-07-27 21:24:16 +08:00
|
|
|
TBB = SecondLastInst.getOperand(0).getMBB();
|
2012-06-08 23:38:21 +08:00
|
|
|
Cond.push_back(MachineOperand::CreateImm(1));
|
|
|
|
Cond.push_back(MachineOperand::CreateReg(isPPC64 ? PPC::CTR8 : PPC::CTR,
|
|
|
|
true));
|
2016-07-27 21:24:16 +08:00
|
|
|
FBB = LastInst.getOperand(0).getMBB();
|
2012-06-08 23:38:21 +08:00
|
|
|
return false;
|
2016-07-27 21:24:16 +08:00
|
|
|
} else if ((SecondLastInst.getOpcode() == PPC::BDZ8 ||
|
|
|
|
SecondLastInst.getOpcode() == PPC::BDZ) &&
|
|
|
|
LastInst.getOpcode() == PPC::B) {
|
|
|
|
if (!SecondLastInst.getOperand(0).isMBB() ||
|
|
|
|
!LastInst.getOperand(0).isMBB())
|
2012-06-08 23:38:21 +08:00
|
|
|
return true;
|
2012-06-09 03:19:53 +08:00
|
|
|
if (DisableCTRLoopAnal)
|
2012-06-08 23:38:25 +08:00
|
|
|
return true;
|
2016-07-27 21:24:16 +08:00
|
|
|
TBB = SecondLastInst.getOperand(0).getMBB();
|
2012-06-08 23:38:21 +08:00
|
|
|
Cond.push_back(MachineOperand::CreateImm(0));
|
|
|
|
Cond.push_back(MachineOperand::CreateReg(isPPC64 ? PPC::CTR8 : PPC::CTR,
|
|
|
|
true));
|
2016-07-27 21:24:16 +08:00
|
|
|
FBB = LastInst.getOperand(0).getMBB();
|
2012-06-08 23:38:21 +08:00
|
|
|
return false;
|
2006-10-14 05:21:17 +08:00
|
|
|
}
|
2010-12-24 12:28:06 +08:00
|
|
|
|
2007-06-14 01:59:52 +08:00
|
|
|
// If the block ends with two PPC:Bs, handle it. The second one is not
|
|
|
|
// executed, so remove it.
|
2016-07-27 21:24:16 +08:00
|
|
|
if (SecondLastInst.getOpcode() == PPC::B && LastInst.getOpcode() == PPC::B) {
|
|
|
|
if (!SecondLastInst.getOperand(0).isMBB())
|
2009-05-09 07:09:25 +08:00
|
|
|
return true;
|
2016-07-27 21:24:16 +08:00
|
|
|
TBB = SecondLastInst.getOperand(0).getMBB();
|
2007-06-14 01:59:52 +08:00
|
|
|
I = LastInst;
|
2009-02-09 15:14:22 +08:00
|
|
|
if (AllowModify)
|
|
|
|
I->eraseFromParent();
|
2007-06-14 01:59:52 +08:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2006-10-14 05:21:17 +08:00
|
|
|
// Otherwise, can't handle this.
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2016-09-15 04:43:16 +08:00
|
|
|
unsigned PPCInstrInfo::removeBranch(MachineBasicBlock &MBB,
|
2016-09-15 01:23:48 +08:00
|
|
|
int *BytesRemoved) const {
|
|
|
|
assert(!BytesRemoved && "code size not handled");
|
|
|
|
|
2015-06-25 21:39:03 +08:00
|
|
|
MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
|
|
|
|
if (I == MBB.end())
|
|
|
|
return 0;
|
|
|
|
|
2012-06-08 23:38:21 +08:00
|
|
|
if (I->getOpcode() != PPC::B && I->getOpcode() != PPC::BCC &&
|
Add CR-bit tracking to the PowerPC backend for i1 values
This change enables tracking i1 values in the PowerPC backend using the
condition register bits. These bits can be treated on PowerPC as separate
registers; individual bit operations (and, or, xor, etc.) are supported.
Tracking booleans in CR bits has several advantages:
- Reduction in register pressure (because we no longer need GPRs to store
boolean values).
- Logical operations on booleans can be handled more efficiently; we used to
have to move all results from comparisons into GPRs, perform promoted
logical operations in GPRs, and then move the result back into condition
register bits to be used by conditional branches. This can be very
inefficient, because the throughput of these CR <-> GPR moves have high
latency and low throughput (especially when other associated instructions
are accounted for).
- On the POWER7 and similar cores, we can increase total throughput by using
the CR bits. CR bit operations have a dedicated functional unit.
Most of this is more-or-less mechanical: Adjustments were needed in the
calling-convention code, support was added for spilling/restoring individual
condition-register bits, and conditional branch instruction definitions taking
specific CR bits were added (plus patterns and code for generating bit-level
operations).
This is enabled by default when running at -O2 and higher. For -O0 and -O1,
where the ability to debug is more important, this feature is disabled by
default. Individual CR bits do not have assigned DWARF register numbers,
and storing values in CR bits makes them invisible to the debugger.
It is critical, however, that we don't move i1 values that have been promoted
to larger values (such as those passed as function arguments) into bit
registers only to quickly turn around and move the values back into GPRs (such
as happens when values are returned by functions). A pair of target-specific
DAG combines are added to remove the trunc/extends in:
trunc(binary-ops(binary-ops(zext(x), zext(y)), ...)
and:
zext(binary-ops(binary-ops(trunc(x), trunc(y)), ...)
In short, we only want to use CR bits where some of the i1 values come from
comparisons or are used by conditional branches or selects. To put it another
way, if we can do the entire i1 computation in GPRs, then we probably should
(on the POWER7, the GPR-operation throughput is higher, and for all cores, the
CR <-> GPR moves are expensive).
POWER7 test-suite performance results (from 10 runs in each configuration):
SingleSource/Benchmarks/Misc/mandel-2: 35% speedup
MultiSource/Benchmarks/Prolangs-C++/city/city: 21% speedup
MultiSource/Benchmarks/MiBench/automotive-susan: 23% speedup
SingleSource/Benchmarks/CoyoteBench/huffbench: 13% speedup
SingleSource/Benchmarks/Misc-C++/Large/sphereflake: 13% speedup
SingleSource/Benchmarks/Misc-C++/mandel-text: 10% speedup
SingleSource/Benchmarks/Misc-C++-EH/spirit: 10% slowdown
MultiSource/Applications/lemon/lemon: 8% slowdown
llvm-svn: 202451
2014-02-28 08:27:01 +08:00
|
|
|
I->getOpcode() != PPC::BC && I->getOpcode() != PPC::BCn &&
|
2012-06-08 23:38:21 +08:00
|
|
|
I->getOpcode() != PPC::BDNZ8 && I->getOpcode() != PPC::BDNZ &&
|
|
|
|
I->getOpcode() != PPC::BDZ8 && I->getOpcode() != PPC::BDZ)
|
2007-05-18 08:05:48 +08:00
|
|
|
return 0;
|
2010-12-24 12:28:06 +08:00
|
|
|
|
2006-10-14 05:21:17 +08:00
|
|
|
// Remove the branch.
|
|
|
|
I->eraseFromParent();
|
2010-12-24 12:28:06 +08:00
|
|
|
|
2006-10-14 05:21:17 +08:00
|
|
|
I = MBB.end();
|
|
|
|
|
2007-05-18 08:05:48 +08:00
|
|
|
if (I == MBB.begin()) return 1;
|
2006-10-14 05:21:17 +08:00
|
|
|
--I;
|
2012-06-08 23:38:21 +08:00
|
|
|
if (I->getOpcode() != PPC::BCC &&
|
Add CR-bit tracking to the PowerPC backend for i1 values
This change enables tracking i1 values in the PowerPC backend using the
condition register bits. These bits can be treated on PowerPC as separate
registers; individual bit operations (and, or, xor, etc.) are supported.
Tracking booleans in CR bits has several advantages:
- Reduction in register pressure (because we no longer need GPRs to store
boolean values).
- Logical operations on booleans can be handled more efficiently; we used to
have to move all results from comparisons into GPRs, perform promoted
logical operations in GPRs, and then move the result back into condition
register bits to be used by conditional branches. This can be very
inefficient, because the throughput of these CR <-> GPR moves have high
latency and low throughput (especially when other associated instructions
are accounted for).
- On the POWER7 and similar cores, we can increase total throughput by using
the CR bits. CR bit operations have a dedicated functional unit.
Most of this is more-or-less mechanical: Adjustments were needed in the
calling-convention code, support was added for spilling/restoring individual
condition-register bits, and conditional branch instruction definitions taking
specific CR bits were added (plus patterns and code for generating bit-level
operations).
This is enabled by default when running at -O2 and higher. For -O0 and -O1,
where the ability to debug is more important, this feature is disabled by
default. Individual CR bits do not have assigned DWARF register numbers,
and storing values in CR bits makes them invisible to the debugger.
It is critical, however, that we don't move i1 values that have been promoted
to larger values (such as those passed as function arguments) into bit
registers only to quickly turn around and move the values back into GPRs (such
as happens when values are returned by functions). A pair of target-specific
DAG combines are added to remove the trunc/extends in:
trunc(binary-ops(binary-ops(zext(x), zext(y)), ...)
and:
zext(binary-ops(binary-ops(trunc(x), trunc(y)), ...)
In short, we only want to use CR bits where some of the i1 values come from
comparisons or are used by conditional branches or selects. To put it another
way, if we can do the entire i1 computation in GPRs, then we probably should
(on the POWER7, the GPR-operation throughput is higher, and for all cores, the
CR <-> GPR moves are expensive).
POWER7 test-suite performance results (from 10 runs in each configuration):
SingleSource/Benchmarks/Misc/mandel-2: 35% speedup
MultiSource/Benchmarks/Prolangs-C++/city/city: 21% speedup
MultiSource/Benchmarks/MiBench/automotive-susan: 23% speedup
SingleSource/Benchmarks/CoyoteBench/huffbench: 13% speedup
SingleSource/Benchmarks/Misc-C++/Large/sphereflake: 13% speedup
SingleSource/Benchmarks/Misc-C++/mandel-text: 10% speedup
SingleSource/Benchmarks/Misc-C++-EH/spirit: 10% slowdown
MultiSource/Applications/lemon/lemon: 8% slowdown
llvm-svn: 202451
2014-02-28 08:27:01 +08:00
|
|
|
I->getOpcode() != PPC::BC && I->getOpcode() != PPC::BCn &&
|
2012-06-08 23:38:21 +08:00
|
|
|
I->getOpcode() != PPC::BDNZ8 && I->getOpcode() != PPC::BDNZ &&
|
|
|
|
I->getOpcode() != PPC::BDZ8 && I->getOpcode() != PPC::BDZ)
|
2007-05-18 08:05:48 +08:00
|
|
|
return 1;
|
2010-12-24 12:28:06 +08:00
|
|
|
|
2006-10-14 05:21:17 +08:00
|
|
|
// Remove the branch.
|
|
|
|
I->eraseFromParent();
|
2007-05-18 08:05:48 +08:00
|
|
|
return 2;
|
2006-10-14 05:21:17 +08:00
|
|
|
}
|
|
|
|
|
2016-09-15 01:24:15 +08:00
|
|
|
unsigned PPCInstrInfo::insertBranch(MachineBasicBlock &MBB,
|
2016-06-12 23:39:02 +08:00
|
|
|
MachineBasicBlock *TBB,
|
|
|
|
MachineBasicBlock *FBB,
|
|
|
|
ArrayRef<MachineOperand> Cond,
|
2016-09-15 01:23:48 +08:00
|
|
|
const DebugLoc &DL,
|
|
|
|
int *BytesAdded) const {
|
2006-10-18 02:06:55 +08:00
|
|
|
// Shouldn't be a fall through.
|
2016-09-15 01:24:15 +08:00
|
|
|
assert(TBB && "insertBranch must not be told to insert a fallthrough");
|
2010-12-24 12:28:06 +08:00
|
|
|
assert((Cond.size() == 2 || Cond.size() == 0) &&
|
2006-10-21 13:36:13 +08:00
|
|
|
"PPC branch conditions have two components!");
|
2016-09-15 01:23:48 +08:00
|
|
|
assert(!BytesAdded && "code size not handled");
|
2010-12-24 12:28:06 +08:00
|
|
|
|
2014-06-13 05:48:52 +08:00
|
|
|
bool isPPC64 = Subtarget.isPPC64();
|
2012-06-08 23:38:21 +08:00
|
|
|
|
2006-10-21 13:36:13 +08:00
|
|
|
// One-way branch.
|
2014-04-25 13:30:21 +08:00
|
|
|
if (!FBB) {
|
2006-10-21 13:36:13 +08:00
|
|
|
if (Cond.empty()) // Unconditional branch
|
2010-06-18 06:43:56 +08:00
|
|
|
BuildMI(&MBB, DL, get(PPC::B)).addMBB(TBB);
|
2012-06-08 23:38:21 +08:00
|
|
|
else if (Cond[1].getReg() == PPC::CTR || Cond[1].getReg() == PPC::CTR8)
|
|
|
|
BuildMI(&MBB, DL, get(Cond[0].getImm() ?
|
|
|
|
(isPPC64 ? PPC::BDNZ8 : PPC::BDNZ) :
|
|
|
|
(isPPC64 ? PPC::BDZ8 : PPC::BDZ))).addMBB(TBB);
|
Add CR-bit tracking to the PowerPC backend for i1 values
This change enables tracking i1 values in the PowerPC backend using the
condition register bits. These bits can be treated on PowerPC as separate
registers; individual bit operations (and, or, xor, etc.) are supported.
Tracking booleans in CR bits has several advantages:
- Reduction in register pressure (because we no longer need GPRs to store
boolean values).
- Logical operations on booleans can be handled more efficiently; we used to
have to move all results from comparisons into GPRs, perform promoted
logical operations in GPRs, and then move the result back into condition
register bits to be used by conditional branches. This can be very
inefficient, because the throughput of these CR <-> GPR moves have high
latency and low throughput (especially when other associated instructions
are accounted for).
- On the POWER7 and similar cores, we can increase total throughput by using
the CR bits. CR bit operations have a dedicated functional unit.
Most of this is more-or-less mechanical: Adjustments were needed in the
calling-convention code, support was added for spilling/restoring individual
condition-register bits, and conditional branch instruction definitions taking
specific CR bits were added (plus patterns and code for generating bit-level
operations).
This is enabled by default when running at -O2 and higher. For -O0 and -O1,
where the ability to debug is more important, this feature is disabled by
default. Individual CR bits do not have assigned DWARF register numbers,
and storing values in CR bits makes them invisible to the debugger.
It is critical, however, that we don't move i1 values that have been promoted
to larger values (such as those passed as function arguments) into bit
registers only to quickly turn around and move the values back into GPRs (such
as happens when values are returned by functions). A pair of target-specific
DAG combines are added to remove the trunc/extends in:
trunc(binary-ops(binary-ops(zext(x), zext(y)), ...)
and:
zext(binary-ops(binary-ops(trunc(x), trunc(y)), ...)
In short, we only want to use CR bits where some of the i1 values come from
comparisons or are used by conditional branches or selects. To put it another
way, if we can do the entire i1 computation in GPRs, then we probably should
(on the POWER7, the GPR-operation throughput is higher, and for all cores, the
CR <-> GPR moves are expensive).
POWER7 test-suite performance results (from 10 runs in each configuration):
SingleSource/Benchmarks/Misc/mandel-2: 35% speedup
MultiSource/Benchmarks/Prolangs-C++/city/city: 21% speedup
MultiSource/Benchmarks/MiBench/automotive-susan: 23% speedup
SingleSource/Benchmarks/CoyoteBench/huffbench: 13% speedup
SingleSource/Benchmarks/Misc-C++/Large/sphereflake: 13% speedup
SingleSource/Benchmarks/Misc-C++/mandel-text: 10% speedup
SingleSource/Benchmarks/Misc-C++-EH/spirit: 10% slowdown
MultiSource/Applications/lemon/lemon: 8% slowdown
llvm-svn: 202451
2014-02-28 08:27:01 +08:00
|
|
|
else if (Cond[0].getImm() == PPC::PRED_BIT_SET)
|
2017-01-13 17:58:52 +08:00
|
|
|
BuildMI(&MBB, DL, get(PPC::BC)).add(Cond[1]).addMBB(TBB);
|
Add CR-bit tracking to the PowerPC backend for i1 values
This change enables tracking i1 values in the PowerPC backend using the
condition register bits. These bits can be treated on PowerPC as separate
registers; individual bit operations (and, or, xor, etc.) are supported.
Tracking booleans in CR bits has several advantages:
- Reduction in register pressure (because we no longer need GPRs to store
boolean values).
- Logical operations on booleans can be handled more efficiently; we used to
have to move all results from comparisons into GPRs, perform promoted
logical operations in GPRs, and then move the result back into condition
register bits to be used by conditional branches. This can be very
inefficient, because the throughput of these CR <-> GPR moves have high
latency and low throughput (especially when other associated instructions
are accounted for).
- On the POWER7 and similar cores, we can increase total throughput by using
the CR bits. CR bit operations have a dedicated functional unit.
Most of this is more-or-less mechanical: Adjustments were needed in the
calling-convention code, support was added for spilling/restoring individual
condition-register bits, and conditional branch instruction definitions taking
specific CR bits were added (plus patterns and code for generating bit-level
operations).
This is enabled by default when running at -O2 and higher. For -O0 and -O1,
where the ability to debug is more important, this feature is disabled by
default. Individual CR bits do not have assigned DWARF register numbers,
and storing values in CR bits makes them invisible to the debugger.
It is critical, however, that we don't move i1 values that have been promoted
to larger values (such as those passed as function arguments) into bit
registers only to quickly turn around and move the values back into GPRs (such
as happens when values are returned by functions). A pair of target-specific
DAG combines are added to remove the trunc/extends in:
trunc(binary-ops(binary-ops(zext(x), zext(y)), ...)
and:
zext(binary-ops(binary-ops(trunc(x), trunc(y)), ...)
In short, we only want to use CR bits where some of the i1 values come from
comparisons or are used by conditional branches or selects. To put it another
way, if we can do the entire i1 computation in GPRs, then we probably should
(on the POWER7, the GPR-operation throughput is higher, and for all cores, the
CR <-> GPR moves are expensive).
POWER7 test-suite performance results (from 10 runs in each configuration):
SingleSource/Benchmarks/Misc/mandel-2: 35% speedup
MultiSource/Benchmarks/Prolangs-C++/city/city: 21% speedup
MultiSource/Benchmarks/MiBench/automotive-susan: 23% speedup
SingleSource/Benchmarks/CoyoteBench/huffbench: 13% speedup
SingleSource/Benchmarks/Misc-C++/Large/sphereflake: 13% speedup
SingleSource/Benchmarks/Misc-C++/mandel-text: 10% speedup
SingleSource/Benchmarks/Misc-C++-EH/spirit: 10% slowdown
MultiSource/Applications/lemon/lemon: 8% slowdown
llvm-svn: 202451
2014-02-28 08:27:01 +08:00
|
|
|
else if (Cond[0].getImm() == PPC::PRED_BIT_UNSET)
|
2017-01-13 17:58:52 +08:00
|
|
|
BuildMI(&MBB, DL, get(PPC::BCn)).add(Cond[1]).addMBB(TBB);
|
2006-10-21 13:36:13 +08:00
|
|
|
else // Conditional branch
|
2010-06-18 06:43:56 +08:00
|
|
|
BuildMI(&MBB, DL, get(PPC::BCC))
|
2017-01-13 17:58:52 +08:00
|
|
|
.addImm(Cond[0].getImm())
|
|
|
|
.add(Cond[1])
|
|
|
|
.addMBB(TBB);
|
2007-05-18 08:05:48 +08:00
|
|
|
return 1;
|
2006-10-18 02:06:55 +08:00
|
|
|
}
|
2010-12-24 12:28:06 +08:00
|
|
|
|
2006-10-21 13:42:09 +08:00
|
|
|
// Two-way Conditional Branch.
|
2012-06-08 23:38:21 +08:00
|
|
|
if (Cond[1].getReg() == PPC::CTR || Cond[1].getReg() == PPC::CTR8)
|
|
|
|
BuildMI(&MBB, DL, get(Cond[0].getImm() ?
|
|
|
|
(isPPC64 ? PPC::BDNZ8 : PPC::BDNZ) :
|
|
|
|
(isPPC64 ? PPC::BDZ8 : PPC::BDZ))).addMBB(TBB);
|
Add CR-bit tracking to the PowerPC backend for i1 values
This change enables tracking i1 values in the PowerPC backend using the
condition register bits. These bits can be treated on PowerPC as separate
registers; individual bit operations (and, or, xor, etc.) are supported.
Tracking booleans in CR bits has several advantages:
- Reduction in register pressure (because we no longer need GPRs to store
boolean values).
- Logical operations on booleans can be handled more efficiently; we used to
have to move all results from comparisons into GPRs, perform promoted
logical operations in GPRs, and then move the result back into condition
register bits to be used by conditional branches. This can be very
inefficient, because the throughput of these CR <-> GPR moves have high
latency and low throughput (especially when other associated instructions
are accounted for).
- On the POWER7 and similar cores, we can increase total throughput by using
the CR bits. CR bit operations have a dedicated functional unit.
Most of this is more-or-less mechanical: Adjustments were needed in the
calling-convention code, support was added for spilling/restoring individual
condition-register bits, and conditional branch instruction definitions taking
specific CR bits were added (plus patterns and code for generating bit-level
operations).
This is enabled by default when running at -O2 and higher. For -O0 and -O1,
where the ability to debug is more important, this feature is disabled by
default. Individual CR bits do not have assigned DWARF register numbers,
and storing values in CR bits makes them invisible to the debugger.
It is critical, however, that we don't move i1 values that have been promoted
to larger values (such as those passed as function arguments) into bit
registers only to quickly turn around and move the values back into GPRs (such
as happens when values are returned by functions). A pair of target-specific
DAG combines are added to remove the trunc/extends in:
trunc(binary-ops(binary-ops(zext(x), zext(y)), ...)
and:
zext(binary-ops(binary-ops(trunc(x), trunc(y)), ...)
In short, we only want to use CR bits where some of the i1 values come from
comparisons or are used by conditional branches or selects. To put it another
way, if we can do the entire i1 computation in GPRs, then we probably should
(on the POWER7, the GPR-operation throughput is higher, and for all cores, the
CR <-> GPR moves are expensive).
POWER7 test-suite performance results (from 10 runs in each configuration):
SingleSource/Benchmarks/Misc/mandel-2: 35% speedup
MultiSource/Benchmarks/Prolangs-C++/city/city: 21% speedup
MultiSource/Benchmarks/MiBench/automotive-susan: 23% speedup
SingleSource/Benchmarks/CoyoteBench/huffbench: 13% speedup
SingleSource/Benchmarks/Misc-C++/Large/sphereflake: 13% speedup
SingleSource/Benchmarks/Misc-C++/mandel-text: 10% speedup
SingleSource/Benchmarks/Misc-C++-EH/spirit: 10% slowdown
MultiSource/Applications/lemon/lemon: 8% slowdown
llvm-svn: 202451
2014-02-28 08:27:01 +08:00
|
|
|
else if (Cond[0].getImm() == PPC::PRED_BIT_SET)
|
2017-01-13 17:58:52 +08:00
|
|
|
BuildMI(&MBB, DL, get(PPC::BC)).add(Cond[1]).addMBB(TBB);
|
Add CR-bit tracking to the PowerPC backend for i1 values
This change enables tracking i1 values in the PowerPC backend using the
condition register bits. These bits can be treated on PowerPC as separate
registers; individual bit operations (and, or, xor, etc.) are supported.
Tracking booleans in CR bits has several advantages:
- Reduction in register pressure (because we no longer need GPRs to store
boolean values).
- Logical operations on booleans can be handled more efficiently; we used to
have to move all results from comparisons into GPRs, perform promoted
logical operations in GPRs, and then move the result back into condition
register bits to be used by conditional branches. This can be very
inefficient, because the throughput of these CR <-> GPR moves have high
latency and low throughput (especially when other associated instructions
are accounted for).
- On the POWER7 and similar cores, we can increase total throughput by using
the CR bits. CR bit operations have a dedicated functional unit.
Most of this is more-or-less mechanical: Adjustments were needed in the
calling-convention code, support was added for spilling/restoring individual
condition-register bits, and conditional branch instruction definitions taking
specific CR bits were added (plus patterns and code for generating bit-level
operations).
This is enabled by default when running at -O2 and higher. For -O0 and -O1,
where the ability to debug is more important, this feature is disabled by
default. Individual CR bits do not have assigned DWARF register numbers,
and storing values in CR bits makes them invisible to the debugger.
It is critical, however, that we don't move i1 values that have been promoted
to larger values (such as those passed as function arguments) into bit
registers only to quickly turn around and move the values back into GPRs (such
as happens when values are returned by functions). A pair of target-specific
DAG combines are added to remove the trunc/extends in:
trunc(binary-ops(binary-ops(zext(x), zext(y)), ...)
and:
zext(binary-ops(binary-ops(trunc(x), trunc(y)), ...)
In short, we only want to use CR bits where some of the i1 values come from
comparisons or are used by conditional branches or selects. To put it another
way, if we can do the entire i1 computation in GPRs, then we probably should
(on the POWER7, the GPR-operation throughput is higher, and for all cores, the
CR <-> GPR moves are expensive).
POWER7 test-suite performance results (from 10 runs in each configuration):
SingleSource/Benchmarks/Misc/mandel-2: 35% speedup
MultiSource/Benchmarks/Prolangs-C++/city/city: 21% speedup
MultiSource/Benchmarks/MiBench/automotive-susan: 23% speedup
SingleSource/Benchmarks/CoyoteBench/huffbench: 13% speedup
SingleSource/Benchmarks/Misc-C++/Large/sphereflake: 13% speedup
SingleSource/Benchmarks/Misc-C++/mandel-text: 10% speedup
SingleSource/Benchmarks/Misc-C++-EH/spirit: 10% slowdown
MultiSource/Applications/lemon/lemon: 8% slowdown
llvm-svn: 202451
2014-02-28 08:27:01 +08:00
|
|
|
else if (Cond[0].getImm() == PPC::PRED_BIT_UNSET)
|
2017-01-13 17:58:52 +08:00
|
|
|
BuildMI(&MBB, DL, get(PPC::BCn)).add(Cond[1]).addMBB(TBB);
|
2012-06-08 23:38:21 +08:00
|
|
|
else
|
|
|
|
BuildMI(&MBB, DL, get(PPC::BCC))
|
2017-01-13 17:58:52 +08:00
|
|
|
.addImm(Cond[0].getImm())
|
|
|
|
.add(Cond[1])
|
|
|
|
.addMBB(TBB);
|
2010-06-18 06:43:56 +08:00
|
|
|
BuildMI(&MBB, DL, get(PPC::B)).addMBB(FBB);
|
2007-05-18 08:05:48 +08:00
|
|
|
return 2;
|
2006-10-14 05:21:17 +08:00
|
|
|
}
|
|
|
|
|
2013-04-06 07:29:01 +08:00
|
|
|
// Select analysis.
|
|
|
|
bool PPCInstrInfo::canInsertSelect(const MachineBasicBlock &MBB,
|
2015-06-12 03:30:37 +08:00
|
|
|
ArrayRef<MachineOperand> Cond,
|
2013-04-06 07:29:01 +08:00
|
|
|
unsigned TrueReg, unsigned FalseReg,
|
|
|
|
int &CondCycles, int &TrueCycles, int &FalseCycles) const {
|
|
|
|
if (Cond.size() != 2)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
// If this is really a bdnz-like condition, then it cannot be turned into a
|
|
|
|
// select.
|
|
|
|
if (Cond[1].getReg() == PPC::CTR || Cond[1].getReg() == PPC::CTR8)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
// Check register classes.
|
|
|
|
const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
|
|
|
|
const TargetRegisterClass *RC =
|
|
|
|
RI.getCommonSubClass(MRI.getRegClass(TrueReg), MRI.getRegClass(FalseReg));
|
|
|
|
if (!RC)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
// isel is for regular integer GPRs only.
|
|
|
|
if (!PPC::GPRCRegClass.hasSubClassEq(RC) &&
|
Fix register subclass handling in PPCInstrInfo::insertSelect
PPCInstrInfo::insertSelect and PPCInstrInfo::canInsertSelect were computing the
common subclass of the true and false inputs, and then selecting either the
32-bit or the 64-bit isel variant based on the result of calling
PPC::GPRCRegClass.hasSubClassEq(RC) and PPC::G8RCRegClass.hasSubClassEq(RC)
(where RC is the common subclass). Unfortunately, this is not quite right: if
we have something like this:
%vreg8<def> = SELECT_CC_I8 %vreg4<kill>, %vreg7<kill>, %vreg6<kill>, 76;
G8RC_and_G8RC_NOX0:%vreg8 CRRC:%vreg4 G8RC_NOX0:%vreg7,%vreg6
then the common subclass of G8RC_and_G8RC_NOX0 and G8RC_NOX0 is G8RC_NOX0, and
G8RC_NOX0 is not a subclass of G8RC (because it also contains the ZERO8
pseudo-register). As a result, we also need to check the common subclass
against GPRC_NOR0 and G8RC_NOX0 explicitly.
This had not been a problem for clients of insertSelect that called
canInsertSelect first (because it had a compensating mistake), but insertSelect
is also used by the PPC pseudo-instruction expander, and this error was causing
a problem in that context.
This problem was found by csmith.
llvm-svn: 186343
2013-07-16 04:22:58 +08:00
|
|
|
!PPC::GPRC_NOR0RegClass.hasSubClassEq(RC) &&
|
|
|
|
!PPC::G8RCRegClass.hasSubClassEq(RC) &&
|
|
|
|
!PPC::G8RC_NOX0RegClass.hasSubClassEq(RC))
|
2013-04-06 07:29:01 +08:00
|
|
|
return false;
|
|
|
|
|
|
|
|
// FIXME: These numbers are for the A2, how well they work for other cores is
|
|
|
|
// an open question. On the A2, the isel instruction has a 2-cycle latency
|
|
|
|
// but single-cycle throughput. These numbers are used in combination with
|
|
|
|
// the MispredictPenalty setting from the active SchedMachineModel.
|
|
|
|
CondCycles = 1;
|
|
|
|
TrueCycles = 1;
|
|
|
|
FalseCycles = 1;
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
void PPCInstrInfo::insertSelect(MachineBasicBlock &MBB,
|
2016-06-12 23:39:02 +08:00
|
|
|
MachineBasicBlock::iterator MI,
|
|
|
|
const DebugLoc &dl, unsigned DestReg,
|
|
|
|
ArrayRef<MachineOperand> Cond, unsigned TrueReg,
|
|
|
|
unsigned FalseReg) const {
|
2013-04-06 07:29:01 +08:00
|
|
|
assert(Cond.size() == 2 &&
|
|
|
|
"PPC branch conditions have two components!");
|
|
|
|
|
|
|
|
// Get the register classes.
|
|
|
|
MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
|
|
|
|
const TargetRegisterClass *RC =
|
|
|
|
RI.getCommonSubClass(MRI.getRegClass(TrueReg), MRI.getRegClass(FalseReg));
|
|
|
|
assert(RC && "TrueReg and FalseReg must have overlapping register classes");
|
Fix register subclass handling in PPCInstrInfo::insertSelect
PPCInstrInfo::insertSelect and PPCInstrInfo::canInsertSelect were computing the
common subclass of the true and false inputs, and then selecting either the
32-bit or the 64-bit isel variant based on the result of calling
PPC::GPRCRegClass.hasSubClassEq(RC) and PPC::G8RCRegClass.hasSubClassEq(RC)
(where RC is the common subclass). Unfortunately, this is not quite right: if
we have something like this:
%vreg8<def> = SELECT_CC_I8 %vreg4<kill>, %vreg7<kill>, %vreg6<kill>, 76;
G8RC_and_G8RC_NOX0:%vreg8 CRRC:%vreg4 G8RC_NOX0:%vreg7,%vreg6
then the common subclass of G8RC_and_G8RC_NOX0 and G8RC_NOX0 is G8RC_NOX0, and
G8RC_NOX0 is not a subclass of G8RC (because it also contains the ZERO8
pseudo-register). As a result, we also need to check the common subclass
against GPRC_NOR0 and G8RC_NOX0 explicitly.
This had not been a problem for clients of insertSelect that called
canInsertSelect first (because it had a compensating mistake), but insertSelect
is also used by the PPC pseudo-instruction expander, and this error was causing
a problem in that context.
This problem was found by csmith.
llvm-svn: 186343
2013-07-16 04:22:58 +08:00
|
|
|
|
|
|
|
bool Is64Bit = PPC::G8RCRegClass.hasSubClassEq(RC) ||
|
|
|
|
PPC::G8RC_NOX0RegClass.hasSubClassEq(RC);
|
|
|
|
assert((Is64Bit ||
|
|
|
|
PPC::GPRCRegClass.hasSubClassEq(RC) ||
|
|
|
|
PPC::GPRC_NOR0RegClass.hasSubClassEq(RC)) &&
|
2013-04-06 07:29:01 +08:00
|
|
|
"isel is for regular integer GPRs only");
|
|
|
|
|
Fix register subclass handling in PPCInstrInfo::insertSelect
PPCInstrInfo::insertSelect and PPCInstrInfo::canInsertSelect were computing the
common subclass of the true and false inputs, and then selecting either the
32-bit or the 64-bit isel variant based on the result of calling
PPC::GPRCRegClass.hasSubClassEq(RC) and PPC::G8RCRegClass.hasSubClassEq(RC)
(where RC is the common subclass). Unfortunately, this is not quite right: if
we have something like this:
%vreg8<def> = SELECT_CC_I8 %vreg4<kill>, %vreg7<kill>, %vreg6<kill>, 76;
G8RC_and_G8RC_NOX0:%vreg8 CRRC:%vreg4 G8RC_NOX0:%vreg7,%vreg6
then the common subclass of G8RC_and_G8RC_NOX0 and G8RC_NOX0 is G8RC_NOX0, and
G8RC_NOX0 is not a subclass of G8RC (because it also contains the ZERO8
pseudo-register). As a result, we also need to check the common subclass
against GPRC_NOR0 and G8RC_NOX0 explicitly.
This had not been a problem for clients of insertSelect that called
canInsertSelect first (because it had a compensating mistake), but insertSelect
is also used by the PPC pseudo-instruction expander, and this error was causing
a problem in that context.
This problem was found by csmith.
llvm-svn: 186343
2013-07-16 04:22:58 +08:00
|
|
|
unsigned OpCode = Is64Bit ? PPC::ISEL8 : PPC::ISEL;
|
2016-01-13 05:00:43 +08:00
|
|
|
auto SelectPred = static_cast<PPC::Predicate>(Cond[0].getImm());
|
2013-04-06 07:29:01 +08:00
|
|
|
|
2016-01-16 03:20:06 +08:00
|
|
|
unsigned SubIdx = 0;
|
|
|
|
bool SwapOps = false;
|
2013-04-06 07:29:01 +08:00
|
|
|
switch (SelectPred) {
|
2016-01-13 05:00:43 +08:00
|
|
|
case PPC::PRED_EQ:
|
|
|
|
case PPC::PRED_EQ_MINUS:
|
|
|
|
case PPC::PRED_EQ_PLUS:
|
|
|
|
SubIdx = PPC::sub_eq; SwapOps = false; break;
|
|
|
|
case PPC::PRED_NE:
|
|
|
|
case PPC::PRED_NE_MINUS:
|
|
|
|
case PPC::PRED_NE_PLUS:
|
|
|
|
SubIdx = PPC::sub_eq; SwapOps = true; break;
|
|
|
|
case PPC::PRED_LT:
|
|
|
|
case PPC::PRED_LT_MINUS:
|
|
|
|
case PPC::PRED_LT_PLUS:
|
|
|
|
SubIdx = PPC::sub_lt; SwapOps = false; break;
|
|
|
|
case PPC::PRED_GE:
|
|
|
|
case PPC::PRED_GE_MINUS:
|
|
|
|
case PPC::PRED_GE_PLUS:
|
|
|
|
SubIdx = PPC::sub_lt; SwapOps = true; break;
|
|
|
|
case PPC::PRED_GT:
|
|
|
|
case PPC::PRED_GT_MINUS:
|
|
|
|
case PPC::PRED_GT_PLUS:
|
|
|
|
SubIdx = PPC::sub_gt; SwapOps = false; break;
|
|
|
|
case PPC::PRED_LE:
|
|
|
|
case PPC::PRED_LE_MINUS:
|
|
|
|
case PPC::PRED_LE_PLUS:
|
|
|
|
SubIdx = PPC::sub_gt; SwapOps = true; break;
|
|
|
|
case PPC::PRED_UN:
|
|
|
|
case PPC::PRED_UN_MINUS:
|
|
|
|
case PPC::PRED_UN_PLUS:
|
|
|
|
SubIdx = PPC::sub_un; SwapOps = false; break;
|
|
|
|
case PPC::PRED_NU:
|
|
|
|
case PPC::PRED_NU_MINUS:
|
|
|
|
case PPC::PRED_NU_PLUS:
|
|
|
|
SubIdx = PPC::sub_un; SwapOps = true; break;
|
Add CR-bit tracking to the PowerPC backend for i1 values
This change enables tracking i1 values in the PowerPC backend using the
condition register bits. These bits can be treated on PowerPC as separate
registers; individual bit operations (and, or, xor, etc.) are supported.
Tracking booleans in CR bits has several advantages:
- Reduction in register pressure (because we no longer need GPRs to store
boolean values).
- Logical operations on booleans can be handled more efficiently; we used to
have to move all results from comparisons into GPRs, perform promoted
logical operations in GPRs, and then move the result back into condition
register bits to be used by conditional branches. This can be very
inefficient, because the throughput of these CR <-> GPR moves have high
latency and low throughput (especially when other associated instructions
are accounted for).
- On the POWER7 and similar cores, we can increase total throughput by using
the CR bits. CR bit operations have a dedicated functional unit.
Most of this is more-or-less mechanical: Adjustments were needed in the
calling-convention code, support was added for spilling/restoring individual
condition-register bits, and conditional branch instruction definitions taking
specific CR bits were added (plus patterns and code for generating bit-level
operations).
This is enabled by default when running at -O2 and higher. For -O0 and -O1,
where the ability to debug is more important, this feature is disabled by
default. Individual CR bits do not have assigned DWARF register numbers,
and storing values in CR bits makes them invisible to the debugger.
It is critical, however, that we don't move i1 values that have been promoted
to larger values (such as those passed as function arguments) into bit
registers only to quickly turn around and move the values back into GPRs (such
as happens when values are returned by functions). A pair of target-specific
DAG combines are added to remove the trunc/extends in:
trunc(binary-ops(binary-ops(zext(x), zext(y)), ...)
and:
zext(binary-ops(binary-ops(trunc(x), trunc(y)), ...)
In short, we only want to use CR bits where some of the i1 values come from
comparisons or are used by conditional branches or selects. To put it another
way, if we can do the entire i1 computation in GPRs, then we probably should
(on the POWER7, the GPR-operation throughput is higher, and for all cores, the
CR <-> GPR moves are expensive).
POWER7 test-suite performance results (from 10 runs in each configuration):
SingleSource/Benchmarks/Misc/mandel-2: 35% speedup
MultiSource/Benchmarks/Prolangs-C++/city/city: 21% speedup
MultiSource/Benchmarks/MiBench/automotive-susan: 23% speedup
SingleSource/Benchmarks/CoyoteBench/huffbench: 13% speedup
SingleSource/Benchmarks/Misc-C++/Large/sphereflake: 13% speedup
SingleSource/Benchmarks/Misc-C++/mandel-text: 10% speedup
SingleSource/Benchmarks/Misc-C++-EH/spirit: 10% slowdown
MultiSource/Applications/lemon/lemon: 8% slowdown
llvm-svn: 202451
2014-02-28 08:27:01 +08:00
|
|
|
case PPC::PRED_BIT_SET: SubIdx = 0; SwapOps = false; break;
|
|
|
|
case PPC::PRED_BIT_UNSET: SubIdx = 0; SwapOps = true; break;
|
2013-04-06 07:29:01 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
unsigned FirstReg = SwapOps ? FalseReg : TrueReg,
|
|
|
|
SecondReg = SwapOps ? TrueReg : FalseReg;
|
|
|
|
|
|
|
|
// The first input register of isel cannot be r0. If it is a member
|
|
|
|
// of a register class that can be r0, then copy it first (the
|
|
|
|
// register allocator should eliminate the copy).
|
|
|
|
if (MRI.getRegClass(FirstReg)->contains(PPC::R0) ||
|
|
|
|
MRI.getRegClass(FirstReg)->contains(PPC::X0)) {
|
|
|
|
const TargetRegisterClass *FirstRC =
|
|
|
|
MRI.getRegClass(FirstReg)->contains(PPC::X0) ?
|
|
|
|
&PPC::G8RC_NOX0RegClass : &PPC::GPRC_NOR0RegClass;
|
|
|
|
unsigned OldFirstReg = FirstReg;
|
|
|
|
FirstReg = MRI.createVirtualRegister(FirstRC);
|
|
|
|
BuildMI(MBB, MI, dl, get(TargetOpcode::COPY), FirstReg)
|
|
|
|
.addReg(OldFirstReg);
|
|
|
|
}
|
|
|
|
|
|
|
|
BuildMI(MBB, MI, dl, get(OpCode), DestReg)
|
|
|
|
.addReg(FirstReg).addReg(SecondReg)
|
|
|
|
.addReg(Cond[1].getReg(), 0, SubIdx);
|
|
|
|
}
|
|
|
|
|
2015-03-26 03:36:23 +08:00
|
|
|
static unsigned getCRBitValue(unsigned CRBit) {
|
|
|
|
unsigned Ret = 4;
|
|
|
|
if (CRBit == PPC::CR0LT || CRBit == PPC::CR1LT ||
|
|
|
|
CRBit == PPC::CR2LT || CRBit == PPC::CR3LT ||
|
|
|
|
CRBit == PPC::CR4LT || CRBit == PPC::CR5LT ||
|
|
|
|
CRBit == PPC::CR6LT || CRBit == PPC::CR7LT)
|
|
|
|
Ret = 3;
|
|
|
|
if (CRBit == PPC::CR0GT || CRBit == PPC::CR1GT ||
|
|
|
|
CRBit == PPC::CR2GT || CRBit == PPC::CR3GT ||
|
|
|
|
CRBit == PPC::CR4GT || CRBit == PPC::CR5GT ||
|
|
|
|
CRBit == PPC::CR6GT || CRBit == PPC::CR7GT)
|
|
|
|
Ret = 2;
|
|
|
|
if (CRBit == PPC::CR0EQ || CRBit == PPC::CR1EQ ||
|
|
|
|
CRBit == PPC::CR2EQ || CRBit == PPC::CR3EQ ||
|
|
|
|
CRBit == PPC::CR4EQ || CRBit == PPC::CR5EQ ||
|
|
|
|
CRBit == PPC::CR6EQ || CRBit == PPC::CR7EQ)
|
|
|
|
Ret = 1;
|
|
|
|
if (CRBit == PPC::CR0UN || CRBit == PPC::CR1UN ||
|
|
|
|
CRBit == PPC::CR2UN || CRBit == PPC::CR3UN ||
|
|
|
|
CRBit == PPC::CR4UN || CRBit == PPC::CR5UN ||
|
|
|
|
CRBit == PPC::CR6UN || CRBit == PPC::CR7UN)
|
|
|
|
Ret = 0;
|
|
|
|
|
|
|
|
assert(Ret != 4 && "Invalid CR bit register");
|
|
|
|
return Ret;
|
|
|
|
}
|
|
|
|
|
2010-07-11 15:31:00 +08:00
|
|
|
void PPCInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
|
2016-06-12 23:39:02 +08:00
|
|
|
MachineBasicBlock::iterator I,
|
|
|
|
const DebugLoc &DL, unsigned DestReg,
|
|
|
|
unsigned SrcReg, bool KillSrc) const {
|
[PowerPC] Initial support for the VSX instruction set
VSX is an ISA extension supported on the POWER7 and later cores that enhances
floating-point vector and scalar capabilities. Among other things, this adds
<2 x double> support and generally helps to reduce register pressure.
The interesting part of this ISA feature is the register configuration: there
are 64 new 128-bit vector registers, the 32 of which are super-registers of the
existing 32 scalar floating-point registers, and the second 32 of which overlap
with the 32 Altivec vector registers. This makes things like vector insertion
and extraction tricky: this can be free but only if we force a restriction to
the right register subclass when needed. A new "minipass" PPCVSXCopy takes care
of this (although it could do a more-optimal job of it; see the comment about
unnecessary copies below).
Please note that, currently, VSX is not enabled by default when targeting
anything because it is not yet ready for that. The assembler and disassembler
are fully implemented and tested. However:
- CodeGen support causes miscompiles; test-suite runtime failures:
MultiSource/Benchmarks/FreeBench/distray/distray
MultiSource/Benchmarks/McCat/08-main/main
MultiSource/Benchmarks/Olden/voronoi/voronoi
MultiSource/Benchmarks/mafft/pairlocalalign
MultiSource/Benchmarks/tramp3d-v4/tramp3d-v4
SingleSource/Benchmarks/CoyoteBench/almabench
SingleSource/Benchmarks/Misc/matmul_f64_4x4
- The lowering currently falls back to using Altivec instructions far more
than it should. Worse, there are some things that are scalarized through the
stack that shouldn't be.
- A lot of unnecessary copies make it past the optimizers, and this needs to
be fixed.
- Many more regression tests are needed.
Normally, I'd fix these things prior to committing, but there are some
students and other contributors who would like to work on this, and so it makes
sense to move this development process upstream where it can be subject to the
regular code-review procedures.
llvm-svn: 203768
2014-03-13 15:58:58 +08:00
|
|
|
// We can end up with self copies and similar things as a result of VSX copy
|
2014-03-28 06:46:28 +08:00
|
|
|
// legalization. Promote them here.
|
[PowerPC] Initial support for the VSX instruction set
VSX is an ISA extension supported on the POWER7 and later cores that enhances
floating-point vector and scalar capabilities. Among other things, this adds
<2 x double> support and generally helps to reduce register pressure.
The interesting part of this ISA feature is the register configuration: there
are 64 new 128-bit vector registers, the 32 of which are super-registers of the
existing 32 scalar floating-point registers, and the second 32 of which overlap
with the 32 Altivec vector registers. This makes things like vector insertion
and extraction tricky: this can be free but only if we force a restriction to
the right register subclass when needed. A new "minipass" PPCVSXCopy takes care
of this (although it could do a more-optimal job of it; see the comment about
unnecessary copies below).
Please note that, currently, VSX is not enabled by default when targeting
anything because it is not yet ready for that. The assembler and disassembler
are fully implemented and tested. However:
- CodeGen support causes miscompiles; test-suite runtime failures:
MultiSource/Benchmarks/FreeBench/distray/distray
MultiSource/Benchmarks/McCat/08-main/main
MultiSource/Benchmarks/Olden/voronoi/voronoi
MultiSource/Benchmarks/mafft/pairlocalalign
MultiSource/Benchmarks/tramp3d-v4/tramp3d-v4
SingleSource/Benchmarks/CoyoteBench/almabench
SingleSource/Benchmarks/Misc/matmul_f64_4x4
- The lowering currently falls back to using Altivec instructions far more
than it should. Worse, there are some things that are scalarized through the
stack that shouldn't be.
- A lot of unnecessary copies make it past the optimizers, and this needs to
be fixed.
- Many more regression tests are needed.
Normally, I'd fix these things prior to committing, but there are some
students and other contributors who would like to work this, and so it makes
sense to move this development process upstream where it can be subject to the
regular code-review procedures.
llvm-svn: 203768
2014-03-13 15:58:58 +08:00
|
|
|
const TargetRegisterInfo *TRI = &getRegisterInfo();
|
|
|
|
if (PPC::F8RCRegClass.contains(DestReg) &&
|
2015-02-17 07:46:30 +08:00
|
|
|
PPC::VSRCRegClass.contains(SrcReg)) {
|
[PowerPC] Initial support for the VSX instruction set
VSX is an ISA extension supported on the POWER7 and later cores that enhances
floating-point vector and scalar capabilities. Among other things, this adds
<2 x double> support and generally helps to reduce register pressure.
The interesting part of this ISA feature is the register configuration: there
are 64 new 128-bit vector registers, the 32 of which are super-registers of the
existing 32 scalar floating-point registers, and the second 32 of which overlap
with the 32 Altivec vector registers. This makes things like vector insertion
and extraction tricky: this can be free but only if we force a restriction to
the right register subclass when needed. A new "minipass" PPCVSXCopy takes care
of this (although it could do a more-optimal job of it; see the comment about
unnecessary copies below).
Please note that, currently, VSX is not enabled by default when targeting
anything because it is not yet ready for that. The assembler and disassembler
are fully implemented and tested. However:
- CodeGen support causes miscompiles; test-suite runtime failures:
MultiSource/Benchmarks/FreeBench/distray/distray
MultiSource/Benchmarks/McCat/08-main/main
MultiSource/Benchmarks/Olden/voronoi/voronoi
MultiSource/Benchmarks/mafft/pairlocalalign
MultiSource/Benchmarks/tramp3d-v4/tramp3d-v4
SingleSource/Benchmarks/CoyoteBench/almabench
SingleSource/Benchmarks/Misc/matmul_f64_4x4
- The lowering currently falls back to using Altivec instructions far more
than it should. Worse, there are some things that are scalarized through the
stack that shouldn't be.
- A lot of unnecessary copies make it past the optimizers, and this needs to
be fixed.
- Many more regression tests are needed.
Normally, I'd fix these things prior to committing, but there are some
students and other contributors who would like to work this, and so it makes
sense to move this development process upstream where it can be subject to the
regular code-review procedures.
llvm-svn: 203768
2014-03-13 15:58:58 +08:00
|
|
|
unsigned SuperReg =
|
|
|
|
TRI->getMatchingSuperReg(DestReg, PPC::sub_64, &PPC::VSRCRegClass);
|
|
|
|
|
2014-03-28 06:46:28 +08:00
|
|
|
if (VSXSelfCopyCrash && SrcReg == SuperReg)
|
|
|
|
llvm_unreachable("nop VSX copy");
|
[PowerPC] Initial support for the VSX instruction set
VSX is an ISA extension supported on the POWER7 and later cores that enhances
floating-point vector and scalar capabilities. Among other things, this adds
<2 x double> support and generally helps to reduce register pressure.
The interesting part of this ISA feature is the register configuration: there
are 64 new 128-bit vector registers, the 32 of which are super-registers of the
existing 32 scalar floating-point registers, and the second 32 of which overlap
with the 32 Altivec vector registers. This makes things like vector insertion
and extraction tricky: this can be free but only if we force a restriction to
the right register subclass when needed. A new "minipass" PPCVSXCopy takes care
of this (although it could do a more-optimal job of it; see the comment about
unnecessary copies below).
Please note that, currently, VSX is not enabled by default when targeting
anything because it is not yet ready for that. The assembler and disassembler
are fully implemented and tested. However:
- CodeGen support causes miscompiles; test-suite runtime failures:
MultiSource/Benchmarks/FreeBench/distray/distray
MultiSource/Benchmarks/McCat/08-main/main
MultiSource/Benchmarks/Olden/voronoi/voronoi
MultiSource/Benchmarks/mafft/pairlocalalign
MultiSource/Benchmarks/tramp3d-v4/tramp3d-v4
SingleSource/Benchmarks/CoyoteBench/almabench
SingleSource/Benchmarks/Misc/matmul_f64_4x4
- The lowering currently falls back to using Altivec instructions far more
than it should. Worse, there are some things that are scalarized through the
stack that shouldn't be.
- A lot of unnecessary copies make it past the optimizers, and this needs to
be fixed.
- Many more regression tests are needed.
Normally, I'd fix these things prior to committing, but there are some
students and other contributors who would like to work this, and so it makes
sense to move this development process upstream where it can be subject to the
regular code-review procedures.
llvm-svn: 203768
2014-03-13 15:58:58 +08:00
|
|
|
|
|
|
|
DestReg = SuperReg;
|
|
|
|
} else if (PPC::F8RCRegClass.contains(SrcReg) &&
|
2015-02-17 07:46:30 +08:00
|
|
|
PPC::VSRCRegClass.contains(DestReg)) {
|
[PowerPC] Initial support for the VSX instruction set
VSX is an ISA extension supported on the POWER7 and later cores that enhances
floating-point vector and scalar capabilities. Among other things, this adds
<2 x double> support and generally helps to reduce register pressure.
The interesting part of this ISA feature is the register configuration: there
are 64 new 128-bit vector registers, the 32 of which are super-registers of the
existing 32 scalar floating-point registers, and the second 32 of which overlap
with the 32 Altivec vector registers. This makes things like vector insertion
and extraction tricky: this can be free but only if we force a restriction to
the right register subclass when needed. A new "minipass" PPCVSXCopy takes care
of this (although it could do a more-optimal job of it; see the comment about
unnecessary copies below).
Please note that, currently, VSX is not enabled by default when targeting
anything because it is not yet ready for that. The assembler and disassembler
are fully implemented and tested. However:
- CodeGen support causes miscompiles; test-suite runtime failures:
MultiSource/Benchmarks/FreeBench/distray/distray
MultiSource/Benchmarks/McCat/08-main/main
MultiSource/Benchmarks/Olden/voronoi/voronoi
MultiSource/Benchmarks/mafft/pairlocalalign
MultiSource/Benchmarks/tramp3d-v4/tramp3d-v4
SingleSource/Benchmarks/CoyoteBench/almabench
SingleSource/Benchmarks/Misc/matmul_f64_4x4
- The lowering currently falls back to using Altivec instructions far more
than it should. Worse, there are some things that are scalarized through the
stack that shouldn't be.
- A lot of unnecessary copies make it past the optimizers, and this needs to
be fixed.
- Many more regression tests are needed.
Normally, I'd fix these things prior to committing, but there are some
students and other contributors who would like to work this, and so it makes
sense to move this development process upstream where it can be subject to the
regular code-review procedures.
llvm-svn: 203768
2014-03-13 15:58:58 +08:00
|
|
|
unsigned SuperReg =
|
|
|
|
TRI->getMatchingSuperReg(SrcReg, PPC::sub_64, &PPC::VSRCRegClass);
|
|
|
|
|
2014-03-28 06:46:28 +08:00
|
|
|
if (VSXSelfCopyCrash && DestReg == SuperReg)
|
|
|
|
llvm_unreachable("nop VSX copy");
|
[PowerPC] Initial support for the VSX instruction set
VSX is an ISA extension supported on the POWER7 and later cores that enhances
floating-point vector and scalar capabilities. Among other things, this adds
<2 x double> support and generally helps to reduce register pressure.
The interesting part of this ISA feature is the register configuration: there
are 64 new 128-bit vector registers, the 32 of which are super-registers of the
existing 32 scalar floating-point registers, and the second 32 of which overlap
with the 32 Altivec vector registers. This makes things like vector insertion
and extraction tricky: this can be free but only if we force a restriction to
the right register subclass when needed. A new "minipass" PPCVSXCopy takes care
of this (although it could do a more-optimal job of it; see the comment about
unnecessary copies below).
Please note that, currently, VSX is not enabled by default when targeting
anything because it is not yet ready for that. The assembler and disassembler
are fully implemented and tested. However:
- CodeGen support causes miscompiles; test-suite runtime failures:
MultiSource/Benchmarks/FreeBench/distray/distray
MultiSource/Benchmarks/McCat/08-main/main
MultiSource/Benchmarks/Olden/voronoi/voronoi
MultiSource/Benchmarks/mafft/pairlocalalign
MultiSource/Benchmarks/tramp3d-v4/tramp3d-v4
SingleSource/Benchmarks/CoyoteBench/almabench
SingleSource/Benchmarks/Misc/matmul_f64_4x4
- The lowering currently falls back to using Altivec instructions far more
than it should. Worse, there are some things that are scalarized through the
stack that shouldn't be.
- A lot of unnecessary copies make it past the optimizers, and this needs to
be fixed.
- Many more regression tests are needed.
Normally, I'd fix these things prior to committing, but there are some
students and other contributors who would like to work this, and so it makes
sense to move this development process upstream where it can be subject to the
regular code-review procedures.
llvm-svn: 203768
2014-03-13 15:58:58 +08:00
|
|
|
|
|
|
|
SrcReg = SuperReg;
|
|
|
|
}
|
|
|
|
|
2015-03-26 03:36:23 +08:00
|
|
|
// Different class register copy
|
|
|
|
if (PPC::CRBITRCRegClass.contains(SrcReg) &&
|
|
|
|
PPC::GPRCRegClass.contains(DestReg)) {
|
|
|
|
unsigned CRReg = getCRFromCRBit(SrcReg);
|
2016-02-19 06:09:30 +08:00
|
|
|
BuildMI(MBB, I, DL, get(PPC::MFOCRF), DestReg).addReg(CRReg);
|
|
|
|
getKillRegState(KillSrc);
|
2015-03-26 03:36:23 +08:00
|
|
|
// Rotate the CR bit in the CR fields to be the least significant bit and
|
|
|
|
// then mask with 0x1 (MB = ME = 31).
|
|
|
|
BuildMI(MBB, I, DL, get(PPC::RLWINM), DestReg)
|
|
|
|
.addReg(DestReg, RegState::Kill)
|
|
|
|
.addImm(TRI->getEncodingValue(CRReg) * 4 + (4 - getCRBitValue(SrcReg)))
|
|
|
|
.addImm(31)
|
|
|
|
.addImm(31);
|
|
|
|
return;
|
|
|
|
} else if (PPC::CRRCRegClass.contains(SrcReg) &&
|
|
|
|
PPC::G8RCRegClass.contains(DestReg)) {
|
2016-02-19 06:09:30 +08:00
|
|
|
BuildMI(MBB, I, DL, get(PPC::MFOCRF8), DestReg).addReg(SrcReg);
|
|
|
|
getKillRegState(KillSrc);
|
2015-03-26 03:36:23 +08:00
|
|
|
return;
|
|
|
|
} else if (PPC::CRRCRegClass.contains(SrcReg) &&
|
|
|
|
PPC::GPRCRegClass.contains(DestReg)) {
|
2016-02-19 06:09:30 +08:00
|
|
|
BuildMI(MBB, I, DL, get(PPC::MFOCRF), DestReg).addReg(SrcReg);
|
|
|
|
getKillRegState(KillSrc);
|
2015-03-26 03:36:23 +08:00
|
|
|
return;
|
2017-09-22 00:12:33 +08:00
|
|
|
} else if (PPC::G8RCRegClass.contains(SrcReg) &&
|
|
|
|
PPC::VSFRCRegClass.contains(DestReg)) {
|
2019-03-12 22:01:29 +08:00
|
|
|
assert(Subtarget.hasDirectMove() &&
|
|
|
|
"Subtarget doesn't support directmove, don't know how to copy.");
|
2017-09-22 00:12:33 +08:00
|
|
|
BuildMI(MBB, I, DL, get(PPC::MTVSRD), DestReg).addReg(SrcReg);
|
|
|
|
NumGPRtoVSRSpill++;
|
|
|
|
getKillRegState(KillSrc);
|
|
|
|
return;
|
|
|
|
} else if (PPC::VSFRCRegClass.contains(SrcReg) &&
|
|
|
|
PPC::G8RCRegClass.contains(DestReg)) {
|
2019-03-12 22:01:29 +08:00
|
|
|
assert(Subtarget.hasDirectMove() &&
|
|
|
|
"Subtarget doesn't support directmove, don't know how to copy.");
|
2017-09-22 00:12:33 +08:00
|
|
|
BuildMI(MBB, I, DL, get(PPC::MFVSRD), DestReg).addReg(SrcReg);
|
|
|
|
getKillRegState(KillSrc);
|
|
|
|
return;
|
2018-07-18 12:25:10 +08:00
|
|
|
} else if (PPC::SPERCRegClass.contains(SrcReg) &&
|
|
|
|
PPC::SPE4RCRegClass.contains(DestReg)) {
|
|
|
|
BuildMI(MBB, I, DL, get(PPC::EFSCFD), DestReg).addReg(SrcReg);
|
|
|
|
getKillRegState(KillSrc);
|
|
|
|
return;
|
|
|
|
} else if (PPC::SPE4RCRegClass.contains(SrcReg) &&
|
|
|
|
PPC::SPERCRegClass.contains(DestReg)) {
|
|
|
|
BuildMI(MBB, I, DL, get(PPC::EFDCFS), DestReg).addReg(SrcReg);
|
|
|
|
getKillRegState(KillSrc);
|
|
|
|
return;
|
2017-09-22 00:12:33 +08:00
|
|
|
}
|
2015-03-26 03:36:23 +08:00
|
|
|
|
2010-07-11 15:31:00 +08:00
|
|
|
unsigned Opc;
|
|
|
|
if (PPC::GPRCRegClass.contains(DestReg, SrcReg))
|
|
|
|
Opc = PPC::OR;
|
|
|
|
else if (PPC::G8RCRegClass.contains(DestReg, SrcReg))
|
|
|
|
Opc = PPC::OR8;
|
|
|
|
else if (PPC::F4RCRegClass.contains(DestReg, SrcReg))
|
|
|
|
Opc = PPC::FMR;
|
|
|
|
else if (PPC::CRRCRegClass.contains(DestReg, SrcReg))
|
|
|
|
Opc = PPC::MCRF;
|
|
|
|
else if (PPC::VRRCRegClass.contains(DestReg, SrcReg))
|
|
|
|
Opc = PPC::VOR;
|
[PowerPC] Initial support for the VSX instruction set
VSX is an ISA extension supported on the POWER7 and later cores that enhances
floating-point vector and scalar capabilities. Among other things, this adds
<2 x double> support and generally helps to reduce register pressure.
The interesting part of this ISA feature is the register configuration: there
are 64 new 128-bit vector registers, the 32 of which are super-registers of the
existing 32 scalar floating-point registers, and the second 32 of which overlap
with the 32 Altivec vector registers. This makes things like vector insertion
and extraction tricky: this can be free but only if we force a restriction to
the right register subclass when needed. A new "minipass" PPCVSXCopy takes care
of this (although it could do a more-optimal job of it; see the comment about
unnecessary copies below).
Please note that, currently, VSX is not enabled by default when targeting
anything because it is not yet ready for that. The assembler and disassembler
are fully implemented and tested. However:
- CodeGen support causes miscompiles; test-suite runtime failures:
MultiSource/Benchmarks/FreeBench/distray/distray
MultiSource/Benchmarks/McCat/08-main/main
MultiSource/Benchmarks/Olden/voronoi/voronoi
MultiSource/Benchmarks/mafft/pairlocalalign
MultiSource/Benchmarks/tramp3d-v4/tramp3d-v4
SingleSource/Benchmarks/CoyoteBench/almabench
SingleSource/Benchmarks/Misc/matmul_f64_4x4
- The lowering currently falls back to using Altivec instructions far more
than it should. Worse, there are some things that are scalarized through the
stack that shouldn't be.
- A lot of unnecessary copies make it past the optimizers, and this needs to
be fixed.
- Many more regression tests are needed.
Normally, I'd fix these things prior to committing, but there are some
students and other contributors who would like to work this, and so it makes
sense to move this development process upstream where it can be subject to the
regular code-review procedures.
llvm-svn: 203768
2014-03-13 15:58:58 +08:00
|
|
|
else if (PPC::VSRCRegClass.contains(DestReg, SrcReg))
|
[PowerPC] Update comment re: VSX copy-instruction selection
I've done some experimentation with this, and it looks like using the
lower-latency (but lower throughput) copy instruction is essentially always the
right thing to do.
My assumption is that, in order to be relatively sure that the higher-latency
copy will increase throughput, we'd want to have it unlikely to be in-flight
with its use. On the P7, the global completion table (GCT) can hold a maximum
of 120 instructions, shared among all active threads (up to 4), giving 30
instructions per thread. So specifically, I'd require at least that many
instructions between the copy and the use before the high-latency variant is
used.
Trying this, however, over the entire test suite resulted in zero cases where
the high-latency form would be preferable. This may be a consequence of the
fact that the scheduler views copies as free, and so they tend to end up close
to their uses. For this experiment I created a function:
unsigned chooseVSXCopy(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I,
unsigned DestReg, unsigned SrcReg,
unsigned StartDist = 1,
unsigned Depth = 3) const;
with an implementation like:
if (!Depth)
return PPC::XXLOR;
const unsigned MaxDist = 30;
unsigned Dist = StartDist;
for (auto J = I, JE = MBB.end(); J != JE && Dist <= MaxDist; ++J) {
if (J->isTransient() && !J->isCopy())
continue;
if (J->isCall() || J->isReturn() || J->readsRegister(DestReg, TRI))
return PPC::XXLOR;
++Dist;
}
// We've exceeded the required distance for the high-latency form, use it.
if (Dist > MaxDist)
return PPC::XVCPSGNDP;
// If this is only an exit block, use the low-latency form.
if (MBB.succ_empty())
return PPC::XXLOR;
// We've reached the end of the block, check the successor blocks (up to some
// depth), and use the high-latency form if that is okay with all successors.
for (auto J = MBB.succ_begin(), JE = MBB.succ_end(); J != JE; ++J) {
if (chooseVSXCopy(**J, (*J)->begin(), DestReg, SrcReg,
Dist, --Depth) == PPC::XXLOR)
return PPC::XXLOR;
}
// All of our successor blocks seem okay with the high-latency variant, so
// we'll use it.
return PPC::XVCPSGNDP;
and then changed the copy opcode selection from:
Opc = PPC::XXLOR;
to:
Opc = chooseVSXCopy(MBB, std::next(I), DestReg, SrcReg);
In conclusion, I'm removing the FIXME from the comment, because I believe that
there is, at least absent other examples, nothing to fix.
llvm-svn: 204591
2014-03-24 17:36:36 +08:00
|
|
|
// There are two different ways this can be done:
|
[PowerPC] Initial support for the VSX instruction set
VSX is an ISA extension supported on the POWER7 and later cores that enhances
floating-point vector and scalar capabilities. Among other things, this adds
<2 x double> support and generally helps to reduce register pressure.
The interesting part of this ISA feature is the register configuration: there
are 64 new 128-bit vector registers, the 32 of which are super-registers of the
existing 32 scalar floating-point registers, and the second 32 of which overlap
with the 32 Altivec vector registers. This makes things like vector insertion
and extraction tricky: this can be free but only if we force a restriction to
the right register subclass when needed. A new "minipass" PPCVSXCopy takes care
of this (although it could do a more-optimal job of it; see the comment about
unnecessary copies below).
Please note that, currently, VSX is not enabled by default when targeting
anything because it is not yet ready for that. The assembler and disassembler
are fully implemented and tested. However:
- CodeGen support causes miscompiles; test-suite runtime failures:
MultiSource/Benchmarks/FreeBench/distray/distray
MultiSource/Benchmarks/McCat/08-main/main
MultiSource/Benchmarks/Olden/voronoi/voronoi
MultiSource/Benchmarks/mafft/pairlocalalign
MultiSource/Benchmarks/tramp3d-v4/tramp3d-v4
SingleSource/Benchmarks/CoyoteBench/almabench
SingleSource/Benchmarks/Misc/matmul_f64_4x4
- The lowering currently falls back to using Altivec instructions far more
than it should. Worse, there are some things that are scalarized through the
stack that shouldn't be.
- A lot of unnecessary copies make it past the optimizers, and this needs to
be fixed.
- Many more regression tests are needed.
Normally, I'd fix these things prior to committing, but there are some
students and other contributors who would like to work this, and so it makes
sense to move this development process upstream where it can be subject to the
regular code-review procedures.
llvm-svn: 203768
2014-03-13 15:58:58 +08:00
|
|
|
// 1. xxlor : This has lower latency (on the P7), 2 cycles, but can only
|
|
|
|
// issue in VSU pipeline 0.
|
|
|
|
// 2. xmovdp/xmovsp: This has higher latency (on the P7), 6 cycles, but
|
|
|
|
// can go to either pipeline.
|
[PowerPC] Update comment re: VSX copy-instruction selection
I've done some experimentation with this, and it looks like using the
lower-latency (but lower throughput) copy instruction is essentially always the
right thing to do.
My assumption is that, in order to be relatively sure that the higher-latency
copy will increase throughput, we'd want to have it unlikely to be in-flight
with its use. On the P7, the global completion table (GCT) can hold a maximum
of 120 instructions, shared among all active threads (up to 4), giving 30
instructions per thread. So specifically, I'd require at least that many
instructions between the copy and the use before the high-latency variant is
used.
Trying this, however, over the entire test suite resulted in zero cases where
the high-latency form would be preferable. This may be a consequence of the
fact that the scheduler views copies as free, and so they tend to end up close
to their uses. For this experiment I created a function:
unsigned chooseVSXCopy(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I,
unsigned DestReg, unsigned SrcReg,
unsigned StartDist = 1,
unsigned Depth = 3) const;
with an implementation like:
if (!Depth)
return PPC::XXLOR;
const unsigned MaxDist = 30;
unsigned Dist = StartDist;
for (auto J = I, JE = MBB.end(); J != JE && Dist <= MaxDist; ++J) {
if (J->isTransient() && !J->isCopy())
continue;
if (J->isCall() || J->isReturn() || J->readsRegister(DestReg, TRI))
return PPC::XXLOR;
++Dist;
}
// We've exceeded the required distance for the high-latency form, use it.
if (Dist > MaxDist)
return PPC::XVCPSGNDP;
// If this is only an exit block, use the low-latency form.
if (MBB.succ_empty())
return PPC::XXLOR;
// We've reached the end of the block, check the successor blocks (up to some
// depth), and use the high-latency form if that is okay with all successors.
for (auto J = MBB.succ_begin(), JE = MBB.succ_end(); J != JE; ++J) {
if (chooseVSXCopy(**J, (*J)->begin(), DestReg, SrcReg,
Dist, --Depth) == PPC::XXLOR)
return PPC::XXLOR;
}
// All of our successor blocks seem okay with the high-latency variant, so
// we'll use it.
return PPC::XVCPSGNDP;
and then changed the copy opcode selection from:
Opc = PPC::XXLOR;
to:
Opc = chooseVSXCopy(MBB, std::next(I), DestReg, SrcReg);
In conclusion, I'm removing the FIXME from the comment, because I believe that
there is, at least absent other examples, nothing to fix.
llvm-svn: 204591
2014-03-24 17:36:36 +08:00
|
|
|
// We'll always use xxlor here, because in practically all cases where
|
|
|
|
// copies are generated, they are close enough to some use that the
|
|
|
|
// lower-latency form is preferable.
|
[PowerPC] Initial support for the VSX instruction set
VSX is an ISA extension supported on the POWER7 and later cores that enhances
floating-point vector and scalar capabilities. Among other things, this adds
<2 x double> support and generally helps to reduce register pressure.
The interesting part of this ISA feature is the register configuration: there
are 64 new 128-bit vector registers, the 32 of which are super-registers of the
existing 32 scalar floating-point registers, and the second 32 of which overlap
with the 32 Altivec vector registers. This makes things like vector insertion
and extraction tricky: this can be free but only if we force a restriction to
the right register subclass when needed. A new "minipass" PPCVSXCopy takes care
of this (although it could do a more-optimal job of it; see the comment about
unnecessary copies below).
Please note that, currently, VSX is not enabled by default when targeting
anything because it is not yet ready for that. The assembler and disassembler
are fully implemented and tested. However:
- CodeGen support causes miscompiles; test-suite runtime failures:
MultiSource/Benchmarks/FreeBench/distray/distray
MultiSource/Benchmarks/McCat/08-main/main
MultiSource/Benchmarks/Olden/voronoi/voronoi
MultiSource/Benchmarks/mafft/pairlocalalign
MultiSource/Benchmarks/tramp3d-v4/tramp3d-v4
SingleSource/Benchmarks/CoyoteBench/almabench
SingleSource/Benchmarks/Misc/matmul_f64_4x4
- The lowering currently falls back to using Altivec instructions far more
than it should. Worse, there are some things that are scalarized through the
stack that shouldn't be.
- A lot of unnecessary copies make it past the optimizers, and this needs to
be fixed.
- Many more regression tests are needed.
Normally, I'd fix these things prior to committing, but there are some
students and other contributors who would like to work on this, and so it makes
sense to move this development process upstream where it can be subject to the
regular code-review procedures.
llvm-svn: 203768
2014-03-13 15:58:58 +08:00
|
|
|
Opc = PPC::XXLOR;
|
2015-05-08 02:24:05 +08:00
|
|
|
else if (PPC::VSFRCRegClass.contains(DestReg, SrcReg) ||
|
|
|
|
PPC::VSSRCRegClass.contains(DestReg, SrcReg))
|
2018-08-25 04:00:24 +08:00
|
|
|
Opc = (Subtarget.hasP9Vector()) ? PPC::XSCPSGNDP : PPC::XXLORf;
|
[PowerPC] Add support for the QPX vector instruction set
This adds support for the QPX vector instruction set, which is used by the
enhanced A2 cores on the IBM BG/Q supercomputers. QPX vectors are 256 bytes
wide, holding 4 double-precision floating-point values. Boolean values, modeled
here as <4 x i1> are actually also represented as floating-point values
(essentially { -1, 1 } for { false, true }). QPX shares many features with
Altivec and VSX, but is distinct from both of them. One major difference is
that, instead of adding completely-separate vector registers, QPX vector
registers are extensions of the scalar floating-point registers (lane 0 is the
corresponding scalar floating-point value). The operations supported on QPX
vectors mirrors that supported on the scalar floating-point values (with some
additional ones for permutations and logical/comparison operations).
I've been maintaining this support out-of-tree, as part of the bgclang project,
for several years. This is not the entire bgclang patch set, but is most of the
subset that can be cleanly integrated into LLVM proper at this time. Adding
this to the LLVM backend is part of my efforts to rebase bgclang to the current
LLVM trunk, but is independently useful (especially for codes that use LLVM as
a JIT in library form).
The assembler/disassembler test coverage is complete. The CodeGen test coverage
is not, but I've included some tests, and more will be added as follow-up work.
llvm-svn: 230413
2015-02-25 09:06:45 +08:00
|
|
|
else if (PPC::QFRCRegClass.contains(DestReg, SrcReg))
|
|
|
|
Opc = PPC::QVFMR;
|
|
|
|
else if (PPC::QSRCRegClass.contains(DestReg, SrcReg))
|
|
|
|
Opc = PPC::QVFMRs;
|
|
|
|
else if (PPC::QBRCRegClass.contains(DestReg, SrcReg))
|
|
|
|
Opc = PPC::QVFMRb;
|
2010-07-11 15:31:00 +08:00
|
|
|
else if (PPC::CRBITRCRegClass.contains(DestReg, SrcReg))
|
|
|
|
Opc = PPC::CROR;
|
2019-07-17 20:30:48 +08:00
|
|
|
else if (PPC::SPE4RCRegClass.contains(DestReg, SrcReg))
|
|
|
|
Opc = PPC::OR;
|
2018-07-18 12:25:10 +08:00
|
|
|
else if (PPC::SPERCRegClass.contains(DestReg, SrcReg))
|
|
|
|
Opc = PPC::EVOR;
|
2010-07-11 15:31:00 +08:00
|
|
|
else
|
|
|
|
llvm_unreachable("Impossible reg-to-reg copy");
|
2007-12-31 14:32:00 +08:00
|
|
|
|
2011-06-29 03:10:37 +08:00
|
|
|
const MCInstrDesc &MCID = get(Opc);
|
|
|
|
if (MCID.getNumOperands() == 3)
|
|
|
|
BuildMI(MBB, I, DL, MCID, DestReg)
|
2010-07-11 15:31:00 +08:00
|
|
|
.addReg(SrcReg).addReg(SrcReg, getKillRegState(KillSrc));
|
|
|
|
else
|
2011-06-29 03:10:37 +08:00
|
|
|
BuildMI(MBB, I, DL, MCID, DestReg).addReg(SrcReg, getKillRegState(KillSrc));
|
2007-12-31 14:32:00 +08:00
|
|
|
}
|
|
|
|
|
2018-03-27 01:39:18 +08:00
|
|
|
unsigned PPCInstrInfo::getStoreOpcodeForSpill(unsigned Reg,
|
|
|
|
const TargetRegisterClass *RC)
|
|
|
|
const {
|
|
|
|
const unsigned *OpcodesForSpill = getStoreOpcodesForSpillArray();
|
|
|
|
int OpcodeIndex = 0;
|
|
|
|
|
|
|
|
if (RC != nullptr) {
|
|
|
|
if (PPC::GPRCRegClass.hasSubClassEq(RC) ||
|
|
|
|
PPC::GPRC_NOR0RegClass.hasSubClassEq(RC)) {
|
|
|
|
OpcodeIndex = SOK_Int4Spill;
|
|
|
|
} else if (PPC::G8RCRegClass.hasSubClassEq(RC) ||
|
|
|
|
PPC::G8RC_NOX0RegClass.hasSubClassEq(RC)) {
|
|
|
|
OpcodeIndex = SOK_Int8Spill;
|
|
|
|
} else if (PPC::F8RCRegClass.hasSubClassEq(RC)) {
|
|
|
|
OpcodeIndex = SOK_Float8Spill;
|
|
|
|
} else if (PPC::F4RCRegClass.hasSubClassEq(RC)) {
|
|
|
|
OpcodeIndex = SOK_Float4Spill;
|
2018-07-18 12:25:10 +08:00
|
|
|
} else if (PPC::SPERCRegClass.hasSubClassEq(RC)) {
|
|
|
|
OpcodeIndex = SOK_SPESpill;
|
|
|
|
} else if (PPC::SPE4RCRegClass.hasSubClassEq(RC)) {
|
|
|
|
OpcodeIndex = SOK_SPE4Spill;
|
2018-03-27 01:39:18 +08:00
|
|
|
} else if (PPC::CRRCRegClass.hasSubClassEq(RC)) {
|
|
|
|
OpcodeIndex = SOK_CRSpill;
|
|
|
|
} else if (PPC::CRBITRCRegClass.hasSubClassEq(RC)) {
|
|
|
|
OpcodeIndex = SOK_CRBitSpill;
|
|
|
|
} else if (PPC::VRRCRegClass.hasSubClassEq(RC)) {
|
|
|
|
OpcodeIndex = SOK_VRVectorSpill;
|
|
|
|
} else if (PPC::VSRCRegClass.hasSubClassEq(RC)) {
|
|
|
|
OpcodeIndex = SOK_VSXVectorSpill;
|
|
|
|
} else if (PPC::VSFRCRegClass.hasSubClassEq(RC)) {
|
|
|
|
OpcodeIndex = SOK_VectorFloat8Spill;
|
|
|
|
} else if (PPC::VSSRCRegClass.hasSubClassEq(RC)) {
|
|
|
|
OpcodeIndex = SOK_VectorFloat4Spill;
|
|
|
|
} else if (PPC::VRSAVERCRegClass.hasSubClassEq(RC)) {
|
|
|
|
OpcodeIndex = SOK_VRSaveSpill;
|
|
|
|
} else if (PPC::QFRCRegClass.hasSubClassEq(RC)) {
|
|
|
|
OpcodeIndex = SOK_QuadFloat8Spill;
|
|
|
|
} else if (PPC::QSRCRegClass.hasSubClassEq(RC)) {
|
|
|
|
OpcodeIndex = SOK_QuadFloat4Spill;
|
|
|
|
} else if (PPC::QBRCRegClass.hasSubClassEq(RC)) {
|
|
|
|
OpcodeIndex = SOK_QuadBitSpill;
|
|
|
|
} else if (PPC::SPILLTOVSRRCRegClass.hasSubClassEq(RC)) {
|
|
|
|
OpcodeIndex = SOK_SpillToVSR;
|
|
|
|
} else {
|
|
|
|
llvm_unreachable("Unknown regclass!");
|
|
|
|
}
|
2008-01-02 05:11:32 +08:00
|
|
|
} else {
|
2018-03-27 01:39:18 +08:00
|
|
|
if (PPC::GPRCRegClass.contains(Reg) ||
|
|
|
|
PPC::GPRC_NOR0RegClass.contains(Reg)) {
|
|
|
|
OpcodeIndex = SOK_Int4Spill;
|
|
|
|
} else if (PPC::G8RCRegClass.contains(Reg) ||
|
|
|
|
PPC::G8RC_NOX0RegClass.contains(Reg)) {
|
|
|
|
OpcodeIndex = SOK_Int8Spill;
|
|
|
|
} else if (PPC::F8RCRegClass.contains(Reg)) {
|
|
|
|
OpcodeIndex = SOK_Float8Spill;
|
|
|
|
} else if (PPC::F4RCRegClass.contains(Reg)) {
|
|
|
|
OpcodeIndex = SOK_Float4Spill;
|
2019-02-28 20:23:28 +08:00
|
|
|
} else if (PPC::SPERCRegClass.contains(Reg)) {
|
|
|
|
OpcodeIndex = SOK_SPESpill;
|
|
|
|
} else if (PPC::SPE4RCRegClass.contains(Reg)) {
|
|
|
|
OpcodeIndex = SOK_SPE4Spill;
|
2018-03-27 01:39:18 +08:00
|
|
|
} else if (PPC::CRRCRegClass.contains(Reg)) {
|
|
|
|
OpcodeIndex = SOK_CRSpill;
|
|
|
|
} else if (PPC::CRBITRCRegClass.contains(Reg)) {
|
|
|
|
OpcodeIndex = SOK_CRBitSpill;
|
|
|
|
} else if (PPC::VRRCRegClass.contains(Reg)) {
|
|
|
|
OpcodeIndex = SOK_VRVectorSpill;
|
|
|
|
} else if (PPC::VSRCRegClass.contains(Reg)) {
|
|
|
|
OpcodeIndex = SOK_VSXVectorSpill;
|
|
|
|
} else if (PPC::VSFRCRegClass.contains(Reg)) {
|
|
|
|
OpcodeIndex = SOK_VectorFloat8Spill;
|
|
|
|
} else if (PPC::VSSRCRegClass.contains(Reg)) {
|
|
|
|
OpcodeIndex = SOK_VectorFloat4Spill;
|
|
|
|
} else if (PPC::VRSAVERCRegClass.contains(Reg)) {
|
|
|
|
OpcodeIndex = SOK_VRSaveSpill;
|
|
|
|
} else if (PPC::QFRCRegClass.contains(Reg)) {
|
|
|
|
OpcodeIndex = SOK_QuadFloat8Spill;
|
|
|
|
} else if (PPC::QSRCRegClass.contains(Reg)) {
|
|
|
|
OpcodeIndex = SOK_QuadFloat4Spill;
|
|
|
|
} else if (PPC::QBRCRegClass.contains(Reg)) {
|
|
|
|
OpcodeIndex = SOK_QuadBitSpill;
|
|
|
|
} else if (PPC::SPILLTOVSRRCRegClass.contains(Reg)) {
|
|
|
|
OpcodeIndex = SOK_SpillToVSR;
|
|
|
|
} else {
|
|
|
|
llvm_unreachable("Unknown regclass!");
|
|
|
|
}
|
2008-01-02 05:11:32 +08:00
|
|
|
}
|
2018-03-27 01:39:18 +08:00
|
|
|
return OpcodesForSpill[OpcodeIndex];
|
|
|
|
}
|
2008-03-04 06:19:16 +08:00
|
|
|
|
2018-03-27 01:39:18 +08:00
|
|
|
unsigned
|
|
|
|
PPCInstrInfo::getLoadOpcodeForSpill(unsigned Reg,
|
|
|
|
const TargetRegisterClass *RC) const {
|
|
|
|
const unsigned *OpcodesForSpill = getLoadOpcodesForSpillArray();
|
|
|
|
int OpcodeIndex = 0;
|
|
|
|
|
|
|
|
if (RC != nullptr) {
|
|
|
|
if (PPC::GPRCRegClass.hasSubClassEq(RC) ||
|
|
|
|
PPC::GPRC_NOR0RegClass.hasSubClassEq(RC)) {
|
|
|
|
OpcodeIndex = SOK_Int4Spill;
|
|
|
|
} else if (PPC::G8RCRegClass.hasSubClassEq(RC) ||
|
|
|
|
PPC::G8RC_NOX0RegClass.hasSubClassEq(RC)) {
|
|
|
|
OpcodeIndex = SOK_Int8Spill;
|
|
|
|
} else if (PPC::F8RCRegClass.hasSubClassEq(RC)) {
|
|
|
|
OpcodeIndex = SOK_Float8Spill;
|
|
|
|
} else if (PPC::F4RCRegClass.hasSubClassEq(RC)) {
|
|
|
|
OpcodeIndex = SOK_Float4Spill;
|
2018-07-18 12:25:10 +08:00
|
|
|
} else if (PPC::SPERCRegClass.hasSubClassEq(RC)) {
|
|
|
|
OpcodeIndex = SOK_SPESpill;
|
|
|
|
} else if (PPC::SPE4RCRegClass.hasSubClassEq(RC)) {
|
|
|
|
OpcodeIndex = SOK_SPE4Spill;
|
2018-03-27 01:39:18 +08:00
|
|
|
} else if (PPC::CRRCRegClass.hasSubClassEq(RC)) {
|
|
|
|
OpcodeIndex = SOK_CRSpill;
|
|
|
|
} else if (PPC::CRBITRCRegClass.hasSubClassEq(RC)) {
|
|
|
|
OpcodeIndex = SOK_CRBitSpill;
|
|
|
|
} else if (PPC::VRRCRegClass.hasSubClassEq(RC)) {
|
|
|
|
OpcodeIndex = SOK_VRVectorSpill;
|
|
|
|
} else if (PPC::VSRCRegClass.hasSubClassEq(RC)) {
|
|
|
|
OpcodeIndex = SOK_VSXVectorSpill;
|
|
|
|
} else if (PPC::VSFRCRegClass.hasSubClassEq(RC)) {
|
|
|
|
OpcodeIndex = SOK_VectorFloat8Spill;
|
|
|
|
} else if (PPC::VSSRCRegClass.hasSubClassEq(RC)) {
|
|
|
|
OpcodeIndex = SOK_VectorFloat4Spill;
|
|
|
|
} else if (PPC::VRSAVERCRegClass.hasSubClassEq(RC)) {
|
|
|
|
OpcodeIndex = SOK_VRSaveSpill;
|
|
|
|
} else if (PPC::QFRCRegClass.hasSubClassEq(RC)) {
|
|
|
|
OpcodeIndex = SOK_QuadFloat8Spill;
|
|
|
|
} else if (PPC::QSRCRegClass.hasSubClassEq(RC)) {
|
|
|
|
OpcodeIndex = SOK_QuadFloat4Spill;
|
|
|
|
} else if (PPC::QBRCRegClass.hasSubClassEq(RC)) {
|
|
|
|
OpcodeIndex = SOK_QuadBitSpill;
|
|
|
|
} else if (PPC::SPILLTOVSRRCRegClass.hasSubClassEq(RC)) {
|
|
|
|
OpcodeIndex = SOK_SpillToVSR;
|
|
|
|
} else {
|
|
|
|
llvm_unreachable("Unknown regclass!");
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
if (PPC::GPRCRegClass.contains(Reg) ||
|
|
|
|
PPC::GPRC_NOR0RegClass.contains(Reg)) {
|
|
|
|
OpcodeIndex = SOK_Int4Spill;
|
|
|
|
} else if (PPC::G8RCRegClass.contains(Reg) ||
|
|
|
|
PPC::G8RC_NOX0RegClass.contains(Reg)) {
|
|
|
|
OpcodeIndex = SOK_Int8Spill;
|
|
|
|
} else if (PPC::F8RCRegClass.contains(Reg)) {
|
|
|
|
OpcodeIndex = SOK_Float8Spill;
|
|
|
|
} else if (PPC::F4RCRegClass.contains(Reg)) {
|
|
|
|
OpcodeIndex = SOK_Float4Spill;
|
2019-02-28 20:23:28 +08:00
|
|
|
} else if (PPC::SPERCRegClass.contains(Reg)) {
|
|
|
|
OpcodeIndex = SOK_SPESpill;
|
|
|
|
} else if (PPC::SPE4RCRegClass.contains(Reg)) {
|
|
|
|
OpcodeIndex = SOK_SPE4Spill;
|
2018-03-27 01:39:18 +08:00
|
|
|
} else if (PPC::CRRCRegClass.contains(Reg)) {
|
|
|
|
OpcodeIndex = SOK_CRSpill;
|
|
|
|
} else if (PPC::CRBITRCRegClass.contains(Reg)) {
|
|
|
|
OpcodeIndex = SOK_CRBitSpill;
|
|
|
|
} else if (PPC::VRRCRegClass.contains(Reg)) {
|
|
|
|
OpcodeIndex = SOK_VRVectorSpill;
|
|
|
|
} else if (PPC::VSRCRegClass.contains(Reg)) {
|
|
|
|
OpcodeIndex = SOK_VSXVectorSpill;
|
|
|
|
} else if (PPC::VSFRCRegClass.contains(Reg)) {
|
|
|
|
OpcodeIndex = SOK_VectorFloat8Spill;
|
|
|
|
} else if (PPC::VSSRCRegClass.contains(Reg)) {
|
|
|
|
OpcodeIndex = SOK_VectorFloat4Spill;
|
|
|
|
} else if (PPC::VRSAVERCRegClass.contains(Reg)) {
|
|
|
|
OpcodeIndex = SOK_VRSaveSpill;
|
|
|
|
} else if (PPC::QFRCRegClass.contains(Reg)) {
|
|
|
|
OpcodeIndex = SOK_QuadFloat8Spill;
|
|
|
|
} else if (PPC::QSRCRegClass.contains(Reg)) {
|
|
|
|
OpcodeIndex = SOK_QuadFloat4Spill;
|
|
|
|
} else if (PPC::QBRCRegClass.contains(Reg)) {
|
|
|
|
OpcodeIndex = SOK_QuadBitSpill;
|
|
|
|
} else if (PPC::SPILLTOVSRRCRegClass.contains(Reg)) {
|
|
|
|
OpcodeIndex = SOK_SpillToVSR;
|
|
|
|
} else {
|
|
|
|
llvm_unreachable("Unknown regclass!");
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return OpcodesForSpill[OpcodeIndex];
|
2008-01-02 05:11:32 +08:00
|
|
|
}
|
|
|
|
|
2018-03-27 01:39:18 +08:00
|
|
|
void PPCInstrInfo::StoreRegToStackSlot(
    MachineFunction &MF, unsigned SrcReg, bool isKill, int FrameIdx,
    const TargetRegisterClass *RC,
    SmallVectorImpl<MachineInstr *> &NewMIs) const {
  // Build the store instruction for spilling SrcReg of class RC into the
  // stack slot FrameIdx, appending it to NewMIs, and record the relevant
  // spill facts on the function info.
  PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
  FI->setHasSpills();

  const unsigned Opcode = getStoreOpcodeForSpill(PPC::NoRegister, RC);
  DebugLoc DL;
  MachineInstrBuilder MIB =
      BuildMI(MF, DL, get(Opcode)).addReg(SrcReg, getKillRegState(isKill));
  NewMIs.push_back(addFrameReference(MIB, FrameIdx));

  // Spilling any CR register (or CR bit) must be noted so the prologue/epilogue
  // code can handle it.
  if (PPC::CRRCRegClass.hasSubClassEq(RC) ||
      PPC::CRBITRCRegClass.hasSubClassEq(RC))
    FI->setSpillsCR();

  if (PPC::VRSAVERCRegClass.hasSubClassEq(RC))
    FI->setSpillsVRSAVE();

  // X-form (register+register) memory ops cannot take an immediate offset.
  if (isXFormMemOp(Opcode))
    FI->setHasNonRISpills();
}
|
|
|
|
|
|
|
|
void PPCInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                       MachineBasicBlock::iterator MI,
                                       unsigned SrcReg, bool isKill,
                                       int FrameIdx,
                                       const TargetRegisterClass *RC,
                                       const TargetRegisterInfo *TRI) const {
  // Public entry point for spilling SrcReg to the stack slot FrameIdx just
  // before MI in MBB.
  MachineFunction &MF = *MBB.getParent();

  // We need to avoid a situation in which the value from a VRRC register is
  // spilled using an Altivec instruction and reloaded into a VSRC register
  // using a VSX instruction. The issue with this is that the VSX
  // load/store instructions swap the doublewords in the vector and the Altivec
  // ones don't. The register classes on the spill/reload may be different if
  // the register is defined using an Altivec instruction and is then used by a
  // VSX instruction.
  RC = updatedRC(RC);

  SmallVector<MachineInstr *, 4> NewMIs;
  StoreRegToStackSlot(MF, SrcReg, isKill, FrameIdx, RC, NewMIs);

  for (MachineInstr *NewMI : NewMIs)
    MBB.insert(MI, NewMI);

  // Attach a memory operand describing the stack-slot store to the final
  // instruction.
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      MachinePointerInfo::getFixedStack(MF, FrameIdx),
      MachineMemOperand::MOStore, MFI.getObjectSize(FrameIdx),
      MFI.getObjectAlignment(FrameIdx));
  NewMIs.back()->addMemOperand(MF, MMO);
}
|
|
|
|
|
2018-03-27 01:39:18 +08:00
|
|
|
void PPCInstrInfo::LoadRegFromStackSlot(MachineFunction &MF, const DebugLoc &DL,
|
2016-06-12 23:39:02 +08:00
|
|
|
unsigned DestReg, int FrameIdx,
|
|
|
|
const TargetRegisterClass *RC,
|
2018-03-27 01:39:18 +08:00
|
|
|
SmallVectorImpl<MachineInstr *> &NewMIs)
|
|
|
|
const {
|
|
|
|
unsigned Opcode = getLoadOpcodeForSpill(PPC::NoRegister, RC);
|
|
|
|
NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(Opcode), DestReg),
|
|
|
|
FrameIdx));
|
|
|
|
PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
|
2011-12-07 04:55:36 +08:00
|
|
|
|
2018-03-27 01:39:18 +08:00
|
|
|
if (PPC::CRRCRegClass.hasSubClassEq(RC) ||
|
|
|
|
PPC::CRBITRCRegClass.hasSubClassEq(RC))
|
|
|
|
FuncInfo->setSpillsCR();
|
|
|
|
|
|
|
|
if (PPC::VRSAVERCRegClass.hasSubClassEq(RC))
|
|
|
|
FuncInfo->setSpillsVRSAVE();
|
|
|
|
|
|
|
|
if (isXFormMemOp(Opcode))
|
|
|
|
FuncInfo->setHasNonRISpills();
|
2008-01-02 05:11:32 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
PPCInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
|
2008-03-04 06:19:16 +08:00
|
|
|
MachineBasicBlock::iterator MI,
|
|
|
|
unsigned DestReg, int FrameIdx,
|
2010-05-07 03:06:44 +08:00
|
|
|
const TargetRegisterClass *RC,
|
|
|
|
const TargetRegisterInfo *TRI) const {
|
2008-07-08 07:14:23 +08:00
|
|
|
MachineFunction &MF = *MBB.getParent();
|
2008-01-02 05:11:32 +08:00
|
|
|
SmallVector<MachineInstr*, 4> NewMIs;
|
2010-04-03 04:16:16 +08:00
|
|
|
DebugLoc DL;
|
2009-02-12 08:02:55 +08:00
|
|
|
if (MI != MBB.end()) DL = MI->getDebugLoc();
|
2013-03-17 12:43:44 +08:00
|
|
|
|
|
|
|
PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
|
|
|
|
FuncInfo->setHasSpills();
|
|
|
|
|
2016-10-04 14:59:23 +08:00
|
|
|
// We need to avoid a situation in which the value from a VRRC register is
|
|
|
|
// spilled using an Altivec instruction and reloaded into a VSRC register
|
|
|
|
// using a VSX instruction. The issue with this is that the VSX
|
|
|
|
// load/store instructions swap the doublewords in the vector and the Altivec
|
|
|
|
// ones don't. The register classes on the spill/reload may be different if
|
|
|
|
// the register is defined using an Altivec instruction and is then used by a
|
|
|
|
// VSX instruction.
|
|
|
|
if (Subtarget.hasVSX() && RC == &PPC::VRRCRegClass)
|
|
|
|
RC = &PPC::VSRCRegClass;
|
|
|
|
|
2018-03-27 01:39:18 +08:00
|
|
|
LoadRegFromStackSlot(MF, DL, DestReg, FrameIdx, RC, NewMIs);
|
2013-03-17 12:43:44 +08:00
|
|
|
|
2008-01-02 05:11:32 +08:00
|
|
|
for (unsigned i = 0, e = NewMIs.size(); i != e; ++i)
|
|
|
|
MBB.insert(MI, NewMIs[i]);
|
2010-07-17 02:22:00 +08:00
|
|
|
|
2016-07-29 02:40:00 +08:00
|
|
|
const MachineFrameInfo &MFI = MF.getFrameInfo();
|
2015-08-12 07:09:45 +08:00
|
|
|
MachineMemOperand *MMO = MF.getMachineMemOperand(
|
|
|
|
MachinePointerInfo::getFixedStack(MF, FrameIdx),
|
|
|
|
MachineMemOperand::MOLoad, MFI.getObjectSize(FrameIdx),
|
|
|
|
MFI.getObjectAlignment(FrameIdx));
|
2010-07-17 02:22:00 +08:00
|
|
|
NewMIs.back()->addMemOperand(MF, MMO);
|
2008-01-02 05:11:32 +08:00
|
|
|
}
|
|
|
|
|
2006-10-14 05:21:17 +08:00
|
|
|
bool PPCInstrInfo::
|
2016-09-15 04:43:16 +08:00
|
|
|
reverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
|
2006-10-21 14:03:11 +08:00
|
|
|
assert(Cond.size() == 2 && "Invalid PPC branch opcode!");
|
2012-06-08 23:38:21 +08:00
|
|
|
if (Cond[1].getReg() == PPC::CTR8 || Cond[1].getReg() == PPC::CTR)
|
|
|
|
Cond[0].setImm(Cond[0].getImm() == 0 ? 1 : 0);
|
|
|
|
else
|
|
|
|
// Leave the CR# the same, but invert the condition.
|
|
|
|
Cond[0].setImm(PPC::InvertPredicate((PPC::Predicate)Cond[0].getImm()));
|
2006-10-21 14:03:11 +08:00
|
|
|
return false;
|
2006-10-14 05:21:17 +08:00
|
|
|
}
|
2008-04-17 04:10:13 +08:00
|
|
|
|
2016-06-30 08:01:54 +08:00
|
|
|
bool PPCInstrInfo::FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
                                 unsigned Reg, MachineRegisterInfo *MRI) const {
  // Try to fold an immediate 0 defined by DefMI (an LI/LI8 of 0) into the use
  // of Reg in UseMI by rewriting that operand to the ZERO/ZERO8 register.
  // Returns true if the fold was performed (and DefMI possibly erased).
  //
  // For some instructions, it is legal to fold ZERO into the RA register field.
  // A zero immediate should always be loaded with a single li.
  unsigned DefOpc = DefMI.getOpcode();
  if (DefOpc != PPC::LI && DefOpc != PPC::LI8)
    return false;
  if (!DefMI.getOperand(1).isImm())
    return false;
  if (DefMI.getOperand(1).getImm() != 0)
    return false;

  // Note that we cannot here invert the arguments of an isel in order to fold
  // a ZERO into what is presented as the second argument. All we have here
  // is the condition bit, and that might come from a CR-logical bit operation.

  const MCInstrDesc &UseMCID = UseMI.getDesc();

  // Only fold into real machine instructions.
  if (UseMCID.isPseudo())
    return false;

  // Locate the (first) operand of UseMI that reads Reg.
  unsigned UseIdx;
  for (UseIdx = 0; UseIdx < UseMI.getNumOperands(); ++UseIdx)
    if (UseMI.getOperand(UseIdx).isReg() &&
        UseMI.getOperand(UseIdx).getReg() == Reg)
      break;

  assert(UseIdx < UseMI.getNumOperands() && "Cannot find Reg in UseMI");
  assert(UseIdx < UseMCID.getNumOperands() && "No operand description for Reg");

  const MCOperandInfo *UseInfo = &UseMCID.OpInfo[UseIdx];

  // We can fold the zero if this register requires a GPRC_NOR0/G8RC_NOX0
  // register (which might also be specified as a pointer class kind).
  if (UseInfo->isLookupPtrRegClass()) {
    if (UseInfo->RegClass /* Kind */ != 1)
      return false;
  } else {
    if (UseInfo->RegClass != PPC::GPRC_NOR0RegClassID &&
        UseInfo->RegClass != PPC::G8RC_NOX0RegClassID)
      return false;
  }

  // Make sure this is not tied to an output register (or otherwise
  // constrained). This is true for ST?UX registers, for example, which
  // are tied to their output registers.
  if (UseInfo->Constraints != 0)
    return false;

  // Pick the 32- or 64-bit ZERO register matching the operand's class (or the
  // subtarget's pointer size for pointer-class operands).
  unsigned ZeroReg;
  if (UseInfo->isLookupPtrRegClass()) {
    bool isPPC64 = Subtarget.isPPC64();
    ZeroReg = isPPC64 ? PPC::ZERO8 : PPC::ZERO;
  } else {
    ZeroReg = UseInfo->RegClass == PPC::G8RC_NOX0RegClassID ?
              PPC::ZERO8 : PPC::ZERO;
  }

  // If this was the only (non-debug) use of Reg, the defining li is now dead
  // and can be removed.
  bool DeleteDef = MRI->hasOneNonDBGUse(Reg);
  UseMI.getOperand(UseIdx).setReg(ZeroReg);

  if (DeleteDef)
    DefMI.eraseFromParent();

  return true;
}
|
|
|
|
|
2013-04-11 02:30:16 +08:00
|
|
|
// Return true if any instruction in MBB writes the 32- or 64-bit count
// register.
static bool MBBDefinesCTR(MachineBasicBlock &MBB) {
  for (MachineInstr &MI : MBB)
    if (MI.definesRegister(PPC::CTR) || MI.definesRegister(PPC::CTR8))
      return true;
  return false;
}
|
|
|
|
|
|
|
|
// We should make sure that, if we're going to predicate both sides of a
|
|
|
|
// condition (a diamond), that both sides don't define the counter register. We
|
|
|
|
// can predicate counter-decrement-based branches, but while that predicates
|
|
|
|
// the branching, it does not predicate the counter decrement. If we tried to
|
|
|
|
// merge the triangle into one predicated block, we'd decrement the counter
|
|
|
|
// twice.
|
|
|
|
bool PPCInstrInfo::isProfitableToIfCvt(MachineBasicBlock &TMBB,
|
|
|
|
unsigned NumT, unsigned ExtraT,
|
|
|
|
MachineBasicBlock &FMBB,
|
|
|
|
unsigned NumF, unsigned ExtraF,
|
2015-09-11 07:10:42 +08:00
|
|
|
BranchProbability Probability) const {
|
2013-04-11 02:30:16 +08:00
|
|
|
return !(MBBDefinesCTR(TMBB) && MBBDefinesCTR(FMBB));
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2016-02-23 10:46:52 +08:00
|
|
|
bool PPCInstrInfo::isPredicated(const MachineInstr &MI) const {
  // PPC's predicated branches are identified by their opcode type rather than
  // by an explicit predicate operand, and several of them can be predicated
  // more than once. If-conversion refuses to predicate anything that already
  // reports itself predicated, so unconditionally answer "no" here and let
  // isPredicable() have the final word on whether an instruction can be
  // (further) predicated.
  return false;
}
|
|
|
|
|
2016-02-23 10:46:52 +08:00
|
|
|
bool PPCInstrInfo::isUnpredicatedTerminator(const MachineInstr &MI) const {
|
|
|
|
if (!MI.isTerminator())
|
2013-04-10 06:58:37 +08:00
|
|
|
return false;
|
|
|
|
|
|
|
|
// Conditional branch is a special case.
|
2016-02-23 10:46:52 +08:00
|
|
|
if (MI.isBranch() && !MI.isBarrier())
|
2013-04-10 06:58:37 +08:00
|
|
|
return true;
|
|
|
|
|
|
|
|
return !isPredicated(MI);
|
|
|
|
}
|
|
|
|
|
2016-02-23 10:46:52 +08:00
|
|
|
// Rewrite MI in place into its predicated form, where Pred is the two-operand
// PPC predicate produced by analyzeBranch: Pred[0] is the condition code
// immediate (or a CTR zero/nonzero flag) and Pred[1] is the condition
// register/bit operand (or CTR/CTR8 for counter-based predication).
// Returns true if MI was rewritten, false if this opcode cannot be predicated.
bool PPCInstrInfo::PredicateInstruction(MachineInstr &MI,
                                        ArrayRef<MachineOperand> Pred) const {
  unsigned OpC = MI.getOpcode();
  if (OpC == PPC::BLR || OpC == PPC::BLR8) {
    // Predicating a return.
    if (Pred[1].getReg() == PPC::CTR8 || Pred[1].getReg() == PPC::CTR) {
      // Counter-based predication: becomes a decrement-and-branch-to-LR.
      // Pred[0].getImm() != 0 selects "branch if CTR nonzero" (BDNZ forms).
      bool isPPC64 = Subtarget.isPPC64();
      MI.setDesc(get(Pred[0].getImm() ? (isPPC64 ? PPC::BDNZLR8 : PPC::BDNZLR)
                                      : (isPPC64 ? PPC::BDZLR8 : PPC::BDZLR)));
    } else if (Pred[0].getImm() == PPC::PRED_BIT_SET) {
      // CR-bit predication: branch-to-LR if the given CR bit is set.
      MI.setDesc(get(PPC::BCLR));
      MachineInstrBuilder(*MI.getParent()->getParent(), MI).add(Pred[1]);
    } else if (Pred[0].getImm() == PPC::PRED_BIT_UNSET) {
      // CR-bit predication, negated form (branch if the bit is clear).
      MI.setDesc(get(PPC::BCLRn));
      MachineInstrBuilder(*MI.getParent()->getParent(), MI).add(Pred[1]);
    } else {
      // General condition-code predication: BCCLR takes the condition code
      // immediate followed by the condition register.
      MI.setDesc(get(PPC::BCCLR));
      MachineInstrBuilder(*MI.getParent()->getParent(), MI)
          .addImm(Pred[0].getImm())
          .add(Pred[1]);
    }

    return true;
  } else if (OpC == PPC::B) {
    // Predicating an unconditional branch. The target MBB is operand 0 of B
    // but must come last on the conditional forms, so it is detached first
    // and re-appended after the predicate operands.
    if (Pred[1].getReg() == PPC::CTR8 || Pred[1].getReg() == PPC::CTR) {
      // Counter-based: decrement CTR and branch on (non)zero; the branch
      // target operand is already in the right position for BDNZ/BDZ.
      bool isPPC64 = Subtarget.isPPC64();
      MI.setDesc(get(Pred[0].getImm() ? (isPPC64 ? PPC::BDNZ8 : PPC::BDNZ)
                                      : (isPPC64 ? PPC::BDZ8 : PPC::BDZ)));
    } else if (Pred[0].getImm() == PPC::PRED_BIT_SET) {
      // CR-bit form: BC <crbit>, <target>.
      MachineBasicBlock *MBB = MI.getOperand(0).getMBB();
      MI.RemoveOperand(0);

      MI.setDesc(get(PPC::BC));
      MachineInstrBuilder(*MI.getParent()->getParent(), MI)
          .add(Pred[1])
          .addMBB(MBB);
    } else if (Pred[0].getImm() == PPC::PRED_BIT_UNSET) {
      // Negated CR-bit form: BCn <crbit>, <target>.
      MachineBasicBlock *MBB = MI.getOperand(0).getMBB();
      MI.RemoveOperand(0);

      MI.setDesc(get(PPC::BCn));
      MachineInstrBuilder(*MI.getParent()->getParent(), MI)
          .add(Pred[1])
          .addMBB(MBB);
    } else {
      // General form: BCC <cc>, <crreg>, <target>.
      MachineBasicBlock *MBB = MI.getOperand(0).getMBB();
      MI.RemoveOperand(0);

      MI.setDesc(get(PPC::BCC));
      MachineInstrBuilder(*MI.getParent()->getParent(), MI)
          .addImm(Pred[0].getImm())
          .add(Pred[1])
          .addMBB(MBB);
    }

    return true;
  } else if (OpC == PPC::BCTR || OpC == PPC::BCTR8 || OpC == PPC::BCTRL ||
             OpC == PPC::BCTRL8) {
    // Predicating an indirect branch (or call) through the CTR. Counter-based
    // predication is impossible here: the branch itself consumes the CTR.
    if (Pred[1].getReg() == PPC::CTR8 || Pred[1].getReg() == PPC::CTR)
      llvm_unreachable("Cannot predicate bctr[l] on the ctr register");

    // setLR distinguishes the call forms (bctrl) from the plain branches.
    bool setLR = OpC == PPC::BCTRL || OpC == PPC::BCTRL8;
    bool isPPC64 = Subtarget.isPPC64();

    if (Pred[0].getImm() == PPC::PRED_BIT_SET) {
      // CR-bit form: branch/call through CTR if the bit is set.
      MI.setDesc(get(isPPC64 ? (setLR ? PPC::BCCTRL8 : PPC::BCCTR8)
                             : (setLR ? PPC::BCCTRL : PPC::BCCTR)));
      MachineInstrBuilder(*MI.getParent()->getParent(), MI).add(Pred[1]);
      return true;
    } else if (Pred[0].getImm() == PPC::PRED_BIT_UNSET) {
      // Negated CR-bit form.
      MI.setDesc(get(isPPC64 ? (setLR ? PPC::BCCTRL8n : PPC::BCCTR8n)
                             : (setLR ? PPC::BCCTRLn : PPC::BCCTRn)));
      MachineInstrBuilder(*MI.getParent()->getParent(), MI).add(Pred[1]);
      return true;
    }

    // General condition-code form: BCCCTR[L][8] <cc>, <crreg>.
    MI.setDesc(get(isPPC64 ? (setLR ? PPC::BCCCTRL8 : PPC::BCCCTR8)
                           : (setLR ? PPC::BCCCTRL : PPC::BCCCTR)));
    MachineInstrBuilder(*MI.getParent()->getParent(), MI)
        .addImm(Pred[0].getImm())
        .add(Pred[1]);
    return true;
  }

  return false;
}
|
|
|
|
|
2015-06-12 03:30:37 +08:00
|
|
|
// Return true whenever the first predicate is at least as strong as the
// second, i.e. any branch taken under Pred2 would also be taken under Pred1.
bool PPCInstrInfo::SubsumesPredicate(ArrayRef<MachineOperand> Pred1,
                                     ArrayRef<MachineOperand> Pred2) const {
  assert(Pred1.size() == 2 && "Invalid PPC first predicate");
  assert(Pred2.size() == 2 && "Invalid PPC second predicate");

  // The CTR-based (decrement-and-branch) predicates never participate in
  // subsumption.
  auto IsCTRPred = [](const MachineOperand &MO) {
    return MO.getReg() == PPC::CTR8 || MO.getReg() == PPC::CTR;
  };
  if (IsCTRPred(Pred1[1]) || IsCTRPred(Pred2[1]))
    return false;

  // P1 can only subsume P2 if they test the same condition register.
  if (Pred1[1].getReg() != Pred2[1].getReg())
    return false;

  PPC::Predicate P1 = (PPC::Predicate) Pred1[0].getImm();
  PPC::Predicate P2 = (PPC::Predicate) Pred2[0].getImm();

  // A predicate trivially subsumes itself.
  if (P1 == P2)
    return true;

  // Otherwise, only the weak inequalities subsume anything:
  // LE subsumes LT and EQ; GE subsumes GT and EQ.
  switch (P1) {
  case PPC::PRED_LE:
    return P2 == PPC::PRED_LT || P2 == PPC::PRED_EQ;
  case PPC::PRED_GE:
    return P2 == PPC::PRED_GT || P2 == PPC::PRED_EQ;
  default:
    return false;
  }
}
|
|
|
|
|
2016-02-23 10:46:52 +08:00
|
|
|
bool PPCInstrInfo::DefinesPredicate(MachineInstr &MI,
|
2013-04-10 06:58:37 +08:00
|
|
|
std::vector<MachineOperand> &Pred) const {
|
|
|
|
// Note: At the present time, the contents of Pred from this function is
|
|
|
|
// unused by IfConversion. This implementation follows ARM by pushing the
|
|
|
|
// CR-defining operand. Because the 'DZ' and 'DNZ' count as types of
|
|
|
|
// predicate, instructions defining CTR or CTR8 are also included as
|
|
|
|
// predicate-defining instructions.
|
|
|
|
|
|
|
|
const TargetRegisterClass *RCs[] =
|
|
|
|
{ &PPC::CRRCRegClass, &PPC::CRBITRCRegClass,
|
|
|
|
&PPC::CTRRCRegClass, &PPC::CTRRC8RegClass };
|
|
|
|
|
|
|
|
bool Found = false;
|
2016-02-23 10:46:52 +08:00
|
|
|
for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
|
|
|
|
const MachineOperand &MO = MI.getOperand(i);
|
2013-04-10 15:17:47 +08:00
|
|
|
for (unsigned c = 0; c < array_lengthof(RCs) && !Found; ++c) {
|
2013-04-10 06:58:37 +08:00
|
|
|
const TargetRegisterClass *RC = RCs[c];
|
2013-04-10 15:17:47 +08:00
|
|
|
if (MO.isReg()) {
|
|
|
|
if (MO.isDef() && RC->contains(MO.getReg())) {
|
2013-04-10 06:58:37 +08:00
|
|
|
Pred.push_back(MO);
|
|
|
|
Found = true;
|
|
|
|
}
|
2013-04-10 15:17:47 +08:00
|
|
|
} else if (MO.isRegMask()) {
|
|
|
|
for (TargetRegisterClass::iterator I = RC->begin(),
|
|
|
|
IE = RC->end(); I != IE; ++I)
|
|
|
|
if (MO.clobbersPhysReg(*I)) {
|
|
|
|
Pred.push_back(MO);
|
|
|
|
Found = true;
|
|
|
|
}
|
2013-04-10 06:58:37 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return Found;
|
|
|
|
}
|
|
|
|
|
2017-03-04 02:30:54 +08:00
|
|
|
bool PPCInstrInfo::isPredicable(const MachineInstr &MI) const {
|
2016-02-23 10:46:52 +08:00
|
|
|
unsigned OpC = MI.getOpcode();
|
2013-04-10 06:58:37 +08:00
|
|
|
switch (OpC) {
|
|
|
|
default:
|
|
|
|
return false;
|
|
|
|
case PPC::B:
|
|
|
|
case PPC::BLR:
|
2015-01-14 01:47:54 +08:00
|
|
|
case PPC::BLR8:
|
PPC: Prep for if conversion of bctr[l]
This adds in-principle support for if-converting the bctr[l] instructions.
These instructions are used for indirect branching. It seems, however, that the
current if converter will never actually predicate these. To do so, it would
need the ability to hoist a few setup insts. out of the conditionally-executed
block. For example, code like this:
void foo(int a, int (*bar)()) { if (a != 0) bar(); }
becomes:
...
beq 0, .LBB0_2
std 2, 40(1)
mr 12, 4
ld 3, 0(4)
ld 11, 16(4)
ld 2, 8(4)
mtctr 3
bctrl
ld 2, 40(1)
.LBB0_2:
...
and it would be safe to do all of this unconditionally with a predicated
beqctrl instruction.
llvm-svn: 179156
2013-04-10 14:42:34 +08:00
|
|
|
case PPC::BCTR:
|
|
|
|
case PPC::BCTR8:
|
|
|
|
case PPC::BCTRL:
|
|
|
|
case PPC::BCTRL8:
|
2013-04-10 06:58:37 +08:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-06-30 08:01:54 +08:00
|
|
|
bool PPCInstrInfo::analyzeCompare(const MachineInstr &MI, unsigned &SrcReg,
|
|
|
|
unsigned &SrcReg2, int &Mask,
|
|
|
|
int &Value) const {
|
|
|
|
unsigned Opc = MI.getOpcode();
|
2013-04-19 06:15:08 +08:00
|
|
|
|
|
|
|
switch (Opc) {
|
|
|
|
default: return false;
|
|
|
|
case PPC::CMPWI:
|
|
|
|
case PPC::CMPLWI:
|
|
|
|
case PPC::CMPDI:
|
|
|
|
case PPC::CMPLDI:
|
2016-06-30 08:01:54 +08:00
|
|
|
SrcReg = MI.getOperand(1).getReg();
|
2013-04-19 06:15:08 +08:00
|
|
|
SrcReg2 = 0;
|
2016-06-30 08:01:54 +08:00
|
|
|
Value = MI.getOperand(2).getImm();
|
2013-04-19 06:15:08 +08:00
|
|
|
Mask = 0xFFFF;
|
|
|
|
return true;
|
|
|
|
case PPC::CMPW:
|
|
|
|
case PPC::CMPLW:
|
|
|
|
case PPC::CMPD:
|
|
|
|
case PPC::CMPLD:
|
|
|
|
case PPC::FCMPUS:
|
|
|
|
case PPC::FCMPUD:
|
2016-06-30 08:01:54 +08:00
|
|
|
SrcReg = MI.getOperand(1).getReg();
|
|
|
|
SrcReg2 = MI.getOperand(2).getReg();
|
Summary
PPC backend eliminates compare instructions by using record-form instructions in PPCInstrInfo::optimizeCompareInstr, which is called from peephole optimization pass.
This patch improves this optimization to eliminate more compare instructions in two types of common case.
- comparison against a constant 1 or -1
The record-form instructions set CR bit based on signed comparison against 0. So, the current implementation does not exploit the record-form instruction for comparison against a non-zero constant.
This patch enables record-form optimization for constant of 1 or -1 if possible; it changes the condition "greater than -1" into "greater than or equal to 0" and "less than 1" into "less than or equal to 0".
With this patch, compare can be eliminated in the following code sequence, as an example.
uint64_t a, b;
if ((a | b) & 0x8000000000000000ull) { ... }
else { ... }
- andi for 32-bit comparison on PPC64
Since record-form instructions execute 64-bit signed comparison and so we have limitation in eliminating 32-bit comparison, i.e. with cmplwi, using the record-form. The original implementation already has such checks but andi. is not recognized as an instruction which executes implicit zero extension and hence safe to convert into record-form if used for equality check.
%1 = and i32 %a, 10
%2 = icmp ne i32 %1, 0
br i1 %2, label %foo, label %bar
In this simple example, LLVM generates andi. + cmplwi + beq on PPC64.
This patch make it possible to eliminate the cmplwi for this case.
I added andi. for optimization targets if it is safe to do so.
Differential Revision: https://reviews.llvm.org/D30081
llvm-svn: 303500
2017-05-21 14:00:05 +08:00
|
|
|
Value = 0;
|
|
|
|
Mask = 0;
|
2013-04-19 06:15:08 +08:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
}
|
2013-04-20 06:08:38 +08:00
|
|
|
|
2016-06-30 08:01:54 +08:00
|
|
|
bool PPCInstrInfo::optimizeCompareInstr(MachineInstr &CmpInstr, unsigned SrcReg,
|
|
|
|
unsigned SrcReg2, int Mask, int Value,
|
2013-04-19 06:15:08 +08:00
|
|
|
const MachineRegisterInfo *MRI) const {
|
2013-04-19 06:54:25 +08:00
|
|
|
if (DisableCmpOpt)
|
|
|
|
return false;
|
|
|
|
|
2016-06-30 08:01:54 +08:00
|
|
|
int OpC = CmpInstr.getOpcode();
|
Apply llvm-prefer-register-over-unsigned from clang-tidy to LLVM
Summary:
This clang-tidy check is looking for unsigned integer variables whose initializer
starts with an implicit cast from llvm::Register and changes the type of the
variable to llvm::Register (dropping the llvm:: where possible).
Partial reverts in:
X86FrameLowering.cpp - Some functions return unsigned and arguably should be MCRegister
X86FixupLEAs.cpp - Some functions return unsigned and arguably should be MCRegister
X86FrameLowering.cpp - Some functions return unsigned and arguably should be MCRegister
HexagonBitSimplify.cpp - Function takes BitTracker::RegisterRef which appears to be unsigned&
MachineVerifier.cpp - Ambiguous operator==() given MCRegister and const Register
PPCFastISel.cpp - No Register::operator-=()
PeepholeOptimizer.cpp - TargetInstrInfo::optimizeLoadInstr() takes an unsigned&
MachineTraceMetrics.cpp - MachineTraceMetrics lacks a suitable constructor
Manual fixups in:
ARMFastISel.cpp - ARMEmitLoad() now takes a Register& instead of unsigned&
HexagonSplitDouble.cpp - Ternary operator was ambiguous between unsigned/Register
HexagonConstExtenders.cpp - Has a local class named Register, used llvm::Register instead of Register.
PPCFastISel.cpp - PPCEmitLoad() now takes a Register& instead of unsigned&
Depends on D65919
Reviewers: arsenm, bogner, craig.topper, RKSimon
Reviewed By: arsenm
Subscribers: RKSimon, craig.topper, lenary, aemerson, wuzish, jholewinski, MatzeB, qcolombet, dschuff, jyknight, dylanmckay, sdardis, nemanjai, jvesely, wdng, nhaehnle, sbc100, jgravelle-google, kristof.beyls, hiraditya, aheejin, kbarton, fedor.sergeev, javed.absar, asb, rbar, johnrusso, simoncook, apazos, sabuasal, niosHD, jrtc27, MaskRay, zzheng, edward-jones, atanasyan, rogfer01, MartinMosbeck, brucehoult, the_o, tpr, PkmX, jocewei, jsji, Petar.Avramovic, asbirlea, Jim, s.egerton, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D65962
llvm-svn: 369041
2019-08-16 03:22:08 +08:00
|
|
|
Register CRReg = CmpInstr.getOperand(0).getReg();
|
2013-05-08 20:16:14 +08:00
|
|
|
|
2018-06-13 16:54:13 +08:00
|
|
|
// FP record forms set CR1 based on the exception status bits, not a
|
2013-05-08 20:16:14 +08:00
|
|
|
// comparison with zero.
|
|
|
|
if (OpC == PPC::FCMPUS || OpC == PPC::FCMPUD)
|
|
|
|
return false;
|
2013-04-19 06:15:08 +08:00
|
|
|
|
2019-06-04 03:09:15 +08:00
|
|
|
const TargetRegisterInfo *TRI = &getRegisterInfo();
|
2013-04-19 06:15:08 +08:00
|
|
|
// The record forms set the condition register based on a signed comparison
|
|
|
|
// with zero (so says the ISA manual). This is not as straightforward as it
|
|
|
|
// seems, however, because this is always a 64-bit comparison on PPC64, even
|
|
|
|
// for instructions that are 32-bit in nature (like slw for example).
|
|
|
|
// So, on PPC32, for unsigned comparisons, we can use the record forms only
|
|
|
|
// for equality checks (as those don't depend on the sign). On PPC64,
|
|
|
|
// we are restricted to equality for unsigned 64-bit comparisons and for
|
|
|
|
// signed 32-bit comparisons the applicability is more restricted.
|
2014-06-13 05:48:52 +08:00
|
|
|
bool isPPC64 = Subtarget.isPPC64();
|
2013-04-19 06:15:08 +08:00
|
|
|
bool is32BitSignedCompare = OpC == PPC::CMPWI || OpC == PPC::CMPW;
|
|
|
|
bool is32BitUnsignedCompare = OpC == PPC::CMPLWI || OpC == PPC::CMPLW;
|
|
|
|
bool is64BitUnsignedCompare = OpC == PPC::CMPLDI || OpC == PPC::CMPLD;
|
|
|
|
|
2019-06-04 03:09:15 +08:00
|
|
|
// Look through copies unless that gets us to a physical register.
|
|
|
|
unsigned ActualSrc = TRI->lookThruCopyLike(SrcReg, MRI);
|
2019-08-02 07:27:28 +08:00
|
|
|
if (Register::isVirtualRegister(ActualSrc))
|
2019-06-04 03:09:15 +08:00
|
|
|
SrcReg = ActualSrc;
|
|
|
|
|
2013-04-19 06:15:08 +08:00
|
|
|
// Get the unique definition of SrcReg.
|
|
|
|
MachineInstr *MI = MRI->getUniqueVRegDef(SrcReg);
|
|
|
|
if (!MI) return false;
|
|
|
|
|
|
|
|
bool equalityOnly = false;
|
|
|
|
bool noSub = false;
|
|
|
|
if (isPPC64) {
|
|
|
|
if (is32BitSignedCompare) {
|
|
|
|
// We can perform this optimization only if MI is sign-extending.
|
2017-10-18 18:31:19 +08:00
|
|
|
if (isSignExtended(*MI))
|
2013-04-19 06:15:08 +08:00
|
|
|
noSub = true;
|
2017-10-18 18:31:19 +08:00
|
|
|
else
|
2013-04-19 06:15:08 +08:00
|
|
|
return false;
|
|
|
|
} else if (is32BitUnsignedCompare) {
|
|
|
|
// We can perform this optimization, equality only, if MI is
|
|
|
|
// zero-extending.
|
2017-10-18 18:31:19 +08:00
|
|
|
if (isZeroExtended(*MI)) {
|
2013-04-19 06:15:08 +08:00
|
|
|
noSub = true;
|
|
|
|
equalityOnly = true;
|
|
|
|
} else
|
|
|
|
return false;
|
2013-05-08 20:16:14 +08:00
|
|
|
} else
|
2013-04-19 06:15:08 +08:00
|
|
|
equalityOnly = is64BitUnsignedCompare;
|
2013-05-08 20:16:14 +08:00
|
|
|
} else
|
2013-04-19 06:15:08 +08:00
|
|
|
equalityOnly = is32BitUnsignedCompare;
|
|
|
|
|
|
|
|
if (equalityOnly) {
|
|
|
|
// We need to check the uses of the condition register in order to reject
|
|
|
|
// non-equality comparisons.
|
2017-07-18 21:31:40 +08:00
|
|
|
for (MachineRegisterInfo::use_instr_iterator
|
|
|
|
I = MRI->use_instr_begin(CRReg), IE = MRI->use_instr_end();
|
|
|
|
I != IE; ++I) {
|
2013-04-19 06:15:08 +08:00
|
|
|
MachineInstr *UseMI = &*I;
|
|
|
|
if (UseMI->getOpcode() == PPC::BCC) {
|
2017-07-27 16:14:48 +08:00
|
|
|
PPC::Predicate Pred = (PPC::Predicate)UseMI->getOperand(0).getImm();
|
|
|
|
unsigned PredCond = PPC::getPredicateCondition(Pred);
|
|
|
|
// We ignore hint bits when checking for non-equality comparisons.
|
|
|
|
if (PredCond != PPC::PRED_EQ && PredCond != PPC::PRED_NE)
|
2013-05-08 01:49:55 +08:00
|
|
|
return false;
|
2013-04-19 06:15:08 +08:00
|
|
|
} else if (UseMI->getOpcode() == PPC::ISEL ||
|
|
|
|
UseMI->getOpcode() == PPC::ISEL8) {
|
|
|
|
unsigned SubIdx = UseMI->getOperand(3).getSubReg();
|
2013-05-08 01:49:55 +08:00
|
|
|
if (SubIdx != PPC::sub_eq)
|
|
|
|
return false;
|
2013-04-19 06:15:08 +08:00
|
|
|
} else
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-05-08 01:49:55 +08:00
|
|
|
MachineBasicBlock::iterator I = CmpInstr;
|
2013-04-19 06:15:08 +08:00
|
|
|
|
|
|
|
// Scan forward to find the first use of the compare.
|
2016-06-30 08:01:54 +08:00
|
|
|
for (MachineBasicBlock::iterator EL = CmpInstr.getParent()->end(); I != EL;
|
|
|
|
++I) {
|
2013-04-19 06:15:08 +08:00
|
|
|
bool FoundUse = false;
|
2017-07-18 21:31:40 +08:00
|
|
|
for (MachineRegisterInfo::use_instr_iterator
|
|
|
|
J = MRI->use_instr_begin(CRReg), JE = MRI->use_instr_end();
|
|
|
|
J != JE; ++J)
|
2013-04-19 06:15:08 +08:00
|
|
|
if (&*J == &*I) {
|
|
|
|
FoundUse = true;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (FoundUse)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
Summary
PPC backend eliminates compare instructions by using record-form instructions in PPCInstrInfo::optimizeCompareInstr, which is called from peephole optimization pass.
This patch improves this optimization to eliminate more compare instructions in two types of common case.
- comparison against a constant 1 or -1
The record-form instructions set CR bit based on signed comparison against 0. So, the current implementation does not exploit the record-form instruction for comparison against a non-zero constant.
This patch enables record-form optimization for constant of 1 or -1 if possible; it changes the condition "greater than -1" into "greater than or equal to 0" and "less than 1" into "less than or equal to 0".
With this patch, compare can be eliminated in the following code sequence, as an example.
uint64_t a, b;
if ((a | b) & 0x8000000000000000ull) { ... }
else { ... }
- andi for 32-bit comparison on PPC64
Since record-form instructions execute 64-bit signed comparison and so we have limitation in eliminating 32-bit comparison, i.e. with cmplwi, using the record-form. The original implementation already has such checks but andi. is not recognized as an instruction which executes implicit zero extension and hence safe to convert into record-form if used for equality check.
%1 = and i32 %a, 10
%2 = icmp ne i32 %1, 0
br i1 %2, label %foo, label %bar
In this simple example, LLVM generates andi. + cmplwi + beq on PPC64.
This patch make it possible to eliminate the cmplwi for this case.
I added andi. for optimization targets if it is safe to do so.
Differential Revision: https://reviews.llvm.org/D30081
llvm-svn: 303500
2017-05-21 14:00:05 +08:00
|
|
|
SmallVector<std::pair<MachineOperand*, PPC::Predicate>, 4> PredsToUpdate;
|
|
|
|
SmallVector<std::pair<MachineOperand*, unsigned>, 4> SubRegsToUpdate;
|
|
|
|
|
2013-04-19 06:15:08 +08:00
|
|
|
// There are two possible candidates which can be changed to set CR[01].
|
|
|
|
// One is MI, the other is a SUB instruction.
|
|
|
|
// For CMPrr(r1,r2), we are looking for SUB(r1,r2) or SUB(r2,r1).
|
2014-04-25 13:30:21 +08:00
|
|
|
MachineInstr *Sub = nullptr;
|
2013-04-19 06:15:08 +08:00
|
|
|
if (SrcReg2 != 0)
|
|
|
|
// MI is not a candidate for CMPrr.
|
2014-04-25 13:30:21 +08:00
|
|
|
MI = nullptr;
|
2013-04-19 06:15:08 +08:00
|
|
|
// FIXME: Conservatively refuse to convert an instruction which isn't in the
|
|
|
|
// same BB as the comparison. This is to allow the check below to avoid calls
|
|
|
|
// (and other explicit clobbers); instead we should really check for these
|
|
|
|
// more explicitly (in at least a few predecessors).
|
Summary
PPC backend eliminates compare instructions by using record-form instructions in PPCInstrInfo::optimizeCompareInstr, which is called from peephole optimization pass.
This patch improves this optimization to eliminate more compare instructions in two types of common case.
- comparison against a constant 1 or -1
The record-form instructions set CR bit based on signed comparison against 0. So, the current implementation does not exploit the record-form instruction for comparison against a non-zero constant.
This patch enables record-form optimization for constant of 1 or -1 if possible; it changes the condition "greater than -1" into "greater than or equal to 0" and "less than 1" into "less than or equal to 0".
With this patch, compare can be eliminated in the following code sequence, as an example.
uint64_t a, b;
if ((a | b) & 0x8000000000000000ull) { ... }
else { ... }
- andi for 32-bit comparison on PPC64
Since record-form instructions execute 64-bit signed comparison and so we have limitation in eliminating 32-bit comparison, i.e. with cmplwi, using the record-form. The original implementation already has such checks but andi. is not recognized as an instruction which executes implicit zero extension and hence safe to convert into record-form if used for equality check.
%1 = and i32 %a, 10
%2 = icmp ne i32 %1, 0
br i1 %2, label %foo, label %bar
In this simple example, LLVM generates andi. + cmplwi + beq on PPC64.
This patch make it possible to eliminate the cmplwi for this case.
I added andi. for optimization targets if it is safe to do so.
Differential Revision: https://reviews.llvm.org/D30081
llvm-svn: 303500
2017-05-21 14:00:05 +08:00
|
|
|
else if (MI->getParent() != CmpInstr.getParent())
|
2013-04-19 06:15:08 +08:00
|
|
|
return false;
|
Summary
PPC backend eliminates compare instructions by using record-form instructions in PPCInstrInfo::optimizeCompareInstr, which is called from peephole optimization pass.
This patch improves this optimization to eliminate more compare instructions in two types of common case.
- comparison against a constant 1 or -1
The record-form instructions set CR bit based on signed comparison against 0. So, the current implementation does not exploit the record-form instruction for comparison against a non-zero constant.
This patch enables record-form optimization for constant of 1 or -1 if possible; it changes the condition "greater than -1" into "greater than or equal to 0" and "less than 1" into "less than or equal to 0".
With this patch, compare can be eliminated in the following code sequence, as an example.
uint64_t a, b;
if ((a | b) & 0x8000000000000000ull) { ... }
else { ... }
- andi for 32-bit comparison on PPC64
Since record-form instructions execute 64-bit signed comparison and so we have limitation in eliminating 32-bit comparison, i.e. with cmplwi, using the record-form. The original implementation already has such checks but andi. is not recognized as an instruction which executes implicit zero extension and hence safe to convert into record-form if used for equality check.
%1 = and i32 %a, 10
%2 = icmp ne i32 %1, 0
br i1 %2, label %foo, label %bar
In this simple example, LLVM generates andi. + cmplwi + beq on PPC64.
This patch make it possible to eliminate the cmplwi for this case.
I added andi. for optimization targets if it is safe to do so.
Differential Revision: https://reviews.llvm.org/D30081
llvm-svn: 303500
2017-05-21 14:00:05 +08:00
|
|
|
else if (Value != 0) {
|
2017-10-26 17:01:51 +08:00
|
|
|
// The record-form instructions set CR bit based on signed comparison
|
|
|
|
// against 0. We try to convert a compare against 1 or -1 into a compare
|
|
|
|
// against 0 to exploit record-form instructions. For example, we change
|
|
|
|
// the condition "greater than -1" into "greater than or equal to 0"
|
|
|
|
// and "less than 1" into "less than or equal to 0".
|
|
|
|
|
|
|
|
// Since we optimize comparison based on a specific branch condition,
|
|
|
|
// we don't optimize if condition code is used by more than once.
|
|
|
|
if (equalityOnly || !MRI->hasOneUse(CRReg))
|
|
|
|
return false;
|
Summary
PPC backend eliminates compare instructions by using record-form instructions in PPCInstrInfo::optimizeCompareInstr, which is called from peephole optimization pass.
This patch improves this optimization to eliminate more compare instructions in two types of common case.
- comparison against a constant 1 or -1
The record-form instructions set CR bit based on signed comparison against 0. So, the current implementation does not exploit the record-form instruction for comparison against a non-zero constant.
This patch enables record-form optimization for constant of 1 or -1 if possible; it changes the condition "greater than -1" into "greater than or equal to 0" and "less than 1" into "less than or equal to 0".
With this patch, compare can be eliminated in the following code sequence, as an example.
uint64_t a, b;
if ((a | b) & 0x8000000000000000ull) { ... }
else { ... }
- andi for 32-bit comparison on PPC64
Since record-form instructions execute 64-bit signed comparison and so we have limitation in eliminating 32-bit comparison, i.e. with cmplwi, using the record-form. The original implementation already has such checks but andi. is not recognized as an instruction which executes implicit zero extension and hence safe to convert into record-form if used for equality check.
%1 = and i32 %a, 10
%2 = icmp ne i32 %1, 0
br i1 %2, label %foo, label %bar
In this simple example, LLVM generates andi. + cmplwi + beq on PPC64.
This patch make it possible to eliminate the cmplwi for this case.
I added andi. for optimization targets if it is safe to do so.
Differential Revision: https://reviews.llvm.org/D30081
llvm-svn: 303500
2017-05-21 14:00:05 +08:00
|
|
|
|
2017-10-26 17:01:51 +08:00
|
|
|
MachineInstr *UseMI = &*MRI->use_instr_begin(CRReg);
|
|
|
|
if (UseMI->getOpcode() != PPC::BCC)
|
Summary
PPC backend eliminates compare instructions by using record-form instructions in PPCInstrInfo::optimizeCompareInstr, which is called from peephole optimization pass.
This patch improves this optimization to eliminate more compare instructions in two types of common case.
- comparison against a constant 1 or -1
The record-form instructions set CR bit based on signed comparison against 0. So, the current implementation does not exploit the record-form instruction for comparison against a non-zero constant.
This patch enables record-form optimization for constant of 1 or -1 if possible; it changes the condition "greater than -1" into "greater than or equal to 0" and "less than 1" into "less than or equal to 0".
With this patch, compare can be eliminated in the following code sequence, as an example.
uint64_t a, b;
if ((a | b) & 0x8000000000000000ull) { ... }
else { ... }
- andi for 32-bit comparison on PPC64
Since record-form instructions execute 64-bit signed comparison and so we have limitation in eliminating 32-bit comparison, i.e. with cmplwi, using the record-form. The original implementation already has such checks but andi. is not recognized as an instruction which executes implicit zero extension and hence safe to convert into record-form if used for equality check.
%1 = and i32 %a, 10
%2 = icmp ne i32 %1, 0
br i1 %2, label %foo, label %bar
In this simple example, LLVM generates andi. + cmplwi + beq on PPC64.
This patch make it possible to eliminate the cmplwi for this case.
I added andi. for optimization targets if it is safe to do so.
Differential Revision: https://reviews.llvm.org/D30081
llvm-svn: 303500
2017-05-21 14:00:05 +08:00
|
|
|
return false;
|
2017-10-26 17:01:51 +08:00
|
|
|
|
|
|
|
PPC::Predicate Pred = (PPC::Predicate)UseMI->getOperand(0).getImm();
|
|
|
|
unsigned PredCond = PPC::getPredicateCondition(Pred);
|
|
|
|
unsigned PredHint = PPC::getPredicateHint(Pred);
|
|
|
|
int16_t Immed = (int16_t)Value;
|
|
|
|
|
2018-06-13 16:54:13 +08:00
|
|
|
// When modifying the condition in the predicate, we propagate hint bits
|
2017-10-26 17:01:51 +08:00
|
|
|
// from the original predicate to the new one.
|
|
|
|
if (Immed == -1 && PredCond == PPC::PRED_GT)
|
|
|
|
// We convert "greater than -1" into "greater than or equal to 0",
|
|
|
|
// since we are assuming signed comparison by !equalityOnly
|
2019-07-12 22:58:15 +08:00
|
|
|
Pred = PPC::getPredicate(PPC::PRED_GE, PredHint);
|
2017-10-26 17:01:51 +08:00
|
|
|
else if (Immed == -1 && PredCond == PPC::PRED_LE)
|
|
|
|
// We convert "less than or equal to -1" into "less than 0".
|
2019-07-12 22:58:15 +08:00
|
|
|
Pred = PPC::getPredicate(PPC::PRED_LT, PredHint);
|
2017-10-26 17:01:51 +08:00
|
|
|
else if (Immed == 1 && PredCond == PPC::PRED_LT)
|
|
|
|
// We convert "less than 1" into "less than or equal to 0".
|
2019-07-12 22:58:15 +08:00
|
|
|
Pred = PPC::getPredicate(PPC::PRED_LE, PredHint);
|
2017-10-26 17:01:51 +08:00
|
|
|
else if (Immed == 1 && PredCond == PPC::PRED_GE)
|
|
|
|
// We convert "greater than or equal to 1" into "greater than 0".
|
2019-07-12 22:58:15 +08:00
|
|
|
Pred = PPC::getPredicate(PPC::PRED_GT, PredHint);
|
2017-10-26 17:01:51 +08:00
|
|
|
else
|
|
|
|
return false;
|
|
|
|
|
2019-07-12 22:58:15 +08:00
|
|
|
PredsToUpdate.push_back(std::make_pair(&(UseMI->getOperand(0)), Pred));
|
2013-04-19 06:15:08 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
// Search for Sub.
|
|
|
|
--I;
|
2013-05-08 01:49:55 +08:00
|
|
|
|
|
|
|
// Get ready to iterate backward from CmpInstr.
|
2016-06-30 08:01:54 +08:00
|
|
|
MachineBasicBlock::iterator E = MI, B = CmpInstr.getParent()->begin();
|
2013-05-08 01:49:55 +08:00
|
|
|
|
2013-04-19 06:15:08 +08:00
|
|
|
for (; I != E && !noSub; --I) {
|
|
|
|
const MachineInstr &Instr = *I;
|
|
|
|
unsigned IOpC = Instr.getOpcode();
|
|
|
|
|
2016-06-30 08:01:54 +08:00
|
|
|
if (&*I != &CmpInstr && (Instr.modifiesRegister(PPC::CR0, TRI) ||
|
|
|
|
Instr.readsRegister(PPC::CR0, TRI)))
|
2013-04-19 06:15:08 +08:00
|
|
|
// This instruction modifies or uses the record condition register after
|
|
|
|
// the one we want to change. While we could do this transformation, it
|
|
|
|
// would likely not be profitable. This transformation removes one
|
|
|
|
// instruction, and so even forcing RA to generate one move probably
|
|
|
|
// makes it unprofitable.
|
|
|
|
return false;
|
|
|
|
|
|
|
|
// Check whether CmpInstr can be made redundant by the current instruction.
|
|
|
|
if ((OpC == PPC::CMPW || OpC == PPC::CMPLW ||
|
|
|
|
OpC == PPC::CMPD || OpC == PPC::CMPLD) &&
|
|
|
|
(IOpC == PPC::SUBF || IOpC == PPC::SUBF8) &&
|
|
|
|
((Instr.getOperand(1).getReg() == SrcReg &&
|
|
|
|
Instr.getOperand(2).getReg() == SrcReg2) ||
|
|
|
|
(Instr.getOperand(1).getReg() == SrcReg2 &&
|
|
|
|
Instr.getOperand(2).getReg() == SrcReg))) {
|
|
|
|
Sub = &*I;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (I == B)
|
|
|
|
// The 'and' is below the comparison instruction.
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Return false if no candidates exist.
|
|
|
|
if (!MI && !Sub)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
// The single candidate is called MI.
|
|
|
|
if (!MI) MI = Sub;
|
|
|
|
|
|
|
|
int NewOpC = -1;
|
2017-10-18 18:31:19 +08:00
|
|
|
int MIOpC = MI->getOpcode();
|
2018-09-18 21:21:58 +08:00
|
|
|
if (MIOpC == PPC::ANDIo || MIOpC == PPC::ANDIo8 ||
|
|
|
|
MIOpC == PPC::ANDISo || MIOpC == PPC::ANDISo8)
|
2013-04-19 06:15:08 +08:00
|
|
|
NewOpC = MIOpC;
|
|
|
|
else {
|
|
|
|
NewOpC = PPC::getRecordFormOpcode(MIOpC);
|
|
|
|
if (NewOpC == -1 && PPC::getNonRecordFormOpcode(MIOpC) != -1)
|
|
|
|
NewOpC = MIOpC;
|
|
|
|
}
|
|
|
|
|
|
|
|
// FIXME: On the non-embedded POWER architectures, only some of the record
|
|
|
|
// forms are fast, and we should use only the fast ones.
|
|
|
|
|
|
|
|
// The defining instruction has a record form (or is already a record
|
|
|
|
// form). It is possible, however, that we'll need to reverse the condition
|
|
|
|
// code of the users.
|
|
|
|
if (NewOpC == -1)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
// If we have SUB(r1, r2) and CMP(r2, r1), the condition code based on CMP
|
|
|
|
// needs to be updated to be based on SUB. Push the condition code
|
|
|
|
// operands to OperandsToUpdate. If it is safe to remove CmpInstr, the
|
|
|
|
// condition code of these operands will be modified.
|
Summary
PPC backend eliminates compare instructions by using record-form instructions in PPCInstrInfo::optimizeCompareInstr, which is called from peephole optimization pass.
This patch improves this optimization to eliminate more compare instructions in two types of common case.
- comparison against a constant 1 or -1
The record-form instructions set CR bit based on signed comparison against 0. So, the current implementation does not exploit the record-form instruction for comparison against a non-zero constant.
This patch enables record-form optimization for constant of 1 or -1 if possible; it changes the condition "greater than -1" into "greater than or equal to 0" and "less than 1" into "less than or equal to 0".
With this patch, compare can be eliminated in the following code sequence, as an example.
uint64_t a, b;
if ((a | b) & 0x8000000000000000ull) { ... }
else { ... }
- andi for 32-bit comparison on PPC64
Since record-form instructions execute 64-bit signed comparison and so we have limitation in eliminating 32-bit comparison, i.e. with cmplwi, using the record-form. The original implementation already has such checks but andi. is not recognized as an instruction which executes implicit zero extension and hence safe to convert into record-form if used for equality check.
%1 = and i32 %a, 10
%2 = icmp ne i32 %1, 0
br i1 %2, label %foo, label %bar
In this simple example, LLVM generates andi. + cmplwi + beq on PPC64.
This patch make it possible to eliminate the cmplwi for this case.
I added andi. for optimization targets if it is safe to do so.
Differential Revision: https://reviews.llvm.org/D30081
llvm-svn: 303500
2017-05-21 14:00:05 +08:00
|
|
|
// Here, Value == 0 means we haven't converted comparison against 1 or -1 to
|
|
|
|
// comparison against 0, which may modify predicate.
|
2013-04-19 06:15:08 +08:00
|
|
|
bool ShouldSwap = false;
|
Summary
PPC backend eliminates compare instructions by using record-form instructions in PPCInstrInfo::optimizeCompareInstr, which is called from peephole optimization pass.
This patch improves this optimization to eliminate more compare instructions in two types of common case.
- comparison against a constant 1 or -1
The record-form instructions set CR bit based on signed comparison against 0. So, the current implementation does not exploit the record-form instruction for comparison against a non-zero constant.
This patch enables record-form optimization for constant of 1 or -1 if possible; it changes the condition "greater than -1" into "greater than or equal to 0" and "less than 1" into "less than or equal to 0".
With this patch, compare can be eliminated in the following code sequence, as an example.
uint64_t a, b;
if ((a | b) & 0x8000000000000000ull) { ... }
else { ... }
- andi for 32-bit comparison on PPC64
Since record-form instructions execute 64-bit signed comparison and so we have limitation in eliminating 32-bit comparison, i.e. with cmplwi, using the record-form. The original implementation already has such checks but andi. is not recognized as an instruction which executes implicit zero extension and hence safe to convert into record-form if used for equality check.
%1 = and i32 %a, 10
%2 = icmp ne i32 %1, 0
br i1 %2, label %foo, label %bar
In this simple example, LLVM generates andi. + cmplwi + beq on PPC64.
This patch make it possible to eliminate the cmplwi for this case.
I added andi. for optimization targets if it is safe to do so.
Differential Revision: https://reviews.llvm.org/D30081
llvm-svn: 303500
2017-05-21 14:00:05 +08:00
|
|
|
if (Sub && Value == 0) {
|
2013-04-19 06:15:08 +08:00
|
|
|
ShouldSwap = SrcReg2 != 0 && Sub->getOperand(1).getReg() == SrcReg2 &&
|
|
|
|
Sub->getOperand(2).getReg() == SrcReg;
|
|
|
|
|
|
|
|
// The operands to subf are the opposite of sub, so only in the fixed-point
|
|
|
|
// case, invert the order.
|
2013-05-08 20:16:14 +08:00
|
|
|
ShouldSwap = !ShouldSwap;
|
2013-04-19 06:15:08 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
if (ShouldSwap)
|
2014-03-14 07:12:04 +08:00
|
|
|
for (MachineRegisterInfo::use_instr_iterator
|
|
|
|
I = MRI->use_instr_begin(CRReg), IE = MRI->use_instr_end();
|
|
|
|
I != IE; ++I) {
|
2013-04-19 06:15:08 +08:00
|
|
|
MachineInstr *UseMI = &*I;
|
|
|
|
if (UseMI->getOpcode() == PPC::BCC) {
|
|
|
|
PPC::Predicate Pred = (PPC::Predicate) UseMI->getOperand(0).getImm();
|
2017-07-27 16:14:48 +08:00
|
|
|
unsigned PredCond = PPC::getPredicateCondition(Pred);
|
2013-04-20 06:08:38 +08:00
|
|
|
assert((!equalityOnly ||
|
2017-07-27 16:14:48 +08:00
|
|
|
PredCond == PPC::PRED_EQ || PredCond == PPC::PRED_NE) &&
|
2013-04-20 06:08:38 +08:00
|
|
|
"Invalid predicate for equality-only optimization");
|
2017-07-27 16:14:48 +08:00
|
|
|
(void)PredCond; // To suppress warning in release build.
|
2014-03-14 07:12:04 +08:00
|
|
|
PredsToUpdate.push_back(std::make_pair(&(UseMI->getOperand(0)),
|
2013-04-20 13:16:26 +08:00
|
|
|
PPC::getSwappedPredicate(Pred)));
|
2013-04-19 06:15:08 +08:00
|
|
|
} else if (UseMI->getOpcode() == PPC::ISEL ||
|
|
|
|
UseMI->getOpcode() == PPC::ISEL8) {
|
2013-04-20 06:08:38 +08:00
|
|
|
unsigned NewSubReg = UseMI->getOperand(3).getSubReg();
|
|
|
|
assert((!equalityOnly || NewSubReg == PPC::sub_eq) &&
|
|
|
|
"Invalid CR bit for equality-only optimization");
|
|
|
|
|
|
|
|
if (NewSubReg == PPC::sub_lt)
|
|
|
|
NewSubReg = PPC::sub_gt;
|
|
|
|
else if (NewSubReg == PPC::sub_gt)
|
|
|
|
NewSubReg = PPC::sub_lt;
|
|
|
|
|
2014-03-14 07:12:04 +08:00
|
|
|
SubRegsToUpdate.push_back(std::make_pair(&(UseMI->getOperand(3)),
|
2013-04-20 06:08:38 +08:00
|
|
|
NewSubReg));
|
2013-04-19 06:15:08 +08:00
|
|
|
} else // We need to abort on a user we don't understand.
|
|
|
|
return false;
|
|
|
|
}
|
Summary
PPC backend eliminates compare instructions by using record-form instructions in PPCInstrInfo::optimizeCompareInstr, which is called from peephole optimization pass.
This patch improves this optimization to eliminate more compare instructions in two types of common case.
- comparison against a constant 1 or -1
The record-form instructions set CR bit based on signed comparison against 0. So, the current implementation does not exploit the record-form instruction for comparison against a non-zero constant.
This patch enables record-form optimization for constant of 1 or -1 if possible; it changes the condition "greater than -1" into "greater than or equal to 0" and "less than 1" into "less than or equal to 0".
With this patch, compare can be eliminated in the following code sequence, as an example.
uint64_t a, b;
if ((a | b) & 0x8000000000000000ull) { ... }
else { ... }
- andi for 32-bit comparison on PPC64
Since record-form instructions execute 64-bit signed comparison and so we have limitation in eliminating 32-bit comparison, i.e. with cmplwi, using the record-form. The original implementation already has such checks but andi. is not recognized as an instruction which executes implicit zero extension and hence safe to convert into record-form if used for equality check.
%1 = and i32 %a, 10
%2 = icmp ne i32 %1, 0
br i1 %2, label %foo, label %bar
In this simple example, LLVM generates andi. + cmplwi + beq on PPC64.
This patch make it possible to eliminate the cmplwi for this case.
I added andi. for optimization targets if it is safe to do so.
Differential Revision: https://reviews.llvm.org/D30081
llvm-svn: 303500
2017-05-21 14:00:05 +08:00
|
|
|
assert(!(Value != 0 && ShouldSwap) &&
|
|
|
|
"Non-zero immediate support and ShouldSwap"
|
|
|
|
"may conflict in updating predicate");
|
2013-04-19 06:15:08 +08:00
|
|
|
|
|
|
|
// Create a new virtual register to hold the value of the CR set by the
|
|
|
|
// record-form instruction. If the instruction was not previously in
|
|
|
|
// record form, then set the kill flag on the CR.
|
2016-06-30 08:01:54 +08:00
|
|
|
CmpInstr.eraseFromParent();
|
2013-04-19 06:15:08 +08:00
|
|
|
|
|
|
|
MachineBasicBlock::iterator MII = MI;
|
2014-03-02 20:27:27 +08:00
|
|
|
BuildMI(*MI->getParent(), std::next(MII), MI->getDebugLoc(),
|
2013-04-19 06:15:08 +08:00
|
|
|
get(TargetOpcode::COPY), CRReg)
|
2013-05-08 20:16:14 +08:00
|
|
|
.addReg(PPC::CR0, MIOpC != NewOpC ? RegState::Kill : 0);
|
2013-04-19 06:15:08 +08:00
|
|
|
|
2016-04-12 11:10:52 +08:00
|
|
|
// Even if CR0 register were dead before, it is alive now since the
|
|
|
|
// instruction we just built uses it.
|
|
|
|
MI->clearRegisterDeads(PPC::CR0);
|
|
|
|
|
2013-04-19 06:15:08 +08:00
|
|
|
if (MIOpC != NewOpC) {
|
|
|
|
// We need to be careful here: we're replacing one instruction with
|
|
|
|
// another, and we need to make sure that we get all of the right
|
|
|
|
// implicit uses and defs. On the other hand, the caller may be holding
|
|
|
|
// an iterator to this instruction, and so we can't delete it (this is
|
|
|
|
// specifically the case if this is the instruction directly after the
|
|
|
|
// compare).
|
|
|
|
|
2018-03-06 03:27:16 +08:00
|
|
|
// Rotates are expensive instructions. If we're emitting a record-form
|
2018-09-18 21:43:16 +08:00
|
|
|
// rotate that can just be an andi/andis, we should just emit that.
|
|
|
|
if (MIOpC == PPC::RLWINM || MIOpC == PPC::RLWINM8) {
|
Apply llvm-prefer-register-over-unsigned from clang-tidy to LLVM
Summary:
This clang-tidy check is looking for unsigned integer variables whose initializer
starts with an implicit cast from llvm::Register and changes the type of the
variable to llvm::Register (dropping the llvm:: where possible).
Partial reverts in:
X86FrameLowering.cpp - Some functions return unsigned and arguably should be MCRegister
X86FixupLEAs.cpp - Some functions return unsigned and arguably should be MCRegister
X86FrameLowering.cpp - Some functions return unsigned and arguably should be MCRegister
HexagonBitSimplify.cpp - Function takes BitTracker::RegisterRef which appears to be unsigned&
MachineVerifier.cpp - Ambiguous operator==() given MCRegister and const Register
PPCFastISel.cpp - No Register::operator-=()
PeepholeOptimizer.cpp - TargetInstrInfo::optimizeLoadInstr() takes an unsigned&
MachineTraceMetrics.cpp - MachineTraceMetrics lacks a suitable constructor
Manual fixups in:
ARMFastISel.cpp - ARMEmitLoad() now takes a Register& instead of unsigned&
HexagonSplitDouble.cpp - Ternary operator was ambiguous between unsigned/Register
HexagonConstExtenders.cpp - Has a local class named Register, used llvm::Register instead of Register.
PPCFastISel.cpp - PPCEmitLoad() now takes a Register& instead of unsigned&
Depends on D65919
Reviewers: arsenm, bogner, craig.topper, RKSimon
Reviewed By: arsenm
Subscribers: RKSimon, craig.topper, lenary, aemerson, wuzish, jholewinski, MatzeB, qcolombet, dschuff, jyknight, dylanmckay, sdardis, nemanjai, jvesely, wdng, nhaehnle, sbc100, jgravelle-google, kristof.beyls, hiraditya, aheejin, kbarton, fedor.sergeev, javed.absar, asb, rbar, johnrusso, simoncook, apazos, sabuasal, niosHD, jrtc27, MaskRay, zzheng, edward-jones, atanasyan, rogfer01, MartinMosbeck, brucehoult, the_o, tpr, PkmX, jocewei, jsji, Petar.Avramovic, asbirlea, Jim, s.egerton, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D65962
llvm-svn: 369041
2019-08-16 03:22:08 +08:00
|
|
|
Register GPRRes = MI->getOperand(0).getReg();
|
2018-09-18 21:43:16 +08:00
|
|
|
int64_t SH = MI->getOperand(2).getImm();
|
2018-03-06 03:27:16 +08:00
|
|
|
int64_t MB = MI->getOperand(3).getImm();
|
|
|
|
int64_t ME = MI->getOperand(4).getImm();
|
2018-09-18 21:43:16 +08:00
|
|
|
// We can only do this if both the start and end of the mask are in the
|
|
|
|
// same halfword.
|
|
|
|
bool MBInLoHWord = MB >= 16;
|
|
|
|
bool MEInLoHWord = ME >= 16;
|
|
|
|
uint64_t Mask = ~0LLU;
|
|
|
|
|
|
|
|
if (MB <= ME && MBInLoHWord == MEInLoHWord && SH == 0) {
|
|
|
|
Mask = ((1LLU << (32 - MB)) - 1) & ~((1LLU << (31 - ME)) - 1);
|
|
|
|
// The mask value needs to shift right 16 if we're emitting andis.
|
|
|
|
Mask >>= MBInLoHWord ? 0 : 16;
|
|
|
|
NewOpC = MIOpC == PPC::RLWINM ?
|
|
|
|
(MBInLoHWord ? PPC::ANDIo : PPC::ANDISo) :
|
|
|
|
(MBInLoHWord ? PPC::ANDIo8 :PPC::ANDISo8);
|
|
|
|
} else if (MRI->use_empty(GPRRes) && (ME == 31) &&
|
|
|
|
(ME - MB + 1 == SH) && (MB >= 16)) {
|
|
|
|
// If we are rotating by the exact number of bits as are in the mask
|
|
|
|
// and the mask is in the least significant bits of the register,
|
|
|
|
// that's just an andis. (as long as the GPR result has no uses).
|
|
|
|
Mask = ((1LLU << 32) - 1) & ~((1LLU << (32 - SH)) - 1);
|
|
|
|
Mask >>= 16;
|
|
|
|
NewOpC = MIOpC == PPC::RLWINM ? PPC::ANDISo :PPC::ANDISo8;
|
|
|
|
}
|
|
|
|
// If we've set the mask, we can transform.
|
|
|
|
if (Mask != ~0LLU) {
|
2018-03-06 03:27:16 +08:00
|
|
|
MI->RemoveOperand(4);
|
|
|
|
MI->RemoveOperand(3);
|
|
|
|
MI->getOperand(2).setImm(Mask);
|
|
|
|
NumRcRotatesConvertedToRcAnd++;
|
|
|
|
}
|
|
|
|
} else if (MIOpC == PPC::RLDICL && MI->getOperand(2).getImm() == 0) {
|
|
|
|
int64_t MB = MI->getOperand(3).getImm();
|
|
|
|
if (MB >= 48) {
|
|
|
|
uint64_t Mask = (1LLU << (63 - MB + 1)) - 1;
|
|
|
|
NewOpC = PPC::ANDIo8;
|
|
|
|
MI->RemoveOperand(3);
|
|
|
|
MI->getOperand(2).setImm(Mask);
|
|
|
|
NumRcRotatesConvertedToRcAnd++;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-04-19 06:15:08 +08:00
|
|
|
const MCInstrDesc &NewDesc = get(NewOpC);
|
|
|
|
MI->setDesc(NewDesc);
|
|
|
|
|
|
|
|
if (NewDesc.ImplicitDefs)
|
2015-12-05 15:13:35 +08:00
|
|
|
for (const MCPhysReg *ImpDefs = NewDesc.getImplicitDefs();
|
2013-04-19 06:15:08 +08:00
|
|
|
*ImpDefs; ++ImpDefs)
|
|
|
|
if (!MI->definesRegister(*ImpDefs))
|
|
|
|
MI->addOperand(*MI->getParent()->getParent(),
|
|
|
|
MachineOperand::CreateReg(*ImpDefs, true, true));
|
|
|
|
if (NewDesc.ImplicitUses)
|
2015-12-05 15:13:35 +08:00
|
|
|
for (const MCPhysReg *ImpUses = NewDesc.getImplicitUses();
|
2013-04-19 06:15:08 +08:00
|
|
|
*ImpUses; ++ImpUses)
|
|
|
|
if (!MI->readsRegister(*ImpUses))
|
|
|
|
MI->addOperand(*MI->getParent()->getParent(),
|
|
|
|
MachineOperand::CreateReg(*ImpUses, false, true));
|
|
|
|
}
|
2016-06-02 04:31:07 +08:00
|
|
|
assert(MI->definesRegister(PPC::CR0) &&
|
|
|
|
"Record-form instruction does not define cr0?");
|
2013-04-19 06:15:08 +08:00
|
|
|
|
|
|
|
// Modify the condition code of operands in OperandsToUpdate.
|
|
|
|
// Since we have SUB(r1, r2) and CMP(r2, r1), the condition code needs to
|
|
|
|
// be changed from r2 > r1 to r1 < r2, from r2 < r1 to r1 > r2, etc.
|
2013-04-20 06:08:38 +08:00
|
|
|
for (unsigned i = 0, e = PredsToUpdate.size(); i < e; i++)
|
|
|
|
PredsToUpdate[i].first->setImm(PredsToUpdate[i].second);
|
2013-04-19 06:15:08 +08:00
|
|
|
|
2013-04-20 06:08:38 +08:00
|
|
|
for (unsigned i = 0, e = SubRegsToUpdate.size(); i < e; i++)
|
|
|
|
SubRegsToUpdate[i].first->setSubReg(SubRegsToUpdate[i].second);
|
2013-04-19 06:15:08 +08:00
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2008-04-17 04:10:13 +08:00
|
|
|
/// GetInstSize - Return the number of bytes of code the specified
|
|
|
|
/// instruction may be. This returns the maximum number of bytes.
|
|
|
|
///
|
2016-07-29 00:32:22 +08:00
|
|
|
unsigned PPCInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
|
2016-06-30 08:01:54 +08:00
|
|
|
unsigned Opcode = MI.getOpcode();
|
2014-02-02 14:12:27 +08:00
|
|
|
|
2019-06-01 07:02:13 +08:00
|
|
|
if (Opcode == PPC::INLINEASM || Opcode == PPC::INLINEASM_BR) {
|
2016-06-30 08:01:54 +08:00
|
|
|
const MachineFunction *MF = MI.getParent()->getParent();
|
|
|
|
const char *AsmStr = MI.getOperand(0).getSymbolName();
|
2009-08-23 04:48:53 +08:00
|
|
|
return getInlineAsmLength(AsmStr, *MF->getTarget().getMCAsmInfo());
|
Revert "r225811 - Revert "r225808 - [PowerPC] Add StackMap/PatchPoint support""
This re-applies r225808, fixed to avoid problems with SDAG dependencies along
with the preceding fix to ScheduleDAGSDNodes::RegDefIter::InitNodeNumDefs.
These problems caused the original regression tests to assert/segfault on many
(but not all) systems.
Original commit message:
This commit does two things:
1. Refactors PPCFastISel to use more of the common infrastructure for call
lowering (this lets us take advantage of this common code for lowering some
common intrinsics, stackmap/patchpoint among them).
2. Adds support for stackmap/patchpoint lowering. For the most part, this is
very similar to the support in the AArch64 target, with the obvious differences
(different registers, NOP instructions, etc.). The test cases are adapted
from the AArch64 test cases.
One difference of note is that the patchpoint call sequence takes 24 bytes, so
you can't use less than that (on AArch64 you can go down to 16). Also, as noted
in the docs, we take the patchpoint address to be the actual code address
(assuming the call is local in the TOC-sharing sense), which should yield
higher performance than generating the full cross-DSO indirect-call sequence
and is likely just as useful for JITed code (if not, we'll change it).
StackMaps and Patchpoints are still marked as experimental, and so this support
is doubly experimental. So go ahead and experiment!
llvm-svn: 225909
2015-01-14 09:07:51 +08:00
|
|
|
} else if (Opcode == TargetOpcode::STACKMAP) {
|
2016-08-24 07:33:29 +08:00
|
|
|
StackMapOpers Opers(&MI);
|
|
|
|
return Opers.getNumPatchBytes();
|
Revert "r225811 - Revert "r225808 - [PowerPC] Add StackMap/PatchPoint support""
This re-applies r225808, fixed to avoid problems with SDAG dependencies along
with the preceding fix to ScheduleDAGSDNodes::RegDefIter::InitNodeNumDefs.
These problems caused the original regression tests to assert/segfault on many
(but not all) systems.
Original commit message:
This commit does two things:
1. Refactors PPCFastISel to use more of the common infrastructure for call
lowering (this lets us take advantage of this common code for lowering some
common intrinsics, stackmap/patchpoint among them).
2. Adds support for stackmap/patchpoint lowering. For the most part, this is
very similar to the support in the AArch64 target, with the obvious differences
(different registers, NOP instructions, etc.). The test cases are adapted
from the AArch64 test cases.
One difference of note is that the patchpoint call sequence takes 24 bytes, so
you can't use less than that (on AArch64 you can go down to 16). Also, as noted
in the docs, we take the patchpoint address to be the actual code address
(assuming the call is local in the TOC-sharing sense), which should yield
higher performance than generating the full cross-DSO indirect-call sequence
and is likely just as useful for JITed code (if not, we'll change it).
StackMaps and Patchpoints are still marked as experimental, and so this support
is doubly experimental. So go ahead and experiment!
llvm-svn: 225909
2015-01-14 09:07:51 +08:00
|
|
|
} else if (Opcode == TargetOpcode::PATCHPOINT) {
|
2016-06-30 08:01:54 +08:00
|
|
|
PatchPointOpers Opers(&MI);
|
2016-08-24 07:33:29 +08:00
|
|
|
return Opers.getNumPatchBytes();
|
2014-02-02 14:12:27 +08:00
|
|
|
} else {
|
2017-03-28 06:40:51 +08:00
|
|
|
return get(Opcode).getSize();
|
2008-04-17 04:10:13 +08:00
|
|
|
}
|
|
|
|
}
|
2013-04-09 00:24:03 +08:00
|
|
|
|
2015-08-30 15:50:35 +08:00
|
|
|
std::pair<unsigned, unsigned>
|
|
|
|
PPCInstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const {
|
|
|
|
const unsigned Mask = PPCII::MO_ACCESS_MASK;
|
|
|
|
return std::make_pair(TF & Mask, TF & ~Mask);
|
|
|
|
}
|
|
|
|
|
|
|
|
ArrayRef<std::pair<unsigned, const char *>>
PPCInstrInfo::getSerializableDirectMachineOperandTargetFlags() const {
  using namespace PPCII;
  // Names used by MIR serialization for the mutually-exclusive ("direct")
  // target flags.
  static const std::pair<unsigned, const char *> Flags[] = {
      {MO_LO, "ppc-lo"},
      {MO_HA, "ppc-ha"},
      {MO_TPREL_LO, "ppc-tprel-lo"},
      {MO_TPREL_HA, "ppc-tprel-ha"},
      {MO_DTPREL_LO, "ppc-dtprel-lo"},
      {MO_TLSLD_LO, "ppc-tlsld-lo"},
      {MO_TOC_LO, "ppc-toc-lo"},
      {MO_TLS, "ppc-tls"}};
  return makeArrayRef(Flags);
}
|
|
|
|
|
|
|
|
ArrayRef<std::pair<unsigned, const char *>>
PPCInstrInfo::getSerializableBitmaskMachineOperandTargetFlags() const {
  using namespace PPCII;
  // Names used by MIR serialization for target flags that may be OR'd
  // together (bitmask flags).
  static const std::pair<unsigned, const char *> Flags[] = {
      {MO_PLT, "ppc-plt"},
      {MO_PIC_FLAG, "ppc-pic"},
      {MO_NLP_FLAG, "ppc-nlp"},
      {MO_NLP_HIDDEN_FLAG, "ppc-nlp-hidden"}};
  return makeArrayRef(Flags);
}
|
|
|
|
|
2017-11-20 22:38:30 +08:00
|
|
|
// Expand VSX Memory Pseudo instruction to either a VSX or a FP instruction.
|
|
|
|
// The VSX versions have the advantage of a full 64-register target whereas
|
|
|
|
// the FP ones have the advantage of lower latency and higher throughput. So
|
|
|
|
// what we are after is using the faster instructions in low register pressure
|
|
|
|
// situations and using the larger register file in high register pressure
|
|
|
|
// situations.
|
|
|
|
bool PPCInstrInfo::expandVSXMemPseudo(MachineInstr &MI) const {
|
2016-10-04 19:25:52 +08:00
|
|
|
unsigned UpperOpcode, LowerOpcode;
|
|
|
|
switch (MI.getOpcode()) {
|
|
|
|
case PPC::DFLOADf32:
|
|
|
|
UpperOpcode = PPC::LXSSP;
|
|
|
|
LowerOpcode = PPC::LFS;
|
|
|
|
break;
|
|
|
|
case PPC::DFLOADf64:
|
|
|
|
UpperOpcode = PPC::LXSD;
|
|
|
|
LowerOpcode = PPC::LFD;
|
|
|
|
break;
|
|
|
|
case PPC::DFSTOREf32:
|
|
|
|
UpperOpcode = PPC::STXSSP;
|
|
|
|
LowerOpcode = PPC::STFS;
|
|
|
|
break;
|
|
|
|
case PPC::DFSTOREf64:
|
|
|
|
UpperOpcode = PPC::STXSD;
|
|
|
|
LowerOpcode = PPC::STFD;
|
|
|
|
break;
|
2017-11-20 22:38:30 +08:00
|
|
|
case PPC::XFLOADf32:
|
|
|
|
UpperOpcode = PPC::LXSSPX;
|
|
|
|
LowerOpcode = PPC::LFSX;
|
|
|
|
break;
|
|
|
|
case PPC::XFLOADf64:
|
|
|
|
UpperOpcode = PPC::LXSDX;
|
|
|
|
LowerOpcode = PPC::LFDX;
|
|
|
|
break;
|
|
|
|
case PPC::XFSTOREf32:
|
|
|
|
UpperOpcode = PPC::STXSSPX;
|
|
|
|
LowerOpcode = PPC::STFSX;
|
|
|
|
break;
|
|
|
|
case PPC::XFSTOREf64:
|
|
|
|
UpperOpcode = PPC::STXSDX;
|
|
|
|
LowerOpcode = PPC::STFDX;
|
|
|
|
break;
|
|
|
|
case PPC::LIWAX:
|
|
|
|
UpperOpcode = PPC::LXSIWAX;
|
|
|
|
LowerOpcode = PPC::LFIWAX;
|
|
|
|
break;
|
|
|
|
case PPC::LIWZX:
|
|
|
|
UpperOpcode = PPC::LXSIWZX;
|
|
|
|
LowerOpcode = PPC::LFIWZX;
|
|
|
|
break;
|
|
|
|
case PPC::STIWX:
|
|
|
|
UpperOpcode = PPC::STXSIWX;
|
|
|
|
LowerOpcode = PPC::STFIWX;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
llvm_unreachable("Unknown Operation!");
|
2016-10-04 19:25:52 +08:00
|
|
|
}
|
2017-11-20 22:38:30 +08:00
|
|
|
|
Apply llvm-prefer-register-over-unsigned from clang-tidy to LLVM
Summary:
This clang-tidy check is looking for unsigned integer variables whose initializer
starts with an implicit cast from llvm::Register and changes the type of the
variable to llvm::Register (dropping the llvm:: where possible).
Partial reverts in:
X86FrameLowering.cpp - Some functions return unsigned and arguably should be MCRegister
X86FixupLEAs.cpp - Some functions return unsigned and arguably should be MCRegister
X86FrameLowering.cpp - Some functions return unsigned and arguably should be MCRegister
HexagonBitSimplify.cpp - Function takes BitTracker::RegisterRef which appears to be unsigned&
MachineVerifier.cpp - Ambiguous operator==() given MCRegister and const Register
PPCFastISel.cpp - No Register::operator-=()
PeepholeOptimizer.cpp - TargetInstrInfo::optimizeLoadInstr() takes an unsigned&
MachineTraceMetrics.cpp - MachineTraceMetrics lacks a suitable constructor
Manual fixups in:
ARMFastISel.cpp - ARMEmitLoad() now takes a Register& instead of unsigned&
HexagonSplitDouble.cpp - Ternary operator was ambiguous between unsigned/Register
HexagonConstExtenders.cpp - Has a local class named Register, used llvm::Register instead of Register.
PPCFastISel.cpp - PPCEmitLoad() now takes a Register& instead of unsigned&
Depends on D65919
Reviewers: arsenm, bogner, craig.topper, RKSimon
Reviewed By: arsenm
Subscribers: RKSimon, craig.topper, lenary, aemerson, wuzish, jholewinski, MatzeB, qcolombet, dschuff, jyknight, dylanmckay, sdardis, nemanjai, jvesely, wdng, nhaehnle, sbc100, jgravelle-google, kristof.beyls, hiraditya, aheejin, kbarton, fedor.sergeev, javed.absar, asb, rbar, johnrusso, simoncook, apazos, sabuasal, niosHD, jrtc27, MaskRay, zzheng, edward-jones, atanasyan, rogfer01, MartinMosbeck, brucehoult, the_o, tpr, PkmX, jocewei, jsji, Petar.Avramovic, asbirlea, Jim, s.egerton, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D65962
llvm-svn: 369041
2019-08-16 03:22:08 +08:00
|
|
|
Register TargetReg = MI.getOperand(0).getReg();
|
2016-10-04 19:25:52 +08:00
|
|
|
unsigned Opcode;
|
|
|
|
if ((TargetReg >= PPC::F0 && TargetReg <= PPC::F31) ||
|
|
|
|
(TargetReg >= PPC::VSL0 && TargetReg <= PPC::VSL31))
|
|
|
|
Opcode = LowerOpcode;
|
|
|
|
else
|
|
|
|
Opcode = UpperOpcode;
|
|
|
|
MI.setDesc(get(Opcode));
|
|
|
|
return true;
|
2017-11-20 22:38:30 +08:00
|
|
|
}
|
|
|
|
|
2018-06-19 14:54:51 +08:00
|
|
|
static bool isAnImmediateOperand(const MachineOperand &MO) {
|
|
|
|
return MO.isCPI() || MO.isGlobal() || MO.isImm();
|
|
|
|
}
|
|
|
|
|
2017-11-20 22:38:30 +08:00
|
|
|
/// Lower target pseudo-instructions that survive until after register
/// allocation into real machine instructions.  Returns true if MI was
/// rewritten, false if it is not one of the pseudos handled here.
bool PPCInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
  auto &MBB = *MI.getParent();
  auto DL = MI.getDebugLoc();

  switch (MI.getOpcode()) {
  case TargetOpcode::LOAD_STACK_GUARD: {
    // The stack guard lives at a fixed negative offset from the thread
    // pointer register (X13 on 64-bit, R2 on 32-bit); lower to a plain load.
    assert(Subtarget.isTargetLinux() &&
           "Only Linux target is expected to contain LOAD_STACK_GUARD");
    const bool Is64 = Subtarget.isPPC64();
    const int64_t Offset = Is64 ? -0x7010 : -0x7008;
    const unsigned BaseReg = Is64 ? PPC::X13 : PPC::R2;
    MI.setDesc(get(Is64 ? PPC::LD : PPC::LWZ));
    MachineInstrBuilder(*MI.getParent()->getParent(), MI)
        .addImm(Offset)
        .addReg(BaseReg);
    return true;
  }
  case PPC::DFLOADf32:
  case PPC::DFLOADf64:
  case PPC::DFSTOREf32:
  case PPC::DFSTOREf64: {
    // D-form (register + immediate) VSX memory pseudos; require P9 vector.
    assert(Subtarget.hasP9Vector() &&
           "Invalid D-Form Pseudo-ops on Pre-P9 target.");
    assert(MI.getOperand(2).isReg() &&
           isAnImmediateOperand(MI.getOperand(1)) &&
           "D-form op must have register and immediate operands");
    return expandVSXMemPseudo(MI);
  }
  case PPC::XFLOADf32:
  case PPC::XFSTOREf32:
  case PPC::LIWAX:
  case PPC::LIWZX:
  case PPC::STIWX: {
    // X-form (register + register) pseudos that require P8 vector support.
    assert(Subtarget.hasP8Vector() &&
           "Invalid X-Form Pseudo-ops on Pre-P8 target.");
    assert(MI.getOperand(2).isReg() && MI.getOperand(1).isReg() &&
           "X-form op must have register and register operands");
    return expandVSXMemPseudo(MI);
  }
  case PPC::XFLOADf64:
  case PPC::XFSTOREf64: {
    // X-form double-precision pseudos only need base VSX.
    assert(Subtarget.hasVSX() &&
           "Invalid X-Form Pseudo-ops on target that has no VSX.");
    assert(MI.getOperand(2).isReg() && MI.getOperand(1).isReg() &&
           "X-form op must have register and register operands");
    return expandVSXMemPseudo(MI);
  }
  case PPC::SPILLTOVSR_LD: {
    // Reload of a SPILLTOVSRRC value: choose the form matching the register
    // bank the allocator picked for the destination.
    Register DstReg = MI.getOperand(0).getReg();
    if (PPC::VSFRCRegClass.contains(DstReg)) {
      MI.setDesc(get(PPC::DFLOADf64));
      // Recurse to expand the resulting D-form pseudo as well.
      return expandPostRAPseudo(MI);
    }
    MI.setDesc(get(PPC::LD));
    return true;
  }
  case PPC::SPILLTOVSR_ST: {
    Register SrcReg = MI.getOperand(0).getReg();
    if (PPC::VSFRCRegClass.contains(SrcReg)) {
      NumStoreSPILLVSRRCAsVec++;
      MI.setDesc(get(PPC::DFSTOREf64));
      // Recurse to expand the resulting D-form pseudo as well.
      return expandPostRAPseudo(MI);
    }
    NumStoreSPILLVSRRCAsGpr++;
    MI.setDesc(get(PPC::STD));
    return true;
  }
  case PPC::SPILLTOVSR_LDX: {
    // Indexed reload: the chosen opcode is already a real instruction.
    Register DstReg = MI.getOperand(0).getReg();
    MI.setDesc(
        get(PPC::VSFRCRegClass.contains(DstReg) ? PPC::LXSDX : PPC::LDX));
    return true;
  }
  case PPC::SPILLTOVSR_STX: {
    Register SrcReg = MI.getOperand(0).getReg();
    if (PPC::VSFRCRegClass.contains(SrcReg)) {
      NumStoreSPILLVSRRCAsVec++;
      MI.setDesc(get(PPC::STXSDX));
    } else {
      NumStoreSPILLVSRRCAsGpr++;
      MI.setDesc(get(PPC::STDX));
    }
    return true;
  }
  case PPC::CFENCE8: {
    // Lower CFENCE8 into "cmpd cr7, val, val" followed by a CTRL_DEP
    // conditional op on cr7 and an isync (MI itself is rewritten to the
    // isync, dropping its register operand).
    auto Val = MI.getOperand(0).getReg();
    BuildMI(MBB, MI, DL, get(PPC::CMPD), PPC::CR7).addReg(Val).addReg(Val);
    BuildMI(MBB, MI, DL, get(PPC::CTRL_DEP))
        .addImm(PPC::PRED_NE_MINUS)
        .addReg(PPC::CR7)
        .addImm(1);
    MI.setDesc(get(PPC::ISYNC));
    MI.RemoveOperand(0);
    return true;
  }
  }
  return false;
}
|
2016-10-04 14:59:23 +08:00
|
|
|
|
2017-12-15 15:27:53 +08:00
|
|
|
// Essentially a compile-time implementation of a compare->isel sequence.
|
|
|
|
// It takes two constants to compare, along with the true/false registers
|
|
|
|
// and the comparison type (as a subreg to a CR field) and returns one
|
|
|
|
// of the true/false registers, depending on the comparison results.
|
|
|
|
static unsigned selectReg(int64_t Imm1, int64_t Imm2, unsigned CompareOpc,
|
|
|
|
unsigned TrueReg, unsigned FalseReg,
|
|
|
|
unsigned CRSubReg) {
|
|
|
|
// Signed comparisons. The immediates are assumed to be sign-extended.
|
|
|
|
if (CompareOpc == PPC::CMPWI || CompareOpc == PPC::CMPDI) {
|
|
|
|
switch (CRSubReg) {
|
|
|
|
default: llvm_unreachable("Unknown integer comparison type.");
|
|
|
|
case PPC::sub_lt:
|
|
|
|
return Imm1 < Imm2 ? TrueReg : FalseReg;
|
|
|
|
case PPC::sub_gt:
|
|
|
|
return Imm1 > Imm2 ? TrueReg : FalseReg;
|
|
|
|
case PPC::sub_eq:
|
|
|
|
return Imm1 == Imm2 ? TrueReg : FalseReg;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
// Unsigned comparisons.
|
|
|
|
else if (CompareOpc == PPC::CMPLWI || CompareOpc == PPC::CMPLDI) {
|
|
|
|
switch (CRSubReg) {
|
|
|
|
default: llvm_unreachable("Unknown integer comparison type.");
|
|
|
|
case PPC::sub_lt:
|
|
|
|
return (uint64_t)Imm1 < (uint64_t)Imm2 ? TrueReg : FalseReg;
|
|
|
|
case PPC::sub_gt:
|
|
|
|
return (uint64_t)Imm1 > (uint64_t)Imm2 ? TrueReg : FalseReg;
|
|
|
|
case PPC::sub_eq:
|
|
|
|
return Imm1 == Imm2 ? TrueReg : FalseReg;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return PPC::NoRegister;
|
|
|
|
}
|
|
|
|
|
2018-12-28 11:38:09 +08:00
|
|
|
// Rewrite register operand OpNo of MI into the immediate Imm, and drop any
// leftover implicit use of the register that the operand referenced.
void PPCInstrInfo::replaceInstrOperandWithImm(MachineInstr &MI,
                                              unsigned OpNo,
                                              int64_t Imm) const {
  assert(MI.getOperand(OpNo).isReg() && "Operand must be a REG");
  // Remember which register we are replacing before mutating the operand.
  Register ReplacedReg = MI.getOperand(OpNo).getReg();
  MI.getOperand(OpNo).ChangeToImmediate(Imm);

  if (empty(MI.implicit_operands()))
    return;

  // Make sure the MI no longer carries an implicit use of the replaced
  // register.
  const TargetRegisterInfo *TRI = &getRegisterInfo();
  int UseOpIdx = MI.findRegisterUseOperandIdx(ReplacedReg, false, TRI);
  if (UseOpIdx < 0)
    return;
  if (MI.getOperand(UseOpIdx).isImplicit())
    // The operands must always be in the following order:
    // - explicit reg defs,
    // - other explicit operands (reg uses, immediates, etc.),
    // - implicit reg defs
    // - implicit reg uses
    // Therefore, removing the implicit operand won't change the explicit
    // operands layout.
    MI.RemoveOperand(UseOpIdx);
}
|
|
|
|
|
2017-12-15 15:27:53 +08:00
|
|
|
// Replace an instruction with one that materializes a constant (and sets
|
|
|
|
// CR0 if the original instruction was a record-form instruction).
|
|
|
|
void PPCInstrInfo::replaceInstrWithLI(MachineInstr &MI,
|
|
|
|
const LoadImmediateInfo &LII) const {
|
|
|
|
// Remove existing operands.
|
|
|
|
int OperandToKeep = LII.SetCR ? 1 : 0;
|
|
|
|
for (int i = MI.getNumOperands() - 1; i > OperandToKeep; i--)
|
|
|
|
MI.RemoveOperand(i);
|
|
|
|
|
|
|
|
// Replace the instruction.
|
2017-12-15 17:51:34 +08:00
|
|
|
if (LII.SetCR) {
|
2017-12-15 15:27:53 +08:00
|
|
|
MI.setDesc(get(LII.Is64Bit ? PPC::ANDIo8 : PPC::ANDIo));
|
2017-12-15 17:51:34 +08:00
|
|
|
// Set the immediate.
|
|
|
|
MachineInstrBuilder(*MI.getParent()->getParent(), MI)
|
|
|
|
.addImm(LII.Imm).addReg(PPC::CR0, RegState::ImplicitDefine);
|
|
|
|
return;
|
|
|
|
}
|
2017-12-15 15:27:53 +08:00
|
|
|
else
|
|
|
|
MI.setDesc(get(LII.Is64Bit ? PPC::LI8 : PPC::LI));
|
|
|
|
|
|
|
|
// Set the immediate.
|
|
|
|
MachineInstrBuilder(*MI.getParent()->getParent(), MI)
|
|
|
|
.addImm(LII.Imm);
|
|
|
|
}
|
|
|
|
|
2019-07-25 15:47:52 +08:00
|
|
|
// Scan upward within MI's basic block (post-RA only) for the instruction
// that defines Reg. Returns the defining instruction, or nullptr if none is
// found in the block. SeenIntermediateUse reports whether any instruction
// between MI and the def (exclusive) reads Reg.
MachineInstr *PPCInstrInfo::getDefMIPostRA(unsigned Reg, MachineInstr &MI,
                                           bool &SeenIntermediateUse) const {
  assert(!MI.getParent()->getParent()->getRegInfo().isSSA() &&
         "Should be called after register allocation.");
  const TargetRegisterInfo *TRI = &getRegisterInfo();
  SeenIntermediateUse = false;

  // Start from the instruction immediately above MI and walk to the block
  // entry.
  MachineBasicBlock::reverse_iterator I = MI;
  MachineBasicBlock::reverse_iterator End = MI.getParent()->rend();
  for (++I; I != End; ++I) {
    if (I->modifiesRegister(Reg, TRI))
      return &*I;
    if (I->readsRegister(Reg, TRI))
      SeenIntermediateUse = true;
  }
  return nullptr;
}
|
|
|
|
|
2018-08-20 10:52:55 +08:00
|
|
|
// Find an instruction whose result could be forwarded into an operand of MI
// as an immediate (a load-immediate in SSA, or an add/load-immediate
// post-RA). On success, OpNoForForwarding is set to the operand index of MI
// that the def feeds; otherwise it stays ~0U and nullptr is returned.
MachineInstr *PPCInstrInfo::getForwardingDefMI(
  MachineInstr &MI,
  unsigned &OpNoForForwarding,
  bool &SeenIntermediateUse) const {
  OpNoForForwarding = ~0U;
  MachineInstr *DefMI = nullptr;
  MachineRegisterInfo *MRI = &MI.getParent()->getParent()->getRegInfo();
  const TargetRegisterInfo *TRI = &getRegisterInfo();
  // If we're in SSA, get the defs through the MRI. Otherwise, only look
  // within the basic block to see if the register is defined using an LI/LI8.
  if (MRI->isSSA()) {
    // Walk the use operands (operand 0 is the def) and look for one whose
    // (copy-chased) virtual register is defined by LI/LI8.
    for (int i = 1, e = MI.getNumOperands(); i < e; i++) {
      if (!MI.getOperand(i).isReg())
        continue;
      Register Reg = MI.getOperand(i).getReg();
      if (!Register::isVirtualRegister(Reg))
        continue;
      // Look through COPY-like instructions to the originating register.
      unsigned TrueReg = TRI->lookThruCopyLike(Reg, MRI);
      if (Register::isVirtualRegister(TrueReg)) {
        DefMI = MRI->getVRegDef(TrueReg);
        if (DefMI->getOpcode() == PPC::LI || DefMI->getOpcode() == PPC::LI8) {
          OpNoForForwarding = i;
          break;
        }
      }
    }
  } else {
    // Looking back through the definition for each operand could be expensive,
    // so exit early if this isn't an instruction that either has an immediate
    // form or is already an immediate form that we can handle.
    ImmInstrInfo III;
    unsigned Opc = MI.getOpcode();
    bool ConvertibleImmForm =
      Opc == PPC::CMPWI || Opc == PPC::CMPLWI ||
      Opc == PPC::CMPDI || Opc == PPC::CMPLDI ||
      Opc == PPC::ADDI || Opc == PPC::ADDI8 ||
      Opc == PPC::ORI || Opc == PPC::ORI8 ||
      Opc == PPC::XORI || Opc == PPC::XORI8 ||
      Opc == PPC::RLDICL || Opc == PPC::RLDICLo ||
      Opc == PPC::RLDICL_32 || Opc == PPC::RLDICL_32_64 ||
      Opc == PPC::RLWINM || Opc == PPC::RLWINMo ||
      Opc == PPC::RLWINM8 || Opc == PPC::RLWINM8o;
    bool IsVFReg = (MI.getNumOperands() && MI.getOperand(0).isReg())
                       ? isVFRegister(MI.getOperand(0).getReg())
                       : false;
    if (!ConvertibleImmForm && !instrHasImmForm(Opc, IsVFReg, III, true))
      return nullptr;

    // Don't convert or %X, %Y, %Y since that's just a register move.
    if ((Opc == PPC::OR || Opc == PPC::OR8) &&
        MI.getOperand(1).getReg() == MI.getOperand(2).getReg())
      return nullptr;
    for (int i = 1, e = MI.getNumOperands(); i < e; i++) {
      MachineOperand &MO = MI.getOperand(i);
      SeenIntermediateUse = false;
      // Only explicit register uses can be fed by a forwardable def.
      if (MO.isReg() && MO.isUse() && !MO.isImplicit()) {
        Register Reg = MI.getOperand(i).getReg();
        // If we see another use of this reg between the def and the MI,
        // we want to flag it so the def isn't deleted.
        // NOTE(review): this inner DefMI intentionally shadows the outer one;
        // the post-RA path always returns from inside this loop, so the final
        // return below only ever reports the SSA path's DefMI.
        MachineInstr *DefMI = getDefMIPostRA(Reg, MI, SeenIntermediateUse);
        if (DefMI) {
          // Is this register defined by some form of add-immediate (including
          // load-immediate) within this basic block?
          switch (DefMI->getOpcode()) {
          default:
            break;
          case PPC::LI:
          case PPC::LI8:
          case PPC::ADDItocL:
          case PPC::ADDI:
          case PPC::ADDI8:
            OpNoForForwarding = i;
            return DefMI;
          }
        }
      }
    }
  }
  // ~0U means no forwardable operand was found (SSA path only gets here).
  return OpNoForForwarding == ~0U ? nullptr : DefMI;
}
|
|
|
|
|
2018-03-27 01:39:18 +08:00
|
|
|
// Return the per-spill-kind table of store opcodes used for spilling,
// selecting the Power 9 row when the subtarget has P9 vector support and the
// Power 8 row otherwise. Rows are indexed by the SOK_* spill-opcode kinds.
const unsigned *PPCInstrInfo::getStoreOpcodesForSpillArray() const {
  static const unsigned OpcodesForSpill[2][SOK_LastOpcodeSpill] = {
      // Power 8
      {PPC::STW, PPC::STD, PPC::STFD, PPC::STFS, PPC::SPILL_CR,
       PPC::SPILL_CRBIT, PPC::STVX, PPC::STXVD2X, PPC::STXSDX, PPC::STXSSPX,
       PPC::SPILL_VRSAVE, PPC::QVSTFDX, PPC::QVSTFSXs, PPC::QVSTFDXb,
       PPC::SPILLTOVSR_ST, PPC::EVSTDD, PPC::SPESTW},
      // Power 9
      // NOTE(review): this row has fewer explicit initializers than the
      // Power 8 row (no SPE entries); the trailing slots are zero-initialized.
      // Presumably P9 and SPE never coexist — confirm before relying on it.
      {PPC::STW, PPC::STD, PPC::STFD, PPC::STFS, PPC::SPILL_CR,
       PPC::SPILL_CRBIT, PPC::STVX, PPC::STXV, PPC::DFSTOREf64, PPC::DFSTOREf32,
       PPC::SPILL_VRSAVE, PPC::QVSTFDX, PPC::QVSTFSXs, PPC::QVSTFDXb,
       PPC::SPILLTOVSR_ST}};

  return OpcodesForSpill[(Subtarget.hasP9Vector()) ? 1 : 0];
}
|
|
|
|
|
|
|
|
// Return the per-spill-kind table of load opcodes used for reloading spilled
// values, selecting the Power 9 row when the subtarget has P9 vector support
// and the Power 8 row otherwise. Rows are indexed by the SOK_* kinds and
// mirror the store table in getStoreOpcodesForSpillArray().
const unsigned *PPCInstrInfo::getLoadOpcodesForSpillArray() const {
  static const unsigned OpcodesForSpill[2][SOK_LastOpcodeSpill] = {
      // Power 8
      {PPC::LWZ, PPC::LD, PPC::LFD, PPC::LFS, PPC::RESTORE_CR,
       PPC::RESTORE_CRBIT, PPC::LVX, PPC::LXVD2X, PPC::LXSDX, PPC::LXSSPX,
       PPC::RESTORE_VRSAVE, PPC::QVLFDX, PPC::QVLFSXs, PPC::QVLFDXb,
       PPC::SPILLTOVSR_LD, PPC::EVLDD, PPC::SPELWZ},
      // Power 9
      // NOTE(review): like the store table, the SPE entries are omitted here
      // and the trailing slots are zero-initialized — confirm P9 and SPE are
      // mutually exclusive.
      {PPC::LWZ, PPC::LD, PPC::LFD, PPC::LFS, PPC::RESTORE_CR,
       PPC::RESTORE_CRBIT, PPC::LVX, PPC::LXV, PPC::DFLOADf64, PPC::DFLOADf32,
       PPC::RESTORE_VRSAVE, PPC::QVLFDX, PPC::QVLFSXs, PPC::QVLFDXb,
       PPC::SPILLTOVSR_LD}};

  return OpcodesForSpill[(Subtarget.hasP9Vector()) ? 1 : 0];
}
|
|
|
|
|
2019-03-05 12:56:54 +08:00
|
|
|
// Fix up the kill/dead flags for RegNo over the range [StartMI, EndMI]
// (post-RA only; a no-op in SSA where flags are recomputed anyway). The last
// use of RegNo in the range is marked killed — or its def in StartMI is
// marked dead if no use remains — and stale kill flags in between are
// cleared so RegNo's liveness ends at EndMI.
void PPCInstrInfo::fixupIsDeadOrKill(MachineInstr &StartMI, MachineInstr &EndMI,
                                     unsigned RegNo) const {
  const MachineRegisterInfo &MRI =
      StartMI.getParent()->getParent()->getRegInfo();
  if (MRI.isSSA())
    return;

  // Instructions between [StartMI, EndMI] should be in same basic block.
  assert((StartMI.getParent() == EndMI.getParent()) &&
         "Instructions are not in same basic block");

  bool IsKillSet = false;

  // Clear the kill flag on operand Index of MI if it is a use overlapping
  // RegNo.
  auto clearOperandKillInfo = [=] (MachineInstr &MI, unsigned Index) {
    MachineOperand &MO = MI.getOperand(Index);
    if (MO.isReg() && MO.isUse() && MO.isKill() &&
        getRegisterInfo().regsOverlap(MO.getReg(), RegNo))
      MO.setIsKill(false);
  };

  // Set killed flag for EndMI.
  // No need to do anything if EndMI defines RegNo.
  int UseIndex =
      EndMI.findRegisterUseOperandIdx(RegNo, false, &getRegisterInfo());
  if (UseIndex != -1) {
    EndMI.getOperand(UseIndex).setIsKill(true);
    IsKillSet = true;
    // Clear killed flag for other EndMI operands related to RegNo. In some
    // unexpected cases, killed may be set multiple times for same register
    // operand in same MI.
    for (int i = 0, e = EndMI.getNumOperands(); i != e; ++i)
      if (i != UseIndex)
        clearOperandKillInfo(EndMI, i);
  }

  // Walking the inst in reverse order (EndMI -> StartMI].
  MachineBasicBlock::reverse_iterator It = EndMI;
  MachineBasicBlock::reverse_iterator E = EndMI.getParent()->rend();
  // EndMI has been handled above, skip it here.
  It++;
  MachineOperand *MO = nullptr;
  for (; It != E; ++It) {
    // Skip instructions which could not be a def/use of RegNo.
    if (It->isDebugInstr() || It->isPosition())
      continue;

    // Clear killed flag for all It operands related to RegNo. In some
    // unexpected cases, killed may be set multiple times for same register
    // operand in same MI.
    for (int i = 0, e = It->getNumOperands(); i != e; ++i)
      clearOperandKillInfo(*It, i);

    // If killed is not set, set killed for its last use or set dead for its def
    // if no use found.
    if (!IsKillSet) {
      if ((MO = It->findRegisterUseOperand(RegNo, false, &getRegisterInfo()))) {
        // Use found, set it killed.
        IsKillSet = true;
        MO->setIsKill(true);
        continue;
      } else if ((MO = It->findRegisterDefOperand(RegNo, false, true,
                                                  &getRegisterInfo()))) {
        // No use found, set dead for its def.
        assert(&*It == &StartMI && "No new def between StartMI and EndMI.");
        MO->setIsDead(true);
        break;
      }
    }

    // Stop once the start of the range has been processed.
    if ((&*It) == &StartMI)
      break;
  }
  // Ensure RegNo liveness is killed after EndMI.
  assert((IsKillSet || (MO && MO->isDead())) &&
         "RegNo should be killed or dead");
}
|
|
|
|
|
2017-12-15 15:27:53 +08:00
|
|
|
// If this instruction has an immediate form and one of its operands is a
|
2018-08-20 10:52:55 +08:00
|
|
|
// result of a load-immediate or an add-immediate, convert it to
|
|
|
|
// the immediate form if the constant is in range.
|
2017-12-15 15:27:53 +08:00
|
|
|
bool PPCInstrInfo::convertToImmediateForm(MachineInstr &MI,
|
|
|
|
MachineInstr **KilledDef) const {
|
|
|
|
MachineFunction *MF = MI.getParent()->getParent();
|
|
|
|
MachineRegisterInfo *MRI = &MF->getRegInfo();
|
|
|
|
bool PostRA = !MRI->isSSA();
|
|
|
|
bool SeenIntermediateUse = true;
|
2018-08-20 10:52:55 +08:00
|
|
|
unsigned ForwardingOperand = ~0U;
|
|
|
|
MachineInstr *DefMI = getForwardingDefMI(MI, ForwardingOperand,
|
|
|
|
SeenIntermediateUse);
|
|
|
|
if (!DefMI)
|
|
|
|
return false;
|
|
|
|
assert(ForwardingOperand < MI.getNumOperands() &&
|
|
|
|
"The forwarding operand needs to be valid at this point");
|
2019-03-05 12:56:54 +08:00
|
|
|
bool IsForwardingOperandKilled = MI.getOperand(ForwardingOperand).isKill();
|
|
|
|
bool KillFwdDefMI = !SeenIntermediateUse && IsForwardingOperandKilled;
|
Apply llvm-prefer-register-over-unsigned from clang-tidy to LLVM
Summary:
This clang-tidy check is looking for unsigned integer variables whose initializer
starts with an implicit cast from llvm::Register and changes the type of the
variable to llvm::Register (dropping the llvm:: where possible).
Partial reverts in:
X86FrameLowering.cpp - Some functions return unsigned and arguably should be MCRegister
X86FixupLEAs.cpp - Some functions return unsigned and arguably should be MCRegister
X86FrameLowering.cpp - Some functions return unsigned and arguably should be MCRegister
HexagonBitSimplify.cpp - Function takes BitTracker::RegisterRef which appears to be unsigned&
MachineVerifier.cpp - Ambiguous operator==() given MCRegister and const Register
PPCFastISel.cpp - No Register::operator-=()
PeepholeOptimizer.cpp - TargetInstrInfo::optimizeLoadInstr() takes an unsigned&
MachineTraceMetrics.cpp - MachineTraceMetrics lacks a suitable constructor
Manual fixups in:
ARMFastISel.cpp - ARMEmitLoad() now takes a Register& instead of unsigned&
HexagonSplitDouble.cpp - Ternary operator was ambiguous between unsigned/Register
HexagonConstExtenders.cpp - Has a local class named Register, used llvm::Register instead of Register.
PPCFastISel.cpp - PPCEmitLoad() now takes a Register& instead of unsigned&
Depends on D65919
Reviewers: arsenm, bogner, craig.topper, RKSimon
Reviewed By: arsenm
Subscribers: RKSimon, craig.topper, lenary, aemerson, wuzish, jholewinski, MatzeB, qcolombet, dschuff, jyknight, dylanmckay, sdardis, nemanjai, jvesely, wdng, nhaehnle, sbc100, jgravelle-google, kristof.beyls, hiraditya, aheejin, kbarton, fedor.sergeev, javed.absar, asb, rbar, johnrusso, simoncook, apazos, sabuasal, niosHD, jrtc27, MaskRay, zzheng, edward-jones, atanasyan, rogfer01, MartinMosbeck, brucehoult, the_o, tpr, PkmX, jocewei, jsji, Petar.Avramovic, asbirlea, Jim, s.egerton, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D65962
llvm-svn: 369041
2019-08-16 03:22:08 +08:00
|
|
|
Register ForwardingOperandReg = MI.getOperand(ForwardingOperand).getReg();
|
2018-08-20 10:52:55 +08:00
|
|
|
if (KilledDef && KillFwdDefMI)
|
|
|
|
*KilledDef = DefMI;
|
|
|
|
|
|
|
|
ImmInstrInfo III;
|
2019-07-24 12:50:23 +08:00
|
|
|
bool IsVFReg = MI.getOperand(0).isReg()
|
|
|
|
? isVFRegister(MI.getOperand(0).getReg())
|
|
|
|
: false;
|
|
|
|
bool HasImmForm = instrHasImmForm(MI.getOpcode(), IsVFReg, III, PostRA);
|
2018-08-20 10:52:55 +08:00
|
|
|
// If this is a reg+reg instruction that has a reg+imm form,
|
|
|
|
// and one of the operands is produced by an add-immediate,
|
|
|
|
// try to convert it.
|
2019-03-05 12:56:54 +08:00
|
|
|
if (HasImmForm &&
|
|
|
|
transformToImmFormFedByAdd(MI, III, ForwardingOperand, *DefMI,
|
|
|
|
KillFwdDefMI))
|
2018-08-20 10:52:55 +08:00
|
|
|
return true;
|
|
|
|
|
|
|
|
if ((DefMI->getOpcode() != PPC::LI && DefMI->getOpcode() != PPC::LI8) ||
|
|
|
|
!DefMI->getOperand(1).isImm())
|
2017-12-15 15:27:53 +08:00
|
|
|
return false;
|
|
|
|
|
|
|
|
int64_t Immediate = DefMI->getOperand(1).getImm();
|
|
|
|
// Sign-extend to 64-bits.
|
|
|
|
int64_t SExtImm = ((uint64_t)Immediate & ~0x7FFFuLL) != 0 ?
|
|
|
|
(Immediate | 0xFFFFFFFFFFFF0000) : Immediate;
|
|
|
|
|
2018-08-20 10:52:55 +08:00
|
|
|
// If this is a reg+reg instruction that has a reg+imm form,
|
|
|
|
// and one of the operands is produced by LI, convert it now.
|
|
|
|
if (HasImmForm)
|
2019-03-05 12:56:54 +08:00
|
|
|
return transformToImmFormFedByLI(MI, III, ForwardingOperand, *DefMI, SExtImm);
|
2017-12-15 15:27:53 +08:00
|
|
|
|
|
|
|
bool ReplaceWithLI = false;
|
|
|
|
bool Is64BitLI = false;
|
|
|
|
int64_t NewImm = 0;
|
|
|
|
bool SetCR = false;
|
|
|
|
unsigned Opc = MI.getOpcode();
|
|
|
|
switch (Opc) {
|
|
|
|
default: return false;
|
|
|
|
|
|
|
|
// FIXME: Any branches conditional on such a comparison can be made
|
|
|
|
// unconditional. At this time, this happens too infrequently to be worth
|
|
|
|
// the implementation effort, but if that ever changes, we could convert
|
|
|
|
// such a pattern here.
|
|
|
|
case PPC::CMPWI:
|
|
|
|
case PPC::CMPLWI:
|
|
|
|
case PPC::CMPDI:
|
|
|
|
case PPC::CMPLDI: {
|
|
|
|
// Doing this post-RA would require dataflow analysis to reliably find uses
|
|
|
|
// of the CR register set by the compare.
|
2019-03-05 12:56:54 +08:00
|
|
|
// No need to fixup killed/dead flag since this transformation is only valid
|
|
|
|
// before RA.
|
2017-12-15 15:27:53 +08:00
|
|
|
if (PostRA)
|
|
|
|
return false;
|
|
|
|
// If a compare-immediate is fed by an immediate and is itself an input of
|
|
|
|
// an ISEL (the most common case) into a COPY of the correct register.
|
|
|
|
bool Changed = false;
|
Apply llvm-prefer-register-over-unsigned from clang-tidy to LLVM
Summary:
This clang-tidy check is looking for unsigned integer variables whose initializer
starts with an implicit cast from llvm::Register and changes the type of the
variable to llvm::Register (dropping the llvm:: where possible).
Partial reverts in:
X86FrameLowering.cpp - Some functions return unsigned and arguably should be MCRegister
X86FixupLEAs.cpp - Some functions return unsigned and arguably should be MCRegister
X86FrameLowering.cpp - Some functions return unsigned and arguably should be MCRegister
HexagonBitSimplify.cpp - Function takes BitTracker::RegisterRef which appears to be unsigned&
MachineVerifier.cpp - Ambiguous operator==() given MCRegister and const Register
PPCFastISel.cpp - No Register::operator-=()
PeepholeOptimizer.cpp - TargetInstrInfo::optimizeLoadInstr() takes an unsigned&
MachineTraceMetrics.cpp - MachineTraceMetrics lacks a suitable constructor
Manual fixups in:
ARMFastISel.cpp - ARMEmitLoad() now takes a Register& instead of unsigned&
HexagonSplitDouble.cpp - Ternary operator was ambiguous between unsigned/Register
HexagonConstExtenders.cpp - Has a local class named Register, used llvm::Register instead of Register.
PPCFastISel.cpp - PPCEmitLoad() now takes a Register& instead of unsigned&
Depends on D65919
Reviewers: arsenm, bogner, craig.topper, RKSimon
Reviewed By: arsenm
Subscribers: RKSimon, craig.topper, lenary, aemerson, wuzish, jholewinski, MatzeB, qcolombet, dschuff, jyknight, dylanmckay, sdardis, nemanjai, jvesely, wdng, nhaehnle, sbc100, jgravelle-google, kristof.beyls, hiraditya, aheejin, kbarton, fedor.sergeev, javed.absar, asb, rbar, johnrusso, simoncook, apazos, sabuasal, niosHD, jrtc27, MaskRay, zzheng, edward-jones, atanasyan, rogfer01, MartinMosbeck, brucehoult, the_o, tpr, PkmX, jocewei, jsji, Petar.Avramovic, asbirlea, Jim, s.egerton, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D65962
llvm-svn: 369041
2019-08-16 03:22:08 +08:00
|
|
|
Register DefReg = MI.getOperand(0).getReg();
|
2017-12-15 15:27:53 +08:00
|
|
|
int64_t Comparand = MI.getOperand(2).getImm();
|
|
|
|
int64_t SExtComparand = ((uint64_t)Comparand & ~0x7FFFuLL) != 0 ?
|
|
|
|
(Comparand | 0xFFFFFFFFFFFF0000) : Comparand;
|
|
|
|
|
|
|
|
for (auto &CompareUseMI : MRI->use_instructions(DefReg)) {
|
|
|
|
unsigned UseOpc = CompareUseMI.getOpcode();
|
|
|
|
if (UseOpc != PPC::ISEL && UseOpc != PPC::ISEL8)
|
|
|
|
continue;
|
|
|
|
unsigned CRSubReg = CompareUseMI.getOperand(3).getSubReg();
|
Apply llvm-prefer-register-over-unsigned from clang-tidy to LLVM
Summary:
This clang-tidy check is looking for unsigned integer variables whose initializer
starts with an implicit cast from llvm::Register and changes the type of the
variable to llvm::Register (dropping the llvm:: where possible).
Partial reverts in:
X86FrameLowering.cpp - Some functions return unsigned and arguably should be MCRegister
X86FixupLEAs.cpp - Some functions return unsigned and arguably should be MCRegister
X86FrameLowering.cpp - Some functions return unsigned and arguably should be MCRegister
HexagonBitSimplify.cpp - Function takes BitTracker::RegisterRef which appears to be unsigned&
MachineVerifier.cpp - Ambiguous operator==() given MCRegister and const Register
PPCFastISel.cpp - No Register::operator-=()
PeepholeOptimizer.cpp - TargetInstrInfo::optimizeLoadInstr() takes an unsigned&
MachineTraceMetrics.cpp - MachineTraceMetrics lacks a suitable constructor
Manual fixups in:
ARMFastISel.cpp - ARMEmitLoad() now takes a Register& instead of unsigned&
HexagonSplitDouble.cpp - Ternary operator was ambiguous between unsigned/Register
HexagonConstExtenders.cpp - Has a local class named Register, used llvm::Register instead of Register.
PPCFastISel.cpp - PPCEmitLoad() now takes a Register& instead of unsigned&
Depends on D65919
Reviewers: arsenm, bogner, craig.topper, RKSimon
Reviewed By: arsenm
Subscribers: RKSimon, craig.topper, lenary, aemerson, wuzish, jholewinski, MatzeB, qcolombet, dschuff, jyknight, dylanmckay, sdardis, nemanjai, jvesely, wdng, nhaehnle, sbc100, jgravelle-google, kristof.beyls, hiraditya, aheejin, kbarton, fedor.sergeev, javed.absar, asb, rbar, johnrusso, simoncook, apazos, sabuasal, niosHD, jrtc27, MaskRay, zzheng, edward-jones, atanasyan, rogfer01, MartinMosbeck, brucehoult, the_o, tpr, PkmX, jocewei, jsji, Petar.Avramovic, asbirlea, Jim, s.egerton, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D65962
llvm-svn: 369041
2019-08-16 03:22:08 +08:00
|
|
|
Register TrueReg = CompareUseMI.getOperand(1).getReg();
|
|
|
|
Register FalseReg = CompareUseMI.getOperand(2).getReg();
|
2017-12-15 15:27:53 +08:00
|
|
|
unsigned RegToCopy = selectReg(SExtImm, SExtComparand, Opc, TrueReg,
|
|
|
|
FalseReg, CRSubReg);
|
|
|
|
if (RegToCopy == PPC::NoRegister)
|
|
|
|
continue;
|
|
|
|
// Can't use PPC::COPY to copy PPC::ZERO[8]. Convert it to LI[8] 0.
|
|
|
|
if (RegToCopy == PPC::ZERO || RegToCopy == PPC::ZERO8) {
|
|
|
|
CompareUseMI.setDesc(get(UseOpc == PPC::ISEL8 ? PPC::LI8 : PPC::LI));
|
2018-12-28 11:38:09 +08:00
|
|
|
replaceInstrOperandWithImm(CompareUseMI, 1, 0);
|
2017-12-15 15:27:53 +08:00
|
|
|
CompareUseMI.RemoveOperand(3);
|
|
|
|
CompareUseMI.RemoveOperand(2);
|
|
|
|
continue;
|
|
|
|
}
|
2018-05-14 20:53:11 +08:00
|
|
|
LLVM_DEBUG(
|
|
|
|
dbgs() << "Found LI -> CMPI -> ISEL, replacing with a copy.\n");
|
|
|
|
LLVM_DEBUG(DefMI->dump(); MI.dump(); CompareUseMI.dump());
|
|
|
|
LLVM_DEBUG(dbgs() << "Is converted to:\n");
|
2017-12-15 15:27:53 +08:00
|
|
|
// Convert to copy and remove unneeded operands.
|
|
|
|
CompareUseMI.setDesc(get(PPC::COPY));
|
|
|
|
CompareUseMI.RemoveOperand(3);
|
|
|
|
CompareUseMI.RemoveOperand(RegToCopy == TrueReg ? 2 : 1);
|
|
|
|
CmpIselsConverted++;
|
|
|
|
Changed = true;
|
2018-05-14 20:53:11 +08:00
|
|
|
LLVM_DEBUG(CompareUseMI.dump());
|
2017-12-15 15:27:53 +08:00
|
|
|
}
|
|
|
|
if (Changed)
|
|
|
|
return true;
|
|
|
|
// This may end up incremented multiple times since this function is called
|
|
|
|
// during a fixed-point transformation, but it is only meant to indicate the
|
|
|
|
// presence of this opportunity.
|
|
|
|
MissedConvertibleImmediateInstrs++;
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Immediate forms - may simply be convertable to an LI.
|
|
|
|
case PPC::ADDI:
|
|
|
|
case PPC::ADDI8: {
|
|
|
|
// Does the sum fit in a 16-bit signed field?
|
|
|
|
int64_t Addend = MI.getOperand(2).getImm();
|
|
|
|
if (isInt<16>(Addend + SExtImm)) {
|
|
|
|
ReplaceWithLI = true;
|
|
|
|
Is64BitLI = Opc == PPC::ADDI8;
|
|
|
|
NewImm = Addend + SExtImm;
|
|
|
|
break;
|
|
|
|
}
|
2017-12-15 19:47:48 +08:00
|
|
|
return false;
|
2017-12-15 15:27:53 +08:00
|
|
|
}
|
|
|
|
case PPC::RLDICL:
|
|
|
|
case PPC::RLDICLo:
|
|
|
|
case PPC::RLDICL_32:
|
|
|
|
case PPC::RLDICL_32_64: {
|
|
|
|
// Use APInt's rotate function.
|
|
|
|
int64_t SH = MI.getOperand(2).getImm();
|
|
|
|
int64_t MB = MI.getOperand(3).getImm();
|
2018-04-12 05:25:44 +08:00
|
|
|
APInt InVal((Opc == PPC::RLDICL || Opc == PPC::RLDICLo) ?
|
|
|
|
64 : 32, SExtImm, true);
|
2017-12-15 15:27:53 +08:00
|
|
|
InVal = InVal.rotl(SH);
|
2017-12-29 20:22:27 +08:00
|
|
|
uint64_t Mask = (1LLU << (63 - MB + 1)) - 1;
|
2017-12-15 15:27:53 +08:00
|
|
|
InVal &= Mask;
|
|
|
|
// Can't replace negative values with an LI as that will sign-extend
|
|
|
|
// and not clear the left bits. If we're setting the CR bit, we will use
|
|
|
|
// ANDIo which won't sign extend, so that's safe.
|
|
|
|
if (isUInt<15>(InVal.getSExtValue()) ||
|
|
|
|
(Opc == PPC::RLDICLo && isUInt<16>(InVal.getSExtValue()))) {
|
|
|
|
ReplaceWithLI = true;
|
|
|
|
Is64BitLI = Opc != PPC::RLDICL_32;
|
|
|
|
NewImm = InVal.getSExtValue();
|
|
|
|
SetCR = Opc == PPC::RLDICLo;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
case PPC::RLWINM:
|
|
|
|
case PPC::RLWINM8:
|
|
|
|
case PPC::RLWINMo:
|
|
|
|
case PPC::RLWINM8o: {
|
|
|
|
int64_t SH = MI.getOperand(2).getImm();
|
|
|
|
int64_t MB = MI.getOperand(3).getImm();
|
|
|
|
int64_t ME = MI.getOperand(4).getImm();
|
|
|
|
APInt InVal(32, SExtImm, true);
|
|
|
|
InVal = InVal.rotl(SH);
|
2018-07-13 23:21:03 +08:00
|
|
|
// Set the bits ( MB + 32 ) to ( ME + 32 ).
|
2017-12-29 20:22:27 +08:00
|
|
|
uint64_t Mask = ((1LLU << (32 - MB)) - 1) & ~((1LLU << (31 - ME)) - 1);
|
2017-12-15 15:27:53 +08:00
|
|
|
InVal &= Mask;
|
|
|
|
// Can't replace negative values with an LI as that will sign-extend
|
|
|
|
// and not clear the left bits. If we're setting the CR bit, we will use
|
|
|
|
// ANDIo which won't sign extend, so that's safe.
|
|
|
|
bool ValueFits = isUInt<15>(InVal.getSExtValue());
|
|
|
|
ValueFits |= ((Opc == PPC::RLWINMo || Opc == PPC::RLWINM8o) &&
|
|
|
|
isUInt<16>(InVal.getSExtValue()));
|
|
|
|
if (ValueFits) {
|
|
|
|
ReplaceWithLI = true;
|
|
|
|
Is64BitLI = Opc == PPC::RLWINM8 || Opc == PPC::RLWINM8o;
|
|
|
|
NewImm = InVal.getSExtValue();
|
|
|
|
SetCR = Opc == PPC::RLWINMo || Opc == PPC::RLWINM8o;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
case PPC::ORI:
|
|
|
|
case PPC::ORI8:
|
|
|
|
case PPC::XORI:
|
|
|
|
case PPC::XORI8: {
|
|
|
|
int64_t LogicalImm = MI.getOperand(2).getImm();
|
|
|
|
int64_t Result = 0;
|
|
|
|
if (Opc == PPC::ORI || Opc == PPC::ORI8)
|
|
|
|
Result = LogicalImm | SExtImm;
|
|
|
|
else
|
|
|
|
Result = LogicalImm ^ SExtImm;
|
|
|
|
if (isInt<16>(Result)) {
|
|
|
|
ReplaceWithLI = true;
|
|
|
|
Is64BitLI = Opc == PPC::ORI8 || Opc == PPC::XORI8;
|
|
|
|
NewImm = Result;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (ReplaceWithLI) {
|
2018-07-13 23:21:03 +08:00
|
|
|
// We need to be careful with CR-setting instructions we're replacing.
|
|
|
|
if (SetCR) {
|
|
|
|
// We don't know anything about uses when we're out of SSA, so only
|
|
|
|
// replace if the new immediate will be reproduced.
|
|
|
|
bool ImmChanged = (SExtImm & NewImm) != NewImm;
|
|
|
|
if (PostRA && ImmChanged)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
if (!PostRA) {
|
|
|
|
// If the defining load-immediate has no other uses, we can just replace
|
|
|
|
// the immediate with the new immediate.
|
|
|
|
if (MRI->hasOneUse(DefMI->getOperand(0).getReg()))
|
|
|
|
DefMI->getOperand(1).setImm(NewImm);
|
|
|
|
|
|
|
|
// If we're not using the GPR result of the CR-setting instruction, we
|
|
|
|
// just need to and with zero/non-zero depending on the new immediate.
|
|
|
|
else if (MRI->use_empty(MI.getOperand(0).getReg())) {
|
|
|
|
if (NewImm) {
|
|
|
|
assert(Immediate && "Transformation converted zero to non-zero?");
|
|
|
|
NewImm = Immediate;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
else if (ImmChanged)
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-05-14 20:53:11 +08:00
|
|
|
LLVM_DEBUG(dbgs() << "Replacing instruction:\n");
|
|
|
|
LLVM_DEBUG(MI.dump());
|
|
|
|
LLVM_DEBUG(dbgs() << "Fed by:\n");
|
|
|
|
LLVM_DEBUG(DefMI->dump());
|
2017-12-15 15:27:53 +08:00
|
|
|
LoadImmediateInfo LII;
|
|
|
|
LII.Imm = NewImm;
|
|
|
|
LII.Is64Bit = Is64BitLI;
|
|
|
|
LII.SetCR = SetCR;
|
|
|
|
// If we're setting the CR, the original load-immediate must be kept (as an
|
|
|
|
// operand to ANDIo/ANDI8o).
|
|
|
|
if (KilledDef && SetCR)
|
|
|
|
*KilledDef = nullptr;
|
|
|
|
replaceInstrWithLI(MI, LII);
|
2019-03-05 12:56:54 +08:00
|
|
|
|
|
|
|
// Fixup killed/dead flag after transformation.
|
|
|
|
// Pattern:
|
|
|
|
// ForwardingOperandReg = LI imm1
|
|
|
|
// y = op2 imm2, ForwardingOperandReg(killed)
|
|
|
|
if (IsForwardingOperandKilled)
|
|
|
|
fixupIsDeadOrKill(*DefMI, MI, ForwardingOperandReg);
|
|
|
|
|
2018-05-14 20:53:11 +08:00
|
|
|
LLVM_DEBUG(dbgs() << "With:\n");
|
|
|
|
LLVM_DEBUG(MI.dump());
|
2017-12-15 15:27:53 +08:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2019-07-24 12:50:23 +08:00
|
|
|
// Determine whether the X-Form (reg+reg) instruction \p Opc has a
// corresponding D-Form (reg+imm) equivalent, and if so describe the
// conversion in \p III (which operand takes the immediate, signedness,
// width, alignment constraints, commutativity, etc.).
// \param Opc     the opcode being queried.
// \param IsVFReg post-RA only: true if the VSX value lives in a VR (upper)
//                register, which selects between VSX and FP D-Form opcodes.
// \param III     out-parameter populated with the immediate-form description.
// \param PostRA  true once out of SSA; some P9 choices differ pre/post RA.
// \returns true iff an immediate form exists (III is then fully populated).
bool PPCInstrInfo::instrHasImmForm(unsigned Opc, bool IsVFReg,
                                   ImmInstrInfo &III, bool PostRA) const {
  // The vast majority of the instructions would need their operand 2 replaced
  // with an immediate when switching to the reg+imm form. A marked exception
  // are the update form loads/stores for which a constant operand 2 would need
  // to turn into a displacement and move operand 1 to the operand 2 position.
  III.ImmOpNo = 2;
  III.OpNoForForwarding = 2;
  III.ImmWidth = 16;
  III.ImmMustBeMultipleOf = 1;
  III.TruncateImmTo = 0;
  III.IsSummingOperands = false;
  switch (Opc) {
  default: return false;
  case PPC::ADD4:
  case PPC::ADD8:
    III.SignedImm = true;
    III.ZeroIsSpecialOrig = 0;
    III.ZeroIsSpecialNew = 1;
    III.IsCommutative = true;
    III.IsSummingOperands = true;
    III.ImmOpcode = Opc == PPC::ADD4 ? PPC::ADDI : PPC::ADDI8;
    break;
  case PPC::ADDC:
  case PPC::ADDC8:
    III.SignedImm = true;
    III.ZeroIsSpecialOrig = 0;
    III.ZeroIsSpecialNew = 0;
    III.IsCommutative = true;
    III.IsSummingOperands = true;
    III.ImmOpcode = Opc == PPC::ADDC ? PPC::ADDIC : PPC::ADDIC8;
    break;
  case PPC::ADDCo:
    III.SignedImm = true;
    III.ZeroIsSpecialOrig = 0;
    III.ZeroIsSpecialNew = 0;
    III.IsCommutative = true;
    III.IsSummingOperands = true;
    III.ImmOpcode = PPC::ADDICo;
    break;
  case PPC::SUBFC:
  case PPC::SUBFC8:
    III.SignedImm = true;
    III.ZeroIsSpecialOrig = 0;
    III.ZeroIsSpecialNew = 0;
    III.IsCommutative = false;
    III.ImmOpcode = Opc == PPC::SUBFC ? PPC::SUBFIC : PPC::SUBFIC8;
    break;
  case PPC::CMPW:
  case PPC::CMPD:
    III.SignedImm = true;
    III.ZeroIsSpecialOrig = 0;
    III.ZeroIsSpecialNew = 0;
    III.IsCommutative = false;
    III.ImmOpcode = Opc == PPC::CMPW ? PPC::CMPWI : PPC::CMPDI;
    break;
  case PPC::CMPLW:
  case PPC::CMPLD:
    III.SignedImm = false;
    III.ZeroIsSpecialOrig = 0;
    III.ZeroIsSpecialNew = 0;
    III.IsCommutative = false;
    III.ImmOpcode = Opc == PPC::CMPLW ? PPC::CMPLWI : PPC::CMPLDI;
    break;
  case PPC::ANDo:
  case PPC::AND8o:
  case PPC::OR:
  case PPC::OR8:
  case PPC::XOR:
  case PPC::XOR8:
    III.SignedImm = false;
    III.ZeroIsSpecialOrig = 0;
    III.ZeroIsSpecialNew = 0;
    III.IsCommutative = true;
    switch(Opc) {
    default: llvm_unreachable("Unknown opcode");
    case PPC::ANDo: III.ImmOpcode = PPC::ANDIo; break;
    case PPC::AND8o: III.ImmOpcode = PPC::ANDIo8; break;
    case PPC::OR: III.ImmOpcode = PPC::ORI; break;
    case PPC::OR8: III.ImmOpcode = PPC::ORI8; break;
    case PPC::XOR: III.ImmOpcode = PPC::XORI; break;
    case PPC::XOR8: III.ImmOpcode = PPC::XORI8; break;
    }
    break;
  case PPC::RLWNM:
  case PPC::RLWNM8:
  case PPC::RLWNMo:
  case PPC::RLWNM8o:
  case PPC::SLW:
  case PPC::SLW8:
  case PPC::SLWo:
  case PPC::SLW8o:
  case PPC::SRW:
  case PPC::SRW8:
  case PPC::SRWo:
  case PPC::SRW8o:
  case PPC::SRAW:
  case PPC::SRAWo:
    III.SignedImm = false;
    III.ZeroIsSpecialOrig = 0;
    III.ZeroIsSpecialNew = 0;
    III.IsCommutative = false;
    // This isn't actually true, but the instructions ignore any of the
    // upper bits, so any immediate loaded with an LI is acceptable.
    // This does not apply to shift right algebraic because a value
    // out of range will produce a -1/0.
    III.ImmWidth = 16;
    // Rotate amounts are 5 bits for 32-bit rotates, 6 bits for the shifts
    // (which become rotate-and-mask forms below).
    if (Opc == PPC::RLWNM || Opc == PPC::RLWNM8 ||
        Opc == PPC::RLWNMo || Opc == PPC::RLWNM8o)
      III.TruncateImmTo = 5;
    else
      III.TruncateImmTo = 6;
    switch(Opc) {
    default: llvm_unreachable("Unknown opcode");
    case PPC::RLWNM: III.ImmOpcode = PPC::RLWINM; break;
    case PPC::RLWNM8: III.ImmOpcode = PPC::RLWINM8; break;
    case PPC::RLWNMo: III.ImmOpcode = PPC::RLWINMo; break;
    case PPC::RLWNM8o: III.ImmOpcode = PPC::RLWINM8o; break;
    case PPC::SLW: III.ImmOpcode = PPC::RLWINM; break;
    case PPC::SLW8: III.ImmOpcode = PPC::RLWINM8; break;
    case PPC::SLWo: III.ImmOpcode = PPC::RLWINMo; break;
    case PPC::SLW8o: III.ImmOpcode = PPC::RLWINM8o; break;
    case PPC::SRW: III.ImmOpcode = PPC::RLWINM; break;
    case PPC::SRW8: III.ImmOpcode = PPC::RLWINM8; break;
    case PPC::SRWo: III.ImmOpcode = PPC::RLWINMo; break;
    case PPC::SRW8o: III.ImmOpcode = PPC::RLWINM8o; break;
    case PPC::SRAW:
      // Algebraic shifts must keep the real 5-bit width (no truncation):
      // an out-of-range amount changes the result (-1/0).
      III.ImmWidth = 5;
      III.TruncateImmTo = 0;
      III.ImmOpcode = PPC::SRAWI;
      break;
    case PPC::SRAWo:
      III.ImmWidth = 5;
      III.TruncateImmTo = 0;
      III.ImmOpcode = PPC::SRAWIo;
      break;
    }
    break;
  case PPC::RLDCL:
  case PPC::RLDCLo:
  case PPC::RLDCR:
  case PPC::RLDCRo:
  case PPC::SLD:
  case PPC::SLDo:
  case PPC::SRD:
  case PPC::SRDo:
  case PPC::SRAD:
  case PPC::SRADo:
    III.SignedImm = false;
    III.ZeroIsSpecialOrig = 0;
    III.ZeroIsSpecialNew = 0;
    III.IsCommutative = false;
    // This isn't actually true, but the instructions ignore any of the
    // upper bits, so any immediate loaded with an LI is acceptable.
    // This does not apply to shift right algebraic because a value
    // out of range will produce a -1/0.
    III.ImmWidth = 16;
    // 64-bit rotate amounts are 6 bits; 64-bit shift amounts are 7 bits.
    if (Opc == PPC::RLDCL || Opc == PPC::RLDCLo ||
        Opc == PPC::RLDCR || Opc == PPC::RLDCRo)
      III.TruncateImmTo = 6;
    else
      III.TruncateImmTo = 7;
    switch(Opc) {
    default: llvm_unreachable("Unknown opcode");
    case PPC::RLDCL: III.ImmOpcode = PPC::RLDICL; break;
    case PPC::RLDCLo: III.ImmOpcode = PPC::RLDICLo; break;
    case PPC::RLDCR: III.ImmOpcode = PPC::RLDICR; break;
    case PPC::RLDCRo: III.ImmOpcode = PPC::RLDICRo; break;
    case PPC::SLD: III.ImmOpcode = PPC::RLDICR; break;
    case PPC::SLDo: III.ImmOpcode = PPC::RLDICRo; break;
    case PPC::SRD: III.ImmOpcode = PPC::RLDICL; break;
    case PPC::SRDo: III.ImmOpcode = PPC::RLDICLo; break;
    case PPC::SRAD:
      // As with SRAW: keep the true 6-bit width, no truncation.
      III.ImmWidth = 6;
      III.TruncateImmTo = 0;
      III.ImmOpcode = PPC::SRADI;
      break;
    case PPC::SRADo:
      III.ImmWidth = 6;
      III.TruncateImmTo = 0;
      III.ImmOpcode = PPC::SRADIo;
      break;
    }
    break;
  // Loads and stores:
  case PPC::LBZX:
  case PPC::LBZX8:
  case PPC::LHZX:
  case PPC::LHZX8:
  case PPC::LHAX:
  case PPC::LHAX8:
  case PPC::LWZX:
  case PPC::LWZX8:
  case PPC::LWAX:
  case PPC::LDX:
  case PPC::LFSX:
  case PPC::LFDX:
  case PPC::STBX:
  case PPC::STBX8:
  case PPC::STHX:
  case PPC::STHX8:
  case PPC::STWX:
  case PPC::STWX8:
  case PPC::STDX:
  case PPC::STFSX:
  case PPC::STFDX:
    III.SignedImm = true;
    III.ZeroIsSpecialOrig = 1;
    III.ZeroIsSpecialNew = 2;
    III.IsCommutative = true;
    III.IsSummingOperands = true;
    III.ImmOpNo = 1;
    III.OpNoForForwarding = 2;
    switch(Opc) {
    default: llvm_unreachable("Unknown opcode");
    case PPC::LBZX: III.ImmOpcode = PPC::LBZ; break;
    case PPC::LBZX8: III.ImmOpcode = PPC::LBZ8; break;
    case PPC::LHZX: III.ImmOpcode = PPC::LHZ; break;
    case PPC::LHZX8: III.ImmOpcode = PPC::LHZ8; break;
    case PPC::LHAX: III.ImmOpcode = PPC::LHA; break;
    case PPC::LHAX8: III.ImmOpcode = PPC::LHA8; break;
    case PPC::LWZX: III.ImmOpcode = PPC::LWZ; break;
    case PPC::LWZX8: III.ImmOpcode = PPC::LWZ8; break;
    case PPC::LWAX:
      // DS-form: the 14-bit displacement is implicitly a multiple of 4.
      III.ImmOpcode = PPC::LWA;
      III.ImmMustBeMultipleOf = 4;
      break;
    case PPC::LDX: III.ImmOpcode = PPC::LD; III.ImmMustBeMultipleOf = 4; break;
    case PPC::LFSX: III.ImmOpcode = PPC::LFS; break;
    case PPC::LFDX: III.ImmOpcode = PPC::LFD; break;
    case PPC::STBX: III.ImmOpcode = PPC::STB; break;
    case PPC::STBX8: III.ImmOpcode = PPC::STB8; break;
    case PPC::STHX: III.ImmOpcode = PPC::STH; break;
    case PPC::STHX8: III.ImmOpcode = PPC::STH8; break;
    case PPC::STWX: III.ImmOpcode = PPC::STW; break;
    case PPC::STWX8: III.ImmOpcode = PPC::STW8; break;
    case PPC::STDX:
      III.ImmOpcode = PPC::STD;
      III.ImmMustBeMultipleOf = 4;
      break;
    case PPC::STFSX: III.ImmOpcode = PPC::STFS; break;
    case PPC::STFDX: III.ImmOpcode = PPC::STFD; break;
    }
    break;
  case PPC::LBZUX:
  case PPC::LBZUX8:
  case PPC::LHZUX:
  case PPC::LHZUX8:
  case PPC::LHAUX:
  case PPC::LHAUX8:
  case PPC::LWZUX:
  case PPC::LWZUX8:
  case PPC::LDUX:
  case PPC::LFSUX:
  case PPC::LFDUX:
  case PPC::STBUX:
  case PPC::STBUX8:
  case PPC::STHUX:
  case PPC::STHUX8:
  case PPC::STWUX:
  case PPC::STWUX8:
  case PPC::STDUX:
  case PPC::STFSUX:
  case PPC::STFDUX:
    // Update-form loads/stores: the immediate goes in operand 2 and the
    // updated base register shifts the special-zero position by one.
    III.SignedImm = true;
    III.ZeroIsSpecialOrig = 2;
    III.ZeroIsSpecialNew = 3;
    III.IsCommutative = false;
    III.IsSummingOperands = true;
    III.ImmOpNo = 2;
    III.OpNoForForwarding = 3;
    switch(Opc) {
    default: llvm_unreachable("Unknown opcode");
    case PPC::LBZUX: III.ImmOpcode = PPC::LBZU; break;
    case PPC::LBZUX8: III.ImmOpcode = PPC::LBZU8; break;
    case PPC::LHZUX: III.ImmOpcode = PPC::LHZU; break;
    case PPC::LHZUX8: III.ImmOpcode = PPC::LHZU8; break;
    case PPC::LHAUX: III.ImmOpcode = PPC::LHAU; break;
    case PPC::LHAUX8: III.ImmOpcode = PPC::LHAU8; break;
    case PPC::LWZUX: III.ImmOpcode = PPC::LWZU; break;
    case PPC::LWZUX8: III.ImmOpcode = PPC::LWZU8; break;
    case PPC::LDUX:
      III.ImmOpcode = PPC::LDU;
      III.ImmMustBeMultipleOf = 4;
      break;
    case PPC::LFSUX: III.ImmOpcode = PPC::LFSU; break;
    case PPC::LFDUX: III.ImmOpcode = PPC::LFDU; break;
    case PPC::STBUX: III.ImmOpcode = PPC::STBU; break;
    case PPC::STBUX8: III.ImmOpcode = PPC::STBU8; break;
    case PPC::STHUX: III.ImmOpcode = PPC::STHU; break;
    case PPC::STHUX8: III.ImmOpcode = PPC::STHU8; break;
    case PPC::STWUX: III.ImmOpcode = PPC::STWU; break;
    case PPC::STWUX8: III.ImmOpcode = PPC::STWU8; break;
    case PPC::STDUX:
      III.ImmOpcode = PPC::STDU;
      III.ImmMustBeMultipleOf = 4;
      break;
    case PPC::STFSUX: III.ImmOpcode = PPC::STFSU; break;
    case PPC::STFDUX: III.ImmOpcode = PPC::STFDU; break;
    }
    break;
  // Power9 and up only. For some of these, the X-Form version has access to all
  // 64 VSR's whereas the D-Form only has access to the VR's. We replace those
  // with pseudo-ops pre-ra and for post-ra, we check that the register loaded
  // into or stored from is one of the VR registers.
  case PPC::LXVX:
  case PPC::LXSSPX:
  case PPC::LXSDX:
  case PPC::STXVX:
  case PPC::STXSSPX:
  case PPC::STXSDX:
  case PPC::XFLOADf32:
  case PPC::XFLOADf64:
  case PPC::XFSTOREf32:
  case PPC::XFSTOREf64:
    if (!Subtarget.hasP9Vector())
      return false;
    III.SignedImm = true;
    III.ZeroIsSpecialOrig = 1;
    III.ZeroIsSpecialNew = 2;
    III.IsCommutative = true;
    III.IsSummingOperands = true;
    III.ImmOpNo = 1;
    III.OpNoForForwarding = 2;
    III.ImmMustBeMultipleOf = 4;
    switch(Opc) {
    default: llvm_unreachable("Unknown opcode");
    case PPC::LXVX:
      III.ImmOpcode = PPC::LXV;
      III.ImmMustBeMultipleOf = 16;
      break;
    case PPC::LXSSPX:
      if (PostRA) {
        // Post-RA the register class is fixed: use the VSX D-Form if the
        // value is in a VR, otherwise fall back to the FP D-Form (which has
        // no displacement-alignment restriction).
        if (IsVFReg)
          III.ImmOpcode = PPC::LXSSP;
        else {
          III.ImmOpcode = PPC::LFS;
          III.ImmMustBeMultipleOf = 1;
        }
        break;
      }
      LLVM_FALLTHROUGH;
    case PPC::XFLOADf32:
      III.ImmOpcode = PPC::DFLOADf32;
      break;
    case PPC::LXSDX:
      if (PostRA) {
        if (IsVFReg)
          III.ImmOpcode = PPC::LXSD;
        else {
          III.ImmOpcode = PPC::LFD;
          III.ImmMustBeMultipleOf = 1;
        }
        break;
      }
      LLVM_FALLTHROUGH;
    case PPC::XFLOADf64:
      III.ImmOpcode = PPC::DFLOADf64;
      break;
    case PPC::STXVX:
      III.ImmOpcode = PPC::STXV;
      III.ImmMustBeMultipleOf = 16;
      break;
    case PPC::STXSSPX:
      if (PostRA) {
        if (IsVFReg)
          III.ImmOpcode = PPC::STXSSP;
        else {
          III.ImmOpcode = PPC::STFS;
          III.ImmMustBeMultipleOf = 1;
        }
        break;
      }
      LLVM_FALLTHROUGH;
    case PPC::XFSTOREf32:
      III.ImmOpcode = PPC::DFSTOREf32;
      break;
    case PPC::STXSDX:
      if (PostRA) {
        if (IsVFReg)
          III.ImmOpcode = PPC::STXSD;
        else {
          III.ImmOpcode = PPC::STFD;
          III.ImmMustBeMultipleOf = 1;
        }
        break;
      }
      LLVM_FALLTHROUGH;
    case PPC::XFSTOREf64:
      III.ImmOpcode = PPC::DFSTOREf64;
      break;
    }
    break;
  }
  return true;
}
|
|
|
|
|
|
|
|
// Utility function for swaping two arbitrary operands of an instruction.
// Operands are removed and re-added (rather than assigned) because
// MachineInstr operand lists cannot be reordered in place.
// NOTE(review): the reverse loop below assumes MinOp > 0 (the `i >= MinOp`
// test never terminates for an unsigned i if MinOp == 0) — callers appear
// to only swap source operands, TODO confirm.
static void swapMIOperands(MachineInstr &MI, unsigned Op1, unsigned Op2) {
  assert(Op1 != Op2 && "Cannot swap operand with itself.");

  unsigned MaxOp = std::max(Op1, Op2);
  unsigned MinOp = std::min(Op1, Op2);
  // Copy both operands by value before removing them from the instruction.
  MachineOperand MOp1 = MI.getOperand(MinOp);
  MachineOperand MOp2 = MI.getOperand(MaxOp);
  // Remove the higher-numbered operand first so MinOp stays valid.
  MI.RemoveOperand(std::max(Op1, Op2));
  MI.RemoveOperand(std::min(Op1, Op2));

  // If the operands we are swapping are the two at the end (the common case)
  // we can just remove both and add them in the opposite order.
  if (MaxOp - MinOp == 1 && MI.getNumOperands() == MinOp) {
    MI.addOperand(MOp2);
    MI.addOperand(MOp1);
  } else {
    // Store all operands in a temporary vector, remove them and re-add in the
    // right order.
    SmallVector<MachineOperand, 2> MOps;
    unsigned TotalOps = MI.getNumOperands() + 2; // We've already removed 2 ops.
    // Strip every remaining operand from MinOp to the end (reverse order,
    // so MOps holds them back-to-front).
    for (unsigned i = MI.getNumOperands() - 1; i >= MinOp; i--) {
      MOps.push_back(MI.getOperand(i));
      MI.RemoveOperand(i);
    }
    // MOp2 needs to be added next.
    MI.addOperand(MOp2);
    // Now add the rest.
    for (unsigned i = MI.getNumOperands(); i < TotalOps; i++) {
      if (i == MaxOp)
        MI.addOperand(MOp1);
      else {
        MI.addOperand(MOps.back());
        MOps.pop_back();
      }
    }
  }
}
|
|
|
|
|
2019-06-24 23:50:29 +08:00
|
|
|
// Check if the 'MI' that has the index OpNoForForwarding
|
2018-08-20 10:52:55 +08:00
|
|
|
// meets the requirement described in the ImmInstrInfo.
|
|
|
|
bool PPCInstrInfo::isUseMIElgibleForForwarding(MachineInstr &MI,
|
|
|
|
const ImmInstrInfo &III,
|
|
|
|
unsigned OpNoForForwarding
|
|
|
|
) const {
|
|
|
|
// As the algorithm of checking for PPC::ZERO/PPC::ZERO8
|
|
|
|
// would not work pre-RA, we can only do the check post RA.
|
|
|
|
MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
|
|
|
|
if (MRI.isSSA())
|
|
|
|
return false;
|
|
|
|
|
|
|
|
// Cannot do the transform if MI isn't summing the operands.
|
|
|
|
if (!III.IsSummingOperands)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
// The instruction we are trying to replace must have the ZeroIsSpecialOrig set.
|
|
|
|
if (!III.ZeroIsSpecialOrig)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
// We cannot do the transform if the operand we are trying to replace
|
|
|
|
// isn't the same as the operand the instruction allows.
|
|
|
|
if (OpNoForForwarding != III.OpNoForForwarding)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
// Check if the instruction we are trying to transform really has
|
|
|
|
// the special zero register as its operand.
|
|
|
|
if (MI.getOperand(III.ZeroIsSpecialOrig).getReg() != PPC::ZERO &&
|
|
|
|
MI.getOperand(III.ZeroIsSpecialOrig).getReg() != PPC::ZERO8)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
// This machine instruction is convertible if it is,
|
|
|
|
// 1. summing the operands.
|
|
|
|
// 2. one of the operands is special zero register.
|
|
|
|
// 3. the operand we are trying to replace is allowed by the MI.
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Check if the DefMI is the add inst and set the ImmMO and RegMO
|
|
|
|
// accordingly.
|
|
|
|
bool PPCInstrInfo::isDefMIElgibleForForwarding(MachineInstr &DefMI,
|
|
|
|
const ImmInstrInfo &III,
|
|
|
|
MachineOperand *&ImmMO,
|
|
|
|
MachineOperand *&RegMO) const {
|
|
|
|
unsigned Opc = DefMI.getOpcode();
|
|
|
|
if (Opc != PPC::ADDItocL && Opc != PPC::ADDI && Opc != PPC::ADDI8)
|
2019-06-24 23:50:29 +08:00
|
|
|
return false;
|
2018-08-20 10:52:55 +08:00
|
|
|
|
|
|
|
assert(DefMI.getNumOperands() >= 3 &&
|
|
|
|
"Add inst must have at least three operands");
|
|
|
|
RegMO = &DefMI.getOperand(1);
|
|
|
|
ImmMO = &DefMI.getOperand(2);
|
|
|
|
|
|
|
|
// This DefMI is elgible for forwarding if it is:
|
|
|
|
// 1. add inst
|
|
|
|
// 2. one of the operands is Imm/CPI/Global.
|
|
|
|
return isAnImmediateOperand(*ImmMO);
|
|
|
|
}
|
|
|
|
|
2019-03-05 12:56:54 +08:00
|
|
|
// Check whether the base register of the feeding add-immediate can be
// forwarded into \p MI, i.e. it is not redefined between \p DefMI and \p MI.
// Sets \p IsFwdFeederRegKilled if the register is killed in that interval so
// the caller can fix up kill flags after the transform.
bool PPCInstrInfo::isRegElgibleForForwarding(
    const MachineOperand &RegMO, const MachineInstr &DefMI,
    const MachineInstr &MI, bool KillDefMI,
    bool &IsFwdFeederRegKilled) const {
  // x = addi y, imm
  // ...
  // z = lfdx 0, x -> z = lfd imm(y)
  // The Reg "y" can be forwarded to the MI(z) only when there is no DEF
  // of "y" between the DEF of "x" and "z".
  // The query is only valid post RA.
  const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
  if (MRI.isSSA())
    return false;

  Register Reg = RegMO.getReg();

  // Walking the inst in reverse(MI-->DefMI) to get the last DEF of the Reg.
  MachineBasicBlock::const_reverse_iterator It = MI;
  MachineBasicBlock::const_reverse_iterator E = MI.getParent()->rend();
  It++;
  for (; It != E; ++It) {
    // Any intervening definition of Reg (other than DefMI itself) blocks
    // the forwarding.
    if (It->modifiesRegister(Reg, &getRegisterInfo()) && (&*It) != &DefMI)
      return false;
    else if (It->killsRegister(Reg, &getRegisterInfo()) && (&*It) != &DefMI)
      IsFwdFeederRegKilled = true;
    // Made it to DefMI without encountering a clobber.
    if ((&*It) == &DefMI)
      break;
  }
  assert((&*It) == &DefMI && "DefMI is missing");

  // If DefMI also defines the register to be forwarded, we can only forward it
  // if DefMI is being erased.
  if (DefMI.modifiesRegister(Reg, &getRegisterInfo()))
    return KillDefMI;

  return true;
}
|
|
|
|
|
|
|
|
// Check whether the immediate-like operand \p ImmMO of the feeding
// add-immediate \p DefMI satisfies the constraints in \p III (width,
// signedness, alignment, truncation). On success, \p Imm holds the
// sign-extended 64-bit value (only meaningful for plain immediates;
// ADDItocL's CPI/global operand is validated structurally instead).
bool PPCInstrInfo::isImmElgibleForForwarding(const MachineOperand &ImmMO,
                                             const MachineInstr &DefMI,
                                             const ImmInstrInfo &III,
                                             int64_t &Imm) const {
  assert(isAnImmediateOperand(ImmMO) && "ImmMO is NOT an immediate");
  if (DefMI.getOpcode() == PPC::ADDItocL) {
    // The operand for ADDItocL is CPI, which isn't imm at compiling time,
    // However, we know that, it is 16-bit width, and has the alignment of 4.
    // Check if the instruction met the requirement.
    if (III.ImmMustBeMultipleOf > 4 ||
       III.TruncateImmTo || III.ImmWidth != 16)
      return false;

    // Going from XForm to DForm loads means that the displacement needs to be
    // not just an immediate but also a multiple of 4, or 16 depending on the
    // load. A DForm load cannot be represented if it is a multiple of say 2.
    // XForm loads do not have this restriction.
    if (ImmMO.isGlobal() &&
        ImmMO.getGlobal()->getAlignment() < III.ImmMustBeMultipleOf)
      return false;

    return true;
  }

  if (ImmMO.isImm()) {
    // It is Imm, we need to check if the Imm fit the range.
    int64_t Immediate = ImmMO.getImm();
    // Sign-extend to 64-bits.
    // If any bit above bit 14 is set, treat the value as negative 16-bit and
    // fill in the upper 48 bits; otherwise it is already non-negative.
    Imm = ((uint64_t)Immediate & ~0x7FFFuLL) != 0 ?
          (Immediate | 0xFFFFFFFFFFFF0000) : Immediate;

    if (Imm % III.ImmMustBeMultipleOf)
      return false;
    if (III.TruncateImmTo)
      Imm &= ((1 << III.TruncateImmTo) - 1);
    if (III.SignedImm) {
      APInt ActualValue(64, Imm, true);
      if (!ActualValue.isSignedIntN(III.ImmWidth))
        return false;
    } else {
      uint64_t UnsignedMax = (1 << III.ImmWidth) - 1;
      if ((uint64_t)Imm > UnsignedMax)
        return false;
    }
  }
  else
    return false;

  // This ImmMO is forwarded if it meets the requirement described
  // in ImmInstrInfo.
  return true;
}
|
|
|
|
|
|
|
|
// If an X-Form instruction is fed by an add-immediate and one of its operands
// is the literal zero, attempt to forward the source of the add-immediate to
// the corresponding D-Form instruction with the displacement coming from
// the immediate being added.
//
// \p MI               - the X-Form user being rewritten in place.
// \p III              - table entry describing the D-Form equivalent and its
//                       operand constraints.
// \p OpNoForForwarding - operand index of MI fed by DefMI's result.
// \p DefMI            - the feeding add-immediate (e.g. addi/ADDItocL).
// \p KillDefMI        - whether DefMI is killed by this forwarding.
// Returns true iff MI was rewritten to its immediate (D-Form) variant.
bool PPCInstrInfo::transformToImmFormFedByAdd(
    MachineInstr &MI, const ImmInstrInfo &III, unsigned OpNoForForwarding,
    MachineInstr &DefMI, bool KillDefMI) const {
  //         RegMO ImmMO
  //           |    |
  // x = addi reg, imm  <----- DefMI
  // y = op    0 ,  x   <----- MI
  //                |
  //         OpNoForForwarding
  // Check if the MI meet the requirement described in the III.
  if (!isUseMIElgibleForForwarding(MI, III, OpNoForForwarding))
    return false;

  // Check if the DefMI meet the requirement
  // described in the III. If yes, set the ImmMO and RegMO accordingly.
  MachineOperand *ImmMO = nullptr;
  MachineOperand *RegMO = nullptr;
  if (!isDefMIElgibleForForwarding(DefMI, III, ImmMO, RegMO))
    return false;
  assert(ImmMO && RegMO && "Imm and Reg operand must have been set");

  // As we get the Imm operand now, we need to check if the ImmMO meet
  // the requirement described in the III. If yes set the Imm.
  int64_t Imm = 0;
  if (!isImmElgibleForForwarding(*ImmMO, DefMI, III, Imm))
    return false;

  bool IsFwdFeederRegKilled = false;
  // Check if the RegMO can be forwarded to MI.
  if (!isRegElgibleForForwarding(*RegMO, DefMI, MI, KillDefMI,
                                 IsFwdFeederRegKilled))
    return false;

  // Get killed info in case fixup needed after transformation.
  unsigned ForwardKilledOperandReg = ~0U;
  MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
  bool PostRA = !MRI.isSSA();
  if (PostRA && MI.getOperand(OpNoForForwarding).isKill())
    ForwardKilledOperandReg = MI.getOperand(OpNoForForwarding).getReg();

  // We know that, the MI and DefMI both meet the pattern, and
  // the Imm also meet the requirement with the new Imm-form.
  // It is safe to do the transformation now.
  LLVM_DEBUG(dbgs() << "Replacing instruction:\n");
  LLVM_DEBUG(MI.dump());
  LLVM_DEBUG(dbgs() << "Fed by:\n");
  LLVM_DEBUG(DefMI.dump());

  // Update the base reg first.
  MI.getOperand(III.OpNoForForwarding).ChangeToRegister(RegMO->getReg(),
                                                        false, false,
                                                        RegMO->isKill());

  // Then, update the imm.
  if (ImmMO->isImm()) {
    // If the ImmMO is Imm, change the operand that has ZERO to that Imm
    // directly.
    replaceInstrOperandWithImm(MI, III.ZeroIsSpecialOrig, Imm);
  }
  else {
    // Otherwise, it is Constant Pool Index(CPI) or Global,
    // which is relocation in fact. We need to replace the special zero
    // register with ImmMO.
    // Before that, we need to fixup the target flags for imm.
    // For some reason, we miss to set the flag for the ImmMO if it is CPI.
    if (DefMI.getOpcode() == PPC::ADDItocL)
      ImmMO->setTargetFlags(PPCII::MO_TOC_LO);

    // MI didn't have the interface such as MI.setOperand(i) though
    // it has MI.getOperand(i). To replace the ZERO MachineOperand with
    // ImmMO, we need to remove ZERO operand and all the operands behind it,
    // and, add the ImmMO, then, move back all the operands behind ZERO.
    // NOTE(review): this loop assumes III.ZeroIsSpecialOrig > 0; since `i` is
    // unsigned, a table entry of 0 would make `i >= 0` always true and the
    // loop would never terminate — confirm no entry reaches here with 0.
    SmallVector<MachineOperand, 2> MOps;
    for (unsigned i = MI.getNumOperands() - 1; i >= III.ZeroIsSpecialOrig; i--) {
      MOps.push_back(MI.getOperand(i));
      MI.RemoveOperand(i);
    }

    // Remove the last MO in the list, which is ZERO operand in fact.
    MOps.pop_back();
    // Add the imm operand.
    MI.addOperand(*ImmMO);
    // Now add the rest back.
    for (auto &MO : MOps)
      MI.addOperand(MO);
  }

  // Update the opcode.
  MI.setDesc(get(III.ImmOpcode));

  // Fix up killed/dead flag after transformation.
  // Pattern 1:
  // x = ADD KilledFwdFeederReg, imm
  // n = opn KilledFwdFeederReg(killed), regn
  // y = XOP 0, x
  // Pattern 2:
  // x = ADD reg(killed), imm
  // y = XOP 0, x
  if (IsFwdFeederRegKilled || RegMO->isKill())
    fixupIsDeadOrKill(DefMI, MI, RegMO->getReg());
  // Pattern 3:
  // ForwardKilledOperandReg = ADD reg, imm
  // y = XOP 0, ForwardKilledOperandReg(killed)
  if (ForwardKilledOperandReg != ~0U)
    fixupIsDeadOrKill(DefMI, MI, ForwardKilledOperandReg);

  LLVM_DEBUG(dbgs() << "With:\n");
  LLVM_DEBUG(MI.dump());

  return true;
}
|
|
|
|
|
|
|
|
bool PPCInstrInfo::transformToImmFormFedByLI(MachineInstr &MI,
|
|
|
|
const ImmInstrInfo &III,
|
|
|
|
unsigned ConstantOpNo,
|
2019-03-05 12:56:54 +08:00
|
|
|
MachineInstr &DefMI,
|
2018-08-20 10:52:55 +08:00
|
|
|
int64_t Imm) const {
|
2017-12-15 15:27:53 +08:00
|
|
|
MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
|
|
|
|
bool PostRA = !MRI.isSSA();
|
|
|
|
// Exit early if we can't convert this.
|
2018-08-20 10:52:55 +08:00
|
|
|
if ((ConstantOpNo != III.OpNoForForwarding) && !III.IsCommutative)
|
2017-12-15 15:27:53 +08:00
|
|
|
return false;
|
|
|
|
if (Imm % III.ImmMustBeMultipleOf)
|
|
|
|
return false;
|
2017-12-29 20:22:27 +08:00
|
|
|
if (III.TruncateImmTo)
|
|
|
|
Imm &= ((1 << III.TruncateImmTo) - 1);
|
2017-12-15 15:27:53 +08:00
|
|
|
if (III.SignedImm) {
|
|
|
|
APInt ActualValue(64, Imm, true);
|
|
|
|
if (!ActualValue.isSignedIntN(III.ImmWidth))
|
|
|
|
return false;
|
|
|
|
} else {
|
|
|
|
uint64_t UnsignedMax = (1 << III.ImmWidth) - 1;
|
|
|
|
if ((uint64_t)Imm > UnsignedMax)
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
// If we're post-RA, the instructions don't agree on whether register zero is
|
|
|
|
// special, we can transform this as long as the register operand that will
|
|
|
|
// end up in the location where zero is special isn't R0.
|
|
|
|
if (PostRA && III.ZeroIsSpecialOrig != III.ZeroIsSpecialNew) {
|
|
|
|
unsigned PosForOrigZero = III.ZeroIsSpecialOrig ? III.ZeroIsSpecialOrig :
|
|
|
|
III.ZeroIsSpecialNew + 1;
|
Apply llvm-prefer-register-over-unsigned from clang-tidy to LLVM
Summary:
This clang-tidy check is looking for unsigned integer variables whose initializer
starts with an implicit cast from llvm::Register and changes the type of the
variable to llvm::Register (dropping the llvm:: where possible).
Partial reverts in:
X86FrameLowering.cpp - Some functions return unsigned and arguably should be MCRegister
X86FixupLEAs.cpp - Some functions return unsigned and arguably should be MCRegister
X86FrameLowering.cpp - Some functions return unsigned and arguably should be MCRegister
HexagonBitSimplify.cpp - Function takes BitTracker::RegisterRef which appears to be unsigned&
MachineVerifier.cpp - Ambiguous operator==() given MCRegister and const Register
PPCFastISel.cpp - No Register::operator-=()
PeepholeOptimizer.cpp - TargetInstrInfo::optimizeLoadInstr() takes an unsigned&
MachineTraceMetrics.cpp - MachineTraceMetrics lacks a suitable constructor
Manual fixups in:
ARMFastISel.cpp - ARMEmitLoad() now takes a Register& instead of unsigned&
HexagonSplitDouble.cpp - Ternary operator was ambiguous between unsigned/Register
HexagonConstExtenders.cpp - Has a local class named Register, used llvm::Register instead of Register.
PPCFastISel.cpp - PPCEmitLoad() now takes a Register& instead of unsigned&
Depends on D65919
Reviewers: arsenm, bogner, craig.topper, RKSimon
Reviewed By: arsenm
Subscribers: RKSimon, craig.topper, lenary, aemerson, wuzish, jholewinski, MatzeB, qcolombet, dschuff, jyknight, dylanmckay, sdardis, nemanjai, jvesely, wdng, nhaehnle, sbc100, jgravelle-google, kristof.beyls, hiraditya, aheejin, kbarton, fedor.sergeev, javed.absar, asb, rbar, johnrusso, simoncook, apazos, sabuasal, niosHD, jrtc27, MaskRay, zzheng, edward-jones, atanasyan, rogfer01, MartinMosbeck, brucehoult, the_o, tpr, PkmX, jocewei, jsji, Petar.Avramovic, asbirlea, Jim, s.egerton, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D65962
llvm-svn: 369041
2019-08-16 03:22:08 +08:00
|
|
|
Register OrigZeroReg = MI.getOperand(PosForOrigZero).getReg();
|
|
|
|
Register NewZeroReg = MI.getOperand(III.ZeroIsSpecialNew).getReg();
|
2017-12-15 15:27:53 +08:00
|
|
|
// If R0 is in the operand where zero is special for the new instruction,
|
|
|
|
// it is unsafe to transform if the constant operand isn't that operand.
|
|
|
|
if ((NewZeroReg == PPC::R0 || NewZeroReg == PPC::X0) &&
|
|
|
|
ConstantOpNo != III.ZeroIsSpecialNew)
|
|
|
|
return false;
|
|
|
|
if ((OrigZeroReg == PPC::R0 || OrigZeroReg == PPC::X0) &&
|
|
|
|
ConstantOpNo != PosForOrigZero)
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2019-03-05 12:56:54 +08:00
|
|
|
// Get killed info in case fixup needed after transformation.
|
|
|
|
unsigned ForwardKilledOperandReg = ~0U;
|
|
|
|
if (PostRA && MI.getOperand(ConstantOpNo).isKill())
|
|
|
|
ForwardKilledOperandReg = MI.getOperand(ConstantOpNo).getReg();
|
|
|
|
|
2017-12-15 15:27:53 +08:00
|
|
|
unsigned Opc = MI.getOpcode();
|
|
|
|
bool SpecialShift32 =
|
|
|
|
Opc == PPC::SLW || Opc == PPC::SLWo || Opc == PPC::SRW || Opc == PPC::SRWo;
|
|
|
|
bool SpecialShift64 =
|
|
|
|
Opc == PPC::SLD || Opc == PPC::SLDo || Opc == PPC::SRD || Opc == PPC::SRDo;
|
|
|
|
bool SetCR = Opc == PPC::SLWo || Opc == PPC::SRWo ||
|
|
|
|
Opc == PPC::SLDo || Opc == PPC::SRDo;
|
|
|
|
bool RightShift =
|
|
|
|
Opc == PPC::SRW || Opc == PPC::SRWo || Opc == PPC::SRD || Opc == PPC::SRDo;
|
|
|
|
|
|
|
|
MI.setDesc(get(III.ImmOpcode));
|
2018-08-20 10:52:55 +08:00
|
|
|
if (ConstantOpNo == III.OpNoForForwarding) {
|
2017-12-15 15:27:53 +08:00
|
|
|
// Converting shifts to immediate form is a bit tricky since they may do
|
|
|
|
// one of three things:
|
|
|
|
// 1. If the shift amount is between OpSize and 2*OpSize, the result is zero
|
|
|
|
// 2. If the shift amount is zero, the result is unchanged (save for maybe
|
|
|
|
// setting CR0)
|
|
|
|
// 3. If the shift amount is in [1, OpSize), it's just a shift
|
|
|
|
if (SpecialShift32 || SpecialShift64) {
|
|
|
|
LoadImmediateInfo LII;
|
|
|
|
LII.Imm = 0;
|
|
|
|
LII.SetCR = SetCR;
|
|
|
|
LII.Is64Bit = SpecialShift64;
|
|
|
|
uint64_t ShAmt = Imm & (SpecialShift32 ? 0x1F : 0x3F);
|
|
|
|
if (Imm & (SpecialShift32 ? 0x20 : 0x40))
|
|
|
|
replaceInstrWithLI(MI, LII);
|
|
|
|
// Shifts by zero don't change the value. If we don't need to set CR0,
|
|
|
|
// just convert this to a COPY. Can't do this post-RA since we've already
|
|
|
|
// cleaned up the copies.
|
|
|
|
else if (!SetCR && ShAmt == 0 && !PostRA) {
|
|
|
|
MI.RemoveOperand(2);
|
|
|
|
MI.setDesc(get(PPC::COPY));
|
|
|
|
} else {
|
|
|
|
// The 32 bit and 64 bit instructions are quite different.
|
|
|
|
if (SpecialShift32) {
|
|
|
|
// Left shifts use (N, 0, 31-N), right shifts use (32-N, N, 31).
|
|
|
|
uint64_t SH = RightShift ? 32 - ShAmt : ShAmt;
|
|
|
|
uint64_t MB = RightShift ? ShAmt : 0;
|
|
|
|
uint64_t ME = RightShift ? 31 : 31 - ShAmt;
|
2018-12-28 11:38:09 +08:00
|
|
|
replaceInstrOperandWithImm(MI, III.OpNoForForwarding, SH);
|
2017-12-15 15:27:53 +08:00
|
|
|
MachineInstrBuilder(*MI.getParent()->getParent(), MI).addImm(MB)
|
|
|
|
.addImm(ME);
|
|
|
|
} else {
|
|
|
|
// Left shifts use (N, 63-N), right shifts use (64-N, N).
|
|
|
|
uint64_t SH = RightShift ? 64 - ShAmt : ShAmt;
|
|
|
|
uint64_t ME = RightShift ? ShAmt : 63 - ShAmt;
|
2018-12-28 11:38:09 +08:00
|
|
|
replaceInstrOperandWithImm(MI, III.OpNoForForwarding, SH);
|
2017-12-15 15:27:53 +08:00
|
|
|
MachineInstrBuilder(*MI.getParent()->getParent(), MI).addImm(ME);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
} else
|
2018-12-28 11:38:09 +08:00
|
|
|
replaceInstrOperandWithImm(MI, ConstantOpNo, Imm);
|
2017-12-15 15:27:53 +08:00
|
|
|
}
|
|
|
|
// Convert commutative instructions (switch the operands and convert the
|
|
|
|
// desired one to an immediate.
|
|
|
|
else if (III.IsCommutative) {
|
2018-12-28 11:38:09 +08:00
|
|
|
replaceInstrOperandWithImm(MI, ConstantOpNo, Imm);
|
2018-08-20 10:52:55 +08:00
|
|
|
swapMIOperands(MI, ConstantOpNo, III.OpNoForForwarding);
|
2017-12-15 15:27:53 +08:00
|
|
|
} else
|
|
|
|
llvm_unreachable("Should have exited early!");
|
|
|
|
|
|
|
|
// For instructions for which the constant register replaces a different
|
|
|
|
// operand than where the immediate goes, we need to swap them.
|
2018-08-20 10:52:55 +08:00
|
|
|
if (III.OpNoForForwarding != III.ImmOpNo)
|
|
|
|
swapMIOperands(MI, III.OpNoForForwarding, III.ImmOpNo);
|
2017-12-15 15:27:53 +08:00
|
|
|
|
2018-12-28 09:02:35 +08:00
|
|
|
// If the special R0/X0 register index are different for original instruction
|
|
|
|
// and new instruction, we need to fix up the register class in new
|
|
|
|
// instruction.
|
2017-12-15 15:27:53 +08:00
|
|
|
if (!PostRA && III.ZeroIsSpecialOrig != III.ZeroIsSpecialNew) {
|
2018-12-28 09:02:35 +08:00
|
|
|
if (III.ZeroIsSpecialNew) {
|
|
|
|
// If operand at III.ZeroIsSpecialNew is physical reg(eg: ZERO/ZERO8), no
|
|
|
|
// need to fix up register class.
|
Apply llvm-prefer-register-over-unsigned from clang-tidy to LLVM
Summary:
This clang-tidy check is looking for unsigned integer variables whose initializer
starts with an implicit cast from llvm::Register and changes the type of the
variable to llvm::Register (dropping the llvm:: where possible).
Partial reverts in:
X86FrameLowering.cpp - Some functions return unsigned and arguably should be MCRegister
X86FixupLEAs.cpp - Some functions return unsigned and arguably should be MCRegister
X86FrameLowering.cpp - Some functions return unsigned and arguably should be MCRegister
HexagonBitSimplify.cpp - Function takes BitTracker::RegisterRef which appears to be unsigned&
MachineVerifier.cpp - Ambiguous operator==() given MCRegister and const Register
PPCFastISel.cpp - No Register::operator-=()
PeepholeOptimizer.cpp - TargetInstrInfo::optimizeLoadInstr() takes an unsigned&
MachineTraceMetrics.cpp - MachineTraceMetrics lacks a suitable constructor
Manual fixups in:
ARMFastISel.cpp - ARMEmitLoad() now takes a Register& instead of unsigned&
HexagonSplitDouble.cpp - Ternary operator was ambiguous between unsigned/Register
HexagonConstExtenders.cpp - Has a local class named Register, used llvm::Register instead of Register.
PPCFastISel.cpp - PPCEmitLoad() now takes a Register& instead of unsigned&
Depends on D65919
Reviewers: arsenm, bogner, craig.topper, RKSimon
Reviewed By: arsenm
Subscribers: RKSimon, craig.topper, lenary, aemerson, wuzish, jholewinski, MatzeB, qcolombet, dschuff, jyknight, dylanmckay, sdardis, nemanjai, jvesely, wdng, nhaehnle, sbc100, jgravelle-google, kristof.beyls, hiraditya, aheejin, kbarton, fedor.sergeev, javed.absar, asb, rbar, johnrusso, simoncook, apazos, sabuasal, niosHD, jrtc27, MaskRay, zzheng, edward-jones, atanasyan, rogfer01, MartinMosbeck, brucehoult, the_o, tpr, PkmX, jocewei, jsji, Petar.Avramovic, asbirlea, Jim, s.egerton, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D65962
llvm-svn: 369041
2019-08-16 03:22:08 +08:00
|
|
|
Register RegToModify = MI.getOperand(III.ZeroIsSpecialNew).getReg();
|
2019-08-02 07:27:28 +08:00
|
|
|
if (Register::isVirtualRegister(RegToModify)) {
|
2018-12-28 09:02:35 +08:00
|
|
|
const TargetRegisterClass *NewRC =
|
|
|
|
MRI.getRegClass(RegToModify)->hasSuperClassEq(&PPC::GPRCRegClass) ?
|
|
|
|
&PPC::GPRC_and_GPRC_NOR0RegClass : &PPC::G8RC_and_G8RC_NOX0RegClass;
|
|
|
|
MRI.setRegClass(RegToModify, NewRC);
|
|
|
|
}
|
2017-12-15 15:27:53 +08:00
|
|
|
}
|
|
|
|
}
|
2019-03-05 12:56:54 +08:00
|
|
|
|
|
|
|
// Fix up killed/dead flag after transformation.
|
|
|
|
// Pattern:
|
|
|
|
// ForwardKilledOperandReg = LI imm
|
|
|
|
// y = XOP reg, ForwardKilledOperandReg(killed)
|
|
|
|
if (ForwardKilledOperandReg != ~0U)
|
|
|
|
fixupIsDeadOrKill(DefMI, MI, ForwardKilledOperandReg);
|
2017-12-15 15:27:53 +08:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2016-10-04 14:59:23 +08:00
|
|
|
// Return the register class to use for \p RC on this subtarget: when VSX is
// available, the Altivec class VRRC is widened to the VSX class VSRC;
// every other class is returned unchanged.
const TargetRegisterClass *
PPCInstrInfo::updatedRC(const TargetRegisterClass *RC) const {
  bool WidenToVSX = Subtarget.hasVSX() && RC == &PPC::VRRCRegClass;
  return WidenToVSX ? &PPC::VSRCRegClass : RC;
}
|
2017-05-31 13:40:25 +08:00
|
|
|
|
|
|
|
// Return the record-form (CR0-setting) counterpart of \p Opcode by
// delegating to the PPC-namespace opcode mapping.
int PPCInstrInfo::getRecordFormOpcode(unsigned Opcode) {
  return PPC::getRecordFormOpcode(Opcode);
}
|
2017-10-16 12:12:57 +08:00
|
|
|
|
|
|
|
// This function returns true if the machine instruction
|
|
|
|
// always outputs a value by sign-extending a 32 bit value,
|
|
|
|
// i.e. 0 to 31-th bits are same as 32-th bit.
|
|
|
|
static bool isSignExtendingOp(const MachineInstr &MI) {
|
|
|
|
int Opcode = MI.getOpcode();
|
|
|
|
if (Opcode == PPC::LI || Opcode == PPC::LI8 ||
|
|
|
|
Opcode == PPC::LIS || Opcode == PPC::LIS8 ||
|
|
|
|
Opcode == PPC::SRAW || Opcode == PPC::SRAWo ||
|
|
|
|
Opcode == PPC::SRAWI || Opcode == PPC::SRAWIo ||
|
|
|
|
Opcode == PPC::LWA || Opcode == PPC::LWAX ||
|
|
|
|
Opcode == PPC::LWA_32 || Opcode == PPC::LWAX_32 ||
|
|
|
|
Opcode == PPC::LHA || Opcode == PPC::LHAX ||
|
|
|
|
Opcode == PPC::LHA8 || Opcode == PPC::LHAX8 ||
|
|
|
|
Opcode == PPC::LBZ || Opcode == PPC::LBZX ||
|
|
|
|
Opcode == PPC::LBZ8 || Opcode == PPC::LBZX8 ||
|
|
|
|
Opcode == PPC::LBZU || Opcode == PPC::LBZUX ||
|
|
|
|
Opcode == PPC::LBZU8 || Opcode == PPC::LBZUX8 ||
|
|
|
|
Opcode == PPC::LHZ || Opcode == PPC::LHZX ||
|
|
|
|
Opcode == PPC::LHZ8 || Opcode == PPC::LHZX8 ||
|
|
|
|
Opcode == PPC::LHZU || Opcode == PPC::LHZUX ||
|
|
|
|
Opcode == PPC::LHZU8 || Opcode == PPC::LHZUX8 ||
|
|
|
|
Opcode == PPC::EXTSB || Opcode == PPC::EXTSBo ||
|
|
|
|
Opcode == PPC::EXTSH || Opcode == PPC::EXTSHo ||
|
|
|
|
Opcode == PPC::EXTSB8 || Opcode == PPC::EXTSH8 ||
|
|
|
|
Opcode == PPC::EXTSW || Opcode == PPC::EXTSWo ||
|
[PowerPC] Exploit power9 new instruction setb
Check the expected pattens feeding to SELECT_CC like:
(select_cc lhs, rhs, 1, (sext (setcc [lr]hs, [lr]hs, cc2)), cc1)
(select_cc lhs, rhs, -1, (zext (setcc [lr]hs, [lr]hs, cc2)), cc1)
(select_cc lhs, rhs, 0, (select_cc [lr]hs, [lr]hs, 1, -1, cc2), seteq)
(select_cc lhs, rhs, 0, (select_cc [lr]hs, [lr]hs, -1, 1, cc2), seteq)
Further transform the sequence to comparison + setb if hits.
Differential Revision: https://reviews.llvm.org/D53275
llvm-svn: 349445
2018-12-18 15:53:26 +08:00
|
|
|
Opcode == PPC::SETB || Opcode == PPC::SETB8 ||
|
2017-10-16 12:12:57 +08:00
|
|
|
Opcode == PPC::EXTSH8_32_64 || Opcode == PPC::EXTSW_32_64 ||
|
|
|
|
Opcode == PPC::EXTSB8_32_64)
|
|
|
|
return true;
|
|
|
|
|
|
|
|
if (Opcode == PPC::RLDICL && MI.getOperand(3).getImm() >= 33)
|
|
|
|
return true;
|
|
|
|
|
|
|
|
if ((Opcode == PPC::RLWINM || Opcode == PPC::RLWINMo ||
|
|
|
|
Opcode == PPC::RLWNM || Opcode == PPC::RLWNMo) &&
|
|
|
|
MI.getOperand(3).getImm() > 0 &&
|
|
|
|
MI.getOperand(3).getImm() <= MI.getOperand(4).getImm())
|
|
|
|
return true;
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
// This function returns true if the machine instruction
// always outputs zeros in higher 32 bits.
static bool isZeroExtendingOp(const MachineInstr &MI) {
  int Opcode = MI.getOpcode();
  // The 16-bit immediate is sign-extended in li/lis.
  // If the most significant bit is zero, all higher bits are zero.
  if (Opcode == PPC::LI || Opcode == PPC::LI8 ||
      Opcode == PPC::LIS || Opcode == PPC::LIS8) {
    int64_t Imm = MI.getOperand(1).getImm();
    if (((uint64_t)Imm & ~0x7FFFuLL) == 0)
      return true;
  }

  // We have some variations of rotate-and-mask instructions
  // that clear higher 32-bits.
  // (Operand 3 is presumably the clear/mask-begin amount for the RLDI*
  // forms — a value >= 32 leaves only low-word bits. TODO confirm against
  // the instruction definitions.)
  if ((Opcode == PPC::RLDICL || Opcode == PPC::RLDICLo ||
       Opcode == PPC::RLDCL || Opcode == PPC::RLDCLo ||
       Opcode == PPC::RLDICL_32_64) &&
      MI.getOperand(3).getImm() >= 32)
    return true;

  if ((Opcode == PPC::RLDIC || Opcode == PPC::RLDICo) &&
      MI.getOperand(3).getImm() >= 32 &&
      MI.getOperand(3).getImm() <= 63 - MI.getOperand(2).getImm())
    return true;

  // 32-bit rotate-and-mask with a non-wrapping mask (operand 3 <= operand 4)
  // confines the result to the low word.
  if ((Opcode == PPC::RLWINM || Opcode == PPC::RLWINMo ||
       Opcode == PPC::RLWNM || Opcode == PPC::RLWNMo ||
       Opcode == PPC::RLWINM8 || Opcode == PPC::RLWNM8) &&
      MI.getOperand(3).getImm() <= MI.getOperand(4).getImm())
    return true;

  // There are other instructions that clear higher 32-bits.
  if (Opcode == PPC::CNTLZW || Opcode == PPC::CNTLZWo ||
      Opcode == PPC::CNTTZW || Opcode == PPC::CNTTZWo ||
      Opcode == PPC::CNTLZW8 || Opcode == PPC::CNTTZW8 ||
      Opcode == PPC::CNTLZD || Opcode == PPC::CNTLZDo ||
      Opcode == PPC::CNTTZD || Opcode == PPC::CNTTZDo ||
      Opcode == PPC::POPCNTD || Opcode == PPC::POPCNTW ||
      Opcode == PPC::SLW || Opcode == PPC::SLWo ||
      Opcode == PPC::SRW || Opcode == PPC::SRWo ||
      Opcode == PPC::SLW8 || Opcode == PPC::SRW8 ||
      Opcode == PPC::SLWI || Opcode == PPC::SLWIo ||
      Opcode == PPC::SRWI || Opcode == PPC::SRWIo ||
      Opcode == PPC::LWZ || Opcode == PPC::LWZX ||
      Opcode == PPC::LWZU || Opcode == PPC::LWZUX ||
      Opcode == PPC::LWBRX || Opcode == PPC::LHBRX ||
      Opcode == PPC::LHZ || Opcode == PPC::LHZX ||
      Opcode == PPC::LHZU || Opcode == PPC::LHZUX ||
      Opcode == PPC::LBZ || Opcode == PPC::LBZX ||
      Opcode == PPC::LBZU || Opcode == PPC::LBZUX ||
      Opcode == PPC::LWZ8 || Opcode == PPC::LWZX8 ||
      Opcode == PPC::LWZU8 || Opcode == PPC::LWZUX8 ||
      Opcode == PPC::LWBRX8 || Opcode == PPC::LHBRX8 ||
      Opcode == PPC::LHZ8 || Opcode == PPC::LHZX8 ||
      Opcode == PPC::LHZU8 || Opcode == PPC::LHZUX8 ||
      Opcode == PPC::LBZ8 || Opcode == PPC::LBZX8 ||
      Opcode == PPC::LBZU8 || Opcode == PPC::LBZUX8 ||
      Opcode == PPC::ANDIo || Opcode == PPC::ANDISo ||
      Opcode == PPC::ROTRWI || Opcode == PPC::ROTRWIo ||
      Opcode == PPC::EXTLWI || Opcode == PPC::EXTLWIo ||
      Opcode == PPC::MFVSRWZ)
    return true;

  return false;
}
|
|
|
|
|
2017-11-28 04:26:36 +08:00
|
|
|
// This function returns true if the input MachineInstr is a TOC save
|
|
|
|
// instruction.
|
|
|
|
bool PPCInstrInfo::isTOCSaveMI(const MachineInstr &MI) const {
|
|
|
|
if (!MI.getOperand(1).isImm() || !MI.getOperand(2).isReg())
|
|
|
|
return false;
|
|
|
|
unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset();
|
|
|
|
unsigned StackOffset = MI.getOperand(1).getImm();
|
Apply llvm-prefer-register-over-unsigned from clang-tidy to LLVM
Summary:
This clang-tidy check is looking for unsigned integer variables whose initializer
starts with an implicit cast from llvm::Register and changes the type of the
variable to llvm::Register (dropping the llvm:: where possible).
Partial reverts in:
X86FrameLowering.cpp - Some functions return unsigned and arguably should be MCRegister
X86FixupLEAs.cpp - Some functions return unsigned and arguably should be MCRegister
X86FrameLowering.cpp - Some functions return unsigned and arguably should be MCRegister
HexagonBitSimplify.cpp - Function takes BitTracker::RegisterRef which appears to be unsigned&
MachineVerifier.cpp - Ambiguous operator==() given MCRegister and const Register
PPCFastISel.cpp - No Register::operator-=()
PeepholeOptimizer.cpp - TargetInstrInfo::optimizeLoadInstr() takes an unsigned&
MachineTraceMetrics.cpp - MachineTraceMetrics lacks a suitable constructor
Manual fixups in:
ARMFastISel.cpp - ARMEmitLoad() now takes a Register& instead of unsigned&
HexagonSplitDouble.cpp - Ternary operator was ambiguous between unsigned/Register
HexagonConstExtenders.cpp - Has a local class named Register, used llvm::Register instead of Register.
PPCFastISel.cpp - PPCEmitLoad() now takes a Register& instead of unsigned&
Depends on D65919
Reviewers: arsenm, bogner, craig.topper, RKSimon
Reviewed By: arsenm
Subscribers: RKSimon, craig.topper, lenary, aemerson, wuzish, jholewinski, MatzeB, qcolombet, dschuff, jyknight, dylanmckay, sdardis, nemanjai, jvesely, wdng, nhaehnle, sbc100, jgravelle-google, kristof.beyls, hiraditya, aheejin, kbarton, fedor.sergeev, javed.absar, asb, rbar, johnrusso, simoncook, apazos, sabuasal, niosHD, jrtc27, MaskRay, zzheng, edward-jones, atanasyan, rogfer01, MartinMosbeck, brucehoult, the_o, tpr, PkmX, jocewei, jsji, Petar.Avramovic, asbirlea, Jim, s.egerton, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D65962
llvm-svn: 369041
2019-08-16 03:22:08 +08:00
|
|
|
Register StackReg = MI.getOperand(2).getReg();
|
2017-11-28 04:26:36 +08:00
|
|
|
if (StackReg == PPC::X1 && StackOffset == TOCSaveOffset)
|
|
|
|
return true;
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2017-10-16 12:12:57 +08:00
|
|
|
// We limit the max depth to track incoming values of PHIs or binary ops
// (e.g. AND) to avoid excessive cost.
// Used as the recursion bound for isSignOrZeroExtended below.
const unsigned MAX_DEPTH = 1;
|
|
|
|
|
|
|
|
bool
|
|
|
|
PPCInstrInfo::isSignOrZeroExtended(const MachineInstr &MI, bool SignExt,
|
|
|
|
const unsigned Depth) const {
|
|
|
|
const MachineFunction *MF = MI.getParent()->getParent();
|
|
|
|
const MachineRegisterInfo *MRI = &MF->getRegInfo();
|
|
|
|
|
2017-10-18 18:31:19 +08:00
|
|
|
// If we know this instruction returns sign- or zero-extended result,
|
|
|
|
// return true.
|
|
|
|
if (SignExt ? isSignExtendingOp(MI):
|
|
|
|
isZeroExtendingOp(MI))
|
|
|
|
return true;
|
|
|
|
|
2017-10-16 12:12:57 +08:00
|
|
|
switch (MI.getOpcode()) {
|
|
|
|
case PPC::COPY: {
|
Apply llvm-prefer-register-over-unsigned from clang-tidy to LLVM
Summary:
This clang-tidy check is looking for unsigned integer variables whose initializer
starts with an implicit cast from llvm::Register and changes the type of the
variable to llvm::Register (dropping the llvm:: where possible).
Partial reverts in:
X86FrameLowering.cpp - Some functions return unsigned and arguably should be MCRegister
X86FixupLEAs.cpp - Some functions return unsigned and arguably should be MCRegister
X86FrameLowering.cpp - Some functions return unsigned and arguably should be MCRegister
HexagonBitSimplify.cpp - Function takes BitTracker::RegisterRef which appears to be unsigned&
MachineVerifier.cpp - Ambiguous operator==() given MCRegister and const Register
PPCFastISel.cpp - No Register::operator-=()
PeepholeOptimizer.cpp - TargetInstrInfo::optimizeLoadInstr() takes an unsigned&
MachineTraceMetrics.cpp - MachineTraceMetrics lacks a suitable constructor
Manual fixups in:
ARMFastISel.cpp - ARMEmitLoad() now takes a Register& instead of unsigned&
HexagonSplitDouble.cpp - Ternary operator was ambiguous between unsigned/Register
HexagonConstExtenders.cpp - Has a local class named Register, used llvm::Register instead of Register.
PPCFastISel.cpp - PPCEmitLoad() now takes a Register& instead of unsigned&
Depends on D65919
Reviewers: arsenm, bogner, craig.topper, RKSimon
Reviewed By: arsenm
Subscribers: RKSimon, craig.topper, lenary, aemerson, wuzish, jholewinski, MatzeB, qcolombet, dschuff, jyknight, dylanmckay, sdardis, nemanjai, jvesely, wdng, nhaehnle, sbc100, jgravelle-google, kristof.beyls, hiraditya, aheejin, kbarton, fedor.sergeev, javed.absar, asb, rbar, johnrusso, simoncook, apazos, sabuasal, niosHD, jrtc27, MaskRay, zzheng, edward-jones, atanasyan, rogfer01, MartinMosbeck, brucehoult, the_o, tpr, PkmX, jocewei, jsji, Petar.Avramovic, asbirlea, Jim, s.egerton, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D65962
llvm-svn: 369041
2019-08-16 03:22:08 +08:00
|
|
|
Register SrcReg = MI.getOperand(1).getReg();
|
2017-10-16 12:12:57 +08:00
|
|
|
|
|
|
|
// In both ELFv1 and v2 ABI, method parameters and the return value
|
|
|
|
// are sign- or zero-extended.
|
|
|
|
if (MF->getSubtarget<PPCSubtarget>().isSVR4ABI()) {
|
|
|
|
const PPCFunctionInfo *FuncInfo = MF->getInfo<PPCFunctionInfo>();
|
|
|
|
// We check the ZExt/SExt flags for a method parameter.
|
|
|
|
if (MI.getParent()->getBasicBlock() ==
|
2017-12-16 06:22:58 +08:00
|
|
|
&MF->getFunction().getEntryBlock()) {
|
Apply llvm-prefer-register-over-unsigned from clang-tidy to LLVM
Summary:
This clang-tidy check is looking for unsigned integer variables whose initializer
starts with an implicit cast from llvm::Register and changes the type of the
variable to llvm::Register (dropping the llvm:: where possible).
Partial reverts in:
X86FrameLowering.cpp - Some functions return unsigned and arguably should be MCRegister
X86FixupLEAs.cpp - Some functions return unsigned and arguably should be MCRegister
X86FrameLowering.cpp - Some functions return unsigned and arguably should be MCRegister
HexagonBitSimplify.cpp - Function takes BitTracker::RegisterRef which appears to be unsigned&
MachineVerifier.cpp - Ambiguous operator==() given MCRegister and const Register
PPCFastISel.cpp - No Register::operator-=()
PeepholeOptimizer.cpp - TargetInstrInfo::optimizeLoadInstr() takes an unsigned&
MachineTraceMetrics.cpp - MachineTraceMetrics lacks a suitable constructor
Manual fixups in:
ARMFastISel.cpp - ARMEmitLoad() now takes a Register& instead of unsigned&
HexagonSplitDouble.cpp - Ternary operator was ambiguous between unsigned/Register
HexagonConstExtenders.cpp - Has a local class named Register, used llvm::Register instead of Register.
PPCFastISel.cpp - PPCEmitLoad() now takes a Register& instead of unsigned&
Depends on D65919
Reviewers: arsenm, bogner, craig.topper, RKSimon
Reviewed By: arsenm
Subscribers: RKSimon, craig.topper, lenary, aemerson, wuzish, jholewinski, MatzeB, qcolombet, dschuff, jyknight, dylanmckay, sdardis, nemanjai, jvesely, wdng, nhaehnle, sbc100, jgravelle-google, kristof.beyls, hiraditya, aheejin, kbarton, fedor.sergeev, javed.absar, asb, rbar, johnrusso, simoncook, apazos, sabuasal, niosHD, jrtc27, MaskRay, zzheng, edward-jones, atanasyan, rogfer01, MartinMosbeck, brucehoult, the_o, tpr, PkmX, jocewei, jsji, Petar.Avramovic, asbirlea, Jim, s.egerton, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D65962
llvm-svn: 369041
2019-08-16 03:22:08 +08:00
|
|
|
Register VReg = MI.getOperand(0).getReg();
|
2017-10-16 12:12:57 +08:00
|
|
|
if (MF->getRegInfo().isLiveIn(VReg))
|
|
|
|
return SignExt ? FuncInfo->isLiveInSExt(VReg) :
|
|
|
|
FuncInfo->isLiveInZExt(VReg);
|
|
|
|
}
|
|
|
|
|
|
|
|
// For a method return value, we check the ZExt/SExt flags in attribute.
|
|
|
|
// We assume the following code sequence for method call.
|
2017-12-07 18:40:31 +08:00
|
|
|
// ADJCALLSTACKDOWN 32, implicit dead %r1, implicit %r1
|
2017-12-14 18:03:09 +08:00
|
|
|
// BL8_NOP @func,...
|
2017-12-07 18:40:31 +08:00
|
|
|
// ADJCALLSTACKUP 32, 0, implicit dead %r1, implicit %r1
|
|
|
|
// %5 = COPY %x3; G8RC:%5
|
2017-10-16 12:12:57 +08:00
|
|
|
if (SrcReg == PPC::X3) {
|
|
|
|
const MachineBasicBlock *MBB = MI.getParent();
|
|
|
|
MachineBasicBlock::const_instr_iterator II =
|
|
|
|
MachineBasicBlock::const_instr_iterator(&MI);
|
|
|
|
if (II != MBB->instr_begin() &&
|
|
|
|
(--II)->getOpcode() == PPC::ADJCALLSTACKUP) {
|
|
|
|
const MachineInstr &CallMI = *(--II);
|
|
|
|
if (CallMI.isCall() && CallMI.getOperand(0).isGlobal()) {
|
|
|
|
const Function *CalleeFn =
|
|
|
|
dyn_cast<Function>(CallMI.getOperand(0).getGlobal());
|
2017-10-16 20:11:15 +08:00
|
|
|
if (!CalleeFn)
|
|
|
|
return false;
|
2017-10-16 12:12:57 +08:00
|
|
|
const IntegerType *IntTy =
|
|
|
|
dyn_cast<IntegerType>(CalleeFn->getReturnType());
|
|
|
|
const AttributeSet &Attrs =
|
|
|
|
CalleeFn->getAttributes().getRetAttributes();
|
|
|
|
if (IntTy && IntTy->getBitWidth() <= 32)
|
|
|
|
return Attrs.hasAttribute(SignExt ? Attribute::SExt :
|
|
|
|
Attribute::ZExt);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// If this is a copy from another register, we recursively check source.
|
2019-08-02 07:27:28 +08:00
|
|
|
if (!Register::isVirtualRegister(SrcReg))
|
2017-10-16 12:12:57 +08:00
|
|
|
return false;
|
|
|
|
const MachineInstr *SrcMI = MRI->getVRegDef(SrcReg);
|
|
|
|
if (SrcMI != NULL)
|
|
|
|
return isSignOrZeroExtended(*SrcMI, SignExt, Depth);
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
case PPC::ANDIo:
|
|
|
|
case PPC::ANDISo:
|
|
|
|
case PPC::ORI:
|
|
|
|
case PPC::ORIS:
|
|
|
|
case PPC::XORI:
|
|
|
|
case PPC::XORIS:
|
|
|
|
case PPC::ANDIo8:
|
|
|
|
case PPC::ANDISo8:
|
|
|
|
case PPC::ORI8:
|
|
|
|
case PPC::ORIS8:
|
|
|
|
case PPC::XORI8:
|
|
|
|
case PPC::XORIS8: {
|
|
|
|
// logical operation with 16-bit immediate does not change the upper bits.
|
|
|
|
// So, we track the operand register as we do for register copy.
|
Apply llvm-prefer-register-over-unsigned from clang-tidy to LLVM
Summary:
This clang-tidy check is looking for unsigned integer variables whose initializer
starts with an implicit cast from llvm::Register and changes the type of the
variable to llvm::Register (dropping the llvm:: where possible).
Partial reverts in:
X86FrameLowering.cpp - Some functions return unsigned and arguably should be MCRegister
X86FixupLEAs.cpp - Some functions return unsigned and arguably should be MCRegister
X86FrameLowering.cpp - Some functions return unsigned and arguably should be MCRegister
HexagonBitSimplify.cpp - Function takes BitTracker::RegisterRef which appears to be unsigned&
MachineVerifier.cpp - Ambiguous operator==() given MCRegister and const Register
PPCFastISel.cpp - No Register::operator-=()
PeepholeOptimizer.cpp - TargetInstrInfo::optimizeLoadInstr() takes an unsigned&
MachineTraceMetrics.cpp - MachineTraceMetrics lacks a suitable constructor
Manual fixups in:
ARMFastISel.cpp - ARMEmitLoad() now takes a Register& instead of unsigned&
HexagonSplitDouble.cpp - Ternary operator was ambiguous between unsigned/Register
HexagonConstExtenders.cpp - Has a local class named Register, used llvm::Register instead of Register.
PPCFastISel.cpp - PPCEmitLoad() now takes a Register& instead of unsigned&
Depends on D65919
Reviewers: arsenm, bogner, craig.topper, RKSimon
Reviewed By: arsenm
Subscribers: RKSimon, craig.topper, lenary, aemerson, wuzish, jholewinski, MatzeB, qcolombet, dschuff, jyknight, dylanmckay, sdardis, nemanjai, jvesely, wdng, nhaehnle, sbc100, jgravelle-google, kristof.beyls, hiraditya, aheejin, kbarton, fedor.sergeev, javed.absar, asb, rbar, johnrusso, simoncook, apazos, sabuasal, niosHD, jrtc27, MaskRay, zzheng, edward-jones, atanasyan, rogfer01, MartinMosbeck, brucehoult, the_o, tpr, PkmX, jocewei, jsji, Petar.Avramovic, asbirlea, Jim, s.egerton, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D65962
llvm-svn: 369041
2019-08-16 03:22:08 +08:00
|
|
|
Register SrcReg = MI.getOperand(1).getReg();
|
2019-08-02 07:27:28 +08:00
|
|
|
if (!Register::isVirtualRegister(SrcReg))
|
2017-10-16 12:12:57 +08:00
|
|
|
return false;
|
|
|
|
const MachineInstr *SrcMI = MRI->getVRegDef(SrcReg);
|
|
|
|
if (SrcMI != NULL)
|
|
|
|
return isSignOrZeroExtended(*SrcMI, SignExt, Depth);
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
// If all incoming values are sign-/zero-extended,
|
2017-11-29 12:09:29 +08:00
|
|
|
// the output of OR, ISEL or PHI is also sign-/zero-extended.
|
2017-10-16 12:12:57 +08:00
|
|
|
case PPC::OR:
|
|
|
|
case PPC::OR8:
|
|
|
|
case PPC::ISEL:
|
|
|
|
case PPC::PHI: {
|
|
|
|
if (Depth >= MAX_DEPTH)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
// The input registers for PHI are operand 1, 3, ...
|
|
|
|
// The input registers for others are operand 1 and 2.
|
|
|
|
unsigned E = 3, D = 1;
|
|
|
|
if (MI.getOpcode() == PPC::PHI) {
|
|
|
|
E = MI.getNumOperands();
|
|
|
|
D = 2;
|
|
|
|
}
|
|
|
|
|
|
|
|
for (unsigned I = 1; I != E; I += D) {
|
|
|
|
if (MI.getOperand(I).isReg()) {
|
Apply llvm-prefer-register-over-unsigned from clang-tidy to LLVM
Summary:
This clang-tidy check is looking for unsigned integer variables whose initializer
starts with an implicit cast from llvm::Register and changes the type of the
variable to llvm::Register (dropping the llvm:: where possible).
Partial reverts in:
X86FrameLowering.cpp - Some functions return unsigned and arguably should be MCRegister
X86FixupLEAs.cpp - Some functions return unsigned and arguably should be MCRegister
X86FrameLowering.cpp - Some functions return unsigned and arguably should be MCRegister
HexagonBitSimplify.cpp - Function takes BitTracker::RegisterRef which appears to be unsigned&
MachineVerifier.cpp - Ambiguous operator==() given MCRegister and const Register
PPCFastISel.cpp - No Register::operator-=()
PeepholeOptimizer.cpp - TargetInstrInfo::optimizeLoadInstr() takes an unsigned&
MachineTraceMetrics.cpp - MachineTraceMetrics lacks a suitable constructor
Manual fixups in:
ARMFastISel.cpp - ARMEmitLoad() now takes a Register& instead of unsigned&
HexagonSplitDouble.cpp - Ternary operator was ambiguous between unsigned/Register
HexagonConstExtenders.cpp - Has a local class named Register, used llvm::Register instead of Register.
PPCFastISel.cpp - PPCEmitLoad() now takes a Register& instead of unsigned&
Depends on D65919
Reviewers: arsenm, bogner, craig.topper, RKSimon
Reviewed By: arsenm
Subscribers: RKSimon, craig.topper, lenary, aemerson, wuzish, jholewinski, MatzeB, qcolombet, dschuff, jyknight, dylanmckay, sdardis, nemanjai, jvesely, wdng, nhaehnle, sbc100, jgravelle-google, kristof.beyls, hiraditya, aheejin, kbarton, fedor.sergeev, javed.absar, asb, rbar, johnrusso, simoncook, apazos, sabuasal, niosHD, jrtc27, MaskRay, zzheng, edward-jones, atanasyan, rogfer01, MartinMosbeck, brucehoult, the_o, tpr, PkmX, jocewei, jsji, Petar.Avramovic, asbirlea, Jim, s.egerton, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D65962
llvm-svn: 369041
2019-08-16 03:22:08 +08:00
|
|
|
Register SrcReg = MI.getOperand(I).getReg();
|
2019-08-02 07:27:28 +08:00
|
|
|
if (!Register::isVirtualRegister(SrcReg))
|
2017-10-16 12:12:57 +08:00
|
|
|
return false;
|
|
|
|
const MachineInstr *SrcMI = MRI->getVRegDef(SrcReg);
|
|
|
|
if (SrcMI == NULL || !isSignOrZeroExtended(*SrcMI, SignExt, Depth+1))
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2017-11-29 12:09:29 +08:00
|
|
|
// If at least one of the incoming values of an AND is zero extended
|
|
|
|
// then the output is also zero-extended. If both of the incoming values
|
|
|
|
// are sign-extended then the output is also sign extended.
|
|
|
|
case PPC::AND:
|
|
|
|
case PPC::AND8: {
|
|
|
|
if (Depth >= MAX_DEPTH)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
assert(MI.getOperand(1).isReg() && MI.getOperand(2).isReg());
|
|
|
|
|
Apply llvm-prefer-register-over-unsigned from clang-tidy to LLVM
Summary:
This clang-tidy check is looking for unsigned integer variables whose initializer
starts with an implicit cast from llvm::Register and changes the type of the
variable to llvm::Register (dropping the llvm:: where possible).
Partial reverts in:
X86FrameLowering.cpp - Some functions return unsigned and arguably should be MCRegister
X86FixupLEAs.cpp - Some functions return unsigned and arguably should be MCRegister
X86FrameLowering.cpp - Some functions return unsigned and arguably should be MCRegister
HexagonBitSimplify.cpp - Function takes BitTracker::RegisterRef which appears to be unsigned&
MachineVerifier.cpp - Ambiguous operator==() given MCRegister and const Register
PPCFastISel.cpp - No Register::operator-=()
PeepholeOptimizer.cpp - TargetInstrInfo::optimizeLoadInstr() takes an unsigned&
MachineTraceMetrics.cpp - MachineTraceMetrics lacks a suitable constructor
Manual fixups in:
ARMFastISel.cpp - ARMEmitLoad() now takes a Register& instead of unsigned&
HexagonSplitDouble.cpp - Ternary operator was ambiguous between unsigned/Register
HexagonConstExtenders.cpp - Has a local class named Register, used llvm::Register instead of Register.
PPCFastISel.cpp - PPCEmitLoad() now takes a Register& instead of unsigned&
Depends on D65919
Reviewers: arsenm, bogner, craig.topper, RKSimon
Reviewed By: arsenm
Subscribers: RKSimon, craig.topper, lenary, aemerson, wuzish, jholewinski, MatzeB, qcolombet, dschuff, jyknight, dylanmckay, sdardis, nemanjai, jvesely, wdng, nhaehnle, sbc100, jgravelle-google, kristof.beyls, hiraditya, aheejin, kbarton, fedor.sergeev, javed.absar, asb, rbar, johnrusso, simoncook, apazos, sabuasal, niosHD, jrtc27, MaskRay, zzheng, edward-jones, atanasyan, rogfer01, MartinMosbeck, brucehoult, the_o, tpr, PkmX, jocewei, jsji, Petar.Avramovic, asbirlea, Jim, s.egerton, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D65962
llvm-svn: 369041
2019-08-16 03:22:08 +08:00
|
|
|
Register SrcReg1 = MI.getOperand(1).getReg();
|
|
|
|
Register SrcReg2 = MI.getOperand(2).getReg();
|
2017-11-29 12:09:29 +08:00
|
|
|
|
2019-08-02 07:27:28 +08:00
|
|
|
if (!Register::isVirtualRegister(SrcReg1) ||
|
|
|
|
!Register::isVirtualRegister(SrcReg2))
|
|
|
|
return false;
|
2017-11-29 12:09:29 +08:00
|
|
|
|
|
|
|
const MachineInstr *MISrc1 = MRI->getVRegDef(SrcReg1);
|
|
|
|
const MachineInstr *MISrc2 = MRI->getVRegDef(SrcReg2);
|
|
|
|
if (!MISrc1 || !MISrc2)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
if(SignExt)
|
|
|
|
return isSignOrZeroExtended(*MISrc1, SignExt, Depth+1) &&
|
|
|
|
isSignOrZeroExtended(*MISrc2, SignExt, Depth+1);
|
|
|
|
else
|
|
|
|
return isSignOrZeroExtended(*MISrc1, SignExt, Depth+1) ||
|
|
|
|
isSignOrZeroExtended(*MISrc2, SignExt, Depth+1);
|
|
|
|
}
|
|
|
|
|
2017-10-16 12:12:57 +08:00
|
|
|
default:
|
2017-10-18 18:31:19 +08:00
|
|
|
break;
|
2017-10-16 12:12:57 +08:00
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
2019-06-12 01:40:39 +08:00
|
|
|
|
|
|
|
bool PPCInstrInfo::isBDNZ(unsigned Opcode) const {
|
|
|
|
return (Opcode == (Subtarget.isPPC64() ? PPC::BDNZ8 : PPC::BDNZ));
|
|
|
|
}
|
|
|
|
|
|
|
|
// Analyze \p L for hardware-loop conversion. Returns false on success
// (per the TargetInstrInfo contract), filling in \p IndVarInst and
// \p CmpInst; returns true if the loop is not recognized.
bool PPCInstrInfo::analyzeLoop(MachineLoop &L, MachineInstr *&IndVarInst,
                               MachineInstr *&CmpInst) const {
  // We really "analyze" only CTR loops right now: the bottom block must be
  // terminated by a BDNZ/BDNZ8 (decrement CTR and branch if nonzero).
  MachineBasicBlock *Bottom = L.getBottomBlock();
  MachineBasicBlock::iterator Term = Bottom->getFirstTerminator();
  if (Term == Bottom->end() || !isBDNZ(Term->getOpcode()))
    return true;

  // A CTR loop has no explicit induction-variable update instruction; the
  // branch itself both decrements and tests the counter.
  IndVarInst = nullptr;
  CmpInst = &*Term;
  return false;
}
|
|
|
|
|
|
|
|
// Scan \p PreHeader for the CTR set-up instruction (MTCTRloop/MTCTR8loop)
// of a hardware loop; return it, or nullptr if none is present.
MachineInstr *
PPCInstrInfo::findLoopInstr(MachineBasicBlock &PreHeader) const {

  const unsigned SetupOpc =
      Subtarget.isPPC64() ? PPC::MTCTR8loop : PPC::MTCTRloop;

  // The loop set-up instruction should be in preheader.
  for (MachineBasicBlock::instr_iterator It = PreHeader.instr_begin(),
                                         End = PreHeader.instr_end();
       It != End; ++It)
    if (It->getOpcode() == SetupOpc)
      return &*It;
  return nullptr;
}
|
|
|
|
|
|
|
|
// Reduce the trip count of the CTR loop ending in \p MBB by one iteration
// (used by the machine pipeliner when peeling iterations).
//
// Returns the remaining compile-time trip count when it is a known constant,
// the loop-count virtual register when the count is a run-time value, or 0
// when the loop (or its count) could not be located or the loop was removed
// entirely. \p Cond receives the branch condition to use for the
// "are we done" check the caller will insert.
unsigned PPCInstrInfo::reduceLoopCount(
    MachineBasicBlock &MBB, MachineBasicBlock &PreHeader, MachineInstr *IndVar,
    MachineInstr &Cmp, SmallVectorImpl<MachineOperand> &Cond,
    SmallVectorImpl<MachineInstr *> &PrevInsts, unsigned Iter,
    unsigned MaxIter) const {
  // We expect a hardware loop currently. This means that IndVar is set
  // to null, and the compare is the ENDLOOP instruction.
  assert((!IndVar) && isBDNZ(Cmp.getOpcode()) && "Expecting a CTR loop");
  MachineFunction *MF = MBB.getParent();
  DebugLoc DL = Cmp.getDebugLoc();
  // The MTCTRloop/MTCTR8loop set-up lives in the preheader; without it we
  // cannot identify the loop count.
  MachineInstr *Loop = findLoopInstr(PreHeader);
  if (!Loop)
    return 0;
  Register LoopCountReg = Loop->getOperand(0).getReg();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  // Require a unique (SSA) definition of the count register.
  MachineInstr *LoopCount = MRI.getUniqueVRegDef(LoopCountReg);

  if (!LoopCount)
    return 0;
  // If the loop trip count is a compile-time value, then just change the
  // value.
  if (LoopCount->getOpcode() == PPC::LI8 || LoopCount->getOpcode() == PPC::LI) {
    int64_t Offset = LoopCount->getOperand(1).getImm();
    // A count of 1 (or less) means no iterations remain after the reduction:
    // delete both the count materialization and the loop set-up.
    if (Offset <= 1) {
      LoopCount->eraseFromParent();
      Loop->eraseFromParent();
      return 0;
    }
    LoopCount->getOperand(1).setImm(Offset - 1);
    return Offset - 1;
  }

  // The loop trip count is a run-time value.
  // We need to subtract one from the trip count,
  // and insert branch later to check if we're done with the loop.

  // Since BDZ/BDZ8 that we will insert will also decrease the ctr by 1,
  // so we don't need to generate any thing here.
  // Cond encodes: immediate predicate 0 plus the CTR register (as a def),
  // which the caller turns into the appropriate CTR branch.
  Cond.push_back(MachineOperand::CreateImm(0));
  Cond.push_back(MachineOperand::CreateReg(
      Subtarget.isPPC64() ? PPC::CTR8 : PPC::CTR, true));
  return LoopCountReg;
}
|
|
|
|
|
2019-07-02 11:28:52 +08:00
|
|
|
// Return true if get the base operand, byte offset of an instruction and the
|
|
|
|
// memory width. Width is the size of memory that is being loaded/stored.
|
|
|
|
bool PPCInstrInfo::getMemOperandWithOffsetWidth(
|
|
|
|
const MachineInstr &LdSt,
|
|
|
|
const MachineOperand *&BaseReg,
|
|
|
|
int64_t &Offset,
|
|
|
|
unsigned &Width,
|
|
|
|
const TargetRegisterInfo *TRI) const {
|
|
|
|
assert(LdSt.mayLoadOrStore() && "Expected a memory operation.");
|
|
|
|
|
|
|
|
// Handle only loads/stores with base register followed by immediate offset.
|
|
|
|
if (LdSt.getNumExplicitOperands() != 3)
|
|
|
|
return false;
|
|
|
|
if (!LdSt.getOperand(1).isImm() || !LdSt.getOperand(2).isReg())
|
|
|
|
return false;
|
|
|
|
|
|
|
|
if (!LdSt.hasOneMemOperand())
|
|
|
|
return false;
|
|
|
|
|
|
|
|
Width = (*LdSt.memoperands_begin())->getSize();
|
|
|
|
Offset = LdSt.getOperand(1).getImm();
|
|
|
|
BaseReg = &LdSt.getOperand(2);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
bool PPCInstrInfo::areMemAccessesTriviallyDisjoint(
|
|
|
|
const MachineInstr &MIa, const MachineInstr &MIb,
|
|
|
|
AliasAnalysis * /*AA*/) const {
|
|
|
|
assert(MIa.mayLoadOrStore() && "MIa must be a load or store.");
|
|
|
|
assert(MIb.mayLoadOrStore() && "MIb must be a load or store.");
|
|
|
|
|
|
|
|
if (MIa.hasUnmodeledSideEffects() || MIb.hasUnmodeledSideEffects() ||
|
|
|
|
MIa.hasOrderedMemoryRef() || MIb.hasOrderedMemoryRef())
|
|
|
|
return false;
|
|
|
|
|
|
|
|
// Retrieve the base register, offset from the base register and width. Width
|
|
|
|
// is the size of memory that is being loaded/stored (e.g. 1, 2, 4). If
|
|
|
|
// base registers are identical, and the offset of a lower memory access +
|
|
|
|
// the width doesn't overlap the offset of a higher memory access,
|
|
|
|
// then the memory accesses are different.
|
|
|
|
const TargetRegisterInfo *TRI = &getRegisterInfo();
|
|
|
|
const MachineOperand *BaseOpA = nullptr, *BaseOpB = nullptr;
|
|
|
|
int64_t OffsetA = 0, OffsetB = 0;
|
|
|
|
unsigned int WidthA = 0, WidthB = 0;
|
|
|
|
if (getMemOperandWithOffsetWidth(MIa, BaseOpA, OffsetA, WidthA, TRI) &&
|
|
|
|
getMemOperandWithOffsetWidth(MIb, BaseOpB, OffsetB, WidthB, TRI)) {
|
|
|
|
if (BaseOpA->isIdenticalTo(*BaseOpB)) {
|
|
|
|
int LowOffset = std::min(OffsetA, OffsetB);
|
|
|
|
int HighOffset = std::max(OffsetA, OffsetB);
|
|
|
|
int LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
|
|
|
|
if (LowOffset + LowWidth <= HighOffset)
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|