//===--------------- PPCVSXFMAMutate.cpp - VSX FMA Mutation ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass mutates the form of VSX FMA instructions to avoid unnecessary
// copies.
//
//===----------------------------------------------------------------------===//
#include "MCTargetDesc/PPCPredicates.h"
#include "PPC.h"
#include "PPCInstrBuilder.h"
#include "PPCInstrInfo.h"
#include "PPCMachineFunctionInfo.h"
#include "PPCTargetMachine.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
|
|
|
|
using namespace llvm;
|
|
|
|
|
2016-03-03 09:27:35 +08:00
|
|
|
// Temporarily disable FMA mutation by default, since it doesn't handle
|
|
|
|
// cross-basic-block intervals well.
|
|
|
|
// See: http://lists.llvm.org/pipermail/llvm-dev/2016-February/095669.html
|
|
|
|
// http://reviews.llvm.org/D17087
|
|
|
|
static cl::opt<bool> DisableVSXFMAMutate(
|
|
|
|
"disable-ppc-vsx-fma-mutation",
|
|
|
|
cl::desc("Disable VSX FMA instruction mutation"), cl::init(true),
|
|
|
|
cl::Hidden);
|
2015-02-02 05:51:22 +08:00
|
|
|
|
|
|
|
#define DEBUG_TYPE "ppc-vsx-fma-mutate"

namespace llvm { namespace PPC {
  // Returns the M-form opcode corresponding to an A-form VSX FMA opcode,
  // or -1 if Opcode is not a mutable A-form VSX FMA. Defined in the
  // tablegen'erated instruction-info support code.
  int getAltVSXFMAOpcode(uint16_t Opcode);
} }
namespace {
|
|
|
|
// PPCVSXFMAMutate pass - For copies between VSX registers and non-VSX registers
|
|
|
|
// (Altivec and scalar floating-point registers), we need to transform the
|
|
|
|
// copies into subregister copies with other restrictions.
|
|
|
|
struct PPCVSXFMAMutate : public MachineFunctionPass {
|
|
|
|
static char ID;
|
|
|
|
PPCVSXFMAMutate() : MachineFunctionPass(ID) {
|
|
|
|
initializePPCVSXFMAMutatePass(*PassRegistry::getPassRegistry());
|
|
|
|
}
|
|
|
|
|
|
|
|
LiveIntervals *LIS;
|
|
|
|
const PPCInstrInfo *TII;
|
|
|
|
|
|
|
|
protected:
|
|
|
|
bool processBlock(MachineBasicBlock &MBB) {
|
|
|
|
bool Changed = false;
|
|
|
|
|
|
|
|
MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
|
|
|
|
const TargetRegisterInfo *TRI = &TII->getRegisterInfo();
|
|
|
|
for (MachineBasicBlock::iterator I = MBB.begin(), IE = MBB.end();
|
|
|
|
I != IE; ++I) {
|
2016-07-27 21:24:16 +08:00
|
|
|
MachineInstr &MI = *I;
|
2015-02-02 05:51:22 +08:00
|
|
|
|
|
|
|
// The default (A-type) VSX FMA form kills the addend (it is taken from
|
|
|
|
// the target register, which is then updated to reflect the result of
|
|
|
|
// the FMA). If the instruction, however, kills one of the registers
|
|
|
|
// used for the product, then we can use the M-form instruction (which
|
|
|
|
// will take that value from the to-be-defined register).
|
|
|
|
|
2016-07-27 21:24:16 +08:00
|
|
|
int AltOpc = PPC::getAltVSXFMAOpcode(MI.getOpcode());
|
2015-02-02 05:51:22 +08:00
|
|
|
if (AltOpc == -1)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
// This pass is run after register coalescing, and so we're looking for
|
|
|
|
// a situation like this:
|
|
|
|
// ...
|
2017-12-07 18:40:31 +08:00
|
|
|
// %5 = COPY %9; VSLRC:%5,%9
|
2017-11-30 20:12:19 +08:00
|
|
|
// %5<def,tied1> = XSMADDADP %5<tied0>, %17, %16,
|
2017-12-07 18:40:31 +08:00
|
|
|
// implicit %rm; VSLRC:%5,%17,%16
|
2015-02-02 05:51:22 +08:00
|
|
|
// ...
|
2017-11-30 20:12:19 +08:00
|
|
|
// %9<def,tied1> = XSMADDADP %9<tied0>, %17, %19,
|
2017-12-07 18:40:31 +08:00
|
|
|
// implicit %rm; VSLRC:%9,%17,%19
|
2015-02-02 05:51:22 +08:00
|
|
|
// ...
|
|
|
|
// Where we can eliminate the copy by changing from the A-type to the
|
|
|
|
// M-type instruction. Specifically, for this example, this means:
|
2017-11-30 20:12:19 +08:00
|
|
|
// %5<def,tied1> = XSMADDADP %5<tied0>, %17, %16,
|
2017-12-07 18:40:31 +08:00
|
|
|
// implicit %rm; VSLRC:%5,%17,%16
|
2015-02-02 05:51:22 +08:00
|
|
|
// is replaced by:
|
2017-11-30 20:12:19 +08:00
|
|
|
// %16<def,tied1> = XSMADDMDP %16<tied0>, %18, %9,
|
2017-12-07 18:40:31 +08:00
|
|
|
// implicit %rm; VSLRC:%16,%18,%9
|
|
|
|
// and we remove: %5 = COPY %9; VSLRC:%5,%9
|
2015-02-02 05:51:22 +08:00
|
|
|
|
2016-07-27 21:24:16 +08:00
|
|
|
SlotIndex FMAIdx = LIS->getInstructionIndex(MI);
|
2015-02-02 05:51:22 +08:00
|
|
|
|
|
|
|
VNInfo *AddendValNo =
|
2016-07-27 21:24:16 +08:00
|
|
|
LIS->getInterval(MI.getOperand(1).getReg()).Query(FMAIdx).valueIn();
|
2015-08-27 07:41:53 +08:00
|
|
|
|
|
|
|
// This can be null if the register is undef.
|
|
|
|
if (!AddendValNo)
|
2015-08-22 05:34:24 +08:00
|
|
|
continue;
|
|
|
|
|
2015-02-02 05:51:22 +08:00
|
|
|
MachineInstr *AddendMI = LIS->getInstructionFromIndex(AddendValNo->def);
|
|
|
|
|
|
|
|
// The addend and this instruction must be in the same block.
|
|
|
|
|
2016-07-27 21:24:16 +08:00
|
|
|
if (!AddendMI || AddendMI->getParent() != MI.getParent())
|
2015-02-02 05:51:22 +08:00
|
|
|
continue;
|
|
|
|
|
|
|
|
// The addend must be a full copy within the same register class.
|
|
|
|
|
|
|
|
if (!AddendMI->isFullCopy())
|
|
|
|
continue;
|
|
|
|
|
|
|
|
unsigned AddendSrcReg = AddendMI->getOperand(1).getReg();
|
|
|
|
if (TargetRegisterInfo::isVirtualRegister(AddendSrcReg)) {
|
|
|
|
if (MRI.getRegClass(AddendMI->getOperand(0).getReg()) !=
|
|
|
|
MRI.getRegClass(AddendSrcReg))
|
|
|
|
continue;
|
|
|
|
} else {
|
|
|
|
// If AddendSrcReg is a physical register, make sure the destination
|
|
|
|
// register class contains it.
|
|
|
|
if (!MRI.getRegClass(AddendMI->getOperand(0).getReg())
|
|
|
|
->contains(AddendSrcReg))
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
// In theory, there could be other uses of the addend copy before this
|
|
|
|
// fma. We could deal with this, but that would require additional
|
|
|
|
// logic below and I suspect it will not occur in any relevant
|
|
|
|
// situations. Additionally, check whether the copy source is killed
|
|
|
|
// prior to the fma. In order to replace the addend here with the
|
|
|
|
// source of the copy, it must still be live here. We can't use
|
|
|
|
// interval testing for a physical register, so as long as we're
|
|
|
|
// walking the MIs we may as well test liveness here.
|
[PowerPC] Fix the PPCInstrInfo::getInstrLatency implementation
PowerPC uses itineraries to describe processor pipelines (and dispatch-group
restrictions for P7/P8 cores). Unfortunately, the target-independent
implementation of TII.getInstrLatency calls ItinData->getStageLatency, and that
looks for the largest cycle count in the pipeline for any given instruction.
This, however, yields the wrong answer for the PPC itineraries, because we
don't encode the full pipeline. Because the functional units are fully
pipelined, we only model the initial stages (there are no relevant hazards in
the later stages to model), and so the technique employed by getStageLatency
does not really work. Instead, we should take the maximum output operand
latency, and that's what PPCInstrInfo::getInstrLatency now does.
This caused some test-case churn, including two unfortunate side effects.
First, the new arrangement of copies we get from function parameters now
sometimes blocks VSX FMA mutation (a FIXME has been added to the code and the
test cases), and we have one significant test-suite regression:
SingleSource/Benchmarks/BenchmarkGame/spectral-norm
56.4185% +/- 18.9398%
In this benchmark we have a loop with a vectorized FP divide, and it with the
new scheduling both divides end up in the same dispatch group (which in this
case seems to cause a problem, although why is not exactly clear). The grouping
structure is hard to predict from the bottom of the loop, and there may not be
much we can do to fix this.
Very few other test-suite performance effects were really significant, but
almost all weakly favor this change. However, in light of the issues
highlighted above, I've left the old behavior available via a
command-line flag.
llvm-svn: 242188
2015-07-15 04:02:02 +08:00
|
|
|
//
|
|
|
|
// FIXME: There is a case that occurs in practice, like this:
|
2017-12-07 18:40:31 +08:00
|
|
|
// %9 = COPY %f1; VSSRC:%9
|
[PowerPC] Fix the PPCInstrInfo::getInstrLatency implementation
PowerPC uses itineraries to describe processor pipelines (and dispatch-group
restrictions for P7/P8 cores). Unfortunately, the target-independent
implementation of TII.getInstrLatency calls ItinData->getStageLatency, and that
looks for the largest cycle count in the pipeline for any given instruction.
This, however, yields the wrong answer for the PPC itineraries, because we
don't encode the full pipeline. Because the functional units are fully
pipelined, we only model the initial stages (there are no relevant hazards in
the later stages to model), and so the technique employed by getStageLatency
does not really work. Instead, we should take the maximum output operand
latency, and that's what PPCInstrInfo::getInstrLatency now does.
This caused some test-case churn, including two unfortunate side effects.
First, the new arrangement of copies we get from function parameters now
sometimes blocks VSX FMA mutation (a FIXME has been added to the code and the
test cases), and we have one significant test-suite regression:
SingleSource/Benchmarks/BenchmarkGame/spectral-norm
56.4185% +/- 18.9398%
In this benchmark we have a loop with a vectorized FP divide, and it with the
new scheduling both divides end up in the same dispatch group (which in this
case seems to cause a problem, although why is not exactly clear). The grouping
structure is hard to predict from the bottom of the loop, and there may not be
much we can do to fix this.
Very few other test-suite performance effects were really significant, but
almost all weakly favor this change. However, in light of the issues
highlighted above, I've left the old behavior available via a
command-line flag.
llvm-svn: 242188
2015-07-15 04:02:02 +08:00
|
|
|
// ...
|
2017-12-07 18:40:31 +08:00
|
|
|
// %6 = COPY %9; VSSRC:%6,%9
|
|
|
|
// %7 = COPY %9; VSSRC:%7,%9
|
2017-11-30 20:12:19 +08:00
|
|
|
// %9<def,tied1> = XSMADDASP %9<tied0>, %1, %4; VSSRC:
|
|
|
|
// %6<def,tied1> = XSMADDASP %6<tied0>, %1, %2; VSSRC:
|
|
|
|
// %7<def,tied1> = XSMADDASP %7<tied0>, %1, %3; VSSRC:
|
[PowerPC] Fix the PPCInstrInfo::getInstrLatency implementation
PowerPC uses itineraries to describe processor pipelines (and dispatch-group
restrictions for P7/P8 cores). Unfortunately, the target-independent
implementation of TII.getInstrLatency calls ItinData->getStageLatency, and that
looks for the largest cycle count in the pipeline for any given instruction.
This, however, yields the wrong answer for the PPC itineraries, because we
don't encode the full pipeline. Because the functional units are fully
pipelined, we only model the initial stages (there are no relevant hazards in
the later stages to model), and so the technique employed by getStageLatency
does not really work. Instead, we should take the maximum output operand
latency, and that's what PPCInstrInfo::getInstrLatency now does.
This caused some test-case churn, including two unfortunate side effects.
First, the new arrangement of copies we get from function parameters now
sometimes blocks VSX FMA mutation (a FIXME has been added to the code and the
test cases), and we have one significant test-suite regression:
SingleSource/Benchmarks/BenchmarkGame/spectral-norm
56.4185% +/- 18.9398%
In this benchmark we have a loop with a vectorized FP divide, and it with the
new scheduling both divides end up in the same dispatch group (which in this
case seems to cause a problem, although why is not exactly clear). The grouping
structure is hard to predict from the bottom of the loop, and there may not be
much we can do to fix this.
Very few other test-suite performance effects were really significant, but
almost all weakly favor this change. However, in light of the issues
highlighted above, I've left the old behavior available via a
command-line flag.
llvm-svn: 242188
2015-07-15 04:02:02 +08:00
|
|
|
// which prevents an otherwise-profitable transformation.
|
2015-02-02 05:51:22 +08:00
|
|
|
bool OtherUsers = false, KillsAddendSrc = false;
|
|
|
|
for (auto J = std::prev(I), JE = MachineBasicBlock::iterator(AddendMI);
|
|
|
|
J != JE; --J) {
|
|
|
|
if (J->readsVirtualRegister(AddendMI->getOperand(0).getReg())) {
|
|
|
|
OtherUsers = true;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
if (J->modifiesRegister(AddendSrcReg, TRI) ||
|
|
|
|
J->killsRegister(AddendSrcReg, TRI)) {
|
|
|
|
KillsAddendSrc = true;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (OtherUsers || KillsAddendSrc)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
|
Codegen: [PPC] Fix PPCVSXFMAMutate to handle duplicates.
The purpose of PPCVSXFMAMutate is to elide copies by changing FMA forms
on PPC.
%vreg6<def> = COPY %vreg96
%vreg6<def,tied1> = XSMADDASP %vreg6<tied0>, %vreg5<kill>, %vreg7
;v6 = v6 + v5 * v7
is replaced by
%vreg5<def,tied1> = XSMADDMSP %vreg5<tied0>, %vreg7, %vreg96
;v5 = v5 * v7 + v96
This was broken in the case where the target register was also used as a
multiplicand. Fix this case by checking for it and replacing both uses
with the copied register.
%vreg6<def> = COPY %vreg96
%vreg6<def,tied1> = XSMADDASP %vreg6<tied0>, %vreg5<kill>, %vreg6
;v6 = v6 + v5 * v6
is replaced by
%vreg5<def,tied1> = XSMADDMSP %vreg5<tied0>, %vreg96, %vreg96
;v5 = v5 * v96 + v96
llvm-svn: 259617
2016-02-03 09:41:09 +08:00
|
|
|
// The transformation doesn't work well with things like:
|
2017-11-30 20:12:19 +08:00
|
|
|
// %5 = A-form-op %5, %11, %5;
|
|
|
|
// unless %11 is also a kill, so skip when it is not,
|
Codegen: [PPC] Fix PPCVSXFMAMutate to handle duplicates.
The purpose of PPCVSXFMAMutate is to elide copies by changing FMA forms
on PPC.
%vreg6<def> = COPY %vreg96
%vreg6<def,tied1> = XSMADDASP %vreg6<tied0>, %vreg5<kill>, %vreg7
;v6 = v6 + v5 * v7
is replaced by
%vreg5<def,tied1> = XSMADDMSP %vreg5<tied0>, %vreg7, %vreg96
;v5 = v5 * v7 + v96
This was broken in the case where the target register was also used as a
multiplicand. Fix this case by checking for it and replacing both uses
with the copied register.
%vreg6<def> = COPY %vreg96
%vreg6<def,tied1> = XSMADDASP %vreg6<tied0>, %vreg5<kill>, %vreg6
;v6 = v6 + v5 * v6
is replaced by
%vreg5<def,tied1> = XSMADDMSP %vreg5<tied0>, %vreg96, %vreg96
;v5 = v5 * v96 + v96
llvm-svn: 259617
2016-02-03 09:41:09 +08:00
|
|
|
// and check operand 3 to see it is also a kill to handle the case:
|
2017-11-30 20:12:19 +08:00
|
|
|
// %5 = A-form-op %5, %5, %11;
|
|
|
|
// where %5 and %11 are both kills. This case would be skipped
|
Codegen: [PPC] Fix PPCVSXFMAMutate to handle duplicates.
The purpose of PPCVSXFMAMutate is to elide copies by changing FMA forms
on PPC.
%vreg6<def> = COPY %vreg96
%vreg6<def,tied1> = XSMADDASP %vreg6<tied0>, %vreg5<kill>, %vreg7
;v6 = v6 + v5 * v7
is replaced by
%vreg5<def,tied1> = XSMADDMSP %vreg5<tied0>, %vreg7, %vreg96
;v5 = v5 * v7 + v96
This was broken in the case where the target register was also used as a
multiplicand. Fix this case by checking for it and replacing both uses
with the copied register.
%vreg6<def> = COPY %vreg96
%vreg6<def,tied1> = XSMADDASP %vreg6<tied0>, %vreg5<kill>, %vreg6
;v6 = v6 + v5 * v6
is replaced by
%vreg5<def,tied1> = XSMADDMSP %vreg5<tied0>, %vreg96, %vreg96
;v5 = v5 * v96 + v96
llvm-svn: 259617
2016-02-03 09:41:09 +08:00
|
|
|
// otherwise.
|
2016-07-27 21:24:16 +08:00
|
|
|
unsigned OldFMAReg = MI.getOperand(0).getReg();
|
Codegen: [PPC] Fix PPCVSXFMAMutate to handle duplicates.
The purpose of PPCVSXFMAMutate is to elide copies by changing FMA forms
on PPC.
%vreg6<def> = COPY %vreg96
%vreg6<def,tied1> = XSMADDASP %vreg6<tied0>, %vreg5<kill>, %vreg7
;v6 = v6 + v5 * v7
is replaced by
%vreg5<def,tied1> = XSMADDMSP %vreg5<tied0>, %vreg7, %vreg96
;v5 = v5 * v7 + v96
This was broken in the case where the target register was also used as a
multiplicand. Fix this case by checking for it and replacing both uses
with the copied register.
%vreg6<def> = COPY %vreg96
%vreg6<def,tied1> = XSMADDASP %vreg6<tied0>, %vreg5<kill>, %vreg6
;v6 = v6 + v5 * v6
is replaced by
%vreg5<def,tied1> = XSMADDMSP %vreg5<tied0>, %vreg96, %vreg96
;v5 = v5 * v96 + v96
llvm-svn: 259617
2016-02-03 09:41:09 +08:00
|
|
|
|
|
|
|
// Find one of the product operands that is killed by this instruction.
|
2015-02-02 05:51:22 +08:00
|
|
|
unsigned KilledProdOp = 0, OtherProdOp = 0;
|
2016-07-27 21:24:16 +08:00
|
|
|
unsigned Reg2 = MI.getOperand(2).getReg();
|
|
|
|
unsigned Reg3 = MI.getOperand(3).getReg();
|
Codegen: [PPC] Fix PPCVSXFMAMutate to handle duplicates.
The purpose of PPCVSXFMAMutate is to elide copies by changing FMA forms
on PPC.
%vreg6<def> = COPY %vreg96
%vreg6<def,tied1> = XSMADDASP %vreg6<tied0>, %vreg5<kill>, %vreg7
;v6 = v6 + v5 * v7
is replaced by
%vreg5<def,tied1> = XSMADDMSP %vreg5<tied0>, %vreg7, %vreg96
;v5 = v5 * v7 + v96
This was broken in the case where the target register was also used as a
multiplicand. Fix this case by checking for it and replacing both uses
with the copied register.
%vreg6<def> = COPY %vreg96
%vreg6<def,tied1> = XSMADDASP %vreg6<tied0>, %vreg5<kill>, %vreg6
;v6 = v6 + v5 * v6
is replaced by
%vreg5<def,tied1> = XSMADDMSP %vreg5<tied0>, %vreg96, %vreg96
;v5 = v5 * v96 + v96
llvm-svn: 259617
2016-02-03 09:41:09 +08:00
|
|
|
if (LIS->getInterval(Reg2).Query(FMAIdx).isKill()
|
|
|
|
&& Reg2 != OldFMAReg) {
|
2015-02-02 05:51:22 +08:00
|
|
|
KilledProdOp = 2;
|
|
|
|
OtherProdOp = 3;
|
Codegen: [PPC] Fix PPCVSXFMAMutate to handle duplicates.
The purpose of PPCVSXFMAMutate is to elide copies by changing FMA forms
on PPC.
%vreg6<def> = COPY %vreg96
%vreg6<def,tied1> = XSMADDASP %vreg6<tied0>, %vreg5<kill>, %vreg7
;v6 = v6 + v5 * v7
is replaced by
%vreg5<def,tied1> = XSMADDMSP %vreg5<tied0>, %vreg7, %vreg96
;v5 = v5 * v7 + v96
This was broken in the case where the target register was also used as a
multiplicand. Fix this case by checking for it and replacing both uses
with the copied register.
%vreg6<def> = COPY %vreg96
%vreg6<def,tied1> = XSMADDASP %vreg6<tied0>, %vreg5<kill>, %vreg6
;v6 = v6 + v5 * v6
is replaced by
%vreg5<def,tied1> = XSMADDMSP %vreg5<tied0>, %vreg96, %vreg96
;v5 = v5 * v96 + v96
llvm-svn: 259617
2016-02-03 09:41:09 +08:00
|
|
|
} else if (LIS->getInterval(Reg3).Query(FMAIdx).isKill()
|
|
|
|
&& Reg3 != OldFMAReg) {
|
2015-02-02 05:51:22 +08:00
|
|
|
KilledProdOp = 3;
|
|
|
|
OtherProdOp = 2;
|
|
|
|
}
|
|
|
|
|
Codegen: [PPC] Fix PPCVSXFMAMutate to handle duplicates.
The purpose of PPCVSXFMAMutate is to elide copies by changing FMA forms
on PPC.
%vreg6<def> = COPY %vreg96
%vreg6<def,tied1> = XSMADDASP %vreg6<tied0>, %vreg5<kill>, %vreg7
;v6 = v6 + v5 * v7
is replaced by
%vreg5<def,tied1> = XSMADDMSP %vreg5<tied0>, %vreg7, %vreg96
;v5 = v5 * v7 + v96
This was broken in the case where the target register was also used as a
multiplicand. Fix this case by checking for it and replacing both uses
with the copied register.
%vreg6<def> = COPY %vreg96
%vreg6<def,tied1> = XSMADDASP %vreg6<tied0>, %vreg5<kill>, %vreg6
;v6 = v6 + v5 * v6
is replaced by
%vreg5<def,tied1> = XSMADDMSP %vreg5<tied0>, %vreg96, %vreg96
;v5 = v5 * v96 + v96
llvm-svn: 259617
2016-02-03 09:41:09 +08:00
|
|
|
// If there are no usable killed product operands, then this
|
|
|
|
// transformation is likely not profitable.
|
2015-02-02 05:51:22 +08:00
|
|
|
if (!KilledProdOp)
|
|
|
|
continue;
|
|
|
|
|
2015-09-22 19:15:07 +08:00
|
|
|
// If the addend copy is used only by this MI, then the addend source
|
|
|
|
// register is likely not live here. This could be fixed (based on the
|
|
|
|
// legality checks above, the live range for the addend source register
|
|
|
|
// could be extended), but it seems likely that such a trivial copy can
|
|
|
|
// be coalesced away later, and thus is not worth the effort.
|
|
|
|
if (TargetRegisterInfo::isVirtualRegister(AddendSrcReg) &&
|
2015-08-25 07:48:28 +08:00
|
|
|
!LIS->getInterval(AddendSrcReg).liveAt(FMAIdx))
|
|
|
|
continue;
|
2015-02-02 05:51:22 +08:00
|
|
|
|
|
|
|
// Transform: (O2 * O3) + O1 -> (O2 * O1) + O3.
|
|
|
|
|
2016-07-27 21:24:16 +08:00
|
|
|
unsigned KilledProdReg = MI.getOperand(KilledProdOp).getReg();
|
|
|
|
unsigned OtherProdReg = MI.getOperand(OtherProdOp).getReg();
|
2015-02-02 05:51:22 +08:00
|
|
|
|
|
|
|
unsigned AddSubReg = AddendMI->getOperand(1).getSubReg();
|
2016-07-27 21:24:16 +08:00
|
|
|
unsigned KilledProdSubReg = MI.getOperand(KilledProdOp).getSubReg();
|
|
|
|
unsigned OtherProdSubReg = MI.getOperand(OtherProdOp).getSubReg();
|
2015-02-02 05:51:22 +08:00
|
|
|
|
|
|
|
bool AddRegKill = AddendMI->getOperand(1).isKill();
|
2016-07-27 21:24:16 +08:00
|
|
|
bool KilledProdRegKill = MI.getOperand(KilledProdOp).isKill();
|
|
|
|
bool OtherProdRegKill = MI.getOperand(OtherProdOp).isKill();
|
2015-02-02 05:51:22 +08:00
|
|
|
|
|
|
|
bool AddRegUndef = AddendMI->getOperand(1).isUndef();
|
2016-07-27 21:24:16 +08:00
|
|
|
bool KilledProdRegUndef = MI.getOperand(KilledProdOp).isUndef();
|
|
|
|
bool OtherProdRegUndef = MI.getOperand(OtherProdOp).isUndef();
|
2015-02-02 05:51:22 +08:00
|
|
|
|
2015-12-11 05:28:40 +08:00
|
|
|
// If there isn't a class that fits, we can't perform the transform.
|
|
|
|
// This is needed for correctness with a mixture of VSX and Altivec
|
|
|
|
// instructions to make sure that a low VSX register is not assigned to
|
|
|
|
// the Altivec instruction.
|
|
|
|
if (!MRI.constrainRegClass(KilledProdReg,
|
|
|
|
MRI.getRegClass(OldFMAReg)))
|
|
|
|
continue;
|
|
|
|
|
2015-02-02 05:51:22 +08:00
|
|
|
assert(OldFMAReg == AddendMI->getOperand(0).getReg() &&
|
|
|
|
"Addend copy not tied to old FMA output!");
|
|
|
|
|
2018-05-14 20:53:11 +08:00
|
|
|
LLVM_DEBUG(dbgs() << "VSX FMA Mutation:\n " << MI);
|
2015-02-02 05:51:22 +08:00
|
|
|
|
2016-07-27 21:24:16 +08:00
|
|
|
MI.getOperand(0).setReg(KilledProdReg);
|
|
|
|
MI.getOperand(1).setReg(KilledProdReg);
|
|
|
|
MI.getOperand(3).setReg(AddendSrcReg);
|
2015-02-02 05:51:22 +08:00
|
|
|
|
2016-07-27 21:24:16 +08:00
|
|
|
MI.getOperand(0).setSubReg(KilledProdSubReg);
|
|
|
|
MI.getOperand(1).setSubReg(KilledProdSubReg);
|
|
|
|
MI.getOperand(3).setSubReg(AddSubReg);
|
2015-02-02 05:51:22 +08:00
|
|
|
|
2016-07-27 21:24:16 +08:00
|
|
|
MI.getOperand(1).setIsKill(KilledProdRegKill);
|
|
|
|
MI.getOperand(3).setIsKill(AddRegKill);
|
2015-02-02 05:51:22 +08:00
|
|
|
|
2016-07-27 21:24:16 +08:00
|
|
|
MI.getOperand(1).setIsUndef(KilledProdRegUndef);
|
|
|
|
MI.getOperand(3).setIsUndef(AddRegUndef);
|
2015-02-02 05:51:22 +08:00
|
|
|
|
2016-07-27 21:24:16 +08:00
|
|
|
MI.setDesc(TII->get(AltOpc));
|
2015-02-02 05:51:22 +08:00
|
|
|
|
Codegen: [PPC] Fix PPCVSXFMAMutate to handle duplicates.
The purpose of PPCVSXFMAMutate is to elide copies by changing FMA forms
on PPC.
%vreg6<def> = COPY %vreg96
%vreg6<def,tied1> = XSMADDASP %vreg6<tied0>, %vreg5<kill>, %vreg7
;v6 = v6 + v5 * v7
is replaced by
%vreg5<def,tied1> = XSMADDMSP %vreg5<tied0>, %vreg7, %vreg96
;v5 = v5 * v7 + v96
This was broken in the case where the target register was also used as a
multiplicand. Fix this case by checking for it and replacing both uses
with the copied register.
%vreg6<def> = COPY %vreg96
%vreg6<def,tied1> = XSMADDASP %vreg6<tied0>, %vreg5<kill>, %vreg6
;v6 = v6 + v5 * v6
is replaced by
%vreg5<def,tied1> = XSMADDMSP %vreg5<tied0>, %vreg96, %vreg96
;v5 = v5 * v96 + v96
llvm-svn: 259617
2016-02-03 09:41:09 +08:00
|
|
|
// If the addend is also a multiplicand, replace it with the addend
|
|
|
|
// source in both places.
|
|
|
|
if (OtherProdReg == AddendMI->getOperand(0).getReg()) {
|
2016-07-27 21:24:16 +08:00
|
|
|
MI.getOperand(2).setReg(AddendSrcReg);
|
|
|
|
MI.getOperand(2).setSubReg(AddSubReg);
|
|
|
|
MI.getOperand(2).setIsKill(AddRegKill);
|
|
|
|
MI.getOperand(2).setIsUndef(AddRegUndef);
|
Codegen: [PPC] Fix PPCVSXFMAMutate to handle duplicates.
The purpose of PPCVSXFMAMutate is to elide copies by changing FMA forms
on PPC.
%vreg6<def> = COPY %vreg96
%vreg6<def,tied1> = XSMADDASP %vreg6<tied0>, %vreg5<kill>, %vreg7
;v6 = v6 + v5 * v7
is replaced by
%vreg5<def,tied1> = XSMADDMSP %vreg5<tied0>, %vreg7, %vreg96
;v5 = v5 * v7 + v96
This was broken in the case where the target register was also used as a
multiplicand. Fix this case by checking for it and replacing both uses
with the copied register.
%vreg6<def> = COPY %vreg96
%vreg6<def,tied1> = XSMADDASP %vreg6<tied0>, %vreg5<kill>, %vreg6
;v6 = v6 + v5 * v6
is replaced by
%vreg5<def,tied1> = XSMADDMSP %vreg5<tied0>, %vreg96, %vreg96
;v5 = v5 * v96 + v96
llvm-svn: 259617
2016-02-03 09:41:09 +08:00
|
|
|
} else {
|
2016-07-27 21:24:16 +08:00
|
|
|
MI.getOperand(2).setReg(OtherProdReg);
|
|
|
|
MI.getOperand(2).setSubReg(OtherProdSubReg);
|
|
|
|
MI.getOperand(2).setIsKill(OtherProdRegKill);
|
|
|
|
MI.getOperand(2).setIsUndef(OtherProdRegUndef);
|
Codegen: [PPC] Fix PPCVSXFMAMutate to handle duplicates.
The purpose of PPCVSXFMAMutate is to elide copies by changing FMA forms
on PPC.
%vreg6<def> = COPY %vreg96
%vreg6<def,tied1> = XSMADDASP %vreg6<tied0>, %vreg5<kill>, %vreg7
;v6 = v6 + v5 * v7
is replaced by
%vreg5<def,tied1> = XSMADDMSP %vreg5<tied0>, %vreg7, %vreg96
;v5 = v5 * v7 + v96
This was broken in the case where the target register was also used as a
multiplicand. Fix this case by checking for it and replacing both uses
with the copied register.
%vreg6<def> = COPY %vreg96
%vreg6<def,tied1> = XSMADDASP %vreg6<tied0>, %vreg5<kill>, %vreg6
;v6 = v6 + v5 * v6
is replaced by
%vreg5<def,tied1> = XSMADDMSP %vreg5<tied0>, %vreg96, %vreg96
;v5 = v5 * v96 + v96
llvm-svn: 259617
2016-02-03 09:41:09 +08:00
|
|
|
}
|
|
|
|
|
2018-05-14 20:53:11 +08:00
|
|
|
LLVM_DEBUG(dbgs() << " -> " << MI);
|
2015-02-02 05:51:22 +08:00
|
|
|
|
|
|
|
// The killed product operand was killed here, so we can reuse it now
|
|
|
|
// for the result of the fma.
|
|
|
|
|
|
|
|
LiveInterval &FMAInt = LIS->getInterval(OldFMAReg);
|
|
|
|
VNInfo *FMAValNo = FMAInt.getVNInfoAt(FMAIdx.getRegSlot());
|
|
|
|
for (auto UI = MRI.reg_nodbg_begin(OldFMAReg), UE = MRI.reg_nodbg_end();
|
|
|
|
UI != UE;) {
|
|
|
|
MachineOperand &UseMO = *UI;
|
|
|
|
MachineInstr *UseMI = UseMO.getParent();
|
|
|
|
++UI;
|
|
|
|
|
|
|
|
// Don't replace the result register of the copy we're about to erase.
|
|
|
|
if (UseMI == AddendMI)
|
|
|
|
continue;
|
|
|
|
|
2015-12-11 05:28:40 +08:00
|
|
|
UseMO.substVirtReg(KilledProdReg, KilledProdSubReg, *TRI);
|
2015-02-02 05:51:22 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
// Extend the live intervals of the killed product operand to hold the
|
|
|
|
// fma result.
|
|
|
|
|
|
|
|
LiveInterval &NewFMAInt = LIS->getInterval(KilledProdReg);
|
|
|
|
for (LiveInterval::iterator AI = FMAInt.begin(), AE = FMAInt.end();
|
|
|
|
AI != AE; ++AI) {
|
|
|
|
// Don't add the segment that corresponds to the original copy.
|
|
|
|
if (AI->valno == AddendValNo)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
VNInfo *NewFMAValNo =
|
|
|
|
NewFMAInt.getNextValue(AI->start,
|
|
|
|
LIS->getVNInfoAllocator());
|
|
|
|
|
|
|
|
NewFMAInt.addSegment(LiveInterval::Segment(AI->start, AI->end,
|
|
|
|
NewFMAValNo));
|
|
|
|
}
|
2018-05-14 20:53:11 +08:00
|
|
|
LLVM_DEBUG(dbgs() << " extended: " << NewFMAInt << '\n');
|
2015-02-02 05:51:22 +08:00
|
|
|
|
2015-07-15 16:23:03 +08:00
|
|
|
// Extend the live interval of the addend source (it might end at the
|
|
|
|
// copy to be removed, or somewhere in between there and here). This
|
|
|
|
// is necessary only if it is a physical register.
|
|
|
|
if (!TargetRegisterInfo::isVirtualRegister(AddendSrcReg))
|
|
|
|
for (MCRegUnitIterator Units(AddendSrcReg, TRI); Units.isValid();
|
|
|
|
++Units) {
|
|
|
|
unsigned Unit = *Units;
|
|
|
|
|
|
|
|
LiveRange &AddendSrcRange = LIS->getRegUnit(Unit);
|
|
|
|
AddendSrcRange.extendInBlock(LIS->getMBBStartIdx(&MBB),
|
|
|
|
FMAIdx.getRegSlot());
|
2018-05-14 20:53:11 +08:00
|
|
|
LLVM_DEBUG(dbgs() << " extended: " << AddendSrcRange << '\n');
|
2015-07-15 16:23:03 +08:00
|
|
|
}
|
|
|
|
|
2015-02-02 05:51:22 +08:00
|
|
|
FMAInt.removeValNo(FMAValNo);
|
2018-05-14 20:53:11 +08:00
|
|
|
LLVM_DEBUG(dbgs() << " trimmed: " << FMAInt << '\n');
|
2015-02-02 05:51:22 +08:00
|
|
|
|
|
|
|
// Remove the (now unused) copy.
|
|
|
|
|
2018-05-14 20:53:11 +08:00
|
|
|
LLVM_DEBUG(dbgs() << " removing: " << *AddendMI << '\n');
|
2016-02-27 14:40:41 +08:00
|
|
|
LIS->RemoveMachineInstrFromMaps(*AddendMI);
|
2015-02-02 05:51:22 +08:00
|
|
|
AddendMI->eraseFromParent();
|
|
|
|
|
|
|
|
Changed = true;
|
|
|
|
}
|
|
|
|
|
|
|
|
return Changed;
|
|
|
|
}
|
|
|
|
|
|
|
|
public:
|
|
|
|
bool runOnMachineFunction(MachineFunction &MF) override {
|
2017-12-16 06:22:58 +08:00
|
|
|
if (skipFunction(MF.getFunction()))
|
2016-04-28 03:39:32 +08:00
|
|
|
return false;
|
|
|
|
|
2015-02-02 05:51:22 +08:00
|
|
|
// If we don't have VSX then go ahead and return without doing
|
|
|
|
// anything.
|
|
|
|
const PPCSubtarget &STI = MF.getSubtarget<PPCSubtarget>();
|
|
|
|
if (!STI.hasVSX())
|
|
|
|
return false;
|
|
|
|
|
|
|
|
LIS = &getAnalysis<LiveIntervals>();
|
|
|
|
|
|
|
|
TII = STI.getInstrInfo();
|
|
|
|
|
|
|
|
bool Changed = false;
|
|
|
|
|
|
|
|
if (DisableVSXFMAMutate)
|
|
|
|
return Changed;
|
|
|
|
|
|
|
|
for (MachineFunction::iterator I = MF.begin(); I != MF.end();) {
|
|
|
|
MachineBasicBlock &B = *I++;
|
|
|
|
if (processBlock(B))
|
|
|
|
Changed = true;
|
|
|
|
}
|
|
|
|
|
|
|
|
return Changed;
|
|
|
|
}
|
|
|
|
|
|
|
|
void getAnalysisUsage(AnalysisUsage &AU) const override {
|
|
|
|
AU.addRequired<LiveIntervals>();
|
|
|
|
AU.addPreserved<LiveIntervals>();
|
|
|
|
AU.addRequired<SlotIndexes>();
|
|
|
|
AU.addPreserved<SlotIndexes>();
|
2016-11-30 21:31:09 +08:00
|
|
|
AU.addRequired<MachineDominatorTree>();
|
|
|
|
AU.addPreserved<MachineDominatorTree>();
|
2015-02-02 05:51:22 +08:00
|
|
|
MachineFunctionPass::getAnalysisUsage(AU);
|
|
|
|
}
|
|
|
|
};
|
2015-06-23 17:49:53 +08:00
|
|
|
}
|
2015-02-02 05:51:22 +08:00
|
|
|
|
|
|
|
INITIALIZE_PASS_BEGIN(PPCVSXFMAMutate, DEBUG_TYPE,
|
|
|
|
"PowerPC VSX FMA Mutation", false, false)
|
|
|
|
INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
|
|
|
|
INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
|
2016-11-30 21:31:09 +08:00
|
|
|
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
|
2015-02-02 05:51:22 +08:00
|
|
|
INITIALIZE_PASS_END(PPCVSXFMAMutate, DEBUG_TYPE,
|
|
|
|
"PowerPC VSX FMA Mutation", false, false)
|
|
|
|
|
|
|
|
char &llvm::PPCVSXFMAMutateID = PPCVSXFMAMutate::ID;
|
|
|
|
|
|
|
|
char PPCVSXFMAMutate::ID = 0;
|
2015-09-22 19:13:55 +08:00
|
|
|
FunctionPass *llvm::createPPCVSXFMAMutatePass() {
|
|
|
|
return new PPCVSXFMAMutate();
|
|
|
|
}
|