//===- MachineSink.cpp - Sinking for machine instructions ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass moves instructions into successor blocks when possible, so that
// they aren't executed on paths where their results aren't needed.
//
// This pass is not intended to be a replacement or a complete alternative
// for an LLVM-IR-level sinking pass. It is only designed to sink simple
// constructs that are not exposed before lowering and instruction selection.
//
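// As a motivating illustration (adapted from the commit message that first
// introduced this pass), given C code such as:
//
//   void test1(int C, double A, double B, double *P) {
//     double Tmp = A*A + B*B;
//     *P = C ? Tmp : A;
//   }
//
// sinking moves the two multiplies and the add below the branch on C, so the
// computation of Tmp is executed only on the path that actually selects it.
//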
//===----------------------------------------------------------------------===//

#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/SparseBitVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
#include "llvm/CodeGen/MachineBranchProbabilityInfo.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachinePostDominators.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Pass.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <map>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "machine-sink"

static cl::opt<bool>
SplitEdges("machine-sink-split",
           cl::desc("Split critical edges during machine sinking"),
           cl::init(true), cl::Hidden);

static cl::opt<bool>
UseBlockFreqInfo("machine-sink-bfi",
                 cl::desc("Use block frequency info to find successors to sink"),
                 cl::init(true), cl::Hidden);

static cl::opt<unsigned> SplitEdgeProbabilityThreshold(
    "machine-sink-split-probability-threshold",
    cl::desc(
        "Percentage threshold for splitting single-instruction critical edge. "
        "If the branch probability is higher than this threshold, we allow "
        "speculative execution of up to 1 instruction to avoid branching to "
        "the split critical edge"),
    cl::init(40), cl::Hidden);
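
// These knobs are ordinary cl::opt flags, so any codegen driver can override
// them on the command line. An illustrative invocation (flag spellings are
// the ones registered above; the values are only examples):
//
//   llc -machine-sink-bfi=false \
//       -machine-sink-split-probability-threshold=60 foo.ll -o foo.s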

STATISTIC(NumSunk,      "Number of machine instructions sunk");
STATISTIC(NumSplit,     "Number of critical edges split");
STATISTIC(NumCoalesces, "Number of copies coalesced");
STATISTIC(NumPostRACopySink, "Number of copies sunk after RA");
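
// When the compiler is run with -stats, each STATISTIC above is reported as
// "<count> machine-sink - <description>", e.g.
// "120 machine-sink - Number of machine instructions sunk" (count shown here
// is purely illustrative).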

namespace {

  class MachineSinking : public MachineFunctionPass {
    const TargetInstrInfo *TII;
    const TargetRegisterInfo *TRI;
    MachineRegisterInfo *MRI;      // Machine register information
    MachineDominatorTree *DT;      // Machine dominator tree
    MachinePostDominatorTree *PDT; // Machine post dominator tree
    MachineLoopInfo *LI;
    const MachineBlockFrequencyInfo *MBFI;
    const MachineBranchProbabilityInfo *MBPI;
    AliasAnalysis *AA;

    // Remember which edges have been considered for breaking.
    SmallSet<std::pair<MachineBasicBlock*, MachineBasicBlock*>, 8>
    CEBCandidates;

    // Remember which edges we are about to split. Unlike CEBCandidates, which
    // only records edges that have been considered, the edges in ToSplit will
    // actually be split.
    SetVector<std::pair<MachineBasicBlock *, MachineBasicBlock *>> ToSplit;

    SparseBitVector<> RegsToClearKillFlags;

    using AllSuccsCache =
        std::map<MachineBasicBlock *, SmallVector<MachineBasicBlock *, 4>>;

  public:
    static char ID; // Pass identification

    MachineSinking() : MachineFunctionPass(ID) {
      initializeMachineSinkingPass(*PassRegistry::getPassRegistry());
    }

    bool runOnMachineFunction(MachineFunction &MF) override;

    void getAnalysisUsage(AnalysisUsage &AU) const override {
      AU.setPreservesCFG();
      MachineFunctionPass::getAnalysisUsage(AU);
      AU.addRequired<AAResultsWrapperPass>();
      AU.addRequired<MachineDominatorTree>();
      AU.addRequired<MachinePostDominatorTree>();
      AU.addRequired<MachineLoopInfo>();
      AU.addRequired<MachineBranchProbabilityInfo>();
      AU.addPreserved<MachineDominatorTree>();
      AU.addPreserved<MachinePostDominatorTree>();
      AU.addPreserved<MachineLoopInfo>();
      if (UseBlockFreqInfo)
        AU.addRequired<MachineBlockFrequencyInfo>();
    }

    void releaseMemory() override {
      CEBCandidates.clear();
    }

  private:
    bool ProcessBlock(MachineBasicBlock &MBB);
    bool isWorthBreakingCriticalEdge(MachineInstr &MI,
                                     MachineBasicBlock *From,
                                     MachineBasicBlock *To);

    /// Postpone the splitting of the given critical
    /// edge (\p From, \p To).
    ///
    /// We do not split the edges on the fly. Indeed, this invalidates
    /// the dominance information and thus triggers a lot of updates
    /// of that information underneath.
    /// Instead, we postpone all the splits after each iteration of
    /// the main loop. That way, the information is at least valid
    /// for the lifetime of an iteration.
    ///
    /// \return True if the edge is marked as toSplit, false otherwise.
    /// False can be returned if, for instance, this is not profitable.
    bool PostponeSplitCriticalEdge(MachineInstr &MI,
                                   MachineBasicBlock *From,
                                   MachineBasicBlock *To,
                                   bool BreakPHIEdge);
    bool SinkInstruction(MachineInstr &MI, bool &SawStore,
                         AllSuccsCache &AllSuccessors);
    bool AllUsesDominatedByBlock(unsigned Reg, MachineBasicBlock *MBB,
                                 MachineBasicBlock *DefMBB,
                                 bool &BreakPHIEdge, bool &LocalUse) const;
    MachineBasicBlock *FindSuccToSinkTo(MachineInstr &MI, MachineBasicBlock *MBB,
                                        bool &BreakPHIEdge,
                                        AllSuccsCache &AllSuccessors);
    bool isProfitableToSinkTo(unsigned Reg, MachineInstr &MI,
                              MachineBasicBlock *MBB,
                              MachineBasicBlock *SuccToSinkTo,
                              AllSuccsCache &AllSuccessors);

    bool PerformTrivialForwardCoalescing(MachineInstr &MI,
                                         MachineBasicBlock *MBB);

    SmallVector<MachineBasicBlock *, 4> &
    GetAllSortedSuccessors(MachineInstr &MI, MachineBasicBlock *MBB,
                           AllSuccsCache &AllSuccessors) const;
  };

} // end anonymous namespace

char MachineSinking::ID = 0;

char &llvm::MachineSinkingID = MachineSinking::ID;
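
// Note: targets typically schedule this pass through the exported
// MachineSinkingID above, e.g. TargetPassConfig adds it with
// addPass(&MachineSinkingID) as part of the machine-SSA optimization pipeline.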

INITIALIZE_PASS_BEGIN(MachineSinking, DEBUG_TYPE,
                      "Machine code sinking", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineBranchProbabilityInfo)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_END(MachineSinking, DEBUG_TYPE,
                    "Machine code sinking", false, false)
|
Add a really quick hack at a machine code sinking pass, enabled with --enable-sinking.
It is missing validity checks, so it is known broken. However, it is powerful enough
to compile this contrived code:
void test1(int C, double A, double B, double *P) {
double Tmp = A*A+B*B;
*P = C ? Tmp : A;
}
into:
_test1:
movsd 8(%esp), %xmm0
cmpl $0, 4(%esp)
je LBB1_2 # entry
LBB1_1: # entry
movsd 16(%esp), %xmm1
mulsd %xmm1, %xmm1
mulsd %xmm0, %xmm0
addsd %xmm1, %xmm0
LBB1_2: # entry
movl 24(%esp), %eax
movsd %xmm0, (%eax)
ret
instead of:
_test1:
movsd 16(%esp), %xmm0
mulsd %xmm0, %xmm0
movsd 8(%esp), %xmm1
movapd %xmm1, %xmm2
mulsd %xmm2, %xmm2
addsd %xmm0, %xmm2
cmpl $0, 4(%esp)
je LBB1_2 # entry
LBB1_1: # entry
movapd %xmm2, %xmm1
LBB1_2: # entry
movl 24(%esp), %eax
movsd %xmm1, (%eax)
ret
woo.
llvm-svn: 45570
2008-01-04 15:36:53 +08:00
|
|
|
|
2016-07-01 08:11:48 +08:00
|
|
|
bool MachineSinking::PerformTrivialForwardCoalescing(MachineInstr &MI,
|
2010-09-18 06:28:18 +08:00
|
|
|
MachineBasicBlock *MBB) {
|
2016-07-01 08:11:48 +08:00
|
|
|
if (!MI.isCopy())
|
2010-09-18 06:28:18 +08:00
|
|
|
return false;
|
|
|
|
|
2016-07-01 08:11:48 +08:00
|
|
|
unsigned SrcReg = MI.getOperand(1).getReg();
|
|
|
|
unsigned DstReg = MI.getOperand(0).getReg();
|
2010-09-18 06:28:18 +08:00
|
|
|
if (!TargetRegisterInfo::isVirtualRegister(SrcReg) ||
|
|
|
|
!TargetRegisterInfo::isVirtualRegister(DstReg) ||
|
|
|
|
!MRI->hasOneNonDBGUse(SrcReg))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
const TargetRegisterClass *SRC = MRI->getRegClass(SrcReg);
|
|
|
|
const TargetRegisterClass *DRC = MRI->getRegClass(DstReg);
|
|
|
|
if (SRC != DRC)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
MachineInstr *DefMI = MRI->getVRegDef(SrcReg);
|
|
|
|
if (DefMI->isCopyLike())
|
|
|
|
return false;
|
|
|
|
DEBUG(dbgs() << "Coalescing: " << *DefMI);
|
2016-07-01 08:11:48 +08:00
|
|
|
DEBUG(dbgs() << "*** to: " << MI);
|
2010-09-18 06:28:18 +08:00
|
|
|
MRI->replaceRegWith(DstReg, SrcReg);
|
2016-07-01 08:11:48 +08:00
|
|
|
MI.eraseFromParent();
|
2014-09-09 15:47:00 +08:00
|
|
|
|
|
|
|
// Conservatively, clear any kill flags, since it's possible that they are no
|
|
|
|
// longer correct.
|
|
|
|
MRI->clearKillFlags(SrcReg);
|
|
|
|
|
2010-09-18 06:28:18 +08:00
|
|
|
++NumCoalesces;
|
|
|
|
return true;
|
|
|
|
}
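
// For illustration (simplified, hypothetical MIR), the trivial forward
// coalescing above rewrites
//
//   %1 = ADD32rr %0, %0    ; the COPY below is the only non-debug use of %1
//   %2 = COPY %1
//   ... uses of %2 ...
//
// into
//
//   %1 = ADD32rr %0, %0
//   ... uses of %1 ...
//
// by replacing %2 with %1 everywhere and erasing the COPY.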

/// AllUsesDominatedByBlock - Return true if all uses of the specified register
/// occur in blocks dominated by the specified block. If any use is in the
/// definition block, then return false since it is never legal to move def
/// after uses.
bool
MachineSinking::AllUsesDominatedByBlock(unsigned Reg,
                                        MachineBasicBlock *MBB,
                                        MachineBasicBlock *DefMBB,
                                        bool &BreakPHIEdge,
                                        bool &LocalUse) const {
  assert(TargetRegisterInfo::isVirtualRegister(Reg) &&
         "Only makes sense for vregs");

  // Ignore debug uses because debug info doesn't affect the code.
  if (MRI->use_nodbg_empty(Reg))
    return true;

  // BreakPHIEdge is true if all the uses are in the successor MBB being sunken
  // into and they are all PHI nodes. In this case, machine-sink must break
  // the critical edge first. e.g.
  //
  // %bb.1: derived from LLVM BB %bb4.preheader
  //   Predecessors according to CFG: %bb.0
  //     ...
  //     %reg16385 = DEC64_32r %reg16437, implicit-def dead %eflags
  //     ...
  //     JE_4 <%bb.37>, implicit %eflags
  //   Successors according to CFG: %bb.37 %bb.2
  //
  // %bb.2: derived from LLVM BB %bb.nph
  //   Predecessors according to CFG: %bb.0 %bb.1
  //     %reg16386 = PHI %reg16434, %bb.0, %reg16385, %bb.1
  BreakPHIEdge = true;
  for (MachineOperand &MO : MRI->use_nodbg_operands(Reg)) {
    MachineInstr *UseInst = MO.getParent();
    unsigned OpNo = &MO - &UseInst->getOperand(0);
    MachineBasicBlock *UseBlock = UseInst->getParent();
    if (!(UseBlock == MBB && UseInst->isPHI() &&
          UseInst->getOperand(OpNo+1).getMBB() == DefMBB)) {
      BreakPHIEdge = false;
      break;
    }
  }
  if (BreakPHIEdge)
    return true;

  for (MachineOperand &MO : MRI->use_nodbg_operands(Reg)) {
    // Determine the block of the use.
    MachineInstr *UseInst = MO.getParent();
    unsigned OpNo = &MO - &UseInst->getOperand(0);
    MachineBasicBlock *UseBlock = UseInst->getParent();
    if (UseInst->isPHI()) {
      // PHI nodes use the operand in the predecessor block, not the block with
      // the PHI.
      UseBlock = UseInst->getOperand(OpNo+1).getMBB();
    } else if (UseBlock == DefMBB) {
      LocalUse = true;
      return false;
    }

    // Check that it dominates.
    if (!DT->dominates(MBB, UseBlock))
      return false;
  }

  return true;
}
|
|
|
|
|
|
|
|
bool MachineSinking::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(MF.getFunction()))
    return false;

  DEBUG(dbgs() << "******** Machine Sinking ********\n");

  TII = MF.getSubtarget().getInstrInfo();
  TRI = MF.getSubtarget().getRegisterInfo();
  MRI = &MF.getRegInfo();
  DT = &getAnalysis<MachineDominatorTree>();
  PDT = &getAnalysis<MachinePostDominatorTree>();
  LI = &getAnalysis<MachineLoopInfo>();
  MBFI = UseBlockFreqInfo ? &getAnalysis<MachineBlockFrequencyInfo>() : nullptr;
  MBPI = &getAnalysis<MachineBranchProbabilityInfo>();
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();

  bool EverMadeChange = false;
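
  // Iterate to a fixed point: sinking one instruction can expose further
  // sinking opportunities, and any critical edges recorded for splitting only
  // become usable sink destinations on the next iteration.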
  while (true) {
    bool MadeChange = false;

    // Process all basic blocks.
    CEBCandidates.clear();
    ToSplit.clear();
    for (auto &MBB: MF)
      MadeChange |= ProcessBlock(MBB);

    // If we have anything we marked as toSplit, split it now.
    for (auto &Pair : ToSplit) {
      auto NewSucc = Pair.first->SplitCriticalEdge(Pair.second, *this);
      if (NewSucc != nullptr) {
        DEBUG(dbgs() << " *** Splitting critical edge: "
                     << printMBBReference(*Pair.first) << " -- "
                     << printMBBReference(*NewSucc) << " -- "
                     << printMBBReference(*Pair.second) << '\n');
        MadeChange = true;
        ++NumSplit;
      } else
        DEBUG(dbgs() << " *** Not legal to break critical edge\n");
    }
    // If this iteration over the code changed anything, keep iterating.
    if (!MadeChange) break;
    EverMadeChange = true;
  }

  // Now clear any kill flags for recorded registers.
  for (auto I : RegsToClearKillFlags)
    MRI->clearKillFlags(I);
  RegsToClearKillFlags.clear();

  return EverMadeChange;
}

bool MachineSinking::ProcessBlock(MachineBasicBlock &MBB) {
  // Can't sink anything out of a block that has fewer than two successors.
  if (MBB.succ_size() <= 1 || MBB.empty()) return false;

  // Don't bother sinking code out of unreachable blocks. In addition to being
  // unprofitable, it can also lead to infinite looping, because in an
  // unreachable loop there may be nowhere to stop.
  if (!DT->isReachableFromEntry(&MBB)) return false;

  bool MadeChange = false;

  // Cache all successors, sorted by frequency info and loop depth.
  AllSuccsCache AllSuccessors;

  // Walk the basic block bottom-up. Remember if we saw a store.
  MachineBasicBlock::iterator I = MBB.end();
  --I;
  bool ProcessedBegin, SawStore = false;
  do {
    MachineInstr &MI = *I;  // The instruction to sink.

    // Predecrement I (if it's not begin) so that it isn't invalidated by
    // sinking.
    ProcessedBegin = I == MBB.begin();
    if (!ProcessedBegin)
      --I;

    if (MI.isDebugValue())
      continue;
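
    // Try trivial forward copy coalescing first; if MI is folded away this
    // way there is nothing left to sink.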
    bool Joined = PerformTrivialForwardCoalescing(MI, &MBB);
    if (Joined) {
      MadeChange = true;
      continue;
    }

    if (SinkInstruction(MI, SawStore, AllSuccessors)) {
      ++NumSunk;
      MadeChange = true;
    }

    // If we just processed the first instruction in the block, we're done.
  } while (!ProcessedBegin);

  return MadeChange;
}

bool MachineSinking::isWorthBreakingCriticalEdge(MachineInstr &MI,
                                                 MachineBasicBlock *From,
                                                 MachineBasicBlock *To) {
  // FIXME: Need much better heuristics.

  // If the pass has already considered breaking this edge (during this pass
  // through the function), then let's go ahead and break it. This means
  // sinking multiple "cheap" instructions into the same block.
  if (!CEBCandidates.insert(std::make_pair(From, To)).second)
    return true;

  if (!MI.isCopy() && !TII->isAsCheapAsAMove(MI))
    return true;
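
  // Be willing to break the edge when it is rarely taken, i.e. its probability
  // is at or below SplitEdgeProbabilityThreshold.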
  if (From->isSuccessor(To) && MBPI->getEdgeProbability(From, To) <=
      BranchProbability(SplitEdgeProbabilityThreshold, 100))
    return true;

  // MI is cheap, we probably don't want to break the critical edge for it.
  // However, if this would allow some definitions of its source operands
  // to be sunk then it's probably worth it.
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0)
      continue;

    // We don't move live definitions of physical registers,
    // so sinking their uses won't enable any opportunities.
    if (TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;

    // If this instruction is the only user of a virtual register,
    // check if breaking the edge will enable sinking
    // both this instruction and the defining instruction.
    if (MRI->hasOneNonDBGUse(Reg)) {
      // If the definition resides in the same MBB, claim it's likely we can
      // sink these together. If the definition resides elsewhere, we aren't
      // blocking it from being sunk, so don't break the edge.
      MachineInstr *DefMI = MRI->getVRegDef(Reg);
      if (DefMI->getParent() == MI.getParent())
        return true;
    }
  }

  return false;
}

bool MachineSinking::PostponeSplitCriticalEdge(MachineInstr &MI,
                                               MachineBasicBlock *FromBB,
                                               MachineBasicBlock *ToBB,
                                               bool BreakPHIEdge) {
  if (!isWorthBreakingCriticalEdge(MI, FromBB, ToBB))
    return false;

  // Avoid breaking a back edge: From == To means a backedge of a single-BB
  // loop.
  if (!SplitEdges || FromBB == ToBB)
    return false;

  // Check for backedges of more "complex" loops.
  if (LI->getLoopFor(FromBB) == LI->getLoopFor(ToBB) &&
      LI->isLoopHeader(ToBB))
    return false;

  // It's not always legal to break critical edges and sink the computation
  // to the edge.
  //
  // %bb.1:
  // v1024
  // Beq %bb.3
  // <fallthrough>
  // %bb.2:
  // ... no uses of v1024
  // <fallthrough>
  // %bb.3:
  // ...
  //        = v1024
  //
  // If %bb.1 -> %bb.3 edge is broken and computation of v1024 is inserted:
  //
  // %bb.1:
  // ...
  // Bne %bb.2
  // %bb.4:
  // v1024 =
  // B %bb.3
  // %bb.2:
  // ... no uses of v1024
  // <fallthrough>
  // %bb.3:
  // ...
  //        = v1024
  //
  // This is incorrect since v1024 is not computed along the %bb.1->%bb.2->%bb.3
  // flow. We need to ensure the new basic block where the computation is
  // sunk to dominates all the uses.
  // It's only legal to break a critical edge and sink the computation to the
  // new block if all the predecessors of "To", except for "From", are
  // not dominated by "From". Given the SSA property, this means these
  // predecessors are dominated by "To".
  //
  // There is no need to do this check if all the uses are PHI nodes. PHI
  // sources are only defined on the specific predecessor edges.
  if (!BreakPHIEdge) {
    for (MachineBasicBlock::pred_iterator PI = ToBB->pred_begin(),
           E = ToBB->pred_end(); PI != E; ++PI) {
      if (*PI == FromBB)
        continue;
      if (!DT->dominates(ToBB, *PI))
        return false;
    }
  }
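
  // Defer the actual split: just record the edge here and let
  // runOnMachineFunction perform all the recorded splits once the current
  // iteration over the function has finished.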
  ToSplit.insert(std::make_pair(FromBB, ToBB));

  return true;
}

/// collectDebugValues - Scan instructions following MI and collect any
/// matching DBG_VALUEs.
static void collectDebugValues(MachineInstr &MI,
                               SmallVectorImpl<MachineInstr *> &DbgValues) {
  DbgValues.clear();
  if (!MI.getOperand(0).isReg())
    return;
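
  // The loop below relies on DBG_VALUEs for MI's result appearing immediately
  // after MI: it stops scanning at the first non-debug instruction.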
  MachineBasicBlock::iterator DI = MI; ++DI;
  for (MachineBasicBlock::iterator DE = MI.getParent()->end();
       DI != DE; ++DI) {
    if (!DI->isDebugValue())
      return;
    if (DI->getOperand(0).isReg() &&
        DI->getOperand(0).getReg() == MI.getOperand(0).getReg())
      DbgValues.push_back(&*DI);
  }
}

/// isProfitableToSinkTo - Return true if it is profitable to sink MI.
bool MachineSinking::isProfitableToSinkTo(unsigned Reg, MachineInstr &MI,
                                          MachineBasicBlock *MBB,
                                          MachineBasicBlock *SuccToSinkTo,
                                          AllSuccsCache &AllSuccessors) {
  assert (SuccToSinkTo && "Invalid SinkTo Candidate BB");

  if (MBB == SuccToSinkTo)
    return false;

  // It is profitable if SuccToSinkTo does not post-dominate the current block.
  if (!PDT->dominates(SuccToSinkTo, MBB))
    return true;

  // It is profitable to sink an instruction from a deeper loop to a shallower
  // loop, even if the latter post-dominates the former (PR21115).
  if (LI->getLoopDepth(MBB) > LI->getLoopDepth(SuccToSinkTo))
    return true;

  // Check if the only use in the post-dominated block is a PHI instruction.
  bool NonPHIUse = false;
  for (MachineInstr &UseInst : MRI->use_nodbg_instructions(Reg)) {
    MachineBasicBlock *UseBlock = UseInst.getParent();
    if (UseBlock == SuccToSinkTo && !UseInst.isPHI())
      NonPHIUse = true;
  }
  if (!NonPHIUse)
    return true;

  // If SuccToSinkTo post-dominates MBB, sinking may still be profitable if MI
  // can be profitably sunk into another block in the next round.
  bool BreakPHIEdge = false;
  // FIXME - If finding a successor is compile-time expensive, cache results.
  if (MachineBasicBlock *MBB2 =
          FindSuccToSinkTo(MI, SuccToSinkTo, BreakPHIEdge, AllSuccessors))
    return isProfitableToSinkTo(Reg, MI, SuccToSinkTo, MBB2, AllSuccessors);

  // If SuccToSinkTo is the final destination and it post-dominates the current
  // block, then it is not profitable to sink MI into SuccToSinkTo.
  return false;
}

/// Get the sorted sequence of successors for this MachineBasicBlock, possibly
|
|
|
|
/// computing it if it was not already cached.
|
|
|
|
SmallVector<MachineBasicBlock *, 4> &
|
2016-07-01 08:11:48 +08:00
|
|
|
MachineSinking::GetAllSortedSuccessors(MachineInstr &MI, MachineBasicBlock *MBB,
|
2015-06-16 16:57:21 +08:00
|
|
|
AllSuccsCache &AllSuccessors) const {
|
2015-06-15 17:09:06 +08:00
|
|
|
// Do we have the sorted successors in cache ?
|
|
|
|
auto Succs = AllSuccessors.find(MBB);
|
|
|
|
if (Succs != AllSuccessors.end())
|
|
|
|
return Succs->second;
|
|
|
|
|
|
|
|
SmallVector<MachineBasicBlock *, 4> AllSuccs(MBB->succ_begin(),
|
|
|
|
MBB->succ_end());
|
|
|
|
|
|
|
|
// Handle cases where sinking can happen but where the sink point isn't a
|
|
|
|
// successor. For example:
|
|
|
|
//
|
|
|
|
// x = computation
|
|
|
|
// if () {} else {}
|
|
|
|
// use x
|
|
|
|
//
|
|
|
|
const std::vector<MachineDomTreeNode *> &Children =
|
|
|
|
DT->getNode(MBB)->getChildren();
|
|
|
|
for (const auto &DTChild : Children)
|
|
|
|
// DomTree children of MBB that have MBB as immediate dominator are added.
|
2016-07-01 08:11:48 +08:00
|
|
|
if (DTChild->getIDom()->getBlock() == MI.getParent() &&
|
2015-06-15 17:09:06 +08:00
|
|
|
// Skip MBBs already added to the AllSuccs vector above.
|
|
|
|
!MBB->isSuccessor(DTChild->getBlock()))
|
|
|
|
AllSuccs.push_back(DTChild->getBlock());
|
|
|
|
|
|
|
|
// Sort Successors according to their loop depth or block frequency info.
|
|
|
|
std::stable_sort(
|
|
|
|
AllSuccs.begin(), AllSuccs.end(),
|
|
|
|
[this](const MachineBasicBlock *L, const MachineBasicBlock *R) {
|
|
|
|
uint64_t LHSFreq = MBFI ? MBFI->getBlockFreq(L).getFrequency() : 0;
|
|
|
|
uint64_t RHSFreq = MBFI ? MBFI->getBlockFreq(R).getFrequency() : 0;
|
|
|
|
bool HasBlockFreq = LHSFreq != 0 && RHSFreq != 0;
|
|
|
|
return HasBlockFreq ? LHSFreq < RHSFreq
|
|
|
|
: LI->getLoopDepth(L) < LI->getLoopDepth(R);
|
|
|
|
});
|
|
|
|
|
|
|
|
auto it = AllSuccessors.insert(std::make_pair(MBB, AllSuccs));
|
|
|
|
|
|
|
|
return it.first->second;
|
|
|
|
}
/// FindSuccToSinkTo - Find a successor to sink this instruction to.
MachineBasicBlock *
MachineSinking::FindSuccToSinkTo(MachineInstr &MI, MachineBasicBlock *MBB,
                                 bool &BreakPHIEdge,
                                 AllSuccsCache &AllSuccessors) {
  assert (MBB && "Invalid MachineBasicBlock!");

  // Loop over all the operands of the specified instruction. If there is
  // anything we can't handle, bail out.

  // SuccToSinkTo - This is the successor to sink this instruction to, once we
  // decide.
  MachineBasicBlock *SuccToSinkTo = nullptr;
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg()) continue;  // Ignore non-register operands.

    unsigned Reg = MO.getReg();
    if (Reg == 0) continue;

    if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
      if (MO.isUse()) {
        // If the physreg has no defs anywhere, it's just an ambient register
        // and we can freely move its uses. Alternatively, if it's allocatable,
        // it could get allocated to something with a def during allocation.
        if (!MRI->isConstantPhysReg(Reg))
          return nullptr;
      } else if (!MO.isDead()) {
        // A def that isn't dead. We can't move it.
        return nullptr;
      }
    } else {
      // Virtual register uses are always safe to sink.
      if (MO.isUse()) continue;

      // If it's not safe to move defs of the register class, then abort.
      if (!TII->isSafeToMoveRegClassDefs(MRI->getRegClass(Reg)))
        return nullptr;

      // Virtual register defs can only be sunk if all their uses are in blocks
      // dominated by one of the successors.
      if (SuccToSinkTo) {
        // If a previous operand picked a block to sink to, then this operand
        // must be sinkable to the same block.
        bool LocalUse = false;
        if (!AllUsesDominatedByBlock(Reg, SuccToSinkTo, MBB,
                                     BreakPHIEdge, LocalUse))
          return nullptr;

        continue;
      }

      // Otherwise, we should look at all the successors and decide which one
      // we should sink to. If we have reliable block frequency information
      // (frequency != 0) available, give successors with smaller frequencies
      // higher priority, otherwise prioritize smaller loop depths.
      for (MachineBasicBlock *SuccBlock :
           GetAllSortedSuccessors(MI, MBB, AllSuccessors)) {
        bool LocalUse = false;
        if (AllUsesDominatedByBlock(Reg, SuccBlock, MBB,
                                    BreakPHIEdge, LocalUse)) {
          SuccToSinkTo = SuccBlock;
          break;
        }
        if (LocalUse)
          // Def is used locally, it's never safe to move this def.
          return nullptr;
      }

      // If we couldn't find a block to sink to, ignore this instruction.
      if (!SuccToSinkTo)
        return nullptr;
      if (!isProfitableToSinkTo(Reg, MI, MBB, SuccToSinkTo, AllSuccessors))
        return nullptr;
    }
  }

  // It is not possible to sink an instruction into its own block. This can
  // happen with loops.
  if (MBB == SuccToSinkTo)
    return nullptr;

  // It's not safe to sink instructions into an EH landing pad; control flow
  // into a landing pad is implicitly defined.
  if (SuccToSinkTo && SuccToSinkTo->isEHPad())
    return nullptr;

  return SuccToSinkTo;
}
/// Return true if MI is likely to be usable as a memory operation by the
/// implicit null check optimization.
///
/// This is a "best effort" heuristic, and should not be relied upon for
/// correctness. This returning true does not guarantee that the implicit null
/// check optimization is legal over MI, and this returning false does not
/// guarantee MI cannot possibly be used to do a null check.
static bool SinkingPreventsImplicitNullCheck(MachineInstr &MI,
                                             const TargetInstrInfo *TII,
                                             const TargetRegisterInfo *TRI) {
  using MachineBranchPredicate = TargetInstrInfo::MachineBranchPredicate;

  auto *MBB = MI.getParent();
  if (MBB->pred_size() != 1)
    return false;

  auto *PredMBB = *MBB->pred_begin();
  auto *PredBB = PredMBB->getBasicBlock();

  // Frontends that don't use implicit null checks have no reason to emit
  // branches with make.implicit metadata, and this function should always
  // return false for them.
  if (!PredBB ||
      !PredBB->getTerminator()->getMetadata(LLVMContext::MD_make_implicit))
    return false;

  unsigned BaseReg;
  int64_t Offset;
  if (!TII->getMemOpBaseRegImmOfs(MI, BaseReg, Offset, TRI))
    return false;

  if (!(MI.mayLoad() && !MI.isPredicable()))
    return false;

  MachineBranchPredicate MBP;
  if (TII->analyzeBranchPredicate(*PredMBB, MBP, false))
    return false;

  return MBP.LHS.isReg() && MBP.RHS.isImm() && MBP.RHS.getImm() == 0 &&
         (MBP.Predicate == MachineBranchPredicate::PRED_NE ||
          MBP.Predicate == MachineBranchPredicate::PRED_EQ) &&
         MBP.LHS.getReg() == BaseReg;
}
/// SinkInstruction - Determine whether it is safe to sink the specified machine
/// instruction out of its current block into a successor.
bool MachineSinking::SinkInstruction(MachineInstr &MI, bool &SawStore,
                                     AllSuccsCache &AllSuccessors) {
  // Don't sink instructions that the target prefers not to sink.
  if (!TII->shouldSink(MI))
    return false;

  // Check if it's safe to move the instruction.
  if (!MI.isSafeToMove(AA, SawStore))
    return false;

  // Convergent operations may not be made control-dependent on additional
  // values.
  if (MI.isConvergent())
    return false;

  // Don't break implicit null checks. This is a performance heuristic, and not
  // required for correctness.
  if (SinkingPreventsImplicitNullCheck(MI, TII, TRI))
    return false;

  // FIXME: This should include support for sinking instructions within the
  // block they are currently in to shorten the live ranges. We often get
  // instructions sunk into the top of a large block, but it would be better to
  // also sink them down before their first use in the block. This xform has to
  // be careful not to *increase* register pressure though, e.g. sinking
  // "x = y + z" down if it kills y and z would increase the live ranges of y
  // and z and only shrink the live range of x.

  bool BreakPHIEdge = false;
  MachineBasicBlock *ParentBlock = MI.getParent();
  MachineBasicBlock *SuccToSinkTo =
      FindSuccToSinkTo(MI, ParentBlock, BreakPHIEdge, AllSuccessors);

  // If there are no outputs, it must have side-effects.
  if (!SuccToSinkTo)
    return false;

  // If the instruction to move defines a dead physical register which is live
  // when leaving the basic block, don't move it because it could turn into a
  // "zombie" define of that preg. E.g., EFLAGS. (<rdar://problem/8030636>)
  for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) {
    const MachineOperand &MO = MI.getOperand(I);
    if (!MO.isReg()) continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || !TargetRegisterInfo::isPhysicalRegister(Reg)) continue;
    if (SuccToSinkTo->isLiveIn(Reg))
      return false;
  }

  DEBUG(dbgs() << "Sink instr " << MI << "\tinto block " << *SuccToSinkTo);

  // If the block has multiple predecessors, this is a critical edge.
  // Decide if we can sink along it or need to break the edge.
  if (SuccToSinkTo->pred_size() > 1) {
    // We cannot sink a load across a critical edge - there may be stores in
    // other code paths.
    bool TryBreak = false;
    bool store = true;
    if (!MI.isSafeToMove(AA, store)) {
      DEBUG(dbgs() << " *** NOTE: Won't sink load along critical edge.\n");
      TryBreak = true;
    }

    // We don't want to sink across a critical edge if we don't dominate the
    // successor. We could be introducing calculations to new code paths.
    if (!TryBreak && !DT->dominates(ParentBlock, SuccToSinkTo)) {
      DEBUG(dbgs() << " *** NOTE: Critical edge found\n");
      TryBreak = true;
    }

    // Don't sink instructions into a loop.
    if (!TryBreak && LI->isLoopHeader(SuccToSinkTo)) {
      DEBUG(dbgs() << " *** NOTE: Loop header found\n");
      TryBreak = true;
    }

    // Otherwise we are OK with sinking along a critical edge.
    if (!TryBreak)
      DEBUG(dbgs() << "Sinking along critical edge.\n");
    else {
      // Mark this edge as to be split.
      // If the edge can actually be split, the next iteration of the main loop
      // will sink MI in the newly created block.
      bool Status =
        PostponeSplitCriticalEdge(MI, ParentBlock, SuccToSinkTo, BreakPHIEdge);
      if (!Status)
        DEBUG(dbgs() << " *** PUNTING: Not legal or profitable to "
                        "break critical edge\n");
      // The instruction will not be sunk this time.
      return false;
    }
  }

  if (BreakPHIEdge) {
    // BreakPHIEdge is true if all the uses are in the successor MBB being
    // sunk into and they are all PHI nodes. In this case, machine-sink must
    // break the critical edge first.
    bool Status = PostponeSplitCriticalEdge(MI, ParentBlock,
                                            SuccToSinkTo, BreakPHIEdge);
    if (!Status)
      DEBUG(dbgs() << " *** PUNTING: Not legal or profitable to "
                      "break critical edge\n");
    // The instruction will not be sunk this time.
    return false;
  }

  // Determine where to insert into. Skip phi nodes.
  MachineBasicBlock::iterator InsertPos = SuccToSinkTo->begin();
  while (InsertPos != SuccToSinkTo->end() && InsertPos->isPHI())
    ++InsertPos;

  // Collect matching debug values.
  SmallVector<MachineInstr *, 2> DbgValuesToSink;
  collectDebugValues(MI, DbgValuesToSink);

  // Merge or erase debug location to ensure consistent stepping in profilers
  // and debuggers.
  if (!SuccToSinkTo->empty() && InsertPos != SuccToSinkTo->end())
    MI.setDebugLoc(DILocation::getMergedLocation(MI.getDebugLoc(),
                                                 InsertPos->getDebugLoc()));
  else
    MI.setDebugLoc(DebugLoc());

  // Move the instruction.
  SuccToSinkTo->splice(InsertPos, ParentBlock, MI,
                       ++MachineBasicBlock::iterator(MI));

  // Move previously adjacent debug value instructions to the insert position.
  for (SmallVectorImpl<MachineInstr *>::iterator DBI = DbgValuesToSink.begin(),
       DBE = DbgValuesToSink.end(); DBI != DBE; ++DBI) {
    MachineInstr *DbgMI = *DBI;
    SuccToSinkTo->splice(InsertPos, ParentBlock, DbgMI,
                         ++MachineBasicBlock::iterator(DbgMI));
  }

  // Conservatively, clear any kill flags, since it's possible that they are no
  // longer correct.
  // Note that we have to clear the kill flags for any register this instruction
  // uses as we may sink over another instruction which currently kills the
  // used registers.
  for (MachineOperand &MO : MI.operands()) {
    if (MO.isReg() && MO.isUse())
      RegsToClearKillFlags.set(MO.getReg()); // Remember to clear kill flags.
  }

  return true;
}
//===----------------------------------------------------------------------===//
// This pass is not intended to be a replacement or a complete alternative
// for the pre-ra machine sink pass. It is only designed to sink COPY
// instructions which should be handled after RA.
//
// This pass sinks COPY instructions into a successor block, if the COPY is not
// used in the current block and the COPY is live-in to a single successor
// (i.e., doesn't require the COPY to be duplicated). This avoids executing the
// copy on paths where its result isn't needed. This also exposes additional
// opportunities for dead copy elimination and shrink wrapping.
//
// These copies were either not handled by or are inserted after the MachineSink
// pass. As an example of the former case, the MachineSink pass cannot sink
// COPY instructions with allocatable source registers; for AArch64 this type
// of copy instruction is frequently used to move function parameters (PhyReg)
// into virtual registers in the entry block.
//
// For the machine IR below, this pass will sink %w19 in the entry into its
// successor (%bb.1) because %w19 is only live-in in %bb.1.
// %bb.0:
//    %wzr = SUBSWri %w1, 1
//    %w19 = COPY %w0
//    Bcc 11, %bb.2
// %bb.1:
//    Live Ins: %w19
//    BL @fun
//    %w0 = ADDWrr %w0, %w19
//    RET %w0
// %bb.2:
//    %w0 = COPY %wzr
//    RET %w0
// As we sink %w19 (CSR in AArch64) into %bb.1, the shrink-wrapping pass will be
// able to see %bb.0 as a candidate.
//===----------------------------------------------------------------------===//
namespace {

class PostRAMachineSinking : public MachineFunctionPass {
public:
  bool runOnMachineFunction(MachineFunction &MF) override;

  static char ID;
  PostRAMachineSinking() : MachineFunctionPass(ID) {}
  StringRef getPassName() const override { return "PostRA Machine Sink"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }

  MachineFunctionProperties getRequiredProperties() const override {
    return MachineFunctionProperties().set(
        MachineFunctionProperties::Property::NoVRegs);
  }

private:
  /// Track which register units have been modified and used.
  LiveRegUnits ModifiedRegUnits, UsedRegUnits;

  /// Sink Copy instructions unused in the same block close to their uses in
  /// successors.
  bool tryToSinkCopy(MachineBasicBlock &BB, MachineFunction &MF,
                     const TargetRegisterInfo *TRI, const TargetInstrInfo *TII);
};
} // namespace

char PostRAMachineSinking::ID = 0;
char &llvm::PostRAMachineSinkingID = PostRAMachineSinking::ID;

INITIALIZE_PASS(PostRAMachineSinking, "postra-machine-sink",
                "PostRA Machine Sink", false, false)
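/// Descriptive comment (not in the original source): return true if Reg, or a
/// register aliasing Reg, is in the live-in set of MBB.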
static bool aliasWithRegsInLiveIn(MachineBasicBlock &MBB, unsigned Reg,
                                  const TargetRegisterInfo *TRI) {
  LiveRegUnits LiveInRegUnits(*TRI);
  LiveInRegUnits.addLiveIns(MBB);
  return !LiveInRegUnits.available(Reg);
}
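/// Descriptive comment (not in the original source): return the single sinkable
/// successor of CurBB into which Reg is live-in, or nullptr if there is no such
/// successor, if Reg is live-in to more than one sinkable successor, or if Reg
/// (or an alias) is also live-in to a non-sinkable successor.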
static MachineBasicBlock *
|
|
|
|
getSingleLiveInSuccBB(MachineBasicBlock &CurBB,
|
2018-04-28 03:59:20 +08:00
|
|
|
const SmallPtrSetImpl<MachineBasicBlock *> &SinkableBBs,
|
|
|
|
unsigned Reg, const TargetRegisterInfo *TRI) {
|
[CodeGen] Add a new pass for PostRA sink
Summary:
This pass sinks COPY instructions into a successor block, if the COPY is not
used in the current block and the COPY is live-in to a single successor
(i.e., doesn't require the COPY to be duplicated). This avoids executing the
the copy on paths where their results aren't needed. This also exposes
additional opportunites for dead copy elimination and shrink wrapping.
These copies were either not handled by or are inserted after the MachineSink
pass. As an example of the former case, the MachineSink pass cannot sink
COPY instructions with allocatable source registers; for AArch64 these type
of copy instructions are frequently used to move function parameters (PhyReg)
into virtual registers in the entry block..
For the machine IR below, this pass will sink %w19 in the entry into its
successor (%bb.1) because %w19 is only live-in in %bb.1.
```
%bb.0:
%wzr = SUBSWri %w1, 1
%w19 = COPY %w0
Bcc 11, %bb.2
%bb.1:
Live Ins: %w19
BL @fun
%w0 = ADDWrr %w0, %w19
RET %w0
%bb.2:
%w0 = COPY %wzr
RET %w0
```
As we sink %w19 (CSR in AArch64) into %bb.1, the shrink-wrapping pass will be
able to see %bb.0 as a candidate.
With this change I observed 12% more shrink-wrapping candidate and 13% more dead copies deleted in spec2000/2006/2017 on AArch64.
Reviewers: qcolombet, MatzeB, thegameg, mcrosier, gberry, hfinkel, john.brawn, twoh, RKSimon, sebpop, kparzysz
Reviewed By: sebpop
Subscribers: evandro, sebpop, sfertile, aemerson, mgorny, javed.absar, kristof.beyls, llvm-commits
Differential Revision: https://reviews.llvm.org/D41463
llvm-svn: 328237
2018-03-23 04:06:47 +08:00
|
|
|
// Try to find a single sinkable successor in which Reg is live-in.
|
|
|
|
MachineBasicBlock *BB = nullptr;
|
|
|
|
for (auto *SI : SinkableBBs) {
|
2018-04-28 03:59:20 +08:00
|
|
|
if (aliasWithRegsInLiveIn(*SI, Reg, TRI)) {
|
[CodeGen] Add a new pass for PostRA sink
Summary:
This pass sinks COPY instructions into a successor block, if the COPY is not
used in the current block and the COPY is live-in to a single successor
(i.e., doesn't require the COPY to be duplicated). This avoids executing the
the copy on paths where their results aren't needed. This also exposes
additional opportunites for dead copy elimination and shrink wrapping.
These copies were either not handled by or are inserted after the MachineSink
pass. As an example of the former case, the MachineSink pass cannot sink
COPY instructions with allocatable source registers; for AArch64 these type
of copy instructions are frequently used to move function parameters (PhyReg)
into virtual registers in the entry block..
For the machine IR below, this pass will sink %w19 in the entry into its
successor (%bb.1) because %w19 is only live-in in %bb.1.
```
%bb.0:
%wzr = SUBSWri %w1, 1
%w19 = COPY %w0
Bcc 11, %bb.2
%bb.1:
Live Ins: %w19
BL @fun
%w0 = ADDWrr %w0, %w19
RET %w0
%bb.2:
%w0 = COPY %wzr
RET %w0
```
As we sink %w19 (CSR in AArch64) into %bb.1, the shrink-wrapping pass will be
able to see %bb.0 as a candidate.
With this change I observed 12% more shrink-wrapping candidate and 13% more dead copies deleted in spec2000/2006/2017 on AArch64.
Reviewers: qcolombet, MatzeB, thegameg, mcrosier, gberry, hfinkel, john.brawn, twoh, RKSimon, sebpop, kparzysz
Reviewed By: sebpop
Subscribers: evandro, sebpop, sfertile, aemerson, mgorny, javed.absar, kristof.beyls, llvm-commits
Differential Revision: https://reviews.llvm.org/D41463
llvm-svn: 328237
2018-03-23 04:06:47 +08:00
|
|
|
// If BB is set here, Reg is live-in to at least two sinkable successors,
|
|
|
|
// so quit.
|
|
|
|
if (BB)
|
|
|
|
return nullptr;
|
|
|
|
BB = SI;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
// Reg is not live-in to any sinkable successors.
|
|
|
|
if (!BB)
|
|
|
|
return nullptr;
|
|
|
|
|
|
|
|
// Check if any register aliased with Reg is live-in in other successors.
|
|
|
|
for (auto *SI : CurBB.successors()) {
|
2018-04-28 03:59:20 +08:00
|
|
|
if (!SinkableBBs.count(SI) && aliasWithRegsInLiveIn(*SI, Reg, TRI))
|
2018-04-13 22:23:09 +08:00
|
|
|
return nullptr;
|
2018-03-23 04:06:47 +08:00
|
|
|
}
|
|
|
|
return BB;
|
|
|
|
}
|
|
|
|
|
2018-04-28 03:59:20 +08:00
|
|
|
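// Try to find the single successor in SinkableBBs into which every register
// defined by the COPY is live-in; returns nullptr if no such unique successor
// exists.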
static MachineBasicBlock *
|
|
|
|
getSingleLiveInSuccBB(MachineBasicBlock &CurBB,
|
|
|
|
const SmallPtrSetImpl<MachineBasicBlock *> &SinkableBBs,
|
|
|
|
ArrayRef<unsigned> DefedRegsInCopy,
|
|
|
|
const TargetRegisterInfo *TRI) {
|
2018-04-13 22:23:09 +08:00
|
|
|
MachineBasicBlock *SingleBB = nullptr;
|
|
|
|
for (auto DefReg : DefedRegsInCopy) {
|
|
|
|
MachineBasicBlock *BB =
|
|
|
|
getSingleLiveInSuccBB(CurBB, SinkableBBs, DefReg, TRI);
|
|
|
|
if (!BB || (SingleBB && SingleBB != BB))
|
|
|
|
return nullptr;
|
|
|
|
SingleBB = BB;
|
|
|
|
}
|
|
|
|
return SingleBB;
|
|
|
|
}
|
|
|
|
|
|
|
|
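// If a source register of the COPY is killed by a later instruction in CurBB,
// clear that kill flag (the register stays live once the COPY is sunk into the
// successor) and mark the COPY's use operand as the kill instead.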
static void clearKillFlags(MachineInstr *MI, MachineBasicBlock &CurBB,
|
|
|
|
SmallVectorImpl<unsigned> &UsedOpsInCopy,
|
[CodeGen] Use RegUnits to track register aliases (NFC)
Summary: Use RegUnits to track register aliases in PostRASink and AArch64LoadStoreOptimizer.
Reviewers: thegameg, mcrosier, gberry, qcolombet, sebpop, MatzeB, t.p.northover, javed.absar
Reviewed By: thegameg, sebpop
Subscribers: javed.absar, llvm-commits, kristof.beyls
Differential Revision: https://reviews.llvm.org/D45695
llvm-svn: 331066
2018-04-28 02:44:37 +08:00
|
|
|
LiveRegUnits &UsedRegUnits,
|
|
|
|
const TargetRegisterInfo *TRI) {
|
2018-04-13 22:23:09 +08:00
|
|
|
for (auto U : UsedOpsInCopy) {
|
|
|
|
MachineOperand &MO = MI->getOperand(U);
|
|
|
|
unsigned SrcReg = MO.getReg();
|
2018-04-28 02:44:37 +08:00
|
|
|
if (!UsedRegUnits.available(SrcReg)) {
|
2018-04-13 22:23:09 +08:00
|
|
|
MachineBasicBlock::iterator NI = std::next(MI->getIterator());
|
|
|
|
for (MachineInstr &UI : make_range(NI, CurBB.end())) {
|
|
|
|
if (UI.killsRegister(SrcReg, TRI)) {
|
|
|
|
UI.clearRegisterKills(SrcReg, TRI);
|
|
|
|
MO.setIsKill(true);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
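// Update SuccBB's live-in list after sinking the COPY into it: the registers
// defined by the COPY are no longer live-in, while its source registers become
// live-in.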
static void updateLiveIn(MachineInstr *MI, MachineBasicBlock *SuccBB,
|
|
|
|
SmallVectorImpl<unsigned> &UsedOpsInCopy,
|
|
|
|
SmallVectorImpl<unsigned> &DefedRegsInCopy) {
|
|
|
|
for (auto DefReg : DefedRegsInCopy)
|
|
|
|
SuccBB->removeLiveIn(DefReg);
|
|
|
|
for (auto U : UsedOpsInCopy) {
|
|
|
|
unsigned Reg = MI->getOperand(U).getReg();
|
|
|
|
if (!SuccBB->isLiveIn(Reg))
|
|
|
|
SuccBB->addLiveIn(Reg);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
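// Check whether any operand of MI conflicts with a register modified or used
// between MI and the end of the block; while scanning, collect the indices of
// MI's use operands and the registers it defines.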
static bool hasRegisterDependency(MachineInstr *MI,
|
|
|
|
SmallVectorImpl<unsigned> &UsedOpsInCopy,
|
|
|
|
SmallVectorImpl<unsigned> &DefedRegsInCopy,
|
2018-04-28 02:44:37 +08:00
|
|
|
LiveRegUnits &ModifiedRegUnits,
|
|
|
|
LiveRegUnits &UsedRegUnits) {
|
2018-04-13 22:23:09 +08:00
|
|
|
bool HasRegDependency = false;
|
|
|
|
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
|
|
|
|
MachineOperand &MO = MI->getOperand(i);
|
|
|
|
if (!MO.isReg())
|
|
|
|
continue;
|
|
|
|
unsigned Reg = MO.getReg();
|
|
|
|
if (!Reg)
|
|
|
|
continue;
|
|
|
|
if (MO.isDef()) {
|
2018-04-28 02:44:37 +08:00
|
|
|
if (!ModifiedRegUnits.available(Reg) || !UsedRegUnits.available(Reg)) {
|
2018-04-13 22:23:09 +08:00
|
|
|
HasRegDependency = true;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
DefedRegsInCopy.push_back(Reg);
|
|
|
|
|
|
|
|
// FIXME: instead of isUse(), readsReg() would be a better fix here. For
|
|
|
|
// example, we can ignore modifications in a reg with undef. However,
|
|
|
|
// it's not perfectly clear if skipping the internal read is safe in all
|
|
|
|
// other targets.
|
|
|
|
} else if (MO.isUse()) {
|
2018-04-28 02:44:37 +08:00
|
|
|
if (!ModifiedRegUnits.available(Reg)) {
|
2018-04-13 22:23:09 +08:00
|
|
|
HasRegDependency = true;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
UsedOpsInCopy.push_back(i);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return HasRegDependency;
|
|
|
|
}
|
|
|
|
|
2018-03-23 04:06:47 +08:00
|
|
|
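// Walk CurBB bottom-up and sink each renamable COPY whose defined registers are
// live-in to exactly one sinkable successor and which has no register dependency
// with the instructions between it and the end of the block.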
bool PostRAMachineSinking::tryToSinkCopy(MachineBasicBlock &CurBB,
|
|
|
|
MachineFunction &MF,
|
|
|
|
const TargetRegisterInfo *TRI,
|
|
|
|
const TargetInstrInfo *TII) {
|
2018-04-28 03:59:20 +08:00
|
|
|
SmallPtrSet<MachineBasicBlock *, 2> SinkableBBs;
|
2018-03-23 04:06:47 +08:00
|
|
|
// FIXME: For now, we sink only to a successor which has a single predecessor
|
|
|
|
// so that we can directly sink COPY instructions to the successor without
|
|
|
|
// adding any new block or branch instruction.
|
|
|
|
for (MachineBasicBlock *SI : CurBB.successors())
|
|
|
|
if (!SI->livein_empty() && SI->pred_size() == 1)
|
2018-04-28 03:59:20 +08:00
|
|
|
SinkableBBs.insert(SI);
|
2018-03-23 04:06:47 +08:00
|
|
|
|
|
|
|
if (SinkableBBs.empty())
|
|
|
|
return false;
|
|
|
|
|
|
|
|
bool Changed = false;
|
|
|
|
|
|
|
|
// Track which registers have been modified and used between the end of the
|
|
|
|
// block and the current instruction.
|
2018-04-28 02:44:37 +08:00
|
|
|
ModifiedRegUnits.clear();
|
|
|
|
UsedRegUnits.clear();
|
2018-03-23 04:06:47 +08:00
|
|
|
|
|
|
|
for (auto I = CurBB.rbegin(), E = CurBB.rend(); I != E;) {
|
|
|
|
MachineInstr *MI = &*I;
|
|
|
|
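// Advance the iterator before processing MI; MI may be spliced into a
// successor below.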
++I;
|
|
|
|
|
|
|
|
// Do not move any instruction across a function call.
|
|
|
|
if (MI->isCall())
|
|
|
|
return false;
|
|
|
|
|
|
|
|
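// Only renamable COPYs are candidates; for any other instruction, just record
// its defs and uses and keep scanning.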
if (!MI->isCopy() || !MI->getOperand(0).isRenamable()) {
|
2018-04-28 02:44:37 +08:00
|
|
|
LiveRegUnits::accumulateUsedDefed(*MI, ModifiedRegUnits, UsedRegUnits,
|
|
|
|
TRI);
|
2018-03-23 04:06:47 +08:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2018-04-13 22:23:09 +08:00
|
|
|
// Track the indices of the COPY's use operands.
|
|
|
|
SmallVector<unsigned, 2> UsedOpsInCopy;
|
|
|
|
// Track the register numbers defined by the COPY.
|
|
|
|
SmallVector<unsigned, 2> DefedRegsInCopy;
|
|
|
|
|
2018-03-23 04:06:47 +08:00
|
|
|
// Don't sink the COPY if it would violate a register dependency.
|
2018-04-28 02:44:37 +08:00
|
|
|
if (hasRegisterDependency(MI, UsedOpsInCopy, DefedRegsInCopy,
|
|
|
|
ModifiedRegUnits, UsedRegUnits)) {
|
|
|
|
LiveRegUnits::accumulateUsedDefed(*MI, ModifiedRegUnits, UsedRegUnits,
|
|
|
|
TRI);
|
2018-03-23 04:06:47 +08:00
|
|
|
continue;
|
|
|
|
}
|
2018-04-13 22:23:09 +08:00
|
|
|
assert((!UsedOpsInCopy.empty() && !DefedRegsInCopy.empty()) &&
|
|
|
|
"Unexpect SrcReg or DefReg");
|
2018-03-23 04:06:47 +08:00
|
|
|
MachineBasicBlock *SuccBB =
|
2018-04-13 22:23:09 +08:00
|
|
|
getSingleLiveInSuccBB(CurBB, SinkableBBs, DefedRegsInCopy, TRI);
|
2018-03-23 04:06:47 +08:00
|
|
|
// Don't sink if we cannot find a single sinkable successor in which Reg
|
|
|
|
// is live-in.
|
|
|
|
if (!SuccBB) {
|
2018-04-28 02:44:37 +08:00
|
|
|
LiveRegUnits::accumulateUsedDefed(*MI, ModifiedRegUnits, UsedRegUnits,
|
|
|
|
TRI);
|
2018-03-23 04:06:47 +08:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
assert((SuccBB->pred_size() == 1 && *SuccBB->pred_begin() == &CurBB) &&
|
|
|
|
"Unexpected predecessor");
|
|
|
|
|
|
|
|
// Clear the kill flag if SrcReg is killed between MI and the end of the
|
|
|
|
// block.
|
2018-04-28 02:44:37 +08:00
|
|
|
clearKillFlags(MI, CurBB, UsedOpsInCopy, UsedRegUnits, TRI);
|
2018-03-23 04:06:47 +08:00
|
|
|
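// Splice the COPY to the top of SuccBB (before its first non-PHI instruction)
// and update SuccBB's live-in list accordingly.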
MachineBasicBlock::iterator InsertPos = SuccBB->getFirstNonPHI();
|
|
|
|
SuccBB->splice(InsertPos, &CurBB, MI);
|
2018-04-13 22:23:09 +08:00
|
|
|
updateLiveIn(MI, SuccBB, UsedOpsInCopy, DefedRegsInCopy);
|
2018-03-23 04:06:47 +08:00
|
|
|
|
|
|
|
Changed = true;
|
|
|
|
++NumPostRACopySink;
|
|
|
|
}
|
|
|
|
return Changed;
|
|
|
|
}
|
|
|
|
|
|
|
|
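// Initialize the register-unit trackers for this target and try to sink COPYs
// in every block of the function.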
bool PostRAMachineSinking::runOnMachineFunction(MachineFunction &MF) {
|
|
|
|
bool Changed = false;
|
|
|
|
const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
|
|
|
|
const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
|
|
|
|
|
2018-04-28 02:44:37 +08:00
|
|
|
ModifiedRegUnits.init(*TRI);
|
|
|
|
UsedRegUnits.init(*TRI);
|
2018-03-23 04:06:47 +08:00
|
|
|
for (auto &BB : MF)
|
|
|
|
Changed |= tryToSinkCopy(BB, MF, TRI, TII);
|
|
|
|
|
|
|
|
return Changed;
|
|
|
|
}
|