//===- TargetPassConfig.cpp - Target independent code generation passes ---===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines interfaces to access the target independent code
// generation passes provided by the LLVM backend.
//
//===---------------------------------------------------------------------===//

#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/CFLAndersAliasAnalysis.h"
#include "llvm/Analysis/CFLSteensAliasAnalysis.h"
#include "llvm/Analysis/CallGraphSCCPass.h"
#include "llvm/Analysis/ScopedNoAliasAA.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/TypeBasedAliasAnalysis.h"
#include "llvm/CodeGen/CSEConfigBase.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachinePassRegistry.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/RegAllocRegistry.h"
#include "llvm/IR/IRPrintingPasses.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/PassInstrumentation.h"
#include "llvm/IR/Verifier.h"
#include "llvm/InitializePasses.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCTargetOptions.h"
#include "llvm/Pass.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Discriminator.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Support/Threading.h"
#include "llvm/Target/CGPassBuilderOption.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils.h"
#include "llvm/Transforms/Utils/SymbolRewriter.h"
#include <cassert>
#include <string>

using namespace llvm;

static cl::opt<bool>
    EnableIPRA("enable-ipra", cl::init(false), cl::Hidden,
               cl::desc("Enable interprocedural register allocation "
                        "to reduce load/store at procedure calls."));

static cl::opt<bool> DisablePostRASched("disable-post-ra", cl::Hidden,
    cl::desc("Disable Post Regalloc Scheduler"));

static cl::opt<bool> DisableBranchFold("disable-branch-fold", cl::Hidden,
    cl::desc("Disable branch folding"));

static cl::opt<bool> DisableTailDuplicate("disable-tail-duplicate", cl::Hidden,
    cl::desc("Disable tail duplication"));

static cl::opt<bool> DisableEarlyTailDup("disable-early-taildup", cl::Hidden,
    cl::desc("Disable pre-register allocation tail duplication"));

static cl::opt<bool> DisableBlockPlacement("disable-block-placement",
    cl::Hidden, cl::desc("Disable probability-driven block placement"));

static cl::opt<bool> EnableBlockPlacementStats("enable-block-placement-stats",
    cl::Hidden, cl::desc("Collect probability-driven block placement stats"));

static cl::opt<bool> DisableSSC("disable-ssc", cl::Hidden,
    cl::desc("Disable Stack Slot Coloring"));

static cl::opt<bool> DisableMachineDCE("disable-machine-dce", cl::Hidden,
    cl::desc("Disable Machine Dead Code Elimination"));

static cl::opt<bool> DisableEarlyIfConversion("disable-early-ifcvt", cl::Hidden,
    cl::desc("Disable Early If-conversion"));

static cl::opt<bool> DisableMachineLICM("disable-machine-licm", cl::Hidden,
    cl::desc("Disable Machine LICM"));

static cl::opt<bool> DisableMachineCSE("disable-machine-cse", cl::Hidden,
    cl::desc("Disable Machine Common Subexpression Elimination"));

static cl::opt<cl::boolOrDefault> OptimizeRegAlloc(
    "optimize-regalloc", cl::Hidden,
    cl::desc("Enable optimized register allocation compilation path."));

static cl::opt<bool> DisablePostRAMachineLICM("disable-postra-machine-licm",
    cl::Hidden,
    cl::desc("Disable Machine LICM"));

static cl::opt<bool> DisableMachineSink("disable-machine-sink", cl::Hidden,
    cl::desc("Disable Machine Sinking"));

static cl::opt<bool> DisablePostRAMachineSink("disable-postra-machine-sink",
    cl::Hidden,
    cl::desc("Disable PostRA Machine Sinking"));

static cl::opt<bool> DisableLSR("disable-lsr", cl::Hidden,
    cl::desc("Disable Loop Strength Reduction Pass"));

static cl::opt<bool> DisableConstantHoisting("disable-constant-hoisting",
    cl::Hidden, cl::desc("Disable ConstantHoisting"));

static cl::opt<bool> DisableCGP("disable-cgp", cl::Hidden,
    cl::desc("Disable Codegen Prepare"));

static cl::opt<bool> DisableCopyProp("disable-copyprop", cl::Hidden,
    cl::desc("Disable Copy Propagation pass"));

static cl::opt<bool> DisablePartialLibcallInlining("disable-partial-libcall-inlining",
    cl::Hidden, cl::desc("Disable Partial Libcall Inlining"));

static cl::opt<bool> EnableImplicitNullChecks(
    "enable-implicit-null-checks",
    cl::desc("Fold null checks into faulting memory operations"),
    cl::init(false), cl::Hidden);

static cl::opt<bool> DisableMergeICmps("disable-mergeicmps",
    cl::desc("Disable MergeICmps Pass"),
    cl::init(false), cl::Hidden);

static cl::opt<bool> PrintLSR("print-lsr-output", cl::Hidden,
    cl::desc("Print LLVM IR produced by the loop-reduce pass"));

static cl::opt<bool> PrintISelInput("print-isel-input", cl::Hidden,
    cl::desc("Print LLVM IR input to isel pass"));

static cl::opt<bool> PrintGCInfo("print-gc", cl::Hidden,
    cl::desc("Dump garbage collector data"));

static cl::opt<cl::boolOrDefault>
    VerifyMachineCode("verify-machineinstrs", cl::Hidden,
                      cl::desc("Verify generated machine code"),
                      cl::ZeroOrMore);

static cl::opt<cl::boolOrDefault> DebugifyAndStripAll(
    "debugify-and-strip-all-safe", cl::Hidden,
    cl::desc(
        "Debugify MIR before and Strip debug after "
        "each pass except those known to be unsafe when debug info is present"),
    cl::ZeroOrMore);

static cl::opt<cl::boolOrDefault> DebugifyCheckAndStripAll(
    "debugify-check-and-strip-all-safe", cl::Hidden,
    cl::desc(
        "Debugify MIR before, by checking and stripping the debug info after, "
        "each pass except those known to be unsafe when debug info is present"),
    cl::ZeroOrMore);

// Enable or disable the MachineOutliner.
static cl::opt<RunOutliner> EnableMachineOutliner(
    "enable-machine-outliner", cl::desc("Enable the machine outliner"),
    cl::Hidden, cl::ValueOptional, cl::init(RunOutliner::TargetDefault),
    cl::values(clEnumValN(RunOutliner::AlwaysOutline, "always",
                          "Run on all functions guaranteed to be beneficial"),
               clEnumValN(RunOutliner::NeverOutline, "never",
                          "Disable all outlining"),
               // Sentinel value for unspecified option.
               clEnumValN(RunOutliner::AlwaysOutline, "", "")));
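// Because of cl::ValueOptional and the "" sentinel above, a bare
// "-enable-machine-outliner" behaves like "-enable-machine-outliner=always",
// while omitting the flag entirely leaves RunOutliner::TargetDefault in
// effect.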

// Enable or disable FastISel. Both options are needed, because
// FastISel is enabled by default with -fast, and we wish to be
// able to enable or disable fast-isel independently from -O0.
static cl::opt<cl::boolOrDefault>
    EnableFastISelOption("fast-isel", cl::Hidden,
                         cl::desc("Enable the \"fast\" instruction selector"));

static cl::opt<cl::boolOrDefault> EnableGlobalISelOption(
    "global-isel", cl::Hidden,
    cl::desc("Enable the \"global\" instruction selector"));

// FIXME: remove this after switching to NPM or GlobalISel, whichever gets there
// first...
static cl::opt<bool>
    PrintAfterISel("print-after-isel", cl::init(false), cl::Hidden,
                   cl::desc("Print machine instrs after ISel"));

static cl::opt<GlobalISelAbortMode> EnableGlobalISelAbort(
    "global-isel-abort", cl::Hidden,
    cl::desc("Enable abort calls when \"global\" instruction selection "
             "fails to lower/select an instruction"),
    cl::values(
        clEnumValN(GlobalISelAbortMode::Disable, "0", "Disable the abort"),
        clEnumValN(GlobalISelAbortMode::Enable, "1", "Enable the abort"),
        clEnumValN(GlobalISelAbortMode::DisableWithDiag, "2",
                   "Disable the abort but emit a diagnostic on failure")));

// An option that disables inserting FS-AFDO discriminators before emit.
// This is mainly for debugging and tuning purposes.
static cl::opt<bool>
    FSNoFinalDiscrim("fs-no-final-discrim", cl::init(false), cl::Hidden,
                     cl::desc("Do not insert FS-AFDO discriminators before "
                              "emit."));

// Temporary option to allow experimenting with MachineScheduler as a post-RA
// scheduler. Targets can "properly" enable this with
// substitutePass(&PostRASchedulerID, &PostMachineSchedulerID).
// Alternatively, targets can return true from
// targetSchedulesPostRAScheduling() and insert a PostRA scheduling pass
// wherever they want.
static cl::opt<bool> MISchedPostRA(
    "misched-postra", cl::Hidden,
    cl::desc(
        "Run MachineScheduler post regalloc (independent of preRA sched)"));

// Experimental option to run live interval analysis early.
static cl::opt<bool> EarlyLiveIntervals("early-live-intervals", cl::Hidden,
    cl::desc("Run live interval analysis earlier in the pipeline"));

// Experimental option to use CFL-AA in codegen.
static cl::opt<CFLAAType> UseCFLAA(
    "use-cfl-aa-in-codegen", cl::init(CFLAAType::None), cl::Hidden,
    cl::desc("Enable the new, experimental CFL alias analysis in CodeGen"),
    cl::values(clEnumValN(CFLAAType::None, "none", "Disable CFL-AA"),
               clEnumValN(CFLAAType::Steensgaard, "steens",
                          "Enable unification-based CFL-AA"),
               clEnumValN(CFLAAType::Andersen, "anders",
                          "Enable inclusion-based CFL-AA"),
               clEnumValN(CFLAAType::Both, "both",
                          "Enable both variants of CFL-AA")));

/// Option names for limiting the codegen pipeline. These are used in error
/// reporting, and we didn't want to duplicate their names all over the place.
static const char StartAfterOptName[] = "start-after";
static const char StartBeforeOptName[] = "start-before";
static const char StopAfterOptName[] = "stop-after";
static const char StopBeforeOptName[] = "stop-before";

static cl::opt<std::string>
    StartAfterOpt(StringRef(StartAfterOptName),
                  cl::desc("Resume compilation after a specific pass"),
                  cl::value_desc("pass-name"), cl::init(""), cl::Hidden);

static cl::opt<std::string>
    StartBeforeOpt(StringRef(StartBeforeOptName),
                   cl::desc("Resume compilation before a specific pass"),
                   cl::value_desc("pass-name"), cl::init(""), cl::Hidden);

static cl::opt<std::string>
    StopAfterOpt(StringRef(StopAfterOptName),
                 cl::desc("Stop compilation after a specific pass"),
                 cl::value_desc("pass-name"), cl::init(""), cl::Hidden);

static cl::opt<std::string>
    StopBeforeOpt(StringRef(StopBeforeOptName),
                  cl::desc("Stop compilation before a specific pass"),
                  cl::value_desc("pass-name"), cl::init(""), cl::Hidden);
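
// Illustrative usage from the llc command line (the specific pass names below
// are only examples; any legacy pass name registered with the PassRegistry
// works, optionally followed by a comma and an instance number):
//   llc -stop-after=machine-scheduler in.ll     (halt once MISched has run)
//   llc -start-before=greedy,1 in.mir           (resume at the second run of
//                                                the greedy register allocator)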

/// Enable the machine function splitter pass.
static cl::opt<bool> EnableMachineFunctionSplitter(
    "enable-split-machine-functions", cl::Hidden,
    cl::desc("Split out cold blocks from machine functions based on profile "
             "information."));

/// Disable the expand reductions pass for testing.
static cl::opt<bool> DisableExpandReductions(
    "disable-expand-reductions", cl::init(false), cl::Hidden,
    cl::desc("Disable the expand reduction intrinsics pass from running"));

/// Allow standard passes to be disabled by command line options. This supports
/// simple binary flags that either suppress the pass or do nothing.
/// i.e. -disable-mypass=false has no effect.
/// These should be converted to boolOrDefault in order to use applyOverride.
static IdentifyingPassPtr applyDisable(IdentifyingPassPtr PassID,
                                       bool Override) {
  if (Override)
    return IdentifyingPassPtr();
  return PassID;
}
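
// For example (following the mappings in overridePass() below): with
// -disable-machine-dce on the command line, applyDisable() returns an invalid
// IdentifyingPassPtr for DeadMachineInstructionElim, and addPass() then skips
// scheduling that pass entirely.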

/// Allow standard passes to be disabled by the command line, regardless of who
/// is adding the pass.
///
/// StandardID is the pass identified in the standard pass pipeline and provided
/// to addPass(). It may be a target-specific ID in the case that the target
/// directly adds its own pass, but in that case we harmlessly fall through.
///
/// TargetID is the pass that the target has configured to override StandardID.
///
/// StandardID may be a pseudo ID. In that case TargetID is the name of the real
/// pass to run. This allows multiple options to control a single pass depending
/// on where in the pipeline that pass is added.
static IdentifyingPassPtr overridePass(AnalysisID StandardID,
                                       IdentifyingPassPtr TargetID) {
  if (StandardID == &PostRASchedulerID)
    return applyDisable(TargetID, DisablePostRASched);

  if (StandardID == &BranchFolderPassID)
    return applyDisable(TargetID, DisableBranchFold);

  if (StandardID == &TailDuplicateID)
    return applyDisable(TargetID, DisableTailDuplicate);

  if (StandardID == &EarlyTailDuplicateID)
    return applyDisable(TargetID, DisableEarlyTailDup);

  if (StandardID == &MachineBlockPlacementID)
    return applyDisable(TargetID, DisableBlockPlacement);

  if (StandardID == &StackSlotColoringID)
    return applyDisable(TargetID, DisableSSC);

  if (StandardID == &DeadMachineInstructionElimID)
    return applyDisable(TargetID, DisableMachineDCE);

  if (StandardID == &EarlyIfConverterID)
    return applyDisable(TargetID, DisableEarlyIfConversion);

  if (StandardID == &EarlyMachineLICMID)
    return applyDisable(TargetID, DisableMachineLICM);

  if (StandardID == &MachineCSEID)
    return applyDisable(TargetID, DisableMachineCSE);

  if (StandardID == &MachineLICMID)
    return applyDisable(TargetID, DisablePostRAMachineLICM);

  if (StandardID == &MachineSinkingID)
    return applyDisable(TargetID, DisableMachineSink);

  if (StandardID == &PostRAMachineSinkingID)
    return applyDisable(TargetID, DisablePostRAMachineSink);

  if (StandardID == &MachineCopyPropagationID)
    return applyDisable(TargetID, DisableCopyProp);

  return TargetID;
}

//===---------------------------------------------------------------------===//
/// TargetPassConfig
//===---------------------------------------------------------------------===//

INITIALIZE_PASS(TargetPassConfig, "targetpassconfig",
                "Target Pass Configuration", false, false)

char TargetPassConfig::ID = 0;

namespace {

struct InsertedPass {
  AnalysisID TargetPassID;
  IdentifyingPassPtr InsertedPassID;
  bool VerifyAfter;

  InsertedPass(AnalysisID TargetPassID, IdentifyingPassPtr InsertedPassID,
               bool VerifyAfter)
      : TargetPassID(TargetPassID), InsertedPassID(InsertedPassID),
        VerifyAfter(VerifyAfter) {}

  Pass *getInsertedPass() const {
    assert(InsertedPassID.isValid() && "Illegal Pass ID!");
    if (InsertedPassID.isInstance())
      return InsertedPassID.getInstance();
    Pass *NP = Pass::createPass(InsertedPassID.getID());
    assert(NP && "Pass ID not registered");
    return NP;
  }
};

} // end anonymous namespace

namespace llvm {

extern cl::opt<bool> EnableFSDiscriminator;

class PassConfigImpl {
public:
  // List of passes explicitly substituted by this target. Normally this is
  // empty, but it is a convenient way to suppress or replace specific passes
  // that are part of a standard pass pipeline without overriding the entire
  // pipeline. This mechanism allows target options to inherit a standard
  // pass's user interface. For example, a target may disable a standard pass
  // by default by substituting a pass ID of zero, and the user may still
  // enable that standard pass with an explicit command line option.
  DenseMap<AnalysisID, IdentifyingPassPtr> TargetPasses;

  /// Store the pairs of <AnalysisID, AnalysisID> of which the second pass
  /// is inserted after each instance of the first one.
  SmallVector<InsertedPass, 4> InsertedPasses;
};

} // end namespace llvm

// Out of line virtual method.
TargetPassConfig::~TargetPassConfig() {
  delete Impl;
}

static const PassInfo *getPassInfo(StringRef PassName) {
  if (PassName.empty())
    return nullptr;

  const PassRegistry &PR = *PassRegistry::getPassRegistry();
  const PassInfo *PI = PR.getPassInfo(PassName);
  if (!PI)
    report_fatal_error(Twine('\"') + Twine(PassName) +
                       Twine("\" pass is not registered."));
  return PI;
}

static AnalysisID getPassIDFromName(StringRef PassName) {
  const PassInfo *PI = getPassInfo(PassName);
  return PI ? PI->getTypeInfo() : nullptr;
}

static std::pair<StringRef, unsigned>
getPassNameAndInstanceNum(StringRef PassName) {
  StringRef Name, InstanceNumStr;
  std::tie(Name, InstanceNumStr) = PassName.split(',');

  unsigned InstanceNum = 0;
  if (!InstanceNumStr.empty() && InstanceNumStr.getAsInteger(10, InstanceNum))
    report_fatal_error("invalid pass instance specifier " + PassName);

  return std::make_pair(Name, InstanceNum);
}
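
// For example, "machine-sink,1" yields ("machine-sink", 1), i.e. the second
// run of the machine-sinking pass, while a plain "machine-sink" yields
// instance number 0, the first run.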

void TargetPassConfig::setStartStopPasses() {
  StringRef StartBeforeName;
  std::tie(StartBeforeName, StartBeforeInstanceNum) =
      getPassNameAndInstanceNum(StartBeforeOpt);

  StringRef StartAfterName;
  std::tie(StartAfterName, StartAfterInstanceNum) =
      getPassNameAndInstanceNum(StartAfterOpt);

  StringRef StopBeforeName;
  std::tie(StopBeforeName, StopBeforeInstanceNum) =
      getPassNameAndInstanceNum(StopBeforeOpt);

  StringRef StopAfterName;
  std::tie(StopAfterName, StopAfterInstanceNum) =
      getPassNameAndInstanceNum(StopAfterOpt);

  StartBefore = getPassIDFromName(StartBeforeName);
  StartAfter = getPassIDFromName(StartAfterName);
  StopBefore = getPassIDFromName(StopBeforeName);
  StopAfter = getPassIDFromName(StopAfterName);
  if (StartBefore && StartAfter)
    report_fatal_error(Twine(StartBeforeOptName) + Twine(" and ") +
                       Twine(StartAfterOptName) + Twine(" specified!"));
  if (StopBefore && StopAfter)
    report_fatal_error(Twine(StopBeforeOptName) + Twine(" and ") +
                       Twine(StopAfterOptName) + Twine(" specified!"));
  Started = (StartAfter == nullptr) && (StartBefore == nullptr);
}

CGPassBuilderOption llvm::getCGPassBuilderOption() {
  CGPassBuilderOption Opt;

#define SET_OPTION(Option)                                                     \
  if (Option.getNumOccurrences())                                             \
    Opt.Option = Option;
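  // SET_OPTION only forwards options that actually appeared on the command
  // line; e.g. SET_OPTION(EnableIPRA) expands to
  //   if (EnableIPRA.getNumOccurrences()) Opt.EnableIPRA = EnableIPRA;
  // so unset flags keep the CGPassBuilderOption defaults.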

  SET_OPTION(EnableFastISelOption)
  SET_OPTION(EnableGlobalISelAbort)
  SET_OPTION(EnableGlobalISelOption)
  SET_OPTION(EnableIPRA)
  SET_OPTION(OptimizeRegAlloc)
  SET_OPTION(VerifyMachineCode)

#define SET_BOOLEAN_OPTION(Option) Opt.Option = Option;

  SET_BOOLEAN_OPTION(EarlyLiveIntervals)
  SET_BOOLEAN_OPTION(EnableBlockPlacementStats)
  SET_BOOLEAN_OPTION(EnableImplicitNullChecks)
  SET_BOOLEAN_OPTION(EnableMachineOutliner)
  SET_BOOLEAN_OPTION(MISchedPostRA)
  SET_BOOLEAN_OPTION(UseCFLAA)
  SET_BOOLEAN_OPTION(DisableMergeICmps)
  SET_BOOLEAN_OPTION(DisableLSR)
  SET_BOOLEAN_OPTION(DisableConstantHoisting)
  SET_BOOLEAN_OPTION(DisableCGP)
  SET_BOOLEAN_OPTION(DisablePartialLibcallInlining)
  SET_BOOLEAN_OPTION(PrintLSR)
  SET_BOOLEAN_OPTION(PrintISelInput)
  SET_BOOLEAN_OPTION(PrintGCInfo)

  return Opt;
}

static void registerPartialPipelineCallback(PassInstrumentationCallbacks &PIC,
                                            LLVMTargetMachine &LLVMTM) {
  StringRef StartBefore;
  StringRef StartAfter;
  StringRef StopBefore;
  StringRef StopAfter;

  unsigned StartBeforeInstanceNum = 0;
  unsigned StartAfterInstanceNum = 0;
  unsigned StopBeforeInstanceNum = 0;
  unsigned StopAfterInstanceNum = 0;

  std::tie(StartBefore, StartBeforeInstanceNum) =
      getPassNameAndInstanceNum(StartBeforeOpt);
  std::tie(StartAfter, StartAfterInstanceNum) =
      getPassNameAndInstanceNum(StartAfterOpt);
  std::tie(StopBefore, StopBeforeInstanceNum) =
      getPassNameAndInstanceNum(StopBeforeOpt);
  std::tie(StopAfter, StopAfterInstanceNum) =
      getPassNameAndInstanceNum(StopAfterOpt);

  if (StartBefore.empty() && StartAfter.empty() && StopBefore.empty() &&
      StopAfter.empty())
    return;

  std::tie(StartBefore, std::ignore) =
      LLVMTM.getPassNameFromLegacyName(StartBefore);
  std::tie(StartAfter, std::ignore) =
      LLVMTM.getPassNameFromLegacyName(StartAfter);
  std::tie(StopBefore, std::ignore) =
      LLVMTM.getPassNameFromLegacyName(StopBefore);
  std::tie(StopAfter, std::ignore) =
      LLVMTM.getPassNameFromLegacyName(StopAfter);
  if (!StartBefore.empty() && !StartAfter.empty())
    report_fatal_error(Twine(StartBeforeOptName) + Twine(" and ") +
                       Twine(StartAfterOptName) + Twine(" specified!"));
  if (!StopBefore.empty() && !StopAfter.empty())
    report_fatal_error(Twine(StopBeforeOptName) + Twine(" and ") +
                       Twine(StopAfterOptName) + Twine(" specified!"));

  PIC.registerShouldRunOptionalPassCallback(
      [=, EnableCurrent = StartBefore.empty() && StartAfter.empty(),
       EnableNext = Optional<bool>(), StartBeforeCount = 0u,
       StartAfterCount = 0u, StopBeforeCount = 0u,
       StopAfterCount = 0u](StringRef P, Any) mutable {
        bool StartBeforePass = !StartBefore.empty() && P.contains(StartBefore);
        bool StartAfterPass = !StartAfter.empty() && P.contains(StartAfter);
        bool StopBeforePass = !StopBefore.empty() && P.contains(StopBefore);
        bool StopAfterPass = !StopAfter.empty() && P.contains(StopAfter);

        // Implement -start-after/-stop-after
        if (EnableNext) {
          EnableCurrent = *EnableNext;
          EnableNext.reset();
        }

        // Using PIC.registerAfterPassCallback won't work because if this
        // callback returns false, AfterPassCallback is also skipped.
        if (StartAfterPass && StartAfterCount++ == StartAfterInstanceNum) {
          assert(!EnableNext && "Error: assign to EnableNext more than once");
          EnableNext = true;
        }
        if (StopAfterPass && StopAfterCount++ == StopAfterInstanceNum) {
          assert(!EnableNext && "Error: assign to EnableNext more than once");
          EnableNext = false;
        }

        if (StartBeforePass && StartBeforeCount++ == StartBeforeInstanceNum)
          EnableCurrent = true;
        if (StopBeforePass && StopBeforeCount++ == StopBeforeInstanceNum)
          EnableCurrent = false;
        return EnableCurrent;
      });
}

void llvm::registerCodeGenCallback(PassInstrumentationCallbacks &PIC,
                                   LLVMTargetMachine &LLVMTM) {

  // Register a callback for disabling passes.
  PIC.registerShouldRunOptionalPassCallback([](StringRef P, Any) {

#define DISABLE_PASS(Option, Name)                                             \
  if (Option && P.contains(#Name))                                             \
    return false;
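    // DISABLE_PASS matches against the pass name reported to the
    // instrumentation; e.g. DISABLE_PASS(DisableBranchFold, BranchFolderPass)
    // expands to
    //   if (DisableBranchFold && P.contains("BranchFolderPass")) return false;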
    DISABLE_PASS(DisableBlockPlacement, MachineBlockPlacementPass)
    DISABLE_PASS(DisableBranchFold, BranchFolderPass)
    DISABLE_PASS(DisableCopyProp, MachineCopyPropagationPass)
    DISABLE_PASS(DisableEarlyIfConversion, EarlyIfConverterPass)
    DISABLE_PASS(DisableEarlyTailDup, EarlyTailDuplicatePass)
    DISABLE_PASS(DisableMachineCSE, MachineCSEPass)
    DISABLE_PASS(DisableMachineDCE, DeadMachineInstructionElimPass)
    DISABLE_PASS(DisableMachineLICM, EarlyMachineLICMPass)
    DISABLE_PASS(DisableMachineSink, MachineSinkingPass)
    DISABLE_PASS(DisablePostRAMachineLICM, MachineLICMPass)
    DISABLE_PASS(DisablePostRAMachineSink, PostRAMachineSinkingPass)
    DISABLE_PASS(DisablePostRASched, PostRASchedulerPass)
    DISABLE_PASS(DisableSSC, StackSlotColoringPass)
    DISABLE_PASS(DisableTailDuplicate, TailDuplicatePass)

    return true;
  });

  registerPartialPipelineCallback(PIC, LLVMTM);
}

// Out of line constructor provides default values for pass options and
// registers all common codegen passes.
TargetPassConfig::TargetPassConfig(LLVMTargetMachine &TM, PassManagerBase &pm)
    : ImmutablePass(ID), PM(&pm), TM(&TM) {
  Impl = new PassConfigImpl();

  // Register all target independent codegen passes to activate their PassIDs,
  // including this pass itself.
  initializeCodeGen(*PassRegistry::getPassRegistry());

  // Also register alias analysis passes required by codegen passes.
  initializeBasicAAWrapperPassPass(*PassRegistry::getPassRegistry());
  initializeAAResultsWrapperPassPass(*PassRegistry::getPassRegistry());

  if (EnableIPRA.getNumOccurrences())
    TM.Options.EnableIPRA = EnableIPRA;
  else {
    // If not explicitly specified, use target default.
    TM.Options.EnableIPRA |= TM.useIPRA();
  }

  if (TM.Options.EnableIPRA)
    setRequiresCodeGenSCCOrder();

  if (EnableGlobalISelAbort.getNumOccurrences())
    TM.Options.GlobalISelAbort = EnableGlobalISelAbort;

  setStartStopPasses();
}

CodeGenOpt::Level TargetPassConfig::getOptLevel() const {
  return TM->getOptLevel();
}

/// Insert InsertedPassID pass after TargetPassID.
void TargetPassConfig::insertPass(AnalysisID TargetPassID,
                                  IdentifyingPassPtr InsertedPassID,
                                  bool VerifyAfter) {
  assert(((!InsertedPassID.isInstance() &&
           TargetPassID != InsertedPassID.getID()) ||
          (InsertedPassID.isInstance() &&
           TargetPassID != InsertedPassID.getInstance()->getPassID())) &&
         "Insert a pass after itself!");
  Impl->InsertedPasses.emplace_back(TargetPassID, InsertedPassID, VerifyAfter);
}
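
// Illustrative use from a target's TargetPassConfig subclass; PHIEliminationID
// is a real pass ID, while MyTargetCleanupPassID stands in for a hypothetical
// target pass:
//   insertPass(&PHIEliminationID, &MyTargetCleanupPassID);
// This would schedule the cleanup pass after every instance of PHI
// elimination, via the InsertedPasses loop in addPass() below.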

/// createPassConfig - Create a pass configuration object to be used by
/// addPassToEmitX methods for generating a pipeline of CodeGen passes.
///
/// Targets may override this to extend TargetPassConfig.
TargetPassConfig *LLVMTargetMachine::createPassConfig(PassManagerBase &PM) {
  return new TargetPassConfig(*this, PM);
}

TargetPassConfig::TargetPassConfig()
    : ImmutablePass(ID) {
  report_fatal_error("Trying to construct TargetPassConfig without a target "
                     "machine. Scheduling a CodeGen pass without a target "
                     "triple set?");
}

bool TargetPassConfig::willCompleteCodeGenPipeline() {
  return StopBeforeOpt.empty() && StopAfterOpt.empty();
}

bool TargetPassConfig::hasLimitedCodeGenPipeline() {
  return !StartBeforeOpt.empty() || !StartAfterOpt.empty() ||
         !willCompleteCodeGenPipeline();
}

std::string
TargetPassConfig::getLimitedCodeGenPipelineReason(const char *Separator) {
  if (!hasLimitedCodeGenPipeline())
    return std::string();
  std::string Res;
  static cl::opt<std::string> *PassNames[] = {&StartAfterOpt, &StartBeforeOpt,
                                              &StopAfterOpt, &StopBeforeOpt};
  static const char *OptNames[] = {StartAfterOptName, StartBeforeOptName,
                                   StopAfterOptName, StopBeforeOptName};
  bool IsFirst = true;
  for (int Idx = 0; Idx < 4; ++Idx)
    if (!PassNames[Idx]->empty()) {
      if (!IsFirst)
        Res += Separator;
      IsFirst = false;
      Res += OptNames[Idx];
    }
  return Res;
}

// Helper to verify the analysis is really immutable.
void TargetPassConfig::setOpt(bool &Opt, bool Val) {
  assert(!Initialized && "PassConfig is immutable");
  Opt = Val;
}

void TargetPassConfig::substitutePass(AnalysisID StandardID,
                                      IdentifyingPassPtr TargetID) {
  Impl->TargetPasses[StandardID] = TargetID;
}
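
// Illustrative use (see the PassConfigImpl comment above): a target may
// disable a standard pass by default by substituting an invalid pass pointer,
//   substitutePass(&MachineBlockPlacementID, IdentifyingPassPtr());
// or swap in a replacement, as suggested for the post-RA MachineScheduler
// near the top of this file:
//   substitutePass(&PostRASchedulerID, &PostMachineSchedulerID);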

IdentifyingPassPtr TargetPassConfig::getPassSubstitution(AnalysisID ID) const {
  DenseMap<AnalysisID, IdentifyingPassPtr>::const_iterator
      I = Impl->TargetPasses.find(ID);
  if (I == Impl->TargetPasses.end())
    return ID;
  return I->second;
}

bool TargetPassConfig::isPassSubstitutedOrOverridden(AnalysisID ID) const {
  IdentifyingPassPtr TargetID = getPassSubstitution(ID);
  IdentifyingPassPtr FinalPtr = overridePass(ID, TargetID);
  return !FinalPtr.isValid() || FinalPtr.isInstance() ||
         FinalPtr.getID() != ID;
}

/// Add a pass to the PassManager if that pass is supposed to be run. If the
/// Started/Stopped flags indicate either that the compilation should start at
/// a later pass or that it should stop after an earlier pass, then do not add
/// the pass. Finally, compare the current pass against the StartAfter
/// and StopAfter options and change the Started/Stopped flags accordingly.
void TargetPassConfig::addPass(Pass *P, bool verifyAfter) {
  assert(!Initialized && "PassConfig is immutable");

  // Cache the Pass ID here in case the pass manager finds this pass is
  // redundant with ones already scheduled / available, and deletes it.
  // Fundamentally, once we add the pass to the manager, we no longer own it
  // and shouldn't reference it.
  AnalysisID PassID = P->getPassID();

  if (StartBefore == PassID && StartBeforeCount++ == StartBeforeInstanceNum)
    Started = true;
  if (StopBefore == PassID && StopBeforeCount++ == StopBeforeInstanceNum)
    Stopped = true;
  if (Started && !Stopped) {
    if (AddingMachinePasses)
      addMachinePrePasses();
    std::string Banner;
    // Construct banner message before PM->add() as that may delete the pass.
    if (AddingMachinePasses && verifyAfter)
      Banner = std::string("After ") + std::string(P->getPassName());
    PM->add(P);
    if (AddingMachinePasses)
      addMachinePostPasses(Banner, /*AllowVerify*/ verifyAfter);

    // Add the passes after the pass P if there are any.
    for (const auto &IP : Impl->InsertedPasses) {
      if (IP.TargetPassID == PassID)
        addPass(IP.getInsertedPass(), IP.VerifyAfter);
    }
  } else {
    delete P;
  }

  if (StopAfter == PassID && StopAfterCount++ == StopAfterInstanceNum)
    Stopped = true;

  if (StartAfter == PassID && StartAfterCount++ == StartAfterInstanceNum)
    Started = true;

  if (Stopped && !Started)
    report_fatal_error("Cannot stop compilation after pass that is not run");
}

/// Add a CodeGen pass at this point in the pipeline after checking for target
/// and command line overrides.
///
/// addPass cannot return a pointer to the pass instance, because the instance
/// is internal to the PassManager and may already have been freed by the time
/// we return.
AnalysisID TargetPassConfig::addPass(AnalysisID PassID, bool verifyAfter) {
  IdentifyingPassPtr TargetID = getPassSubstitution(PassID);
  IdentifyingPassPtr FinalPtr = overridePass(PassID, TargetID);
  if (!FinalPtr.isValid())
    return nullptr;

  Pass *P;
  if (FinalPtr.isInstance())
    P = FinalPtr.getInstance();
  else {
    P = Pass::createPass(FinalPtr.getID());
    if (!P)
      llvm_unreachable("Pass ID not registered");
  }
  AnalysisID FinalID = P->getPassID();
  addPass(P, verifyAfter); // Ends the lifetime of P.

  return FinalID;
}

void TargetPassConfig::printAndVerify(const std::string &Banner) {
  addPrintPass(Banner);
  addVerifyPass(Banner);
}

void TargetPassConfig::addPrintPass(const std::string &Banner) {
  if (PrintAfterISel)
    PM->add(createMachineFunctionPrinterPass(dbgs(), Banner));
}

void TargetPassConfig::addVerifyPass(const std::string &Banner) {
  bool Verify = VerifyMachineCode == cl::BOU_TRUE;
#ifdef EXPENSIVE_CHECKS
  if (VerifyMachineCode == cl::BOU_UNSET)
    Verify = TM->isMachineVerifierClean();
#endif
  if (Verify)
    PM->add(createMachineVerifierPass(Banner));
}

void TargetPassConfig::addDebugifyPass() {
  PM->add(createDebugifyMachineModulePass());
}

void TargetPassConfig::addStripDebugPass() {
  PM->add(createStripDebugMachineModulePass(/*OnlyDebugified=*/true));
}

void TargetPassConfig::addCheckDebugPass() {
  PM->add(createCheckDebugMachineModulePass());
}

void TargetPassConfig::addMachinePrePasses(bool AllowDebugify) {
  if (AllowDebugify && DebugifyIsSafe &&
      (DebugifyAndStripAll == cl::BOU_TRUE ||
       DebugifyCheckAndStripAll == cl::BOU_TRUE))
    addDebugifyPass();
}

void TargetPassConfig::addMachinePostPasses(const std::string &Banner,
                                            bool AllowVerify, bool AllowStrip) {
  if (DebugifyIsSafe) {
    if (DebugifyCheckAndStripAll == cl::BOU_TRUE) {
      addCheckDebugPass();
      addStripDebugPass();
    } else if (DebugifyAndStripAll == cl::BOU_TRUE)
      addStripDebugPass();
  }

  if (AllowVerify)
    addVerifyPass(Banner);
}
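
// For reference: with -debugify-and-strip-all-safe (e.g. running the test
// suite as "llvm-lit -Dllc='llc --debugify-and-strip-all-safe'"), the two
// hooks above bracket every machine pass added through addPass: synthetic
// debug info is attached before the pass runs and stripped (and, with the
// check variant, verified for losses) right after it, so each pass is
// exercised under debug info without dedicated tests.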

/// Add common target configurable passes that perform LLVM IR to IR transforms
/// following machine independent optimization.
void TargetPassConfig::addIRPasses() {
  // Before running any passes, run the verifier to determine if the input
  // coming from the front-end and/or optimizer is valid.
  if (!DisableVerify)
    addPass(createVerifierPass());

  if (getOptLevel() != CodeGenOpt::None) {
    switch (UseCFLAA) {
    case CFLAAType::Steensgaard:
      addPass(createCFLSteensAAWrapperPass());
      break;
    case CFLAAType::Andersen:
      addPass(createCFLAndersAAWrapperPass());
      break;
    case CFLAAType::Both:
      addPass(createCFLAndersAAWrapperPass());
      addPass(createCFLSteensAAWrapperPass());
      break;
    default:
      break;
    }

    // Basic AliasAnalysis support.
    // Add TypeBasedAliasAnalysis before BasicAliasAnalysis so that
    // BasicAliasAnalysis wins if they disagree. This is intended to help
    // support "obvious" type-punning idioms.
    addPass(createTypeBasedAAWrapperPass());
    addPass(createScopedNoAliasAAWrapperPass());
    addPass(createBasicAAWrapperPass());

    // Run loop strength reduction before anything else.
    if (!DisableLSR) {
      addPass(createCanonicalizeFreezeInLoopsPass());
      addPass(createLoopStrengthReducePass());
      if (PrintLSR)
        addPass(createPrintFunctionPass(dbgs(),
                                        "\n\n*** Code after LSR ***\n"));
    }

    // The MergeICmpsPass tries to create memcmp calls by grouping sequences of
    // loads and compares. ExpandMemCmpPass then tries to expand those calls
    // into optimally-sized loads and compares. The transforms are enabled by a
    // target lowering hook.
    if (!DisableMergeICmps)
      addPass(createMergeICmpsLegacyPass());
    addPass(createExpandMemCmpPass());
  }

  // Run GC lowering passes for builtin collectors.
  // TODO: add a pass insertion point here
  addPass(&GCLoweringID);
  addPass(&ShadowStackGCLoweringID);
  addPass(createLowerConstantIntrinsicsPass());

  // Make sure that no unreachable blocks are instruction selected.
  addPass(createUnreachableBlockEliminationPass());

  // Prepare expensive constants for SelectionDAG.
  if (getOptLevel() != CodeGenOpt::None && !DisableConstantHoisting)
    addPass(createConstantHoistingPass());

  if (getOptLevel() != CodeGenOpt::None)
    addPass(createReplaceWithVeclibLegacyPass());

  if (getOptLevel() != CodeGenOpt::None && !DisablePartialLibcallInlining)
    addPass(createPartiallyInlineLibCallsPass());

  // Expand vector predication intrinsics into standard IR instructions.
  // This pass has to run before ScalarizeMaskedMemIntrin and ExpandReduction
  // passes since it emits those kinds of intrinsics.
  addPass(createExpandVectorPredicationPass());

  // Add scalarization of target's unsupported masked memory intrinsics pass.
  // Any unsupported intrinsic will be replaced with a chain of basic blocks
  // that stores/loads elements one-by-one if the appropriate mask bit is set.
  addPass(createScalarizeMaskedMemIntrinLegacyPass());

  // Expand reduction intrinsics into shuffle sequences if the target wants to.
  // Allow disabling it for testing purposes.
  if (!DisableExpandReductions)
    addPass(createExpandReductionsPass());
}
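
// For reference: UseCFLAA above is a cl::opt selector declared earlier in
// this file (defaulting to CFLAAType::None, so neither CFL pass is added
// unless explicitly requested); with CFLAAType::Both the Andersen-style
// analysis is stacked ahead of the Steensgaard-style one, as in the switch
// at the top of addIRPasses.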

/// Turn exception handling constructs into something the code generators can
/// handle.
void TargetPassConfig::addPassesToHandleExceptions() {
  const MCAsmInfo *MCAI = TM->getMCAsmInfo();
  assert(MCAI && "No MCAsmInfo");
  switch (MCAI->getExceptionHandlingType()) {
  case ExceptionHandling::SjLj:
    // SjLj piggy-backs on dwarf for this bit; the cleanups done apply to both.
    // Dwarf EH prepare needs to be run after SjLj prepare. Otherwise,
    // catch info can get misplaced when a selector ends up more than one block
    // removed from the parent invoke(s). This could happen when a landing
    // pad is shared by multiple invokes and is also a target of a normal
    // edge from elsewhere.
    addPass(createSjLjEHPreparePass(TM));
    LLVM_FALLTHROUGH;
  case ExceptionHandling::DwarfCFI:
  case ExceptionHandling::ARM:
  case ExceptionHandling::AIX:
    addPass(createDwarfEHPass(getOptLevel()));
    break;
  case ExceptionHandling::WinEH:
    // We support using both GCC-style and MSVC-style exceptions on Windows, so
    // add both preparation passes. Each pass will only actually run if it
    // recognizes the personality function.
    addPass(createWinEHPass());
    addPass(createDwarfEHPass(getOptLevel()));
    break;
  case ExceptionHandling::Wasm:
    // Wasm EH uses Windows EH instructions, but it does not need to demote PHIs
    // on catchpads and cleanuppads because it does not outline them into
    // funclets. Catchswitch blocks are not lowered in SelectionDAG, so we
    // should remove PHIs there.
    addPass(createWinEHPass(/*DemoteCatchSwitchPHIOnly=*/false));
    addPass(createWasmEHPass());
    break;
  case ExceptionHandling::None:
    addPass(createLowerInvokePass());

    // The lower invoke pass may create unreachable code. Remove it.
    addPass(createUnreachableBlockEliminationPass());
    break;
  }
}
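
// For reference: which case runs is a per-target/OS property of MCAsmInfo,
// not of the input IR. On Windows, for example, both preparation passes are
// added, and each one only transforms functions whose personality routine it
// recognizes (e.g. MSVC-style __CxxFrameHandler3 versus a GCC-style
// personality).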

/// Add pass to prepare the LLVM IR for code generation. This should be done
/// before exception handling preparation passes.
void TargetPassConfig::addCodeGenPrepare() {
  if (getOptLevel() != CodeGenOpt::None && !DisableCGP)
    addPass(createCodeGenPreparePass());
}

/// Add common passes that perform LLVM IR to IR transforms in preparation for
/// instruction selection.
void TargetPassConfig::addISelPrepare() {
  addPreISel();

  // Force codegen to run according to the callgraph.
  if (requiresCodeGenSCCOrder())
    addPass(new DummyCGSCCPass);

  // Add both the safe stack and the stack protection passes: each of them will
  // only protect functions that have corresponding attributes.
  addPass(createSafeStackPass());
  addPass(createStackProtectorPass());

  if (PrintISelInput)
    addPass(createPrintFunctionPass(
        dbgs(), "\n\n*** Final LLVM Code input to ISel ***\n"));

  // All passes which modify the LLVM IR are now complete; run the verifier
  // to ensure that the IR is valid.
  if (!DisableVerify)
    addPass(createVerifierPass());
}
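
// For reference (from the pass descriptions): SafeStack only instruments
// functions carrying the safestack attribute, and StackProtector only those
// with ssp/sspstrong/sspreq, so IR such as
//   define void @f() safestack { ... }
// opts a single function into the safe-stack transformation while leaving
// its neighbors untouched.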

bool TargetPassConfig::addCoreISelPasses() {
  // Enable FastISel with -fast-isel, but allow that to be overridden.
  TM->setO0WantsFastISel(EnableFastISelOption != cl::BOU_FALSE);

  // Determine an instruction selector.
  enum class SelectorType { SelectionDAG, FastISel, GlobalISel };
  SelectorType Selector;

  if (EnableFastISelOption == cl::BOU_TRUE)
    Selector = SelectorType::FastISel;
  else if (EnableGlobalISelOption == cl::BOU_TRUE ||
           (TM->Options.EnableGlobalISel &&
            EnableGlobalISelOption != cl::BOU_FALSE))
    Selector = SelectorType::GlobalISel;
  else if (TM->getOptLevel() == CodeGenOpt::None && TM->getO0WantsFastISel())
    Selector = SelectorType::FastISel;
  else
    Selector = SelectorType::SelectionDAG;

  // Set TM->Options.EnableFastISel and EnableGlobalISel consistently.
  if (Selector == SelectorType::FastISel) {
    TM->setFastISel(true);
    TM->setGlobalISel(false);
  } else if (Selector == SelectorType::GlobalISel) {
    TM->setFastISel(false);
    TM->setGlobalISel(true);
  }

  // FIXME: Injecting into the DAGISel pipeline seems to cause issues with
  // analyses needing to be re-run. This can result in being unable to
  // schedule passes (particularly with 'Function Alias Analysis
  // Results'). It's not entirely clear why but AFAICT this seems to be
  // due to one FunctionPassManager not being able to use analyses from a
  // previous one. As we're injecting a ModulePass we break the usual
  // pass manager into two. GlobalISel with the fallback path disabled
  // and -run-pass seem to be unaffected. The majority of GlobalISel
  // testing uses -run-pass so this probably isn't too bad.
  SaveAndRestore<bool> SavedDebugifyIsSafe(DebugifyIsSafe);
  if (Selector != SelectorType::GlobalISel || !isGlobalISelAbortEnabled())
    DebugifyIsSafe = false;

  // Add instruction selector passes.
  if (Selector == SelectorType::GlobalISel) {
    SaveAndRestore<bool> SavedAddingMachinePasses(AddingMachinePasses, true);
    if (addIRTranslator())
      return true;

    addPreLegalizeMachineIR();

    if (addLegalizeMachineIR())
      return true;

    // Before running the register bank selector, ask the target if it
    // wants to run some passes.
    addPreRegBankSelect();

    if (addRegBankSelect())
      return true;

    addPreGlobalInstructionSelect();

    if (addGlobalInstructionSelect())
      return true;

    // Pass to reset the MachineFunction if the ISel failed.
    addPass(createResetMachineFunctionPass(
        reportDiagnosticWhenGlobalISelFallback(), isGlobalISelAbortEnabled()));

    // Provide a fallback path when we do not want to abort on
    // not-yet-supported input.
    if (!isGlobalISelAbortEnabled() && addInstSelector())
      return true;

  } else if (addInstSelector())
    return true;

  // Expand pseudo-instructions emitted by ISel. Don't run the verifier before
  // FinalizeISel.
  addPass(&FinalizeISelID);

  // Print the instruction selected machine code...
  printAndVerify("After Instruction Selection");

  return false;
}
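
// For reference: given the selector choice above, a command line such as
//   llc -global-isel -global-isel-abort=0 ...
// (assuming the usual llc option spellings) runs the GlobalISel pipeline
// but, rather than aborting on not-yet-supported input, resets the
// MachineFunction and falls back to the SelectionDAG selector added just
// above.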

bool TargetPassConfig::addISelPasses() {
  if (TM->useEmulatedTLS())
    addPass(createLowerEmuTLSPass());

  addPass(createPreISelIntrinsicLoweringPass());
  PM->add(createTargetTransformInfoWrapperPass(TM->getTargetIRAnalysis()));
  addIRPasses();
  addCodeGenPrepare();
  addPassesToHandleExceptions();
  addISelPrepare();

  return addCoreISelPasses();
}

/// -regalloc=... command line option.
static FunctionPass *useDefaultRegisterAllocator() { return nullptr; }
static cl::opt<RegisterRegAlloc::FunctionPassCtor, false,
               RegisterPassParser<RegisterRegAlloc>>
    RegAlloc("regalloc", cl::Hidden, cl::init(&useDefaultRegisterAllocator),
             cl::desc("Register allocator to use"));
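
// For reference: the accepted names come from RegisterRegAlloc registrations
// elsewhere in CodeGen (the fast and greedy allocators created by
// createTargetRegisterAllocator below among them), so typical invocations
// look like "llc -regalloc=greedy ..." or "llc -regalloc=fast ...".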

/// Add the complete set of target-independent postISel code generator passes.
///
/// This can be read as the standard order of major LLVM CodeGen stages. Stages
/// with nontrivial configuration or multiple passes are broken out below in
/// add%Stage routines.
///
/// Any TargetPassConfig::addXX routine may be overridden by the Target. The
/// addPre/Post methods with empty header implementations allow injecting
/// target-specific fixups just before or after major stages. Additionally,
/// targets have the flexibility to change pass order within a stage by
/// overriding default implementation of add%Stage routines below. Each
/// technique has maintainability tradeoffs because alternate pass orders are
/// not well supported. addPre/Post works better if the target pass is easily
/// tied to a common pass. But if it has subtle dependencies on multiple passes,
/// the target should override the stage instead. (An illustrative sketch
/// follows this function.)
///
/// TODO: We could use a single addPre/Post(ID) hook to allow pass injection
/// before/after any target-independent pass. But it's currently overkill.
void TargetPassConfig::addMachinePasses() {
  AddingMachinePasses = true;

  // Add passes that optimize machine instructions in SSA form.
  if (getOptLevel() != CodeGenOpt::None) {
    addMachineSSAOptimization();
  } else {
    // If the target requests it, assign local variables to stack slots relative
    // to one another and simplify frame index references where possible.
    addPass(&LocalStackSlotAllocationID);
  }

  if (TM->Options.EnableIPRA)
    addPass(createRegUsageInfoPropPass());

  // Run pre-ra passes.
  addPreRegAlloc();

  // Debugifying the register allocator passes seems to provoke some
  // non-determinism that affects CodeGen, and there doesn't seem to be a point
  // where it becomes safe again, so stop debugifying here.
  DebugifyIsSafe = false;

  // Run register allocation and passes that are tightly coupled with it,
  // including phi elimination and scheduling.
  if (getOptimizeRegAlloc())
    addOptimizedRegAlloc();
  else
    addFastRegAlloc();

  // Run post-ra passes.
  addPostRegAlloc();

  addPass(&FixupStatepointCallerSavedID);

  // Insert prolog/epilog code. Eliminate abstract frame index references...
  if (getOptLevel() != CodeGenOpt::None) {
    addPass(&PostRAMachineSinkingID);
    addPass(&ShrinkWrapID);
  }

  // Prolog/Epilog inserter needs a TargetMachine to instantiate. But only
  // do so if it hasn't been disabled, substituted, or overridden.
  if (!isPassSubstitutedOrOverridden(&PrologEpilogCodeInserterID))
    addPass(createPrologEpilogInserterPass());

  // Add passes that optimize machine instructions after register allocation.
  if (getOptLevel() != CodeGenOpt::None)
    addMachineLateOptimization();

  // Expand pseudo instructions before second scheduling pass.
  addPass(&ExpandPostRAPseudosID);

  // Run pre-sched2 passes.
  addPreSched2();

  if (EnableImplicitNullChecks)
    addPass(&ImplicitNullChecksID);

  // Second pass scheduler.
  // Let Target optionally insert this pass by itself at some other
  // point.
  if (getOptLevel() != CodeGenOpt::None &&
      !TM->targetSchedulesPostRAScheduling()) {
    if (MISchedPostRA)
      addPass(&PostMachineSchedulerID);
    else
      addPass(&PostRASchedulerID);
  }

  // GC
  if (addGCPasses()) {
    if (PrintGCInfo)
      addPass(createGCInfoPrinter(dbgs()), false);
  }

  // Basic block placement.
  if (getOptLevel() != CodeGenOpt::None)
    addBlockPlacement();

  // Insert before XRay Instrumentation.
  addPass(&FEntryInserterID);

  addPass(&XRayInstrumentationID);
  addPass(&PatchableFunctionID);

  if (EnableFSDiscriminator && !FSNoFinalDiscrim)
    // Add FS discriminators here so that all the instruction duplicates
    // in different BBs get their own discriminators. With this, we can "sum"
    // the SampleFDO counters instead of using MAX. This will improve the
    // SampleFDO profile quality.
    addPass(createMIRAddFSDiscriminatorsPass(
        sampleprof::FSDiscriminatorPass::PassLast));

  addPreEmitPass();

  if (TM->Options.EnableIPRA)
    // Collect register usage information and produce a register mask of
    // clobbered registers, to be used to optimize call sites.
    addPass(createRegUsageInfoCollector());

  // FIXME: Some backends are incompatible with running the verifier after
  // addPreEmitPass. Maybe only pass "false" here for those targets?
  addPass(&FuncletLayoutID, false);

  addPass(&StackMapLivenessID, false);
  addPass(&LiveDebugValuesID, false);

  if (TM->Options.EnableMachineOutliner && getOptLevel() != CodeGenOpt::None &&
      EnableMachineOutliner != RunOutliner::NeverOutline) {
    bool RunOnAllFunctions =
        (EnableMachineOutliner == RunOutliner::AlwaysOutline);
    bool AddOutliner =
        RunOnAllFunctions || TM->Options.SupportsDefaultOutlining;
    if (AddOutliner)
      addPass(createMachineOutlinerPass(RunOnAllFunctions));
  }

  // Machine function splitter uses the basic block sections feature. Both
  // cannot be enabled at the same time. Basic block sections takes precedence.
  // FIXME: In principle, BasicBlockSection::Labels and splitting can be used
  // together. Update this check once we have addressed any issues.
  if (TM->getBBSectionsType() != llvm::BasicBlockSection::None) {
    addPass(llvm::createBasicBlockSectionsPass(TM->getBBSectionsFuncListBuf()));
  } else if (TM->Options.EnableMachineFunctionSplitter ||
             EnableMachineFunctionSplitter) {
    addPass(createMachineFunctionSplitterPass());
  }

  // Add passes that directly emit MI after all other MI passes.
  addPreEmitPass2();

  // Insert pseudo probe annotation for callsite profiling.
  if (TM->Options.PseudoProbeForProfiling)
    addPass(createPseudoProbeInserter());

  AddingMachinePasses = false;
}
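
// Illustrative sketch of the override mechanism described above (hypothetical
// target, not part of this file): a backend typically subclasses
// TargetPassConfig and overrides one of the addPre/Post hooks, or a whole
// add%Stage routine, e.g.:
//
//   class MyTargetPassConfig : public TargetPassConfig {
//     void addPreRegAlloc() override {
//       addPass(createMyTargetLoadStoreOptPass()); // hypothetical pass
//     }
//   };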

/// Add passes that optimize machine instructions in SSA form.
void TargetPassConfig::addMachineSSAOptimization() {
  // Pre-ra tail duplication.
  addPass(&EarlyTailDuplicateID);

  // Optimize PHIs before DCE: removing dead PHI cycles may make more
  // instructions dead.
  addPass(&OptimizePHIsID);

  // This pass merges large allocas. StackSlotColoring is a different pass
  // which merges spill slots.
  addPass(&StackColoringID);

  // If the target requests it, assign local variables to stack slots relative
  // to one another and simplify frame index references where possible.
  addPass(&LocalStackSlotAllocationID);

  // With optimization, dead code should already be eliminated. However
  // there is one known exception: lowered code for arguments that are only
  // used by tail calls, where the tail calls reuse the incoming stack
  // arguments directly (see t11 in test/CodeGen/X86/sibcall.ll).
  addPass(&DeadMachineInstructionElimID);

  // Allow targets to insert passes that improve instruction level parallelism,
  // like if-conversion. Such passes will typically need dominator trees and
  // loop info, just like LICM and CSE below.
  addILPOpts();

  addPass(&EarlyMachineLICMID);
  addPass(&MachineCSEID);

  addPass(&MachineSinkingID);

  addPass(&PeepholeOptimizerID);
  // Clean-up the dead code that may have been generated by peephole
  // rewriting.
  addPass(&DeadMachineInstructionElimID);
}

//===---------------------------------------------------------------------===//
/// Register Allocation Pass Configuration
//===---------------------------------------------------------------------===//

bool TargetPassConfig::getOptimizeRegAlloc() const {
  switch (OptimizeRegAlloc) {
  case cl::BOU_UNSET: return getOptLevel() != CodeGenOpt::None;
  case cl::BOU_TRUE: return true;
  case cl::BOU_FALSE: return false;
  }
  llvm_unreachable("Invalid optimize-regalloc state");
}
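
// For reference: OptimizeRegAlloc is a boolOrDefault option declared earlier
// in this file (spelled -optimize-regalloc, as the unreachable message above
// suggests), so for example "llc -O0 -optimize-regalloc" forces the optimized
// register allocation path even at -O0.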

/// A dummy default pass factory indicates whether the register allocator is
/// overridden on the command line.
static llvm::once_flag InitializeDefaultRegisterAllocatorFlag;

static RegisterRegAlloc
    defaultRegAlloc("default",
                    "pick register allocator based on -O option",
                    useDefaultRegisterAllocator);

static void initializeDefaultRegisterAllocatorOnce() {
  if (!RegisterRegAlloc::getDefault())
    RegisterRegAlloc::setDefault(RegAlloc);
}

/// Instantiate the default register allocator pass for this target for either
/// the optimized or unoptimized allocation path. This will be added to the pass
/// manager by addFastRegAlloc in the unoptimized case or addOptimizedRegAlloc
/// in the optimized case.
///
/// A target that uses the standard regalloc pass order for fast or optimized
/// allocation may still override this for per-target regalloc
/// selection. But -regalloc=... always takes precedence.
FunctionPass *TargetPassConfig::createTargetRegisterAllocator(bool Optimized) {
  if (Optimized)
    return createGreedyRegisterAllocator();
  else
    return createFastRegisterAllocator();
}

/// Find and instantiate the register allocation pass requested by this target
/// at the current optimization level. Different register allocators are
/// defined as separate passes because they may require different analysis.
///
/// This helper ensures that the regalloc= option is always available,
/// even for targets that override the default allocator.
///
/// FIXME: When MachinePassRegistry registers pass IDs instead of function
/// ptrs, this can be folded into addPass.
FunctionPass *TargetPassConfig::createRegAllocPass(bool Optimized) {
  // Initialize the global default.
  llvm::call_once(InitializeDefaultRegisterAllocatorFlag,
                  initializeDefaultRegisterAllocatorOnce);

  RegisterRegAlloc::FunctionPassCtor Ctor = RegisterRegAlloc::getDefault();
  if (Ctor != useDefaultRegisterAllocator)
    return Ctor();

  // With no -regalloc= override, ask the target for a regalloc pass.
  return createTargetRegisterAllocator(Optimized);
}

bool TargetPassConfig::addRegAssignAndRewriteFast() {
  if (RegAlloc != &useDefaultRegisterAllocator &&
      RegAlloc != &createFastRegisterAllocator)
    report_fatal_error("Must use fast (default) register allocator for "
                       "unoptimized regalloc.");

  addPass(createRegAllocPass(false));

  // Allow targets to change the register assignments after
  // fast register allocation.
  addPostFastRegAllocRewrite();
  return true;
}
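
// For reference, a consequence of the check above: "llc -O0 -regalloc=greedy"
// hits the fatal error, because at -O0 the fast path is taken by default;
// pairing it with the optimize-regalloc option (see getOptimizeRegAlloc)
// routes allocation through addOptimizedRegAlloc instead, where any
// registered allocator is accepted.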

bool TargetPassConfig::addRegAssignAndRewriteOptimized() {
  // Add the selected register allocation pass.
  addPass(createRegAllocPass(true));

  // Allow targets to change the register assignments before rewriting.
  addPreRewrite();

  // Finally rewrite virtual registers.
  addPass(&VirtRegRewriterID);

  return true;
}

/// Return true if the default global register allocator is in use and
/// has not been overridden on the command line with '-regalloc=...'
bool TargetPassConfig::usingDefaultRegAlloc() const {
  return RegAlloc.getNumOccurrences() == 0;
}
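
// For reference, the occurrence count above reflects whether the user passed
// an explicit allocator on the command line, e.g.:
//
//   llc -regalloc=greedy foo.ll   // getNumOccurrences() == 1
//   llc foo.ll                    // getNumOccurrences() == 0, default in use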

/// Add the minimum set of target-independent passes that are required for
/// register allocation. No coalescing or scheduling.
void TargetPassConfig::addFastRegAlloc() {
  addPass(&PHIEliminationID, false);
  addPass(&TwoAddressInstructionPassID, false);

  addRegAssignAndRewriteFast();
}

/// Add standard target-independent passes that are tightly coupled with
/// optimized register allocation, including coalescing, machine instruction
/// scheduling, and register allocation itself.
void TargetPassConfig::addOptimizedRegAlloc() {
  addPass(&DetectDeadLanesID, false);

  addPass(&ProcessImplicitDefsID, false);

  // LiveVariables currently requires pure SSA form.
  //
  // FIXME: Once TwoAddressInstruction pass no longer uses kill flags,
  // LiveVariables can be removed completely, and LiveIntervals can be directly
  // computed. (We still either need to regenerate kill flags after regalloc, or
  // preferably fix the scavenger to not depend on them).
  // FIXME: UnreachableMachineBlockElim is a dependent pass of LiveVariables.
  // When LiveVariables is removed, this has to be removed/moved as well.
  // Explicit addition of UnreachableMachineBlockElim allows stopping before or
  // after it with -stop-before/-stop-after.
  addPass(&UnreachableMachineBlockElimID, false);
  addPass(&LiveVariablesID, false);

  // Edge splitting is smarter with machine loop info.
  addPass(&MachineLoopInfoID, false);
  addPass(&PHIEliminationID, false);

  // Eventually, we want to run LiveIntervals before PHI elimination.
  if (EarlyLiveIntervals)
    addPass(&LiveIntervalsID, false);

  addPass(&TwoAddressInstructionPassID, false);
  addPass(&RegisterCoalescerID);

  // The machine scheduler may accidentally create disconnected components
  // when moving subregister definitions around; avoid this by splitting them
  // into separate vregs beforehand. Splitting can also improve register
  // allocation quality.
  addPass(&RenameIndependentSubregsID);

  // PreRA instruction scheduling.
  addPass(&MachineSchedulerID);

  if (addRegAssignAndRewriteOptimized()) {
    // Perform stack slot coloring and post-ra machine LICM.
    //
    // FIXME: Re-enable coloring with register when it's capable of adding
    // kill markers.
    addPass(&StackSlotColoringID);

    // Allow targets to expand pseudo instructions depending on the choice of
    // registers before MachineCopyPropagation.
    addPostRewrite();

    // Copy propagate to forward register uses and try to eliminate COPYs that
    // were not coalesced.
    addPass(&MachineCopyPropagationID);

    // Run post-ra machine LICM to hoist reloads / remats.
    //
    // FIXME: can this move into MachineLateOptimization?
    addPass(&MachineLICMID);
  }
}
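
// Example (usage sketch): the -stop-before/-stop-after flags mentioned above
// truncate this pipeline and emit MIR at the chosen point, using each pass's
// registered command-line name, e.g.:
//
//   llc -stop-after=machine-scheduler foo.ll -o foo.mir
//   llc -stop-before=virtregrewriter foo.ll -o foo.mir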

//===---------------------------------------------------------------------===//
/// Post RegAlloc Pass Configuration
//===---------------------------------------------------------------------===//

/// Add passes that optimize machine instructions after register allocation.
void TargetPassConfig::addMachineLateOptimization() {
  // Branch folding must be run after regalloc and prolog/epilog insertion.
  addPass(&BranchFolderPassID);

  // Tail duplication.
  // Note that tail duplication only increases code size and degrades
  // performance for targets that require structured control flow; it can
  // also make the CFG irreducible. Thus we disable it for such targets.
  if (!TM->requiresStructuredCFG())
    addPass(&TailDuplicateID);

  // Copy propagation.
  addPass(&MachineCopyPropagationID);
}
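
// Example (usage sketch): to see where these late passes land in the final
// pipeline, the legacy pass manager can print its structure:
//
//   llc -debug-pass=Structure foo.ll -o /dev/null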

/// Add standard GC passes.
bool TargetPassConfig::addGCPasses() {
  addPass(&GCMachineCodeAnalysisID, false);
  return true;
}
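
// Example (hypothetical target code): a backend with no machine-level GC
// metadata can override this hook and return false so that the caller skips
// GC-related follow-up work. MyTargetPassConfig is an invented name.
//
//   bool MyTargetPassConfig::addGCPasses() { return false; }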

/// Add standard basic block placement passes.
void TargetPassConfig::addBlockPlacement() {
  if (addPass(&MachineBlockPlacementID)) {
    // Run a separate pass to collect block placement statistics.
    if (EnableBlockPlacementStats)
      addPass(&MachineBlockPlacementStatsID);
  }
}
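
// Usage sketch (assuming the flag keeps its usual spelling): the stats pass
// above is gated on a command-line option, so collecting placement statistics
// would look like:
//
//   llc -enable-block-placement-stats -stats foo.ll -o /dev/null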

//===---------------------------------------------------------------------===//
/// GlobalISel Configuration
//===---------------------------------------------------------------------===//

bool TargetPassConfig::isGlobalISelAbortEnabled() const {
  return TM->Options.GlobalISelAbort == GlobalISelAbortMode::Enable;
}

bool TargetPassConfig::reportDiagnosticWhenGlobalISelFallback() const {
  return TM->Options.GlobalISelAbort == GlobalISelAbortMode::DisableWithDiag;
}
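
// For reference, GlobalISelAbort is normally driven by llc's
// -global-isel-abort flag: 1 -> Enable (abort on fallback), 0 -> Disable
// (silently fall back to SelectionDAG), 2 -> DisableWithDiag (fall back and
// emit a diagnostic), e.g.:
//
//   llc -global-isel -global-isel-abort=2 foo.ll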

bool TargetPassConfig::isGISelCSEEnabled() const {
  return true;
}

std::unique_ptr<CSEConfigBase> TargetPassConfig::getCSEConfig() const {
  return std::make_unique<CSEConfigBase>();
}
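
// Example (hypothetical target code): the base CSEConfigBase CSEs nothing, so
// a backend that wants real CSE in GlobalISel typically returns a fuller
// configuration, e.g. via the in-tree getStandardCSEConfigForOpt helper.
// MyTargetPassConfig is an invented name.
//
//   std::unique_ptr<CSEConfigBase> MyTargetPassConfig::getCSEConfig() const {
//     return getStandardCSEConfigForOpt(TM->getOptLevel());
//   }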