//===-- Passes.cpp - Target independent code generation passes ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines interfaces to access the target independent code
// generation passes provided by the LLVM backend.
//
//===---------------------------------------------------------------------===//

#include "llvm/CodeGen/Passes.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/CFLAliasAnalysis.h"
#include "llvm/Analysis/Passes.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/RegAllocRegistry.h"
#include "llvm/IR/IRPrintingPasses.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/Verifier.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Instrumentation.h"
|
2012-12-04 00:50:05 +08:00
|
|
|
#include "llvm/Transforms/Scalar.h"
|
2014-11-08 05:32:08 +08:00
|
|
|
#include "llvm/Transforms/Utils/SymbolRewriter.h"
|
2006-08-01 22:21:23 +08:00
|
|
|
|
2003-12-28 15:59:53 +08:00
|
|
|
using namespace llvm;
|
2003-11-12 06:41:34 +08:00
|
|
|
|
2012-02-04 10:56:48 +08:00
|
|
|
static cl::opt<bool> DisablePostRA("disable-post-ra", cl::Hidden,
|
|
|
|
cl::desc("Disable Post Regalloc"));
|
|
|
|
static cl::opt<bool> DisableBranchFold("disable-branch-fold", cl::Hidden,
|
|
|
|
cl::desc("Disable branch folding"));
|
|
|
|
static cl::opt<bool> DisableTailDuplicate("disable-tail-duplicate", cl::Hidden,
|
|
|
|
cl::desc("Disable tail duplication"));
|
|
|
|
static cl::opt<bool> DisableEarlyTailDup("disable-early-taildup", cl::Hidden,
|
|
|
|
cl::desc("Disable pre-register allocation tail duplication"));
|
2012-04-16 21:49:17 +08:00
|
|
|
static cl::opt<bool> DisableBlockPlacement("disable-block-placement",
|
2013-03-30 01:14:24 +08:00
|
|
|
cl::Hidden, cl::desc("Disable probability-driven block placement"));
|
2012-02-04 10:56:48 +08:00
|
|
|
static cl::opt<bool> EnableBlockPlacementStats("enable-block-placement-stats",
|
|
|
|
cl::Hidden, cl::desc("Collect probability-driven block placement stats"));
|
|
|
|
static cl::opt<bool> DisableSSC("disable-ssc", cl::Hidden,
|
|
|
|
cl::desc("Disable Stack Slot Coloring"));
|
|
|
|
static cl::opt<bool> DisableMachineDCE("disable-machine-dce", cl::Hidden,
|
|
|
|
cl::desc("Disable Machine Dead Code Elimination"));
|
2012-10-03 08:51:32 +08:00
|
|
|
static cl::opt<bool> DisableEarlyIfConversion("disable-early-ifcvt", cl::Hidden,
|
|
|
|
cl::desc("Disable Early If-conversion"));
|
2012-02-04 10:56:48 +08:00
|
|
|
static cl::opt<bool> DisableMachineLICM("disable-machine-licm", cl::Hidden,
|
|
|
|
cl::desc("Disable Machine LICM"));
|
|
|
|
static cl::opt<bool> DisableMachineCSE("disable-machine-cse", cl::Hidden,
|
|
|
|
cl::desc("Disable Machine Common Subexpression Elimination"));
|
static cl::opt<cl::boolOrDefault> OptimizeRegAlloc(
    "optimize-regalloc", cl::Hidden,
    cl::desc("Enable optimized register allocation compilation path."));
static cl::opt<bool> DisablePostRAMachineLICM("disable-postra-machine-licm",
    cl::Hidden,
    cl::desc("Disable Machine LICM post register allocation"));
static cl::opt<bool> DisableMachineSink("disable-machine-sink", cl::Hidden,
    cl::desc("Disable Machine Sinking"));
static cl::opt<bool> DisableLSR("disable-lsr", cl::Hidden,
    cl::desc("Disable Loop Strength Reduction Pass"));
static cl::opt<bool> DisableConstantHoisting("disable-constant-hoisting",
    cl::Hidden, cl::desc("Disable ConstantHoisting"));
static cl::opt<bool> DisableCGP("disable-cgp", cl::Hidden,
    cl::desc("Disable Codegen Prepare"));
static cl::opt<bool> DisableCopyProp("disable-copyprop", cl::Hidden,
    cl::desc("Disable Copy Propagation pass"));
static cl::opt<bool> DisablePartialLibcallInlining("disable-partial-libcall-inlining",
    cl::Hidden, cl::desc("Disable Partial Libcall Inlining"));
static cl::opt<bool> EnableImplicitNullChecks(
    "enable-implicit-null-checks",
    cl::desc("Fold null checks into faulting memory operations"),
    cl::init(false));
static cl::opt<bool> PrintLSR("print-lsr-output", cl::Hidden,
    cl::desc("Print LLVM IR produced by the loop-reduce pass"));
static cl::opt<bool> PrintISelInput("print-isel-input", cl::Hidden,
    cl::desc("Print LLVM IR input to isel pass"));
static cl::opt<bool> PrintGCInfo("print-gc", cl::Hidden,
    cl::desc("Dump garbage collector data"));
static cl::opt<bool> VerifyMachineCode("verify-machineinstrs", cl::Hidden,
    cl::desc("Verify generated machine code"),
    cl::init(false),
    cl::ZeroOrMore);

static cl::opt<std::string>
PrintMachineInstrs("print-machineinstrs", cl::ValueOptional,
                   cl::desc("Print machine instrs"),
                   cl::value_desc("pass-name"), cl::init("option-unspecified"));

// Temporary option to allow experimenting with MachineScheduler as a post-RA
// scheduler. Targets can "properly" enable this with
// substitutePass(&PostRASchedulerID, &PostMachineSchedulerID); Ideally it
// wouldn't be part of the standard pass pipeline, and the target would just add
// a PostRA scheduling pass wherever it wants.
static cl::opt<bool> MISchedPostRA("misched-postra", cl::Hidden,
  cl::desc("Run MachineScheduler post regalloc (independent of preRA sched)"));

// Experimental option to run live interval analysis early.
static cl::opt<bool> EarlyLiveIntervals("early-live-intervals", cl::Hidden,
  cl::desc("Run live interval analysis earlier in the pipeline"));

static cl::opt<bool> UseCFLAA("use-cfl-aa-in-codegen",
  cl::init(false), cl::Hidden,
  cl::desc("Enable the new, experimental CFL alias analysis in CodeGen"));

/// Allow standard passes to be disabled by command line options. This supports
/// simple binary flags that either suppress the pass or do nothing.
/// i.e. -disable-mypass=false has no effect.
/// These should be converted to boolOrDefault in order to use applyOverride.
static IdentifyingPassPtr applyDisable(IdentifyingPassPtr PassID,
                                       bool Override) {
  if (Override)
    return IdentifyingPassPtr();
  return PassID;
}

/// Allow standard passes to be disabled by the command line, regardless of who
/// is adding the pass.
///
/// StandardID is the pass identified in the standard pass pipeline and provided
/// to addPass(). It may be a target-specific ID in the case that the target
/// directly adds its own pass, but in that case we harmlessly fall through.
///
/// TargetID is the pass that the target has configured to override StandardID.
///
/// StandardID may be a pseudo ID. In that case TargetID is the name of the real
/// pass to run. This allows multiple options to control a single pass depending
/// on where in the pipeline that pass is added.
static IdentifyingPassPtr overridePass(AnalysisID StandardID,
                                       IdentifyingPassPtr TargetID) {
  if (StandardID == &PostRASchedulerID)
    return applyDisable(TargetID, DisablePostRA);

  if (StandardID == &BranchFolderPassID)
    return applyDisable(TargetID, DisableBranchFold);

  if (StandardID == &TailDuplicateID)
    return applyDisable(TargetID, DisableTailDuplicate);

  if (StandardID == &TargetPassConfig::EarlyTailDuplicateID)
    return applyDisable(TargetID, DisableEarlyTailDup);

  if (StandardID == &MachineBlockPlacementID)
    return applyDisable(TargetID, DisableBlockPlacement);

  if (StandardID == &StackSlotColoringID)
    return applyDisable(TargetID, DisableSSC);

  if (StandardID == &DeadMachineInstructionElimID)
    return applyDisable(TargetID, DisableMachineDCE);

  if (StandardID == &EarlyIfConverterID)
    return applyDisable(TargetID, DisableEarlyIfConversion);

  if (StandardID == &MachineLICMID)
    return applyDisable(TargetID, DisableMachineLICM);

  if (StandardID == &MachineCSEID)
    return applyDisable(TargetID, DisableMachineCSE);

  if (StandardID == &TargetPassConfig::PostRAMachineLICMID)
    return applyDisable(TargetID, DisablePostRAMachineLICM);

  if (StandardID == &MachineSinkingID)
    return applyDisable(TargetID, DisableMachineSink);

  if (StandardID == &MachineCopyPropagationID)
    return applyDisable(TargetID, DisableCopyProp);

  return TargetID;
}
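
// Worked example of a pseudo ID: the TargetPassConfig constructor below maps
// EarlyTailDuplicateID onto TailDuplicateID, so the same underlying pass runs
// twice in the pipeline under two independent disable flags:
//
//   addPass(&EarlyTailDuplicateID); // pre-RA run, guarded by -disable-early-taildup
//   addPass(&TailDuplicateID);      // post-RA run, guarded by -disable-tail-duplicate
//
// (See addMachineSSAOptimization() and addMachineLateOptimization() below.)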

//===---------------------------------------------------------------------===//
/// TargetPassConfig
//===---------------------------------------------------------------------===//

INITIALIZE_PASS(TargetPassConfig, "targetpassconfig",
                "Target Pass Configuration", false, false)
char TargetPassConfig::ID = 0;

// Pseudo Pass IDs.
char TargetPassConfig::EarlyTailDuplicateID = 0;
char TargetPassConfig::PostRAMachineLICMID = 0;

namespace llvm {
class PassConfigImpl {
public:
  // List of passes explicitly substituted by this target. Normally this is
  // empty, but it is a convenient way to suppress or replace specific passes
  // that are part of a standard pass pipeline without overriding the entire
  // pipeline. This mechanism allows target options to inherit a standard pass's
  // user interface. For example, a target may disable a standard pass by
  // default by substituting a pass ID of zero, and the user may still enable
  // that standard pass with an explicit command line option.
  DenseMap<AnalysisID,IdentifyingPassPtr> TargetPasses;

  /// Store the pairs of <AnalysisID, AnalysisID> of which the second pass
  /// is inserted after each instance of the first one.
  SmallVector<std::pair<AnalysisID, IdentifyingPassPtr>, 4> InsertedPasses;
};
} // namespace llvm

// Out of line virtual method.
TargetPassConfig::~TargetPassConfig() {
  delete Impl;
}

// Out of line constructor provides default values for pass options and
// registers all common codegen passes.
TargetPassConfig::TargetPassConfig(TargetMachine *tm, PassManagerBase &pm)
    : ImmutablePass(ID), PM(&pm), StartBefore(nullptr), StartAfter(nullptr),
      StopAfter(nullptr), Started(true), Stopped(false),
      AddingMachinePasses(false), TM(tm), Impl(nullptr), Initialized(false),
      DisableVerify(false), EnableTailMerge(true) {

  Impl = new PassConfigImpl();

  // Register all target independent codegen passes to activate their PassIDs,
  // including this pass itself.
  initializeCodeGen(*PassRegistry::getPassRegistry());

  // Substitute Pseudo Pass IDs for real ones.
  substitutePass(&EarlyTailDuplicateID, &TailDuplicateID);
  substitutePass(&PostRAMachineLICMID, &MachineLICMID);
}

/// Insert InsertedPassID pass after TargetPassID.
void TargetPassConfig::insertPass(AnalysisID TargetPassID,
                                  IdentifyingPassPtr InsertedPassID) {
  assert(((!InsertedPassID.isInstance() &&
           TargetPassID != InsertedPassID.getID()) ||
          (InsertedPassID.isInstance() &&
           TargetPassID != InsertedPassID.getInstance()->getPassID())) &&
         "Insert a pass after itself!");
  std::pair<AnalysisID, IdentifyingPassPtr> P(TargetPassID, InsertedPassID);
  Impl->InsertedPasses.push_back(P);
}
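
// Illustrative use (hypothetical target code; MyTargetCleanupID is not a real
// pass): queue a target pass to run after every instance of machine copy
// propagation that is added through addPass().
//
//   insertPass(&MachineCopyPropagationID, &MyTargetCleanupID);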

/// createPassConfig - Create a pass configuration object to be used by
/// addPassToEmitX methods for generating a pipeline of CodeGen passes.
///
/// Targets may override this to extend TargetPassConfig.
TargetPassConfig *LLVMTargetMachine::createPassConfig(PassManagerBase &PM) {
  return new TargetPassConfig(this, PM);
}
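
// Sketch of the usual target-side override; MyTargetMachine and
// MyTargetPassConfig are hypothetical stand-ins for a backend's real classes:
//
//   TargetPassConfig *MyTargetMachine::createPassConfig(PassManagerBase &PM) {
//     return new MyTargetPassConfig(this, PM);
//   }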

TargetPassConfig::TargetPassConfig()
    : ImmutablePass(ID), PM(nullptr) {
  llvm_unreachable("TargetPassConfig should not be constructed on-the-fly");
}

// Helper to verify the analysis is really immutable.
void TargetPassConfig::setOpt(bool &Opt, bool Val) {
  assert(!Initialized && "PassConfig is immutable");
  Opt = Val;
}

void TargetPassConfig::substitutePass(AnalysisID StandardID,
                                      IdentifyingPassPtr TargetID) {
  Impl->TargetPasses[StandardID] = TargetID;
}
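
// Example uses: the first substitution is the one suggested by the
// misched-postra comment above; the second sketches disabling a standard pass
// by default by substituting an empty IdentifyingPassPtr:
//
//   substitutePass(&PostRASchedulerID, &PostMachineSchedulerID);
//   substitutePass(&MachineBlockPlacementID, IdentifyingPassPtr());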

IdentifyingPassPtr TargetPassConfig::getPassSubstitution(AnalysisID ID) const {
  DenseMap<AnalysisID, IdentifyingPassPtr>::const_iterator
    I = Impl->TargetPasses.find(ID);
  if (I == Impl->TargetPasses.end())
    return ID;
  return I->second;
}

/// Add a pass to the PassManager if that pass is supposed to be run. If the
/// Started/Stopped flags indicate either that the compilation should start at
/// a later pass or that it should stop after an earlier pass, then do not add
/// the pass. Finally, compare the current pass against the StartAfter
/// and StopAfter options and change the Started/Stopped flags accordingly.
void TargetPassConfig::addPass(Pass *P, bool verifyAfter, bool printAfter) {
  assert(!Initialized && "PassConfig is immutable");

  // Cache the Pass ID here in case the pass manager finds this pass is
  // redundant with ones already scheduled / available, and deletes it.
  // Fundamentally, once we add the pass to the manager, we no longer own it
  // and shouldn't reference it.
  AnalysisID PassID = P->getPassID();

  if (StartBefore == PassID)
    Started = true;
  if (Started && !Stopped) {
    std::string Banner;
    // Construct banner message before PM->add() as that may delete the pass.
    if (AddingMachinePasses && (printAfter || verifyAfter))
      Banner = std::string("After ") + std::string(P->getPassName());
    PM->add(P);
    if (AddingMachinePasses) {
      if (printAfter)
        addPrintPass(Banner);
      if (verifyAfter)
        addVerifyPass(Banner);
    }

    // Add the passes after the pass P if there are any.
    for (SmallVectorImpl<std::pair<AnalysisID, IdentifyingPassPtr> >::iterator
             I = Impl->InsertedPasses.begin(),
             E = Impl->InsertedPasses.end();
         I != E; ++I) {
      if ((*I).first == PassID) {
        assert((*I).second.isValid() && "Illegal Pass ID!");
        Pass *NP;
        if ((*I).second.isInstance())
          NP = (*I).second.getInstance();
        else {
          NP = Pass::createPass((*I).second.getID());
          assert(NP && "Pass ID not registered");
        }
        addPass(NP, false, false);
      }
    }
  } else {
    delete P;
  }
  if (StopAfter == PassID)
    Stopped = true;
  if (StartAfter == PassID)
    Started = true;
  if (Stopped && !Started)
    report_fatal_error("Cannot stop compilation after pass that is not run");
}

/// Add a CodeGen pass at this point in the pipeline after checking for target
/// and command line overrides.
///
/// addPass cannot return a pointer to the pass instance because it is internal
/// to the PassManager and the instance we create here may already be freed.
AnalysisID TargetPassConfig::addPass(AnalysisID PassID, bool verifyAfter,
                                     bool printAfter) {
  IdentifyingPassPtr TargetID = getPassSubstitution(PassID);
  IdentifyingPassPtr FinalPtr = overridePass(PassID, TargetID);
  if (!FinalPtr.isValid())
    return nullptr;

  Pass *P;
  if (FinalPtr.isInstance())
    P = FinalPtr.getInstance();
  else {
    P = Pass::createPass(FinalPtr.getID());
    if (!P)
      llvm_unreachable("Pass ID not registered");
  }
  AnalysisID FinalID = P->getPassID();
  addPass(P, verifyAfter, printAfter); // Ends the lifetime of P.

  return FinalID;
}

void TargetPassConfig::printAndVerify(const std::string &Banner) {
  addPrintPass(Banner);
  addVerifyPass(Banner);
}

void TargetPassConfig::addPrintPass(const std::string &Banner) {
  if (TM->shouldPrintMachineCode())
    PM->add(createMachineFunctionPrinterPass(dbgs(), Banner));
}

void TargetPassConfig::addVerifyPass(const std::string &Banner) {
  if (VerifyMachineCode)
    PM->add(createMachineVerifierPass(Banner));
}

/// Add common target configurable passes that perform LLVM IR to IR transforms
/// following machine independent optimization.
void TargetPassConfig::addIRPasses() {
  // Basic AliasAnalysis support.
  // Add TypeBasedAliasAnalysis before BasicAliasAnalysis so that
  // BasicAliasAnalysis wins if they disagree. This is intended to help
  // support "obvious" type-punning idioms.
  if (UseCFLAA)
    addPass(createCFLAliasAnalysisPass());
  addPass(createTypeBasedAliasAnalysisPass());
  addPass(createScopedNoAliasAAPass());
  addPass(createBasicAliasAnalysisPass());

  // Before running any passes, run the verifier to determine if the input
  // coming from the front-end and/or optimizer is valid.
  if (!DisableVerify)
    addPass(createVerifierPass());

  // Run loop strength reduction before anything else.
  if (getOptLevel() != CodeGenOpt::None && !DisableLSR) {
    addPass(createLoopStrengthReducePass());
    if (PrintLSR)
      addPass(createPrintFunctionPass(dbgs(), "\n\n*** Code after LSR ***\n"));
  }

  // Run GC lowering passes for builtin collectors.
  // TODO: add a pass insertion point here
  addPass(createGCLoweringPass());
  addPass(createShadowStackGCLoweringPass());

  // Make sure that no unreachable blocks are instruction selected.
  addPass(createUnreachableBlockEliminationPass());

  // Prepare expensive constants for SelectionDAG.
  if (getOptLevel() != CodeGenOpt::None && !DisableConstantHoisting)
    addPass(createConstantHoistingPass());

  if (getOptLevel() != CodeGenOpt::None && !DisablePartialLibcallInlining)
    addPass(createPartiallyInlineLibCallsPass());
}

/// Turn exception handling constructs into something the code generators can
/// handle.
void TargetPassConfig::addPassesToHandleExceptions() {
  switch (TM->getMCAsmInfo()->getExceptionHandlingType()) {
  case ExceptionHandling::SjLj:
    // SjLj piggy-backs on dwarf for this bit. The cleanups done apply to both.
    // Dwarf EH prepare needs to be run after SjLj prepare. Otherwise,
    // catch info can get misplaced when a selector ends up more than one block
    // removed from the parent invoke(s). This could happen when a landing
    // pad is shared by multiple invokes and is also a target of a normal
    // edge from elsewhere.
    addPass(createSjLjEHPreparePass());
    // FALLTHROUGH
  case ExceptionHandling::DwarfCFI:
  case ExceptionHandling::ARM:
    addPass(createDwarfEHPass(TM));
    break;
  case ExceptionHandling::WinEH:
    // We support using both GCC-style and MSVC-style exceptions on Windows, so
    // add both preparation passes. Each pass will only actually run if it
    // recognizes the personality function.
    addPass(createWinEHPass(TM));
    addPass(createDwarfEHPass(TM));
    break;
  case ExceptionHandling::None:
    addPass(createLowerInvokePass());

    // The lower invoke pass may create unreachable code. Remove it.
    addPass(createUnreachableBlockEliminationPass());
    break;
  }
}

/// Add pass to prepare the LLVM IR for code generation. This should be done
/// before exception handling preparation passes.
void TargetPassConfig::addCodeGenPrepare() {
  if (getOptLevel() != CodeGenOpt::None && !DisableCGP)
    addPass(createCodeGenPreparePass(TM));
  addPass(createRewriteSymbolsPass());
}

/// Add common passes that perform LLVM IR to IR transforms in preparation for
/// instruction selection.
void TargetPassConfig::addISelPrepare() {
  addPreISel();

  // Add both the safe stack and the stack protection passes: each of them will
  // only protect functions that have corresponding attributes.
  addPass(createSafeStackPass());
  addPass(createStackProtectorPass(TM));

  if (PrintISelInput)
    addPass(createPrintFunctionPass(
        dbgs(), "\n\n*** Final LLVM Code input to ISel ***\n"));

  // All passes which modify the LLVM IR are now complete; run the verifier
  // to ensure that the IR is valid.
  if (!DisableVerify)
    addPass(createVerifierPass());
}

/// Add the complete set of target-independent postISel code generator passes.
///
/// This can be read as the standard order of major LLVM CodeGen stages. Stages
/// with nontrivial configuration or multiple passes are broken out below in
/// add%Stage routines.
///
/// Any TargetPassConfig::addXX routine may be overridden by the Target. The
/// addPre/Post methods with empty header implementations allow injecting
/// target-specific fixups just before or after major stages. Additionally,
/// targets have the flexibility to change pass order within a stage by
/// overriding default implementation of add%Stage routines below. Each
/// technique has maintainability tradeoffs because alternate pass orders are
/// not well supported. addPre/Post works better if the target pass is easily
/// tied to a common pass. But if it has subtle dependencies on multiple passes,
/// the target should override the stage instead.
///
/// TODO: We could use a single addPre/Post(ID) hook to allow pass injection
/// before/after any target-independent pass. But it's currently overkill.
void TargetPassConfig::addMachinePasses() {
  AddingMachinePasses = true;

  // Insert a machine instr printer pass after the specified pass.
  // If -print-machineinstrs is specified, print machineinstrs after all passes.
  if (StringRef(PrintMachineInstrs.getValue()).equals(""))
    TM->Options.PrintMachineCode = true;
  else if (!StringRef(PrintMachineInstrs.getValue())
           .equals("option-unspecified")) {
    const PassRegistry *PR = PassRegistry::getPassRegistry();
    const PassInfo *TPI = PR->getPassInfo(PrintMachineInstrs.getValue());
    const PassInfo *IPI = PR->getPassInfo(StringRef("machineinstr-printer"));
    assert(TPI && IPI && "Pass ID not registered!");
    const char *TID = (const char *)(TPI->getTypeInfo());
    const char *IID = (const char *)(IPI->getTypeInfo());
    insertPass(TID, IID);
  }

  // Print the instruction selected machine code...
  printAndVerify("After Instruction Selection");

  // Expand pseudo-instructions emitted by ISel.
  addPass(&ExpandISelPseudosID);

  // Add passes that optimize machine instructions in SSA form.
  if (getOptLevel() != CodeGenOpt::None) {
    addMachineSSAOptimization();
  } else {
    // If the target requests it, assign local variables to stack slots relative
    // to one another and simplify frame index references where possible.
    addPass(&LocalStackSlotAllocationID, false);
  }

  // Run pre-ra passes.
  addPreRegAlloc();

  // Run register allocation and passes that are tightly coupled with it,
  // including phi elimination and scheduling.
  if (getOptimizeRegAlloc())
    addOptimizedRegAlloc(createRegAllocPass(true));
  else
    addFastRegAlloc(createRegAllocPass(false));

  // Run post-ra passes.
  addPostRegAlloc();

  // Insert prolog/epilog code. Eliminate abstract frame index references...
  if (getOptLevel() != CodeGenOpt::None)
    addPass(createShrinkWrapPass());
  addPass(&PrologEpilogCodeInserterID);

  /// Add passes that optimize machine instructions after register allocation.
  if (getOptLevel() != CodeGenOpt::None)
    addMachineLateOptimization();

  // Expand pseudo instructions before second scheduling pass.
  addPass(&ExpandPostRAPseudosID);

  // Run pre-sched2 passes.
  addPreSched2();

  if (EnableImplicitNullChecks)
    addPass(&ImplicitNullChecksID);

  // Second pass scheduler.
  if (getOptLevel() != CodeGenOpt::None) {
    if (MISchedPostRA)
      addPass(&PostMachineSchedulerID);
    else
      addPass(&PostRASchedulerID);
  }

  // GC
  if (addGCPasses()) {
    if (PrintGCInfo)
      addPass(createGCInfoPrinter(dbgs()), false, false);
  }

  // Basic block placement.
  if (getOptLevel() != CodeGenOpt::None)
    addBlockPlacement();

  addPreEmitPass();

  addPass(&StackMapLivenessID, false);

  AddingMachinePasses = false;
}
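
// Sketch of the addPre/Post extension style described above; hypothetical
// target code, with MyTargetBranchFixupID standing in for a real target pass:
//
//   void MyTargetPassConfig::addPreEmitPass() {
//     addPass(&MyTargetBranchFixupID);
//   }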

/// Add passes that optimize machine instructions in SSA form.
void TargetPassConfig::addMachineSSAOptimization() {
  // Pre-ra tail duplication.
  addPass(&EarlyTailDuplicateID);

  // Optimize PHIs before DCE: removing dead PHI cycles may make more
  // instructions dead.
  addPass(&OptimizePHIsID, false);

  // This pass merges large allocas. StackSlotColoring is a different pass
  // which merges spill slots.
  addPass(&StackColoringID, false);

  // If the target requests it, assign local variables to stack slots relative
  // to one another and simplify frame index references where possible.
  addPass(&LocalStackSlotAllocationID, false);

  // With optimization, dead code should already be eliminated. However
  // there is one known exception: lowered code for arguments that are only
  // used by tail calls, where the tail calls reuse the incoming stack
  // arguments directly (see t11 in test/CodeGen/X86/sibcall.ll).
  addPass(&DeadMachineInstructionElimID);

  // Allow targets to insert passes that improve instruction level parallelism,
  // like if-conversion. Such passes will typically need dominator trees and
  // loop info, just like LICM and CSE below.
  addILPOpts();

  addPass(&MachineLICMID, false);
  addPass(&MachineCSEID, false);
  addPass(&MachineSinkingID);

  addPass(&PeepholeOptimizerID, false);
  // Clean-up the dead code that may have been generated by peephole
  // rewriting.
  addPass(&DeadMachineInstructionElimID);
}

//===---------------------------------------------------------------------===//
/// Register Allocation Pass Configuration
//===---------------------------------------------------------------------===//

bool TargetPassConfig::getOptimizeRegAlloc() const {
  switch (OptimizeRegAlloc) {
  case cl::BOU_UNSET: return getOptLevel() != CodeGenOpt::None;
  case cl::BOU_TRUE: return true;
  case cl::BOU_FALSE: return false;
  }
  llvm_unreachable("Invalid optimize-regalloc state");
}

/// RegisterRegAlloc's global Registry tracks allocator registration.
MachinePassRegistry RegisterRegAlloc::Registry;
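
// Illustration of how an allocator becomes selectable through -regalloc: it
// registers a factory in this registry (the name, description, and
// createMyRegAlloc factory below are hypothetical):
//
//   static RegisterRegAlloc myRegAlloc("myalloc", "my experimental allocator",
//                                      createMyRegAlloc);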

/// A dummy default pass factory indicates whether the register allocator is
/// overridden on the command line.
static FunctionPass *useDefaultRegisterAllocator() { return nullptr; }
static RegisterRegAlloc
defaultRegAlloc("default",
                "pick register allocator based on -O option",
                useDefaultRegisterAllocator);

/// -regalloc=... command line option.
static cl::opt<RegisterRegAlloc::FunctionPassCtor, false,
               RegisterPassParser<RegisterRegAlloc> >
RegAlloc("regalloc",
         cl::init(&useDefaultRegisterAllocator),
         cl::desc("Register allocator to use"));

/// Instantiate the default register allocator pass for this target for either
/// the optimized or unoptimized allocation path. This will be added to the pass
/// manager by addFastRegAlloc in the unoptimized case or addOptimizedRegAlloc
/// in the optimized case.
///
/// A target that uses the standard regalloc pass order for fast or optimized
/// allocation may still override this for per-target regalloc
/// selection. But -regalloc=... always takes precedence.
FunctionPass *TargetPassConfig::createTargetRegisterAllocator(bool Optimized) {
  if (Optimized)
    return createGreedyRegisterAllocator();
  else
    return createFastRegisterAllocator();
}
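
// A backend that prefers a different default could override this; a sketch
// with a hypothetical MyTargetPassConfig (createBasicRegisterAllocator is the
// existing basic allocator factory):
//
//   FunctionPass *
//   MyTargetPassConfig::createTargetRegisterAllocator(bool Optimized) {
//     return Optimized ? createBasicRegisterAllocator()
//                      : createFastRegisterAllocator();
//   }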

/// Find and instantiate the register allocation pass requested by this target
/// at the current optimization level. Different register allocators are
/// defined as separate passes because they may require different analysis.
///
/// This helper ensures that the regalloc= option is always available,
/// even for targets that override the default allocator.
///
/// FIXME: When MachinePassRegistry registers pass IDs instead of function ptrs,
/// this can be folded into addPass.
FunctionPass *TargetPassConfig::createRegAllocPass(bool Optimized) {
  RegisterRegAlloc::FunctionPassCtor Ctor = RegisterRegAlloc::getDefault();

  // Initialize the global default.
  if (!Ctor) {
    Ctor = RegAlloc;
    RegisterRegAlloc::setDefault(RegAlloc);
  }
  if (Ctor != useDefaultRegisterAllocator)
    return Ctor();

  // With no -regalloc= override, ask the target for a regalloc pass.
  return createTargetRegisterAllocator(Optimized);
}

/// Return true if the default global register allocator is in use and
/// has not been overridden on the command line with '-regalloc=...'
bool TargetPassConfig::usingDefaultRegAlloc() const {
  return RegAlloc.getNumOccurrences() == 0;
}

/// Add the minimum set of target-independent passes that are required for
/// register allocation. No coalescing or scheduling.
void TargetPassConfig::addFastRegAlloc(FunctionPass *RegAllocPass) {
  addPass(&PHIEliminationID, false);
  addPass(&TwoAddressInstructionPassID, false);

  addPass(RegAllocPass);
}

/// Add standard target-independent passes that are tightly coupled with
/// optimized register allocation, including coalescing, machine instruction
/// scheduling, and register allocation itself.
void TargetPassConfig::addOptimizedRegAlloc(FunctionPass *RegAllocPass) {
  addPass(&ProcessImplicitDefsID, false);

  // LiveVariables currently requires pure SSA form.
  //
  // FIXME: Once TwoAddressInstruction pass no longer uses kill flags,
  // LiveVariables can be removed completely, and LiveIntervals can be directly
  // computed. (We still either need to regenerate kill flags after regalloc, or
  // preferably fix the scavenger to not depend on them).
  addPass(&LiveVariablesID, false);

  // Edge splitting is smarter with machine loop info.
  addPass(&MachineLoopInfoID, false);
  addPass(&PHIEliminationID, false);

  // Eventually, we want to run LiveIntervals before PHI elimination.
  if (EarlyLiveIntervals)
    addPass(&LiveIntervalsID, false);

  addPass(&TwoAddressInstructionPassID, false);
  addPass(&RegisterCoalescerID);

  // PreRA instruction scheduling.
  addPass(&MachineSchedulerID);

  // Add the selected register allocation pass.
  addPass(RegAllocPass);

  // Allow targets to change the register assignments before rewriting.
  addPreRewrite();

  // Finally rewrite virtual registers.
  addPass(&VirtRegRewriterID);

  // Perform stack slot coloring and post-ra machine LICM.
  //
  // FIXME: Re-enable coloring with register when it's capable of adding
  // kill markers.
  addPass(&StackSlotColoringID);

  // Run post-ra machine LICM to hoist reloads / remats.
  //
  // FIXME: can this move into MachineLateOptimization?
  addPass(&PostRAMachineLICMID);
}

//===---------------------------------------------------------------------===//
/// Post RegAlloc Pass Configuration
//===---------------------------------------------------------------------===//

/// Add passes that optimize machine instructions after register allocation.
void TargetPassConfig::addMachineLateOptimization() {
  // Branch folding must be run after regalloc and prolog/epilog insertion.
  addPass(&BranchFolderPassID);

  // Tail duplication.
  // Note that tail duplication just increases code size and degrades
  // performance for targets that require Structured Control Flow.
  // In addition it can also make the CFG irreducible. Thus we disable it.
  if (!TM->requiresStructuredCFG())
    addPass(&TailDuplicateID);

  // Copy propagation.
  addPass(&MachineCopyPropagationID);
}

/// Add standard GC passes.
bool TargetPassConfig::addGCPasses() {
  addPass(&GCMachineCodeAnalysisID, false);
  return true;
}

/// Add standard basic block placement passes.
void TargetPassConfig::addBlockPlacement() {
  if (addPass(&MachineBlockPlacementID, false)) {
    // Run a separate pass to collect block placement statistics.
    if (EnableBlockPlacementStats)
      addPass(&MachineBlockPlacementStatsID);
  }
}