//===-- AArch64TargetMachine.cpp - Define TargetMachine for AArch64 -------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
//
//===----------------------------------------------------------------------===//

#include "AArch64TargetMachine.h"
#include "AArch64.h"
#include "AArch64MacroFusion.h"
#include "AArch64Subtarget.h"
#include "AArch64TargetObjectFile.h"
#include "AArch64TargetTransformInfo.h"
#include "MCTargetDesc/AArch64MCTargetDesc.h"
#include "TargetInfo/AArch64TargetInfo.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/CSEConfigBase.h"
#include "llvm/CodeGen/GlobalISel/IRTranslator.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelect.h"
#include "llvm/CodeGen/GlobalISel/Legalizer.h"
#include "llvm/CodeGen/GlobalISel/Localizer.h"
#include "llvm/CodeGen/GlobalISel/RegBankSelect.h"
#include "llvm/CodeGen/MachineScheduler.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Function.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCTargetOptions.h"
#include "llvm/Pass.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Transforms/Scalar.h"
#include <memory>
#include <string>

using namespace llvm;

static cl::opt<bool> EnableCCMP("aarch64-enable-ccmp",
                                cl::desc("Enable the CCMP formation pass"),
                                cl::init(true), cl::Hidden);

static cl::opt<bool>
    EnableCondBrTuning("aarch64-enable-cond-br-tune",
                       cl::desc("Enable the conditional branch tuning pass"),
                       cl::init(true), cl::Hidden);

static cl::opt<bool> EnableMCR("aarch64-enable-mcr",
                               cl::desc("Enable the machine combiner pass"),
                               cl::init(true), cl::Hidden);

static cl::opt<bool> EnableStPairSuppress("aarch64-enable-stp-suppress",
                                          cl::desc("Suppress STP for AArch64"),
                                          cl::init(true), cl::Hidden);

static cl::opt<bool> EnableAdvSIMDScalar(
    "aarch64-enable-simd-scalar",
    cl::desc("Enable use of AdvSIMD scalar integer instructions"),
    cl::init(false), cl::Hidden);

static cl::opt<bool>
    EnablePromoteConstant("aarch64-enable-promote-const",
                          cl::desc("Enable the promote constant pass"),
                          cl::init(true), cl::Hidden);

static cl::opt<bool> EnableCollectLOH(
    "aarch64-enable-collect-loh",
    cl::desc("Enable the pass that emits the linker optimization hints (LOH)"),
    cl::init(true), cl::Hidden);

static cl::opt<bool>
    EnableDeadRegisterElimination("aarch64-enable-dead-defs", cl::Hidden,
                                  cl::desc("Enable the pass that removes dead"
                                           " definitions and replaces stores to"
                                           " them with stores to the zero"
                                           " register"),
                                  cl::init(true));

static cl::opt<bool> EnableRedundantCopyElimination(
    "aarch64-enable-copyelim",
    cl::desc("Enable the redundant copy elimination pass"), cl::init(true),
    cl::Hidden);

static cl::opt<bool> EnableLoadStoreOpt("aarch64-enable-ldst-opt",
                                        cl::desc("Enable the load/store pair"
                                                 " optimization pass"),
                                        cl::init(true), cl::Hidden);

static cl::opt<bool> EnableAtomicTidy(
    "aarch64-enable-atomic-cfg-tidy", cl::Hidden,
    cl::desc("Run SimplifyCFG after expanding atomic operations"
             " to make use of cmpxchg flow-based information"),
    cl::init(true));

static cl::opt<bool>
    EnableEarlyIfConversion("aarch64-enable-early-ifcvt", cl::Hidden,
                            cl::desc("Run early if-conversion"),
                            cl::init(true));

static cl::opt<bool>
    EnableCondOpt("aarch64-enable-condopt",
                  cl::desc("Enable the condition optimizer pass"),
                  cl::init(true), cl::Hidden);

static cl::opt<bool>
    EnableA53Fix835769("aarch64-fix-cortex-a53-835769", cl::Hidden,
                       cl::desc("Work around Cortex-A53 erratum 835769"),
                       cl::init(false));

static cl::opt<bool>
    EnableGEPOpt("aarch64-enable-gep-opt", cl::Hidden,
                 cl::desc("Enable optimizations on complex GEPs"),
                 cl::init(false));

static cl::opt<bool>
    BranchRelaxation("aarch64-enable-branch-relax", cl::Hidden, cl::init(true),
                     cl::desc("Relax out of range conditional branches"));

static cl::opt<bool> EnableCompressJumpTables(
    "aarch64-enable-compress-jump-tables", cl::Hidden, cl::init(true),
    cl::desc("Use smallest entry possible for jump tables"));

// FIXME: Unify control over GlobalMerge.
static cl::opt<cl::boolOrDefault>
    EnableGlobalMerge("aarch64-enable-global-merge", cl::Hidden,
                      cl::desc("Enable the global merge pass"));

static cl::opt<bool>
    EnableLoopDataPrefetch("aarch64-enable-loop-data-prefetch", cl::Hidden,
                           cl::desc("Enable the loop data prefetch pass"),
                           cl::init(true));

static cl::opt<int> EnableGlobalISelAtO(
    "aarch64-enable-global-isel-at-O", cl::Hidden,
    cl::desc("Enable GlobalISel at or below an opt level (-1 to disable)"),
    cl::init(0));

static cl::opt<bool> EnableFalkorHWPFFix("aarch64-enable-falkor-hwpf-fix",
                                         cl::init(true), cl::Hidden);

static cl::opt<bool>
    EnableBranchTargets("aarch64-enable-branch-targets", cl::Hidden,
                        cl::desc("Enable the AArch64 branch target pass"),
                        cl::init(true));

extern "C" void LLVMInitializeAArch64Target() {
  // Register the target.
  RegisterTargetMachine<AArch64leTargetMachine> X(getTheAArch64leTarget());
  RegisterTargetMachine<AArch64beTargetMachine> Y(getTheAArch64beTarget());
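  // The "arm64" name is an alias: it maps to the same little-endian
  // TargetMachine as "aarch64".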
  RegisterTargetMachine<AArch64leTargetMachine> Z(getTheARM64Target());
  auto PR = PassRegistry::getPassRegistry();
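  // Initializing a pass registers it with the PassRegistry, which makes it
  // addressable by name (e.g. by -run-pass or -print-after).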
  initializeGlobalISel(*PR);
  initializeAArch64A53Fix835769Pass(*PR);
  initializeAArch64A57FPLoadBalancingPass(*PR);
  initializeAArch64AdvSIMDScalarPass(*PR);
  initializeAArch64BranchTargetsPass(*PR);
  initializeAArch64CollectLOHPass(*PR);
  initializeAArch64CompressJumpTablesPass(*PR);
  initializeAArch64ConditionalComparesPass(*PR);
  initializeAArch64ConditionOptimizerPass(*PR);
  initializeAArch64DeadRegisterDefinitionsPass(*PR);
  initializeAArch64ExpandPseudoPass(*PR);
  initializeAArch64LoadStoreOptPass(*PR);
  initializeAArch64SIMDInstrOptPass(*PR);
  initializeAArch64PreLegalizerCombinerPass(*PR);
  initializeAArch64PromoteConstantPass(*PR);
  initializeAArch64RedundantCopyEliminationPass(*PR);
  initializeAArch64StorePairSuppressPass(*PR);
  initializeFalkorHWPFFixPass(*PR);
  initializeFalkorMarkStridedAccessesLegacyPass(*PR);
  initializeLDTLSCleanupPass(*PR);
  initializeAArch64SpeculationHardeningPass(*PR);
  initializeAArch64StackTaggingPass(*PR);
}

//===----------------------------------------------------------------------===//
// AArch64 Lowering public interface.
//===----------------------------------------------------------------------===//
static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
  if (TT.isOSBinFormatMachO())
    return llvm::make_unique<AArch64_MachoTargetObjectFile>();
  if (TT.isOSBinFormatCOFF())
    return llvm::make_unique<AArch64_COFFTargetObjectFile>();

  return llvm::make_unique<AArch64_ELFTargetObjectFile>();
}

// Helper function to build a DataLayout string
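// In these strings, "e"/"E" selects little/big endian, "m:<c>" the symbol
// mangling scheme (o = Mach-O, e = ELF, w = COFF), "p"/"iN" pointer and
// integer sizes/alignments, "nN:M" the native integer widths, and "S<bits>"
// the natural stack alignment.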
static std::string computeDataLayout(const Triple &TT,
                                     const MCTargetOptions &Options,
                                     bool LittleEndian) {
  if (Options.getABIName() == "ilp32")
    return "e-m:e-p:32:32-i8:8-i16:16-i64:64-S128";
  if (TT.isOSBinFormatMachO())
    return "e-m:o-i64:64-i128:128-n32:64-S128";
  if (TT.isOSBinFormatCOFF())
    return "e-m:w-p:64:64-i32:32-i64:64-i128:128-n32:64-S128";
  if (LittleEndian)
    return "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128";
  return "E-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128";
}

static Reloc::Model getEffectiveRelocModel(const Triple &TT,
                                           Optional<Reloc::Model> RM) {
  // AArch64 Darwin and Windows are always PIC.
  if (TT.isOSDarwin() || TT.isOSWindows())
    return Reloc::PIC_;
  // On ELF platforms the default static relocation model has a smart enough
  // linker to cope with referencing external symbols defined in a shared
  // library. Hence DynamicNoPIC doesn't need to be promoted to PIC.
  if (!RM.hasValue() || *RM == Reloc::DynamicNoPIC)
    return Reloc::Static;
  return *RM;
}

static CodeModel::Model
getEffectiveAArch64CodeModel(const Triple &TT, Optional<CodeModel::Model> CM,
                             bool JIT) {
  if (CM) {
    if (*CM != CodeModel::Small && *CM != CodeModel::Tiny &&
        *CM != CodeModel::Large) {
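      // Fuchsia additionally accepts the kernel code model.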
      if (!TT.isOSFuchsia())
        report_fatal_error(
            "Only small, tiny and large code models are allowed on AArch64");
      else if (*CM != CodeModel::Kernel)
        report_fatal_error("Only small, tiny, kernel, and large code models "
                           "are allowed on AArch64");
    } else if (*CM == CodeModel::Tiny && !TT.isOSBinFormatELF())
      report_fatal_error("tiny code model is only supported on ELF");
    return *CM;
  }

  // The default MCJIT memory managers make no guarantees about where they can
  // find an executable page; JITed code needs to be able to refer to globals
  // no matter how far away they are.
  if (JIT)
    return CodeModel::Large;
  return CodeModel::Small;
}

/// Create an AArch64 architecture model.
///
AArch64TargetMachine::AArch64TargetMachine(const Target &T, const Triple &TT,
                                           StringRef CPU, StringRef FS,
                                           const TargetOptions &Options,
                                           Optional<Reloc::Model> RM,
                                           Optional<CodeModel::Model> CM,
                                           CodeGenOpt::Level OL, bool JIT,
                                           bool LittleEndian)
    : LLVMTargetMachine(T,
                        computeDataLayout(TT, Options.MCOptions, LittleEndian),
                        TT, CPU, FS, Options, getEffectiveRelocModel(TT, RM),
                        getEffectiveAArch64CodeModel(TT, CM, JIT), OL),
      TLOF(createTLOF(getTargetTriple())), isLittle(LittleEndian) {
  initAsmInfo();

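  // TrapUnreachable emits a trap for IR 'unreachable'; NoTrapAfterNoreturn
  // skips that trap when it would immediately follow a noreturn call.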
  if (TT.isOSBinFormatMachO()) {
    this->Options.TrapUnreachable = true;
    this->Options.NoTrapAfterNoreturn = true;
  }

  if (getMCAsmInfo()->usesWindowsCFI()) {
    // Unwinding can get confused if the last instruction in an
    // exception-handling region (function, funclet, try block, etc.)
    // is a call.
    //
    // FIXME: We could elide the trap if the next instruction would be in
    // the same region anyway.
    this->Options.TrapUnreachable = true;
  }

  // Enable GlobalISel at or below EnableGlobalISelAtO.
  if (getOptLevel() <= EnableGlobalISelAtO) {
    setGlobalISel(true);
    setGlobalISelAbort(GlobalISelAbortMode::Disable);
  }

  // AArch64 supports the MachineOutliner.
  setMachineOutliner(true);

  // AArch64 supports default outlining behaviour.
  setSupportsDefaultOutlining(true);
}

AArch64TargetMachine::~AArch64TargetMachine() = default;

const AArch64Subtarget *
AArch64TargetMachine::getSubtargetImpl(const Function &F) const {
  Attribute CPUAttr = F.getFnAttribute("target-cpu");
  Attribute FSAttr = F.getFnAttribute("target-features");

  std::string CPU = !CPUAttr.hasAttribute(Attribute::None)
                        ? CPUAttr.getValueAsString().str()
                        : TargetCPU;
  std::string FS = !FSAttr.hasAttribute(Attribute::None)
                       ? FSAttr.getValueAsString().str()
                       : TargetFS;

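  // Subtargets are cached by the concatenated CPU and feature strings, so all
  // functions with identical target attributes share one AArch64Subtarget.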
  auto &I = SubtargetMap[CPU + FS];
  if (!I) {
    // This needs to be done before we create a new subtarget since any
    // creation will depend on the TM and the code generation flags on the
    // function that reside in TargetOptions.
    resetTargetOptions(F);
    I = llvm::make_unique<AArch64Subtarget>(TargetTriple, CPU, FS, *this,
                                            isLittle);
  }
  return I.get();
}

void AArch64leTargetMachine::anchor() { }

AArch64leTargetMachine::AArch64leTargetMachine(
    const Target &T, const Triple &TT, StringRef CPU, StringRef FS,
    const TargetOptions &Options, Optional<Reloc::Model> RM,
    Optional<CodeModel::Model> CM, CodeGenOpt::Level OL, bool JIT)
    : AArch64TargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, JIT, true) {}

void AArch64beTargetMachine::anchor() { }

AArch64beTargetMachine::AArch64beTargetMachine(
    const Target &T, const Triple &TT, StringRef CPU, StringRef FS,
    const TargetOptions &Options, Optional<Reloc::Model> RM,
    Optional<CodeModel::Model> CM, CodeGenOpt::Level OL, bool JIT)
    : AArch64TargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, JIT, false) {}

namespace {
|
2017-01-06 08:30:53 +08:00
|
|
|
|
2014-05-24 20:50:23 +08:00
|
|
|
/// AArch64 Code Generator Pass Configuration Options.
|
|
|
|
class AArch64PassConfig : public TargetPassConfig {
|
2014-03-29 18:18:08 +08:00
|
|
|
public:
|
2017-05-31 05:36:41 +08:00
|
|
|
AArch64PassConfig(AArch64TargetMachine &TM, PassManagerBase &PM)
|
2014-09-13 01:40:39 +08:00
|
|
|
: TargetPassConfig(TM, PM) {
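    // At -O1 and above, run the MachineScheduler again after register
    // allocation in place of the default post-RA list scheduler.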
    if (TM.getOptLevel() != CodeGenOpt::None)
      substitutePass(&PostRASchedulerID, &PostMachineSchedulerID);
  }

  AArch64TargetMachine &getAArch64TargetMachine() const {
    return getTM<AArch64TargetMachine>();
  }

  ScheduleDAGInstrs *
  createMachineScheduler(MachineSchedContext *C) const override {
    const AArch64Subtarget &ST = C->MF->getSubtarget<AArch64Subtarget>();
    ScheduleDAGMILive *DAG = createGenericSchedLive(C);
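    // Cluster neighbouring loads and stores during scheduling so they are
    // more likely to be combinable into LDP/STP instructions later.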
    DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
    DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
    if (ST.hasFusion())
      DAG->addMutation(createAArch64MacroFusionDAGMutation());
    return DAG;
  }

  ScheduleDAGInstrs *
  createPostMachineScheduler(MachineSchedContext *C) const override {
    const AArch64Subtarget &ST = C->MF->getSubtarget<AArch64Subtarget>();
    if (ST.hasFusion()) {
      // Run the Macro Fusion after RA again since literals are expanded from
      // pseudos then (v. addPreSched2()).
      ScheduleDAGMI *DAG = createGenericSchedPostRA(C);
      DAG->addMutation(createAArch64MacroFusionDAGMutation());
      return DAG;
    }

    return nullptr;
  }

  void addIRPasses() override;
  bool addPreISel() override;
  bool addInstSelector() override;
  bool addIRTranslator() override;
  void addPreLegalizeMachineIR() override;
  bool addLegalizeMachineIR() override;
  bool addRegBankSelect() override;
  void addPreGlobalInstructionSelect() override;
  bool addGlobalInstructionSelect() override;
  bool addILPOpts() override;
  void addPreRegAlloc() override;
  void addPostRegAlloc() override;
  void addPreSched2() override;
  void addPreEmitPass() override;

  std::unique_ptr<CSEConfigBase> getCSEConfig() const override;
};

} // end anonymous namespace

TargetTransformInfo
AArch64TargetMachine::getTargetTransformInfo(const Function &F) {
  return TargetTransformInfo(AArch64TTIImpl(this, F));
}

TargetPassConfig *AArch64TargetMachine::createPassConfig(PassManagerBase &PM) {
  return new AArch64PassConfig(*this, PM);
}

std::unique_ptr<CSEConfigBase> AArch64PassConfig::getCSEConfig() const {
  return getStandardCSEConfigForOpt(TM->getOptLevel());
}

void AArch64PassConfig::addIRPasses() {
  // Always expand atomic operations, we don't deal with atomicrmw or cmpxchg
  // ourselves.
  addPass(createAtomicExpandPass());

  // Cmpxchg instructions are often used with a subsequent comparison to
  // determine whether it succeeded. We can exploit existing control-flow in
  // ldrex/strex loops to simplify this, but it needs tidying up.
  if (TM->getOptLevel() != CodeGenOpt::None && EnableAtomicTidy)
    addPass(createCFGSimplificationPass(1, true, true, false, true));

  // Run LoopDataPrefetch
  //
  // Run this before LSR to remove the multiplies involved in computing the
  // pointer values N iterations ahead.
  if (TM->getOptLevel() != CodeGenOpt::None) {
    if (EnableLoopDataPrefetch)
      addPass(createLoopDataPrefetchPass());
    if (EnableFalkorHWPFFix)
      addPass(createFalkorMarkStridedAccessesPass());
  }

  TargetPassConfig::addIRPasses();

  // Match interleaved memory accesses to ldN/stN intrinsics.
  if (TM->getOptLevel() != CodeGenOpt::None) {
    addPass(createInterleavedLoadCombinePass());
    addPass(createInterleavedAccessPass());
  }

  if (TM->getOptLevel() == CodeGenOpt::Aggressive && EnableGEPOpt) {
    // Call SeparateConstOffsetFromGEP pass to extract constants within indices
    // and lower a GEP with multiple indices to either arithmetic operations or
    // multiple GEPs with single index.
    addPass(createSeparateConstOffsetFromGEPPass(true));
    // Call EarlyCSE pass to find and remove subexpressions in the lowered
    // result.
    addPass(createEarlyCSEPass());
    // Do loop invariant code motion in case part of the lowered result is
    // invariant.
    addPass(createLICMPass());
  }
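  // IR-level preparation of stack slots for memory tagging (MTE).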
  addPass(createAArch64StackTaggingPass());
}

// Pass Pipeline Configuration
bool AArch64PassConfig::addPreISel() {
  // Run promote constant before global merge, so that the promoted constants
  // get a chance to be merged
  if (TM->getOptLevel() != CodeGenOpt::None && EnablePromoteConstant)
    addPass(createAArch64PromoteConstantPass());
  // FIXME: On AArch64, this depends on the type.
  // Basically, the addressable offsets are up to 4095 * Ty.getSizeInBytes(),
  // and the offset has to be a multiple of the related size in bytes.
  if ((TM->getOptLevel() != CodeGenOpt::None &&
       EnableGlobalMerge == cl::BOU_UNSET) ||
      EnableGlobalMerge == cl::BOU_TRUE) {
    bool OnlyOptimizeForSize = (TM->getOptLevel() < CodeGenOpt::Aggressive) &&
                               (EnableGlobalMerge == cl::BOU_UNSET);

    // Merging of extern globals is enabled by default on non-Mach-O as we
    // expect it to be generally either beneficial or harmless. On Mach-O it
    // is disabled as we emit the .subsections_via_symbols directive which
    // means that merging extern globals is not safe.
    bool MergeExternalByDefault = !TM->getTargetTriple().isOSBinFormatMachO();

    // FIXME: extern global merging is only enabled when we optimise for size
    // because there are some regressions with it also enabled for performance.
    if (!OnlyOptimizeForSize)
      MergeExternalByDefault = false;

    addPass(createGlobalMergePass(TM, 4095, OnlyOptimizeForSize,
                                  MergeExternalByDefault));
  }

  return false;
}

bool AArch64PassConfig::addInstSelector() {
  addPass(createAArch64ISelDag(getAArch64TargetMachine(), getOptLevel()));

  // For ELF, cleanup any local-dynamic TLS accesses (i.e., combine as many
  // references to _TLS_MODULE_BASE_ as possible).
  if (TM->getTargetTriple().isOSBinFormatELF() &&
      getOptLevel() != CodeGenOpt::None)
    addPass(createAArch64CleanupLocalDynamicTLSPass());

  return false;
}

bool AArch64PassConfig::addIRTranslator() {
  addPass(new IRTranslator());
  return false;
}

void AArch64PassConfig::addPreLegalizeMachineIR() {
  addPass(createAArch64PreLegalizeCombiner());
}

bool AArch64PassConfig::addLegalizeMachineIR() {
  addPass(new Legalizer());
  return false;
}

bool AArch64PassConfig::addRegBankSelect() {
  addPass(new RegBankSelect());
  return false;
}

void AArch64PassConfig::addPreGlobalInstructionSelect() {
  // Workaround the deficiency of the fast register allocator.
  if (TM->getOptLevel() == CodeGenOpt::None)
    addPass(new Localizer());
}

bool AArch64PassConfig::addGlobalInstructionSelect() {
  addPass(new InstructionSelect());
  return false;
}

bool AArch64PassConfig::addILPOpts() {
  if (EnableCondOpt)
    addPass(createAArch64ConditionOptimizerPass());
  if (EnableCCMP)
    addPass(createAArch64ConditionalCompares());
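  // The machine combiner rewrites instruction sequences into cheaper forms
  // using target-supplied patterns (e.g. folding MUL + ADD into MADD).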
  if (EnableMCR)
    addPass(&MachineCombinerID);
  if (EnableCondBrTuning)
    addPass(createAArch64CondBrTuning());
  if (EnableEarlyIfConversion)
    addPass(&EarlyIfConverterID);
  if (EnableStPairSuppress)
    addPass(createAArch64StorePairSuppressPass());
  addPass(createAArch64SIMDInstrOptPass());
  return true;
}

void AArch64PassConfig::addPreRegAlloc() {
  // Change dead register definitions to refer to the zero register.
  if (TM->getOptLevel() != CodeGenOpt::None && EnableDeadRegisterElimination)
    addPass(createAArch64DeadRegisterDefinitions());

  // Use AdvSIMD scalar instructions whenever profitable.
  if (TM->getOptLevel() != CodeGenOpt::None && EnableAdvSIMDScalar) {
    addPass(createAArch64AdvSIMDScalar());
    // The AdvSIMD pass may produce copies that can be rewritten to
    // be register coalescer friendly.
    addPass(&PeepholeOptimizerID);
  }
}

void AArch64PassConfig::addPostRegAlloc() {
  // Remove redundant copy instructions.
  if (TM->getOptLevel() != CodeGenOpt::None && EnableRedundantCopyElimination)
    addPass(createAArch64RedundantCopyEliminationPass());

  if (TM->getOptLevel() != CodeGenOpt::None && usingDefaultRegAlloc())
    // Improve performance for some FP/SIMD code for A57.
    addPass(createAArch64A57FPLoadBalancing());
}

void AArch64PassConfig::addPreSched2() {
  // Expand some pseudo instructions to allow proper scheduling.
  addPass(createAArch64ExpandPseudoPass());
  // Use load/store pair instructions when possible.
  if (TM->getOptLevel() != CodeGenOpt::None) {
    if (EnableLoadStoreOpt)
      addPass(createAArch64LoadStoreOptimizationPass());
  }

  // The AArch64SpeculationHardeningPass destroys dominator tree and natural
  // loop info, which is needed for the FalkorHWPFFixPass and also later on.
  // Therefore, run the AArch64SpeculationHardeningPass before the
  // FalkorHWPFFixPass to avoid recomputing dominator tree and natural loop
  // info.
  addPass(createAArch64SpeculationHardeningPass());

  if (TM->getOptLevel() != CodeGenOpt::None) {
    if (EnableFalkorHWPFFix)
      addPass(createFalkorHWPFFixPass());
  }
}

void AArch64PassConfig::addPreEmitPass() {
  // Machine Block Placement might have created new opportunities when run
  // at O3, where the Tail Duplication Threshold is set to 4 instructions.
  // Run the load/store optimizer once more.
  if (TM->getOptLevel() >= CodeGenOpt::Aggressive && EnableLoadStoreOpt)
    addPass(createAArch64LoadStoreOptimizationPass());

  if (EnableA53Fix835769)
    addPass(createAArch64A53Fix835769());

  // Relax conditional branch instructions if they're otherwise out of
  // range of their destination.
  if (BranchRelaxation)
    addPass(&BranchRelaxationPassID);

  if (EnableBranchTargets)
    addPass(createAArch64BranchTargetsPass());

  if (TM->getOptLevel() != CodeGenOpt::None && EnableCompressJumpTables)
    addPass(createAArch64CompressJumpTablesPass());

  if (TM->getOptLevel() != CodeGenOpt::None && EnableCollectLOH &&
      TM->getTargetTriple().isOSBinFormatMachO())
    addPass(createAArch64CollectLOHPass());
}