//===-- ARMTargetMachine.cpp - Define TargetMachine for ARM ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
//
//===----------------------------------------------------------------------===//

#include "ARMTargetMachine.h"
#include "ARM.h"
#include "ARMMacroFusion.h"
#include "ARMSubtarget.h"
#include "ARMTargetObjectFile.h"
#include "ARMTargetTransformInfo.h"
#include "MCTargetDesc/ARMMCTargetDesc.h"
#include "TargetInfo/ARMTargetInfo.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/ExecutionDomainFix.h"
#include "llvm/CodeGen/GlobalISel/CallLowering.h"
#include "llvm/CodeGen/GlobalISel/IRTranslator.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelect.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/GlobalISel/Legalizer.h"
#include "llvm/CodeGen/GlobalISel/LegalizerInfo.h"
#include "llvm/CodeGen/GlobalISel/RegBankSelect.h"
#include "llvm/CodeGen/GlobalISel/RegisterBankInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineScheduler.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/Pass.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TargetParser.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Transforms/CFGuard.h"
#include "llvm/Transforms/IPO.h"
#include "llvm/Transforms/Scalar.h"
#include <cassert>
#include <memory>
#include <string>

using namespace llvm;

static cl::opt<bool>
DisableA15SDOptimization("disable-a15-sd-optimization", cl::Hidden,
                         cl::desc("Inhibit optimization of S->D register accesses on A15"),
                         cl::init(false));

static cl::opt<bool>
EnableAtomicTidy("arm-atomic-cfg-tidy", cl::Hidden,
                 cl::desc("Run SimplifyCFG after expanding atomic operations"
                          " to make use of cmpxchg flow-based information"),
                 cl::init(true));

static cl::opt<bool>
EnableARMLoadStoreOpt("arm-load-store-opt", cl::Hidden,
                      cl::desc("Enable ARM load/store optimization pass"),
                      cl::init(true));

// FIXME: Unify control over GlobalMerge.
static cl::opt<cl::boolOrDefault>
EnableGlobalMerge("arm-global-merge", cl::Hidden,
                  cl::desc("Enable the global merge pass"));
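
// Note: these are hidden cl::opt flags, so they do not show up in --help but
// can still be set from any tool that links the ARM backend. An illustrative
// invocation (assuming a locally built llc) would be:
//   llc -mtriple=armv7-linux-gnueabihf -arm-atomic-cfg-tidy=false input.ll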

namespace llvm {
  void initializeARMExecutionDomainFixPass(PassRegistry&);
}
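
// Called by the target registry machinery (e.g. via InitializeAllTargets());
// registers the little- and big-endian ARM and Thumb target machines and the
// ARM-specific codegen passes used below.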
extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeARMTarget() {
|
2009-07-25 14:49:55 +08:00
|
|
|
// Register the target.
|
2016-10-10 07:00:34 +08:00
|
|
|
RegisterTargetMachine<ARMLETargetMachine> X(getTheARMLETarget());
|
2017-05-24 18:18:57 +08:00
|
|
|
RegisterTargetMachine<ARMLETargetMachine> A(getTheThumbLETarget());
|
2016-10-10 07:00:34 +08:00
|
|
|
RegisterTargetMachine<ARMBETargetMachine> Y(getTheARMBETarget());
|
2017-05-24 18:18:57 +08:00
|
|
|
RegisterTargetMachine<ARMBETargetMachine> B(getTheThumbBETarget());
|
2016-07-16 10:24:10 +08:00
|
|
|
|
|
|
|
PassRegistry &Registry = *PassRegistry::getPassRegistry();
|
2016-11-11 16:27:37 +08:00
|
|
|
initializeGlobalISel(Registry);
|
2016-07-16 10:24:10 +08:00
|
|
|
initializeARMLoadStoreOptPass(Registry);
|
|
|
|
initializeARMPreAllocLoadStoreOptPass(Registry);
|
2018-06-28 20:55:29 +08:00
|
|
|
initializeARMParallelDSPPass(Registry);
|
2017-02-13 22:07:25 +08:00
|
|
|
initializeARMConstantIslandsPass(Registry);
|
2018-01-22 18:05:23 +08:00
|
|
|
initializeARMExecutionDomainFixPass(Registry);
|
2017-09-06 06:45:23 +08:00
|
|
|
initializeARMExpandPseudoPass(Registry);
|
2017-12-19 20:19:08 +08:00
|
|
|
initializeThumb2SizeReducePass(Registry);
|
2019-06-14 19:46:05 +08:00
|
|
|
initializeMVEVPTBlockPass(Registry);
|
2021-03-02 05:57:19 +08:00
|
|
|
initializeMVETPAndVPTOptimisationsPass(Registry);
|
[ARM] MVE Tail Predication
The MVE and LOB extensions of Armv8.1m can be combined to enable
'tail predication' which removes the need for a scalar remainder
loop after vectorization. Lane predication is performed implicitly
via a system register. The effects of predication is described in
Section B5.6.3 of the Armv8.1-m Arch Reference Manual, the key points
being:
- For vector operations that perform reduction across the vector and
produce a scalar result, whether the value is accumulated or not.
- For non-load instructions, the predicate flags determine if the
destination register byte is updated with the new value or if the
previous value is preserved.
- For vector store instructions, whether the store occurs or not.
- For vector load instructions, whether the value that is loaded or
whether zeros are written to that element of the destination
register.
This patch implements a pass that takes a hardware loop, containing
masked vector instructions, and converts it something that resembles
an MVE tail predicated loop. Currently, if we had code generation,
we'd generate a loop in which the VCTP would generate the predicate
and VPST would then setup the value of VPR.PO. The loads and stores
would be placed in VPT blocks so this is not tail predication, but
normal VPT predication with the predicate based upon a element
counting induction variable. Further work needs to be done to finally
produce a true tail predicated loop.
Because only the loads and stores are predicated, in both the LLVM IR
and MIR level, we will restrict support to only lane-wise operations
(no horizontal reductions). We will perform a final check on MIR
during loop finalisation too.
Another restriction, specific to MVE, is that all the vector
instructions need operate on the same number of elements. This is
because predication is performed at the byte level and this is set
on entry to the loop, or by the VCTP instead.
Differential Revision: https://reviews.llvm.org/D65884
llvm-svn: 371179
2019-09-06 16:24:41 +08:00
|
|
|
initializeMVETailPredicationPass(Registry);
|
2019-06-25 18:45:51 +08:00
|
|
|
initializeARMLowOverheadLoopsPass(Registry);
|
2020-11-26 19:05:23 +08:00
|
|
|
initializeARMBlockPlacementPass(Registry);
|
2020-01-08 21:08:27 +08:00
|
|
|
initializeMVEGatherScatterLoweringPass(Registry);
|
[ARM] Implement harden-sls-retbr for ARM mode
Some processors may speculatively execute the instructions immediately
following indirect control flow, such as returns, indirect jumps and
indirect function calls.
To avoid a potential miss-speculatively executed gadget after these
instructions leaking secrets through side channels, this pass places a
speculation barrier immediately after every indirect control flow where
control flow doesn't return to the next instruction, such as returns and
indirect jumps, but not indirect function calls.
Hardening of indirect function calls will be done in a later,
independent patch.
This patch is implementing the same functionality as the AArch64 counter
part implemented in https://reviews.llvm.org/D81400.
For AArch64, returns and indirect jumps only occur on RET and BR
instructions and hence the function attribute to control the hardening
is called "harden-sls-retbr" there. On AArch32, there is a much wider
variety of instructions that can trigger an indirect unconditional
control flow change. I've decided to stick with the name
"harden-sls-retbr" as introduced for the corresponding AArch64
mitigation.
This patch implements this for ARM mode. A future patch will extend this
to also support Thumb mode.
The inserted barriers are never on the correct, architectural execution
path, and therefore performance overhead of this is expected to be low.
To ensure these barriers are never on an architecturally executed path,
when the harden-sls-retbr function attribute is present, indirect
control flow is never conditionalized/predicated.
On targets that implement that Armv8.0-SB Speculation Barrier extension,
a single SB instruction is emitted that acts as a speculation barrier.
On other targets, a DSB SYS followed by a ISB is emitted to act as a
speculation barrier.
These speculation barriers are implemented as pseudo instructions to
avoid later passes to analyze them and potentially remove them.
The mitigation is off by default and can be enabled by the
harden-sls-retbr subtarget feature.
Differential Revision: https://reviews.llvm.org/D92395
2020-10-29 05:04:11 +08:00
|
|
|
initializeARMSLSHardeningPass(Registry);
|
2021-03-29 02:34:58 +08:00
|
|
|
initializeMVELaneInterleavingPass(Registry);
|
2009-07-25 14:49:55 +08:00
|
|
|
}
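
// Illustrative sketch (not part of this file, assuming the usual TargetRegistry
// APIs): once LLVMInitializeARMTarget() has run, a driver can construct one of
// these target machines by triple, e.g.:
//   std::string Err;
//   const Target *T =
//       TargetRegistry::lookupTarget("armv7-unknown-linux-gnueabihf", Err);
//   std::unique_ptr<TargetMachine> TM(T->createTargetMachine(
//       "armv7-unknown-linux-gnueabihf", "generic", "", TargetOptions(), None));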
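
// Pick the TargetLoweringObjectFile implementation that matches the triple's
// object format (Mach-O, COFF, or ELF).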
static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
  if (TT.isOSBinFormatMachO())
    return std::make_unique<TargetLoweringObjectFileMachO>();
  if (TT.isOSWindows())
    return std::make_unique<TargetLoweringObjectFileCOFF>();
  return std::make_unique<ARMElfTargetObjectFile>();
}
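
// Map the requested ABI name (from Options.MCOptions, typically set via
// -target-abi) or, if none was given, the triple/CPU default onto the
// backend's ARMABI enum.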
static ARMBaseTargetMachine::ARMABI
computeTargetABI(const Triple &TT, StringRef CPU,
                 const TargetOptions &Options) {
  StringRef ABIName = Options.MCOptions.getABIName();

  if (ABIName.empty())
    ABIName = ARM::computeDefaultTargetABI(TT, CPU);

  if (ABIName == "aapcs16")
    return ARMBaseTargetMachine::ARM_ABI_AAPCS16;
  else if (ABIName.startswith("aapcs"))
    return ARMBaseTargetMachine::ARM_ABI_AAPCS;
  else if (ABIName.startswith("apcs"))
    return ARMBaseTargetMachine::ARM_ABI_APCS;

  llvm_unreachable("Unhandled/unknown ABI Name!");
  return ARMBaseTargetMachine::ARM_ABI_UNKNOWN;
}
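
// Build the LLVM data-layout string for the chosen endianness and ABI. As an
// illustration (not exhaustive), a little-endian AAPCS ELF target ends up with
// something like:
//   e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64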
static std::string computeDataLayout(const Triple &TT, StringRef CPU,
                                     const TargetOptions &Options,
                                     bool isLittle) {
  auto ABI = computeTargetABI(TT, CPU, Options);
  std::string Ret;

  if (isLittle)
    // Little endian.
    Ret += "e";
  else
    // Big endian.
    Ret += "E";

  Ret += DataLayout::getManglingComponent(TT);

  // Pointers are 32 bits and aligned to 32 bits.
  Ret += "-p:32:32";

  // Function pointers are aligned to 8 bits (because the LSB stores the
  // ARM/Thumb state).
  Ret += "-Fi8";

  // ABIs other than APCS have 64 bit integers with natural alignment.
  if (ABI != ARMBaseTargetMachine::ARM_ABI_APCS)
    Ret += "-i64:64";

  // We have 64 bit floats. The APCS ABI requires them to be aligned to 32
  // bits, others to 64 bits. We always try to align to 64 bits.
  if (ABI == ARMBaseTargetMachine::ARM_ABI_APCS)
    Ret += "-f64:32:64";

  // We have 128 and 64 bit vectors. The APCS ABI aligns them to 32 bits, others
  // to 64. We always try to give them natural alignment.
  if (ABI == ARMBaseTargetMachine::ARM_ABI_APCS)
    Ret += "-v64:32:64-v128:32:128";
  else if (ABI != ARMBaseTargetMachine::ARM_ABI_AAPCS16)
    Ret += "-v128:64:128";

  // Try to align aggregates to 32 bits (the default is 64 bits, which has no
  // particular hardware support on 32-bit ARM).
  Ret += "-a:0:32";

  // Integer registers are 32 bits.
  Ret += "-n32";

  // The stack is 128 bit aligned on NaCl, 64 bit aligned on AAPCS and 32 bit
  // aligned everywhere else.
  if (TT.isOSNaCl() || ABI == ARMBaseTargetMachine::ARM_ABI_AAPCS16)
    Ret += "-S128";
  else if (ABI == ARMBaseTargetMachine::ARM_ABI_AAPCS)
    Ret += "-S64";
  else
    Ret += "-S32";

  return Ret;
}
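
// Resolve the requested relocation model: default to PIC on Mach-O and Static
// elsewhere when none was given, assert that ROPI/RWPI are only used with ELF,
// and treat DynamicNoPIC as Static outside Darwin.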
static Reloc::Model getEffectiveRelocModel(const Triple &TT,
                                           Optional<Reloc::Model> RM) {
  if (!RM.hasValue())
    // Default relocation model on Darwin is PIC.
    return TT.isOSBinFormatMachO() ? Reloc::PIC_ : Reloc::Static;

  if (*RM == Reloc::ROPI || *RM == Reloc::RWPI || *RM == Reloc::ROPI_RWPI)
    assert(TT.isOSBinFormatELF() &&
           "ROPI/RWPI currently only supported for ELF");

  // DynamicNoPIC is only used on darwin.
  if (*RM == Reloc::DynamicNoPIC && !TT.isOSDarwin())
    return Reloc::Static;

  return *RM;
}

/// Create an ARM architecture model.
///
ARMBaseTargetMachine::ARMBaseTargetMachine(const Target &T, const Triple &TT,
                                           StringRef CPU, StringRef FS,
                                           const TargetOptions &Options,
                                           Optional<Reloc::Model> RM,
                                           Optional<CodeModel::Model> CM,
                                           CodeGenOpt::Level OL, bool isLittle)
    : LLVMTargetMachine(T, computeDataLayout(TT, CPU, Options, isLittle), TT,
                        CPU, FS, Options, getEffectiveRelocModel(TT, RM),
                        getEffectiveCodeModel(CM, CodeModel::Small), OL),
      TargetABI(computeTargetABI(TT, CPU, Options)),
      TLOF(createTLOF(getTargetTriple())), isLittle(isLittle) {

  // Default to triple-appropriate float ABI
  if (Options.FloatABIType == FloatABI::Default) {
    if (isTargetHardFloat())
      this->Options.FloatABIType = FloatABI::Hard;
    else
      this->Options.FloatABIType = FloatABI::Soft;
  }

  // Default to triple-appropriate EABI
  if (Options.EABIVersion == EABI::Default ||
      Options.EABIVersion == EABI::Unknown) {
    // musl is compatible with glibc with regard to EABI version
    if ((TargetTriple.getEnvironment() == Triple::GNUEABI ||
         TargetTriple.getEnvironment() == Triple::GNUEABIHF ||
         TargetTriple.getEnvironment() == Triple::MuslEABI ||
         TargetTriple.getEnvironment() == Triple::MuslEABIHF) &&
        !(TargetTriple.isOSWindows() || TargetTriple.isOSDarwin()))
      this->Options.EABIVersion = EABI::GNU;
    else
      this->Options.EABIVersion = EABI::EABI5;
  }

  if (TT.isOSBinFormatMachO()) {
    this->Options.TrapUnreachable = true;
    this->Options.NoTrapAfterNoreturn = true;
  }

  // ARM supports the debug entry values.
  setSupportsDebugEntryValues(true);

  initAsmInfo();

  // ARM supports the MachineOutliner.
  setMachineOutliner(true);
  setSupportsDefaultOutlining(true);
}

ARMBaseTargetMachine::~ARMBaseTargetMachine() = default;
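
// Return (and cache) the per-function subtarget, keyed on the CPU, the feature
// string, the soft-float attribute, and minsize.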
const ARMSubtarget *
ARMBaseTargetMachine::getSubtargetImpl(const Function &F) const {
  Attribute CPUAttr = F.getFnAttribute("target-cpu");
  Attribute FSAttr = F.getFnAttribute("target-features");

  std::string CPU =
      CPUAttr.isValid() ? CPUAttr.getValueAsString().str() : TargetCPU;
  std::string FS =
      FSAttr.isValid() ? FSAttr.getValueAsString().str() : TargetFS;

  // FIXME: This is related to the code below to reset the target options,
  // we need to know whether or not the soft float flag is set on the
  // function before we can generate a subtarget. We also need to use
  // it as a key for the subtarget since that can be the only difference
  // between two functions.
  bool SoftFloat = F.getFnAttribute("use-soft-float").getValueAsBool();
  // If the soft float attribute is set on the function turn on the soft float
  // subtarget feature.
  if (SoftFloat)
    FS += FS.empty() ? "+soft-float" : ",+soft-float";

  // Use the optminsize to identify the subtarget, but don't use it in the
  // feature string.
  std::string Key = CPU + FS;
  if (F.hasMinSize())
    Key += "+minsize";

  auto &I = SubtargetMap[Key];
  if (!I) {
    // This needs to be done before we create a new subtarget since any
    // creation will depend on the TM and the code generation flags on the
    // function that reside in TargetOptions.
    resetTargetOptions(F);
    I = std::make_unique<ARMSubtarget>(TargetTriple, CPU, FS, *this, isLittle,
                                       F.hasMinSize());

    if (!I->isThumb() && !I->hasARMOps())
      F.getContext().emitError("Function '" + F.getName() + "' uses ARM "
          "instructions, but the target does not support ARM mode execution.");
  }

  return I.get();
}

TargetTransformInfo
ARMBaseTargetMachine::getTargetTransformInfo(const Function &F) {
  return TargetTransformInfo(ARMTTIImpl(this, F));
}

ARMLETargetMachine::ARMLETargetMachine(const Target &T, const Triple &TT,
                                       StringRef CPU, StringRef FS,
                                       const TargetOptions &Options,
                                       Optional<Reloc::Model> RM,
                                       Optional<CodeModel::Model> CM,
                                       CodeGenOpt::Level OL, bool JIT)
    : ARMBaseTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, true) {}

ARMBETargetMachine::ARMBETargetMachine(const Target &T, const Triple &TT,
                                       StringRef CPU, StringRef FS,
                                       const TargetOptions &Options,
                                       Optional<Reloc::Model> RM,
                                       Optional<CodeModel::Model> CM,
                                       CodeGenOpt::Level OL, bool JIT)
    : ARMBaseTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, false) {}

namespace {

/// ARM Code Generator Pass Configuration Options.
class ARMPassConfig : public TargetPassConfig {
public:
  ARMPassConfig(ARMBaseTargetMachine &TM, PassManagerBase &PM)
      : TargetPassConfig(TM, PM) {}

  ARMBaseTargetMachine &getARMTargetMachine() const {
    return getTM<ARMBaseTargetMachine>();
  }

  ScheduleDAGInstrs *
  createMachineScheduler(MachineSchedContext *C) const override {
    ScheduleDAGMILive *DAG = createGenericSchedLive(C);
    // add DAG Mutations here.
    const ARMSubtarget &ST = C->MF->getSubtarget<ARMSubtarget>();
    if (ST.hasFusion())
      DAG->addMutation(createARMMacroFusionDAGMutation());
    return DAG;
  }

  ScheduleDAGInstrs *
  createPostMachineScheduler(MachineSchedContext *C) const override {
    ScheduleDAGMI *DAG = createGenericSchedPostRA(C);
    // add DAG Mutations here.
    const ARMSubtarget &ST = C->MF->getSubtarget<ARMSubtarget>();
    if (ST.hasFusion())
      DAG->addMutation(createARMMacroFusionDAGMutation());
    return DAG;
  }

  void addIRPasses() override;
  void addCodeGenPrepare() override;
  bool addPreISel() override;
  bool addInstSelector() override;
  bool addIRTranslator() override;
  bool addLegalizeMachineIR() override;
  bool addRegBankSelect() override;
  bool addGlobalInstructionSelect() override;
  void addPreRegAlloc() override;
  void addPreSched2() override;
  void addPreEmitPass() override;
  void addPreEmitPass2() override;

  std::unique_ptr<CSEConfigBase> getCSEConfig() const override;
};
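
// Thin wrapper that runs the generic ExecutionDomainFix pass over the ARM DPR
// register class (the D registers shared by VFP and NEON instructions).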
class ARMExecutionDomainFix : public ExecutionDomainFix {
public:
  static char ID;
  ARMExecutionDomainFix() : ExecutionDomainFix(ID, ARM::DPRRegClass) {}
  StringRef getPassName() const override {
    return "ARM Execution Domain Fix";
  }
};
char ARMExecutionDomainFix::ID;

} // end anonymous namespace

INITIALIZE_PASS_BEGIN(ARMExecutionDomainFix, "arm-execution-domain-fix",
                      "ARM Execution Domain Fix", false, false)
INITIALIZE_PASS_DEPENDENCY(ReachingDefAnalysis)
INITIALIZE_PASS_END(ARMExecutionDomainFix, "arm-execution-domain-fix",
                    "ARM Execution Domain Fix", false, false)

TargetPassConfig *ARMBaseTargetMachine::createPassConfig(PassManagerBase &PM) {
  return new ARMPassConfig(*this, PM);
}

std::unique_ptr<CSEConfigBase> ARMPassConfig::getCSEConfig() const {
  return getStandardCSEConfigForOpt(TM->getOptLevel());
}
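
// IR-level passes run before instruction selection: atomic lowering/expansion,
// an optional SimplifyCFG clean-up after atomic expansion, MVE-specific IR
// lowering, the parallel DSP pass, interleaved-access matching, and Windows
// Control Flow Guard checks.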
void ARMPassConfig::addIRPasses() {
  if (TM->Options.ThreadModel == ThreadModel::Single)
    addPass(createLowerAtomicPass());
  else
    addPass(createAtomicExpandPass());

  // Cmpxchg instructions are often used with a subsequent comparison to
  // determine whether it succeeded. We can exploit existing control-flow in
  // ldrex/strex loops to simplify this, but it needs tidying up.
  if (TM->getOptLevel() != CodeGenOpt::None && EnableAtomicTidy)
    addPass(createCFGSimplificationPass(
        SimplifyCFGOptions().hoistCommonInsts(true).sinkCommonInsts(true),
        [this](const Function &F) {
          const auto &ST = this->TM->getSubtarget<ARMSubtarget>(F);
          return ST.hasAnyDataBarrier() && !ST.isThumb1Only();
        }));

  addPass(createMVEGatherScatterLoweringPass());
  addPass(createMVELaneInterleavingPass());

  TargetPassConfig::addIRPasses();

  // Run the parallel DSP pass.
  if (getOptLevel() == CodeGenOpt::Aggressive)
    addPass(createARMParallelDSPPass());

  // Match interleaved memory accesses to ldN/stN intrinsics.
  if (TM->getOptLevel() != CodeGenOpt::None)
    addPass(createInterleavedAccessPass());

  // Add Control Flow Guard checks.
  if (TM->getTargetTriple().isOSWindows())
    addPass(createCFGuardCheckPass());
}

void ARMPassConfig::addCodeGenPrepare() {
  if (getOptLevel() != CodeGenOpt::None)
    addPass(createTypePromotionPass());
  TargetPassConfig::addCodeGenPrepare();
}
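
// Pre-ISel IR passes: optional global merging, and (when optimising) hardware
// loop formation, MVE tail predication, and a barrier that keeps all IR passes
// ahead of any ISel work (see the FIXME below).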
bool ARMPassConfig::addPreISel() {
  if ((TM->getOptLevel() != CodeGenOpt::None &&
       EnableGlobalMerge == cl::BOU_UNSET) ||
      EnableGlobalMerge == cl::BOU_TRUE) {
    // FIXME: This is using the thumb1 only constant value for
    // maximal global offset for merging globals. We may want
    // to look into using the old value for non-thumb1 code of
    // 4095 based on the TargetMachine, but this starts to become
    // tricky when doing code gen per function.
    bool OnlyOptimizeForSize = (TM->getOptLevel() < CodeGenOpt::Aggressive) &&
                               (EnableGlobalMerge == cl::BOU_UNSET);
    // Merging of extern globals is enabled by default on non-Mach-O as we
    // expect it to be generally either beneficial or harmless. On Mach-O it
    // is disabled as we emit the .subsections_via_symbols directive which
    // means that merging extern globals is not safe.
    bool MergeExternalByDefault = !TM->getTargetTriple().isOSBinFormatMachO();
    addPass(createGlobalMergePass(TM, 127, OnlyOptimizeForSize,
                                  MergeExternalByDefault));
  }

  if (TM->getOptLevel() != CodeGenOpt::None) {
    addPass(createHardwareLoopsPass());
    addPass(createMVETailPredicationPass());
    // FIXME: IR passes can delete address-taken basic blocks, deleting
    // corresponding blockaddresses. ARMConstantPoolConstant holds references to
    // address-taken basic blocks which can be invalidated if the function
    // containing the blockaddress has already been codegen'd and the basic
    // block is removed. Work around this by forcing all IR passes to run before
    // any ISel takes place. We should have a more principled way of handling
    // this. See D99707 for more details.
    addPass(createBarrierNoopPass());
  }

  return false;
}

bool ARMPassConfig::addInstSelector() {
  addPass(createARMISelDag(getARMTargetMachine(), getOptLevel()));
  return false;
}
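
// GlobalISel hooks: translate LLVM IR to generic MachineIR, legalize it,
// assign register banks, and finally select target instructions.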
bool ARMPassConfig::addIRTranslator() {
  addPass(new IRTranslator(getOptLevel()));
  return false;
}

bool ARMPassConfig::addLegalizeMachineIR() {
  addPass(new Legalizer());
  return false;
}

bool ARMPassConfig::addRegBankSelect() {
  addPass(new RegBankSelect());
  return false;
}

bool ARMPassConfig::addGlobalInstructionSelect() {
  addPass(new InstructionSelect(getOptLevel()));
  return false;
}
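
// Passes run before register allocation (only when optimising): MVE VPT
// optimisations, MLx (multiply-accumulate) expansion, pre-RA load/store
// optimisation, and the Cortex-A15 S->D register access optimisation.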
void ARMPassConfig::addPreRegAlloc() {
  if (getOptLevel() != CodeGenOpt::None) {
    addPass(createMVETPAndVPTOptimisationsPass());

    addPass(createMLxExpansionPass());

    if (EnableARMLoadStoreOpt)
      addPass(createARMLoadStoreOptimizationPass(/* pre-register alloc */ true));

    if (!DisableA15SDOptimization)
      addPass(createA15SDOptimizerPass());
  }
}
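
// Passes run before the second scheduling pass: post-RA load/store
// optimisation, execution-domain fixes, pseudo expansion, Thumb2 size
// reduction, if-conversion, VPT/IT block formation, post-RA scheduling, and
// the indirect-thunk insertion pass used for SLS hardening.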
2014-12-12 05:26:47 +08:00
|
|
|
void ARMPassConfig::addPreSched2() {
|
2011-11-16 16:38:26 +08:00
|
|
|
if (getOptLevel() != CodeGenOpt::None) {
|
2015-03-27 02:38:04 +08:00
|
|
|
if (EnableARMLoadStoreOpt)
|
|
|
|
addPass(createARMLoadStoreOptimizationPass());
|
|
|
|
|
2018-01-22 18:05:23 +08:00
|
|
|
addPass(new ARMExecutionDomainFix());
|
2018-01-22 18:06:50 +08:00
|
|
|
addPass(createBreakFalseDeps());
|
2010-11-12 04:50:14 +08:00
|
|
|
}
|
2009-09-30 16:53:01 +08:00
|
|
|
|
2009-11-07 07:52:48 +08:00
|
|
|
// Expand some pseudo instructions into multiple instructions to allow
|
|
|
|
// proper scheduling.
|
2014-12-12 07:18:03 +08:00
|
|
|
addPass(createARMExpandPseudoPass());
|
2009-11-07 07:52:48 +08:00
|
|
|
|
2011-11-16 16:38:26 +08:00
|
|
|
if (getOptLevel() != CodeGenOpt::None) {
|
2020-06-18 17:25:24 +08:00
|
|
|
// When optimising for size, always run the Thumb2SizeReduction pass before
|
|
|
|
// IfConversion. Otherwise, check whether IT blocks are restricted
|
|
|
|
// (e.g. in v8, IfConversion depends on Thumb instruction widths)
|
2015-06-09 02:50:43 +08:00
|
|
|
addPass(createThumb2SizeReductionPass([this](const Function &F) {
|
2020-06-18 17:25:24 +08:00
|
|
|
return this->TM->getSubtarget<ARMSubtarget>(F).hasMinSize() ||
|
|
|
|
this->TM->getSubtarget<ARMSubtarget>(F).restrictIT();
|
2015-06-09 02:50:43 +08:00
|
|
|
}));
|
|
|
|
|
2016-10-25 07:23:02 +08:00
|
|
|
addPass(createIfConverter([](const MachineFunction &MF) {
|
|
|
|
return !MF.getSubtarget<ARMSubtarget>().isThumb1Only();
|
2015-06-09 02:50:43 +08:00
|
|
|
}));
|
2015-03-27 02:38:04 +08:00
|
|
|
}
|
2019-06-14 19:46:05 +08:00
|
|
|
addPass(createMVEVPTBlockPass());
|
2015-03-05 08:23:40 +08:00
|
|
|
addPass(createThumb2ITBlockPass());
|
2019-11-05 17:10:58 +08:00
|
|
|
|
|
|
|
// Add both scheduling passes to give the subtarget an opportunity to pick
|
|
|
|
// between them.
|
|
|
|
if (getOptLevel() != CodeGenOpt::None) {
|
|
|
|
addPass(&PostMachineSchedulerID);
|
|
|
|
addPass(&PostRASchedulerID);
|
|
|
|
}
|
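
  // Registering both post-RA schedulers lets the subtarget decide which one
  // actually runs: each pass consults its TargetSubtargetInfo hook
  // (enablePostRAMachineScheduler / enablePostRAScheduler) before doing any
  // work, so at most one of them does real scheduling for a given subtarget.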
[ARM] Implement harden-sls-retbr for ARM mode
Some processors may speculatively execute the instructions immediately
following indirect control flow, such as returns, indirect jumps and
indirect function calls.
To prevent a mis-speculatively executed gadget after these instructions
from leaking secrets through side channels, this pass places a speculation
barrier immediately after every indirect control flow change where control
flow doesn't return to the next instruction, such as returns and indirect
jumps, but not indirect function calls.
Hardening of indirect function calls will be done in a later,
independent patch.
This patch implements the same functionality as its AArch64 counterpart
(https://reviews.llvm.org/D81400).
For AArch64, returns and indirect jumps only occur on RET and BR
instructions and hence the function attribute to control the hardening
is called "harden-sls-retbr" there. On AArch32, there is a much wider
variety of instructions that can trigger an indirect unconditional
control flow change. I've decided to stick with the name
"harden-sls-retbr" as introduced for the corresponding AArch64
mitigation.
This patch implements this for ARM mode. A future patch will extend this
to also support Thumb mode.
The inserted barriers are never on the correct, architectural execution
path, and therefore the performance overhead of this is expected to be low.
To ensure these barriers are never on an architecturally executed path,
when the harden-sls-retbr function attribute is present, indirect
control flow is never conditionalized/predicated.
On targets that implement the Armv8.0-SB Speculation Barrier extension,
a single SB instruction is emitted that acts as a speculation barrier.
On other targets, a DSB SYS followed by an ISB is emitted to act as a
speculation barrier.
These speculation barriers are implemented as pseudo instructions to
prevent later passes from analyzing them and potentially removing them.
The mitigation is off by default and can be enabled by the
harden-sls-retbr subtarget feature.
Differential Revision: https://reviews.llvm.org/D92395
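
As a minimal sketch of what that means at the instruction level (hand-written
for illustration, not taken from the pass's output), a hardened return is
followed by a barrier that is never architecturally executed:

  bx    lr        @ indirect control flow change (return)
  sb              @ with the Armv8.0-SB extension: single speculation barrier

or, on targets without SB:

  bx    lr
  dsb   sy        @ data synchronization barrier (full system)
  isb             @ instruction synchronization barrier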

  addPass(createARMIndirectThunks());
  addPass(createARMSLSHardeningPass());
}

void ARMPassConfig::addPreEmitPass() {
  addPass(createThumb2SizeReductionPass());

  // Constant island pass works on unbundled instructions.
  addPass(createUnpackMachineBundles([](const MachineFunction &MF) {
    return MF.getSubtarget<ARMSubtarget>().isThumb2();
  }));

  // Don't optimize barriers or block placement at -O0.
  if (getOptLevel() != CodeGenOpt::None) {
    addPass(createARMBlockPlacementPass());
    addPass(createARMOptimizeBarriersPass());
  }
}

void ARMPassConfig::addPreEmitPass2() {
  addPass(createARMConstantIslandPass());
  addPass(createARMLowOverheadLoopsPass());
Add Windows Control Flow Guard checks (/guard:cf).
Summary:
A new function pass (Transforms/CFGuard/CFGuard.cpp) inserts CFGuard checks on
indirect function calls, using either the check mechanism (X86, ARM, AArch64)
or the dispatch mechanism (X86-64). The check mechanism requires a new calling
convention for the supported targets. The dispatch mechanism adds the target as
an operand bundle, which is processed by SelectionDAG. Another pass
(CodeGen/CFGuardLongjmp.cpp) identifies and emits valid longjmp targets, as
required by /guard:cf. This feature is enabled using the `cfguard` CC1 option.
Reviewers: thakis, rnk, theraven, pcc
Subscribers: ychen, hans, metalcanine, dmajor, tomrittervg, alex, mehdi_amini, mgorny, javed.absar, kristof.beyls, hiraditya, steven_wu, dexonsmith, cfe-commits, llvm-commits
Tags: #clang, #llvm
Differential Revision: https://reviews.llvm.org/D65761
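
Conceptually (an illustrative sketch only, not the exact code the pass emits),
the check mechanism rewrites an indirect call through a function pointer
`Target` so that the address is validated before the call:

  (*__guard_check_icall_fptr)(Target);  // fast-fails if Target is not a valid call target
  Target(Args...);                      // original indirect call proceeds unchanged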

  if (TM->getTargetTriple().isOSWindows()) {
    // Identify valid longjmp targets for Windows Control Flow Guard.
    addPass(createCFGuardLongjmpPass());
    // Identify valid eh continuation targets for Windows EHCont Guard.
    addPass(createEHContGuardCatchretPass());
  }
}