//===-- AArch64TargetMachine.cpp - Define TargetMachine for AArch64 -------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
//
//===----------------------------------------------------------------------===//
#include "AArch64TargetMachine.h"
#include "AArch64.h"
#include "AArch64MacroFusion.h"
#include "AArch64Subtarget.h"
#include "AArch64TargetObjectFile.h"
#include "AArch64TargetTransformInfo.h"
#include "MCTargetDesc/AArch64MCTargetDesc.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/GlobalISel/IRTranslator.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelect.h"
#include "llvm/CodeGen/GlobalISel/Legalizer.h"
#include "llvm/CodeGen/GlobalISel/Localizer.h"
#include "llvm/CodeGen/GlobalISel/RegBankSelect.h"
#include "llvm/CodeGen/MachineScheduler.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Function.h"
#include "llvm/MC/MCTargetOptions.h"
#include "llvm/Pass.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Transforms/Scalar.h"
#include <memory>
#include <string>

using namespace llvm;

static cl::opt<bool> EnableCCMP("aarch64-enable-ccmp",
                                cl::desc("Enable the CCMP formation pass"),
                                cl::init(true), cl::Hidden);
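
// The conditional branch tuning pass rewrites CBZ/CBNZ/TBZ/TBNZ into a plain
// conditional branch (Bcc) when the NZCV flags can be set for "free"; this is
// preferred on targets that can schedule Bcc more flexibly and it can reduce
// register pressure. Examples from the commit that introduced the pass:
//   add w8, w0, w1 -> cmn w0, w1     ; CMN is an alias of ADDS.
//   cbz w8, .LBB0_2 -> b.eq .LBB0_2  ; single def/use of w8 removed.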
static cl::opt<bool>
    EnableCondBrTuning("aarch64-enable-cond-br-tune",
                       cl::desc("Enable the conditional branch tuning pass"),
                       cl::init(true), cl::Hidden);

static cl::opt<bool> EnableMCR("aarch64-enable-mcr",
                               cl::desc("Enable the machine combiner pass"),
                               cl::init(true), cl::Hidden);

static cl::opt<bool> EnableStPairSuppress("aarch64-enable-stp-suppress",
                                          cl::desc("Suppress STP for AArch64"),
                                          cl::init(true), cl::Hidden);

static cl::opt<bool> EnableAdvSIMDScalar(
    "aarch64-enable-simd-scalar",
    cl::desc("Enable use of AdvSIMD scalar integer instructions"),
    cl::init(false), cl::Hidden);

static cl::opt<bool>
    EnablePromoteConstant("aarch64-enable-promote-const",
                          cl::desc("Enable the promote constant pass"),
                          cl::init(true), cl::Hidden);
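
// Linker optimization hints (LOH) annotate instruction sequences (e.g.
// ADRP-based address computations) so that the Mach-O linker can simplify
// them; note the isOSBinFormatMachO() guard in addPreEmitPass() below.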
static cl::opt<bool> EnableCollectLOH(
    "aarch64-enable-collect-loh",
    cl::desc("Enable the pass that emits the linker optimization hints (LOH)"),
    cl::init(true), cl::Hidden);

static cl::opt<bool>
    EnableDeadRegisterElimination("aarch64-enable-dead-defs", cl::Hidden,
                                  cl::desc("Enable the pass that removes dead"
                                           " definitions and replaces stores to"
                                           " them with stores to the zero"
                                           " register"),
                                  cl::init(true));
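
// The redundant copy elimination pass removes unnecessary zero copies in
// blocks that are targets of cbz/cbnz instructions. Example from the commit
// that introduced the pass: the mov below is dead, because the cbz only
// branches to BB1 when x0 is already zero:
//   BB0: cbz x0, .BB1
//   BB1: mov x0, xzr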
static cl::opt<bool> EnableRedundantCopyElimination(
    "aarch64-enable-copyelim",
    cl::desc("Enable the redundant copy elimination pass"), cl::init(true),
    cl::Hidden);

static cl::opt<bool> EnableLoadStoreOpt("aarch64-enable-ldst-opt",
                                        cl::desc("Enable the load/store pair"
                                                 " optimization pass"),
                                        cl::init(true), cl::Hidden);

static cl::opt<bool> EnableAtomicTidy(
    "aarch64-enable-atomic-cfg-tidy", cl::Hidden,
    cl::desc("Run SimplifyCFG after expanding atomic operations"
             " to make use of cmpxchg flow-based information"),
    cl::init(true));

static cl::opt<bool>
    EnableEarlyIfConversion("aarch64-enable-early-ifcvt", cl::Hidden,
                            cl::desc("Run early if-conversion"),
                            cl::init(true));

static cl::opt<bool>
    EnableCondOpt("aarch64-enable-condopt",
                  cl::desc("Enable the condition optimizer pass"),
                  cl::init(true), cl::Hidden);

static cl::opt<bool>
    EnableA53Fix835769("aarch64-fix-cortex-a53-835769", cl::Hidden,
                       cl::desc("Work around Cortex-A53 erratum 835769"),
                       cl::init(false));

static cl::opt<bool>
    EnableGEPOpt("aarch64-enable-gep-opt", cl::Hidden,
                 cl::desc("Enable optimizations on complex GEPs"),
                 cl::init(false));

static cl::opt<bool>
    BranchRelaxation("aarch64-enable-branch-relax", cl::Hidden, cl::init(true),
                     cl::desc("Relax out of range conditional branches"));

// FIXME: Unify control over GlobalMerge.
static cl::opt<cl::boolOrDefault>
    EnableGlobalMerge("aarch64-enable-global-merge", cl::Hidden,
                      cl::desc("Enable the global merge pass"));

static cl::opt<bool>
    EnableLoopDataPrefetch("aarch64-enable-loop-data-prefetch", cl::Hidden,
                           cl::desc("Enable the loop data prefetch pass"),
                           cl::init(true));

static cl::opt<int> EnableGlobalISelAtO(
    "aarch64-enable-global-isel-at-O", cl::Hidden,
    cl::desc("Enable GlobalISel at or below an opt level (-1 to disable)"),
    cl::init(0));
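// Usage sketch: llc -aarch64-enable-global-isel-at-O=3 enables GlobalISel at
// every optimization level, while the default of 0 enables it only at -O0
// (see the constructor below, which compares getOptLevel() to this value).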

static cl::opt<bool> EnableFalkorHWPFFix("aarch64-enable-falkor-hwpf-fix",
                                         cl::init(true), cl::Hidden);
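
// Entry point invoked through the target registry (e.g. by
// InitializeAllTargets()) to register the AArch64 target machines and the
// backend passes.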
extern "C" void LLVMInitializeAArch64Target() {
  // Register the target.
  RegisterTargetMachine<AArch64leTargetMachine> X(getTheAArch64leTarget());
  RegisterTargetMachine<AArch64beTargetMachine> Y(getTheAArch64beTarget());
  RegisterTargetMachine<AArch64leTargetMachine> Z(getTheARM64Target());

  auto PR = PassRegistry::getPassRegistry();
  initializeGlobalISel(*PR);
  initializeAArch64A53Fix835769Pass(*PR);
  initializeAArch64A57FPLoadBalancingPass(*PR);
  initializeAArch64AdvSIMDScalarPass(*PR);
  initializeAArch64CollectLOHPass(*PR);
  initializeAArch64ConditionalComparesPass(*PR);
  initializeAArch64ConditionOptimizerPass(*PR);
  initializeAArch64DeadRegisterDefinitionsPass(*PR);
  initializeAArch64ExpandPseudoPass(*PR);
  initializeAArch64LoadStoreOptPass(*PR);
  initializeAArch64SIMDInstrOptPass(*PR);
  initializeAArch64PromoteConstantPass(*PR);
  initializeAArch64RedundantCopyEliminationPass(*PR);
  initializeAArch64StorePairSuppressPass(*PR);
  initializeFalkorHWPFFixPass(*PR);
  initializeFalkorMarkStridedAccessesLegacyPass(*PR);
  initializeLDTLSCleanupPass(*PR);
}

//===----------------------------------------------------------------------===//
// AArch64 Lowering public interface.
//===----------------------------------------------------------------------===//
static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
  if (TT.isOSBinFormatMachO())
    return llvm::make_unique<AArch64_MachoTargetObjectFile>();
  if (TT.isOSBinFormatCOFF())
    return llvm::make_unique<AArch64_COFFTargetObjectFile>();

  return llvm::make_unique<AArch64_ELFTargetObjectFile>();
}

// Helper function to build a DataLayout string
static std::string computeDataLayout(const Triple &TT,
                                     const MCTargetOptions &Options,
                                     bool LittleEndian) {
  if (Options.getABIName() == "ilp32")
    return "e-m:e-p:32:32-i8:8-i16:16-i64:64-S128";
  if (TT.isOSBinFormatMachO())
    return "e-m:o-i64:64-i128:128-n32:64-S128";
  if (TT.isOSBinFormatCOFF())
    return "e-m:w-p:64:64-i32:32-i64:64-i128:128-n32:64-S128";
  if (LittleEndian)
    return "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128";
  return "E-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128";
}
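// For example, a little-endian ELF triple such as aarch64-linux-gnu yields
// "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128": little-endian ("e"),
// ELF name mangling ("m:e"), native 32- and 64-bit integers ("n32:64"), and
// 128-bit stack alignment ("S128").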

static Reloc::Model getEffectiveRelocModel(const Triple &TT,
                                           Optional<Reloc::Model> RM) {
  // AArch64 Darwin is always PIC.
  if (TT.isOSDarwin())
    return Reloc::PIC_;
  // On ELF platforms the default static relocation model has a smart enough
  // linker to cope with referencing external symbols defined in a shared
  // library. Hence DynamicNoPIC doesn't need to be promoted to PIC.
  if (!RM.hasValue() || *RM == Reloc::DynamicNoPIC)
    return Reloc::Static;
  return *RM;
}
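// In other words, any explicitly requested model other than DynamicNoPIC is
// honored (e.g. llc -relocation-model=pic), and non-Darwin targets default to
// Reloc::Static when no model is given.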

static CodeModel::Model getEffectiveCodeModel(const Triple &TT,
                                              Optional<CodeModel::Model> CM,
                                              bool JIT) {
  if (CM) {
    if (*CM != CodeModel::Small && *CM != CodeModel::Large) {
      if (!TT.isOSFuchsia())
        report_fatal_error(
            "Only small and large code models are allowed on AArch64");
      else if (*CM != CodeModel::Kernel)
        report_fatal_error(
            "Only small, kernel, and large code models are allowed on AArch64");
    }
    return *CM;
  }
  // The default MCJIT memory managers make no guarantees about where they can
  // find an executable page; JITed code needs to be able to refer to globals
  // no matter how far away they are.
  if (JIT)
    return CodeModel::Large;
  return CodeModel::Small;
}
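// For example, -code-model=kernel is rejected except on Fuchsia targets, and
// a JIT that does not request a code model explicitly gets CodeModel::Large.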

/// Create an AArch64 architecture model.
///
AArch64TargetMachine::AArch64TargetMachine(const Target &T, const Triple &TT,
                                           StringRef CPU, StringRef FS,
                                           const TargetOptions &Options,
                                           Optional<Reloc::Model> RM,
                                           Optional<CodeModel::Model> CM,
                                           CodeGenOpt::Level OL, bool JIT,
                                           bool LittleEndian)
    : LLVMTargetMachine(T,
                        computeDataLayout(TT, Options.MCOptions, LittleEndian),
                        TT, CPU, FS, Options, getEffectiveRelocModel(TT, RM),
                        getEffectiveCodeModel(TT, CM, JIT), OL),
      TLOF(createTLOF(getTargetTriple())), isLittle(LittleEndian) {
  initAsmInfo();

  // Enable GlobalISel at or below EnableGlobalISelAtO.
  if (getOptLevel() <= EnableGlobalISelAtO)
    setGlobalISel(true);
}

AArch64TargetMachine::~AArch64TargetMachine() = default;

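// Subtargets are cached keyed on the concatenation of the CPU and feature
// strings, so functions with different "target-cpu" / "target-features"
// attributes each get a matching AArch64Subtarget while identical ones are
// shared.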
const AArch64Subtarget *
AArch64TargetMachine::getSubtargetImpl(const Function &F) const {
  Attribute CPUAttr = F.getFnAttribute("target-cpu");
  Attribute FSAttr = F.getFnAttribute("target-features");

  std::string CPU = !CPUAttr.hasAttribute(Attribute::None)
                        ? CPUAttr.getValueAsString().str()
                        : TargetCPU;
  std::string FS = !FSAttr.hasAttribute(Attribute::None)
                       ? FSAttr.getValueAsString().str()
                       : TargetFS;

  auto &I = SubtargetMap[CPU + FS];
  if (!I) {
    // This needs to be done before we create a new subtarget since any
    // creation will depend on the TM and the code generation flags on the
    // function that reside in TargetOptions.
    resetTargetOptions(F);
    I = llvm::make_unique<AArch64Subtarget>(TargetTriple, CPU, FS, *this,
                                            isLittle);
  }
  return I.get();
}

void AArch64leTargetMachine::anchor() { }

AArch64leTargetMachine::AArch64leTargetMachine(
    const Target &T, const Triple &TT, StringRef CPU, StringRef FS,
    const TargetOptions &Options, Optional<Reloc::Model> RM,
    Optional<CodeModel::Model> CM, CodeGenOpt::Level OL, bool JIT)
    : AArch64TargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, JIT, true) {}

void AArch64beTargetMachine::anchor() { }

AArch64beTargetMachine::AArch64beTargetMachine(
    const Target &T, const Triple &TT, StringRef CPU, StringRef FS,
    const TargetOptions &Options, Optional<Reloc::Model> RM,
    Optional<CodeModel::Model> CM, CodeGenOpt::Level OL, bool JIT)
    : AArch64TargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, JIT, false) {}

namespace {

/// AArch64 Code Generator Pass Configuration Options.
class AArch64PassConfig : public TargetPassConfig {
public:
  AArch64PassConfig(AArch64TargetMachine &TM, PassManagerBase &PM)
      : TargetPassConfig(TM, PM) {
    if (TM.getOptLevel() != CodeGenOpt::None)
      substitutePass(&PostRASchedulerID, &PostMachineSchedulerID);
  }

  AArch64TargetMachine &getAArch64TargetMachine() const {
    return getTM<AArch64TargetMachine>();
  }

  ScheduleDAGInstrs *
  createMachineScheduler(MachineSchedContext *C) const override {
    const AArch64Subtarget &ST = C->MF->getSubtarget<AArch64Subtarget>();
    ScheduleDAGMILive *DAG = createGenericSchedLive(C);
    DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
    DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
    if (ST.hasFusion())
      DAG->addMutation(createAArch64MacroFusionDAGMutation());
    return DAG;
  }

  ScheduleDAGInstrs *
  createPostMachineScheduler(MachineSchedContext *C) const override {
    const AArch64Subtarget &ST = C->MF->getSubtarget<AArch64Subtarget>();
    if (ST.hasFusion()) {
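      // hasFusion() is true if the subtarget supports any kind of instruction
      // fusion. Per the commit that added it, the fusion mutation is
      // registered with the PostRA scheduler as well because instructions
      // scheduled back to back may be re-scheduled there
      // (https://reviews.llvm.org/D34958).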
      // Run the Macro Fusion after RA again since literals are expanded from
      // pseudos then (v. addPreSched2()).
      ScheduleDAGMI *DAG = createGenericSchedPostRA(C);
      DAG->addMutation(createAArch64MacroFusionDAGMutation());
      return DAG;
    }

    return nullptr;
  }

  void addIRPasses() override;
  bool addPreISel() override;
  bool addInstSelector() override;
  bool addIRTranslator() override;
  bool addLegalizeMachineIR() override;
  bool addRegBankSelect() override;
  void addPreGlobalInstructionSelect() override;
  bool addGlobalInstructionSelect() override;
  bool addILPOpts() override;
  void addPreRegAlloc() override;
  void addPostRegAlloc() override;
  void addPreSched2() override;
  void addPreEmitPass() override;
};

} // end anonymous namespace

TargetTransformInfo
AArch64TargetMachine::getTargetTransformInfo(const Function &F) {
  return TargetTransformInfo(AArch64TTIImpl(this, F));
}

TargetPassConfig *AArch64TargetMachine::createPassConfig(PassManagerBase &PM) {
  return new AArch64PassConfig(*this, PM);
}

void AArch64PassConfig::addIRPasses() {
  // Always expand atomic operations, we don't deal with atomicrmw or cmpxchg
  // ourselves.
  addPass(createAtomicExpandPass());

  // Cmpxchg instructions are often used with a subsequent comparison to
  // determine whether it succeeded. We can exploit existing control-flow in
  // ldrex/strex loops to simplify this, but it needs tidying up.
  if (TM->getOptLevel() != CodeGenOpt::None && EnableAtomicTidy)
    addPass(createCFGSimplificationPass(1, true, true, false, true));

  // Run LoopDataPrefetch
  //
  // Run this before LSR to remove the multiplies involved in computing the
  // pointer values N iterations ahead.
  if (TM->getOptLevel() != CodeGenOpt::None) {
    if (EnableLoopDataPrefetch)
      addPass(createLoopDataPrefetchPass());
    if (EnableFalkorHWPFFix)
      addPass(createFalkorMarkStridedAccessesPass());
  }

  TargetPassConfig::addIRPasses();

  // Match interleaved memory accesses to ldN/stN intrinsics.
  if (TM->getOptLevel() != CodeGenOpt::None)
    addPass(createInterleavedAccessPass());
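  // For example (from the commit that added the pass), an interleaved load
  //   %wide.vec = load <8 x i32>, <8 x i32>* %ptr
  //   %v0 = shuffle %wide.vec, undef, <0, 2, 4, 6>
  //   %v1 = shuffle %wide.vec, undef, <1, 3, 5, 7>
  // is lowered to a single llvm.aarch64.neon.ld2 call whose two results
  // replace the shuffles (http://reviews.llvm.org/D10533).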

  if (TM->getOptLevel() == CodeGenOpt::Aggressive && EnableGEPOpt) {
    // Call SeparateConstOffsetFromGEP pass to extract constants within indices
    // and lower a GEP with multiple indices to either arithmetic operations or
    // multiple GEPs with single index.
    addPass(createSeparateConstOffsetFromGEPPass(true));
    // Call EarlyCSE pass to find and remove subexpressions in the lowered
    // result.
    addPass(createEarlyCSEPass());
    // Do loop invariant code motion in case part of the lowered result is
    // invariant.
    addPass(createLICMPass());
  }
}

// Pass Pipeline Configuration
bool AArch64PassConfig::addPreISel() {
  // Run promote constant before global merge, so that the promoted constants
  // get a chance to be merged.
  if (TM->getOptLevel() != CodeGenOpt::None && EnablePromoteConstant)
    addPass(createAArch64PromoteConstantPass());
  // FIXME: On AArch64, this depends on the type.
  // Basically, the addressable offsets are up to 4095 * Ty.getSizeInBytes(),
  // and the offset has to be a multiple of the related size in bytes.
  if ((TM->getOptLevel() != CodeGenOpt::None &&
       EnableGlobalMerge == cl::BOU_UNSET) ||
      EnableGlobalMerge == cl::BOU_TRUE) {
    bool OnlyOptimizeForSize = (TM->getOptLevel() < CodeGenOpt::Aggressive) &&
                               (EnableGlobalMerge == cl::BOU_UNSET);
    addPass(createGlobalMergePass(TM, 4095, OnlyOptimizeForSize));
  }

  return false;
}

bool AArch64PassConfig::addInstSelector() {
  addPass(createAArch64ISelDag(getAArch64TargetMachine(), getOptLevel()));

  // For ELF, cleanup any local-dynamic TLS accesses (i.e. combine as many
  // references to _TLS_MODULE_BASE_ as possible).
  if (TM->getTargetTriple().isOSBinFormatELF() &&
      getOptLevel() != CodeGenOpt::None)
    addPass(createAArch64CleanupLocalDynamicTLSPass());

  return false;
}

bool AArch64PassConfig::addIRTranslator() {
  addPass(new IRTranslator());
  return false;
}

bool AArch64PassConfig::addLegalizeMachineIR() {
  addPass(new Legalizer());
  return false;
}

bool AArch64PassConfig::addRegBankSelect() {
  addPass(new RegBankSelect());
  return false;
}

void AArch64PassConfig::addPreGlobalInstructionSelect() {
  // Workaround the deficiency of the fast register allocator.
  if (TM->getOptLevel() == CodeGenOpt::None)
    addPass(new Localizer());
}

bool AArch64PassConfig::addGlobalInstructionSelect() {
  addPass(new InstructionSelect());
  return false;
}

bool AArch64PassConfig::addILPOpts() {
  if (EnableCondOpt)
    addPass(createAArch64ConditionOptimizerPass());
  if (EnableCCMP)
    addPass(createAArch64ConditionalCompares());
  if (EnableMCR)
    addPass(&MachineCombinerID);
  if (EnableCondBrTuning)
    addPass(createAArch64CondBrTuning());
  if (EnableEarlyIfConversion)
    addPass(&EarlyIfConverterID);
  if (EnableStPairSuppress)
    addPass(createAArch64StorePairSuppressPass());
  addPass(createAArch64SIMDInstrOptPass());
  return true;
}

void AArch64PassConfig::addPreRegAlloc() {
  // Change dead register definitions to refer to the zero register.
  if (TM->getOptLevel() != CodeGenOpt::None && EnableDeadRegisterElimination)
    addPass(createAArch64DeadRegisterDefinitions());

  // Use AdvSIMD scalar instructions whenever profitable.
  if (TM->getOptLevel() != CodeGenOpt::None && EnableAdvSIMDScalar) {
    addPass(createAArch64AdvSIMDScalar());
    // The AdvSIMD pass may produce copies that can be rewritten to
    // be register coalescer friendly.
    addPass(&PeepholeOptimizerID);
  }
}

void AArch64PassConfig::addPostRegAlloc() {
  // Remove redundant copy instructions.
  if (TM->getOptLevel() != CodeGenOpt::None && EnableRedundantCopyElimination)
    addPass(createAArch64RedundantCopyEliminationPass());

  if (TM->getOptLevel() != CodeGenOpt::None && usingDefaultRegAlloc())
    // Improve performance for some FP/SIMD code for A57.
    addPass(createAArch64A57FPLoadBalancing());
}

void AArch64PassConfig::addPreSched2() {
  // Expand some pseudo instructions to allow proper scheduling.
  addPass(createAArch64ExpandPseudoPass());
  // Use load/store pair instructions when possible.
  if (TM->getOptLevel() != CodeGenOpt::None) {
    if (EnableLoadStoreOpt)
      addPass(createAArch64LoadStoreOptimizationPass());
    if (EnableFalkorHWPFFix)
      addPass(createFalkorHWPFFixPass());
  }
}

void AArch64PassConfig::addPreEmitPass() {
  if (EnableA53Fix835769)
    addPass(createAArch64A53Fix835769());
  // Relax conditional branch instructions if they're otherwise out of
  // range of their destination.
  if (BranchRelaxation)
    addPass(&BranchRelaxationPassID);

  if (TM->getOptLevel() != CodeGenOpt::None && EnableCollectLOH &&
      TM->getTargetTriple().isOSBinFormatMachO())
    addPass(createAArch64CollectLOHPass());
}