//===-- ARMSubtarget.h - Define Subtarget for the ARM ----------*- C++ -*--===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares the ARM specific subclass of TargetSubtargetInfo.
//
//===----------------------------------------------------------------------===//
|
2014-08-14 00:26:38 +08:00
|
|
|
#ifndef LLVM_LIB_TARGET_ARM_ARMSUBTARGET_H
|
|
|
|
#define LLVM_LIB_TARGET_ARM_ARMSUBTARGET_H
|
2007-01-19 15:51:42 +08:00
|
|
|
|
2017-01-28 07:58:02 +08:00
|
|
|
#include "ARMBaseInstrInfo.h"
|
|
|
|
#include "ARMBaseRegisterInfo.h"
|
2017-08-29 17:47:55 +08:00
|
|
|
#include "ARMConstantPoolValue.h"
|
2014-06-27 03:30:02 +08:00
|
|
|
#include "ARMFrameLowering.h"
|
|
|
|
#include "ARMISelLowering.h"
|
2014-06-13 08:20:39 +08:00
|
|
|
#include "ARMSelectionDAGInfo.h"
|
2011-01-12 05:46:47 +08:00
|
|
|
#include "llvm/ADT/Triple.h"
|
2017-08-16 06:31:51 +08:00
|
|
|
#include "llvm/CodeGen/GlobalISel/CallLowering.h"
|
|
|
|
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
|
|
|
|
#include "llvm/CodeGen/GlobalISel/LegalizerInfo.h"
|
|
|
|
#include "llvm/CodeGen/GlobalISel/RegisterBankInfo.h"
|
2017-01-28 07:58:02 +08:00
|
|
|
#include "llvm/CodeGen/MachineFunction.h"
|
2017-11-17 09:07:10 +08:00
|
|
|
#include "llvm/CodeGen/TargetSubtargetInfo.h"
|
2012-12-04 15:12:27 +08:00
|
|
|
#include "llvm/MC/MCInstrItineraries.h"
|
2017-01-28 07:58:02 +08:00
|
|
|
#include "llvm/MC/MCSchedule.h"
|
|
|
|
#include "llvm/Target/TargetOptions.h"
|
|
|
|
#include <memory>
|
2007-01-19 15:51:42 +08:00
|
|
|
#include <string>
|
|
|
|
|
2011-07-02 04:45:01 +08:00
|
|
|
#define GET_SUBTARGETINFO_HEADER
|
2011-07-02 06:36:09 +08:00
|
|
|
#include "ARMGenSubtargetInfo.inc"
|
2011-07-02 04:45:01 +08:00
|
|
|
|
2007-01-19 15:51:42 +08:00
|
|
|
namespace llvm {
|
2017-01-28 07:58:02 +08:00
|
|
|
|
|
|
|
class ARMBaseTargetMachine;
|
2009-08-29 07:18:09 +08:00
|
|
|
class GlobalValue;
|
2011-07-07 15:07:08 +08:00
|
|
|
class StringRef;
|
2007-01-19 15:51:42 +08:00
|
|
|
|
2011-07-02 04:45:01 +08:00
|
|
|
class ARMSubtarget : public ARMGenSubtargetInfo {
|
2007-01-19 15:51:42 +08:00
|
|
|
protected:
|
2010-09-10 09:29:16 +08:00
|
|
|
/// ARM processor family: used to key family-specific tuning decisions.
/// The numeric values of the enumerators carry no meaning; only identity
/// comparisons against these names are valid.
enum ARMProcFamilyEnum {
  Others,

  CortexA12,
  CortexA15,
  CortexA17,
  CortexA32,
  CortexA35,
  CortexA5,
  CortexA53,
  CortexA55,
  CortexA57,
  CortexA7,
  CortexA72,
  CortexA73,
  CortexA75,
  CortexA76,
  CortexA8,
  CortexA9,
  CortexM3,
  CortexR4,
  CortexR4F,
  CortexR5,
  CortexR52,
  CortexR7,
  Exynos,
  Krait,
  Kryo,
  NeoverseN1,
  Swift
};
|
2013-09-23 22:26:15 +08:00
|
|
|
/// ARM architecture profile of the target processor: application (AClass),
/// real-time (RClass), microcontroller (MClass), or unknown (None).
enum ARMProcClassEnum {
  None,

  AClass,
  MClass,
  RClass
};
|
2015-11-16 19:10:19 +08:00
|
|
|
/// ARM architecture version of the target. The numeric values of the
/// enumerators carry no meaning; only identity comparisons are valid.
enum ARMArchEnum {
  ARMv2,
  ARMv2a,
  ARMv3,
  ARMv3m,
  ARMv4,
  ARMv4t,
  ARMv5,
  ARMv5t,
  ARMv5te,
  ARMv5tej,
  ARMv6,
  ARMv6k,
  ARMv6kz,
  ARMv6m,
  ARMv6sm,
  ARMv6t2,
  ARMv7a,
  ARMv7em,
  ARMv7m,
  ARMv7r,
  ARMv7ve,
  ARMv81a,
  ARMv82a,
  ARMv83a,
  ARMv84a,
  ARMv85a,
  ARMv8a,
  ARMv8mBaseline,
  ARMv8mMainline,
  ARMv8r,
  ARMv81mMainline,
};
|
2010-09-10 09:29:16 +08:00
|
|
|
|
2016-06-27 17:08:23 +08:00
|
|
|
public:
|
|
|
|
/// What kind of timing do load multiple/store multiple instructions have.
enum ARMLdStMultipleTiming {
  /// Can load/store 2 registers/cycle.
  DoubleIssue,
  /// Can load/store 2 registers/cycle, but needs an extra cycle if the access
  /// is not 64-bit aligned.
  DoubleIssueCheckUnalignedAccess,
  /// Can load/store 1 register/cycle.
  SingleIssue,
  /// Can load/store 1 register/cycle, but needs an extra cycle for address
  /// computation and potentially also for register writeback.
  SingleIssuePlusExtras,
};
|
|
|
|
|
|
|
|
protected:
|
2010-09-10 09:29:16 +08:00
|
|
|
/// ARMProcFamily - ARM processor family: Cortex-A8, Cortex-A9, and others.
|
2016-06-27 21:06:10 +08:00
|
|
|
ARMProcFamilyEnum ARMProcFamily = Others;
|
2010-09-10 09:29:16 +08:00
|
|
|
|
2013-09-23 22:26:15 +08:00
|
|
|
/// ARMProcClass - ARM processor class: None, AClass, RClass or MClass.
|
2016-06-27 21:06:10 +08:00
|
|
|
ARMProcClassEnum ARMProcClass = None;
|
2013-09-23 22:26:15 +08:00
|
|
|
|
2015-11-16 19:10:19 +08:00
|
|
|
/// ARMArch - ARM architecture
|
2016-06-27 21:06:10 +08:00
|
|
|
ARMArchEnum ARMArch = ARMv4t;
|
2015-11-16 19:10:19 +08:00
|
|
|
|
2013-06-27 00:58:26 +08:00
|
|
|
/// HasV4TOps, HasV5TOps, HasV5TEOps,
/// HasV6Ops, HasV6MOps, HasV6KOps, HasV6T2Ops, HasV7Ops, HasV8Ops -
/// Specify whether target support specific ARM ISA variants.
bool HasV4TOps = false;
bool HasV5TOps = false;
bool HasV5TEOps = false;
bool HasV6Ops = false;
bool HasV6MOps = false;
bool HasV6KOps = false;
bool HasV6T2Ops = false;
bool HasV7Ops = false;
bool HasV8Ops = false;
bool HasV8_1aOps = false;
bool HasV8_2aOps = false;
bool HasV8_3aOps = false;
bool HasV8_4aOps = false;
bool HasV8_5aOps = false;
bool HasV8MBaselineOps = false;
bool HasV8MMainlineOps = false;
bool HasV8_1MMainlineOps = false;
/// MVE (M-profile Vector Extension) integer and floating-point instructions.
bool HasMVEIntegerOps = false;
bool HasMVEFloatOps = false;
/// Custom Datapath Extension (CDE) instructions (M-profile).
bool HasCDEOps = false;

/// HasVFPv2, HasVFPv3, HasVFPv4, HasFPARMv8, HasNEON - Specify what
/// floating point ISAs are supported.
bool HasVFPv2 = false;
bool HasVFPv3 = false;
bool HasVFPv4 = false;
bool HasFPARMv8 = false;
bool HasNEON = false;
bool HasFPRegs = false;
bool HasFPRegs16 = false;
bool HasFPRegs64 = false;

/// Versions of the VFP flags restricted to single precision, or to
/// 16 d-registers, or both.
bool HasVFPv2SP = false;
bool HasVFPv3SP = false;
bool HasVFPv4SP = false;
bool HasFPARMv8SP = false;
bool HasVFPv3D16 = false;
bool HasVFPv4D16 = false;
bool HasFPARMv8D16 = false;
bool HasVFPv3D16SP = false;
bool HasVFPv4D16SP = false;
bool HasFPARMv8D16SP = false;
|
|
|
|
|
2017-08-11 17:52:30 +08:00
|
|
|
/// HasDotProd - True if the ARMv8.2A dot product instructions are supported.
bool HasDotProd = false;

/// UseNEONForSinglePrecisionFP - if the NEONFP attribute has been
/// specified. Use the method useNEONForSinglePrecisionFP() to
/// determine if NEON should actually be used.
bool UseNEONForSinglePrecisionFP = false;

/// UseMulOps - True if non-microcoded fused integer multiply-add and
/// multiply-subtract instructions should be used.
bool UseMulOps = false;

/// SlowFPVMLx - If the VFP2 / NEON instructions are available, indicates
/// whether the FP VML[AS] instructions are slow (if so, don't use them).
bool SlowFPVMLx = false;

/// SlowFPVFMx - If the VFP4 / NEON instructions are available, indicates
/// whether the FP VFM[AS] instructions are slow (if so, don't use them).
bool SlowFPVFMx = false;

/// HasVMLxForwarding - If true, NEON has special multiplier accumulator
/// forwarding to allow mul + mla being issued back to back.
bool HasVMLxForwarding = false;

/// SlowFPBrcc - True if floating point compare + branch is slow.
bool SlowFPBrcc = false;

/// InThumbMode - True if compiling for Thumb, false for ARM.
bool InThumbMode = false;

/// UseSoftFloat - True if we're using software floating point features.
bool UseSoftFloat = false;

/// UseMISched - True if MachineScheduler should be used for this subtarget.
bool UseMISched = false;

/// DisablePostRAScheduler - False if scheduling should happen again after
/// register allocation.
bool DisablePostRAScheduler = false;

/// HasThumb2 - True if Thumb2 instructions are supported.
bool HasThumb2 = false;

/// NoARM - True if subtarget does not support ARM mode execution.
bool NoARM = false;

/// ReserveR9 - True if R9 is not available as a general purpose register.
bool ReserveR9 = false;

/// NoMovt - True if MOVT / MOVW pairs are not used for materialization of
/// 32-bit imms (including global addresses).
bool NoMovt = false;

/// SupportsTailCall - True if the OS supports tail call. The dynamic linker
/// must be able to synthesize call stubs for interworking between ARM and
/// Thumb.
bool SupportsTailCall = false;

/// HasFP16 - True if subtarget supports half-precision FP conversions.
bool HasFP16 = false;

/// HasFullFP16 - True if subtarget supports half-precision FP operations.
bool HasFullFP16 = false;

/// HasFP16FML - True if subtarget supports half-precision FP fml operations.
bool HasFP16FML = false;

/// HasD32 - True if subtarget has the full 32 double precision
/// FP registers for VFPv3.
bool HasD32 = false;

/// HasHardwareDivideInThumb - True if subtarget supports [su]div in Thumb
/// mode.
bool HasHardwareDivideInThumb = false;

/// HasHardwareDivideInARM - True if subtarget supports [su]div in ARM mode.
bool HasHardwareDivideInARM = false;

/// HasDataBarrier - True if the subtarget supports DMB / DSB data barrier
/// instructions.
bool HasDataBarrier = false;

/// HasFullDataBarrier - True if the subtarget supports DFB data barrier
/// instruction.
bool HasFullDataBarrier = false;

/// HasV7Clrex - True if the subtarget supports CLREX instructions.
bool HasV7Clrex = false;

/// HasAcquireRelease - True if the subtarget supports v8 atomics
/// (LDA/LDAEX etc) instructions.
bool HasAcquireRelease = false;

/// Pref32BitThumb - If true, codegen would prefer 32-bit Thumb instructions
/// over 16-bit ones.
bool Pref32BitThumb = false;

/// AvoidCPSRPartialUpdate - If true, codegen would avoid using instructions
/// that partially update CPSR and add false dependency on the previous
/// CPSR setting instruction.
bool AvoidCPSRPartialUpdate = false;

/// CheapPredicableCPSRDef - If true, disable +1 predication cost
/// for instructions updating CPSR. Enabled for Cortex-A57.
bool CheapPredicableCPSRDef = false;

/// AvoidMOVsShifterOperand - If true, codegen should avoid using flag setting
/// movs with shifter operand (i.e. asr, lsl, lsr).
bool AvoidMOVsShifterOperand = false;

/// HasRetAddrStack - Some processors perform return stack prediction.
/// CodeGen should avoid issuing "normal" call instructions to callees which
/// do not return.
bool HasRetAddrStack = false;

/// HasBranchPredictor - True if the subtarget has a branch predictor. Having
/// a branch predictor or not changes the expected cost of taking a branch
/// which affects the choice of whether to use predicated instructions.
/// Note: defaults to true, unlike the other feature flags.
bool HasBranchPredictor = true;

/// HasMPExtension - True if the subtarget supports Multiprocessing
/// extension (ARMv7 only).
bool HasMPExtension = false;
|
2010-11-03 14:34:55 +08:00
|
|
|
|
2013-11-01 21:27:35 +08:00
|
|
|
/// HasVirtualization - True if the subtarget supports the Virtualization
/// extension.
bool HasVirtualization = false;

/// HasFP64 - If true, the floating point unit supports double
/// precision.
bool HasFP64 = false;

/// If true, the processor supports the Performance Monitor Extensions. These
/// include a generic cycle-counter as well as more fine-grained (often
/// implementation-specific) events.
bool HasPerfMon = false;

/// HasTrustZone - if true, processor supports TrustZone security extensions.
bool HasTrustZone = false;

/// Has8MSecExt - if true, processor supports ARMv8-M Security Extensions.
bool Has8MSecExt = false;

/// HasSHA2 - if true, processor supports SHA1 and SHA256.
bool HasSHA2 = false;

/// HasAES - if true, processor supports AES.
bool HasAES = false;

/// HasCrypto - if true, processor supports Cryptography extensions.
bool HasCrypto = false;

/// HasCRC - if true, processor supports CRC instructions.
bool HasCRC = false;

/// HasRAS - if true, the processor supports RAS extensions.
bool HasRAS = false;

/// HasLOB - if true, the processor supports the Low Overhead Branch
/// extension.
bool HasLOB = false;

/// If true, the instructions "vmov.i32 d0, #0" and "vmov.i32 q0, #0" are
/// particularly effective at zeroing a VFP register.
bool HasZeroCycleZeroing = false;

/// HasFPAO - if true, processor does positive address offset computation
/// faster.
bool HasFPAO = false;

/// HasFuseAES - if true, processor executes back to back AES instruction
/// pairs faster.
bool HasFuseAES = false;

/// HasFuseLiterals - if true, processor executes back to back
/// bottom and top halves of literal generation faster.
bool HasFuseLiterals = false;

/// If true, if conversion may decide to leave some instructions unpredicated.
bool IsProfitableToUnpredicate = false;

/// If true, VMOV will be favored over VGETLNi32.
bool HasSlowVGETLNi32 = false;

/// If true, VMOV will be favored over VDUP.
bool HasSlowVDUP32 = false;

/// If true, VMOVSR will be favored over VMOVDRR.
bool PreferVMOVSR = false;

/// If true, ISHST barriers will be used for Release semantics.
bool PreferISHST = false;

/// If true, a VLDM/VSTM starting with an odd register number is considered to
/// take more microops than single VLDRS/VSTRS.
bool SlowOddRegister = false;
|
|
|
|
|
|
|
|
/// If true, loading into a D subregister will be penalized.
|
|
|
|
bool SlowLoadDSubregister = false;
|
|
|
|
|
2018-08-10 00:13:24 +08:00
|
|
|
/// If true, use a wider stride when allocating VFP registers.
|
|
|
|
bool UseWideStrideVFP = false;
|
|
|
|
|
2016-07-06 17:22:23 +08:00
|
|
|
/// If true, the AGU and NEON/FPU units are multiplexed.
|
|
|
|
bool HasMuxedUnits = false;
|
|
|
|
|
2018-07-21 00:49:28 +08:00
|
|
|
/// If true, VMOVS will never be widened to VMOVD.
|
2016-07-06 19:22:11 +08:00
|
|
|
bool DontWidenVMOVS = false;
|
|
|
|
|
2018-07-21 00:49:28 +08:00
|
|
|
/// If true, splat a register between VFP and NEON instructions.
|
|
|
|
bool SplatVFPToNeon = false;
|
|
|
|
|
2016-07-07 17:11:39 +08:00
|
|
|
/// If true, run the MLx expansion pass.
|
|
|
|
bool ExpandMLx = false;
|
|
|
|
|
|
|
|
/// If true, VFP/NEON VMLA/VMLS have special RAW hazards.
|
|
|
|
bool HasVMLxHazards = false;
|
|
|
|
|
2017-07-28 20:54:57 +08:00
|
|
|
// If true, read thread pointer from coprocessor register.
|
|
|
|
bool ReadTPHard = false;
|
|
|
|
|
2016-06-23 15:47:35 +08:00
|
|
|
/// If true, VMOVRS, VMOVSR and VMOVS will be converted from VFP to NEON.
|
2016-06-27 21:06:10 +08:00
|
|
|
bool UseNEONForFPMovs = false;
|
2016-06-23 15:47:35 +08:00
|
|
|
|
2016-06-27 17:08:23 +08:00
|
|
|
/// If true, VLDn instructions take an extra cycle for unaligned accesses.
|
2016-06-27 21:06:10 +08:00
|
|
|
bool CheckVLDnAlign = false;
|
2016-06-27 17:08:23 +08:00
|
|
|
|
|
|
|
/// If true, VFP instructions are not pipelined.
|
2016-06-27 21:06:10 +08:00
|
|
|
bool NonpipelinedVFP = false;
|
2016-06-27 17:08:23 +08:00
|
|
|
|
2015-07-29 06:44:28 +08:00
|
|
|
/// StrictAlign - If true, the subtarget disallows unaligned memory
|
2010-09-28 12:09:35 +08:00
|
|
|
/// accesses for some types. For details, see
|
2014-07-28 01:46:40 +08:00
|
|
|
/// ARMTargetLowering::allowsMisalignedMemoryAccesses().
|
2016-06-27 21:06:10 +08:00
|
|
|
bool StrictAlign = false;
|
2010-09-28 12:09:35 +08:00
|
|
|
|
2013-11-14 02:29:49 +08:00
|
|
|
/// RestrictIT - If true, the subtarget disallows generation of deprecated IT
|
|
|
|
/// blocks to conform to ARMv8 rule.
|
2016-06-27 21:06:10 +08:00
|
|
|
bool RestrictIT = false;
|
2013-11-14 02:29:49 +08:00
|
|
|
|
2015-09-25 01:31:16 +08:00
|
|
|
/// HasDSP - If true, the subtarget supports the DSP (saturating arith
|
|
|
|
/// and such) instructions.
|
2016-06-27 21:06:10 +08:00
|
|
|
bool HasDSP = false;
|
2011-07-02 05:12:19 +08:00
|
|
|
|
2013-01-31 00:30:19 +08:00
|
|
|
/// NaCl TRAP instruction is generated instead of the regular TRAP.
|
2016-06-27 21:06:10 +08:00
|
|
|
bool UseNaClTrap = false;
|
2013-01-31 00:30:19 +08:00
|
|
|
|
2015-07-07 14:54:42 +08:00
|
|
|
/// Generate calls via indirect call instructions.
|
2016-06-27 21:06:10 +08:00
|
|
|
bool GenLongCalls = false;
|
2015-07-07 14:54:42 +08:00
|
|
|
|
2016-12-15 15:59:08 +08:00
|
|
|
/// Generate code that does not contain data access to code sections.
|
|
|
|
bool GenExecuteOnly = false;
|
|
|
|
|
2013-03-22 02:47:47 +08:00
|
|
|
/// Target machine allowed unsafe FP math (such as use of NEON fp)
|
2016-06-27 21:06:10 +08:00
|
|
|
bool UnsafeFPMath = false;
|
2013-03-22 02:47:47 +08:00
|
|
|
|
2015-10-29 06:56:36 +08:00
|
|
|
/// UseSjLjEH - If true, the target uses SjLj exception handling (e.g. iOS).
|
2016-06-27 21:06:10 +08:00
|
|
|
bool UseSjLjEH = false;
|
2015-10-29 06:56:36 +08:00
|
|
|
|
2018-09-27 21:41:14 +08:00
|
|
|
/// Has speculation barrier
|
2019-01-03 20:09:12 +08:00
|
|
|
bool HasSB = false;
|
2018-09-27 21:41:14 +08:00
|
|
|
|
[ARM] [Assembler] Support negative immediates for A32, T32 and T16
Summary:
To support negative immediates for certain arithmetic instructions, the
instruction is converted to the inverse instruction with a negated (or inverted)
immediate. For example, "ADD r0, r1, #FFFFFFFF" cannot be encoded as an ADD
instruction. However, "SUB r0, r1, #1" is equivalent.
These conversions are different from instruction aliases. An alias maps
several assembler instructions onto one encoding. A conversion, however, maps
an *invalid* instruction--e.g. with an immediate that cannot be represented in
the encoding--to a different (but equivalent) instruction.
Several instructions with negative immediates were being converted already, but
this was not systematically tested, nor did it cover all instructions.
This patch implements all possible substitutions for ARM, Thumb1 and
Thumb2 assembler and adds tests. It also adds a feature flag
(-mattr=+no-neg-immediates) to turn these substitutions off. This is
helpful for users who want their code to assemble to exactly what they
wrote.
Reviewers: t.p.northover, rovka, samparker, javed.absar, peter.smith, rengolin
Reviewed By: javed.absar
Subscribers: aadg, aemerson, llvm-commits
Differential Revision: https://reviews.llvm.org/D30571
llvm-svn: 298380
2017-03-21 22:59:17 +08:00
|
|
|
/// Implicitly convert an instruction to a different one if its immediates
|
|
|
|
/// cannot be encoded. For example, ADD r0, r1, #FFFFFFFF -> SUB r0, r1, #1.
|
|
|
|
bool NegativeImmediates = true;
|
|
|
|
|
2007-01-19 15:51:42 +08:00
|
|
|
/// stackAlignment - The minimum alignment known to hold of the stack frame on
|
|
|
|
/// entry to the function and which must be maintained by every function.
|
[Alignment][NFC] Use Align for TargetFrameLowering/Subtarget
Summary:
This is patch is part of a series to introduce an Alignment type.
See this thread for context: http://lists.llvm.org/pipermail/llvm-dev/2019-July/133851.html
See this patch for the introduction of the type: https://reviews.llvm.org/D64790
Reviewers: courbet
Subscribers: jholewinski, arsenm, dschuff, jyknight, dylanmckay, sdardis, nemanjai, jvesely, nhaehnle, sbc100, jgravelle-google, hiraditya, aheejin, kbarton, fedor.sergeev, asb, rbar, johnrusso, simoncook, apazos, sabuasal, niosHD, jrtc27, MaskRay, zzheng, edward-jones, atanasyan, rogfer01, MartinMosbeck, brucehoult, the_o, PkmX, jocewei, jsji, Jim, lenary, s.egerton, pzheng, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D68993
llvm-svn: 375084
2019-10-17 15:49:39 +08:00
|
|
|
Align stackAlignment = Align(4);
|
2007-01-19 15:51:42 +08:00
|
|
|
|
2009-05-24 03:50:50 +08:00
|
|
|
/// CPUString - String name of used CPU.
|
|
|
|
std::string CPUString;
|
|
|
|
|
2016-06-27 21:06:10 +08:00
|
|
|
unsigned MaxInterleaveFactor = 1;
|
2016-06-27 17:08:23 +08:00
|
|
|
|
2016-07-06 19:22:11 +08:00
|
|
|
/// Clearance before partial register updates (in number of instructions)
|
|
|
|
unsigned PartialUpdateClearance = 0;
|
|
|
|
|
2016-06-27 17:08:23 +08:00
|
|
|
/// What kind of timing do load multiple/store multiple have (double issue,
|
|
|
|
/// single issue etc).
|
2016-06-27 21:06:10 +08:00
|
|
|
ARMLdStMultipleTiming LdStMultipleTiming = SingleIssue;
|
2016-06-27 17:08:23 +08:00
|
|
|
|
|
|
|
/// The adjustment that we need to apply to get the operand latency from the
|
|
|
|
/// operand cycle returned by the itinerary data for pre-ISel operands.
|
2016-06-27 21:06:10 +08:00
|
|
|
int PreISelOperandLatencyAdjustment = 2;
|
2016-06-27 17:08:23 +08:00
|
|
|
|
2018-09-13 18:28:05 +08:00
|
|
|
/// What alignment is preferred for loop bodies, in log2(bytes).
|
[LLVM][Alignment] Make functions using log of alignment explicit
Summary:
This patch renames functions that takes or returns alignment as log2, this patch will help with the transition to llvm::Align.
The renaming makes it explicit that we deal with log(alignment) instead of a power of two alignment.
A few renames uncovered dubious assignments:
- `MirParser`/`MirPrinter` was expecting powers of two but `MachineFunction` and `MachineBasicBlock` were using deal with log2(align). This patch fixes it and updates the documentation.
- `MachineBlockPlacement` exposes two flags (`align-all-blocks` and `align-all-nofallthru-blocks`) supposedly interpreted as power of two alignments, internally these values are interpreted as log2(align). This patch updates the documentation,
- `MachineFunctionexposes` exposes `align-all-functions` also interpreted as power of two alignment, internally this value is interpreted as log2(align). This patch updates the documentation,
Reviewers: lattner, thegameg, courbet
Subscribers: dschuff, arsenm, jyknight, dylanmckay, sdardis, nemanjai, jvesely, nhaehnle, javed.absar, hiraditya, kbarton, fedor.sergeev, asb, rbar, johnrusso, simoncook, apazos, sabuasal, niosHD, jrtc27, MaskRay, zzheng, edward-jones, atanasyan, rogfer01, MartinMosbeck, brucehoult, the_o, dexonsmith, PkmX, jocewei, jsji, Jim, s.egerton, llvm-commits, courbet
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D65945
llvm-svn: 371045
2019-09-05 18:00:22 +08:00
|
|
|
unsigned PrefLoopLogAlignment = 0;
|
2018-09-13 18:28:05 +08:00
|
|
|
|
2019-08-14 02:12:08 +08:00
|
|
|
/// The cost factor for MVE instructions, representing the multiple beats an
|
|
|
|
// instruction can take. The default is 2, (set in initSubtargetFeatures so
|
|
|
|
// that we can use subtarget features less than 2).
|
|
|
|
unsigned MVEVectorCostFactor = 0;
|
|
|
|
|
2019-02-08 15:57:42 +08:00
|
|
|
/// OptMinSize - True if we're optimising for minimum code size, equal to
|
|
|
|
/// the function attribute.
|
|
|
|
bool OptMinSize = false;
|
|
|
|
|
2014-03-28 22:35:30 +08:00
|
|
|
/// IsLittle - The target is Little Endian
|
|
|
|
bool IsLittle;
|
|
|
|
|
2011-01-12 05:46:47 +08:00
|
|
|
/// TargetTriple - What processor and OS we're targeting.
|
|
|
|
Triple TargetTriple;
|
|
|
|
|
2012-08-08 10:44:16 +08:00
|
|
|
/// SchedModel - Processor specific instruction costs.
|
2014-09-03 01:43:54 +08:00
|
|
|
MCSchedModel SchedModel;
|
2012-08-08 10:44:16 +08:00
|
|
|
|
2009-06-19 09:51:50 +08:00
|
|
|
/// Selected instruction itineraries (one entry per itinerary class.)
|
|
|
|
InstrItineraryData InstrItins;
|
2009-08-11 23:33:49 +08:00
|
|
|
|
2013-03-22 02:47:47 +08:00
|
|
|
/// Options passed via command line that could influence the target
|
|
|
|
const TargetOptions &Options;
|
|
|
|
|
2014-12-18 10:20:58 +08:00
|
|
|
const ARMBaseTargetMachine &TM;
|
2007-02-14 03:52:28 +08:00
|
|
|
|
2014-12-18 10:20:58 +08:00
|
|
|
public:
|
2007-01-19 15:51:42 +08:00
|
|
|
/// This constructor initializes the data members to match that
|
2009-08-03 06:11:08 +08:00
|
|
|
/// of the specified triple.
|
2007-01-19 15:51:42 +08:00
|
|
|
///
|
2015-06-10 20:11:26 +08:00
|
|
|
ARMSubtarget(const Triple &TT, const std::string &CPU, const std::string &FS,
|
2019-02-08 15:57:42 +08:00
|
|
|
const ARMBaseTargetMachine &TM, bool IsLittle,
|
|
|
|
bool MinSize = false);
|
2007-01-19 15:51:42 +08:00
|
|
|
|
Drop ISD::MEMSET, ISD::MEMMOVE, and ISD::MEMCPY, which are not Legal
on any current target and aren't optimized in DAGCombiner. Instead
of using intermediate nodes, expand the operations, choosing between
simple loads/stores, target-specific code, and library calls,
immediately.
Previously, the code to emit optimized code for these operations
was only used at initial SelectionDAG construction time; now it is
used at all times. This fixes some cases where rep;movs was being
used for small copies where simple loads/stores would be better.
This also cleans up code that checks for alignments less than 4;
let the targets make that decision instead of doing it in
target-independent code. This allows x86 to use rep;movs in
low-alignment cases.
Also, this fixes a bug that resulted in the use of rep;stos for
memsets of 0 with non-constant memory size when the alignment was
at least 4. It's better to use the library in this case, which
can be significantly faster when the size is large.
This also preserves more SourceValue information when memory
intrinsics are lowered into simple loads/stores.
llvm-svn: 49572
2008-04-12 12:36:06 +08:00
|
|
|
/// getMaxInlineSizeThreshold - Returns the maximum memset / memcpy size
|
|
|
|
/// that still makes it profitable to inline the call.
|
2007-10-31 22:39:58 +08:00
|
|
|
unsigned getMaxInlineSizeThreshold() const {
|
2014-05-16 22:24:22 +08:00
|
|
|
return 64;
|
2007-10-31 22:39:58 +08:00
|
|
|
}
|
2017-01-28 07:58:02 +08:00
|
|
|
|
2009-05-24 03:51:43 +08:00
|
|
|
/// ParseSubtargetFeatures - Parses features string setting specified
|
2007-01-19 15:51:42 +08:00
|
|
|
/// subtarget options. Definition of function is auto generated by tblgen.
|
2011-07-07 15:07:08 +08:00
|
|
|
void ParseSubtargetFeatures(StringRef CPU, StringRef FS);
|
2007-01-19 15:51:42 +08:00
|
|
|
|
2014-06-13 08:20:35 +08:00
|
|
|
/// initializeSubtargetDependencies - Initializes using a CPU and feature string
|
|
|
|
/// so that we can use initializer lists for subtarget initialization.
|
|
|
|
ARMSubtarget &initializeSubtargetDependencies(StringRef CPU, StringRef FS);
|
|
|
|
|
2014-08-05 05:25:23 +08:00
|
|
|
const ARMSelectionDAGInfo *getSelectionDAGInfo() const override {
|
|
|
|
return &TSInfo;
|
|
|
|
}
|
2017-01-28 07:58:02 +08:00
|
|
|
|
2014-08-05 05:25:23 +08:00
|
|
|
const ARMBaseInstrInfo *getInstrInfo() const override {
|
|
|
|
return InstrInfo.get();
|
|
|
|
}
|
2017-01-28 07:58:02 +08:00
|
|
|
|
2014-08-05 05:25:23 +08:00
|
|
|
const ARMTargetLowering *getTargetLowering() const override {
|
|
|
|
return &TLInfo;
|
|
|
|
}
|
2017-01-28 07:58:02 +08:00
|
|
|
|
2014-08-05 05:25:23 +08:00
|
|
|
const ARMFrameLowering *getFrameLowering() const override {
|
|
|
|
return FrameLowering.get();
|
|
|
|
}
|
2017-01-28 07:58:02 +08:00
|
|
|
|
2014-08-05 05:25:23 +08:00
|
|
|
const ARMBaseRegisterInfo *getRegisterInfo() const override {
|
2014-06-27 03:30:02 +08:00
|
|
|
return &InstrInfo->getRegisterInfo();
|
|
|
|
}
|
2014-06-13 08:20:35 +08:00
|
|
|
|
2016-11-11 16:27:37 +08:00
|
|
|
const CallLowering *getCallLowering() const override;
|
2019-08-13 14:26:59 +08:00
|
|
|
InstructionSelector *getInstructionSelector() const override;
|
2016-11-11 16:27:37 +08:00
|
|
|
const LegalizerInfo *getLegalizerInfo() const override;
|
|
|
|
const RegisterBankInfo *getRegBankInfo() const override;
|
|
|
|
|
2013-02-16 09:36:26 +08:00
|
|
|
private:
|
2014-06-13 08:20:39 +08:00
|
|
|
ARMSelectionDAGInfo TSInfo;
|
2015-01-27 03:03:15 +08:00
|
|
|
// Either Thumb1FrameLowering or ARMFrameLowering.
|
|
|
|
std::unique_ptr<ARMFrameLowering> FrameLowering;
|
2014-06-27 03:30:02 +08:00
|
|
|
// Either Thumb1InstrInfo or Thumb2InstrInfo.
|
|
|
|
std::unique_ptr<ARMBaseInstrInfo> InstrInfo;
|
|
|
|
ARMTargetLowering TLInfo;
|
2014-06-13 08:20:35 +08:00
|
|
|
|
2017-08-16 06:31:51 +08:00
|
|
|
/// GlobalISel related APIs.
|
|
|
|
std::unique_ptr<CallLowering> CallLoweringInfo;
|
|
|
|
std::unique_ptr<InstructionSelector> InstSelector;
|
|
|
|
std::unique_ptr<LegalizerInfo> Legalizer;
|
|
|
|
std::unique_ptr<RegisterBankInfo> RegBankInfo;
|
2016-11-11 16:27:37 +08:00
|
|
|
|
2013-02-16 09:36:26 +08:00
|
|
|
void initializeEnvironment();
|
2014-09-04 04:36:31 +08:00
|
|
|
void initSubtargetFeatures(StringRef CPU, StringRef FS);
|
2015-01-27 03:03:15 +08:00
|
|
|
ARMFrameLowering *initializeFrameLowering(StringRef CPU, StringRef FS);
|
|
|
|
|
[ARM] Add initial support for Custom Datapath Extension (CDE)
Summary:
This patch adds assembly-level support for a new Arm M-profile
architecture extension, Custom Datapath Extension (CDE).
A brief description of the extension is available at
https://developer.arm.com/architectures/instruction-sets/custom-instructions
The latest specification for CDE is currently a beta release and is
available at
https://static.docs.arm.com/ddi0607/aa/DDI0607A_a_armv8m_arm_supplement_cde.pdf
CDE allows chip vendors to add custom CPU instructions. The CDE
instructions re-use the same encoding space as existing coprocessor
instructions (such as MRC, MCR, CDP etc.). Each coprocessor in range
cp0-cp7 can be configured as either general purpose (GCP) or custom
datapath (CDEv1). This configuration is defined by the CPU vendor and
is provided to LLVM using 8 subtarget features: cdecp0 ... cdecp7.
The semantics of CDE instructions are implementation-defined, but the
instructions are guaranteed to be pure (that is, they are stateless,
they do not access memory or any registers except their explicit
inputs/outputs).
CDE requires the CPU to support at least Armv8.0-M mainline
architecture. CDE includes 3 sets of instructions:
* Instructions that operate on general purpose registers and NZCV
flags
* Instructions that operate on the S or D register file (require
either FP or MVE extension)
* Instructions that operate on the Q register file, require MVE
The user-facing names that can be specified on the command line are
the same as the 8 subtarget feature names. For example:
$ clang -target arm-none-none-eabi -march=armv8m.main+cdecp0+cdecp3
tells the compiler that the coprocessors 0 and 3 are configured as
CDEv1 and the remaining coprocessors are configured as GCP (which is
the default).
Reviewers: simon_tatham, ostannard, dmgreen, eli.friedman
Reviewed By: simon_tatham
Subscribers: kristof.beyls, hiraditya, cfe-commits, llvm-commits
Tags: #clang, #llvm
Differential Revision: https://reviews.llvm.org/D74044
2020-02-17 23:37:49 +08:00
|
|
|
std::bitset<8> CoprocCDE = {};
|
2013-02-16 09:36:26 +08:00
|
|
|
public:
|
Various bits of framework needed for precise machine-level selection
DAG scheduling during isel. Most new functionality is currently
guarded by -enable-sched-cycles and -enable-sched-hazard.
Added InstrItineraryData::IssueWidth field, currently derived from
ARM itineraries, but could be initialized differently on other targets.
Added ScheduleHazardRecognizer::MaxLookAhead to indicate whether it is
active, and if so how many cycles of state it holds.
Added SchedulingPriorityQueue::HasReadyFilter to allowing gating entry
into the scheduler's available queue.
ScoreboardHazardRecognizer now accesses the ScheduleDAG in order to
get information about it's SUnits, provides RecedeCycle for bottom-up
scheduling, correctly computes scoreboard depth, tracks IssueCount, and
considers potential stall cycles when checking for hazards.
ScheduleDAGRRList now models machine cycles and hazards (under
flags). It tracks MinAvailableCycle, drives the hazard recognizer and
priority queue's ready filter, manages a new PendingQueue, properly
accounts for stall cycles, etc.
llvm-svn: 122541
2010-12-24 13:03:26 +08:00
|
|
|
void computeIssueWidth();
|
|
|
|
|
2011-07-07 11:55:05 +08:00
|
|
|
bool hasV4TOps() const { return HasV4TOps; }
|
|
|
|
bool hasV5TOps() const { return HasV5TOps; }
|
|
|
|
bool hasV5TEOps() const { return HasV5TEOps; }
|
|
|
|
bool hasV6Ops() const { return HasV6Ops; }
|
2013-10-08 00:55:23 +08:00
|
|
|
bool hasV6MOps() const { return HasV6MOps; }
|
2015-03-17 19:55:28 +08:00
|
|
|
bool hasV6KOps() const { return HasV6KOps; }
|
2011-07-07 11:55:05 +08:00
|
|
|
bool hasV6T2Ops() const { return HasV6T2Ops; }
|
|
|
|
bool hasV7Ops() const { return HasV7Ops; }
|
2013-06-27 00:58:26 +08:00
|
|
|
bool hasV8Ops() const { return HasV8Ops; }
|
2015-04-01 22:54:56 +08:00
|
|
|
bool hasV8_1aOps() const { return HasV8_1aOps; }
|
2015-12-01 18:23:06 +08:00
|
|
|
bool hasV8_2aOps() const { return HasV8_2aOps; }
|
2017-08-10 17:41:00 +08:00
|
|
|
bool hasV8_3aOps() const { return HasV8_3aOps; }
|
[ARM][AArch64] Armv8.4-A Enablement
Initial patch adding assembly support for Armv8.4-A.
Besides adding v8.4 as a supported architecture to the usual places, this also
adds target features for the different crypto algorithms. Armv8.4-A introduced
new crypto algorithms, made them optional, and allows different combinations:
- none of the v8.4 crypto functions are supported, which is independent of the
implementation of the Armv8.0 SHA1 and SHA2 instructions.
- the v8.4 SHA512 and SHA3 support is implemented, in this case the Armv8.0
SHA1 and SHA2 instructions must also be implemented.
- the v8.4 SM3 and SM4 support is implemented, which is independent of the
implementation of the Armv8.0 SHA1 and SHA2 instructions.
- all of the v8.4 crypto functions are supported, in this case the Armv8.0 SHA1
and SHA2 instructions must also be implemented.
The v8.4 crypto instructions are added to AArch64 only, and not AArch32,
and are made optional extensions to Armv8.2-A.
The user-facing Clang options will map on these new target features, their
naming will be compatible with GCC and added in follow-up patches.
The Armv8.4-A instruction sets can be downloaded here:
https://developer.arm.com/products/architecture/a-profile/exploration-tools
Differential Revision: https://reviews.llvm.org/D48625
llvm-svn: 335953
2018-06-29 16:43:19 +08:00
|
|
|
bool hasV8_4aOps() const { return HasV8_4aOps; }
|
2018-09-26 20:48:21 +08:00
|
|
|
bool hasV8_5aOps() const { return HasV8_5aOps; }
|
2016-01-15 18:24:39 +08:00
|
|
|
bool hasV8MBaselineOps() const { return HasV8MBaselineOps; }
|
|
|
|
bool hasV8MMainlineOps() const { return HasV8MMainlineOps; }
|
2019-05-30 20:57:04 +08:00
|
|
|
bool hasV8_1MMainlineOps() const { return HasV8_1MMainlineOps; }
|
|
|
|
bool hasMVEIntegerOps() const { return HasMVEIntegerOps; }
|
|
|
|
bool hasMVEFloatOps() const { return HasMVEFloatOps; }
|
[ARM] Add initial support for Custom Datapath Extension (CDE)
Summary:
This patch adds assembly-level support for a new Arm M-profile
architecture extension, Custom Datapath Extension (CDE).
A brief description of the extension is available at
https://developer.arm.com/architectures/instruction-sets/custom-instructions
The latest specification for CDE is currently a beta release and is
available at
https://static.docs.arm.com/ddi0607/aa/DDI0607A_a_armv8m_arm_supplement_cde.pdf
CDE allows chip vendors to add custom CPU instructions. The CDE
instructions re-use the same encoding space as existing coprocessor
instructions (such as MRC, MCR, CDP etc.). Each coprocessor in range
cp0-cp7 can be configured as either general purpose (GCP) or custom
datapath (CDEv1). This configuration is defined by the CPU vendor and
is provided to LLVM using 8 subtarget features: cdecp0 ... cdecp7.
The semantics of CDE instructions are implementation-defined, but the
instructions are guaranteed to be pure (that is, they are stateless,
they do not access memory or any registers except their explicit
inputs/outputs).
CDE requires the CPU to support at least Armv8.0-M mainline
architecture. CDE includes 3 sets of instructions:
* Instructions that operate on general purpose registers and NZCV
flags
* Instructions that operate on the S or D register file (require
either FP or MVE extension)
* Instructions that operate on the Q register file, require MVE
The user-facing names that can be specified on the command line are
the same as the 8 subtarget feature names. For example:
$ clang -target arm-none-none-eabi -march=armv8m.main+cdecp0+cdecp3
tells the compiler that the coprocessors 0 and 3 are configured as
CDEv1 and the remaining coprocessors are configured as GCP (which is
the default).
Reviewers: simon_tatham, ostannard, dmgreen, eli.friedman
Reviewed By: simon_tatham
Subscribers: kristof.beyls, hiraditya, cfe-commits, llvm-commits
Tags: #clang, #llvm
Differential Revision: https://reviews.llvm.org/D74044
2020-02-17 23:37:49 +08:00
|
|
|
bool hasCDEOps() const { return HasCDEOps; }
|
2019-05-30 20:37:05 +08:00
|
|
|
bool hasFPRegs() const { return HasFPRegs; }
|
|
|
|
bool hasFPRegs16() const { return HasFPRegs16; }
|
|
|
|
bool hasFPRegs64() const { return HasFPRegs64; }
|
2007-01-19 15:51:42 +08:00
|
|
|
|
2016-07-06 17:22:23 +08:00
|
|
|
/// @{
|
|
|
|
/// These functions are obsolete, please consider adding subtarget features
|
|
|
|
/// or properties instead of calling them.
|
2012-11-30 03:48:01 +08:00
|
|
|
bool isCortexA5() const { return ARMProcFamily == CortexA5; }
|
2014-04-01 22:10:07 +08:00
|
|
|
bool isCortexA7() const { return ARMProcFamily == CortexA7; }
|
2010-09-10 09:29:16 +08:00
|
|
|
bool isCortexA8() const { return ARMProcFamily == CortexA8; }
|
|
|
|
bool isCortexA9() const { return ARMProcFamily == CortexA9; }
|
2012-09-13 23:05:10 +08:00
|
|
|
bool isCortexA15() const { return ARMProcFamily == CortexA15; }
|
2012-09-30 05:43:49 +08:00
|
|
|
bool isSwift() const { return ARMProcFamily == Swift; }
|
2016-03-24 00:18:13 +08:00
|
|
|
bool isCortexM3() const { return ARMProcFamily == CortexM3; }
|
2013-12-07 06:48:17 +08:00
|
|
|
bool isLikeA9() const { return isCortexA9() || isCortexA15() || isKrait(); }
|
2012-12-21 12:35:05 +08:00
|
|
|
bool isCortexR5() const { return ARMProcFamily == CortexR5; }
|
2013-12-07 06:48:17 +08:00
|
|
|
bool isKrait() const { return ARMProcFamily == Krait; }
|
2016-07-06 17:22:23 +08:00
|
|
|
/// @}
|
2010-09-10 09:29:16 +08:00
|
|
|
|
2010-08-11 15:17:46 +08:00
|
|
|
bool hasARMOps() const { return !NoARM; }
|
|
|
|
|
2019-09-18 05:42:38 +08:00
|
|
|
bool hasVFP2Base() const { return HasVFPv2SP; }
|
[ARM] Replace fp-only-sp and d16 with fp64 and d32.
Those two subtarget features were awkward because their semantics are
reversed: each one indicates the _lack_ of support for something in
the architecture, rather than the presence. As a consequence, you
don't get the behavior you want if you combine two sets of feature
bits.
Each SubtargetFeature for an FP architecture version now comes in four
versions, one for each combination of those options. So you can still
say (for example) '+vfp2' in a feature string and it will mean what
it's always meant, but there's a new string '+vfp2d16sp' meaning the
version without those extra options.
A lot of this change is just mechanically replacing positive checks
for the old features with negative checks for the new ones. But one
more interesting change is that I've rearranged getFPUFeatures() so
that the main FPU feature is appended to the output list *before*
rather than after the features derived from the Restriction field, so
that -fp64 and -d32 can override defaults added by the main feature.
Reviewers: dmgreen, samparker, SjoerdMeijer
Subscribers: srhines, javed.absar, eraman, kristof.beyls, hiraditya, zzheng, Petar.Avramovic, cfe-commits, llvm-commits
Tags: #clang, #llvm
Differential Revision: https://reviews.llvm.org/D60691
llvm-svn: 361845
2019-05-29 00:13:20 +08:00
|
|
|
bool hasVFP3Base() const { return HasVFPv3D16SP; }
|
|
|
|
bool hasVFP4Base() const { return HasVFPv4D16SP; }
|
|
|
|
bool hasFPARMv8Base() const { return HasFPARMv8D16SP; }
|
2011-07-07 11:55:05 +08:00
|
|
|
bool hasNEON() const { return HasNEON; }
|
[ARM][AArch64] Armv8.4-A Enablement
Initial patch adding assembly support for Armv8.4-A.
Besides adding v8.4 as a supported architecture to the usual places, this also
adds target features for the different crypto algorithms. Armv8.4-A introduced
new crypto algorithms, made them optional, and allows different combinations:
- none of the v8.4 crypto functions are supported, which is independent of the
implementation of the Armv8.0 SHA1 and SHA2 instructions.
- the v8.4 SHA512 and SHA3 support is implemented, in this case the Armv8.0
SHA1 and SHA2 instructions must also be implemented.
- the v8.4 SM3 and SM4 support is implemented, which is independent of the
implementation of the Armv8.0 SHA1 and SHA2 instructions.
- all of the v8.4 crypto functions are supported, in this case the Armv8.0 SHA1
and SHA2 instructions must also be implemented.
The v8.4 crypto instructions are added to AArch64 only, and not AArch32,
and are made optional extensions to Armv8.2-A.
The user-facing Clang options will map on these new target features, their
naming will be compatible with GCC and added in follow-up patches.
The Armv8.4-A instruction sets can be downloaded here:
https://developer.arm.com/products/architecture/a-profile/exploration-tools
Differential Revision: https://reviews.llvm.org/D48625
llvm-svn: 335953
2018-06-29 16:43:19 +08:00
|
|
|
bool hasSHA2() const { return HasSHA2; }
|
|
|
|
bool hasAES() const { return HasAES; }
|
2013-09-19 19:59:01 +08:00
|
|
|
bool hasCrypto() const { return HasCrypto; }
|
2017-08-11 17:52:30 +08:00
|
|
|
bool hasDotProd() const { return HasDotProd; }
|
2013-10-29 17:47:35 +08:00
|
|
|
bool hasCRC() const { return HasCRC; }
|
2016-06-03 22:03:27 +08:00
|
|
|
bool hasRAS() const { return HasRAS; }
|
[ARM] Add the non-MVE instructions in Arm v8.1-M.
This adds support for the new family of conditional selection /
increment / negation instructions; the low-overhead branch
instructions (e.g. BF, WLS, DLS); the CLRM instruction to zero a whole
list of registers at once; the new VMRS/VMSR and VLDR/VSTR
instructions to get data in and out of 8.1-M system registers,
particularly including the new VPR register used by MVE vector
predication.
To support this, we also add a register name 'zr' (used by the CSEL
family to force one of the inputs to the constant 0), and operand
types for lists of registers that are also allowed to include APSR or
VPR (used by CLRM). The VLDR/VSTR instructions also need a new
addressing mode.
The low-overhead branch instructions exist in their own separate
architecture extension, which we treat as enabled by default, but you
can say -mattr=-lob or equivalent to turn it off.
Reviewers: dmgreen, samparker, SjoerdMeijer, t.p.northover
Reviewed By: samparker
Subscribers: miyuki, javed.absar, kristof.beyls, hiraditya, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D62667
llvm-svn: 363039
2019-06-11 17:29:18 +08:00
|
|
|
bool hasLOB() const { return HasLOB; }
|
2013-11-01 21:27:35 +08:00
|
|
|
bool hasVirtualization() const { return HasVirtualization; }
|
2017-01-28 07:58:02 +08:00
|
|
|
|
2009-08-11 23:33:49 +08:00
|
|
|
bool useNEONForSinglePrecisionFP() const {
|
2015-02-05 10:09:33 +08:00
|
|
|
return hasNEON() && UseNEONForSinglePrecisionFP;
|
|
|
|
}
|
2011-07-07 11:55:05 +08:00
|
|
|
|
2017-04-20 17:38:25 +08:00
|
|
|
bool hasDivideInThumbMode() const { return HasHardwareDivideInThumb; }
|
2012-09-30 05:43:49 +08:00
|
|
|
bool hasDivideInARMMode() const { return HasHardwareDivideInARM; }
|
2010-08-11 14:22:01 +08:00
|
|
|
bool hasDataBarrier() const { return HasDataBarrier; }
|
2017-12-21 19:17:49 +08:00
|
|
|
bool hasFullDataBarrier() const { return HasFullDataBarrier; }
|
2016-01-15 18:23:46 +08:00
|
|
|
bool hasV7Clrex() const { return HasV7Clrex; }
|
|
|
|
bool hasAcquireRelease() const { return HasAcquireRelease; }
|
2017-01-28 07:58:02 +08:00
|
|
|
|
2013-10-25 17:30:24 +08:00
|
|
|
bool hasAnyDataBarrier() const {
|
|
|
|
return HasDataBarrier || (hasV6Ops() && !isThumb());
|
|
|
|
}
|
2017-01-28 07:58:02 +08:00
|
|
|
|
2012-09-30 05:43:49 +08:00
|
|
|
bool useMulOps() const { return UseMulOps; }
|
2010-12-06 06:04:16 +08:00
|
|
|
bool useFPVMLx() const { return !SlowFPVMLx; }
|
2020-01-05 18:59:21 +08:00
|
|
|
bool useFPVFMx() const {
|
|
|
|
return !isTargetDarwin() && hasVFP4Base() && !SlowFPVFMx;
|
|
|
|
}
|
|
|
|
bool useFPVFMx16() const { return useFPVFMx() && hasFullFP16(); }
|
|
|
|
bool useFPVFMx64() const { return useFPVFMx() && hasFP64(); }
|
2011-04-01 03:38:48 +08:00
|
|
|
bool hasVMLxForwarding() const { return HasVMLxForwarding; }
|
2010-07-14 03:21:50 +08:00
|
|
|
bool isFPBrccSlow() const { return SlowFPBrcc; }
|
[ARM] Replace fp-only-sp and d16 with fp64 and d32.
Those two subtarget features were awkward because their semantics are
reversed: each one indicates the _lack_ of support for something in
the architecture, rather than the presence. As a consequence, you
don't get the behavior you want if you combine two sets of feature
bits.
Each SubtargetFeature for an FP architecture version now comes in four
versions, one for each combination of those options. So you can still
say (for example) '+vfp2' in a feature string and it will mean what
it's always meant, but there's a new string '+vfp2d16sp' meaning the
version without those extra options.
A lot of this change is just mechanically replacing positive checks
for the old features with negative checks for the new ones. But one
more interesting change is that I've rearranged getFPUFeatures() so
that the main FPU feature is appended to the output list *before*
rather than after the features derived from the Restriction field, so
that -fp64 and -d32 can override defaults added by the main feature.
Reviewers: dmgreen, samparker, SjoerdMeijer
Subscribers: srhines, javed.absar, eraman, kristof.beyls, hiraditya, zzheng, Petar.Avramovic, cfe-commits, llvm-commits
Tags: #clang, #llvm
Differential Revision: https://reviews.llvm.org/D60691
llvm-svn: 361845
2019-05-29 00:13:20 +08:00
|
|
|
bool hasFP64() const { return HasFP64; }
|
2013-05-24 03:11:14 +08:00
|
|
|
bool hasPerfMon() const { return HasPerfMon; }
|
2013-04-10 20:08:35 +08:00
|
|
|
bool hasTrustZone() const { return HasTrustZone; }
|
2016-01-25 19:24:47 +08:00
|
|
|
bool has8MSecExt() const { return Has8MSecExt; }
|
2014-04-01 21:22:02 +08:00
|
|
|
bool hasZeroCycleZeroing() const { return HasZeroCycleZeroing; }
|
2016-10-13 22:57:43 +08:00
|
|
|
bool hasFPAO() const { return HasFPAO; }
|
2016-06-23 15:47:35 +08:00
|
|
|
bool isProfitableToUnpredicate() const { return IsProfitableToUnpredicate; }
|
|
|
|
bool hasSlowVGETLNi32() const { return HasSlowVGETLNi32; }
|
|
|
|
bool hasSlowVDUP32() const { return HasSlowVDUP32; }
|
|
|
|
bool preferVMOVSR() const { return PreferVMOVSR; }
|
|
|
|
bool preferISHSTBarriers() const { return PreferISHST; }
|
2016-07-07 17:11:39 +08:00
|
|
|
bool expandMLx() const { return ExpandMLx; }
|
|
|
|
bool hasVMLxHazards() const { return HasVMLxHazards; }
|
2016-07-06 17:22:23 +08:00
|
|
|
bool hasSlowOddRegister() const { return SlowOddRegister; }
|
|
|
|
bool hasSlowLoadDSubregister() const { return SlowLoadDSubregister; }
|
2018-08-10 00:13:24 +08:00
|
|
|
bool useWideStrideVFP() const { return UseWideStrideVFP; }
|
2016-07-06 17:22:23 +08:00
|
|
|
bool hasMuxedUnits() const { return HasMuxedUnits; }
|
2016-07-06 19:22:11 +08:00
|
|
|
bool dontWidenVMOVS() const { return DontWidenVMOVS; }
|
2018-07-21 00:49:28 +08:00
|
|
|
bool useSplatVFPToNeon() const { return SplatVFPToNeon; }
|
2016-06-23 15:47:35 +08:00
|
|
|
bool useNEONForFPMovs() const { return UseNEONForFPMovs; }
|
2016-06-27 17:08:23 +08:00
|
|
|
bool checkVLDnAccessAlignment() const { return CheckVLDnAlign; }
|
|
|
|
bool nonpipelinedVFP() const { return NonpipelinedVFP; }
|
2010-08-10 02:35:19 +08:00
|
|
|
bool prefers32BitThumb() const { return Pref32BitThumb; }
|
2011-04-20 02:11:49 +08:00
|
|
|
bool avoidCPSRPartialUpdate() const { return AvoidCPSRPartialUpdate; }
|
2017-06-02 16:53:19 +08:00
|
|
|
bool cheapPredicableCPSRDef() const { return CheapPredicableCPSRDef; }
|
2012-12-21 03:59:30 +08:00
|
|
|
bool avoidMOVsShifterOperand() const { return AvoidMOVsShifterOperand; }
|
2016-06-03 22:03:27 +08:00
|
|
|
bool hasRetAddrStack() const { return HasRetAddrStack; }
|
2017-06-28 22:11:15 +08:00
|
|
|
bool hasBranchPredictor() const { return HasBranchPredictor; }
|
2010-11-03 14:34:55 +08:00
|
|
|
bool hasMPExtension() const { return HasMPExtension; }
|
2015-09-25 01:31:16 +08:00
|
|
|
/// True if the DSP subtarget feature is enabled — NOTE(review): presumably
/// the saturating/SIMD DSP instruction set; see also hasBaseDSP().
bool hasDSP() const { return HasDSP; }
|
2013-01-31 00:30:19 +08:00
|
|
|
bool useNaClTrap() const { return UseNaClTrap; }
|
2015-10-29 06:56:36 +08:00
|
|
|
bool useSjLjEH() const { return UseSjLjEH; }
|
2019-01-03 20:09:12 +08:00
|
|
|
bool hasSB() const { return HasSB; }
|
2015-07-07 14:54:42 +08:00
|
|
|
bool genLongCalls() const { return GenLongCalls; }
|
2016-12-15 15:59:08 +08:00
|
|
|
bool genExecuteOnly() const { return GenExecuteOnly; }
|
2019-10-21 20:33:46 +08:00
|
|
|
bool hasBaseDSP() const {
|
|
|
|
if (isThumb())
|
|
|
|
return hasDSP();
|
|
|
|
else
|
|
|
|
return hasV5TEOps();
|
|
|
|
}
|
2009-08-11 23:33:49 +08:00
|
|
|
|
2010-03-15 02:42:38 +08:00
|
|
|
bool hasFP16() const { return HasFP16; }
|
[ARM] Replace fp-only-sp and d16 with fp64 and d32.
Those two subtarget features were awkward because their semantics are
reversed: each one indicates the _lack_ of support for something in
the architecture, rather than the presence. As a consequence, you
don't get the behavior you want if you combine two sets of feature
bits.
Each SubtargetFeature for an FP architecture version now comes in four
versions, one for each combination of those options. So you can still
say (for example) '+vfp2' in a feature string and it will mean what
it's always meant, but there's a new string '+vfp2d16sp' meaning the
version without those extra options.
A lot of this change is just mechanically replacing positive checks
for the old features with negative checks for the new ones. But one
more interesting change is that I've rearranged getFPUFeatures() so
that the main FPU feature is appended to the output list *before*
rather than after the features derived from the Restriction field, so
that -fp64 and -d32 can override defaults added by the main feature.
Reviewers: dmgreen, samparker, SjoerdMeijer
Subscribers: srhines, javed.absar, eraman, kristof.beyls, hiraditya, zzheng, Petar.Avramovic, cfe-commits, llvm-commits
Tags: #clang, #llvm
Differential Revision: https://reviews.llvm.org/D60691
llvm-svn: 361845
2019-05-29 00:13:20 +08:00
|
|
|
bool hasD32() const { return HasD32; }
|
2015-12-01 18:23:06 +08:00
|
|
|
bool hasFullFP16() const { return HasFullFP16; }
|
2018-08-17 19:29:49 +08:00
|
|
|
bool hasFP16FML() const { return HasFP16FML; }
|
2010-03-15 02:42:38 +08:00
|
|
|
|
2017-06-22 17:39:36 +08:00
|
|
|
bool hasFuseAES() const { return HasFuseAES; }
|
2018-07-28 02:16:47 +08:00
|
|
|
bool hasFuseLiterals() const { return HasFuseLiterals; }
|
2018-05-01 23:54:18 +08:00
|
|
|
/// Return true if the CPU supports any kind of instruction fusion.
|
2018-07-28 02:16:47 +08:00
|
|
|
bool hasFusion() const { return hasFuseAES() || hasFuseLiterals(); }
|
2017-06-22 17:39:36 +08:00
|
|
|
|
2011-04-21 06:20:12 +08:00
|
|
|
const Triple &getTargetTriple() const { return TargetTriple; }
|
|
|
|
|
2011-04-20 05:14:45 +08:00
|
|
|
bool isTargetDarwin() const { return TargetTriple.isOSDarwin(); }
|
2014-04-03 04:32:05 +08:00
|
|
|
bool isTargetIOS() const { return TargetTriple.isiOS(); }
|
2015-10-29 06:46:43 +08:00
|
|
|
bool isTargetWatchOS() const { return TargetTriple.isWatchOS(); }
|
2016-01-28 03:32:29 +08:00
|
|
|
bool isTargetWatchABI() const { return TargetTriple.isWatchABI(); }
|
2013-08-30 04:23:14 +08:00
|
|
|
bool isTargetLinux() const { return TargetTriple.isOSLinux(); }
|
2014-04-03 04:32:05 +08:00
|
|
|
bool isTargetNaCl() const { return TargetTriple.isOSNaCl(); }
|
2014-11-23 03:12:10 +08:00
|
|
|
bool isTargetNetBSD() const { return TargetTriple.isOSNetBSD(); }
|
2014-04-03 04:32:05 +08:00
|
|
|
bool isTargetWindows() const { return TargetTriple.isOSWindows(); }
|
2014-01-06 22:28:05 +08:00
|
|
|
|
2014-04-03 04:32:05 +08:00
|
|
|
bool isTargetCOFF() const { return TargetTriple.isOSBinFormatCOFF(); }
|
2013-12-11 00:57:43 +08:00
|
|
|
bool isTargetELF() const { return TargetTriple.isOSBinFormatELF(); }
|
2014-01-06 22:28:05 +08:00
|
|
|
bool isTargetMachO() const { return TargetTriple.isOSBinFormatMachO(); }
|
|
|
|
|
2013-07-16 17:32:17 +08:00
|
|
|
// ARM EABI is the bare-metal EABI described in ARM ABI documents and
|
|
|
|
// can be accessed via -target arm-none-eabi. This is NOT GNUEABI.
|
|
|
|
// FIXME: Add a flag for bare-metal for that target and set Triple::EABI
|
|
|
|
// even for GNUEABI, so we can make a distinction here and still conform to
|
|
|
|
// the EABI on GNU (and Android) mode. This requires change in Clang, too.
|
2014-01-06 20:00:44 +08:00
|
|
|
// FIXME: The Darwin exception is temporary, while we move users to
|
|
|
|
// "*-*-*-macho" triples as quickly as possible.
|
2013-07-16 17:32:17 +08:00
|
|
|
bool isTargetAEABI() const {
|
2014-01-06 20:00:44 +08:00
|
|
|
return (TargetTriple.getEnvironment() == Triple::EABI ||
|
|
|
|
TargetTriple.getEnvironment() == Triple::EABIHF) &&
|
2014-04-03 04:32:05 +08:00
|
|
|
!isTargetDarwin() && !isTargetWindows();
|
2013-12-18 17:27:33 +08:00
|
|
|
}
|
2015-11-09 20:40:30 +08:00
|
|
|
bool isTargetGNUAEABI() const {
|
|
|
|
return (TargetTriple.getEnvironment() == Triple::GNUEABI ||
|
|
|
|
TargetTriple.getEnvironment() == Triple::GNUEABIHF) &&
|
|
|
|
!isTargetDarwin() && !isTargetWindows();
|
|
|
|
}
|
2016-06-25 05:14:33 +08:00
|
|
|
bool isTargetMuslAEABI() const {
|
|
|
|
return (TargetTriple.getEnvironment() == Triple::MuslEABI ||
|
|
|
|
TargetTriple.getEnvironment() == Triple::MuslEABIHF) &&
|
|
|
|
!isTargetDarwin() && !isTargetWindows();
|
|
|
|
}
|
2013-12-18 17:27:33 +08:00
|
|
|
|
2014-01-29 19:50:56 +08:00
|
|
|
// ARM Targets that support EHABI exception handling standard
|
|
|
|
// Darwin uses SjLj. Other targets might need more checks.
|
|
|
|
bool isTargetEHABICompatible() const {
|
|
|
|
return (TargetTriple.getEnvironment() == Triple::EABI ||
|
|
|
|
TargetTriple.getEnvironment() == Triple::GNUEABI ||
|
2016-06-25 05:14:33 +08:00
|
|
|
TargetTriple.getEnvironment() == Triple::MuslEABI ||
|
2014-01-29 19:50:56 +08:00
|
|
|
TargetTriple.getEnvironment() == Triple::EABIHF ||
|
2014-01-30 22:18:25 +08:00
|
|
|
TargetTriple.getEnvironment() == Triple::GNUEABIHF ||
|
2016-06-25 05:14:33 +08:00
|
|
|
TargetTriple.getEnvironment() == Triple::MuslEABIHF ||
|
2015-10-09 05:21:24 +08:00
|
|
|
isTargetAndroid()) &&
|
2014-04-03 04:32:05 +08:00
|
|
|
!isTargetDarwin() && !isTargetWindows();
|
2014-01-29 19:50:56 +08:00
|
|
|
}
|
|
|
|
|
2018-07-18 20:36:25 +08:00
|
|
|
bool isTargetHardFloat() const;
|
2017-01-28 07:58:02 +08:00
|
|
|
|
2015-10-09 05:21:24 +08:00
|
|
|
bool isTargetAndroid() const { return TargetTriple.isAndroid(); }
|
2007-01-20 03:22:40 +08:00
|
|
|
|
2017-01-28 07:58:02 +08:00
|
|
|
bool isXRaySupported() const override;
|
2016-09-19 08:54:35 +08:00
|
|
|
|
2014-12-18 10:20:58 +08:00
|
|
|
bool isAPCS_ABI() const;
|
|
|
|
bool isAAPCS_ABI() const;
|
2015-10-29 06:46:43 +08:00
|
|
|
bool isAAPCS16_ABI() const;
|
2007-02-14 03:52:28 +08:00
|
|
|
|
2016-08-08 23:28:31 +08:00
|
|
|
bool isROPI() const;
|
|
|
|
bool isRWPI() const;
|
|
|
|
|
2017-07-28 03:56:44 +08:00
|
|
|
bool useMachineScheduler() const { return UseMISched; }
|
2017-08-31 16:57:51 +08:00
|
|
|
bool disablePostRAScheduler() const { return DisablePostRAScheduler; }
|
2015-05-12 09:26:05 +08:00
|
|
|
bool useSoftFloat() const { return UseSoftFloat; }
|
2011-07-08 03:05:12 +08:00
|
|
|
bool isThumb() const { return InThumbMode; }
|
2019-04-05 06:40:06 +08:00
|
|
|
bool hasMinSize() const { return OptMinSize; }
|
2011-07-08 03:05:12 +08:00
|
|
|
bool isThumb1Only() const { return InThumbMode && !HasThumb2; }
|
|
|
|
bool isThumb2() const { return InThumbMode && HasThumb2; }
|
2011-07-07 08:08:19 +08:00
|
|
|
bool hasThumb2() const { return HasThumb2; }
|
2013-09-23 22:26:15 +08:00
|
|
|
bool isMClass() const { return ARMProcClass == MClass; }
|
|
|
|
bool isRClass() const { return ARMProcClass == RClass; }
|
|
|
|
bool isAClass() const { return ARMProcClass == AClass; }
|
2017-07-28 20:54:57 +08:00
|
|
|
bool isReadTPHard() const { return ReadTPHard; }
|
2007-01-19 15:51:42 +08:00
|
|
|
|
2019-11-30 01:01:05 +08:00
|
|
|
bool isR9Reserved() const {
|
|
|
|
return isTargetMachO() ? (ReserveR9 || !HasV6Ops) : ReserveR9;
|
2015-07-21 09:42:02 +08:00
|
|
|
}
|
2007-01-19 15:51:42 +08:00
|
|
|
|
[ARM] Generate consistent frame records for Thumb2
There is not an official documented ABI for frame pointers in Thumb2,
but we should try to emit something which is useful.
We use r7 as the frame pointer for Thumb code, which currently means
that if a function needs to save a high register (r8-r11), it will get
pushed to the stack between the frame pointer (r7) and link register
(r14). This means that while a stack unwinder can follow the chain of
frame pointers up the stack, it cannot know the offset to lr, so does
not know which functions correspond to the stack frames.
To fix this, we need to push the callee-saved registers in two batches,
with the first push saving the low registers, fp and lr, and the second
push saving the high registers. This is already implemented, but
previously only used for iOS. This patch turns it on for all Thumb2
targets when frame pointers are required by the ABI, and the frame
pointer is r7 (Windows uses r11, so this isn't a problem there). If
frame pointer elimination is enabled we still emit a single push/pop
even if we need a frame pointer for other reasons, to avoid increasing
code size.
We must also ensure that lr is pushed to the stack when using a frame
pointer, so that we end up with a complete frame record. Situations that
could cause this were rare, because we already push lr in most
situations so that we can return using the pop instruction.
Differential Revision: https://reviews.llvm.org/D23516
llvm-svn: 279506
2016-08-23 17:19:22 +08:00
|
|
|
bool useR7AsFramePointer() const {
|
|
|
|
return isTargetDarwin() || (!isTargetWindows() && isThumb());
|
|
|
|
}
|
2017-01-28 07:58:02 +08:00
|
|
|
|
2016-05-14 03:16:14 +08:00
|
|
|
/// Returns true if the frame setup is split into two separate pushes (first
|
|
|
|
/// r0-r7,lr then r8-r11), principally so that the frame pointer is adjacent
|
2016-10-12 05:14:03 +08:00
|
|
|
/// to lr. This is always required on Thumb1-only targets, as the push and
|
|
|
|
/// pop instructions can't access the high registers.
|
[ARM] Generate consistent frame records for Thumb2
There is not an official documented ABI for frame pointers in Thumb2,
but we should try to emit something which is useful.
We use r7 as the frame pointer for Thumb code, which currently means
that if a function needs to save a high register (r8-r11), it will get
pushed to the stack between the frame pointer (r7) and link register
(r14). This means that while a stack unwinder can follow the chain of
frame pointers up the stack, it cannot know the offset to lr, so does
not know which functions correspond to the stack frames.
To fix this, we need to push the callee-saved registers in two batches,
with the first push saving the low registers, fp and lr, and the second
push saving the high registers. This is already implemented, but
previously only used for iOS. This patch turns it on for all Thumb2
targets when frame pointers are required by the ABI, and the frame
pointer is r7 (Windows uses r11, so this isn't a problem there). If
frame pointer elimination is enabled we still emit a single push/pop
even if we need a frame pointer for other reasons, to avoid increasing
code size.
We must also ensure that lr is pushed to the stack when using a frame
pointer, so that we end up with a complete frame record. Situations that
could cause this were rare, because we already push lr in most
situations so that we can return using the pop instruction.
Differential Revision: https://reviews.llvm.org/D23516
llvm-svn: 279506
2016-08-23 17:19:22 +08:00
|
|
|
bool splitFramePushPop(const MachineFunction &MF) const {
|
2016-10-12 05:14:03 +08:00
|
|
|
return (useR7AsFramePointer() &&
|
|
|
|
MF.getTarget().Options.DisableFramePointerElim(MF)) ||
|
|
|
|
isThumb1Only();
|
2016-05-14 03:16:14 +08:00
|
|
|
}
|
|
|
|
|
2019-02-08 15:57:42 +08:00
|
|
|
bool useStride4VFPs() const;
|
2015-08-04 01:20:10 +08:00
|
|
|
|
2019-02-08 15:57:42 +08:00
|
|
|
bool useMovt() const;
|
2014-07-04 09:55:26 +08:00
|
|
|
|
2011-10-08 01:17:49 +08:00
|
|
|
/// True if this target/ABI configuration can use tail calls.
bool supportsTailCall() const { return SupportsTailCall; }
|
2009-11-24 08:44:37 +08:00
|
|
|
|
2015-07-29 06:44:28 +08:00
|
|
|
/// True if unaligned memory accesses are permitted, i.e. strict alignment
/// was not requested for this subtarget.
bool allowsUnalignedMem() const { return !StrictAlign; }
|
2010-09-28 12:09:35 +08:00
|
|
|
|
2013-11-14 02:29:49 +08:00
|
|
|
bool restrictIT() const { return RestrictIT; }
|
|
|
|
|
2009-05-24 03:50:50 +08:00
|
|
|
/// Name of the CPU this subtarget was configured for.
const std::string &getCPUString() const { return CPUString; }
|
2009-11-24 08:44:37 +08:00
|
|
|
|
2014-03-28 22:35:30 +08:00
|
|
|
bool isLittle() const { return IsLittle; }
|
|
|
|
|
2010-09-29 05:57:50 +08:00
|
|
|
unsigned getMispredictionPenalty() const;
|
2010-12-24 12:28:06 +08:00
|
|
|
|
2015-07-18 07:18:30 +08:00
|
|
|
/// Returns true if machine scheduler should be enabled.
|
|
|
|
bool enableMachineScheduler() const override;
|
|
|
|
|
2014-06-04 15:06:27 +08:00
|
|
|
/// True for some subtargets at > -O0.
|
2015-06-13 11:42:16 +08:00
|
|
|
bool enablePostRAScheduler() const override;
|
2009-05-24 03:50:50 +08:00
|
|
|
|
2019-11-05 17:10:58 +08:00
|
|
|
/// True for some subtargets at > -O0.
|
|
|
|
bool enablePostRAMachineScheduler() const override;
|
|
|
|
|
[LiveInterval] Allow updating subranges with slightly out-dated IR
During register coalescing, we update the live-intervals on-the-fly.
To do that we are in this strange mode where the live-intervals can
be slightly out-of-sync (more precisely they are forward looking)
compared to what the IR actually represents.
This happens because the register coalescer only updates the IR when
it is done with updating the live-intervals and it has to do it this
way because updating the IR on-the-fly would actually clobber some
information on how the live-ranges that are being updated look like.
This is problematic for updates that rely on the IR to accurately
represents the state of the live-ranges. Right now, we have only
one of those: stripValuesNotDefiningMask.
To reconcile this need of out-of-sync IR, this patch introduces a
new argument to LiveInterval::refineSubRanges that allows the code
doing the live range updates to reason about how the code should
look like after the coalescer will have rewritten the registers.
Essentially this captures how a subregister index with be offseted
to match its position in a new register class.
E.g., let say we want to merge:
V1.sub1:<2 x s32> = COPY V2.sub3:<4 x s32>
We do that by choosing a class where sub1:<2 x s32> and sub3:<4 x s32>
overlap, i.e., by choosing a class where we can find "offset + 1 == 3".
Put differently we align V2's sub3 with V1's sub1:
V2: sub0 sub1 sub2 sub3
V1: <offset> sub0 sub1
This offset will look like a composed subregidx in the the class:
V1.(composed sub2 with sub1):<4 x s32> = COPY V2.sub3:<4 x s32>
=> V1.(composed sub2 with sub1):<4 x s32> = COPY V2.sub3:<4 x s32>
Now if we didn't rewrite the uses and def of V1, all the checks for V1
need to account for this offset to match what the live intervals intend
to capture.
Prior to this patch, we would fail to recognize the uses and def of V1
and would end up with machine verifier errors: No live segment at def.
This could lead to miscompile as we would drop some live-ranges and
thus, miss some interferences.
For this problem to trigger, we need to reach stripValuesNotDefiningMask
while having a mismatch between the IR and the live-ranges (i.e.,
we have to apply a subreg offset to the IR.)
This requires the following three conditions:
1. An update of overlapping subreg lanes: e.g., dsub0 == <ssub0, ssub1>
2. An update with Tuple registers with a possibility to coalesce the
subreg index: e.g., v1.dsub_1 == v2.dsub_3
3. Subreg liveness enabled.
looking at the IR to decide what is alive and what is not, i.e., calling
stripValuesNotDefiningMask.
coalescer maintains for the live-ranges information.
None of the targets that currently use subreg liveness (i.e., the targets
that fulfill #3, Hexagon, AMDGPU, PowerPC, and SystemZ IIRC) expose #1 and
and #2, so this patch also artificial enables subreg liveness for ARM,
so that a nice test case can be attached.
2019-11-13 08:32:12 +08:00
|
|
|
/// Check whether this subtarget wants to use subregister liveness.
|
|
|
|
bool enableSubRegLiveness() const override;
|
|
|
|
|
2018-06-21 23:48:29 +08:00
|
|
|
/// Enable use of alias analysis during code generation (during MI
|
|
|
|
/// scheduling, DAGCombine, etc.).
|
2019-11-05 18:46:56 +08:00
|
|
|
bool useAA() const override { return true; }
|
2018-06-21 23:48:29 +08:00
|
|
|
|
2014-08-22 05:50:01 +08:00
|
|
|
// enableAtomicExpand- True if we need to expand our atomics.
|
|
|
|
bool enableAtomicExpand() const override;
|
2014-06-20 05:03:04 +08:00
|
|
|
|
2014-08-16 06:17:28 +08:00
|
|
|
/// getInstrItins - Return the instruction itineraries based on subtarget
|
2009-06-19 09:51:50 +08:00
|
|
|
/// selection.
|
2014-09-03 19:41:21 +08:00
|
|
|
const InstrItineraryData *getInstrItineraryData() const override {
|
2014-08-05 05:25:23 +08:00
|
|
|
return &InstrItins;
|
|
|
|
}
|
2009-06-19 09:51:50 +08:00
|
|
|
|
2007-01-19 15:51:42 +08:00
|
|
|
/// getStackAlignment - Returns the minimum alignment known to hold of the
|
|
|
|
/// stack frame on entry to the function and which must be maintained by every
|
|
|
|
/// function for this subtarget.
|
[Alignment][NFC] Use Align for TargetFrameLowering/Subtarget
Summary:
This is patch is part of a series to introduce an Alignment type.
See this thread for context: http://lists.llvm.org/pipermail/llvm-dev/2019-July/133851.html
See this patch for the introduction of the type: https://reviews.llvm.org/D64790
Reviewers: courbet
Subscribers: jholewinski, arsenm, dschuff, jyknight, dylanmckay, sdardis, nemanjai, jvesely, nhaehnle, sbc100, jgravelle-google, hiraditya, aheejin, kbarton, fedor.sergeev, asb, rbar, johnrusso, simoncook, apazos, sabuasal, niosHD, jrtc27, MaskRay, zzheng, edward-jones, atanasyan, rogfer01, MartinMosbeck, brucehoult, the_o, PkmX, jocewei, jsji, Jim, lenary, s.egerton, pzheng, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D68993
llvm-svn: 375084
2019-10-17 15:49:39 +08:00
|
|
|
Align getStackAlignment() const { return stackAlignment; }
|
2009-08-29 07:18:09 +08:00
|
|
|
|
2016-06-27 17:08:23 +08:00
|
|
|
unsigned getMaxInterleaveFactor() const { return MaxInterleaveFactor; }
|
|
|
|
|
2016-07-06 19:22:11 +08:00
|
|
|
unsigned getPartialUpdateClearance() const { return PartialUpdateClearance; }
|
|
|
|
|
2016-06-27 17:08:23 +08:00
|
|
|
ARMLdStMultipleTiming getLdStMultipleTiming() const {
|
|
|
|
return LdStMultipleTiming;
|
|
|
|
}
|
|
|
|
|
|
|
|
int getPreISelOperandLatencyAdjustment() const {
|
|
|
|
return PreISelOperandLatencyAdjustment;
|
|
|
|
}
|
|
|
|
|
2016-06-28 23:38:13 +08:00
|
|
|
/// True if the GV will be accessed via an indirect symbol.
|
|
|
|
bool isGVIndirectSymbol(const GlobalValue *GV) const;
|
2014-07-16 01:18:41 +08:00
|
|
|
|
2017-08-29 17:47:55 +08:00
|
|
|
/// Returns the constant pool modifier needed to access the GV.
|
2017-11-14 04:45:38 +08:00
|
|
|
bool isGVInGOT(const GlobalValue *GV) const;
|
2017-08-29 17:47:55 +08:00
|
|
|
|
2015-05-23 09:14:08 +08:00
|
|
|
/// True if fast-isel is used.
|
|
|
|
bool useFastISel() const;
|
2017-08-29 04:20:47 +08:00
|
|
|
|
|
|
|
/// Returns the correct return opcode for the current feature set.
|
|
|
|
/// Use BX if available to allow mixing thumb/arm code, but fall back
|
|
|
|
/// to plain mov pc,lr on ARMv4.
|
|
|
|
unsigned getReturnOpcode() const {
|
|
|
|
if (isThumb())
|
|
|
|
return ARM::tBX_RET;
|
|
|
|
if (hasV4TOps())
|
|
|
|
return ARM::BX_RET;
|
|
|
|
return ARM::MOVPCLR;
|
|
|
|
}
|
2017-11-14 04:45:38 +08:00
|
|
|
|
|
|
|
/// Allow movt+movw for PIC global address calculation.
|
|
|
|
/// ELF does not have GOT relocations for movt+movw.
|
|
|
|
/// ROPI does not use GOT.
|
|
|
|
bool allowPositionIndependentMovt() const {
|
|
|
|
return isROPI() || !isTargetELF();
|
|
|
|
}
|
2018-09-13 18:28:05 +08:00
|
|
|
|
[LLVM][Alignment] Make functions using log of alignment explicit
Summary:
This patch renames functions that takes or returns alignment as log2, this patch will help with the transition to llvm::Align.
The renaming makes it explicit that we deal with log(alignment) instead of a power of two alignment.
A few renames uncovered dubious assignments:
- `MirParser`/`MirPrinter` was expecting powers of two but `MachineFunction` and `MachineBasicBlock` were using deal with log2(align). This patch fixes it and updates the documentation.
- `MachineBlockPlacement` exposes two flags (`align-all-blocks` and `align-all-nofallthru-blocks`) supposedly interpreted as power of two alignments, internally these values are interpreted as log2(align). This patch updates the documentation,
- `MachineFunctionexposes` exposes `align-all-functions` also interpreted as power of two alignment, internally this value is interpreted as log2(align). This patch updates the documentation,
Reviewers: lattner, thegameg, courbet
Subscribers: dschuff, arsenm, jyknight, dylanmckay, sdardis, nemanjai, jvesely, nhaehnle, javed.absar, hiraditya, kbarton, fedor.sergeev, asb, rbar, johnrusso, simoncook, apazos, sabuasal, niosHD, jrtc27, MaskRay, zzheng, edward-jones, atanasyan, rogfer01, MartinMosbeck, brucehoult, the_o, dexonsmith, PkmX, jocewei, jsji, Jim, s.egerton, llvm-commits, courbet
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D65945
llvm-svn: 371045
2019-09-05 18:00:22 +08:00
|
|
|
/// Preferred loop alignment, expressed as log2 of the byte alignment.
unsigned getPrefLoopLogAlignment() const { return PrefLoopLogAlignment; }
|
[ARM] Thumb2: favor R4-R7 over R12/LR in allocation order when opt for minsize
For Thumb2, we prefer low regs (costPerUse = 0) to allow narrow
encoding. However, current allocation order is like:
R0-R3, R12, LR, R4-R11
As a result, a lot of instructs that use R12/LR will be wide instrs.
This patch changes the allocation order to:
R0-R7, R12, LR, R8-R11
for thumb2 and -Osize.
In most cases, there is no extra push/pop instrs as they will be folded
into existing ones. There might be slight performance impact due to more
stack usage, so we only enable it when opt for min size.
https://reviews.llvm.org/D30324
llvm-svn: 365014
2019-07-03 17:58:52 +08:00
|
|
|
|
2019-08-14 02:12:08 +08:00
|
|
|
/// Cost factor applied to MVE vector operations — NOTE(review): presumably
/// consumed by the target's cost model; confirm against callers in
/// ARMTargetTransformInfo.
unsigned getMVEVectorCostFactor() const { return MVEVectorCostFactor; }
|
|
|
|
|
[ARM] Thumb2: favor R4-R7 over R12/LR in allocation order when opt for minsize
For Thumb2, we prefer low regs (costPerUse = 0) to allow narrow
encoding. However, current allocation order is like:
R0-R3, R12, LR, R4-R11
As a result, a lot of instructs that use R12/LR will be wide instrs.
This patch changes the allocation order to:
R0-R7, R12, LR, R8-R11
for thumb2 and -Osize.
In most cases, there is no extra push/pop instrs as they will be folded
into existing ones. There might be slight performance impact due to more
stack usage, so we only enable it when opt for min size.
https://reviews.llvm.org/D30324
llvm-svn: 365014
2019-07-03 17:58:52 +08:00
|
|
|
bool ignoreCSRForAllocationOrder(const MachineFunction &MF,
|
|
|
|
unsigned PhysReg) const override;
|
|
|
|
unsigned getGPRAllocationOrder(const MachineFunction &MF) const;
|
2007-01-19 15:51:42 +08:00
|
|
|
};
|
|
|
|
|
2017-01-28 07:58:02 +08:00
|
|
|
} // end namespace llvm
|
|
|
|
|
|
|
|
#endif // LLVM_LIB_TARGET_ARM_ARMSUBTARGET_H
|