//===-- X86TargetTransformInfo.h - X86 specific TTI -------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file defines a TargetTransformInfo::Concept conforming object specific
/// to the X86 target machine. It uses the target's detailed information to
/// provide more precise answers to certain TTI queries, while letting the
/// target-independent and default TTI implementations handle the rest.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_X86_X86TARGETTRANSFORMINFO_H
#define LLVM_LIB_TARGET_X86_X86TARGETTRANSFORMINFO_H

#include "X86TargetMachine.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"

namespace llvm {

class InstCombiner;

class X86TTIImpl : public BasicTTIImplBase<X86TTIImpl> {
  typedef BasicTTIImplBase<X86TTIImpl> BaseT;
  typedef TargetTransformInfo TTI;
  friend BaseT;

  const X86Subtarget *ST;
  const X86TargetLowering *TLI;

  const X86Subtarget *getST() const { return ST; }
  const X86TargetLowering *getTLI() const { return TLI; }

2019-02-20 01:05:11 +08:00
|
|
|
const FeatureBitset InlineFeatureIgnoreList = {
|
2019-03-28 21:38:58 +08:00
|
|
|
// This indicates the CPU is 64 bit capable not that we are in 64-bit
|
|
|
|
// mode.
|
|
|
|
X86::Feature64Bit,
|
|
|
|
|
|
|
|
// These features don't have any intrinsics or ABI effect.
|
|
|
|
X86::FeatureNOPL,
|
|
|
|
X86::FeatureCMPXCHG16B,
|
|
|
|
X86::FeatureLAHFSAHF,
|
|
|
|
|
|
|
|
// Codegen control options.
|
|
|
|
X86::FeatureFast11ByteNOP,
|
|
|
|
X86::FeatureFast15ByteNOP,
|
|
|
|
X86::FeatureFastBEXTR,
|
|
|
|
X86::FeatureFastHorizontalOps,
|
|
|
|
X86::FeatureFastLZCNT,
|
|
|
|
X86::FeatureFastScalarFSQRT,
|
|
|
|
X86::FeatureFastSHLDRotate,
|
2019-05-17 14:40:21 +08:00
|
|
|
X86::FeatureFastScalarShiftMasks,
|
|
|
|
X86::FeatureFastVectorShiftMasks,
|
2019-03-28 21:38:58 +08:00
|
|
|
X86::FeatureFastVariableShuffle,
|
|
|
|
X86::FeatureFastVectorFSQRT,
|
|
|
|
X86::FeatureLEAForSP,
|
|
|
|
X86::FeatureLEAUsesAG,
|
|
|
|
X86::FeatureLZCNTFalseDeps,
|
2019-03-28 22:12:46 +08:00
|
|
|
X86::FeatureBranchFusion,
|
2019-03-28 21:38:58 +08:00
|
|
|
X86::FeatureMacroFusion,
|
|
|
|
X86::FeaturePadShortFunctions,
|
|
|
|
X86::FeaturePOPCNTFalseDeps,
|
|
|
|
X86::FeatureSSEUnalignedMem,
|
|
|
|
X86::FeatureSlow3OpsLEA,
|
|
|
|
X86::FeatureSlowDivide32,
|
|
|
|
X86::FeatureSlowDivide64,
|
|
|
|
X86::FeatureSlowIncDec,
|
|
|
|
X86::FeatureSlowLEA,
|
|
|
|
X86::FeatureSlowPMADDWD,
|
|
|
|
X86::FeatureSlowPMULLD,
|
|
|
|
X86::FeatureSlowSHLD,
|
|
|
|
X86::FeatureSlowTwoMemOps,
|
|
|
|
X86::FeatureSlowUAMem16,
|
2019-10-17 17:38:15 +08:00
|
|
|
X86::FeaturePreferMaskRegisters,
|
2019-11-05 02:20:00 +08:00
|
|
|
X86::FeatureInsertVZEROUPPER,
|
2019-12-06 02:24:10 +08:00
|
|
|
X86::FeatureUseGLMDivSqrtCosts,
|
2019-03-28 21:38:58 +08:00
|
|
|
|
|
|
|
// Perf-tuning flags.
|
|
|
|
X86::FeatureHasFastGather,
|
|
|
|
X86::FeatureSlowUAMem32,
|
|
|
|
|
|
|
|
// Based on whether user set the -mprefer-vector-width command line.
|
2019-09-08 03:54:22 +08:00
|
|
|
X86::FeaturePrefer128Bit,
|
2019-03-28 21:38:58 +08:00
|
|
|
X86::FeaturePrefer256Bit,
|
|
|
|
|
|
|
|
// CPU name enums. These just follow CPU string.
|
|
|
|
X86::ProcIntelAtom,
|
|
|
|
X86::ProcIntelSLM,
|
2019-02-20 01:05:11 +08:00
|
|
|
};
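
  // How the list above is intended to be used (a rough sketch, not a verbatim
  // copy of the logic in X86TargetTransformInfo.cpp): the inlining
  // compatibility check masks these bits out of both subtargets' feature sets
  // before comparing them, so purely tuning-related differences do not block
  // inlining. Here CallerST/CalleeST stand for the caller's and callee's
  // X86Subtarget:
  //
  //   FeatureBitset CallerBits =
  //       CallerST->getFeatureBits() & ~InlineFeatureIgnoreList;
  //   FeatureBitset CalleeBits =
  //       CalleeST->getFeatureBits() & ~InlineFeatureIgnoreList;
  //   bool Compatible = (CallerBits & CalleeBits) == CalleeBits;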

public:
  explicit X86TTIImpl(const X86TargetMachine *TM, const Function &F)
      : BaseT(TM, F.getParent()->getDataLayout()), ST(TM->getSubtargetImpl(F)),
        TLI(ST->getTargetLowering()) {}
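
  // Construction sketch (an assumption about the surrounding glue code, not
  // something declared in this header): the X86 target machine is expected to
  // hand this implementation to clients wrapped in the generic TTI interface,
  // roughly along the lines of:
  //
  //   TargetTransformInfo
  //   X86TargetMachine::getTargetTransformInfo(const Function &F) {
  //     return TargetTransformInfo(X86TTIImpl(this, F));
  //   }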

  /// \name Scalar TTI Implementations
  /// @{

  TTI::PopcntSupportKind getPopcntSupport(unsigned TyWidth);
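
  // Caller-side example (illustrative only; `TTI` is a TargetTransformInfo
  // obtained from the usual analyses, and the result depends on the
  // subtarget's POPCNT support):
  //
  //   if (TTI.getPopcntSupport(32) == TargetTransformInfo::PSK_FastHardware)
  //     ; // a scalar popcount is expected to be a single cheap instruction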

  /// @}

  /// \name Cache TTI Implementation
  /// @{
  llvm::Optional<unsigned> getCacheSize(
      TargetTransformInfo::CacheLevel Level) const override;
  llvm::Optional<unsigned> getCacheAssociativity(
      TargetTransformInfo::CacheLevel Level) const override;
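
  // Caller-side example (illustrative only; `TTI` is a TargetTransformInfo
  // obtained from the usual analyses and `TileBytes` is a hypothetical tuning
  // variable; the concrete sizes come from the .cpp implementation):
  //
  //   if (Optional<unsigned> L1Size =
  //           TTI.getCacheSize(TargetTransformInfo::CacheLevel::L1D))
  //     TileBytes = std::min(TileBytes, *L1Size);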
  /// @}

  /// \name Vector TTI Implementations
  /// @{

  unsigned getNumberOfRegisters(unsigned ClassID) const;
  unsigned getRegisterBitWidth(bool Vector) const;
  unsigned getLoadStoreVecRegBitWidth(unsigned AS) const;
  unsigned getMaxInterleaveFactor(unsigned VF);
  int getArithmeticInstrCost(
      unsigned Opcode, Type *Ty,
      TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput,
      TTI::OperandValueKind Opd1Info = TTI::OK_AnyValue,
      TTI::OperandValueKind Opd2Info = TTI::OK_AnyValue,
      TTI::OperandValueProperties Opd1PropInfo = TTI::OP_None,
      TTI::OperandValueProperties Opd2PropInfo = TTI::OP_None,
      ArrayRef<const Value *> Args = ArrayRef<const Value *>(),
      const Instruction *CxtI = nullptr);
  int getShuffleCost(TTI::ShuffleKind Kind, VectorType *Tp, int Index,
                     VectorType *SubTp);
  int getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                       TTI::CastContextHint CCH, TTI::TargetCostKind CostKind,
                       const Instruction *I = nullptr);
  int getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
                         CmpInst::Predicate VecPred,
                         TTI::TargetCostKind CostKind,
                         const Instruction *I = nullptr);
  int getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index);
  unsigned getScalarizationOverhead(VectorType *Ty, const APInt &DemandedElts,
                                    bool Insert, bool Extract);
  int getMemoryOpCost(unsigned Opcode, Type *Src, MaybeAlign Alignment,
                      unsigned AddressSpace, TTI::TargetCostKind CostKind,
                      const Instruction *I = nullptr);
  int getMaskedMemoryOpCost(
      unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace,
      TTI::TargetCostKind CostKind = TTI::TCK_SizeAndLatency);
  int getGatherScatterOpCost(unsigned Opcode, Type *DataTy, const Value *Ptr,
                             bool VariableMask, Align Alignment,
                             TTI::TargetCostKind CostKind,
                             const Instruction *I);
  int getAddressComputationCost(Type *PtrTy, ScalarEvolution *SE,
                                const SCEV *Ptr);

  Optional<Instruction *> instCombineIntrinsic(InstCombiner &IC,
                                               IntrinsicInst &II) const;
  Optional<Value *>
  simplifyDemandedUseBitsIntrinsic(InstCombiner &IC, IntrinsicInst &II,
                                   APInt DemandedMask, KnownBits &Known,
                                   bool &KnownBitsComputed) const;
  Optional<Value *> simplifyDemandedVectorEltsIntrinsic(
      InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts,
      APInt &UndefElts2, APInt &UndefElts3,
      std::function<void(Instruction *, unsigned, APInt, APInt &)>
          SimplifyAndSetOp) const;

  unsigned getAtomicMemIntrinsicMaxElementSize() const;

  int getTypeBasedIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
                                     TTI::TargetCostKind CostKind);
  int getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
                            TTI::TargetCostKind CostKind);

  int getArithmeticReductionCost(
      unsigned Opcode, VectorType *Ty, bool IsPairwiseForm,
      TTI::TargetCostKind CostKind = TTI::TCK_SizeAndLatency);

  int getMinMaxCost(Type *Ty, Type *CondTy, bool IsUnsigned);

  int getMinMaxReductionCost(VectorType *Ty, VectorType *CondTy,
                             bool IsPairwiseForm, bool IsUnsigned,
                             TTI::TargetCostKind CostKind);

  int getInterleavedMemoryOpCost(
      unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
      Align Alignment, unsigned AddressSpace,
      TTI::TargetCostKind CostKind = TTI::TCK_SizeAndLatency,
      bool UseMaskForCond = false, bool UseMaskForGaps = false);
  int getInterleavedMemoryOpCostAVX512(
      unsigned Opcode, FixedVectorType *VecTy, unsigned Factor,
      ArrayRef<unsigned> Indices, Align Alignment, unsigned AddressSpace,
      TTI::TargetCostKind CostKind = TTI::TCK_SizeAndLatency,
      bool UseMaskForCond = false, bool UseMaskForGaps = false);
  int getInterleavedMemoryOpCostAVX2(
      unsigned Opcode, FixedVectorType *VecTy, unsigned Factor,
      ArrayRef<unsigned> Indices, Align Alignment, unsigned AddressSpace,
      TTI::TargetCostKind CostKind = TTI::TCK_SizeAndLatency,
      bool UseMaskForCond = false, bool UseMaskForGaps = false);

  int getIntImmCost(int64_t);

  int getIntImmCost(const APInt &Imm, Type *Ty, TTI::TargetCostKind CostKind);

  unsigned getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind);

  int getIntImmCostInst(unsigned Opcode, unsigned Idx, const APInt &Imm,
                        Type *Ty, TTI::TargetCostKind CostKind,
                        Instruction *Inst = nullptr);
  int getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
                          Type *Ty, TTI::TargetCostKind CostKind);
  bool isLSRCostLess(TargetTransformInfo::LSRCost &C1,
                     TargetTransformInfo::LSRCost &C2);
  bool canMacroFuseCmp();
  bool isLegalMaskedLoad(Type *DataType, Align Alignment);
  bool isLegalMaskedStore(Type *DataType, Align Alignment);
  bool isLegalNTLoad(Type *DataType, Align Alignment);
  bool isLegalNTStore(Type *DataType, Align Alignment);
  bool isLegalMaskedGather(Type *DataType, Align Alignment);
  bool isLegalMaskedScatter(Type *DataType, Align Alignment);
  bool isLegalMaskedExpandLoad(Type *DataType);
  bool isLegalMaskedCompressStore(Type *DataType);
  bool hasDivRemOp(Type *DataType, bool IsSigned);
  bool isFCmpOrdCheaperThanFCmpZero(Type *Ty);
  bool areInlineCompatible(const Function *Caller,
                           const Function *Callee) const;
  bool areFunctionArgsABICompatible(const Function *Caller,
                                    const Function *Callee,
                                    SmallPtrSetImpl<Argument *> &Args) const;
  TTI::MemCmpExpansionOptions enableMemCmpExpansion(bool OptSize,
                                                    bool IsZeroCmp) const;
  bool enableInterleavedAccessVectorization();

  /// Allow vectorizers to form reduction intrinsics in IR. The IR is expanded
  /// into shuffles and vector math/logic by the backend
  /// (see TTI::shouldExpandReduction).
  bool useReductionIntrinsic(unsigned Opcode, Type *Ty,
                             TTI::ReductionFlags Flags) const {
    return true;
  }
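
  // For illustration only (the exact intrinsic naming differs between LLVM
  // versions): when this hook returns true, a vectorizer may emit IR such as
  //
  //   %sum = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %v)
  //
  // which the backend later expands into a shuffle-and-add tree, as noted in
  // the comment above (see TTI::shouldExpandReduction).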

private:
  int getGSScalarCost(unsigned Opcode, Type *DataTy, bool VariableMask,
                      Align Alignment, unsigned AddressSpace);
  int getGSVectorCost(unsigned Opcode, Type *DataTy, const Value *Ptr,
                      Align Alignment, unsigned AddressSpace);

  int getGatherOverhead() const;
  int getScatterOverhead() const;

  /// @}
};

} // end namespace llvm

#endif