//===-- SystemZTargetTransformInfo.h - SystemZ-specific TTI ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_SYSTEMZ_SYSTEMZTARGETTRANSFORMINFO_H
#define LLVM_LIB_TARGET_SYSTEMZ_SYSTEMZTARGETTRANSFORMINFO_H

#include "SystemZTargetMachine.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"

namespace llvm {

class SystemZTTIImpl : public BasicTTIImplBase<SystemZTTIImpl> {
  typedef BasicTTIImplBase<SystemZTTIImpl> BaseT;
  typedef TargetTransformInfo TTI;
  friend BaseT;

  const SystemZSubtarget *ST;
  const SystemZTargetLowering *TLI;

  const SystemZSubtarget *getST() const { return ST; }
  const SystemZTargetLowering *getTLI() const { return TLI; }
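
  // Cost value assumed for an operation that has to be implemented as a call
  // to a library routine.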
  unsigned const LIBCALL_COST = 30;

public:
  explicit SystemZTTIImpl(const SystemZTargetMachine *TM, const Function &F)
      : BaseT(TM, F.getParent()->getDataLayout()), ST(TM->getSubtargetImpl(F)),
        TLI(ST->getTargetLowering()) {}

  /// \name Scalar TTI Implementations
  /// @{
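
  // Encourage inlining by scaling the default inlining threshold by 3.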
  unsigned getInliningThresholdMultiplier() { return 3; }

  int getIntImmCost(const APInt &Imm, Type *Ty, TTI::TargetCostKind CostKind);

  int getIntImmCostInst(unsigned Opcode, unsigned Idx, const APInt &Imm,
                        Type *Ty, TTI::TargetCostKind CostKind,
                        Instruction *Inst = nullptr);
  int getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
                          Type *Ty, TTI::TargetCostKind CostKind);

  TTI::PopcntSupportKind getPopcntSupport(unsigned TyWidth);

  void getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                               TTI::UnrollingPreferences &UP);

  void getPeelingPreferences(Loop *L, ScalarEvolution &SE,
                             TTI::PeelingPreferences &PP);

  bool isLSRCostLess(TargetTransformInfo::LSRCost &C1,
                     TargetTransformInfo::LSRCost &C2);
  /// @}

  /// \name Vector TTI Implementations
  /// @{

  unsigned getNumberOfRegisters(unsigned ClassID) const;
  unsigned getRegisterBitWidth(bool Vector) const;
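
  // z/Architecture processors have 256-byte cache lines.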
  unsigned getCacheLineSize() const override { return 256; }
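  // Emit software prefetches roughly this many instructions ahead of the
  // corresponding memory access.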
  unsigned getPrefetchDistance() const override { return 4500; }
  unsigned getMinPrefetchStride(unsigned NumMemAccesses,
                                unsigned NumStridedMemAccesses,
                                unsigned NumPrefetches,
                                bool HasCall) const override;
  bool enableWritePrefetching() const override { return true; }

  bool hasDivRemOp(Type *DataType, bool IsSigned);
  bool prefersVectorizedAddressing() { return false; }
  bool LSRWithInstrQueries() { return true; }
  bool supportsEfficientVectorElementLoadStore() { return true; }
  bool enableInterleavedAccessVectorization() { return true; }

  int getArithmeticInstrCost(
      unsigned Opcode, Type *Ty,
      TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput,
      TTI::OperandValueKind Opd1Info = TTI::OK_AnyValue,
      TTI::OperandValueKind Opd2Info = TTI::OK_AnyValue,
      TTI::OperandValueProperties Opd1PropInfo = TTI::OP_None,
      TTI::OperandValueProperties Opd2PropInfo = TTI::OP_None,
      ArrayRef<const Value *> Args = ArrayRef<const Value *>(),
      const Instruction *CxtI = nullptr);
  int getShuffleCost(TTI::ShuffleKind Kind, VectorType *Tp, int Index,
                     VectorType *SubTp);
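  // SystemZ-specific costing helpers; these are not part of the generic TTI
  // interface.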
  unsigned getVectorTruncCost(Type *SrcTy, Type *DstTy);
  unsigned getVectorBitmaskConversionCost(Type *SrcTy, Type *DstTy);
  unsigned getBoolVecToIntConversionCost(unsigned Opcode, Type *Dst,
                                         const Instruction *I);
  int getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                       TTI::CastContextHint CCH, TTI::TargetCostKind CostKind,
                       const Instruction *I = nullptr);
  int getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
                         CmpInst::Predicate VecPred,
                         TTI::TargetCostKind CostKind,
                         const Instruction *I = nullptr);
  int getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index);
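  // Returns true if the load Ld can be folded into its single user as a
  // memory operand.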
  bool isFoldableLoad(const LoadInst *Ld, const Instruction *&FoldedValue);
  int getMemoryOpCost(unsigned Opcode, Type *Src, MaybeAlign Alignment,
                      unsigned AddressSpace, TTI::TargetCostKind CostKind,
                      const Instruction *I = nullptr);

  int getInterleavedMemoryOpCost(
      unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
      Align Alignment, unsigned AddressSpace,
      TTI::TargetCostKind CostKind = TTI::TCK_SizeAndLatency,
      bool UseMaskForCond = false, bool UseMaskForGaps = false);

  int getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
                            TTI::TargetCostKind CostKind);
  /// @}
};

} // end namespace llvm

#endif