//===-- SystemZTargetTransformInfo.h - SystemZ-specific TTI ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_SYSTEMZ_SYSTEMZTARGETTRANSFORMINFO_H
#define LLVM_LIB_TARGET_SYSTEMZ_SYSTEMZTARGETTRANSFORMINFO_H

#include "SystemZTargetMachine.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"

namespace llvm {

class SystemZTTIImpl : public BasicTTIImplBase<SystemZTTIImpl> {
  typedef BasicTTIImplBase<SystemZTTIImpl> BaseT;
  typedef TargetTransformInfo TTI;
  friend BaseT;

  const SystemZSubtarget *ST;
  const SystemZTargetLowering *TLI;

  const SystemZSubtarget *getST() const { return ST; }
  const SystemZTargetLowering *getTLI() const { return TLI; }

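  // Estimated cost of a call to a runtime library routine, used when an
  // operation must be expanded to a libcall; a heuristic value rather than a
  // measured cycle count.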
  unsigned const LIBCALL_COST = 30;

public:
  explicit SystemZTTIImpl(const SystemZTargetMachine *TM, const Function &F)
      : BaseT(TM, F.getParent()->getDataLayout()), ST(TM->getSubtargetImpl(F)),
        TLI(ST->getTargetLowering()) {}

  /// \name Scalar TTI Implementations
  /// @{

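  // Boost the generic inlining threshold; the 3x multiplier is a target
  // tuning choice rather than a derived value.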
  unsigned getInliningThresholdMultiplier() { return 3; }

  int getIntImmCost(const APInt &Imm, Type *Ty);

  int getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm, Type *Ty);
  int getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
                    Type *Ty);

  TTI::PopcntSupportKind getPopcntSupport(unsigned TyWidth);

  void getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                               TTI::UnrollingPreferences &UP);

  bool isLSRCostLess(TargetTransformInfo::LSRCost &C1,
                     TargetTransformInfo::LSRCost &C2);

  /// @}

  /// \name Vector TTI Implementations
  /// @{

  unsigned getNumberOfRegisters(unsigned ClassID) const;
  unsigned getRegisterBitWidth(bool Vector) const;

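  // z/Architecture uses 256-byte cache lines; the prefetch distance and
  // minimum stride below are target tuning values.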
  unsigned getCacheLineSize() const override { return 256; }
  unsigned getPrefetchDistance() const override { return 2000; }
  unsigned getMinPrefetchStride() const override { return 2048; }

  bool hasDivRemOp(Type *DataType, bool IsSigned);
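  // Address computations are assumed to be at least as cheap in scalar
  // registers, given SystemZ's rich set of addressing modes.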
  bool prefersVectorizedAddressing() { return false; }
  bool LSRWithInstrQueries() { return true; }
  bool supportsEfficientVectorElementLoadStore() { return true; }
  bool enableInterleavedAccessVectorization() { return true; }

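  // Instruction cost hooks, refining the generic costs inherited from
  // BasicTTIImplBase with SystemZ-specific values.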
  int getArithmeticInstrCost(
      unsigned Opcode, Type *Ty,
      TTI::OperandValueKind Opd1Info = TTI::OK_AnyValue,
      TTI::OperandValueKind Opd2Info = TTI::OK_AnyValue,
      TTI::OperandValueProperties Opd1PropInfo = TTI::OP_None,
      TTI::OperandValueProperties Opd2PropInfo = TTI::OP_None,
      ArrayRef<const Value *> Args = ArrayRef<const Value *>(),
      const Instruction *CxtI = nullptr);
  int getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index, Type *SubTp);
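  // SystemZ-specific helpers for the cost computations (not part of the
  // generic TTI interface).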
  unsigned getVectorTruncCost(Type *SrcTy, Type *DstTy);
  unsigned getVectorBitmaskConversionCost(Type *SrcTy, Type *DstTy);
  unsigned getBoolVecToIntConversionCost(unsigned Opcode, Type *Dst,
                                         const Instruction *I);
  int getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                       const Instruction *I = nullptr);
  int getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
                         const Instruction *I = nullptr);
  int getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index);
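  // Returns true if Ld can be fused as a memory operand into its user;
  // FoldedValue receives the folded value (see the implementation for the
  // exact conditions).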
  bool isFoldableLoad(const LoadInst *Ld, const Instruction *&FoldedValue);
  int getMemoryOpCost(unsigned Opcode, Type *Src, MaybeAlign Alignment,
                      unsigned AddressSpace, const Instruction *I = nullptr);

  int getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
                                 unsigned Factor,
                                 ArrayRef<unsigned> Indices,
                                 unsigned Alignment,
                                 unsigned AddressSpace,
                                 bool UseMaskForCond = false,
                                 bool UseMaskForGaps = false);

  int getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
                            ArrayRef<Value *> Args, FastMathFlags FMF,
                            unsigned VF = 1);
  int getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
                            ArrayRef<Type *> Tys, FastMathFlags FMF,
                            unsigned ScalarizationCostPassed = UINT_MAX);
  /// @}
};

} // end namespace llvm

#endif