//===-- PPCTargetTransformInfo.h - PPC specific TTI -------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file defines a TargetTransformInfo::Concept conforming object specific
/// to the PPC target machine. It uses the target's detailed information to
/// provide more precise answers to certain TTI queries, while letting the
/// target independent and default TTI implementations handle the rest.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_POWERPC_PPCTARGETTRANSFORMINFO_H
#define LLVM_LIB_TARGET_POWERPC_PPCTARGETTRANSFORMINFO_H

#include "PPCTargetMachine.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/CodeGen/TargetLowering.h"

namespace llvm {

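// PPCTTIImpl is constructed per function (via
// PPCTargetMachine::getTargetTransformInfo), so its answers can reflect the
// exact subtarget the function is being compiled for.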
class PPCTTIImpl : public BasicTTIImplBase<PPCTTIImpl> {
  typedef BasicTTIImplBase<PPCTTIImpl> BaseT;
  typedef TargetTransformInfo TTI;
  friend BaseT;

  const PPCSubtarget *ST;
  const PPCTargetLowering *TLI;

  const PPCSubtarget *getST() const { return ST; }
  const PPCTargetLowering *getTLI() const { return TLI; }
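  // Check whether \p BB contains anything that might be lowered in a way
  // that uses the counter register (CTR), which would prevent forming a
  // CTR-based hardware loop. \p Visited memoizes values already examined so
  // that large, heavily shared expression trees are not re-walked during the
  // recursive operand walk (compile-time fix, see D75790).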
  bool mightUseCTR(BasicBlock *BB, TargetLibraryInfo *LibInfo,
                   SmallPtrSetImpl<const Value *> &Visited);

public:
  explicit PPCTTIImpl(const PPCTargetMachine *TM, const Function &F)
      : BaseT(TM, F.getParent()->getDataLayout()), ST(TM->getSubtargetImpl(F)),
        TLI(ST->getTargetLowering()) {}

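  // PPC-specific instcombine folds for target intrinsics (for example,
  // Altivec lvx/stvx with sufficiently aligned pointers may become ordinary
  // loads/stores); see the implementation for the exact set of folds.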
  Optional<Instruction *> instCombineIntrinsic(InstCombiner &IC,
                                               IntrinsicInst &II) const;

  /// \name Scalar TTI Implementations
  /// @{

  using BaseT::getIntImmCost;
  int getIntImmCost(const APInt &Imm, Type *Ty,
                    TTI::TargetCostKind CostKind);

  int getIntImmCostInst(unsigned Opcode, unsigned Idx, const APInt &Imm,
                        Type *Ty, TTI::TargetCostKind CostKind,
                        Instruction *Inst = nullptr);
  int getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
                          Type *Ty, TTI::TargetCostKind CostKind);

  unsigned getUserCost(const User *U, ArrayRef<const Value *> Operands,
                       TTI::TargetCostKind CostKind);

  TTI::PopcntSupportKind getPopcntSupport(unsigned TyWidth);
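  // Decide whether a CTR-based hardware loop (mtctr/bdnz) can and should be
  // formed for \p L; loops containing anything that might clobber the
  // counter register are rejected.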
  bool isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE,
                                AssumptionCache &AC,
                                TargetLibraryInfo *LibInfo,
                                HardwareLoopInfo &HWLoopInfo);
  bool canSaveCmp(Loop *L, BranchInst **BI, ScalarEvolution *SE, LoopInfo *LI,
                  DominatorTree *DT, AssumptionCache *AC,
                  TargetLibraryInfo *LibInfo);
  bool getTgtMemIntrinsic(IntrinsicInst *Inst, MemIntrinsicInfo &Info);
  void getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                               TTI::UnrollingPreferences &UP);
  void getPeelingPreferences(Loop *L, ScalarEvolution &SE,
                             TTI::PeelingPreferences &PP);
  bool isLSRCostLess(TargetTransformInfo::LSRCost &C1,
                     TargetTransformInfo::LSRCost &C2);
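  // Indicates that for LSR on PPC, register pressure (not instruction count)
  // is treated as the dominant cost component.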
  bool isNumRegsMajorCostOfLSR();
  /// @}

  /// \name Vector TTI Implementations
  /// @{

  bool useColdCCForColdCall(Function &F);
  bool enableAggressiveInterleaving(bool LoopHasReductions);
  TTI::MemCmpExpansionOptions enableMemCmpExpansion(bool OptSize,
                                                    bool IsZeroCmp) const;
  bool enableInterleavedAccessVectorization();
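
  // Abstract register classes used by the vectorizer to estimate register
  // pressure separately per IR type; these model IR values rather than the
  // backend's physical register classes (see D67148). With VSX enabled, FP
  // scalars and all vectors share the 64 VSX registers, so fewer distinct
  // classes apply than without VSX.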
  enum PPCRegisterClass {
    GPRRC, FPRRC, VRRC, VSXRC
  };
  unsigned getNumberOfRegisters(unsigned ClassID) const;
  unsigned getRegisterClassForType(bool Vector, Type *Ty = nullptr) const;
  const char* getRegisterClassName(unsigned ClassID) const;
  unsigned getRegisterBitWidth(bool Vector) const;
  unsigned getCacheLineSize() const override;
  unsigned getPrefetchDistance() const override;
  unsigned getMaxInterleaveFactor(unsigned VF);
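  // Adjust a base vector cost for PPC specifics (for instance, subtargets
  // where certain vector operations occupy two dispatch units); see the
  // implementation for the exact rules.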
  int vectorCostAdjustment(int Cost, unsigned Opcode, Type *Ty1, Type *Ty2);
  int getArithmeticInstrCost(
      unsigned Opcode, Type *Ty,
      TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput,
      TTI::OperandValueKind Opd1Info = TTI::OK_AnyValue,
      TTI::OperandValueKind Opd2Info = TTI::OK_AnyValue,
      TTI::OperandValueProperties Opd1PropInfo = TTI::OP_None,
      TTI::OperandValueProperties Opd2PropInfo = TTI::OP_None,
      ArrayRef<const Value *> Args = ArrayRef<const Value *>(),
      const Instruction *CxtI = nullptr);
  int getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index, Type *SubTp);
  int getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                       TTI::CastContextHint CCH, TTI::TargetCostKind CostKind,
                       const Instruction *I = nullptr);
  int getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind);
  int getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
                         CmpInst::Predicate VecPred,
                         TTI::TargetCostKind CostKind,
                         const Instruction *I = nullptr);
  int getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index);
  int getMemoryOpCost(unsigned Opcode, Type *Src, MaybeAlign Alignment,
                      unsigned AddressSpace, TTI::TargetCostKind CostKind,
                      const Instruction *I = nullptr);
  int getInterleavedMemoryOpCost(
      unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
      Align Alignment, unsigned AddressSpace,
      TTI::TargetCostKind CostKind = TTI::TCK_SizeAndLatency,
      bool UseMaskForCond = false, bool UseMaskForGaps = false);
  unsigned getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
                                 TTI::TargetCostKind CostKind);

  /// @}
};

} // end namespace llvm

#endif