//===- HexagonTargetTransformInfo.cpp - Hexagon specific TTI pass ---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements a TargetTransformInfo analysis pass specific to the
/// Hexagon target machine. It uses the target's detailed information to
/// provide more precise answers to certain TTI queries, while letting the
/// target-independent and default TTI implementations handle the rest.
///
//===----------------------------------------------------------------------===//

#include "HexagonTargetTransformInfo.h"
#include "HexagonSubtarget.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/User.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Transforms/Utils/LoopPeel.h"
#include "llvm/Transforms/Utils/UnrollLoop.h"

using namespace llvm;

#define DEBUG_TYPE "hexagontti"

static cl::opt<bool> HexagonAutoHVX("hexagon-autohvx", cl::init(false),
    cl::Hidden, cl::desc("Enable loop vectorizer for HVX"));

static cl::opt<bool> EmitLookupTables("hexagon-emit-lookup-tables",
    cl::init(true), cl::Hidden,
    cl::desc("Control lookup table emission on Hexagon target"));

static cl::opt<bool> HexagonMaskedVMem("hexagon-masked-vmem", cl::init(true),
    cl::Hidden, cl::desc("Enable masked loads/stores for HVX"));

// Constant "cost factor" to make floating point operations more expensive
// in terms of vectorization cost. This isn't the best way, but it should
// do. Ultimately, the cost should use cycles.
static const unsigned FloatFactor = 4;

bool HexagonTTIImpl::useHVX() const {
  return ST.useHVXOps() && HexagonAutoHVX;
}

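// Check whether a vector type is usable with HVX: it must be a fixed-width
// integer vector wider than 64 bits that either is a native HVX vector type
// or can be widened to one.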
bool HexagonTTIImpl::isTypeForHVX(Type *VecTy) const {
  if (!VecTy->isVectorTy() || isa<ScalableVectorType>(VecTy))
    return false;
  // Avoid types like <2 x i32*>.
  if (!cast<VectorType>(VecTy)->getElementType()->isIntegerTy())
    return false;
  EVT VecVT = EVT::getEVT(VecTy);
  if (!VecVT.isSimple() || VecVT.getSizeInBits() <= 64)
    return false;
  if (ST.isHVXVectorType(VecVT.getSimpleVT()))
    return true;
  auto Action = TLI.getPreferredVectorAction(VecVT.getSimpleVT());
  return Action == TargetLoweringBase::TypeWidenVector;
}

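// Return the element count of a vector type, or 1 for a scalar type.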
unsigned HexagonTTIImpl::getTypeNumElements(Type *Ty) const {
  if (auto *VTy = dyn_cast<FixedVectorType>(Ty))
    return VTy->getNumElements();
  assert((Ty->isIntegerTy() || Ty->isFloatingPointTy()) &&
         "Expecting scalar type");
  return 1;
}

TargetTransformInfo::PopcntSupportKind
HexagonTTIImpl::getPopcntSupport(unsigned IntTyWidthInBit) const {
  // Return fast hardware support as every input < 64 bits will be promoted
  // to 64 bits.
  return TargetTransformInfo::PSK_FastHardware;
}

// The Hexagon target can unroll loops with run-time trip counts.
void HexagonTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                             TTI::UnrollingPreferences &UP) {
  UP.Runtime = UP.Partial = true;
}

void HexagonTTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
                                           TTI::PeelingPreferences &PP) {
  BaseT::getPeelingPreferences(L, SE, PP);
  // Only try to peel innermost loops with small runtime trip counts.
  if (L && L->isInnermost() && canPeel(L) &&
      SE.getSmallConstantTripCount(L) == 0 &&
      SE.getSmallConstantMaxTripCount(L) > 0 &&
      SE.getSmallConstantMaxTripCount(L) <= 5) {
    PP.PeelCount = 2;
  }
}

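// Hexagon has post-increment addressing modes, so favor post-incremented
// address recurrences when optimizing loops.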
bool HexagonTTIImpl::shouldFavorPostInc() const {
  return true;
}

/// --- Vector TTI begin ---

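// Hexagon provides 32 general-purpose scalar registers, plus 32 HVX vector
// registers when HVX is in use.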
unsigned HexagonTTIImpl::getNumberOfRegisters(bool Vector) const {
  if (Vector)
    return useHVX() ? 32 : 0;
  return 32;
}

unsigned HexagonTTIImpl::getMaxInterleaveFactor(unsigned VF) {
  return useHVX() ? 2 : 1;
}

unsigned HexagonTTIImpl::getRegisterBitWidth(bool Vector) const {
  return Vector ? getMinVectorRegisterBitWidth() : 32;
}

unsigned HexagonTTIImpl::getMinVectorRegisterBitWidth() const {
  return useHVX() ? ST.getVectorLength() * 8 : 32;
}

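// The minimum vectorization factor is the number of elements of the given
// width that fill one HVX vector register (getVectorLength() is in bytes,
// hence the factor of 8).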
unsigned HexagonTTIImpl::getMinimumVF(unsigned ElemWidth) const {
  return (8 * ST.getVectorLength()) / ElemWidth;
}

unsigned HexagonTTIImpl::getScalarizationOverhead(VectorType *Ty,
                                                  const APInt &DemandedElts,
                                                  bool Insert, bool Extract) {
  return BaseT::getScalarizationOverhead(Ty, DemandedElts, Insert, Extract);
}

unsigned HexagonTTIImpl::getOperandsScalarizationOverhead(
    ArrayRef<const Value*> Args, unsigned VF) {
  return BaseT::getOperandsScalarizationOverhead(Args, VF);
}

unsigned HexagonTTIImpl::getCallInstrCost(Function *F, Type *RetTy,
                                          ArrayRef<Type*> Tys,
                                          TTI::TargetCostKind CostKind) {
  return BaseT::getCallInstrCost(F, RetTy, Tys, CostKind);
}

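// bswap is cheap here: its cost is the type-legalization count plus a small
// constant. All other intrinsics fall back to the default cost model.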
unsigned
HexagonTTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
                                      TTI::TargetCostKind CostKind) {
  if (ICA.getID() == Intrinsic::bswap) {
    std::pair<int, MVT> LT =
        TLI.getTypeLegalizationCost(DL, ICA.getReturnType());
    return LT.first + 2;
  }
  return BaseT::getIntrinsicInstrCost(ICA, CostKind);
}

unsigned HexagonTTIImpl::getAddressComputationCost(Type *Tp,
                                                   ScalarEvolution *SE,
                                                   const SCEV *S) {
  return 0;
}

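// Vector loads that exactly fill HVX registers cost one unit per register;
// loads that must be assembled from narrower pieces are charged per
// constituent load. For example, with 128-byte HVX vectors, a load of
// <32 x i32> (1024 bits) exactly fills one register and costs 1.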
unsigned HexagonTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
                                         MaybeAlign Alignment,
                                         unsigned AddressSpace,
                                         TTI::TargetCostKind CostKind,
                                         const Instruction *I) {
  assert(Opcode == Instruction::Load || Opcode == Instruction::Store);
  // TODO: Handle other cost kinds.
  if (CostKind != TTI::TCK_RecipThroughput)
    return 1;

  if (Opcode == Instruction::Store)
    return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
                                  CostKind, I);

  if (Src->isVectorTy()) {
    VectorType *VecTy = cast<VectorType>(Src);
    unsigned VecWidth = VecTy->getPrimitiveSizeInBits().getFixedSize();
    if (useHVX() && isTypeForHVX(VecTy)) {
      unsigned RegWidth = getRegisterBitWidth(true);
      assert(RegWidth && "Non-zero vector register width expected");
      // Cost of HVX loads.
      if (VecWidth % RegWidth == 0)
        return VecWidth / RegWidth;
      // Cost of constructing HVX vector from scalar loads.
      const Align RegAlign(RegWidth / 8);
      if (!Alignment || *Alignment > RegAlign)
        Alignment = RegAlign;
      assert(Alignment);
      unsigned AlignWidth = 8 * Alignment->value();
      unsigned NumLoads = alignTo(VecWidth, AlignWidth) / AlignWidth;
      return 3 * NumLoads;
    }

    // Non-HVX vectors.
    // Add extra cost for floating point types.
    unsigned Cost =
        VecTy->getElementType()->isFloatingPointTy() ? FloatFactor : 1;

    // At this point unspecified alignment is considered as Align(1).
    const Align BoundAlignment = std::min(Alignment.valueOrOne(), Align(8));
    unsigned AlignWidth = 8 * BoundAlignment.value();
    unsigned NumLoads = alignTo(VecWidth, AlignWidth) / AlignWidth;
    if (Alignment == Align(4) || Alignment == Align(8))
      return Cost * NumLoads;
    // Loads of less than 32 bits will need extra inserts to compose a vector.
    assert(BoundAlignment <= Align(8));
    unsigned LogA = Log2(BoundAlignment);
    return (3 - LogA) * Cost * NumLoads;
  }

  return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
                                CostKind, I);
}

unsigned HexagonTTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *Src,
                                               Align Alignment,
                                               unsigned AddressSpace,
                                               TTI::TargetCostKind CostKind) {
  return BaseT::getMaskedMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
                                      CostKind);
}

unsigned HexagonTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp,
                                        int Index, Type *SubTp) {
  return 1;
}

unsigned HexagonTTIImpl::getGatherScatterOpCost(
    unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask,
    Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I) {
  return BaseT::getGatherScatterOpCost(Opcode, DataTy, Ptr, VariableMask,
                                       Alignment, CostKind, I);
}

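// An interleaved group whose accesses use every member and need no masking is
// costed as a single wide memory operation; anything else falls back to the
// generic cost.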
unsigned HexagonTTIImpl::getInterleavedMemoryOpCost(
    unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
    Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
    bool UseMaskForCond, bool UseMaskForGaps) {
  if (Indices.size() != Factor || UseMaskForCond || UseMaskForGaps)
    return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                             Alignment, AddressSpace, CostKind,
                                             UseMaskForCond, UseMaskForGaps);
  return getMemoryOpCost(Opcode, VecTy, MaybeAlign(Alignment), AddressSpace,
                         CostKind);
}

unsigned HexagonTTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                                            Type *CondTy,
                                            TTI::TargetCostKind CostKind,
                                            const Instruction *I) {
  if (ValTy->isVectorTy() && CostKind == TTI::TCK_RecipThroughput) {
    std::pair<int, MVT> LT = TLI.getTypeLegalizationCost(DL, ValTy);
    if (Opcode == Instruction::FCmp)
      return LT.first + FloatFactor * getTypeNumElements(ValTy);
  }
  return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, CostKind, I);
}

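// Vector floating-point arithmetic is penalized by FloatFactor per element,
// which steers the vectorizer away from FP-heavy loops.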
unsigned HexagonTTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
    TTI::OperandValueKind Opd1Info, TTI::OperandValueKind Opd2Info,
    TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo, ArrayRef<const Value *> Args,
    const Instruction *CxtI) {
  // TODO: Handle more cost kinds.
  if (CostKind != TTI::TCK_RecipThroughput)
    return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Opd1Info,
                                         Opd2Info, Opd1PropInfo,
                                         Opd2PropInfo, Args, CxtI);

  if (Ty->isVectorTy()) {
    std::pair<int, MVT> LT = TLI.getTypeLegalizationCost(DL, Ty);
    if (LT.second.isFloatingPoint())
      return LT.first + FloatFactor * getTypeNumElements(Ty);
  }
  return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Opd1Info, Opd2Info,
                                       Opd1PropInfo, Opd2PropInfo, Args, CxtI);
}

unsigned HexagonTTIImpl::getCastInstrCost(unsigned Opcode, Type *DstTy,
                                          Type *SrcTy,
                                          TTI::CastContextHint CCH,
                                          TTI::TargetCostKind CostKind,
                                          const Instruction *I) {
  if (SrcTy->isFPOrFPVectorTy() || DstTy->isFPOrFPVectorTy()) {
    unsigned SrcN = SrcTy->isFPOrFPVectorTy() ? getTypeNumElements(SrcTy) : 0;
    unsigned DstN = DstTy->isFPOrFPVectorTy() ? getTypeNumElements(DstTy) : 0;

    std::pair<int, MVT> SrcLT = TLI.getTypeLegalizationCost(DL, SrcTy);
    std::pair<int, MVT> DstLT = TLI.getTypeLegalizationCost(DL, DstTy);
    unsigned Cost =
        std::max(SrcLT.first, DstLT.first) + FloatFactor * (SrcN + DstN);
    // TODO: Allow non-throughput costs that aren't binary.
    if (CostKind != TTI::TCK_RecipThroughput)
      return Cost == 0 ? 0 : 1;
    return Cost;
  }
  return 1;
}

unsigned HexagonTTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,
                                            unsigned Index) {
  Type *ElemTy = Val->isVectorTy() ? cast<VectorType>(Val)->getElementType()
                                   : Val;
  if (Opcode == Instruction::InsertElement) {
    // Need two rotations for non-zero index.
    unsigned Cost = (Index != 0) ? 2 : 0;
    if (ElemTy->isIntegerTy(32))
      return Cost;
    // If it's not a 32-bit value, there will need to be an extract.
    return Cost + getVectorInstrCost(Instruction::ExtractElement, Val, Index);
  }

  if (Opcode == Instruction::ExtractElement)
    return 2;

  return 1;
}

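// Masked vector loads/stores are legal only for HVX types, and can be
// disabled entirely with -hexagon-masked-vmem=false.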
bool HexagonTTIImpl::isLegalMaskedStore(Type *DataType, Align /*Alignment*/) {
  return HexagonMaskedVMem && isTypeForHVX(DataType);
}

bool HexagonTTIImpl::isLegalMaskedLoad(Type *DataType, Align /*Alignment*/) {
  return HexagonMaskedVMem && isTypeForHVX(DataType);
}

/// --- Vector TTI end ---

unsigned HexagonTTIImpl::getPrefetchDistance() const {
  return ST.getL1PrefetchDistance();
}

unsigned HexagonTTIImpl::getCacheLineSize() const {
  return ST.getL1CacheLineSize();
}

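// An integer extension of a narrow load to i32 is free, because Hexagon's
// sub-word load instructions extend to 32 bits as part of the load itself.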
int
HexagonTTIImpl::getUserCost(const User *U,
                            ArrayRef<const Value *> Operands,
                            TTI::TargetCostKind CostKind) {
  auto isCastFoldedIntoLoad = [this](const CastInst *CI) -> bool {
    if (!CI->isIntegerCast())
      return false;
    // Only extensions from an integer type shorter than 32-bit to i32
    // can be folded into the load.
    const DataLayout &DL = getDataLayout();
    unsigned SBW = DL.getTypeSizeInBits(CI->getSrcTy());
    unsigned DBW = DL.getTypeSizeInBits(CI->getDestTy());
    if (DBW != 32 || SBW >= DBW)
      return false;

    const LoadInst *LI = dyn_cast<const LoadInst>(CI->getOperand(0));
    // Technically, this code could allow multiple uses of the load, and
    // check if all the uses are the same extension operation, but this
    // should be sufficient for most cases.
    return LI && LI->hasOneUse();
  };

  if (const CastInst *CI = dyn_cast<const CastInst>(U))
    if (isCastFoldedIntoLoad(CI))
      return TargetTransformInfo::TCC_Free;
  return BaseT::getUserCost(U, Operands, CostKind);
}

bool HexagonTTIImpl::shouldBuildLookupTables() const {
  return EmitLookupTables;
}