//===-- AMDGPUTargetTransformInfo.cpp - AMDGPU specific TTI pass ---------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// \file
// This file implements a TargetTransformInfo analysis pass specific to the
// AMDGPU target machine. It uses the target's detailed information to provide
// more precise answers to certain TTI queries, while letting the target
// independent and default TTI implementations handle the rest.
//
//===----------------------------------------------------------------------===//

#include "AMDGPUTargetTransformInfo.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/CostTable.h"
#include "llvm/Target/TargetLowering.h"

using namespace llvm;

#define DEBUG_TYPE "AMDGPUtti"
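
// This threshold is exposed as a cl::opt, so with typical LLVM tool
// invocations it can be overridden on the command line, e.g.
// -amdgpu-unroll-threshold-private=<N> for opt/llc, or via -mllvm when
// going through a compiler driver.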
static cl::opt<unsigned> UnrollThresholdPrivate(
  "amdgpu-unroll-threshold-private",
  cl::desc("Unroll threshold for AMDGPU if private memory used in a loop"),
  cl::init(2000), cl::Hidden);

void AMDGPUTTIImpl::getUnrollingPreferences(Loop *L,
                                            TTI::UnrollingPreferences &UP) {
  UP.Threshold = 300; // Twice the default.
  UP.MaxCount = UINT_MAX;
  UP.Partial = true;

  // TODO: Do we want runtime unrolling?

  // Maximum alloca size that can fit in registers. Reserve 16 registers.
  const unsigned MaxAlloca = (256 - 16) * 4;
  for (const BasicBlock *BB : L->getBlocks()) {
    const DataLayout &DL = BB->getModule()->getDataLayout();
    for (const Instruction &I : *BB) {
      const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(&I);
      if (!GEP || GEP->getAddressSpace() != AMDGPUAS::PRIVATE_ADDRESS)
        continue;

      const Value *Ptr = GEP->getPointerOperand();
      const AllocaInst *Alloca =
          dyn_cast<AllocaInst>(GetUnderlyingObject(Ptr, DL));
      if (Alloca && Alloca->isStaticAlloca()) {
        Type *Ty = Alloca->getAllocatedType();
        unsigned AllocaSize = Ty->isSized() ? DL.getTypeAllocSize(Ty) : 0;
        if (AllocaSize > MaxAlloca)
          continue;

        // Check if GEP depends on a value defined by this loop itself.
        bool HasLoopDef = false;
        for (const Value *Op : GEP->operands()) {
          const Instruction *Inst = dyn_cast<Instruction>(Op);
          if (!Inst || L->isLoopInvariant(Op))
            continue;
          if (any_of(L->getSubLoops(), [Inst](const Loop *SubLoop) {
                return SubLoop->contains(Inst);
              }))
            continue;
          HasLoopDef = true;
          break;
        }
        if (!HasLoopDef)
          continue;

        // We want to do whatever we can to limit the number of alloca
        // instructions that make it through to the code generator. Allocas
        // require us to use indirect addressing, which is slow and prone to
        // compiler bugs. If this loop does an address calculation on an
        // alloca ptr, then we want to use a higher than normal loop unroll
        // threshold. This will give SROA a better chance to eliminate these
        // allocas.
        //
        // Don't use the maximum allowed value here as it will make some
        // programs way too big.
        UP.Threshold = UnrollThresholdPrivate;
        return;
      }
    }
  }
}

unsigned AMDGPUTTIImpl::getNumberOfRegisters(bool Vec) {
  if (Vec)
    return 0;

  // Number of VGPRs on SI.
  if (ST->getGeneration() >= AMDGPUSubtarget::SOUTHERN_ISLANDS)
    return 256;

  return 4 * 128; // XXX - 4 channels. Should these count as vector instead?
}

unsigned AMDGPUTTIImpl::getRegisterBitWidth(bool Vector) {
  return Vector ? 0 : 32;
}

unsigned AMDGPUTTIImpl::getLoadStoreVecRegBitWidth(unsigned AddrSpace) const {
  switch (AddrSpace) {
  case AMDGPUAS::GLOBAL_ADDRESS:
  case AMDGPUAS::CONSTANT_ADDRESS:
  case AMDGPUAS::FLAT_ADDRESS:
    return 128;
  case AMDGPUAS::LOCAL_ADDRESS:
  case AMDGPUAS::REGION_ADDRESS:
    return 64;
  case AMDGPUAS::PRIVATE_ADDRESS:
    return 8 * ST->getMaxPrivateElementSize();
  default:
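    // Pre-GCN (R600-family) targets also read kernel parameters and constant
    // buffers through dedicated address spaces that are treated as 128-bit
    // accesses here; anything else reaching this point is unexpected.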
    if (ST->getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS &&
        (AddrSpace == AMDGPUAS::PARAM_D_ADDRESS ||
         AddrSpace == AMDGPUAS::PARAM_I_ADDRESS ||
         (AddrSpace >= AMDGPUAS::CONSTANT_BUFFER_0 &&
          AddrSpace <= AMDGPUAS::CONSTANT_BUFFER_15)))
      return 128;
    llvm_unreachable("unhandled address space");
  }
}

bool AMDGPUTTIImpl::isLegalToVectorizeMemChain(unsigned ChainSizeInBytes,
                                               unsigned Alignment,
                                               unsigned AddrSpace) const {
  // We allow vectorization of flat stores, even though we may need to
  // decompose them later if they may access private memory. We don't have
  // enough context here, and legalization can handle it.
  if (AddrSpace == AMDGPUAS::PRIVATE_ADDRESS) {
    return (Alignment >= 4 || ST->hasUnalignedScratchAccess()) &&
           ChainSizeInBytes <= ST->getMaxPrivateElementSize();
  }
  return true;
}

bool AMDGPUTTIImpl::isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes,
                                                unsigned Alignment,
                                                unsigned AddrSpace) const {
  return isLegalToVectorizeMemChain(ChainSizeInBytes, Alignment, AddrSpace);
}

bool AMDGPUTTIImpl::isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes,
                                                 unsigned Alignment,
                                                 unsigned AddrSpace) const {
  return isLegalToVectorizeMemChain(ChainSizeInBytes, Alignment, AddrSpace);
}

unsigned AMDGPUTTIImpl::getMaxInterleaveFactor(unsigned VF) {
  // Disable unrolling if the loop is not vectorized.
  if (VF == 1)
    return 1;

  // Semi-arbitrary large amount.
  return 64;
}

int AMDGPUTTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::OperandValueKind Opd1Info,
    TTI::OperandValueKind Opd2Info, TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo, ArrayRef<const Value *> Args) {

  EVT OrigTy = TLI->getValueType(DL, Ty);
  if (!OrigTy.isSimple()) {
    return BaseT::getArithmeticInstrCost(Opcode, Ty, Opd1Info, Opd2Info,
                                         Opd1PropInfo, Opd2PropInfo);
  }

  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
  int ISD = TLI->InstructionOpcodeToISD(Opcode);

  // Because we don't have any legal vector operations, only legal vector
  // types, we need to account for split vectors.
  unsigned NElts = LT.second.isVector() ?
    LT.second.getVectorNumElements() : 1;

  MVT::SimpleValueType SLT = LT.second.getScalarType().SimpleTy;

  switch (ISD) {
  case ISD::SHL:
  case ISD::SRL:
  case ISD::SRA: {
    if (SLT == MVT::i64)
      return get64BitInstrCost() * LT.first * NElts;

    // i32
    return getFullRateInstrCost() * LT.first * NElts;
  }
  case ISD::ADD:
  case ISD::SUB:
  case ISD::AND:
  case ISD::OR:
  case ISD::XOR: {
    if (SLT == MVT::i64) {
      // and, or and xor are typically split into 2 VALU instructions.
      return 2 * getFullRateInstrCost() * LT.first * NElts;
    }

    return LT.first * NElts * getFullRateInstrCost();
  }
  case ISD::MUL: {
    const int QuarterRateCost = getQuarterRateInstrCost();
    if (SLT == MVT::i64) {
      const int FullRateCost = getFullRateInstrCost();
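      // The i64 multiply is modeled as roughly four quarter-rate 32-bit
      // multiplies plus four full-rate add/carry operations per legalized
      // element; this is an approximation of the 32-bit expansion.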
      return (4 * QuarterRateCost + (2 * 2) * FullRateCost) * LT.first * NElts;
    }

    // i32
    return QuarterRateCost * NElts * LT.first;
  }
  case ISD::FADD:
  case ISD::FSUB:
  case ISD::FMUL:
    if (SLT == MVT::f64)
      return LT.first * NElts * get64BitInstrCost();

    if (SLT == MVT::f32 || SLT == MVT::f16)
      return LT.first * NElts * getFullRateInstrCost();
    break;
  case ISD::FDIV:
  case ISD::FREM:
    // FIXME: frem should be handled separately. The fdiv in it is most of it,
    // but the current lowering is also not entirely correct.
    if (SLT == MVT::f64) {
      int Cost = 4 * get64BitInstrCost() + 7 * getQuarterRateInstrCost();

      // Add cost of workaround.
      if (ST->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS)
        Cost += 3 * getFullRateInstrCost();

      return LT.first * Cost * NElts;
    }

    // Assuming no fp32 denormals lowering.
    if (SLT == MVT::f32 || SLT == MVT::f16) {
      assert(!ST->hasFP32Denormals() && "will change when supported");
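      // Roughly: one quarter-rate reciprocal plus a handful of full-rate
      // scale/fma/fixup instructions in the f32 division expansion.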
      int Cost = 7 * getFullRateInstrCost() + 1 * getQuarterRateInstrCost();
      return LT.first * NElts * Cost;
    }

    break;
  default:
    break;
  }

  return BaseT::getArithmeticInstrCost(Opcode, Ty, Opd1Info, Opd2Info,
                                       Opd1PropInfo, Opd2PropInfo);
}

unsigned AMDGPUTTIImpl::getCFInstrCost(unsigned Opcode) {
  // XXX - For some reason this isn't called for switch.
  switch (Opcode) {
  case Instruction::Br:
  case Instruction::Ret:
    return 10;
  default:
    return BaseT::getCFInstrCost(Opcode);
  }
}

int AMDGPUTTIImpl::getVectorInstrCost(unsigned Opcode, Type *ValTy,
                                      unsigned Index) {
  switch (Opcode) {
  case Instruction::ExtractElement:
  case Instruction::InsertElement:
    // Extracts are just reads of a subregister, so are free. Inserts are
    // considered free because we don't want to have any cost for scalarizing
    // operations, and we don't have to copy into a different register class.

    // Dynamic indexing isn't free and is best avoided.
    return Index == ~0u ? 2 : 0;
  default:
    return BaseT::getVectorInstrCost(Opcode, ValTy, Index);
  }
}

static bool isIntrinsicSourceOfDivergence(const IntrinsicInst *I) {
  switch (I->getIntrinsicID()) {
  case Intrinsic::amdgcn_workitem_id_x:
  case Intrinsic::amdgcn_workitem_id_y:
  case Intrinsic::amdgcn_workitem_id_z:
  case Intrinsic::amdgcn_interp_mov:
  case Intrinsic::amdgcn_interp_p1:
  case Intrinsic::amdgcn_interp_p2:
  case Intrinsic::amdgcn_mbcnt_hi:
  case Intrinsic::amdgcn_mbcnt_lo:
  case Intrinsic::r600_read_tidig_x:
  case Intrinsic::r600_read_tidig_y:
  case Intrinsic::r600_read_tidig_z:
  case Intrinsic::amdgcn_atomic_inc:
  case Intrinsic::amdgcn_atomic_dec:
  case Intrinsic::amdgcn_image_atomic_swap:
  case Intrinsic::amdgcn_image_atomic_add:
  case Intrinsic::amdgcn_image_atomic_sub:
  case Intrinsic::amdgcn_image_atomic_smin:
  case Intrinsic::amdgcn_image_atomic_umin:
  case Intrinsic::amdgcn_image_atomic_smax:
  case Intrinsic::amdgcn_image_atomic_umax:
  case Intrinsic::amdgcn_image_atomic_and:
  case Intrinsic::amdgcn_image_atomic_or:
  case Intrinsic::amdgcn_image_atomic_xor:
  case Intrinsic::amdgcn_image_atomic_inc:
  case Intrinsic::amdgcn_image_atomic_dec:
  case Intrinsic::amdgcn_image_atomic_cmpswap:
  case Intrinsic::amdgcn_buffer_atomic_swap:
  case Intrinsic::amdgcn_buffer_atomic_add:
  case Intrinsic::amdgcn_buffer_atomic_sub:
  case Intrinsic::amdgcn_buffer_atomic_smin:
  case Intrinsic::amdgcn_buffer_atomic_umin:
  case Intrinsic::amdgcn_buffer_atomic_smax:
  case Intrinsic::amdgcn_buffer_atomic_umax:
  case Intrinsic::amdgcn_buffer_atomic_and:
  case Intrinsic::amdgcn_buffer_atomic_or:
  case Intrinsic::amdgcn_buffer_atomic_xor:
  case Intrinsic::amdgcn_buffer_atomic_cmpswap:
  case Intrinsic::amdgcn_ps_live:
  case Intrinsic::amdgcn_ds_swizzle:
    return true;
  default:
    return false;
  }
}

static bool isArgPassedInSGPR(const Argument *A) {
  const Function *F = A->getParent();

  // Arguments to compute shaders are never a source of divergence.
  if (!AMDGPU::isShader(F->getCallingConv()))
    return true;

  // For non-compute shaders, SGPR inputs are marked with either inreg or
  // byval.
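  // Attribute indices are one-based for arguments (index 0 is the return
  // value), hence the getArgNo() + 1 below.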
  if (F->getAttributes().hasAttribute(A->getArgNo() + 1, Attribute::InReg) ||
      F->getAttributes().hasAttribute(A->getArgNo() + 1, Attribute::ByVal))
    return true;

  // Everything else is in VGPRs.
  return false;
}

///
/// \returns true if the result of the value could potentially be
/// different across workitems in a wavefront.
bool AMDGPUTTIImpl::isSourceOfDivergence(const Value *V) const {
  if (const Argument *A = dyn_cast<Argument>(V))
    return !isArgPassedInSGPR(A);

  // Loads from the private address space are divergent, because threads
  // can execute the load instruction with the same inputs and get different
  // results.
  //
  // All other loads are not divergent, because if threads issue loads with
  // the same arguments, they will always get the same result.
  if (const LoadInst *Load = dyn_cast<LoadInst>(V))
    return Load->getPointerAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS;

  // Atomics are divergent because they are executed sequentially: when an
  // atomic operation refers to the same address in each thread, then each
  // thread after the first sees the value written by the previous thread as
  // the original value.
  if (isa<AtomicRMWInst>(V) || isa<AtomicCmpXchgInst>(V))
    return true;

  if (const IntrinsicInst *Intrinsic = dyn_cast<IntrinsicInst>(V))
    return isIntrinsicSourceOfDivergence(Intrinsic);

  // Assume all function calls are a source of divergence.
  if (isa<CallInst>(V) || isa<InvokeInst>(V))
    return true;

  return false;
}