//===-- AMDGPUTargetTransformInfo.cpp - AMDGPU specific TTI pass ---------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// \file
// This file implements a TargetTransformInfo analysis pass specific to the
// AMDGPU target machine. It uses the target's detailed information to provide
// more precise answers to certain TTI queries, while letting the target
// independent and default TTI implementations handle the rest.
//
//===----------------------------------------------------------------------===//

#include "AMDGPUTargetTransformInfo.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/CostTable.h"
#include "llvm/Target/TargetLowering.h"

using namespace llvm;

#define DEBUG_TYPE "AMDGPUtti"
static cl::opt<unsigned> UnrollThresholdPrivate(
  "amdgpu-unroll-threshold-private",
  cl::desc("Unroll threshold for AMDGPU if private memory used in a loop"),
  cl::init(2500), cl::Hidden);

static cl::opt<unsigned> UnrollThresholdLocal(
  "amdgpu-unroll-threshold-local",
  cl::desc("Unroll threshold for AMDGPU if local memory used in a loop"),
  cl::init(1000), cl::Hidden);

static cl::opt<unsigned> UnrollThresholdIf(
  "amdgpu-unroll-threshold-if",
  cl::desc("Unroll threshold increment for AMDGPU for each if statement inside loop"),
  cl::init(150), cl::Hidden);
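
// Note: these thresholds are hidden cl::opt flags, so they can still be
// overridden on the llc/opt command line (e.g. -amdgpu-unroll-threshold-private=<n>)
// when experimenting with the unrolling heuristic below.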
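
// Returns true if \p Cond (followed through its instruction operands, up to a
// small recursion depth) depends on a PHI node that is defined in loop \p L
// itself rather than only in one of its subloops.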
static bool dependsOnLocalPhi(const Loop *L, const Value *Cond,
                              unsigned Depth = 0) {
  const Instruction *I = dyn_cast<Instruction>(Cond);
  if (!I)
    return false;

  for (const Value *V : I->operand_values()) {
    if (!L->contains(I))
      continue;
    if (const PHINode *PHI = dyn_cast<PHINode>(V)) {
      if (none_of(L->getSubLoops(), [PHI](const Loop* SubLoop) {
                  return SubLoop->contains(PHI); }))
        return true;
    } else if (Depth < 10 && dependsOnLocalPhi(L, V, Depth+1))
      return true;
  }
  return false;
}
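
// Tune unrolling preferences for AMDGPU: raise the base threshold, boost it
// further for loops that index private (scratch) or local (LDS) memory with
// loop-dependent GEPs, and add a small bonus for "if" statements controlled by
// loop-defined PHIs.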
void AMDGPUTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                            TTI::UnrollingPreferences &UP) {
  UP.Threshold = 300; // Twice the default.
  UP.MaxCount = UINT_MAX;
  UP.Partial = true;

  // TODO: Do we want runtime unrolling?

  // Maximum alloca size that can fit in registers. Reserve 16 registers:
  // (256 registers - 16 reserved) * 4 bytes per 32-bit register = 960 bytes.
  const unsigned MaxAlloca = (256 - 16) * 4;
  unsigned ThresholdPrivate = UnrollThresholdPrivate;
  unsigned ThresholdLocal = UnrollThresholdLocal;
  unsigned MaxBoost = std::max(ThresholdPrivate, ThresholdLocal);
  AMDGPUAS ASST = ST->getAMDGPUAS();

  for (const BasicBlock *BB : L->getBlocks()) {
    const DataLayout &DL = BB->getModule()->getDataLayout();
    unsigned LocalGEPsSeen = 0;

    if (any_of(L->getSubLoops(), [BB](const Loop* SubLoop) {
               return SubLoop->contains(BB); }))
      continue; // Block belongs to an inner loop.

    for (const Instruction &I : *BB) {
      // Unroll a loop which contains an "if" statement whose condition is
      // defined by a PHI belonging to the loop. This may help to eliminate the
      // if region and potentially even the PHI itself, saving on both
      // divergence and the registers used for the PHI.
      // Add a small bonus for each such "if" statement.
      if (const BranchInst *Br = dyn_cast<BranchInst>(&I)) {
        if (UP.Threshold < MaxBoost && Br->isConditional()) {
          if (L->isLoopExiting(Br->getSuccessor(0)) ||
              L->isLoopExiting(Br->getSuccessor(1)))
            continue;
          if (dependsOnLocalPhi(L, Br->getCondition())) {
            UP.Threshold += UnrollThresholdIf;
            DEBUG(dbgs() << "Set unroll threshold " << UP.Threshold
                         << " for loop:\n" << *L << " due to " << *Br << '\n');
            if (UP.Threshold >= MaxBoost)
              return;
          }
        }
        continue;
      }

      const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(&I);
      if (!GEP)
        continue;

      unsigned AS = GEP->getAddressSpace();
      unsigned Threshold = 0;
      if (AS == ASST.PRIVATE_ADDRESS)
        Threshold = ThresholdPrivate;
      else if (AS == ASST.LOCAL_ADDRESS)
        Threshold = ThresholdLocal;
      else
        continue;

      if (UP.Threshold >= Threshold)
        continue;

      if (AS == ASST.PRIVATE_ADDRESS) {
        const Value *Ptr = GEP->getPointerOperand();
        const AllocaInst *Alloca =
            dyn_cast<AllocaInst>(GetUnderlyingObject(Ptr, DL));
        if (!Alloca || !Alloca->isStaticAlloca())
          continue;
        Type *Ty = Alloca->getAllocatedType();
        unsigned AllocaSize = Ty->isSized() ? DL.getTypeAllocSize(Ty) : 0;
        if (AllocaSize > MaxAlloca)
          continue;
      } else if (AS == ASST.LOCAL_ADDRESS) {
        LocalGEPsSeen++;
        // Inhibit unrolling for local memory if we have seen addressing not to
        // a variable; most likely we will be unable to combine it.
        // Do not unroll too-deep inner loops for local memory, to give a
        // chance to unroll an outer loop for a more important reason.
        if (LocalGEPsSeen > 1 || L->getLoopDepth() > 2 ||
            (!isa<GlobalVariable>(GEP->getPointerOperand()) &&
             !isa<Argument>(GEP->getPointerOperand())))
          continue;
      }

      // Check if GEP depends on a value defined by this loop itself.
      bool HasLoopDef = false;
      for (const Value *Op : GEP->operands()) {
        const Instruction *Inst = dyn_cast<Instruction>(Op);
        if (!Inst || L->isLoopInvariant(Op))
          continue;

        if (any_of(L->getSubLoops(), [Inst](const Loop* SubLoop) {
                   return SubLoop->contains(Inst); }))
          continue;
        HasLoopDef = true;
        break;
      }
      if (!HasLoopDef)
        continue;

      // We want to do whatever we can to limit the number of alloca
      // instructions that make it through to the code generator. allocas
      // require us to use indirect addressing, which is slow and prone to
      // compiler bugs. If this loop does an address calculation on an
      // alloca ptr, then we want to use a higher than normal loop unroll
      // threshold. This will give SROA a better chance to eliminate these
      // allocas.
      //
      // We also want to have more unrolling for local memory to let ds
      // instructions with different offsets combine.
      //
      // Don't use the maximum allowed value here as it will make some
      // programs way too big.
      UP.Threshold = Threshold;
      DEBUG(dbgs() << "Set unroll threshold " << Threshold << " for loop:\n"
                   << *L << " due to " << *GEP << '\n');
      if (UP.Threshold >= MaxBoost)
        return;
    }
  }
}
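
// Raw number of 32-bit registers the hardware exposes; the count reported to
// the vectorizer is derived from this in getNumberOfRegisters() below.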
unsigned AMDGPUTTIImpl::getHardwareNumberOfRegisters(bool Vec) const {
  // The concept of vector registers doesn't really exist. Some packed vector
  // operations operate on the normal 32-bit registers.

  // Number of VGPRs on SI.
  if (ST->getGeneration() >= AMDGPUSubtarget::SOUTHERN_ISLANDS)
    return 256;

  return 4 * 128; // XXX - 4 channels. Should these count as vector instead?
}

unsigned AMDGPUTTIImpl::getNumberOfRegisters(bool Vec) const {
  // This is really the number of registers to fill when vectorizing /
  // interleaving loops, so we lie to avoid trying to use all registers.
  return getHardwareNumberOfRegisters(Vec) >> 3;
}

unsigned AMDGPUTTIImpl::getRegisterBitWidth(bool Vector) const {
  return 32;
}

unsigned AMDGPUTTIImpl::getMinVectorRegisterBitWidth() const {
  return 32;
}
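
// Widest load/store the vectorizer should form for a given address space, in
// bits: 128-bit accesses for global/constant/flat, 64-bit for LDS/GDS, and
// whatever the subtarget's maximum private element size allows for scratch.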
unsigned AMDGPUTTIImpl::getLoadStoreVecRegBitWidth(unsigned AddrSpace) const {
  AMDGPUAS AS = ST->getAMDGPUAS();
  if (AddrSpace == AS.GLOBAL_ADDRESS ||
      AddrSpace == AS.CONSTANT_ADDRESS ||
      AddrSpace == AS.FLAT_ADDRESS)
    return 128;
  if (AddrSpace == AS.LOCAL_ADDRESS ||
      AddrSpace == AS.REGION_ADDRESS)
    return 64;
  if (AddrSpace == AS.PRIVATE_ADDRESS)
    return 8 * ST->getMaxPrivateElementSize();

  if (ST->getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS &&
      (AddrSpace == AS.PARAM_D_ADDRESS ||
       AddrSpace == AS.PARAM_I_ADDRESS ||
       (AddrSpace >= AS.CONSTANT_BUFFER_0 &&
        AddrSpace <= AS.CONSTANT_BUFFER_15)))
    return 128;
  llvm_unreachable("unhandled address space");
}

bool AMDGPUTTIImpl::isLegalToVectorizeMemChain(unsigned ChainSizeInBytes,
                                               unsigned Alignment,
                                               unsigned AddrSpace) const {
  // We allow vectorization of flat stores, even though we may need to decompose
  // them later if they may access private memory. We don't have enough context
  // here, and legalization can handle it.
  if (AddrSpace == ST->getAMDGPUAS().PRIVATE_ADDRESS) {
    return (Alignment >= 4 || ST->hasUnalignedScratchAccess()) &&
      ChainSizeInBytes <= ST->getMaxPrivateElementSize();
  }
  return true;
}

bool AMDGPUTTIImpl::isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes,
                                                unsigned Alignment,
                                                unsigned AddrSpace) const {
  return isLegalToVectorizeMemChain(ChainSizeInBytes, Alignment, AddrSpace);
}

bool AMDGPUTTIImpl::isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes,
                                                 unsigned Alignment,
                                                 unsigned AddrSpace) const {
  return isLegalToVectorizeMemChain(ChainSizeInBytes, Alignment, AddrSpace);
}

unsigned AMDGPUTTIImpl::getMaxInterleaveFactor(unsigned VF) {
  // Disable unrolling if the loop is not vectorized.
  // TODO: Enable this again.
  if (VF == 1)
    return 1;

  return 8;
}
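
// Costs below are expressed in units of the relative instruction rates
// (full-rate, quarter-rate, and a subtarget-dependent 64-bit rate) returned by
// the get*InstrCost() helpers, scaled by how many pieces the legalized type is
// split into.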
int AMDGPUTTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::OperandValueKind Opd1Info,
    TTI::OperandValueKind Opd2Info, TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo, ArrayRef<const Value *> Args) {

  EVT OrigTy = TLI->getValueType(DL, Ty);
  if (!OrigTy.isSimple()) {
    return BaseT::getArithmeticInstrCost(Opcode, Ty, Opd1Info, Opd2Info,
                                         Opd1PropInfo, Opd2PropInfo);
  }

  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
  int ISD = TLI->InstructionOpcodeToISD(Opcode);

  // Because we don't have any legal vector operations, only the legal types,
  // we need to account for split vectors.
  unsigned NElts = LT.second.isVector() ?
    LT.second.getVectorNumElements() : 1;

  MVT::SimpleValueType SLT = LT.second.getScalarType().SimpleTy;

  switch (ISD) {
  case ISD::SHL:
  case ISD::SRL:
  case ISD::SRA: {
    if (SLT == MVT::i64)
      return get64BitInstrCost() * LT.first * NElts;

    // i32
    return getFullRateInstrCost() * LT.first * NElts;
  }
  case ISD::ADD:
  case ISD::SUB:
  case ISD::AND:
  case ISD::OR:
  case ISD::XOR: {
    if (SLT == MVT::i64) {
      // and, or and xor are typically split into 2 VALU instructions.
      return 2 * getFullRateInstrCost() * LT.first * NElts;
    }

    return LT.first * NElts * getFullRateInstrCost();
  }
  case ISD::MUL: {
    const int QuarterRateCost = getQuarterRateInstrCost();
    if (SLT == MVT::i64) {
      const int FullRateCost = getFullRateInstrCost();
      return (4 * QuarterRateCost + (2 * 2) * FullRateCost) * LT.first * NElts;
    }

    // i32
    return QuarterRateCost * NElts * LT.first;
  }
  case ISD::FADD:
  case ISD::FSUB:
  case ISD::FMUL:
    if (SLT == MVT::f64)
      return LT.first * NElts * get64BitInstrCost();

    if (SLT == MVT::f32 || SLT == MVT::f16)
      return LT.first * NElts * getFullRateInstrCost();
    break;

  case ISD::FDIV:
  case ISD::FREM:
    // FIXME: frem should be handled separately. The fdiv in it is most of it,
    // but the current lowering is also not entirely correct.
    if (SLT == MVT::f64) {
      int Cost = 4 * get64BitInstrCost() + 7 * getQuarterRateInstrCost();

      // Add cost of workaround.
      if (ST->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS)
        Cost += 3 * getFullRateInstrCost();

      return LT.first * Cost * NElts;
    }

    // Assuming no fp32 denormals lowering.
    if (SLT == MVT::f32 || SLT == MVT::f16) {
      assert(!ST->hasFP32Denormals() && "will change when supported");
      int Cost = 7 * getFullRateInstrCost() + 1 * getQuarterRateInstrCost();
      return LT.first * NElts * Cost;
    }

    break;
  default:
    break;
  }

  return BaseT::getArithmeticInstrCost(Opcode, Ty, Opd1Info, Opd2Info,
                                       Opd1PropInfo, Opd2PropInfo);
}

unsigned AMDGPUTTIImpl::getCFInstrCost(unsigned Opcode) {
  // XXX - For some reason this isn't called for switch.
  switch (Opcode) {
  case Instruction::Br:
  case Instruction::Ret:
    return 10;
  default:
    return BaseT::getCFInstrCost(Opcode);
  }
}

int AMDGPUTTIImpl::getVectorInstrCost(unsigned Opcode, Type *ValTy,
                                      unsigned Index) {
  switch (Opcode) {
  case Instruction::ExtractElement:
  case Instruction::InsertElement: {
    unsigned EltSize
      = DL.getTypeSizeInBits(cast<VectorType>(ValTy)->getElementType());
    if (EltSize < 32) {
      if (EltSize == 16 && Index == 0 && ST->has16BitInsts())
        return 0;
      return BaseT::getVectorInstrCost(Opcode, ValTy, Index);
    }

    // Extracts are just reads of a subregister, so are free. Inserts are
    // considered free because we don't want to have any cost for scalarizing
    // operations, and we don't have to copy into a different register class.

    // Dynamic indexing isn't free and is best avoided.
    return Index == ~0u ? 2 : 0;
  }
  default:
    return BaseT::getVectorInstrCost(Opcode, ValTy, Index);
  }
}
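
// Target intrinsics whose results can differ between the lanes of a wavefront:
// per-workitem IDs and lane counts, interpolation, atomics (each lane sees a
// different intermediate value), amdgcn.ps.live and amdgcn.ds.swizzle.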
static bool isIntrinsicSourceOfDivergence(const IntrinsicInst *I) {
  switch (I->getIntrinsicID()) {
  case Intrinsic::amdgcn_workitem_id_x:
  case Intrinsic::amdgcn_workitem_id_y:
  case Intrinsic::amdgcn_workitem_id_z:
  case Intrinsic::amdgcn_interp_mov:
  case Intrinsic::amdgcn_interp_p1:
  case Intrinsic::amdgcn_interp_p2:
  case Intrinsic::amdgcn_mbcnt_hi:
  case Intrinsic::amdgcn_mbcnt_lo:
  case Intrinsic::r600_read_tidig_x:
  case Intrinsic::r600_read_tidig_y:
  case Intrinsic::r600_read_tidig_z:
  case Intrinsic::amdgcn_atomic_inc:
  case Intrinsic::amdgcn_atomic_dec:
  case Intrinsic::amdgcn_image_atomic_swap:
  case Intrinsic::amdgcn_image_atomic_add:
  case Intrinsic::amdgcn_image_atomic_sub:
  case Intrinsic::amdgcn_image_atomic_smin:
  case Intrinsic::amdgcn_image_atomic_umin:
  case Intrinsic::amdgcn_image_atomic_smax:
  case Intrinsic::amdgcn_image_atomic_umax:
  case Intrinsic::amdgcn_image_atomic_and:
  case Intrinsic::amdgcn_image_atomic_or:
  case Intrinsic::amdgcn_image_atomic_xor:
  case Intrinsic::amdgcn_image_atomic_inc:
  case Intrinsic::amdgcn_image_atomic_dec:
  case Intrinsic::amdgcn_image_atomic_cmpswap:
  case Intrinsic::amdgcn_buffer_atomic_swap:
  case Intrinsic::amdgcn_buffer_atomic_add:
  case Intrinsic::amdgcn_buffer_atomic_sub:
  case Intrinsic::amdgcn_buffer_atomic_smin:
  case Intrinsic::amdgcn_buffer_atomic_umin:
  case Intrinsic::amdgcn_buffer_atomic_smax:
  case Intrinsic::amdgcn_buffer_atomic_umax:
  case Intrinsic::amdgcn_buffer_atomic_and:
  case Intrinsic::amdgcn_buffer_atomic_or:
  case Intrinsic::amdgcn_buffer_atomic_xor:
  case Intrinsic::amdgcn_buffer_atomic_cmpswap:
  case Intrinsic::amdgcn_ps_live:
  case Intrinsic::amdgcn_ds_swizzle:
    return true;
  default:
    return false;
  }
}
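
// Returns true if the formal argument \p A is known to be passed in an SGPR,
// and is therefore uniform across the wavefront.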
static bool isArgPassedInSGPR(const Argument *A) {
  const Function *F = A->getParent();

  // Arguments to compute shaders are never a source of divergence.
  CallingConv::ID CC = F->getCallingConv();
  switch (CC) {
  case CallingConv::AMDGPU_KERNEL:
  case CallingConv::SPIR_KERNEL:
    return true;
  case CallingConv::AMDGPU_VS:
  case CallingConv::AMDGPU_HS:
  case CallingConv::AMDGPU_GS:
  case CallingConv::AMDGPU_PS:
  case CallingConv::AMDGPU_CS:
    // For non-compute shaders, SGPR inputs are marked with either inreg or byval.
    // Everything else is in VGPRs.
    return F->getAttributes().hasParamAttribute(A->getArgNo(), Attribute::InReg) ||
           F->getAttributes().hasParamAttribute(A->getArgNo(), Attribute::ByVal);
  default:
    // TODO: Should calls support inreg for SGPR inputs?
    return false;
  }
}

///
/// \returns true if the result of the value could potentially be
/// different across workitems in a wavefront.
bool AMDGPUTTIImpl::isSourceOfDivergence(const Value *V) const {
  if (const Argument *A = dyn_cast<Argument>(V))
    return !isArgPassedInSGPR(A);

  // Loads from the private address space are divergent, because threads
  // can execute the load instruction with the same inputs and get different
  // results.
  //
  // All other loads are not divergent, because if threads issue loads with the
  // same arguments, they will always get the same result.
  if (const LoadInst *Load = dyn_cast<LoadInst>(V))
    return Load->getPointerAddressSpace() == ST->getAMDGPUAS().PRIVATE_ADDRESS;

  // Atomics are divergent because they are executed sequentially: when an
  // atomic operation refers to the same address in each thread, then each
  // thread after the first sees the value written by the previous thread as
  // the original value.
  if (isa<AtomicRMWInst>(V) || isa<AtomicCmpXchgInst>(V))
    return true;

  if (const IntrinsicInst *Intrinsic = dyn_cast<IntrinsicInst>(V))
    return isIntrinsicSourceOfDivergence(Intrinsic);

  // Assume all function calls are a source of divergence.
  if (isa<CallInst>(V) || isa<InvokeInst>(V))
    return true;

  return false;
}
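
// readfirstlane and readlane produce a single scalar value read from one lane
// and broadcast to the whole wavefront, so their results are always uniform.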
bool AMDGPUTTIImpl::isAlwaysUniform(const Value *V) const {
  if (const IntrinsicInst *Intrinsic = dyn_cast<IntrinsicInst>(V)) {
    switch (Intrinsic->getIntrinsicID()) {
    default:
      return false;
    case Intrinsic::amdgcn_readfirstlane:
    case Intrinsic::amdgcn_readlane:
      return true;
    }
  }
  return false;
}

unsigned AMDGPUTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
                                       Type *SubTp) {
  if (ST->hasVOP3PInsts()) {
    VectorType *VT = cast<VectorType>(Tp);
    if (VT->getNumElements() == 2 &&
        DL.getTypeSizeInBits(VT->getElementType()) == 16) {
      // With op_sel, VOP3P instructions can freely access either the low or
      // the high half of a register, so any swizzle is free.

      switch (Kind) {
      case TTI::SK_Broadcast:
      case TTI::SK_Reverse:
      case TTI::SK_PermuteSingleSrc:
        return 0;
      default:
        break;
      }
    }
  }

  return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
}
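
// Treat a callee as inline-compatible with the caller only if the callee's
// required subtarget features (minus those in InlineFeatureIgnoreList) are a
// subset of the caller's.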
bool AMDGPUTTIImpl::areInlineCompatible(const Function *Caller,
                                        const Function *Callee) const {
  const TargetMachine &TM = getTLI()->getTargetMachine();
  const FeatureBitset &CallerBits =
    TM.getSubtargetImpl(*Caller)->getFeatureBits();
  const FeatureBitset &CalleeBits =
    TM.getSubtargetImpl(*Callee)->getFeatureBits();

  FeatureBitset RealCallerBits = CallerBits & ~InlineFeatureIgnoreList;
  FeatureBitset RealCalleeBits = CalleeBits & ~InlineFeatureIgnoreList;
  return ((RealCallerBits & RealCalleeBits) == RealCalleeBits);
}