//===- AMDGPUAnnotateKernelFeaturesPass.cpp -------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file This pass adds target attributes to functions which use intrinsics
/// which will impact calling convention lowering.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/CallGraphSCCPass.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Target/TargetMachine.h"

#define DEBUG_TYPE "amdgpu-annotate-kernel-features"

using namespace llvm;

namespace {
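
/// Adds target attributes to each function based on the intrinsics it calls,
/// the attributes of its callees, and the address space casts it performs, so
/// that calling convention lowering knows which implicit inputs are required.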
class AMDGPUAnnotateKernelFeatures : public CallGraphSCCPass {
private:
  const TargetMachine *TM = nullptr;
  AMDGPUAS AS;

  bool addFeatureAttributes(Function &F);

public:
  static char ID;

  AMDGPUAnnotateKernelFeatures() : CallGraphSCCPass(ID) {}

  bool doInitialization(CallGraph &CG) override;
  bool runOnSCC(CallGraphSCC &SCC) override;

  StringRef getPassName() const override {
    return "AMDGPU Annotate Kernel Features";
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesAll();
    CallGraphSCCPass::getAnalysisUsage(AU);
  }

  static bool visitConstantExpr(const ConstantExpr *CE, AMDGPUAS AS);
  static bool visitConstantExprsRecursively(
    const Constant *EntryC,
    SmallPtrSet<const Constant *, 8> &ConstantExprVisited,
    AMDGPUAS AS);
};

} // end anonymous namespace

char AMDGPUAnnotateKernelFeatures::ID = 0;

char &llvm::AMDGPUAnnotateKernelFeaturesID = AMDGPUAnnotateKernelFeatures::ID;

INITIALIZE_PASS(AMDGPUAnnotateKernelFeatures, DEBUG_TYPE,
                "Add AMDGPU function attributes", false, false)


// The queue ptr is only needed when casting to flat, not from it.
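// Casting from local or private to flat requires the corresponding aperture
// base, which (without aperture registers) is read via the queue pointer.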
static bool castRequiresQueuePtr(unsigned SrcAS, const AMDGPUAS &AS) {
  return SrcAS == AS.LOCAL_ADDRESS || SrcAS == AS.PRIVATE_ADDRESS;
}

static bool castRequiresQueuePtr(const AddrSpaceCastInst *ASC,
                                 const AMDGPUAS &AS) {
  return castRequiresQueuePtr(ASC->getSrcAddressSpace(), AS);
}

bool AMDGPUAnnotateKernelFeatures::visitConstantExpr(const ConstantExpr *CE,
                                                     AMDGPUAS AS) {
  if (CE->getOpcode() == Instruction::AddrSpaceCast) {
    unsigned SrcAS = CE->getOperand(0)->getType()->getPointerAddressSpace();
    return castRequiresQueuePtr(SrcAS, AS);
  }

  return false;
}
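
// Walk a constant's operand graph iteratively with an explicit worklist, so
// deeply nested constant expressions cannot overflow the stack. Returns true
// as soon as any visited expression would require the queue pointer.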
bool AMDGPUAnnotateKernelFeatures::visitConstantExprsRecursively(
  const Constant *EntryC,
  SmallPtrSet<const Constant *, 8> &ConstantExprVisited,
  AMDGPUAS AS) {

  if (!ConstantExprVisited.insert(EntryC).second)
    return false;

  SmallVector<const Constant *, 16> Stack;
  Stack.push_back(EntryC);

  while (!Stack.empty()) {
    const Constant *C = Stack.pop_back_val();

    // Check this constant expression.
    if (const auto *CE = dyn_cast<ConstantExpr>(C)) {
      if (visitConstantExpr(CE, AS))
        return true;
    }

    // Visit all sub-expressions.
    for (const Use &U : C->operands()) {
      const auto *OpC = dyn_cast<Constant>(U);
      if (!OpC)
        continue;

      if (!ConstantExprVisited.insert(OpC).second)
        continue;

      Stack.push_back(OpC);
    }
  }

  return false;
}

// The x workitem and workgroup ids are always initialized for kernels, so
// they only need to be recorded for callable functions (hence NonKernelOnly).
//
// TODO: We should not add the attributes if the known compile time workgroup
// size is 1 for y/z.
static StringRef intrinsicToAttrName(Intrinsic::ID ID,
                                     bool &NonKernelOnly,
                                     bool &IsQueuePtr) {
  switch (ID) {
  case Intrinsic::amdgcn_workitem_id_x:
    NonKernelOnly = true;
    return "amdgpu-work-item-id-x";
  case Intrinsic::amdgcn_workgroup_id_x:
    NonKernelOnly = true;
    return "amdgpu-work-group-id-x";
  case Intrinsic::amdgcn_workitem_id_y:
  case Intrinsic::r600_read_tidig_y:
    return "amdgpu-work-item-id-y";
  case Intrinsic::amdgcn_workitem_id_z:
  case Intrinsic::r600_read_tidig_z:
    return "amdgpu-work-item-id-z";
  case Intrinsic::amdgcn_workgroup_id_y:
  case Intrinsic::r600_read_tgid_y:
    return "amdgpu-work-group-id-y";
  case Intrinsic::amdgcn_workgroup_id_z:
  case Intrinsic::r600_read_tgid_z:
    return "amdgpu-work-group-id-z";
  case Intrinsic::amdgcn_dispatch_ptr:
    return "amdgpu-dispatch-ptr";
  case Intrinsic::amdgcn_dispatch_id:
    return "amdgpu-dispatch-id";
  case Intrinsic::amdgcn_kernarg_segment_ptr:
    return "amdgpu-kernarg-segment-ptr";
  case Intrinsic::amdgcn_implicitarg_ptr:
    return "amdgpu-implicitarg-ptr";
  case Intrinsic::amdgcn_queue_ptr:
  case Intrinsic::trap:
  case Intrinsic::debugtrap:
    IsQueuePtr = true;
    return "amdgpu-queue-ptr";
  default:
    return "";
  }
}
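
// If \p Callee carries the attribute \p Name, add it to \p Parent as well.
// Returns true if the attribute was present on the callee.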
static bool handleAttr(Function &Parent, const Function &Callee,
                       StringRef Name) {
  if (Callee.hasFnAttribute(Name)) {
    Parent.addFnAttr(Name);
    return true;
  }

  return false;
}
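
// Propagate the feature attributes of \p Callee into \p Parent. The queue
// pointer requirement is reported through \p NeedQueuePtr rather than added
// immediately, since the caller may also derive it from address space casts.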
static void copyFeaturesToFunction(Function &Parent, const Function &Callee,
                                   bool &NeedQueuePtr) {
  // X ids are unnecessarily propagated to kernels.
  static const StringRef AttrNames[] = {
    { "amdgpu-work-item-id-x" },
    { "amdgpu-work-item-id-y" },
    { "amdgpu-work-item-id-z" },
    { "amdgpu-work-group-id-x" },
    { "amdgpu-work-group-id-y" },
    { "amdgpu-work-group-id-z" },
    { "amdgpu-dispatch-ptr" },
    { "amdgpu-dispatch-id" },
    { "amdgpu-kernarg-segment-ptr" },
    { "amdgpu-implicitarg-ptr" }
  };

  if (handleAttr(Parent, Callee, "amdgpu-queue-ptr"))
    NeedQueuePtr = true;

  for (StringRef AttrName : AttrNames)
    handleAttr(Parent, Callee, AttrName);
}
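
// Scan the body of \p F for intrinsic uses, calls, and address space casts
// that imply implicit argument requirements, and add the corresponding
// function attributes.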
bool AMDGPUAnnotateKernelFeatures::addFeatureAttributes(Function &F) {
  const AMDGPUSubtarget &ST = TM->getSubtarget<AMDGPUSubtarget>(F);
  bool HasFlat = ST.hasFlatAddressSpace();
  bool HasApertureRegs = ST.hasApertureRegs();
  SmallPtrSet<const Constant *, 8> ConstantExprVisited;

  bool Changed = false;
  bool NeedQueuePtr = false;
  bool HaveCall = false;
  bool IsFunc = !AMDGPU::isEntryFunctionCC(F.getCallingConv());

  for (BasicBlock &BB : F) {
    for (Instruction &I : BB) {
      CallSite CS(&I);
      if (CS) {
        Function *Callee = CS.getCalledFunction();

        // TODO: Do something with indirect calls.
        if (!Callee) {
          if (!CS.isInlineAsm())
            HaveCall = true;
          continue;
        }

        Intrinsic::ID IID = Callee->getIntrinsicID();
        if (IID == Intrinsic::not_intrinsic) {
          HaveCall = true;
          copyFeaturesToFunction(F, *Callee, NeedQueuePtr);
          Changed = true;
        } else {
          bool NonKernelOnly = false;
          StringRef AttrName = intrinsicToAttrName(IID,
                                                   NonKernelOnly, NeedQueuePtr);
          if (!AttrName.empty() && (IsFunc || !NonKernelOnly)) {
            F.addFnAttr(AttrName);
            Changed = true;
          }
        }
}
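
      // Once the queue pointer is known to be needed, or the subtarget has
      // aperture registers (so address space casts never require it), the
      // cast and constant-expression checks below can be skipped.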
      if (NeedQueuePtr || HasApertureRegs)
        continue;

      if (const AddrSpaceCastInst *ASC = dyn_cast<AddrSpaceCastInst>(&I)) {
        if (castRequiresQueuePtr(ASC, AS)) {
          NeedQueuePtr = true;
          continue;
        }
      }

      for (const Use &U : I.operands()) {
        const auto *OpC = dyn_cast<Constant>(U);
        if (!OpC)
          continue;

        if (visitConstantExprsRecursively(OpC, ConstantExprVisited, AS)) {
          NeedQueuePtr = true;
          break;
        }
      }
    }
  }

  if (NeedQueuePtr) {
    F.addFnAttr("amdgpu-queue-ptr");
    Changed = true;
  }

  // TODO: We could refine this to captured pointers that could possibly be
  // accessed by flat instructions. For now this is mostly a poor way of
  // estimating whether there are calls before argument lowering.
  if (HasFlat && !IsFunc && HaveCall) {
    F.addFnAttr("amdgpu-flat-scratch");
    Changed = true;
  }

  return Changed;
}
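
// CallGraphSCCPass visits SCCs bottom-up, so callee attributes are already in
// place when the callers that must inherit them are processed.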
bool AMDGPUAnnotateKernelFeatures::runOnSCC(CallGraphSCC &SCC) {
  bool Changed = false;
  for (CallGraphNode *I : SCC) {
    Function *F = I->getFunction();
    if (!F || F->isDeclaration())
      continue;

    Changed |= addFeatureAttributes(*F);
  }

  return Changed;
}
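
// Cache the TargetMachine (via TargetPassConfig) and the target address space
// mapping before any SCC is processed.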
bool AMDGPUAnnotateKernelFeatures::doInitialization(CallGraph &CG) {
  auto *TPC = getAnalysisIfAvailable<TargetPassConfig>();
  if (!TPC)
    report_fatal_error("TargetMachine is required");

  AS = AMDGPU::getAMDGPUAS(CG.getModule());
  TM = &TPC->getTM<TargetMachine>();
  return false;
}
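
// Factory function for the legacy pass manager pipeline.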
Pass *llvm::createAMDGPUAnnotateKernelFeaturesPass() {
  return new AMDGPUAnnotateKernelFeatures();
}