2015-12-16 04:55:55 +08:00
|
|
|
//===-- AMDGPUAnnotateUniformValues.cpp - ---------------------------------===//
|
|
|
|
//
|
|
|
|
// The LLVM Compiler Infrastructure
|
|
|
|
//
|
|
|
|
// This file is distributed under the University of Illinois Open Source
|
|
|
|
// License. See LICENSE.TXT for details.
|
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
//
|
|
|
|
/// \file
|
|
|
|
/// This pass adds amdgpu.uniform metadata to IR values so this information
|
|
|
|
/// can be used during instruction selection.
|
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
#include "AMDGPU.h"
|
|
|
|
#include "AMDGPUIntrinsicInfo.h"
|
2016-12-09 01:28:47 +08:00
|
|
|
#include "llvm/ADT/SetVector.h"
|
2015-12-16 04:55:55 +08:00
|
|
|
#include "llvm/Analysis/DivergenceAnalysis.h"
|
2016-12-09 01:28:47 +08:00
|
|
|
#include "llvm/Analysis/LoopInfo.h"
|
|
|
|
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
|
2015-12-16 04:55:55 +08:00
|
|
|
#include "llvm/IR/IRBuilder.h"
|
2017-06-06 19:49:48 +08:00
|
|
|
#include "llvm/IR/InstVisitor.h"
|
2015-12-16 04:55:55 +08:00
|
|
|
#include "llvm/Support/Debug.h"
|
|
|
|
#include "llvm/Support/raw_ostream.h"
|
|
|
|
|
|
|
|
#define DEBUG_TYPE "amdgpu-annotate-uniform"
|
|
|
|
|
|
|
|
using namespace llvm;
|
|
|
|
|
|
|
|
namespace {
|
|
|
|
|
|
|
|
/// Function pass that tags wave-uniform branches and loads with
/// "amdgpu.uniform" metadata (and provably unclobbered kernel loads with
/// "amdgpu.noclobber") so instruction selection can use scalar instructions.
class AMDGPUAnnotateUniformValues : public FunctionPass,
       public InstVisitor<AMDGPUAnnotateUniformValues> {
  // Identifies values that are uniform across the wavefront.
  DivergenceAnalysis *DA;
  // Used to prove a load's pointer is not clobbered between function entry
  // and the load.
  MemoryDependenceResults *MDR;
  // Needed to widen the clobber search to the outermost enclosing loop.
  LoopInfo *LI;
  // Cache of zero-index GEPs cloned at function entry, keyed by the original
  // pointer, so each Argument/GlobalValue pointer gets at most one clone.
  DenseMap<Value*, GetElementPtrInst*> noClobberClones;
  // True when the current function uses the AMDGPU_KERNEL calling
  // convention; the noclobber reasoning is only valid for kernels.
  bool isKernelFunc;
  // Target address-space numbering, cached in doInitialization().
  AMDGPUAS AMDGPUASI;

public:
  static char ID;
  AMDGPUAnnotateUniformValues() :
    FunctionPass(ID) { }
  bool doInitialization(Module &M) override;
  bool runOnFunction(Function &F) override;
  StringRef getPassName() const override {
    return "AMDGPU Annotate Uniform Values";
  }
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<DivergenceAnalysis>();
    AU.addRequired<MemoryDependenceWrapperPass>();
    AU.addRequired<LoopInfoWrapperPass>();
    // Metadata-only annotation: no analyses are invalidated.
    AU.setPreservesAll();
  }

  void visitBranchInst(BranchInst &I);
  void visitLoadInst(LoadInst &I);
  // Returns true if the memory pointed to by \p Load may be written between
  // function entry and the load itself (searching this function only).
  bool isClobberedInFunction(LoadInst * Load);
};
|
|
|
|
|
|
|
|
} // End anonymous namespace
|
|
|
|
|
|
|
|
// Register the pass and declare the analyses it depends on so the legacy
// pass manager schedules them first.
INITIALIZE_PASS_BEGIN(AMDGPUAnnotateUniformValues, DEBUG_TYPE,
                      "Add AMDGPU uniform metadata", false, false)
INITIALIZE_PASS_DEPENDENCY(DivergenceAnalysis)
INITIALIZE_PASS_DEPENDENCY(MemoryDependenceWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_END(AMDGPUAnnotateUniformValues, DEBUG_TYPE,
                    "Add AMDGPU uniform metadata", false, false)

// Pass identification; the address of ID is what the pass manager keys on.
char AMDGPUAnnotateUniformValues::ID = 0;
|
|
|
|
|
2016-02-13 07:45:29 +08:00
|
|
|
/// Attach an empty "amdgpu.uniform" node to \p I. Only the presence of the
/// metadata matters to the backend, so no operands are needed.
static void setUniformMetadata(Instruction *I) {
  MDNode *Empty = MDNode::get(I->getContext(), {});
  I->setMetadata("amdgpu.uniform", Empty);
}
|
2016-12-09 01:28:47 +08:00
|
|
|
/// Attach an empty "amdgpu.noclobber" node to \p I, marking the pointed-to
/// memory as not written between kernel entry and this instruction.
static void setNoClobberMetadata(Instruction *I) {
  MDNode *Empty = MDNode::get(I->getContext(), {});
  I->setMetadata("amdgpu.noclobber", Empty);
}
|
|
|
|
|
|
|
|
static void DFS(BasicBlock *Root, SetVector<BasicBlock*> & Set) {
|
|
|
|
for (auto I : predecessors(Root))
|
|
|
|
if (Set.insert(I))
|
|
|
|
DFS(I, Set);
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Determine whether the memory read by \p Load may have been written to
/// anywhere between function entry and the load. Conservative: any clobber
/// dependency reported by MemoryDependenceResults counts.
bool AMDGPUAnnotateUniformValues::isClobberedInFunction(LoadInst * Load) {
  // 1. get Loop for the Load->getparent();
  // 2. if it exists, collect all the BBs from the most outer
  // loop and check for the writes. If NOT - start DFS over all preds.
  // 3. Start DFS over all preds from the most outer loop header.
  SetVector<BasicBlock *> Checklist;
  BasicBlock *Start = Load->getParent();
  Checklist.insert(Start);
  const Value *Ptr = Load->getPointerOperand();
  const Loop *L = LI->getLoopFor(Start);
  if (L) {
    // Walk up to the outermost loop containing the load: a store anywhere
    // in that loop may execute after the load on a later iteration.
    const Loop *P = L;
    do {
      L = P;
      P = P->getParentLoop();
    } while (P);
    // Check every block of the outermost loop, and restart the predecessor
    // walk from its header rather than from the load's own block.
    Checklist.insert(L->block_begin(), L->block_end());
    Start = L->getHeader();
  }

  // Add everything reachable backwards from Start to the checklist.
  DFS(Start, Checklist);
  for (auto &BB : Checklist) {
    // In the load's own block only instructions before the load matter;
    // in every other block scan the whole block (from end() backwards).
    BasicBlock::iterator StartIt = (BB == Load->getParent()) ?
      BasicBlock::iterator(Load) : BB->end();
    if (MDR->getPointerDependencyFrom(MemoryLocation(Ptr),
          true, StartIt, BB, Load).isClobber())
      return true;
  }
  return false;
}
|
2016-02-13 07:45:29 +08:00
|
|
|
|
|
|
|
/// Tag conditional branches whose condition is wave-uniform so instruction
/// selection can pick a scalar branch. Unconditional branches and divergent
/// conditions are left untouched.
void AMDGPUAnnotateUniformValues::visitBranchInst(BranchInst &I) {
  if (I.isConditional() && DA->isUniform(I.getCondition()))
    setUniformMetadata(I.getParent()->getTerminator());
}
|
|
|
|
|
2015-12-16 04:55:55 +08:00
|
|
|
/// Annotate loads through wave-uniform pointers. The pointer-defining
/// instruction gets "amdgpu.uniform", and — for kernels where the memory is
/// provably unclobbered — "amdgpu.noclobber". When the uniform pointer is a
/// plain Argument/GlobalValue (no defining instruction), a zero-index GEP is
/// cloned at function entry so there is an instruction to hang metadata on.
void AMDGPUAnnotateUniformValues::visitLoadInst(LoadInst &I) {
  Value *Ptr = I.getPointerOperand();
  if (!DA->isUniform(Ptr))
    return;
  auto isGlobalLoad = [&](LoadInst &Load)->bool {
    return Load.getPointerAddressSpace() == AMDGPUASI.GLOBAL_ADDRESS;
  };
  // We're tracking up to the Function boundaries
  // We cannot go beyond because of FunctionPass restrictions
  // Thus we can ensure that memory not clobbered for memory
  // operations that live in kernel only.
  bool NotClobbered = isKernelFunc && !isClobberedInFunction(&I);
  Instruction *PtrI = dyn_cast<Instruction>(Ptr);
  // If the pointer is not an instruction (e.g. a kernel argument or a
  // global), manufacture one so the metadata has an anchor.
  if (!PtrI && NotClobbered && isGlobalLoad(I)) {
    if (isa<Argument>(Ptr) || isa<GlobalValue>(Ptr)) {
      // Lookup for the existing GEP
      if (noClobberClones.count(Ptr)) {
        PtrI = noClobberClones[Ptr];
      } else {
        // Create GEP of the Value
        Function *F = I.getParent()->getParent();
        // NOTE(review): the i32 type passed here appears to be ignored —
        // Constant::getIntegerValue builds the constant from the APInt, whose
        // width is 64 bits, yielding an i64 0 index. Confirm whether an i32
        // index (APInt(32, 0)) was intended.
        Value *Idx = Constant::getIntegerValue(
          Type::getInt32Ty(Ptr->getContext()), APInt(64, 0));
        // Insert GEP at the entry to make it dominate all uses
        PtrI = GetElementPtrInst::Create(
          Ptr->getType()->getPointerElementType(), Ptr,
          ArrayRef<Value*>(Idx), Twine(""), F->getEntryBlock().getFirstNonPHI());
      }
      // Redirect this load through the clone; note the clone is cached only
      // when freshly created via noClobberClones (populated elsewhere or on
      // the lookup path above).
      I.replaceUsesOfWith(Ptr, PtrI);
    }
  }

  if (PtrI) {
    setUniformMetadata(PtrI);
    if (NotClobbered)
      setNoClobberMetadata(PtrI);
  }
}
|
|
|
|
|
|
|
|
/// Cache the target's address-space numbering once per module; visitLoadInst
/// uses it to recognize global-memory loads. Returns false: no IR changes.
bool AMDGPUAnnotateUniformValues::doInitialization(Module &M) {
  AMDGPUASI = AMDGPU::getAMDGPUAS(M);
  return false;
}
|
|
|
|
|
|
|
|
/// Drive the InstVisitor over \p F, annotating uniform branches and loads.
bool AMDGPUAnnotateUniformValues::runOnFunction(Function &F) {
  // Respect optnone / opt-bisect.
  if (skipFunction(F))
    return false;

  // Fetch the required analyses up front.
  LI  = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
  MDR = &getAnalysis<MemoryDependenceWrapperPass>().getMemDep();
  DA  = &getAnalysis<DivergenceAnalysis>();
  // The noclobber reasoning is sound only for kernel entry functions.
  isKernelFunc = F.getCallingConv() == CallingConv::AMDGPU_KERNEL;

  visit(F);
  // Cached GEP clones are per-function; drop them before the next run.
  noClobberClones.clear();
  return true;
}
|
|
|
|
|
|
|
|
/// Factory used by the AMDGPU target machine to instantiate this pass.
FunctionPass *llvm::createAMDGPUAnnotateUniformValues() {
  return new AMDGPUAnnotateUniformValues();
}
|