2012-03-16 13:51:52 +08:00
|
|
|
//===- CodeMetrics.cpp - Code cost measurements ---------------------------===//
|
|
|
|
//
|
|
|
|
// The LLVM Compiler Infrastructure
|
|
|
|
//
|
|
|
|
// This file is distributed under the University of Illinois Open Source
|
|
|
|
// License. See LICENSE.TXT for details.
|
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
//
|
|
|
|
// This file implements code cost measurement utilities.
|
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2015-01-04 20:03:27 +08:00
|
|
|
#include "llvm/Analysis/AssumptionCache.h"
|
2012-03-16 13:51:52 +08:00
|
|
|
#include "llvm/Analysis/CodeMetrics.h"
|
2014-09-07 21:49:57 +08:00
|
|
|
#include "llvm/Analysis/LoopInfo.h"
|
2013-01-21 21:04:33 +08:00
|
|
|
#include "llvm/Analysis/TargetTransformInfo.h"
|
2014-09-07 21:49:57 +08:00
|
|
|
#include "llvm/Analysis/ValueTracking.h"
|
2014-03-04 19:01:28 +08:00
|
|
|
#include "llvm/IR/CallSite.h"
|
2013-01-02 19:36:10 +08:00
|
|
|
#include "llvm/IR/DataLayout.h"
|
|
|
|
#include "llvm/IR/Function.h"
|
|
|
|
#include "llvm/IR/IntrinsicInst.h"
|
2014-09-07 21:49:57 +08:00
|
|
|
#include "llvm/Support/Debug.h"
|
2015-03-24 03:32:43 +08:00
|
|
|
#include "llvm/Support/raw_ostream.h"
|
2014-09-07 21:49:57 +08:00
|
|
|
|
|
|
|
#define DEBUG_TYPE "code-metrics"
|
2012-03-16 13:51:52 +08:00
|
|
|
|
|
|
|
using namespace llvm;
|
|
|
|
|
2014-09-07 21:49:57 +08:00
|
|
|
/// Propagate "ephemeral" status from the seed values in \p WorkSet to every
/// value that is live only because of them.
///
/// \param WorkSet   Seed values (the @llvm.assume calls); consumed as a FIFO
///                  worklist and extended with newly discovered operands.
/// \param EphValues Receives every value found to be ephemeral, including the
///                  seeds themselves.
static void completeEphemeralValues(SmallVector<const Value *, 16> &WorkSet,
                                    SmallPtrSetImpl<const Value*> &EphValues) {
  SmallPtrSet<const Value *, 32> Visited;

  // Make sure that all of the items in WorkSet are in our EphValues set.
  EphValues.insert(WorkSet.begin(), WorkSet.end());

  // Note: We don't speculate PHIs here, so we'll miss instruction chains kept
  // alive only by ephemeral values.

  // Walk the worklist front-to-back by index rather than erasing from the
  // front: SmallVector::erase(begin()) shifts every remaining element and
  // makes the loop O(n^2). The FIFO (breadth-first) order is load-bearing —
  // all users of a value sitting between it and the @llvm.assume call must be
  // classified before the value itself is examined.
  for (unsigned Idx = 0; Idx != WorkSet.size(); ++Idx) {
    const Value *V = WorkSet[Idx];

    if (!Visited.insert(V).second)
      continue;

    // If all uses of this value are ephemeral, then so is this value.
    if (!std::all_of(V->user_begin(), V->user_end(),
                     [&](const User *U) { return EphValues.count(U); }))
      continue;

    EphValues.insert(V);
    DEBUG(dbgs() << "Ephemeral Value: " << *V << "\n");

    // Queue speculatable operands at the back so they are processed only
    // after everything already in the worklist (preserving FIFO order).
    if (const User *U = dyn_cast<User>(V))
      for (const Value *J : U->operands()) {
        if (isSafeToSpeculativelyExecute(J))
          WorkSet.push_back(J);
      }
  }
}
|
|
|
|
|
|
|
|
// Find all ephemeral values.
|
2015-01-04 20:03:27 +08:00
|
|
|
void CodeMetrics::collectEphemeralValues(
|
|
|
|
const Loop *L, AssumptionCache *AC,
|
|
|
|
SmallPtrSetImpl<const Value *> &EphValues) {
|
2014-09-07 21:49:57 +08:00
|
|
|
SmallVector<const Value *, 16> WorkSet;
|
|
|
|
|
2015-01-04 20:03:27 +08:00
|
|
|
for (auto &AssumeVH : AC->assumptions()) {
|
|
|
|
if (!AssumeVH)
|
|
|
|
continue;
|
|
|
|
Instruction *I = cast<Instruction>(AssumeVH);
|
|
|
|
|
2014-09-07 21:49:57 +08:00
|
|
|
// Filter out call sites outside of the loop so we don't to a function's
|
|
|
|
// worth of work for each of its loops (and, in the common case, ephemeral
|
|
|
|
// values in the loop are likely due to @llvm.assume calls in the loop).
|
|
|
|
if (!L->contains(I->getParent()))
|
|
|
|
continue;
|
|
|
|
|
|
|
|
WorkSet.push_back(I);
|
|
|
|
}
|
|
|
|
|
|
|
|
completeEphemeralValues(WorkSet, EphValues);
|
|
|
|
}
|
|
|
|
|
2015-01-04 20:03:27 +08:00
|
|
|
void CodeMetrics::collectEphemeralValues(
|
|
|
|
const Function *F, AssumptionCache *AC,
|
|
|
|
SmallPtrSetImpl<const Value *> &EphValues) {
|
2014-09-07 21:49:57 +08:00
|
|
|
SmallVector<const Value *, 16> WorkSet;
|
|
|
|
|
2015-01-04 20:03:27 +08:00
|
|
|
for (auto &AssumeVH : AC->assumptions()) {
|
|
|
|
if (!AssumeVH)
|
|
|
|
continue;
|
|
|
|
Instruction *I = cast<Instruction>(AssumeVH);
|
|
|
|
assert(I->getParent()->getParent() == F &&
|
|
|
|
"Found assumption for the wrong function!");
|
2014-09-07 21:49:57 +08:00
|
|
|
WorkSet.push_back(I);
|
2015-01-04 20:03:27 +08:00
|
|
|
}
|
2014-09-07 21:49:57 +08:00
|
|
|
|
|
|
|
completeEphemeralValues(WorkSet, EphValues);
|
|
|
|
}
|
|
|
|
|
2012-03-16 13:51:52 +08:00
|
|
|
/// analyzeBasicBlock - Fill in the current structure with information gleaned
/// from the specified block.
///
/// Updates the running counters (NumBlocks, NumInsts, NumCalls, ...) and the
/// inlinability flags (isRecursive, notDuplicatable, convergent,
/// usesDynamicAlloca), and records this block's instruction-cost delta in
/// NumBBInsts. Instructions in \p EphValues are excluded from all metrics.
void CodeMetrics::analyzeBasicBlock(const BasicBlock *BB,
                                    const TargetTransformInfo &TTI,
                                    SmallPtrSetImpl<const Value*> &EphValues) {
  ++NumBlocks;
  // Snapshot the running total so we can compute this block's cost at the end.
  unsigned NumInstsBeforeThisBB = NumInsts;
  for (BasicBlock::const_iterator II = BB->begin(), E = BB->end();
       II != E; ++II) {
    // Skip ephemeral values.
    if (EphValues.count(&*II))
      continue;

    // Special handling for calls.
    if (isa<CallInst>(II) || isa<InvokeInst>(II)) {
      ImmutableCallSite CS(cast<Instruction>(II));

      if (const Function *F = CS.getCalledFunction()) {
        // If a function is both internal and has a single use, then it is
        // extremely likely to get inlined in the future (it was probably
        // exposed by an interleaved devirtualization pass).
        if (!CS.isNoInline() && F->hasInternalLinkage() && F->hasOneUse())
          ++NumInlineCandidates;

        // If this call is to function itself, then the function is recursive.
        // Inlining it into other functions is a bad idea, because this is
        // basically just a form of loop peeling, and our metrics aren't useful
        // for that case.
        if (F == BB->getParent())
          isRecursive = true;

        // Only count calls the target will actually lower to a real call
        // (e.g. most intrinsics expand inline and are free of call overhead).
        if (TTI.isLoweredToCall(F))
          ++NumCalls;
      } else {
        // We don't want inline asm to count as a call - that would prevent loop
        // unrolling. The argument setup cost is still real, though.
        if (!isa<InlineAsm>(CS.getCalledValue()))
          ++NumCalls;
      }
    }

    // Dynamic (non-entry-block / variable-sized) allocas are tracked because
    // they make the containing function harder to inline/duplicate safely.
    if (const AllocaInst *AI = dyn_cast<AllocaInst>(II)) {
      if (!AI->isStaticAlloca())
        this->usesDynamicAlloca = true;
    }

    if (isa<ExtractElementInst>(II) || II->getType()->isVectorTy())
      ++NumVectorInsts;

    // Token values used outside their defining block cannot be duplicated
    // (token-typed values cannot be PHI'd or spilled across block copies).
    if (II->getType()->isTokenTy() && II->isUsedOutsideOfBlock(BB))
      notDuplicatable = true;

    // Calls can carry attributes that forbid duplication or require
    // convergent (cross-thread) execution semantics.
    if (const CallInst *CI = dyn_cast<CallInst>(II)) {
      if (CI->cannotDuplicate())
        notDuplicatable = true;
      if (CI->isConvergent())
        convergent = true;
    }

    if (const InvokeInst *InvI = dyn_cast<InvokeInst>(II))
      if (InvI->cannotDuplicate())
        notDuplicatable = true;

    // Accumulate the target-specific cost of this instruction.
    NumInsts += TTI.getUserCost(&*II);
  }

  if (isa<ReturnInst>(BB->getTerminator()))
    ++NumRets;

  // We never want to inline functions that contain an indirectbr. This is
  // incorrect because all the blockaddress's (in static global initializers
  // for example) would be referring to the original function, and this indirect
  // jump would jump from the inlined copy of the function into the original
  // function which is extremely undefined behavior.
  // FIXME: This logic isn't really right; we can safely inline functions
  // with indirectbr's as long as no other function or global references the
  // blockaddress of a block within the current function. And as a QOI issue,
  // if someone is using a blockaddress without an indirectbr, and that
  // reference somehow ends up in another function or global, we probably
  // don't want to inline this function.
  notDuplicatable |= isa<IndirectBrInst>(BB->getTerminator());

  // Remember NumInsts for this BB.
  NumBBInsts[BB] = NumInsts - NumInstsBeforeThisBB;
}
|