//===-- LLVMContext.cpp - Implement LLVMContext ---------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements LLVMContext, as a wrapper around the opaque
// class LLVMContextImpl.
//
//===----------------------------------------------------------------------===//
#include "llvm/IR/LLVMContext.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "LLVMContextImpl.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/DiagnosticPrinter.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <cstdlib>
#include <string>
#include <utility>
using namespace llvm;
LLVMContext::LLVMContext() : pImpl(new LLVMContextImpl(*this)) {
  // Create the fixed metadata kinds. This is done in the same order as the
  // MD_* enum values so that they correspond.

  // Create the 'dbg' metadata kind.
  unsigned DbgID = getMDKindID("dbg");
  assert(DbgID == MD_dbg && "dbg kind id drifted");
  (void)DbgID;

  // Create the 'tbaa' metadata kind.
  unsigned TBAAID = getMDKindID("tbaa");
  assert(TBAAID == MD_tbaa && "tbaa kind id drifted");
  (void)TBAAID;

  // Create the 'prof' metadata kind.
  unsigned ProfID = getMDKindID("prof");
  assert(ProfID == MD_prof && "prof kind id drifted");
  (void)ProfID;

  // Create the 'fpmath' metadata kind.
  unsigned FPAccuracyID = getMDKindID("fpmath");
  assert(FPAccuracyID == MD_fpmath && "fpmath kind id drifted");
  (void)FPAccuracyID;

  // Create the 'range' metadata kind.
  unsigned RangeID = getMDKindID("range");
  assert(RangeID == MD_range && "range kind id drifted");
  (void)RangeID;

  // Create the 'tbaa.struct' metadata kind.
  unsigned TBAAStructID = getMDKindID("tbaa.struct");
  assert(TBAAStructID == MD_tbaa_struct && "tbaa.struct kind id drifted");
  (void)TBAAStructID;

  // Create the 'invariant.load' metadata kind.
  unsigned InvariantLdId = getMDKindID("invariant.load");
  assert(InvariantLdId == MD_invariant_load &&
         "invariant.load kind id drifted");
  (void)InvariantLdId;
  // Create the 'alias.scope' metadata kind.
  unsigned AliasScopeID = getMDKindID("alias.scope");
  assert(AliasScopeID == MD_alias_scope && "alias.scope kind id drifted");
  (void)AliasScopeID;

  // Create the 'noalias' metadata kind.
  unsigned NoAliasID = getMDKindID("noalias");
  assert(NoAliasID == MD_noalias && "noalias kind id drifted");
  (void)NoAliasID;

  // Create the 'nontemporal' metadata kind.
  unsigned NonTemporalID = getMDKindID("nontemporal");
  assert(NonTemporalID == MD_nontemporal && "nontemporal kind id drifted");
  (void)NonTemporalID;

  // Create the 'llvm.mem.parallel_loop_access' metadata kind.
  unsigned MemParallelLoopAccessID =
      getMDKindID("llvm.mem.parallel_loop_access");
  assert(MemParallelLoopAccessID == MD_mem_parallel_loop_access &&
         "mem_parallel_loop_access kind id drifted");
  (void)MemParallelLoopAccessID;

  // Create the 'nonnull' metadata kind.
  unsigned NonNullID = getMDKindID("nonnull");
  assert(NonNullID == MD_nonnull && "nonnull kind id drifted");
  (void)NonNullID;

  // Create the 'dereferenceable' metadata kind.
  unsigned DereferenceableID = getMDKindID("dereferenceable");
  assert(DereferenceableID == MD_dereferenceable &&
         "dereferenceable kind id drifted");
  (void)DereferenceableID;

  // Create the 'dereferenceable_or_null' metadata kind.
  unsigned DereferenceableOrNullID = getMDKindID("dereferenceable_or_null");
  assert(DereferenceableOrNullID == MD_dereferenceable_or_null &&
         "dereferenceable_or_null kind id drifted");
  (void)DereferenceableOrNullID;

  // Create the 'make.implicit' metadata kind.
  unsigned MakeImplicitID = getMDKindID("make.implicit");
  assert(MakeImplicitID == MD_make_implicit &&
         "make.implicit kind id drifted");
  (void)MakeImplicitID;

  // Create the 'unpredictable' metadata kind.
  unsigned UnpredictableID = getMDKindID("unpredictable");
  assert(UnpredictableID == MD_unpredictable &&
         "unpredictable kind id drifted");
  (void)UnpredictableID;

  // Create the 'invariant.group' metadata kind.
  unsigned InvariantGroupId = getMDKindID("invariant.group");
  assert(InvariantGroupId == MD_invariant_group &&
         "invariant.group kind id drifted");
  (void)InvariantGroupId;

  // Create the 'align' metadata kind.
  unsigned AlignID = getMDKindID("align");
  assert(AlignID == MD_align && "align kind id drifted");
  (void)AlignID;

  // Create the 'llvm.loop' metadata kind.
  unsigned LoopID = getMDKindID("llvm.loop");
  assert(LoopID == MD_loop && "llvm.loop kind id drifted");
  (void)LoopID;
  // Create the 'type' metadata kind.
  unsigned TypeID = getMDKindID("type");
  assert(TypeID == MD_type && "type kind id drifted");
  (void)TypeID;

  // Create the fixed operand bundle tags. As with the metadata kinds above,
  // this is done in the same order as the OB_* enum values.
  auto *DeoptEntry = pImpl->getOrInsertBundleTag("deopt");
  assert(DeoptEntry->second == LLVMContext::OB_deopt &&
         "deopt operand bundle id drifted!");
  (void)DeoptEntry;

  auto *FuncletEntry = pImpl->getOrInsertBundleTag("funclet");
  assert(FuncletEntry->second == LLVMContext::OB_funclet &&
         "funclet operand bundle id drifted!");
  (void)FuncletEntry;

  auto *GCTransitionEntry = pImpl->getOrInsertBundleTag("gc-transition");
  assert(GCTransitionEntry->second == LLVMContext::OB_gc_transition &&
         "gc-transition operand bundle id drifted!");
  (void)GCTransitionEntry;
}
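
// The fixed kind IDs registered above let clients use the LLVMContext::MD_*
// enum values directly when attaching metadata; other kinds are looked up by
// name. A minimal client-side usage sketch (not part of this file's
// interface; "my.kind", Ctx, I, and SomeNode are made-up example names):
//
//   I->setMetadata(LLVMContext::MD_nonnull, MDNode::get(Ctx, None));
//   unsigned MyKindID = Ctx.getMDKindID("my.kind");
//   I->setMetadata(MyKindID, SomeNode);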

LLVMContext::~LLVMContext() { delete pImpl; }

void LLVMContext::addModule(Module *M) {
  pImpl->OwnedModules.insert(M);
}

void LLVMContext::removeModule(Module *M) {
  pImpl->OwnedModules.erase(M);
}

//===----------------------------------------------------------------------===//
// Recoverable Backend Errors
//===----------------------------------------------------------------------===//

void LLVMContext::
setInlineAsmDiagnosticHandler(InlineAsmDiagHandlerTy DiagHandler,
                              void *DiagContext) {
  pImpl->InlineAsmDiagHandler = DiagHandler;
  pImpl->InlineAsmDiagContext = DiagContext;
}

/// getInlineAsmDiagnosticHandler - Return the diagnostic handler set by
/// setInlineAsmDiagnosticHandler.
LLVMContext::InlineAsmDiagHandlerTy
LLVMContext::getInlineAsmDiagnosticHandler() const {
  return pImpl->InlineAsmDiagHandler;
}

/// getInlineAsmDiagnosticContext - Return the diagnostic context set by
/// setInlineAsmDiagnosticHandler.
void *LLVMContext::getInlineAsmDiagnosticContext() const {
  return pImpl->InlineAsmDiagContext;
}

void LLVMContext::setDiagnosticHandler(DiagnosticHandlerTy DiagnosticHandler,
                                       void *DiagnosticContext,
                                       bool RespectFilters) {
  pImpl->DiagnosticHandler = DiagnosticHandler;
  pImpl->DiagnosticContext = DiagnosticContext;
  pImpl->RespectDiagnosticFilters = RespectFilters;
}
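
// A minimal sketch of installing a handler from client code (the handler
// name and its body are illustrative, not part of this interface):
//
//   static void handleDiag(const DiagnosticInfo &DI, void *Context) {
//     DiagnosticPrinterRawOStream DP(errs());
//     DI.print(DP);
//     errs() << "\n";
//   }
//   ...
//   Ctx.setDiagnosticHandler(handleDiag, /*DiagnosticContext=*/nullptr);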

void LLVMContext::setDiagnosticHotnessRequested(bool Requested) {
  pImpl->DiagnosticHotnessRequested = Requested;
}

bool LLVMContext::getDiagnosticHotnessRequested() const {
  return pImpl->DiagnosticHotnessRequested;
}

yaml::Output *LLVMContext::getDiagnosticsOutputFile() {
  return pImpl->DiagnosticsOutputFile.get();
}

void LLVMContext::setDiagnosticsOutputFile(yaml::Output *F) {
  pImpl->DiagnosticsOutputFile.reset(F);
}

LLVMContext::DiagnosticHandlerTy LLVMContext::getDiagnosticHandler() const {
  return pImpl->DiagnosticHandler;
}

void *LLVMContext::getDiagnosticContext() const {
  return pImpl->DiagnosticContext;
}

void LLVMContext::setYieldCallback(YieldCallbackTy Callback,
                                   void *OpaqueHandle) {
  pImpl->YieldCallback = Callback;
  pImpl->YieldOpaqueHandle = OpaqueHandle;
}

void LLVMContext::yield() {
  if (pImpl->YieldCallback)
    pImpl->YieldCallback(this, pImpl->YieldOpaqueHandle);
}

void LLVMContext::emitError(const Twine &ErrorStr) {
  diagnose(DiagnosticInfoInlineAsm(ErrorStr));
}

void LLVMContext::emitError(const Instruction *I, const Twine &ErrorStr) {
  assert(I && "Invalid instruction");
  diagnose(DiagnosticInfoInlineAsm(*I, ErrorStr));
}

static bool isDiagnosticEnabled(const DiagnosticInfo &DI) {
  // Optimization remarks are selective. They need to check whether the regexp
  // pattern, passed via one of the -pass-remarks* flags, matches the name of
  // the pass that is emitting the diagnostic. If there is no match, ignore the
  // diagnostic and return.
  if (auto *Remark = dyn_cast<DiagnosticInfoOptimizationBase>(&DI))
    return Remark->isEnabled();

  return true;
}

const char *
LLVMContext::getDiagnosticMessagePrefix(DiagnosticSeverity Severity) {
  switch (Severity) {
  case DS_Error:
    return "error";
  case DS_Warning:
    return "warning";
  case DS_Remark:
    return "remark";
  case DS_Note:
    return "note";
  }
  llvm_unreachable("Unknown DiagnosticSeverity");
}

void LLVMContext::diagnose(const DiagnosticInfo &DI) {
  // If there is a report handler, use it.
  if (pImpl->DiagnosticHandler) {
    if (!pImpl->RespectDiagnosticFilters || isDiagnosticEnabled(DI))
      pImpl->DiagnosticHandler(DI, pImpl->DiagnosticContext);
    return;
  }

  if (!isDiagnosticEnabled(DI))
    return;

  // Otherwise, print the message with a prefix based on the severity.
  DiagnosticPrinterRawOStream DP(errs());
  errs() << getDiagnosticMessagePrefix(DI.getSeverity()) << ": ";
  DI.print(DP);
  errs() << "\n";
  if (DI.getSeverity() == DS_Error)
    exit(1);
}

void LLVMContext::emitError(unsigned LocCookie, const Twine &ErrorStr) {
  diagnose(DiagnosticInfoInlineAsm(LocCookie, ErrorStr));
}
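
// A minimal client-side sketch of routing an error through the machinery
// above: emitError() wraps the message in a DiagnosticInfoInlineAsm and
// forwards it to diagnose(). For example (Inst and Ctx are made-up names):
//
//   Ctx.emitError(Inst, "invalid operand for inline asm constraint");
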
//===----------------------------------------------------------------------===//
// Metadata Kind Uniquing
//===----------------------------------------------------------------------===//

/// Return a unique ID for the specified metadata kind. IDs are assigned
/// sequentially starting at zero (the fixed MD_* kinds come first).
unsigned LLVMContext::getMDKindID(StringRef Name) const {
  // If this is new, assign it its ID.
  return pImpl->CustomMDKindNames
      .insert(std::make_pair(Name, pImpl->CustomMDKindNames.size()))
      .first->second;
}

/// getMDKindNames - Populate client-supplied smallvector using custom
/// metadata name and ID.
void LLVMContext::getMDKindNames(SmallVectorImpl<StringRef> &Names) const {
  Names.resize(pImpl->CustomMDKindNames.size());
  for (StringMap<unsigned>::const_iterator I = pImpl->CustomMDKindNames.begin(),
       E = pImpl->CustomMDKindNames.end(); I != E; ++I)
    Names[I->second] = I->first();
}

void LLVMContext::getOperandBundleTags(SmallVectorImpl<StringRef> &Tags) const {
  pImpl->getOperandBundleTags(Tags);
}

uint32_t LLVMContext::getOperandBundleTagID(StringRef Tag) const {
  return pImpl->getOperandBundleTagID(Tag);
}
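
// Per-function GC name registry. The GC name lives on the context (keyed by
// Function) rather than on the Function itself; Function::setGC/getGC are
// expected to forward to the accessors below.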
void LLVMContext::setGC(const Function &Fn, std::string GCName) {
  auto It = pImpl->GCNames.find(&Fn);

  if (It == pImpl->GCNames.end()) {
    pImpl->GCNames.insert(std::make_pair(&Fn, std::move(GCName)));
    return;
  }
  It->second = std::move(GCName);
}

const std::string &LLVMContext::getGC(const Function &Fn) {
  return pImpl->GCNames[&Fn];
}

void LLVMContext::deleteGC(const Function &Fn) {
  pImpl->GCNames.erase(&Fn);
}

bool LLVMContext::shouldDiscardValueNames() const {
  return pImpl->DiscardValueNames;
}
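
// Debug-info type ODR uniquing: when enabled, composite debug types that
// carry an ODR identifier are uniqued per identifier within this context,
// rather than one node per module.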
bool LLVMContext::isODRUniquingDebugTypes() const { return !!pImpl->DITypeMap; }

void LLVMContext::enableDebugTypeODRUniquing() {
  if (pImpl->DITypeMap)
    return;

  pImpl->DITypeMap.emplace();
}

void LLVMContext::disableDebugTypeODRUniquing() { pImpl->DITypeMap.reset(); }

void LLVMContext::setDiscardValueNames(bool Discard) {
  pImpl->DiscardValueNames = Discard;
}

OptBisect &LLVMContext::getOptBisect() {
  return pImpl->getOptBisect();
}