//===- FunctionImport.cpp - ThinLTO Summary-based Function Import ---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements Function import based on summaries.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/IPO/FunctionImport.h"
|
2017-10-11 06:49:55 +08:00
|
|
|
#include "llvm/ADT/ArrayRef.h"
|
|
|
|
#include "llvm/ADT/STLExtras.h"
|
|
|
|
#include "llvm/ADT/SetVector.h"
|
2016-03-26 13:40:34 +08:00
|
|
|
#include "llvm/ADT/SmallVector.h"
|
2016-03-27 23:27:30 +08:00
|
|
|
#include "llvm/ADT/Statistic.h"
|
2017-10-11 06:49:55 +08:00
|
|
|
#include "llvm/ADT/StringMap.h"
|
2018-02-18 08:01:36 +08:00
|
|
|
#include "llvm/ADT/StringRef.h"
|
2018-02-19 23:14:50 +08:00
|
|
|
#include "llvm/ADT/StringSet.h"
|
2017-05-02 04:42:32 +08:00
|
|
|
#include "llvm/Bitcode/BitcodeReader.h"
|
2015-11-24 14:07:49 +08:00
|
|
|
#include "llvm/IR/AutoUpgrade.h"
|
2017-12-16 08:18:12 +08:00
|
|
|
#include "llvm/IR/Constants.h"
|
2017-10-11 06:49:55 +08:00
|
|
|
#include "llvm/IR/Function.h"
|
|
|
|
#include "llvm/IR/GlobalAlias.h"
|
|
|
|
#include "llvm/IR/GlobalObject.h"
|
|
|
|
#include "llvm/IR/GlobalValue.h"
|
|
|
|
#include "llvm/IR/GlobalVariable.h"
|
|
|
|
#include "llvm/IR/Metadata.h"
|
2015-11-24 14:07:49 +08:00
|
|
|
#include "llvm/IR/Module.h"
|
2017-10-11 06:49:55 +08:00
|
|
|
#include "llvm/IR/ModuleSummaryIndex.h"
|
2015-11-24 14:07:49 +08:00
|
|
|
#include "llvm/IRReader/IRReader.h"
|
Sink all InitializePasses.h includes
This file lists every pass in LLVM, and is included by Pass.h, which is
very popular. Every time we add, remove, or rename a pass in LLVM, it
caused lots of recompilation.
I found this fact by looking at this table, which is sorted by the
number of times a file was changed over the last 100,000 git commits
multiplied by the number of object files that depend on it in the
current checkout:
recompiles touches affected_files header
342380 95 3604 llvm/include/llvm/ADT/STLExtras.h
314730 234 1345 llvm/include/llvm/InitializePasses.h
307036 118 2602 llvm/include/llvm/ADT/APInt.h
213049 59 3611 llvm/include/llvm/Support/MathExtras.h
170422 47 3626 llvm/include/llvm/Support/Compiler.h
162225 45 3605 llvm/include/llvm/ADT/Optional.h
158319 63 2513 llvm/include/llvm/ADT/Triple.h
140322 39 3598 llvm/include/llvm/ADT/StringRef.h
137647 59 2333 llvm/include/llvm/Support/Error.h
131619 73 1803 llvm/include/llvm/Support/FileSystem.h
Before this change, touching InitializePasses.h would cause 1345 files
to recompile. After this change, touching it only causes 550 compiles in
an incremental rebuild.
Reviewers: bkramer, asbirlea, bollu, jdoerfert
Differential Revision: https://reviews.llvm.org/D70211
2019-11-14 05:15:01 +08:00
|
|
|
#include "llvm/InitializePasses.h"
|
2017-10-11 06:49:55 +08:00
|
|
|
#include "llvm/Linker/IRMover.h"
|
|
|
|
#include "llvm/Object/ModuleSymbolTable.h"
|
|
|
|
#include "llvm/Object/SymbolicFile.h"
|
|
|
|
#include "llvm/Pass.h"
|
|
|
|
#include "llvm/Support/Casting.h"
|
2015-11-24 14:07:49 +08:00
|
|
|
#include "llvm/Support/CommandLine.h"
|
|
|
|
#include "llvm/Support/Debug.h"
|
2017-10-11 06:49:55 +08:00
|
|
|
#include "llvm/Support/Error.h"
|
|
|
|
#include "llvm/Support/ErrorHandling.h"
|
|
|
|
#include "llvm/Support/FileSystem.h"
|
2015-11-24 14:07:49 +08:00
|
|
|
#include "llvm/Support/SourceMgr.h"
|
2017-10-11 06:49:55 +08:00
|
|
|
#include "llvm/Support/raw_ostream.h"
|
2016-05-25 22:03:11 +08:00
|
|
|
#include "llvm/Transforms/IPO/Internalize.h"
|
2017-12-16 08:18:12 +08:00
|
|
|
#include "llvm/Transforms/Utils/Cloning.h"
|
2016-02-11 02:11:31 +08:00
|
|
|
#include "llvm/Transforms/Utils/FunctionImportUtils.h"
|
2017-12-16 08:18:12 +08:00
|
|
|
#include "llvm/Transforms/Utils/ValueMapper.h"
|
2017-10-11 06:49:55 +08:00
|
|
|
#include <cassert>
|
|
|
|
#include <memory>
|
|
|
|
#include <set>
|
|
|
|
#include <string>
|
|
|
|
#include <system_error>
|
|
|
|
#include <tuple>
|
|
|
|
#include <utility>
|
2015-12-09 16:17:35 +08:00
|
|
|
|
2015-11-24 14:07:49 +08:00
|
|
|
using namespace llvm;

#define DEBUG_TYPE "function-import"

STATISTIC(NumImportedFunctionsThinLink,
          "Number of functions thin link decided to import");
STATISTIC(NumImportedHotFunctionsThinLink,
          "Number of hot functions thin link decided to import");
STATISTIC(NumImportedCriticalFunctionsThinLink,
          "Number of critical functions thin link decided to import");
STATISTIC(NumImportedGlobalVarsThinLink,
          "Number of global variables thin link decided to import");
STATISTIC(NumImportedFunctions, "Number of functions imported in backend");
STATISTIC(NumImportedGlobalVars,
          "Number of global variables imported in backend");
STATISTIC(NumImportedModules, "Number of modules imported from");
STATISTIC(NumDeadSymbols, "Number of dead stripped symbols in index");
STATISTIC(NumLiveSymbols, "Number of live symbols in index");

/// Limit on instruction count of imported functions.
static cl::opt<unsigned> ImportInstrLimit(
    "import-instr-limit", cl::init(100), cl::Hidden, cl::value_desc("N"),
    cl::desc("Only import functions with less than N instructions"));

static cl::opt<int> ImportCutoff(
    "import-cutoff", cl::init(-1), cl::Hidden, cl::value_desc("N"),
    cl::desc("Only import first N functions if N>=0 (default -1)"));

static cl::opt<float>
    ImportInstrFactor("import-instr-evolution-factor", cl::init(0.7),
                      cl::Hidden, cl::value_desc("x"),
                      cl::desc("As we import functions, multiply the "
                               "`import-instr-limit` threshold by this factor "
                               "before processing newly imported functions"));

static cl::opt<float> ImportHotInstrFactor(
    "import-hot-evolution-factor", cl::init(1.0), cl::Hidden,
    cl::value_desc("x"),
    cl::desc("As we import functions called from hot callsite, multiply the "
             "`import-instr-limit` threshold by this factor "
             "before processing newly imported functions"));

static cl::opt<float> ImportHotMultiplier(
    "import-hot-multiplier", cl::init(10.0), cl::Hidden, cl::value_desc("x"),
    cl::desc("Multiply the `import-instr-limit` threshold for hot callsites"));

static cl::opt<float> ImportCriticalMultiplier(
    "import-critical-multiplier", cl::init(100.0), cl::Hidden,
    cl::value_desc("x"),
    cl::desc(
        "Multiply the `import-instr-limit` threshold for critical callsites"));

// FIXME: This multiplier was not really tuned up.
static cl::opt<float> ImportColdMultiplier(
    "import-cold-multiplier", cl::init(0), cl::Hidden, cl::value_desc("N"),
    cl::desc("Multiply the `import-instr-limit` threshold for cold callsites"));

static cl::opt<bool> PrintImports("print-imports", cl::init(false), cl::Hidden,
                                  cl::desc("Print imported functions"));

static cl::opt<bool> PrintImportFailures(
    "print-import-failures", cl::init(false), cl::Hidden,
    cl::desc("Print information for functions rejected for importing"));

static cl::opt<bool> ComputeDead("compute-dead", cl::init(true), cl::Hidden,
                                 cl::desc("Compute dead symbols"));

static cl::opt<bool> EnableImportMetadata(
    "enable-import-metadata", cl::init(
#if !defined(NDEBUG)
                                  true /*Enabled with asserts.*/
#else
                                  false
#endif
                                  ),
    cl::Hidden, cl::desc("Enable import metadata like 'thinlto_src_module'"));

/// Summary file to use for function importing when using -function-import from
/// the command line.
static cl::opt<std::string>
    SummaryFile("summary-file",
                cl::desc("The summary file to use for function importing."));

/// Used when testing importing from distributed indexes via opt
/// -function-import.
static cl::opt<bool>
    ImportAllIndex("import-all-index",
                   cl::desc("Import all external functions in index."));

// Lazily load a module from \p FileName in \p Context.
static std::unique_ptr<Module> loadFile(const std::string &FileName,
                                        LLVMContext &Context) {
  SMDiagnostic Err;
  LLVM_DEBUG(dbgs() << "Loading '" << FileName << "'\n");
  // Metadata isn't loaded until functions are imported, to minimize
  // the memory overhead.
  std::unique_ptr<Module> Result =
      getLazyIRFileModule(FileName, Err, Context,
                          /* ShouldLazyLoadMetadata = */ true);
  if (!Result) {
    Err.print("function-import", errs());
    report_fatal_error("Abort");
  }

  return Result;
}
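
// A minimal usage sketch (the file name here is hypothetical; callers pass the
// path of the source module to import from):
//
//   LLVMContext Ctx;
//   std::unique_ptr<Module> SrcModule = loadFile("callee_module.bc", Ctx);
//   // Function bodies and metadata are materialized later, on demand.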

/// Given a list of possible callee implementations for a call site, select one
/// that fits the \p Threshold.
///
/// FIXME: select "best" instead of first that fits. But what is "best"?
/// - The smallest: more likely to be inlined.
/// - The one with the fewest outgoing edges (already well optimized).
/// - One from a module already being imported from in order to reduce the
///   number of source modules parsed/linked.
/// - One that has PGO data attached.
/// - [insert your fancy metric here]
static const GlobalValueSummary *
selectCallee(const ModuleSummaryIndex &Index,
             ArrayRef<std::unique_ptr<GlobalValueSummary>> CalleeSummaryList,
             unsigned Threshold, StringRef CallerModulePath,
             FunctionImporter::ImportFailureReason &Reason,
             GlobalValue::GUID GUID) {
  Reason = FunctionImporter::ImportFailureReason::None;
  auto It = llvm::find_if(
      CalleeSummaryList,
      [&](const std::unique_ptr<GlobalValueSummary> &SummaryPtr) {
        auto *GVSummary = SummaryPtr.get();
        if (!Index.isGlobalValueLive(GVSummary)) {
          Reason = FunctionImporter::ImportFailureReason::NotLive;
          return false;
        }

        // For SamplePGO, in computeImportForFunction the OriginalId
        // may have been used to locate the callee summary list (see
        // comment there).
        // The mapping from OriginalId to GUID may return a GUID
        // that corresponds to a static variable. Filter it out here.
        // This can happen when
        // 1) There is a call to a library function which is not defined
        //    in the index.
        // 2) There is a static variable with the OriginalGUID identical
        //    to the GUID of the library function in 1).
        // When this happens, the logic for SamplePGO kicks in and
        // the static variable in 2) will be found, which needs to be
        // filtered out.
        if (GVSummary->getSummaryKind() == GlobalValueSummary::GlobalVarKind) {
          Reason = FunctionImporter::ImportFailureReason::GlobalVar;
          return false;
        }
        if (GlobalValue::isInterposableLinkage(GVSummary->linkage())) {
          Reason = FunctionImporter::ImportFailureReason::InterposableLinkage;
          // There is no point in importing these, we can't inline them.
          return false;
        }

        auto *Summary = cast<FunctionSummary>(GVSummary->getBaseObject());

        // If this is a local function, make sure we import the copy
        // in the caller's module. The only time a local function can
        // share an entry in the index is if there is a local with the same name
        // in another module that had the same source file name (in a different
        // directory), where each was compiled in its own directory so there
        // was no distinguishing path.
        // However, do the import from another module if there is only one
        // entry in the list - in that case this must be a reference due
        // to indirect call profile data, since a function pointer can point to
        // a local in another module.
        if (GlobalValue::isLocalLinkage(Summary->linkage()) &&
            CalleeSummaryList.size() > 1 &&
            Summary->modulePath() != CallerModulePath) {
          Reason =
              FunctionImporter::ImportFailureReason::LocalLinkageNotInModule;
          return false;
        }

        if ((Summary->instCount() > Threshold) &&
            !Summary->fflags().AlwaysInline) {
          Reason = FunctionImporter::ImportFailureReason::TooLarge;
          return false;
        }

        // Skip if it isn't legal to import (e.g. may reference unpromotable
        // locals).
        if (Summary->notEligibleToImport()) {
          Reason = FunctionImporter::ImportFailureReason::NotEligible;
          return false;
        }

        // Don't bother importing if we can't inline it anyway.
        if (Summary->fflags().NoInline) {
          Reason = FunctionImporter::ImportFailureReason::NoInline;
          return false;
        }

        return true;
      });
  if (It == CalleeSummaryList.end())
    return nullptr;

  return cast<GlobalValueSummary>(It->get());
}

namespace {

using EdgeInfo = std::tuple<const FunctionSummary *, unsigned /* Threshold */,
                            GlobalValue::GUID>;

} // anonymous namespace

static ValueInfo
updateValueInfoForIndirectCalls(const ModuleSummaryIndex &Index, ValueInfo VI) {
  if (!VI.getSummaryList().empty())
    return VI;
  // For SamplePGO, the indirect call targets for local functions will
  // have their original names annotated in the profile. We try to find the
  // corresponding PGOFuncName as the GUID.
  // FIXME: Consider updating the edges in the graph after building
  // it, rather than needing to perform this mapping on each walk.
  auto GUID = Index.getGUIDFromOriginalID(VI.getGUID());
  if (GUID == 0)
    return ValueInfo();
  return Index.getValueInfo(GUID);
}
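
// Background for the OriginalId mapping above: the GUID of a local function is
// derived from its source-file-qualified name (via
// GlobalValue::getGlobalIdentifier), while SamplePGO indirect call target
// profiles record a hash of the bare original name only. When the
// profile-derived GUID has no summaries in the index, getGUIDFromOriginalID
// recovers the matching file-qualified GUID so the call edge can still be
// resolved.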

static void computeImportForReferencedGlobals(
    const FunctionSummary &Summary, const ModuleSummaryIndex &Index,
    const GVSummaryMapTy &DefinedGVSummaries,
    FunctionImporter::ImportMapTy &ImportList,
    StringMap<FunctionImporter::ExportSetTy> *ExportLists) {
  for (auto &VI : Summary.refs()) {
    if (DefinedGVSummaries.count(VI.getGUID())) {
      LLVM_DEBUG(
          dbgs() << "Ref ignored! Target already in destination module.\n");
      continue;
    }

    LLVM_DEBUG(dbgs() << " ref -> " << VI << "\n");

    // If this is a local variable, make sure we import the copy
    // in the caller's module. The only time a local variable can
    // share an entry in the index is if there is a local with the same name
    // in another module that had the same source file name (in a different
    // directory), where each was compiled in its own directory so there
    // was no distinguishing path.
    auto LocalNotInModule = [&](const GlobalValueSummary *RefSummary) -> bool {
      return GlobalValue::isLocalLinkage(RefSummary->linkage()) &&
             RefSummary->modulePath() != Summary.modulePath();
    };

    auto MarkExported = [&](const ValueInfo &VI, const GlobalValueSummary *S) {
      if (ExportLists)
        (*ExportLists)[S->modulePath()].insert(VI);
    };

    for (auto &RefSummary : VI.getSummaryList())
      if (isa<GlobalVarSummary>(RefSummary.get()) &&
          Index.canImportGlobalVar(RefSummary.get(), /* AnalyzeRefs */ true) &&
          !LocalNotInModule(RefSummary.get())) {
        auto ILI = ImportList[RefSummary->modulePath()].insert(VI.getGUID());
        // Only update stat if we haven't already imported this variable.
        if (ILI.second)
          NumImportedGlobalVarsThinLink++;
        MarkExported(VI, RefSummary.get());
        // Promote referenced functions and variables. We don't promote
        // objects referenced by writeonly variable initializers, because
        // we convert such variables' initializers to "zeroinitializer".
        // See processGlobalForThinLTO.
        if (!Index.isWriteOnly(cast<GlobalVarSummary>(RefSummary.get())))
          for (const auto &VI : RefSummary->refs())
            for (const auto &RefFn : VI.getSummaryList())
              MarkExported(VI, RefFn.get());
        break;
      }
  }
}

static const char *
getFailureName(FunctionImporter::ImportFailureReason Reason) {
  switch (Reason) {
  case FunctionImporter::ImportFailureReason::None:
    return "None";
  case FunctionImporter::ImportFailureReason::GlobalVar:
    return "GlobalVar";
  case FunctionImporter::ImportFailureReason::NotLive:
    return "NotLive";
  case FunctionImporter::ImportFailureReason::TooLarge:
    return "TooLarge";
  case FunctionImporter::ImportFailureReason::InterposableLinkage:
    return "InterposableLinkage";
  case FunctionImporter::ImportFailureReason::LocalLinkageNotInModule:
    return "LocalLinkageNotInModule";
  case FunctionImporter::ImportFailureReason::NotEligible:
    return "NotEligible";
  case FunctionImporter::ImportFailureReason::NoInline:
    return "NoInline";
  }
  llvm_unreachable("invalid reason");
}

/// Compute the list of functions to import for a given caller. Mark these
/// imported functions and the symbols they reference in their source module as
/// exported from their source module.
static void computeImportForFunction(
    const FunctionSummary &Summary, const ModuleSummaryIndex &Index,
    const unsigned Threshold, const GVSummaryMapTy &DefinedGVSummaries,
    SmallVectorImpl<EdgeInfo> &Worklist,
    FunctionImporter::ImportMapTy &ImportList,
    StringMap<FunctionImporter::ExportSetTy> *ExportLists,
    FunctionImporter::ImportThresholdsTy &ImportThresholds) {
  computeImportForReferencedGlobals(Summary, Index, DefinedGVSummaries,
                                    ImportList, ExportLists);
  static int ImportCount = 0;
  for (auto &Edge : Summary.calls()) {
    ValueInfo VI = Edge.first;
    LLVM_DEBUG(dbgs() << " edge -> " << VI << " Threshold:" << Threshold
                      << "\n");

    if (ImportCutoff >= 0 && ImportCount >= ImportCutoff) {
      LLVM_DEBUG(dbgs() << "ignored! import-cutoff value of " << ImportCutoff
                        << " reached.\n");
      continue;
    }

    VI = updateValueInfoForIndirectCalls(Index, VI);
    if (!VI)
      continue;

    if (DefinedGVSummaries.count(VI.getGUID())) {
      LLVM_DEBUG(dbgs() << "ignored! Target already in destination module.\n");
      continue;
    }

    auto GetBonusMultiplier = [](CalleeInfo::HotnessType Hotness) -> float {
      if (Hotness == CalleeInfo::HotnessType::Hot)
        return ImportHotMultiplier;
      if (Hotness == CalleeInfo::HotnessType::Cold)
        return ImportColdMultiplier;
      if (Hotness == CalleeInfo::HotnessType::Critical)
        return ImportCriticalMultiplier;
      return 1.0;
    };

    const auto NewThreshold =
        Threshold * GetBonusMultiplier(Edge.second.getHotness());

    auto IT = ImportThresholds.insert(std::make_pair(
        VI.getGUID(), std::make_tuple(NewThreshold, nullptr, nullptr)));
    bool PreviouslyVisited = !IT.second;
    auto &ProcessedThreshold = std::get<0>(IT.first->second);
    auto &CalleeSummary = std::get<1>(IT.first->second);
    auto &FailureInfo = std::get<2>(IT.first->second);

    bool IsHotCallsite =
        Edge.second.getHotness() == CalleeInfo::HotnessType::Hot;
    bool IsCriticalCallsite =
        Edge.second.getHotness() == CalleeInfo::HotnessType::Critical;

    const FunctionSummary *ResolvedCalleeSummary = nullptr;
    if (CalleeSummary) {
      assert(PreviouslyVisited);
      // Since the traversal of the call graph is DFS, we can revisit a function
      // a second time with a higher threshold. In this case, it is added back
      // to the worklist with the new threshold (so that its own callee chains
      // can be considered with the higher threshold).
      if (NewThreshold <= ProcessedThreshold) {
        LLVM_DEBUG(
            dbgs() << "ignored! Target was already imported with Threshold "
                   << ProcessedThreshold << "\n");
        continue;
      }
      // Update with new larger threshold.
      ProcessedThreshold = NewThreshold;
      ResolvedCalleeSummary = cast<FunctionSummary>(CalleeSummary);
    } else {
      // If we already rejected importing a callee at the same or higher
      // threshold, don't waste time calling selectCallee.
      if (PreviouslyVisited && NewThreshold <= ProcessedThreshold) {
        LLVM_DEBUG(
            dbgs() << "ignored! Target was already rejected with Threshold "
                   << ProcessedThreshold << "\n");
        if (PrintImportFailures) {
          assert(FailureInfo &&
                 "Expected FailureInfo for previously rejected candidate");
          FailureInfo->Attempts++;
        }
        continue;
      }

      FunctionImporter::ImportFailureReason Reason;
      CalleeSummary = selectCallee(Index, VI.getSummaryList(), NewThreshold,
                                   Summary.modulePath(), Reason, VI.getGUID());
      if (!CalleeSummary) {
        // Update with new larger threshold if this was a retry (otherwise
        // we would have already inserted with NewThreshold above). Also
        // update failure info if requested.
        if (PreviouslyVisited) {
          ProcessedThreshold = NewThreshold;
          if (PrintImportFailures) {
            assert(FailureInfo &&
                   "Expected FailureInfo for previously rejected candidate");
            FailureInfo->Reason = Reason;
            FailureInfo->Attempts++;
            FailureInfo->MaxHotness =
                std::max(FailureInfo->MaxHotness, Edge.second.getHotness());
          }
        } else if (PrintImportFailures) {
          assert(!FailureInfo &&
                 "Expected no FailureInfo for newly rejected candidate");
          FailureInfo = std::make_unique<FunctionImporter::ImportFailureInfo>(
              VI, Edge.second.getHotness(), Reason, 1);
        }
|
Restore "[ThinLTO] Ensure we always select the same function copy to import"
This reverts commit r337081, therefore restoring r337050 (and fix in
r337059), with test fix for bot failure described after the original
description below.
In order to always import the same copy of a linkonce function,
even when encountering it with different thresholds (a higher one then a
lower one), keep track of the summary we decided to import.
This ensures that the backend only gets a single definition to import
for each GUID, so that it doesn't need to choose one.
Move the largest threshold the GUID was considered for import into the
current module out of the ImportMap (which is part of a larger map
maintained across the whole index), and into a new map just maintained
for the current module we are computing imports for. This saves some
memory since we no longer have the thresholds maintained across the
whole index (and throughout the in-process backends when doing a normal
non-distributed ThinLTO build), at the cost of some additional
information being maintained for each invocation of ComputeImportForModule
(the selected summary pointer for each import).
There is an additional map lookup for each callee being considered for
importing, however, this was able to subsume a map lookup in the
Worklist iteration that invokes computeImportForFunction. We also are
able to avoid calling selectCallee if we already failed to import at the
same or higher threshold.
I compared the run time and peak memory for the SPEC2006 471.omnetpp
benchmark (running in-process ThinLTO backends), as well as for a large
internal benchmark with a distributed ThinLTO build (so just looking at
the thin link time/memory). Across a number of runs with and without
this change there was no significant change in the time and memory.
(I tried a few other variations of the change but they also didn't
improve time or peak memory).
The new commit removes a test that no longer makes sense
(Transforms/FunctionImport/hotness_based_import2.ll), as exposed by the
reverse-iteration bot. The test depends on the order of processing the
summary call edges, and actually depended on the old problematic
behavior of selecting more than one summary for a given GUID when
encountered with different thresholds. There was no guarantee even
before that we would eventually pick the linkonce copy with the hottest
call edges, it just happened to work with the test and the old code, and
there was no guarantee that we would end up importing the selected
version of the copy that had the hottest call edges (since the backend
would effectively import only one of the selected copies).
Reviewers: davidxl
Subscribers: mehdi_amini, inglorion, llvm-commits
Differential Revision: https://reviews.llvm.org/D48670
llvm-svn: 337184
2018-07-16 23:30:27 +08:00
|
|
|
LLVM_DEBUG(
|
|
|
|
dbgs() << "ignored! No qualifying callee with summary found.\n");
|
|
|
|
continue;
|
|
|
|
}
|
2016-04-16 14:56:44 +08:00
|
|
|
|
Restore "[ThinLTO] Ensure we always select the same function copy to import"
This reverts commit r337081, therefore restoring r337050 (and fix in
r337059), with test fix for bot failure described after the original
description below.
In order to always import the same copy of a linkonce function,
even when encountering it with different thresholds (a higher one then a
lower one), keep track of the summary we decided to import.
This ensures that the backend only gets a single definition to import
for each GUID, so that it doesn't need to choose one.
Move the largest threshold the GUID was considered for import into the
current module out of the ImportMap (which is part of a larger map
maintained across the whole index), and into a new map just maintained
for the current module we are computing imports for. This saves some
memory since we no longer have the thresholds maintained across the
whole index (and throughout the in-process backends when doing a normal
non-distributed ThinLTO build), at the cost of some additional
information being maintained for each invocation of ComputeImportForModule
(the selected summary pointer for each import).
There is an additional map lookup for each callee being considered for
importing, however, this was able to subsume a map lookup in the
Worklist iteration that invokes computeImportForFunction. We also are
able to avoid calling selectCallee if we already failed to import at the
same or higher threshold.
I compared the run time and peak memory for the SPEC2006 471.omnetpp
benchmark (running in-process ThinLTO backends), as well as for a large
internal benchmark with a distributed ThinLTO build (so just looking at
the thin link time/memory). Across a number of runs with and without
this change there was no significant change in the time and memory.
(I tried a few other variations of the change but they also didn't
improve time or peak memory).
The new commit removes a test that no longer makes sense
(Transforms/FunctionImport/hotness_based_import2.ll), as exposed by the
reverse-iteration bot. The test depends on the order of processing the
summary call edges, and actually depended on the old problematic
behavior of selecting more than one summary for a given GUID when
encountered with different thresholds. There was no guarantee even
before that we would eventually pick the linkonce copy with the hottest
call edges, it just happened to work with the test and the old code, and
there was no guarantee that we would end up importing the selected
version of the copy that had the hottest call edges (since the backend
would effectively import only one of the selected copies).
Reviewers: davidxl
Subscribers: mehdi_amini, inglorion, llvm-commits
Differential Revision: https://reviews.llvm.org/D48670
llvm-svn: 337184
2018-07-16 23:30:27 +08:00
|
|
|
// "Resolve" the summary
|
|
|
|
CalleeSummary = CalleeSummary->getBaseObject();
|
|
|
|
ResolvedCalleeSummary = cast<FunctionSummary>(CalleeSummary);
|
|
|
|
|
2019-11-09 07:50:55 +08:00
|
|
|
assert((ResolvedCalleeSummary->fflags().AlwaysInline ||
|
|
|
|
(ResolvedCalleeSummary->instCount() <= NewThreshold)) &&
|
Restore "[ThinLTO] Ensure we always select the same function copy to import"
This reverts commit r337081, therefore restoring r337050 (and fix in
r337059), with test fix for bot failure described after the original
description below.
In order to always import the same copy of a linkonce function,
even when encountering it with different thresholds (a higher one then a
lower one), keep track of the summary we decided to import.
This ensures that the backend only gets a single definition to import
for each GUID, so that it doesn't need to choose one.
Move the largest threshold the GUID was considered for import into the
current module out of the ImportMap (which is part of a larger map
maintained across the whole index), and into a new map just maintained
for the current module we are computing imports for. This saves some
memory since we no longer have the thresholds maintained across the
whole index (and throughout the in-process backends when doing a normal
non-distributed ThinLTO build), at the cost of some additional
information being maintained for each invocation of ComputeImportForModule
(the selected summary pointer for each import).
There is an additional map lookup for each callee being considered for
importing, however, this was able to subsume a map lookup in the
Worklist iteration that invokes computeImportForFunction. We also are
able to avoid calling selectCallee if we already failed to import at the
same or higher threshold.
I compared the run time and peak memory for the SPEC2006 471.omnetpp
benchmark (running in-process ThinLTO backends), as well as for a large
internal benchmark with a distributed ThinLTO build (so just looking at
the thin link time/memory). Across a number of runs with and without
this change there was no significant change in the time and memory.
(I tried a few other variations of the change but they also didn't
improve time or peak memory).
The new commit removes a test that no longer makes sense
(Transforms/FunctionImport/hotness_based_import2.ll), as exposed by the
reverse-iteration bot. The test depends on the order of processing the
summary call edges, and actually depended on the old problematic
behavior of selecting more than one summary for a given GUID when
encountered with different thresholds. There was no guarantee even
before that we would eventually pick the linkonce copy with the hottest
call edges, it just happened to work with the test and the old code, and
there was no guarantee that we would end up importing the selected
version of the copy that had the hottest call edges (since the backend
would effectively import only one of the selected copies).
Reviewers: davidxl
Subscribers: mehdi_amini, inglorion, llvm-commits
Differential Revision: https://reviews.llvm.org/D48670
llvm-svn: 337184
2018-07-16 23:30:27 +08:00
|
|
|
"selectCallee() didn't honor the threshold");
|
|
|
|
|
|
|
|
auto ExportModulePath = ResolvedCalleeSummary->modulePath();
|
|
|
|
auto ILI = ImportList[ExportModulePath].insert(VI.getGUID());
|
|
|
|
// We previously decided to import this GUID definition if it was already
|
|
|
|
// inserted in the set of imports from the exporting module.
|
|
|
|
bool PreviouslyImported = !ILI.second;
|
2018-10-17 07:49:50 +08:00
|
|
|
if (!PreviouslyImported) {
|
|
|
|
NumImportedFunctionsThinLink++;
|
|
|
|
if (IsHotCallsite)
|
|
|
|
NumImportedHotFunctionsThinLink++;
|
|
|
|
if (IsCriticalCallsite)
|
|
|
|
NumImportedCriticalFunctionsThinLink++;
|
|
|
|
}
|
Restore "[ThinLTO] Ensure we always select the same function copy to import"
This reverts commit r337081, therefore restoring r337050 (and fix in
r337059), with test fix for bot failure described after the original
description below.
In order to always import the same copy of a linkonce function,
even when encountering it with different thresholds (a higher one then a
lower one), keep track of the summary we decided to import.
This ensures that the backend only gets a single definition to import
for each GUID, so that it doesn't need to choose one.
Move the largest threshold the GUID was considered for import into the
current module out of the ImportMap (which is part of a larger map
maintained across the whole index), and into a new map just maintained
for the current module we are computing imports for. This saves some
memory since we no longer have the thresholds maintained across the
whole index (and throughout the in-process backends when doing a normal
non-distributed ThinLTO build), at the cost of some additional
information being maintained for each invocation of ComputeImportForModule
(the selected summary pointer for each import).
There is an additional map lookup for each callee being considered for
importing, however, this was able to subsume a map lookup in the
Worklist iteration that invokes computeImportForFunction. We also are
able to avoid calling selectCallee if we already failed to import at the
same or higher threshold.
I compared the run time and peak memory for the SPEC2006 471.omnetpp
benchmark (running in-process ThinLTO backends), as well as for a large
internal benchmark with a distributed ThinLTO build (so just looking at
the thin link time/memory). Across a number of runs with and without
this change there was no significant change in the time and memory.
(I tried a few other variations of the change but they also didn't
improve time or peak memory).
The new commit removes a test that no longer makes sense
(Transforms/FunctionImport/hotness_based_import2.ll), as exposed by the
reverse-iteration bot. The test depends on the order of processing the
summary call edges, and actually depended on the old problematic
behavior of selecting more than one summary for a given GUID when
encountered with different thresholds. There was no guarantee even
before that we would eventually pick the linkonce copy with the hottest
call edges, it just happened to work with the test and the old code, and
there was no guarantee that we would end up importing the selected
version of the copy that had the hottest call edges (since the backend
would effectively import only one of the selected copies).
Reviewers: davidxl
Subscribers: mehdi_amini, inglorion, llvm-commits
Differential Revision: https://reviews.llvm.org/D48670
llvm-svn: 337184
2018-07-16 23:30:27 +08:00
|
|
|
|
|
|
|
// Make exports in the source module.
|
|
|
|
if (ExportLists) {
|
|
|
|
auto &ExportList = (*ExportLists)[ExportModulePath];
|
2019-11-15 21:13:19 +08:00
|
|
|
ExportList.insert(VI);
|
Restore "[ThinLTO] Ensure we always select the same function copy to import"
This reverts commit r337081, therefore restoring r337050 (and fix in
r337059), with test fix for bot failure described after the original
description below.
In order to always import the same copy of a linkonce function,
even when encountering it with different thresholds (a higher one then a
lower one), keep track of the summary we decided to import.
This ensures that the backend only gets a single definition to import
for each GUID, so that it doesn't need to choose one.
Move the largest threshold the GUID was considered for import into the
current module out of the ImportMap (which is part of a larger map
maintained across the whole index), and into a new map just maintained
for the current module we are computing imports for. This saves some
memory since we no longer have the thresholds maintained across the
whole index (and throughout the in-process backends when doing a normal
non-distributed ThinLTO build), at the cost of some additional
information being maintained for each invocation of ComputeImportForModule
(the selected summary pointer for each import).
There is an additional map lookup for each callee being considered for
importing, however, this was able to subsume a map lookup in the
Worklist iteration that invokes computeImportForFunction. We also are
able to avoid calling selectCallee if we already failed to import at the
same or higher threshold.
I compared the run time and peak memory for the SPEC2006 471.omnetpp
benchmark (running in-process ThinLTO backends), as well as for a large
internal benchmark with a distributed ThinLTO build (so just looking at
the thin link time/memory). Across a number of runs with and without
this change there was no significant change in the time and memory.
(I tried a few other variations of the change but they also didn't
improve time or peak memory).
The new commit removes a test that no longer makes sense
(Transforms/FunctionImport/hotness_based_import2.ll), as exposed by the
reverse-iteration bot. The test depends on the order of processing the
summary call edges, and actually depended on the old problematic
behavior of selecting more than one summary for a given GUID when
encountered with different thresholds. There was no guarantee even
before that we would eventually pick the linkonce copy with the hottest
call edges, it just happened to work with the test and the old code, and
there was no guarantee that we would end up importing the selected
version of the copy that had the hottest call edges (since the backend
would effectively import only one of the selected copies).
Reviewers: davidxl
Subscribers: mehdi_amini, inglorion, llvm-commits
Differential Revision: https://reviews.llvm.org/D48670
llvm-svn: 337184
2018-07-16 23:30:27 +08:00
|
|
|
if (!PreviouslyImported) {
|
|
|
|
// This is the first time this function was exported from its source
|
|
|
|
// module, so mark all functions and globals it references as exported
|
|
|
|
// to the outside if they are defined in the same source module.
|
|
|
|
// For efficiency, we unconditionally add all the referenced GUIDs
|
|
|
|
// to the ExportList for this module, and will prune out any not
|
|
|
|
// defined in the module later in a single pass.
|
2019-11-15 21:13:19 +08:00
|
|
|
for (auto &Edge : ResolvedCalleeSummary->calls())
|
|
|
|
ExportList.insert(Edge.first);
|
|
|
|
|
|
|
|
for (auto &Ref : ResolvedCalleeSummary->refs())
|
|
|
|
ExportList.insert(Ref);
|
Restore "[ThinLTO] Ensure we always select the same function copy to import"
This reverts commit r337081, therefore restoring r337050 (and fix in
r337059), with test fix for bot failure described after the original
description below.
In order to always import the same copy of a linkonce function,
even when encountering it with different thresholds (a higher one then a
lower one), keep track of the summary we decided to import.
This ensures that the backend only gets a single definition to import
for each GUID, so that it doesn't need to choose one.
Move the largest threshold the GUID was considered for import into the
current module out of the ImportMap (which is part of a larger map
maintained across the whole index), and into a new map just maintained
for the current module we are computing imports for. This saves some
memory since we no longer have the thresholds maintained across the
whole index (and throughout the in-process backends when doing a normal
non-distributed ThinLTO build), at the cost of some additional
information being maintained for each invocation of ComputeImportForModule
(the selected summary pointer for each import).
There is an additional map lookup for each callee being considered for
importing, however, this was able to subsume a map lookup in the
Worklist iteration that invokes computeImportForFunction. We also are
able to avoid calling selectCallee if we already failed to import at the
same or higher threshold.
I compared the run time and peak memory for the SPEC2006 471.omnetpp
benchmark (running in-process ThinLTO backends), as well as for a large
internal benchmark with a distributed ThinLTO build (so just looking at
the thin link time/memory). Across a number of runs with and without
this change there was no significant change in the time and memory.
(I tried a few other variations of the change but they also didn't
improve time or peak memory).
The new commit removes a test that no longer makes sense
(Transforms/FunctionImport/hotness_based_import2.ll), as exposed by the
reverse-iteration bot. The test depends on the order of processing the
summary call edges, and actually depended on the old problematic
behavior of selecting more than one summary for a given GUID when
encountered with different thresholds. There was no guarantee even
before that we would eventually pick the linkonce copy with the hottest
call edges, it just happened to work with the test and the old code, and
there was no guarantee that we would end up importing the selected
version of the copy that had the hottest call edges (since the backend
would effectively import only one of the selected copies).
Reviewers: davidxl
Subscribers: mehdi_amini, inglorion, llvm-commits
Differential Revision: https://reviews.llvm.org/D48670
llvm-svn: 337184
2018-07-16 23:30:27 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2016-03-26 13:40:34 +08:00
|
|
|
|
2016-12-16 02:21:01 +08:00
|
|
|
auto GetAdjustedThreshold = [](unsigned Threshold, bool IsHotCallsite) {
|
|
|
|
// Adjust the threshold for the next level of imported functions.
|
|
|
|
// The threshold is different for hot callsites because we can then
|
|
|
|
// inline chains of hot calls.
|
|
|
|
if (IsHotCallsite)
|
|
|
|
return Threshold * ImportHotInstrFactor;
|
|
|
|
return Threshold * ImportInstrFactor;
|
|
|
|
};
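// Worked example (illustrative only; the real factors come from the
// ImportInstrFactor and ImportHotInstrFactor options and their defaults are
// not restated here): with Threshold = 100 and a hypothetical factor of 0.7,
// callees of this newly imported function are considered up to a budget of
// ~70 instructions, while a hypothetical hot-callsite factor of 3.0 would
// raise that budget to 300.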
|
|
|
|
|
|
|
|
const auto AdjThreshold = GetAdjustedThreshold(Threshold, IsHotCallsite);
|
|
|
|
|
2018-04-01 23:54:40 +08:00
|
|
|
ImportCount++;
|
|
|
|
|
2016-03-26 13:40:34 +08:00
|
|
|
// Insert the newly imported function into the worklist.
|
2017-05-05 02:03:25 +08:00
|
|
|
Worklist.emplace_back(ResolvedCalleeSummary, AdjThreshold, VI.getGUID());
|
2016-03-26 13:40:34 +08:00
|
|
|
}
|
|
|
|
}
|
2016-02-11 07:31:45 +08:00
|
|
|
|
2016-03-26 13:40:34 +08:00
|
|
|
/// Given the list of globals defined in a module, compute the list of imports
|
|
|
|
/// as well as the list of "exports", i.e. the list of symbols referenced from
|
|
|
|
/// another module (that may require promotion).
|
|
|
|
static void ComputeImportForModule(
|
2016-04-26 05:09:51 +08:00
|
|
|
const GVSummaryMapTy &DefinedGVSummaries, const ModuleSummaryIndex &Index,
|
2018-08-18 00:53:47 +08:00
|
|
|
StringRef ModName, FunctionImporter::ImportMapTy &ImportList,
|
2017-06-02 04:30:06 +08:00
|
|
|
StringMap<FunctionImporter::ExportSetTy> *ExportLists = nullptr) {
|
2016-03-26 13:40:34 +08:00
|
|
|
// Worklist contains the list of functions imported into this module, for which
|
|
|
|
// we will analyse the callees and may import further down the callgraph.
|
|
|
|
SmallVector<EdgeInfo, 128> Worklist;
|
Restore "[ThinLTO] Ensure we always select the same function copy to import"
This reverts commit r337081, therefore restoring r337050 (and fix in
r337059), with test fix for bot failure described after the original
description below.
In order to always import the same copy of a linkonce function,
even when encountering it with different thresholds (a higher one then a
lower one), keep track of the summary we decided to import.
This ensures that the backend only gets a single definition to import
for each GUID, so that it doesn't need to choose one.
Move the largest threshold the GUID was considered for import into the
current module out of the ImportMap (which is part of a larger map
maintained across the whole index), and into a new map just maintained
for the current module we are computing imports for. This saves some
memory since we no longer have the thresholds maintained across the
whole index (and throughout the in-process backends when doing a normal
non-distributed ThinLTO build), at the cost of some additional
information being maintained for each invocation of ComputeImportForModule
(the selected summary pointer for each import).
There is an additional map lookup for each callee being considered for
importing, however, this was able to subsume a map lookup in the
Worklist iteration that invokes computeImportForFunction. We also are
able to avoid calling selectCallee if we already failed to import at the
same or higher threshold.
I compared the run time and peak memory for the SPEC2006 471.omnetpp
benchmark (running in-process ThinLTO backends), as well as for a large
internal benchmark with a distributed ThinLTO build (so just looking at
the thin link time/memory). Across a number of runs with and without
this change there was no significant change in the time and memory.
(I tried a few other variations of the change but they also didn't
improve time or peak memory).
The new commit removes a test that no longer makes sense
(Transforms/FunctionImport/hotness_based_import2.ll), as exposed by the
reverse-iteration bot. The test depends on the order of processing the
summary call edges, and actually depended on the old problematic
behavior of selecting more than one summary for a given GUID when
encountered with different thresholds. There was no guarantee even
before that we would eventually pick the linkonce copy with the hottest
call edges, it just happened to work with the test and the old code, and
there was no guarantee that we would end up importing the selected
version of the copy that had the hottest call edges (since the backend
would effectively import only one of the selected copies).
Reviewers: davidxl
Subscribers: mehdi_amini, inglorion, llvm-commits
Differential Revision: https://reviews.llvm.org/D48670
llvm-svn: 337184
2018-07-16 23:30:27 +08:00
|
|
|
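// For each candidate callee GUID considered for import into this module,
// track the largest threshold it has been processed at, the summary copy
// selected for import (if any), and, when PrintImportFailures is enabled,
// the information about why the last attempt was rejected.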
FunctionImporter::ImportThresholdsTy ImportThresholds;
|
2016-03-26 13:40:34 +08:00
|
|
|
|
|
|
|
// Populate the worklist with the imports for the functions in the current
|
|
|
|
// module
|
2016-04-24 22:57:11 +08:00
|
|
|
for (auto &GVSummary : DefinedGVSummaries) {
|
2018-06-28 02:03:39 +08:00
|
|
|
#ifndef NDEBUG
|
|
|
|
// FIXME: Change the GVSummaryMapTy to hold ValueInfo instead of GUID
|
|
|
|
// so this map lookup (and possibly others) can be avoided.
|
|
|
|
auto VI = Index.getValueInfo(GVSummary.first);
|
|
|
|
#endif
|
2017-06-02 04:30:06 +08:00
|
|
|
if (!Index.isGlobalValueLive(GVSummary.second)) {
|
2018-06-28 02:03:39 +08:00
|
|
|
LLVM_DEBUG(dbgs() << "Ignores Dead GUID: " << VI << "\n");
|
2017-01-06 05:34:18 +08:00
|
|
|
continue;
|
|
|
|
}
|
2017-09-14 13:02:59 +08:00
|
|
|
auto *FuncSummary =
|
|
|
|
dyn_cast<FunctionSummary>(GVSummary.second->getBaseObject());
|
2016-04-16 15:02:16 +08:00
|
|
|
if (!FuncSummary)
|
|
|
|
// Skip import for global variables
|
|
|
|
continue;
|
2018-06-28 02:03:39 +08:00
|
|
|
LLVM_DEBUG(dbgs() << "Initialize import for " << VI << "\n");
|
2016-04-16 14:56:44 +08:00
|
|
|
computeImportForFunction(*FuncSummary, Index, ImportInstrLimit,
|
2016-08-16 13:47:12 +08:00
|
|
|
DefinedGVSummaries, Worklist, ImportList,
|
Restore "[ThinLTO] Ensure we always select the same function copy to import"
This reverts commit r337081, therefore restoring r337050 (and fix in
r337059), with test fix for bot failure described after the original
description below.
In order to always import the same copy of a linkonce function,
even when encountering it with different thresholds (a higher one then a
lower one), keep track of the summary we decided to import.
This ensures that the backend only gets a single definition to import
for each GUID, so that it doesn't need to choose one.
Move the largest threshold the GUID was considered for import into the
current module out of the ImportMap (which is part of a larger map
maintained across the whole index), and into a new map just maintained
for the current module we are computing imports for. This saves some
memory since we no longer have the thresholds maintained across the
whole index (and throughout the in-process backends when doing a normal
non-distributed ThinLTO build), at the cost of some additional
information being maintained for each invocation of ComputeImportForModule
(the selected summary pointer for each import).
There is an additional map lookup for each callee being considered for
importing, however, this was able to subsume a map lookup in the
Worklist iteration that invokes computeImportForFunction. We also are
able to avoid calling selectCallee if we already failed to import at the
same or higher threshold.
I compared the run time and peak memory for the SPEC2006 471.omnetpp
benchmark (running in-process ThinLTO backends), as well as for a large
internal benchmark with a distributed ThinLTO build (so just looking at
the thin link time/memory). Across a number of runs with and without
this change there was no significant change in the time and memory.
(I tried a few other variations of the change but they also didn't
improve time or peak memory).
The new commit removes a test that no longer makes sense
(Transforms/FunctionImport/hotness_based_import2.ll), as exposed by the
reverse-iteration bot. The test depends on the order of processing the
summary call edges, and actually depended on the old problematic
behavior of selecting more than one summary for a given GUID when
encountered with different thresholds. There was no guarantee even
before that we would eventually pick the linkonce copy with the hottest
call edges, it just happened to work with the test and the old code, and
there was no guarantee that we would end up importing the selected
version of the copy that had the hottest call edges (since the backend
would effectively import only one of the selected copies).
Reviewers: davidxl
Subscribers: mehdi_amini, inglorion, llvm-commits
Differential Revision: https://reviews.llvm.org/D48670
llvm-svn: 337184
2018-07-16 23:30:27 +08:00
|
|
|
ExportLists, ImportThresholds);
|
2016-03-26 13:40:34 +08:00
|
|
|
}
|
2015-11-24 14:07:49 +08:00
|
|
|
|
2016-09-30 11:01:17 +08:00
|
|
|
// Process the newly imported functions and add callees to the worklist.
|
2016-03-26 13:40:34 +08:00
|
|
|
while (!Worklist.empty()) {
|
|
|
|
auto FuncInfo = Worklist.pop_back_val();
|
2016-12-16 04:48:19 +08:00
|
|
|
auto *Summary = std::get<0>(FuncInfo);
|
|
|
|
auto Threshold = std::get<1>(FuncInfo);
|
2015-11-24 14:07:49 +08:00
|
|
|
|
2016-04-16 15:02:16 +08:00
|
|
|
computeImportForFunction(*Summary, Index, Threshold, DefinedGVSummaries,
|
Restore "[ThinLTO] Ensure we always select the same function copy to import"
This reverts commit r337081, therefore restoring r337050 (and fix in
r337059), with test fix for bot failure described after the original
description below.
In order to always import the same copy of a linkonce function,
even when encountering it with different thresholds (a higher one then a
lower one), keep track of the summary we decided to import.
This ensures that the backend only gets a single definition to import
for each GUID, so that it doesn't need to choose one.
Move the largest threshold the GUID was considered for import into the
current module out of the ImportMap (which is part of a larger map
maintained across the whole index), and into a new map just maintained
for the current module we are computing imports for. This saves some
memory since we no longer have the thresholds maintained across the
whole index (and throughout the in-process backends when doing a normal
non-distributed ThinLTO build), at the cost of some additional
information being maintained for each invocation of ComputeImportForModule
(the selected summary pointer for each import).
There is an additional map lookup for each callee being considered for
importing, however, this was able to subsume a map lookup in the
Worklist iteration that invokes computeImportForFunction. We also are
able to avoid calling selectCallee if we already failed to import at the
same or higher threshold.
I compared the run time and peak memory for the SPEC2006 471.omnetpp
benchmark (running in-process ThinLTO backends), as well as for a large
internal benchmark with a distributed ThinLTO build (so just looking at
the thin link time/memory). Across a number of runs with and without
this change there was no significant change in the time and memory.
(I tried a few other variations of the change but they also didn't
improve time or peak memory).
The new commit removes a test that no longer makes sense
(Transforms/FunctionImport/hotness_based_import2.ll), as exposed by the
reverse-iteration bot. The test depends on the order of processing the
summary call edges, and actually depended on the old problematic
behavior of selecting more than one summary for a given GUID when
encountered with different thresholds. There was no guarantee even
before that we would eventually pick the linkonce copy with the hottest
call edges, it just happened to work with the test and the old code, and
there was no guarantee that we would end up importing the selected
version of the copy that had the hottest call edges (since the backend
would effectively import only one of the selected copies).
Reviewers: davidxl
Subscribers: mehdi_amini, inglorion, llvm-commits
Differential Revision: https://reviews.llvm.org/D48670
llvm-svn: 337184
2018-07-16 23:30:27 +08:00
|
|
|
Worklist, ImportList, ExportLists,
|
|
|
|
ImportThresholds);
|
2016-03-26 13:40:34 +08:00
|
|
|
}
|
2018-08-18 00:53:47 +08:00
|
|
|
|
|
|
|
// Print stats about functions considered but rejected for importing
|
|
|
|
// when requested.
|
|
|
|
if (PrintImportFailures) {
|
|
|
|
dbgs() << "Missed imports into module " << ModName << "\n";
|
|
|
|
for (auto &I : ImportThresholds) {
|
|
|
|
auto &ProcessedThreshold = std::get<0>(I.second);
|
|
|
|
auto &CalleeSummary = std::get<1>(I.second);
|
|
|
|
auto &FailureInfo = std::get<2>(I.second);
|
|
|
|
if (CalleeSummary)
|
|
|
|
continue; // We are going to import.
|
|
|
|
assert(FailureInfo);
|
|
|
|
FunctionSummary *FS = nullptr;
|
|
|
|
if (!FailureInfo->VI.getSummaryList().empty())
|
|
|
|
FS = dyn_cast<FunctionSummary>(
|
|
|
|
FailureInfo->VI.getSummaryList()[0]->getBaseObject());
|
|
|
|
dbgs() << FailureInfo->VI
|
|
|
|
<< ": Reason = " << getFailureName(FailureInfo->Reason)
|
|
|
|
<< ", Threshold = " << ProcessedThreshold
|
|
|
|
<< ", Size = " << (FS ? (int)FS->instCount() : -1)
|
|
|
|
<< ", MaxHotness = " << getHotnessName(FailureInfo->MaxHotness)
|
|
|
|
<< ", Attempts = " << FailureInfo->Attempts << "\n";
|
|
|
|
}
|
|
|
|
}
|
2016-03-26 13:40:34 +08:00
|
|
|
}
|
2015-12-09 16:17:35 +08:00
|
|
|
|
2018-03-12 18:30:50 +08:00
|
|
|
#ifndef NDEBUG
|
2019-11-19 17:46:09 +08:00
|
|
|
static bool isGlobalVarSummary(const ModuleSummaryIndex &Index, ValueInfo VI) {
|
|
|
|
auto SL = VI.getSummaryList();
|
|
|
|
return SL.empty()
|
|
|
|
? false
|
|
|
|
: SL[0]->getSummaryKind() == GlobalValueSummary::GlobalVarKind;
|
|
|
|
}
|
|
|
|
|
2018-03-12 18:30:50 +08:00
|
|
|
static bool isGlobalVarSummary(const ModuleSummaryIndex &Index,
|
|
|
|
GlobalValue::GUID G) {
|
2019-11-19 17:46:09 +08:00
|
|
|
if (const auto &VI = Index.getValueInfo(G))
|
|
|
|
return isGlobalVarSummary(Index, VI);
|
2018-03-12 18:30:50 +08:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
template <class T>
|
2018-04-04 19:45:11 +08:00
|
|
|
static unsigned numGlobalVarSummaries(const ModuleSummaryIndex &Index,
|
|
|
|
T &Cont) {
|
2018-03-12 18:30:50 +08:00
|
|
|
unsigned NumGVS = 0;
|
|
|
|
for (auto &V : Cont)
|
2019-11-19 17:46:09 +08:00
|
|
|
if (isGlobalVarSummary(Index, V))
|
2018-03-12 18:30:50 +08:00
|
|
|
++NumGVS;
|
|
|
|
return NumGVS;
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2019-11-16 02:00:23 +08:00
|
|
|
#ifndef NDEBUG
|
2019-11-15 21:13:19 +08:00
|
|
|
static bool
|
|
|
|
checkVariableImport(const ModuleSummaryIndex &Index,
|
|
|
|
StringMap<FunctionImporter::ImportMapTy> &ImportLists,
|
|
|
|
StringMap<FunctionImporter::ExportSetTy> &ExportLists) {
|
|
|
|
|
|
|
|
DenseSet<GlobalValue::GUID> FlattenedImports;
|
|
|
|
|
|
|
|
for (auto &ImportPerModule : ImportLists)
|
|
|
|
for (auto &ExportPerModule : ImportPerModule.second)
|
|
|
|
FlattenedImports.insert(ExportPerModule.second.begin(),
|
|
|
|
ExportPerModule.second.end());
|
|
|
|
|
|
|
|
// Checks that all GUIDs of read/writeonly vars we see in export lists
|
|
|
|
// are also in the import lists. Otherwise we may face linker undefs,
|
|
|
|
// because readonly and writeonly vars are internalized in their
|
|
|
|
// source modules.
|
|
|
|
auto IsReadOrWriteOnlyVar = [&](StringRef ModulePath, const ValueInfo &VI) {
|
|
|
|
auto *GVS = dyn_cast_or_null<GlobalVarSummary>(
|
|
|
|
Index.findSummaryInModule(VI, ModulePath));
|
|
|
|
return GVS && (Index.isReadOnly(GVS) || Index.isWriteOnly(GVS));
|
|
|
|
};
|
|
|
|
|
|
|
|
for (auto &ExportPerModule : ExportLists)
|
|
|
|
for (auto &VI : ExportPerModule.second)
|
|
|
|
if (!FlattenedImports.count(VI.getGUID()) &&
|
|
|
|
IsReadOrWriteOnlyVar(ExportPerModule.first(), VI))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
2019-11-16 02:00:23 +08:00
|
|
|
#endif
|
2019-11-15 21:13:19 +08:00
|
|
|
|
2016-04-13 05:13:11 +08:00
|
|
|
/// Compute all the import and export for every module using the Index.
|
2016-03-26 13:40:34 +08:00
|
|
|
void llvm::ComputeCrossModuleImport(
|
|
|
|
const ModuleSummaryIndex &Index,
|
2016-04-26 05:09:51 +08:00
|
|
|
const StringMap<GVSummaryMapTy> &ModuleToDefinedGVSummaries,
|
2016-03-26 13:40:34 +08:00
|
|
|
StringMap<FunctionImporter::ImportMapTy> &ImportLists,
|
2017-06-02 04:30:06 +08:00
|
|
|
StringMap<FunctionImporter::ExportSetTy> &ExportLists) {
|
2016-03-26 13:40:34 +08:00
|
|
|
// For each module that has functions defined, compute the import/export lists.
|
2016-04-16 15:02:16 +08:00
|
|
|
for (auto &DefinedGVSummaries : ModuleToDefinedGVSummaries) {
|
2016-08-16 13:47:12 +08:00
|
|
|
auto &ImportList = ImportLists[DefinedGVSummaries.first()];
|
2018-05-14 20:53:11 +08:00
|
|
|
LLVM_DEBUG(dbgs() << "Computing import for Module '"
|
|
|
|
<< DefinedGVSummaries.first() << "'\n");
|
2018-08-18 00:53:47 +08:00
|
|
|
ComputeImportForModule(DefinedGVSummaries.second, Index,
|
|
|
|
DefinedGVSummaries.first(), ImportList,
|
2017-06-02 04:30:06 +08:00
|
|
|
&ExportLists);
|
2016-03-26 13:40:34 +08:00
|
|
|
}
|
2015-11-24 14:07:49 +08:00
|
|
|
|
2016-12-16 12:11:51 +08:00
|
|
|
// When computing imports we added all GUIDs referenced by anything
|
|
|
|
// imported from the module to its ExportList. Now we prune each ExportList
|
|
|
|
// of any not defined in that module. This is more efficient than checking
|
|
|
|
// while computing imports because some of the summary lists may be long
|
|
|
|
// due to linkonce (comdat) copies.
|
|
|
|
for (auto &ELI : ExportLists) {
|
|
|
|
const auto &DefinedGVSummaries =
|
|
|
|
ModuleToDefinedGVSummaries.lookup(ELI.first());
|
2019-11-14 23:07:13 +08:00
|
|
|
for (auto EI = ELI.second.begin(); EI != ELI.second.end();) {
|
2019-11-15 21:13:19 +08:00
|
|
|
if (!DefinedGVSummaries.count(EI->getGUID()))
|
|
|
|
ELI.second.erase(EI++);
|
2019-11-14 23:07:13 +08:00
|
|
|
else
|
|
|
|
++EI;
|
|
|
|
}
|
2016-12-16 12:11:51 +08:00
|
|
|
}
|
|
|
|
|
2019-11-15 21:13:19 +08:00
|
|
|
assert(checkVariableImport(Index, ImportLists, ExportLists));
|
2016-03-26 13:40:34 +08:00
|
|
|
#ifndef NDEBUG
|
2018-05-14 20:53:11 +08:00
|
|
|
LLVM_DEBUG(dbgs() << "Import/Export lists for " << ImportLists.size()
|
|
|
|
<< " modules:\n");
|
2016-03-26 13:40:34 +08:00
|
|
|
for (auto &ModuleImports : ImportLists) {
|
|
|
|
auto ModName = ModuleImports.first();
|
|
|
|
auto &Exports = ExportLists[ModName];
|
2018-03-12 18:30:50 +08:00
|
|
|
unsigned NumGVS = numGlobalVarSummaries(Index, Exports);
|
2018-05-14 20:53:11 +08:00
|
|
|
LLVM_DEBUG(dbgs() << "* Module " << ModName << " exports "
|
|
|
|
<< Exports.size() - NumGVS << " functions and " << NumGVS
|
|
|
|
<< " vars. Imports from " << ModuleImports.second.size()
|
|
|
|
<< " modules.\n");
|
2016-03-26 13:40:34 +08:00
|
|
|
for (auto &Src : ModuleImports.second) {
|
|
|
|
auto SrcModName = Src.first();
|
2018-03-12 18:30:50 +08:00
|
|
|
unsigned NumGVSPerMod = numGlobalVarSummaries(Index, Src.second);
|
2018-05-14 20:53:11 +08:00
|
|
|
LLVM_DEBUG(dbgs() << " - " << Src.second.size() - NumGVSPerMod
|
|
|
|
<< " functions imported from " << SrcModName << "\n");
|
|
|
|
LLVM_DEBUG(dbgs() << " - " << NumGVSPerMod
|
|
|
|
<< " global vars imported from " << SrcModName << "\n");
|
2016-03-26 13:40:34 +08:00
|
|
|
}
|
2015-12-03 10:37:33 +08:00
|
|
|
}
|
2016-03-26 13:40:34 +08:00
|
|
|
#endif
|
2015-12-03 10:37:33 +08:00
|
|
|
}
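// A minimal call sketch (illustrative only; the index, the per-module
// summary map, and the module names are assumed to have been set up by the
// thin link beforehand):
//
//   StringMap<FunctionImporter::ImportMapTy> ImportLists;
//   StringMap<FunctionImporter::ExportSetTy> ExportLists;
//   ComputeCrossModuleImport(CombinedIndex, ModuleToDefinedGVSummaries,
//                            ImportLists, ExportLists);
//   // ImportLists["a.o"] now maps each exporting module path to the set of
//   // GUIDs that module "a.o" should import from it.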
|
|
|
|
|
2017-12-16 08:18:12 +08:00
|
|
|
#ifndef NDEBUG
|
2018-03-12 18:30:50 +08:00
|
|
|
static void dumpImportListForModule(const ModuleSummaryIndex &Index,
|
|
|
|
StringRef ModulePath,
|
2017-12-16 08:18:12 +08:00
|
|
|
FunctionImporter::ImportMapTy &ImportList) {
|
2018-05-14 20:53:11 +08:00
|
|
|
LLVM_DEBUG(dbgs() << "* Module " << ModulePath << " imports from "
|
|
|
|
<< ImportList.size() << " modules.\n");
|
2017-12-16 08:18:12 +08:00
|
|
|
for (auto &Src : ImportList) {
|
|
|
|
auto SrcModName = Src.first();
|
2018-03-12 18:30:50 +08:00
|
|
|
unsigned NumGVSPerMod = numGlobalVarSummaries(Index, Src.second);
|
2018-05-14 20:53:11 +08:00
|
|
|
LLVM_DEBUG(dbgs() << " - " << Src.second.size() - NumGVSPerMod
|
|
|
|
<< " functions imported from " << SrcModName << "\n");
|
|
|
|
LLVM_DEBUG(dbgs() << " - " << NumGVSPerMod << " vars imported from "
|
|
|
|
<< SrcModName << "\n");
|
2017-12-16 08:18:12 +08:00
|
|
|
}
|
|
|
|
}
|
2017-12-16 08:29:31 +08:00
|
|
|
#endif
|
2017-12-16 08:18:12 +08:00
|
|
|
|
2016-04-13 05:13:11 +08:00
|
|
|
/// Compute all the imports for the given module in the Index.
|
|
|
|
void llvm::ComputeCrossModuleImportForModule(
|
|
|
|
StringRef ModulePath, const ModuleSummaryIndex &Index,
|
|
|
|
FunctionImporter::ImportMapTy &ImportList) {
|
|
|
|
// Collect the list of functions this module defines.
|
|
|
|
// GUID -> Summary
|
2016-04-26 05:09:51 +08:00
|
|
|
GVSummaryMapTy FunctionSummaryMap;
|
2016-04-24 22:57:11 +08:00
|
|
|
Index.collectDefinedFunctionsForModule(ModulePath, FunctionSummaryMap);
|
2016-04-13 05:13:11 +08:00
|
|
|
|
|
|
|
// Compute the import list for this module.
|
2018-05-14 20:53:11 +08:00
|
|
|
LLVM_DEBUG(dbgs() << "Computing import for Module '" << ModulePath << "'\n");
|
2018-08-18 00:53:47 +08:00
|
|
|
ComputeImportForModule(FunctionSummaryMap, Index, ModulePath, ImportList);
|
2016-04-13 05:13:11 +08:00
|
|
|
|
|
|
|
#ifndef NDEBUG
|
2018-03-12 18:30:50 +08:00
|
|
|
dumpImportListForModule(Index, ModulePath, ImportList);
|
2017-12-16 08:18:12 +08:00
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
|
|
|
// Mark all external summaries in Index for import into the given module.
|
|
|
|
// Used for distributed builds using a distributed index.
|
|
|
|
void llvm::ComputeCrossModuleImportForModuleFromIndex(
|
|
|
|
StringRef ModulePath, const ModuleSummaryIndex &Index,
|
|
|
|
FunctionImporter::ImportMapTy &ImportList) {
|
|
|
|
for (auto &GlobalList : Index) {
|
|
|
|
// Ignore entries for undefined references.
|
|
|
|
if (GlobalList.second.SummaryList.empty())
|
|
|
|
continue;
|
|
|
|
|
|
|
|
auto GUID = GlobalList.first;
|
|
|
|
assert(GlobalList.second.SummaryList.size() == 1 &&
|
|
|
|
"Expected individual combined index to have one summary per GUID");
|
|
|
|
auto &Summary = GlobalList.second.SummaryList[0];
|
|
|
|
// Skip the summaries for the importing module. These are included to
|
|
|
|
// e.g. record required linkage changes.
|
|
|
|
if (Summary->modulePath() == ModulePath)
|
|
|
|
continue;
|
Restore "[ThinLTO] Ensure we always select the same function copy to import"
This reverts commit r337081, therefore restoring r337050 (and fix in
r337059), with test fix for bot failure described after the original
description below.
In order to always import the same copy of a linkonce function,
even when encountering it with different thresholds (a higher one then a
lower one), keep track of the summary we decided to import.
This ensures that the backend only gets a single definition to import
for each GUID, so that it doesn't need to choose one.
Move the largest threshold the GUID was considered for import into the
current module out of the ImportMap (which is part of a larger map
maintained across the whole index), and into a new map just maintained
for the current module we are computing imports for. This saves some
memory since we no longer have the thresholds maintained across the
whole index (and throughout the in-process backends when doing a normal
non-distributed ThinLTO build), at the cost of some additional
information being maintained for each invocation of ComputeImportForModule
(the selected summary pointer for each import).
There is an additional map lookup for each callee being considered for
importing, however, this was able to subsume a map lookup in the
Worklist iteration that invokes computeImportForFunction. We also are
able to avoid calling selectCallee if we already failed to import at the
same or higher threshold.
I compared the run time and peak memory for the SPEC2006 471.omnetpp
benchmark (running in-process ThinLTO backends), as well as for a large
internal benchmark with a distributed ThinLTO build (so just looking at
the thin link time/memory). Across a number of runs with and without
this change there was no significant change in the time and memory.
(I tried a few other variations of the change but they also didn't
improve time or peak memory).
The new commit removes a test that no longer makes sense
(Transforms/FunctionImport/hotness_based_import2.ll), as exposed by the
reverse-iteration bot. The test depends on the order of processing the
summary call edges, and actually depended on the old problematic
behavior of selecting more than one summary for a given GUID when
encountered with different thresholds. There was no guarantee even
before that we would eventually pick the linkonce copy with the hottest
call edges, it just happened to work with the test and the old code, and
there was no guarantee that we would end up importing the selected
version of the copy that had the hottest call edges (since the backend
would effectively import only one of the selected copies).
Reviewers: davidxl
Subscribers: mehdi_amini, inglorion, llvm-commits
Differential Revision: https://reviews.llvm.org/D48670
llvm-svn: 337184
2018-07-16 23:30:27 +08:00
|
|
|
// Add an entry to provoke importing by thinBackend.
|
|
|
|
ImportList[Summary->modulePath()].insert(GUID);
|
2016-04-13 05:13:11 +08:00
|
|
|
}
|
2017-12-16 08:18:12 +08:00
|
|
|
#ifndef NDEBUG
|
2018-03-12 18:30:50 +08:00
|
|
|
dumpImportListForModule(Index, ModulePath, ImportList);
|
2016-04-13 05:13:11 +08:00
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
2017-06-02 04:30:06 +08:00
|
|
|
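// Illustrative call sketch (assumed setup; the preserved-symbol set and the
// prevailing callback normally come from linker symbol resolution):
//
//   DenseSet<GlobalValue::GUID> Preserved;
//   Preserved.insert(GlobalValue::getGUID("main"));
//   computeDeadSymbols(CombinedIndex, Preserved,
//                      [](GlobalValue::GUID) { return PrevailingType::Yes; });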
void llvm::computeDeadSymbols(
|
|
|
|
ModuleSummaryIndex &Index,
|
2018-01-29 16:03:30 +08:00
|
|
|
const DenseSet<GlobalValue::GUID> &GUIDPreservedSymbols,
|
|
|
|
function_ref<PrevailingType(GlobalValue::GUID)> isPrevailing) {
|
2017-06-02 04:30:06 +08:00
|
|
|
assert(!Index.withGlobalValueDeadStripping());
|
2017-01-06 05:34:18 +08:00
|
|
|
if (!ComputeDead)
|
2017-06-02 04:30:06 +08:00
|
|
|
return;
|
2017-01-06 05:34:18 +08:00
|
|
|
if (GUIDPreservedSymbols.empty())
|
|
|
|
// Don't do anything when nothing is live; this is friendly to tests.
|
2017-06-02 04:30:06 +08:00
|
|
|
return;
|
|
|
|
unsigned LiveSymbols = 0;
|
2017-05-05 02:03:25 +08:00
|
|
|
SmallVector<ValueInfo, 128> Worklist;
|
|
|
|
Worklist.reserve(GUIDPreservedSymbols.size() * 2);
|
|
|
|
for (auto GUID : GUIDPreservedSymbols) {
|
|
|
|
ValueInfo VI = Index.getValueInfo(GUID);
|
|
|
|
if (!VI)
|
|
|
|
continue;
|
2017-06-02 04:30:06 +08:00
|
|
|
for (auto &S : VI.getSummaryList())
|
|
|
|
S->setLive(true);
|
2017-01-06 05:34:18 +08:00
|
|
|
}
|
2017-06-02 04:30:06 +08:00
|
|
|
|
2017-01-06 05:34:18 +08:00
|
|
|
// Add values flagged in the index as live roots to the worklist.
|
2018-06-28 02:03:39 +08:00
|
|
|
for (const auto &Entry : Index) {
|
|
|
|
auto VI = Index.getValueInfo(Entry);
|
2017-06-02 04:30:06 +08:00
|
|
|
for (auto &S : Entry.second.SummaryList)
|
|
|
|
if (S->isLive()) {
|
2018-06-28 02:03:39 +08:00
|
|
|
LLVM_DEBUG(dbgs() << "Live root: " << VI << "\n");
|
|
|
|
Worklist.push_back(VI);
|
2017-06-02 04:30:06 +08:00
|
|
|
++LiveSymbols;
|
|
|
|
break;
|
|
|
|
}
|
2018-06-28 02:03:39 +08:00
|
|
|
}
|
2017-06-02 04:30:06 +08:00
|
|
|
|
|
|
|
// Make value live and add it to the worklist if it was not live before.
|
[ThinLTO] Fix handling of weak interposable symbols
Summary:
Keep aliasees alive if their alias is live; otherwise we end up with an
alias to a declaration, which is invalid. This can happen when the
aliasee is weak and non-prevailing.
This fix exposed the fact that we were then attempting to internalize
the weak symbol, which was not exported as it was not prevailing. We
should not internalize interposable symbols in general, unless this is
the prevailing copy, since it can lead to incorrect inlining and other
optimizations. Most of the changes in this patch are due to the
restructuring required to pass down the prevailing callback.
Finally, while implementing the test cases, I found that for a weak
aliasee that is still marked not-live (because its alias isn't live),
after dropping the definition we incorrectly marked the remaining
declaration with weak linkage when resolving prevailing symbols in the
module.
WeakLinkage in the summary located before instead of after a subsequent
check for the symbol being a declaration. It turns out that we don't
actually need this special case handling any more (looking back at the
history, when that was added the code was structured quite differently)
- we will correctly mark with weak linkage further below when the
definition hasn't been dropped.
Fixes PR42542.
Reviewers: pcc
Subscribers: mehdi_amini, inglorion, steven_wu, dexonsmith, dang, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D66264
llvm-svn: 369766
2019-08-23 23:18:58 +08:00
|
|
|
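// Why a live alias forces its aliasee to be visited (illustrative IR with
// hypothetical names):
//
//   @a = weak alias void (), void ()* @f   ; alias is referenced and live
//   define weak void @f() { ... }          ; non-prevailing aliasee
//
// If @f were dropped to a declaration while @a stayed live, @a would alias a
// declaration, which is invalid IR, so the aliasee is marked live as well.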
auto visit = [&](ValueInfo VI, bool IsAliasee) {
|
2017-09-13 23:16:38 +08:00
|
|
|
// FIXME: If we knew which edges were created for indirect call profiles,
|
|
|
|
// we could skip them here. Any that are live should be reached via
|
|
|
|
// other edges, e.g. reference edges. Otherwise, using a profile collected
|
|
|
|
// on a slightly different binary might provoke preserving, importing
|
|
|
|
// and ultimately promoting calls to functions not linked into this
|
|
|
|
// binary, which increases the binary size unnecessarily. Note that
|
|
|
|
// if this code changes, the importer needs to change so that edges
|
|
|
|
// to functions marked dead are skipped.
|
|
|
|
VI = updateValueInfoForIndirectCalls(Index, VI);
|
|
|
|
if (!VI)
|
|
|
|
return;
|
2019-01-03 07:18:20 +08:00
|
|
|
|
[ThinLTO] Refine reachability check to fix compile time increase
Summary:
A recent fix to the ThinLTO whole program dead code elimination (D56117)
increased the thin link time on a large MSAN'ed binary by 2x.
It's likely that the time increased elsewhere, but was more noticeable
here since it was already large and ended up timing out.
That change made it so we would repeatedly scan all copies of linkonce
symbols for liveness every time they were encountered during the graph
traversal. This was needed since we only mark one copy of an aliasee as
live when we encounter a live alias. This patch fixes the issue in a
more efficient manner by simply proactively visiting the aliasee (thus
marking all copies live) when we encounter a live alias.
Two notes: One, this requires a hash table lookup (finding the aliasee
summary in the index based on aliasee GUID). However, the impact of this
seems to be small compared to the original pre-D56117 thin link time. It
could be addressed if we keep the aliasee ValueInfo in the alias summary
instead of the aliasee GUID, which I am exploring in a separate patch.
Second, we only populate the aliasee GUID field when reading summaries
from bitcode (whether we are reading individual summaries and merging on
the fly to form the compiled index, or reading in a serialized combined
index). Thankfully, that's currently the only way we can get to this
code as we don't yet support reading summaries from LLVM assembly
directly into a tool that performs the thin link (they must be converted
to bitcode first). I added a FIXME, however I have the fix under test
already. The easiest fix is to simply populate this field always, which
isn't hard, but more likely the change I am exploring to store the
ValueInfo instead as described above will subsume this. I don't want to
hold up the regression fix for this though.
Reviewers: trentxintong
Subscribers: mehdi_amini, inglorion, dexonsmith, llvm-commits
Differential Revision: https://reviews.llvm.org/D57203
llvm-svn: 352438
2019-01-29 06:27:05 +08:00
|
|
|
if (llvm::any_of(VI.getSummaryList(),
|
2019-01-03 07:18:20 +08:00
|
|
|
[](const std::unique_ptr<llvm::GlobalValueSummary> &S) {
|
|
|
|
return S->isLive();
|
|
|
|
}))
|
|
|
|
return;
|
2018-01-29 16:03:30 +08:00
|
|
|
|
2018-03-13 13:08:48 +08:00
|
|
|
// We only keep live symbols that are known to be non-prevailing if any are
|
2018-10-08 23:12:48 +08:00
|
|
|
// available_externally, linkonceodr, weakodr. Those symbols are discarded
|
|
|
|
// later in the EliminateAvailableExternally pass and setting them to
|
|
|
|
// not-live could break downstream users of liveness information (PR36483)
|
|
|
|
// or limit optimization opportunities.
|
2018-03-13 13:08:48 +08:00
|
|
|
if (isPrevailing(VI.getGUID()) == PrevailingType::No) {
|
2018-10-08 23:12:48 +08:00
|
|
|
bool KeepAliveLinkage = false;
|
2018-03-13 13:08:48 +08:00
|
|
|
bool Interposable = false;
|
|
|
|
for (auto &S : VI.getSummaryList()) {
|
2018-10-08 23:12:48 +08:00
|
|
|
if (S->linkage() == GlobalValue::AvailableExternallyLinkage ||
|
|
|
|
S->linkage() == GlobalValue::WeakODRLinkage ||
|
|
|
|
S->linkage() == GlobalValue::LinkOnceODRLinkage)
|
|
|
|
KeepAliveLinkage = true;
|
2018-03-13 13:08:48 +08:00
|
|
|
else if (GlobalValue::isInterposableLinkage(S->linkage()))
|
|
|
|
Interposable = true;
|
|
|
|
}
|
|
|
|
|
2019-08-23 23:18:58 +08:00
|
|
|
if (!IsAliasee) {
|
|
|
|
if (!KeepAliveLinkage)
|
|
|
|
return;
|
2018-03-13 13:08:48 +08:00
|
|
|
|
2019-08-23 23:18:58 +08:00
|
|
|
if (Interposable)
|
|
|
|
report_fatal_error(
|
|
|
|
"Interposable and available_externally/linkonce_odr/weak_odr "
|
|
|
|
"symbol");
|
|
|
|
}
|
2018-03-13 13:08:48 +08:00
|
|
|
}
|
2018-01-29 16:03:30 +08:00
|
|
|
|
2017-06-02 04:30:06 +08:00
|
|
|
for (auto &S : VI.getSummaryList())
|
|
|
|
S->setLive(true);
|
|
|
|
++LiveSymbols;
|
|
|
|
Worklist.push_back(VI);
|
|
|
|
};
|
2017-01-06 05:34:18 +08:00
|
|
|
|
|
|
|
while (!Worklist.empty()) {
|
2017-05-05 02:03:25 +08:00
|
|
|
auto VI = Worklist.pop_back_val();
|
|
|
|
for (auto &Summary : VI.getSummaryList()) {
|
2019-01-29 06:27:05 +08:00
|
|
|
if (auto *AS = dyn_cast<AliasSummary>(Summary.get())) {
|
|
|
|
// If this is an alias, visit the aliasee VI to ensure that all copies
|
|
|
|
// are marked live and it is added to the worklist for further
|
|
|
|
// processing of its references.
|
2019-08-23 23:18:58 +08:00
|
|
|
visit(AS->getAliaseeVI(), true);
|
2019-01-29 06:27:05 +08:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
Summary->setLive(true);
|
|
|
|
for (auto Ref : Summary->refs())
|
2019-08-23 23:18:58 +08:00
|
|
|
visit(Ref, false);
|
2019-01-29 06:27:05 +08:00
|
|
|
if (auto *FS = dyn_cast<FunctionSummary>(Summary.get()))
|
2017-06-02 04:30:06 +08:00
|
|
|
for (auto Call : FS->calls())
|
2019-08-23 23:18:58 +08:00
|
|
|
visit(Call.first, false);
|
2017-01-06 05:34:18 +08:00
|
|
|
}
|
|
|
|
}
|
2017-06-02 04:30:06 +08:00
|
|
|
Index.setWithGlobalValueDeadStripping();
|
|
|
|
|
|
|
|
unsigned DeadSymbols = Index.size() - LiveSymbols;
|
2018-05-14 20:53:11 +08:00
|
|
|
LLVM_DEBUG(dbgs() << LiveSymbols << " symbols Live, and " << DeadSymbols
|
|
|
|
<< " symbols Dead \n");
|
2017-06-02 04:30:06 +08:00
|
|
|
NumDeadSymbols += DeadSymbols;
|
|
|
|
NumLiveSymbols += LiveSymbols;
|
2017-01-06 05:34:18 +08:00
|
|
|
}
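Downstream consumers read the liveness bits straight off the index once this analysis has run. A minimal sketch (not part of this file, assuming the standard ModuleSummaryIndex query API declared elsewhere):

static bool isStillNeeded(const ModuleSummaryIndex &Index,
                          GlobalValue::GUID G) {
  // Live here means reachable from an externally visible or explicitly
  // preserved root, as computed by computeDeadSymbols above.
  return Index.isGUIDLive(G);
}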
|
|
|
|
|
2018-11-16 15:08:00 +08:00
|
|
|
// Compute dead symbols and propagate constants in combined index.
|
|
|
|
void llvm::computeDeadSymbolsWithConstProp(
|
|
|
|
ModuleSummaryIndex &Index,
|
|
|
|
const DenseSet<GlobalValue::GUID> &GUIDPreservedSymbols,
|
|
|
|
function_ref<PrevailingType(GlobalValue::GUID)> isPrevailing,
|
|
|
|
bool ImportEnabled) {
|
|
|
|
computeDeadSymbols(Index, GUIDPreservedSymbols, isPrevailing);
|
2019-12-04 05:56:07 +08:00
|
|
|
if (ImportEnabled)
|
2019-07-05 23:25:05 +08:00
|
|
|
Index.propagateAttributes(GUIDPreservedSymbols);
|
2018-11-16 15:08:00 +08:00
|
|
|
}
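A thin-link driver invokes this wrapper with the preserved-symbol set and a prevailing-copy callback obtained from symbol resolution. A minimal usage sketch (not part of this file; names are hypothetical, and the callback here conservatively answers Unknown):

static void runDeadStripping(
    ModuleSummaryIndex &CombinedIndex,
    const DenseSet<GlobalValue::GUID> &GUIDPreservedSymbols) {
  // A real driver answers from its symbol resolution; Unknown is the
  // conservative fallback.
  auto isPrevailing = [](GlobalValue::GUID) { return PrevailingType::Unknown; };
  computeDeadSymbolsWithConstProp(CombinedIndex, GUIDPreservedSymbols,
                                  isPrevailing, /*ImportEnabled=*/true);
}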
|
|
|
|
|
2016-05-10 21:48:23 +08:00
|
|
|
/// Compute the set of summaries needed for a ThinLTO backend compilation of
|
|
|
|
/// \p ModulePath.
|
|
|
|
void llvm::gatherImportedSummariesForModule(
|
|
|
|
StringRef ModulePath,
|
|
|
|
const StringMap<GVSummaryMapTy> &ModuleToDefinedGVSummaries,
|
2016-08-16 13:46:05 +08:00
|
|
|
const FunctionImporter::ImportMapTy &ImportList,
|
2016-05-10 21:48:23 +08:00
|
|
|
std::map<std::string, GVSummaryMapTy> &ModuleToSummariesForIndex) {
|
|
|
|
// Include all summaries from the importing module.
|
2020-01-29 03:23:46 +08:00
|
|
|
ModuleToSummariesForIndex[std::string(ModulePath)] =
|
2016-05-10 21:48:23 +08:00
|
|
|
ModuleToDefinedGVSummaries.lookup(ModulePath);
|
2016-08-16 13:46:05 +08:00
|
|
|
// Include summaries for imports.
|
2016-08-16 13:49:12 +08:00
|
|
|
for (auto &ILI : ImportList) {
|
2020-01-29 03:23:46 +08:00
|
|
|
auto &SummariesForIndex =
|
|
|
|
ModuleToSummariesForIndex[std::string(ILI.first())];
|
2016-08-16 13:46:05 +08:00
|
|
|
const auto &DefinedGVSummaries =
|
|
|
|
ModuleToDefinedGVSummaries.lookup(ILI.first());
|
|
|
|
for (auto &GI : ILI.second) {
|
Restore "[ThinLTO] Ensure we always select the same function copy to import"
This reverts commit r337081, therefore restoring r337050 (and fix in
r337059), with test fix for bot failure described after the original
description below.
In order to always import the same copy of a linkonce function,
even when encountering it with different thresholds (a higher one then a
lower one), keep track of the summary we decided to import.
This ensures that the backend only gets a single definition to import
for each GUID, so that it doesn't need to choose one.
Move the largest threshold the GUID was considered for import into the
current module out of the ImportMap (which is part of a larger map
maintained across the whole index), and into a new map just maintained
for the current module we are computing imports for. This saves some
memory since we no longer have the thresholds maintained across the
whole index (and throughout the in-process backends when doing a normal
non-distributed ThinLTO build), at the cost of some additional
information being maintained for each invocation of ComputeImportForModule
(the selected summary pointer for each import).
There is an additional map lookup for each callee being considered for
importing, however, this was able to subsume a map lookup in the
Worklist iteration that invokes computeImportForFunction. We also are
able to avoid calling selectCallee if we already failed to import at the
same or higher threshold.
I compared the run time and peak memory for the SPEC2006 471.omnetpp
benchmark (running in-process ThinLTO backends), as well as for a large
internal benchmark with a distributed ThinLTO build (so just looking at
the thin link time/memory). Across a number of runs with and without
this change there was no significant change in the time and memory.
(I tried a few other variations of the change but they also didn't
improve time or peak memory).
The new commit removes a test that no longer makes sense
(Transforms/FunctionImport/hotness_based_import2.ll), as exposed by the
reverse-iteration bot. The test depends on the order of processing the
summary call edges, and actually depended on the old problematic
behavior of selecting more than one summary for a given GUID when
encountered with different thresholds. There was no guarantee even
before that we would eventually pick the linkonce copy with the hottest
call edges, it just happened to work with the test and the old code, and
there was no guarantee that we would end up importing the selected
version of the copy that had the hottest call edges (since the backend
would effectively import only one of the selected copies).
Reviewers: davidxl
Subscribers: mehdi_amini, inglorion, llvm-commits
Differential Revision: https://reviews.llvm.org/D48670
llvm-svn: 337184
2018-07-16 23:30:27 +08:00
|
|
|
const auto &DS = DefinedGVSummaries.find(GI);
|
2016-08-16 13:46:05 +08:00
|
|
|
assert(DS != DefinedGVSummaries.end() &&
|
|
|
|
"Expected a defined summary for imported global value");
|
Restore "[ThinLTO] Ensure we always select the same function copy to import"
This reverts commit r337081, therefore restoring r337050 (and fix in
r337059), with test fix for bot failure described after the original
description below.
In order to always import the same copy of a linkonce function,
even when encountering it with different thresholds (a higher one then a
lower one), keep track of the summary we decided to import.
This ensures that the backend only gets a single definition to import
for each GUID, so that it doesn't need to choose one.
Move the largest threshold the GUID was considered for import into the
current module out of the ImportMap (which is part of a larger map
maintained across the whole index), and into a new map just maintained
for the current module we are computing imports for. This saves some
memory since we no longer have the thresholds maintained across the
whole index (and throughout the in-process backends when doing a normal
non-distributed ThinLTO build), at the cost of some additional
information being maintained for each invocation of ComputeImportForModule
(the selected summary pointer for each import).
There is an additional map lookup for each callee being considered for
importing, however, this was able to subsume a map lookup in the
Worklist iteration that invokes computeImportForFunction. We also are
able to avoid calling selectCallee if we already failed to import at the
same or higher threshold.
I compared the run time and peak memory for the SPEC2006 471.omnetpp
benchmark (running in-process ThinLTO backends), as well as for a large
internal benchmark with a distributed ThinLTO build (so just looking at
the thin link time/memory). Across a number of runs with and without
this change there was no significant change in the time and memory.
(I tried a few other variations of the change but they also didn't
improve time or peak memory).
The new commit removes a test that no longer makes sense
(Transforms/FunctionImport/hotness_based_import2.ll), as exposed by the
reverse-iteration bot. The test depends on the order of processing the
summary call edges, and actually depended on the old problematic
behavior of selecting more than one summary for a given GUID when
encountered with different thresholds. There was no guarantee even
before that we would eventually pick the linkonce copy with the hottest
call edges, it just happened to work with the test and the old code, and
there was no guarantee that we would end up importing the selected
version of the copy that had the hottest call edges (since the backend
would effectively import only one of the selected copies).
Reviewers: davidxl
Subscribers: mehdi_amini, inglorion, llvm-commits
Differential Revision: https://reviews.llvm.org/D48670
llvm-svn: 337184
2018-07-16 23:30:27 +08:00
|
|
|
SummariesForIndex[GI] = DS->second;
|
2016-05-10 21:48:23 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-05-10 23:54:09 +08:00
|
|
|
/// Emit the files \p ModulePath will import from into \p OutputFilename.
|
2018-07-11 04:06:04 +08:00
|
|
|
std::error_code llvm::EmitImportsFiles(
|
|
|
|
StringRef ModulePath, StringRef OutputFilename,
|
|
|
|
const std::map<std::string, GVSummaryMapTy> &ModuleToSummariesForIndex) {
|
2016-05-10 23:54:09 +08:00
|
|
|
std::error_code EC;
|
2019-08-05 13:43:48 +08:00
|
|
|
raw_fd_ostream ImportsOS(OutputFilename, EC, sys::fs::OpenFlags::OF_None);
|
2016-05-10 23:54:09 +08:00
|
|
|
if (EC)
|
|
|
|
return EC;
|
2018-07-11 04:06:04 +08:00
|
|
|
for (auto &ILI : ModuleToSummariesForIndex)
|
|
|
|
// The ModuleToSummariesForIndex map includes an entry for the current
|
|
|
|
// Module (needed for writing out the index files). We don't want to
|
|
|
|
// include it in the imports file, however, so filter it out.
|
|
|
|
if (ILI.first != ModulePath)
|
|
|
|
ImportsOS << ILI.first << "\n";
|
2016-05-10 23:54:09 +08:00
|
|
|
return std::error_code();
|
|
|
|
}
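In a distributed build the two entry points above run back to back for each module: gather the needed summaries, then write the plain-text imports file (one imported module path per line, with the current module filtered out). A rough sketch, with hypothetical helper and file names:

static void writeImportsFileForModule(
    StringRef ModulePath, StringRef OutputFilename,
    const StringMap<GVSummaryMapTy> &ModuleToDefinedGVSummaries,
    const FunctionImporter::ImportMapTy &ImportList) {
  std::map<std::string, GVSummaryMapTy> ModuleToSummariesForIndex;
  gatherImportedSummariesForModule(ModulePath, ModuleToDefinedGVSummaries,
                                   ImportList, ModuleToSummariesForIndex);
  if (std::error_code EC = EmitImportsFiles(ModulePath, OutputFilename,
                                            ModuleToSummariesForIndex))
    errs() << "error writing " << OutputFilename << ": " << EC.message()
           << "\n";
}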
|
|
|
|
|
2018-02-05 23:44:27 +08:00
|
|
|
bool llvm::convertToDeclaration(GlobalValue &GV) {
|
2018-05-14 20:53:11 +08:00
|
|
|
LLVM_DEBUG(dbgs() << "Converting to a declaration: `" << GV.getName()
|
|
|
|
<< "\n");
|
2018-01-29 16:03:30 +08:00
|
|
|
if (Function *F = dyn_cast<Function>(&GV)) {
|
|
|
|
F->deleteBody();
|
|
|
|
F->clearMetadata();
|
2018-01-31 10:51:03 +08:00
|
|
|
F->setComdat(nullptr);
|
2018-01-29 16:03:30 +08:00
|
|
|
} else if (GlobalVariable *V = dyn_cast<GlobalVariable>(&GV)) {
|
|
|
|
V->setInitializer(nullptr);
|
|
|
|
V->setLinkage(GlobalValue::ExternalLinkage);
|
|
|
|
V->clearMetadata();
|
2018-01-31 10:51:03 +08:00
|
|
|
V->setComdat(nullptr);
|
2018-02-05 23:44:27 +08:00
|
|
|
} else {
|
|
|
|
GlobalValue *NewGV;
|
|
|
|
if (GV.getValueType()->isFunctionTy())
|
|
|
|
NewGV =
|
|
|
|
Function::Create(cast<FunctionType>(GV.getValueType()),
|
2018-12-18 17:52:52 +08:00
|
|
|
GlobalValue::ExternalLinkage, GV.getAddressSpace(),
|
|
|
|
"", GV.getParent());
|
2018-02-05 23:44:27 +08:00
|
|
|
else
|
|
|
|
NewGV =
|
|
|
|
new GlobalVariable(*GV.getParent(), GV.getValueType(),
|
|
|
|
/*isConstant*/ false, GlobalValue::ExternalLinkage,
|
|
|
|
/*init*/ nullptr, "",
|
|
|
|
/*insertbefore*/ nullptr, GV.getThreadLocalMode(),
|
|
|
|
GV.getType()->getAddressSpace());
|
|
|
|
NewGV->takeName(&GV);
|
|
|
|
GV.replaceAllUsesWith(NewGV);
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
return true;
|
2018-01-29 16:03:30 +08:00
|
|
|
}
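The return value encodes how the conversion happened: true means GV itself is now a declaration; false means GV was replaced by a freshly created external declaration (e.g. an alias) and the now-unused original is left for the caller to erase. A caller-side sketch (hypothetical helper, assuming SmallVector is available):

static void dropDeadDefinition(GlobalValue &GV,
                               SmallVectorImpl<GlobalValue *> &ToErase) {
  if (!convertToDeclaration(GV))
    // Replaced rather than converted in place; defer erasing the original.
    ToErase.push_back(&GV);
}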
|
|
|
|
|
[LTO] Drop non-prevailing definitions only if linkage is not local or appending
Summary:
This fixes PR 37422
In ELF, non-weak symbols can also be non-prevailing. In this particular
PR, the __llvm_profile_* symbols are non-prevailing but weren't getting
dropped - causing multiply-defined errors with lld.
Also add a test, strong_non_prevailing.ll, to ensure that multiple
copies of a strong symbol are dropped.
To fix the test regressions exposed by this fix,
- do not mark prevailing copies for symbols with 'appending' linkage.
There's no one prevailing copy for such symbols.
- fix the prevailing version in dead-strip-fulllto.ll
- explicitly pass exported symbols to llvm-lto in funcimport.ll and
funcimport_var.ll
Reviewers: tejohnson, pcc
Subscribers: mehdi_amini, inglorion, eraman, steven_wu, dexonsmith,
dang, srhines, llvm-commits
Differential Revision: https://reviews.llvm.org/D54125
llvm-svn: 346436
2018-11-09 04:10:07 +08:00
|
|
|
/// Fixup prevailing symbol linkages in \p TheModule based on summary analysis.
|
|
|
|
void llvm::thinLTOResolvePrevailingInModule(
|
2016-05-25 22:03:11 +08:00
|
|
|
Module &TheModule, const GVSummaryMapTy &DefinedGlobals) {
|
|
|
|
auto updateLinkage = [&](GlobalValue &GV) {
|
|
|
|
// See if the global summary analysis computed a new resolved linkage.
|
|
|
|
const auto &GS = DefinedGlobals.find(GV.getGUID());
|
|
|
|
if (GS == DefinedGlobals.end())
|
|
|
|
return;
|
|
|
|
auto NewLinkage = GS->second->linkage();
|
|
|
|
if (NewLinkage == GV.getLinkage())
|
|
|
|
return;
|
2018-11-09 04:10:07 +08:00
|
|
|
if (GlobalValue::isLocalLinkage(GV.getLinkage()) ||
|
2019-10-22 20:57:23 +08:00
|
|
|
// Don't internalize anything here, because the code below
|
|
|
|
// lacks necessary correctness checks. Leave this job to
|
|
|
|
// the LLVM 'internalize' pass.
|
2019-10-22 17:24:12 +08:00
|
|
|
GlobalValue::isLocalLinkage(NewLinkage) ||
|
2018-11-09 04:10:07 +08:00
|
|
|
// In case it was dead and already converted to a declaration.
|
|
|
|
GV.isDeclaration())
|
2017-07-07 03:58:26 +08:00
|
|
|
return;
|
2019-08-23 23:18:58 +08:00
|
|
|
|
2017-01-21 05:54:58 +08:00
|
|
|
// Check for a non-prevailing def that has interposable linkage
|
|
|
|
// (e.g. non-odr weak or linkonce). In that case we can't simply
|
|
|
|
// convert to available_externally, since it would lose the
|
|
|
|
// interposable property and possibly get inlined. Simply drop
|
|
|
|
// the definition in that case.
|
|
|
|
if (GlobalValue::isAvailableExternallyLinkage(NewLinkage) &&
|
2018-02-05 23:44:27 +08:00
|
|
|
GlobalValue::isInterposableLinkage(GV.getLinkage())) {
|
|
|
|
if (!convertToDeclaration(GV))
|
|
|
|
// FIXME: Change this to collect replaced GVs and later erase
|
2018-11-09 04:10:07 +08:00
|
|
|
// them from the parent module once thinLTOResolvePrevailingGUID is
|
2018-02-05 23:44:27 +08:00
|
|
|
// changed to enable this for aliases.
|
|
|
|
llvm_unreachable("Expected GV to be converted");
|
|
|
|
} else {
|
[ThinLTO] Auto-hide prevailing linkonce_odr only when all copies eligible
Summary:
We hit undefined references building with ThinLTO when one source file
contained explicit instantiations of a template method (weak_odr) but
there were also implicit instantiations in another file (linkonce_odr),
and the latter was the prevailing copy. In this case the symbol was
marked hidden when the prevailing linkonce_odr copy was promoted to
weak_odr. It led to unsats when the resulting shared library was linked
with other code that contained a reference (expecting to be resolved due
to the explicit instantiation).
Add a CanAutoHide flag to the GV summary to allow the thin link to
identify when all copies are eligible for auto-hiding (because they were
all originally linkonce_odr global unnamed addr), and only do the
auto-hide in that case.
Most of the changes here are due to plumbing the new flag through the
bitcode and llvm assembly, and resulting test changes. I augmented the
existing auto-hide test to check for this situation.
Reviewers: pcc
Subscribers: mehdi_amini, inglorion, eraman, dexonsmith, arphaman, dang, llvm-commits, steven_wu, wmi
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D59709
llvm-svn: 360466
2019-05-11 04:08:24 +08:00
|
|
|
// If all copies of the original symbol had global unnamed addr and
|
|
|
|
// linkonce_odr linkage, it should be an auto hide symbol. In that case
|
|
|
|
// the thin link would have marked it as CanAutoHide. Add hidden visibility
|
|
|
|
// to the symbol to preserve the property.
|
|
|
|
if (NewLinkage == GlobalValue::WeakODRLinkage &&
|
|
|
|
GS->second->canAutoHide()) {
|
|
|
|
assert(GV.hasLinkOnceODRLinkage() && GV.hasGlobalUnnamedAddr());
|
2018-02-10 02:34:08 +08:00
|
|
|
GV.setVisibility(GlobalValue::HiddenVisibility);
|
2019-05-11 04:08:24 +08:00
|
|
|
}
|
2018-02-10 02:34:08 +08:00
|
|
|
|
2018-05-14 20:53:11 +08:00
|
|
|
LLVM_DEBUG(dbgs() << "ODR fixing up linkage for `" << GV.getName()
|
|
|
|
<< "` from " << GV.getLinkage() << " to " << NewLinkage
|
|
|
|
<< "\n");
|
2017-01-21 05:54:58 +08:00
|
|
|
GV.setLinkage(NewLinkage);
|
|
|
|
}
|
|
|
|
// Remove declarations from comdats, including available_externally
|
2016-08-16 05:00:04 +08:00
|
|
|
// as this is a declaration for the linker, and will be dropped eventually.
|
|
|
|
// It is illegal for comdats to contain declarations.
|
|
|
|
auto *GO = dyn_cast_or_null<GlobalObject>(&GV);
|
2017-01-21 05:54:58 +08:00
|
|
|
if (GO && GO->isDeclarationForLinker() && GO->hasComdat())
|
2016-08-16 05:00:04 +08:00
|
|
|
GO->setComdat(nullptr);
|
2016-05-25 22:03:11 +08:00
|
|
|
};
|
|
|
|
|
|
|
|
// Process functions, globals and aliases now
|
|
|
|
for (auto &GV : TheModule)
|
|
|
|
updateLinkage(GV);
|
|
|
|
for (auto &GV : TheModule.globals())
|
|
|
|
updateLinkage(GV);
|
|
|
|
for (auto &GV : TheModule.aliases())
|
|
|
|
updateLinkage(GV);
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Run internalization on \p TheModule based on summary analysis.
|
|
|
|
void llvm::thinLTOInternalizeModule(Module &TheModule,
|
|
|
|
const GVSummaryMapTy &DefinedGlobals) {
|
|
|
|
// Declare a callback for the internalize pass that will ask for every
|
|
|
|
// candidate GlobalValue if it can be internalized or not.
|
|
|
|
auto MustPreserveGV = [&](const GlobalValue &GV) -> bool {
|
|
|
|
// Lookup the linkage recorded in the summaries during global analysis.
|
2017-05-10 06:43:31 +08:00
|
|
|
auto GS = DefinedGlobals.find(GV.getGUID());
|
2016-05-25 22:03:11 +08:00
|
|
|
if (GS == DefinedGlobals.end()) {
|
|
|
|
// Must have been promoted (possibly conservatively). Find original
|
|
|
|
// name so that we can access the correct summary and see if it can
|
|
|
|
// be internalized again.
|
|
|
|
// FIXME: Eventually we should control promotion instead of promoting
|
|
|
|
// and internalizing again.
|
|
|
|
StringRef OrigName =
|
|
|
|
ModuleSummaryIndex::getOriginalNameBeforePromote(GV.getName());
|
|
|
|
std::string OrigId = GlobalValue::getGlobalIdentifier(
|
|
|
|
OrigName, GlobalValue::InternalLinkage,
|
|
|
|
TheModule.getSourceFileName());
|
2017-05-10 06:43:31 +08:00
|
|
|
GS = DefinedGlobals.find(GlobalValue::getGUID(OrigId));
|
2016-06-09 09:14:13 +08:00
|
|
|
if (GS == DefinedGlobals.end()) {
|
|
|
|
// Also check the original non-promoted non-globalized name. In some
|
|
|
|
// cases a preempted weak value is linked in as a local copy because
|
|
|
|
// it is referenced by an alias (IRLinker::linkGlobalValueProto).
|
|
|
|
// In that case, since it was originally not a local value, it was
|
|
|
|
// recorded in the index using the original name.
|
|
|
|
// FIXME: This may not be needed once PR27866 is fixed.
|
2017-05-10 06:43:31 +08:00
|
|
|
GS = DefinedGlobals.find(GlobalValue::getGUID(OrigName));
|
2016-06-09 09:14:13 +08:00
|
|
|
assert(GS != DefinedGlobals.end());
|
|
|
|
}
|
2017-05-10 06:43:31 +08:00
|
|
|
}
|
|
|
|
return !GlobalValue::isLocalLinkage(GS->second->linkage());
|
2016-05-25 22:03:11 +08:00
|
|
|
};
|
|
|
|
|
|
|
|
// FIXME: See if we can just internalize directly here via linkage changes
|
|
|
|
// based on the index, rather than invoking internalizeModule.
|
2017-10-11 06:49:55 +08:00
|
|
|
internalizeModule(TheModule, MustPreserveGV);
|
2016-05-25 22:03:11 +08:00
|
|
|
}
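A ThinLTO backend typically applies the two entry points above in sequence to each module, passing the same per-module slice of the combined index to both. A minimal sketch (helper name hypothetical):

static void resolveAndInternalize(Module &TheModule,
                                  const GVSummaryMapTy &DefinedGlobals) {
  // First rewrite linkages to match the thin-link decisions, then internalize
  // whatever the summaries show is not referenced from outside this module.
  thinLTOResolvePrevailingInModule(TheModule, DefinedGlobals);
  thinLTOInternalizeModule(TheModule, DefinedGlobals);
}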
|
|
|
|
|
2017-12-16 08:18:12 +08:00
|
|
|
/// Make alias a clone of its aliasee.
|
|
|
|
static Function *replaceAliasWithAliasee(Module *SrcModule, GlobalAlias *GA) {
|
|
|
|
Function *Fn = cast<Function>(GA->getBaseObject());
|
|
|
|
|
|
|
|
ValueToValueMapTy VMap;
|
|
|
|
Function *NewFn = CloneFunction(Fn, VMap);
|
[ThinLTO] Use original alias visibility when importing
Summary:
When we import an alias, we do so by making a clone of the aliasee. Just
as this clone uses the original alias name and linkage, it should also
use the same visibility (not the aliasee's visibility). Otherwise,
linker behavior is affected (e.g. if the aliasee was hidden, but the
alias is not, the resulting imported clone should not be hidden,
otherwise the linker will make the final symbol hidden which is
incorrect).
Reviewers: wmi
Subscribers: mehdi_amini, inglorion, eraman, steven_wu, dexonsmith, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D62535
llvm-svn: 361989
2019-05-30 00:50:46 +08:00
|
|
|
// Clone should use the original alias's linkage, visibility and name, and we
|
|
|
|
// ensure all uses of the alias instead use the new clone (casted if necessary).
|
2017-12-16 08:18:12 +08:00
|
|
|
NewFn->setLinkage(GA->getLinkage());
|
2019-05-30 00:50:46 +08:00
|
|
|
NewFn->setVisibility(GA->getVisibility());
|
2017-12-16 08:18:12 +08:00
|
|
|
GA->replaceAllUsesWith(ConstantExpr::getBitCast(NewFn, GA->getType()));
|
|
|
|
NewFn->takeName(GA);
|
|
|
|
return NewFn;
|
|
|
|
}
|
|
|
|
|
2018-11-16 15:08:00 +08:00
|
|
|
// Internalize values that we marked with a specific attribute
|
|
|
|
// in processGlobalForThinLTO.
|
2019-07-05 23:25:05 +08:00
|
|
|
static void internalizeGVsAfterImport(Module &M) {
|
2018-11-16 15:08:00 +08:00
|
|
|
for (auto &GV : M.globals())
|
|
|
|
// Skip GVs which have been converted to declarations
|
|
|
|
// by dropDeadSymbols.
|
|
|
|
if (!GV.isDeclaration() && GV.hasAttribute("thinlto-internalize")) {
|
|
|
|
GV.setLinkage(GlobalValue::InternalLinkage);
|
|
|
|
GV.setVisibility(GlobalValue::DefaultVisibility);
|
|
|
|
}
|
|
|
|
}
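The producer side of this handshake lives in processGlobalForThinLTO, which tags the value before importing; a sketch of that tagging (helper name hypothetical):

static void markForLateInternalization(GlobalVariable &GV) {
  // Must match the string checked in internalizeGVsAfterImport above.
  GV.addAttribute("thinlto-internalize");
}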
|
|
|
|
|
2015-12-03 10:37:33 +08:00
|
|
|
// Automatically import functions in Module \p DestModule based on the summaries
|
|
|
|
// index.
|
2016-11-10 01:49:19 +08:00
|
|
|
Expected<bool> FunctionImporter::importFunctions(
|
2017-05-20 07:32:21 +08:00
|
|
|
Module &DestModule, const FunctionImporter::ImportMapTy &ImportList) {
|
2018-05-14 20:53:11 +08:00
|
|
|
LLVM_DEBUG(dbgs() << "Starting import for Module "
|
|
|
|
<< DestModule.getModuleIdentifier() << "\n");
|
2018-03-12 18:30:50 +08:00
|
|
|
unsigned ImportedCount = 0, ImportedGVCount = 0;
|
2015-11-24 14:07:49 +08:00
|
|
|
|
2017-02-04 00:56:27 +08:00
|
|
|
IRMover Mover(DestModule);
|
2015-12-09 16:17:35 +08:00
|
|
|
// Do the actual import of functions now, one Module at a time
|
2016-03-26 13:40:34 +08:00
|
|
|
std::set<StringRef> ModuleNameOrderedList;
|
|
|
|
for (auto &FunctionsToImportPerModule : ImportList) {
|
|
|
|
ModuleNameOrderedList.insert(FunctionsToImportPerModule.first());
|
|
|
|
}
|
|
|
|
for (auto &Name : ModuleNameOrderedList) {
|
2015-12-09 16:17:35 +08:00
|
|
|
// Get the module for the import
|
2016-03-26 13:40:34 +08:00
|
|
|
const auto &FunctionsToImportPerModule = ImportList.find(Name);
|
|
|
|
assert(FunctionsToImportPerModule != ImportList.end());
|
2016-11-13 15:00:17 +08:00
|
|
|
Expected<std::unique_ptr<Module>> SrcModuleOrErr = ModuleLoader(Name);
|
|
|
|
if (!SrcModuleOrErr)
|
|
|
|
return SrcModuleOrErr.takeError();
|
|
|
|
std::unique_ptr<Module> SrcModule = std::move(*SrcModuleOrErr);
|
2015-12-09 16:17:35 +08:00
|
|
|
assert(&DestModule.getContext() == &SrcModule->getContext() &&
|
|
|
|
"Context mismatch");
|
|
|
|
|
2016-01-22 08:15:53 +08:00
|
|
|
// If modules were created with lazy metadata loading, materialize it
|
|
|
|
// now, before linking it (otherwise this will be a noop).
|
2016-11-10 01:49:19 +08:00
|
|
|
if (Error Err = SrcModule->materializeMetadata())
|
|
|
|
return std::move(Err);
|
2015-12-18 01:14:09 +08:00
|
|
|
|
2016-03-26 13:40:34 +08:00
|
|
|
auto &ImportGUIDs = FunctionsToImportPerModule->second;
|
|
|
|
// Find the globals to import
|
2017-02-04 00:56:27 +08:00
|
|
|
SetVector<GlobalValue *> GlobalsToImport;
|
2016-07-07 02:12:23 +08:00
|
|
|
for (Function &F : *SrcModule) {
|
|
|
|
if (!F.hasName())
|
2016-04-05 02:52:23 +08:00
|
|
|
continue;
|
2016-07-07 02:12:23 +08:00
|
|
|
auto GUID = F.getGUID();
|
2016-04-05 02:52:23 +08:00
|
|
|
auto Import = ImportGUIDs.count(GUID);
|
2018-05-14 20:53:11 +08:00
|
|
|
LLVM_DEBUG(dbgs() << (Import ? "Is" : "Not") << " importing function "
|
|
|
|
<< GUID << " " << F.getName() << " from "
|
|
|
|
<< SrcModule->getSourceFileName() << "\n");
|
2016-04-05 02:52:23 +08:00
|
|
|
if (Import) {
|
2016-11-10 01:49:19 +08:00
|
|
|
if (Error Err = F.materialize())
|
|
|
|
return std::move(Err);
|
2016-07-09 07:01:49 +08:00
|
|
|
if (EnableImportMetadata) {
|
|
|
|
// Add 'thinlto_src_module' metadata for statistics and debugging.
|
|
|
|
F.setMetadata(
|
|
|
|
"thinlto_src_module",
|
2017-10-11 06:49:55 +08:00
|
|
|
MDNode::get(DestModule.getContext(),
|
|
|
|
{MDString::get(DestModule.getContext(),
|
|
|
|
SrcModule->getSourceFileName())}));
|
2016-07-09 07:01:49 +08:00
|
|
|
}
|
2016-07-07 02:12:23 +08:00
|
|
|
GlobalsToImport.insert(&F);
|
2016-03-26 13:40:34 +08:00
|
|
|
}
|
|
|
|
}
|
2016-07-07 02:12:23 +08:00
|
|
|
for (GlobalVariable &GV : SrcModule->globals()) {
|
2016-03-26 13:40:34 +08:00
|
|
|
if (!GV.hasName())
|
|
|
|
continue;
|
|
|
|
auto GUID = GV.getGUID();
|
2016-04-05 02:52:23 +08:00
|
|
|
auto Import = ImportGUIDs.count(GUID);
|
2018-05-14 20:53:11 +08:00
|
|
|
LLVM_DEBUG(dbgs() << (Import ? "Is" : "Not") << " importing global "
|
|
|
|
<< GUID << " " << GV.getName() << " from "
|
|
|
|
<< SrcModule->getSourceFileName() << "\n");
|
2016-04-05 02:52:23 +08:00
|
|
|
if (Import) {
|
2016-11-10 01:49:19 +08:00
|
|
|
if (Error Err = GV.materialize())
|
|
|
|
return std::move(Err);
|
2018-03-12 18:30:50 +08:00
|
|
|
ImportedGVCount += GlobalsToImport.insert(&GV);
|
2016-03-26 13:40:34 +08:00
|
|
|
}
|
|
|
|
}
|
2016-07-07 02:12:23 +08:00
|
|
|
for (GlobalAlias &GA : SrcModule->aliases()) {
|
|
|
|
if (!GA.hasName())
|
2016-03-26 13:40:34 +08:00
|
|
|
continue;
|
2016-07-07 02:12:23 +08:00
|
|
|
auto GUID = GA.getGUID();
|
2017-12-16 08:18:12 +08:00
|
|
|
auto Import = ImportGUIDs.count(GUID);
|
2018-05-14 20:53:11 +08:00
|
|
|
LLVM_DEBUG(dbgs() << (Import ? "Is" : "Not") << " importing alias "
|
|
|
|
<< GUID << " " << GA.getName() << " from "
|
|
|
|
<< SrcModule->getSourceFileName() << "\n");
|
2017-12-16 08:18:12 +08:00
|
|
|
if (Import) {
|
|
|
|
if (Error Err = GA.materialize())
|
|
|
|
return std::move(Err);
|
|
|
|
// Import alias as a copy of its aliasee.
|
|
|
|
GlobalObject *Base = GA.getBaseObject();
|
|
|
|
if (Error Err = Base->materialize())
|
|
|
|
return std::move(Err);
|
|
|
|
auto *Fn = replaceAliasWithAliasee(SrcModule.get(), &GA);
|
2018-05-14 20:53:11 +08:00
|
|
|
LLVM_DEBUG(dbgs() << "Is importing aliasee fn " << Base->getGUID()
|
|
|
|
<< " " << Base->getName() << " from "
|
|
|
|
<< SrcModule->getSourceFileName() << "\n");
|
2017-12-16 08:18:12 +08:00
|
|
|
if (EnableImportMetadata) {
|
|
|
|
// Add 'thinlto_src_module' metadata for statistics and debugging.
|
|
|
|
Fn->setMetadata(
|
|
|
|
"thinlto_src_module",
|
|
|
|
MDNode::get(DestModule.getContext(),
|
|
|
|
{MDString::get(DestModule.getContext(),
|
|
|
|
SrcModule->getSourceFileName())}));
|
|
|
|
}
|
|
|
|
GlobalsToImport.insert(Fn);
|
|
|
|
}
|
2016-03-26 13:40:34 +08:00
|
|
|
}
|
|
|
|
|
2017-01-05 06:54:33 +08:00
|
|
|
// Upgrade debug info after we're done materializing all the globals and we
|
|
|
|
// have loaded all the required metadata!
|
|
|
|
UpgradeDebugInfo(*SrcModule);
|
|
|
|
|
2015-12-09 16:17:35 +08:00
|
|
|
// Link in the specified functions.
|
2016-03-26 13:40:34 +08:00
|
|
|
if (renameModuleForThinLTO(*SrcModule, Index, &GlobalsToImport))
|
2016-03-19 08:40:31 +08:00
|
|
|
return true;
|
|
|
|
|
2016-03-27 23:27:30 +08:00
|
|
|
if (PrintImports) {
|
|
|
|
for (const auto *GV : GlobalsToImport)
|
|
|
|
dbgs() << DestModule.getSourceFileName() << ": Import " << GV->getName()
|
|
|
|
<< " from " << SrcModule->getSourceFileName() << "\n";
|
|
|
|
}
|
|
|
|
|
2017-02-04 00:56:27 +08:00
|
|
|
if (Mover.move(std::move(SrcModule), GlobalsToImport.getArrayRef(),
|
|
|
|
[](GlobalValue &, IRMover::ValueAdder) {},
|
2017-02-04 01:01:14 +08:00
|
|
|
/*IsPerformingImport=*/true))
|
2015-12-09 16:17:35 +08:00
|
|
|
report_fatal_error("Function Import: link error");
|
|
|
|
|
2016-03-26 13:40:34 +08:00
|
|
|
ImportedCount += GlobalsToImport.size();
|
2017-01-06 05:34:18 +08:00
|
|
|
NumImportedModules++;
|
2015-12-09 16:17:35 +08:00
|
|
|
}
|
2015-12-18 01:14:09 +08:00
|
|
|
|
2019-07-05 23:25:05 +08:00
|
|
|
internalizeGVsAfterImport(DestModule);
|
2018-11-16 15:08:00 +08:00
|
|
|
|
2018-03-12 18:30:50 +08:00
|
|
|
NumImportedFunctions += (ImportedCount - ImportedGVCount);
|
|
|
|
NumImportedGlobalVars += ImportedGVCount;
|
2016-03-27 23:27:30 +08:00
|
|
|
|
2018-05-14 20:53:11 +08:00
|
|
|
LLVM_DEBUG(dbgs() << "Imported " << ImportedCount - ImportedGVCount
|
|
|
|
<< " functions for Module "
|
|
|
|
<< DestModule.getModuleIdentifier() << "\n");
|
|
|
|
LLVM_DEBUG(dbgs() << "Imported " << ImportedGVCount
|
|
|
|
<< " global variables for Module "
|
|
|
|
<< DestModule.getModuleIdentifier() << "\n");
|
2015-12-03 10:37:33 +08:00
|
|
|
return ImportedCount;
|
2015-11-24 14:07:49 +08:00
|
|
|
}
|
|
|
|
|
2016-12-21 08:50:12 +08:00
|
|
|
static bool doImportingForModule(Module &M) {
|
|
|
|
if (SummaryFile.empty())
|
|
|
|
report_fatal_error("error: -function-import requires -summary-file\n");
|
|
|
|
Expected<std::unique_ptr<ModuleSummaryIndex>> IndexPtrOrErr =
|
2018-11-14 01:35:04 +08:00
|
|
|
getModuleSummaryIndexForFile(SummaryFile);
|
2016-12-21 08:50:12 +08:00
|
|
|
if (!IndexPtrOrErr) {
|
|
|
|
logAllUnhandledErrors(IndexPtrOrErr.takeError(), errs(),
|
|
|
|
"Error loading file '" + SummaryFile + "': ");
|
|
|
|
return false;
|
2016-07-19 05:22:24 +08:00
|
|
|
}
|
2016-12-21 08:50:12 +08:00
|
|
|
std::unique_ptr<ModuleSummaryIndex> Index = std::move(*IndexPtrOrErr);
|
2016-07-19 05:22:24 +08:00
|
|
|
|
|
|
|
// First step is collecting the import list.
|
|
|
|
FunctionImporter::ImportMapTy ImportList;
|
2017-12-16 08:18:12 +08:00
|
|
|
// If requested, simply import all functions in the index. This is used
|
|
|
|
// when testing distributed backend handling via the opt tool, when
|
|
|
|
// we have distributed indexes containing exactly the summaries to import.
|
|
|
|
if (ImportAllIndex)
|
|
|
|
ComputeCrossModuleImportForModuleFromIndex(M.getModuleIdentifier(), *Index,
|
|
|
|
ImportList);
|
|
|
|
else
|
|
|
|
ComputeCrossModuleImportForModule(M.getModuleIdentifier(), *Index,
|
|
|
|
ImportList);
|
2016-07-19 05:22:24 +08:00
|
|
|
|
2016-11-15 03:21:41 +08:00
|
|
|
// Conservatively mark all internal values as promoted. This interface is
|
|
|
|
// only used when doing importing via the function importing pass. The pass
|
|
|
|
// is only enabled when testing importing via the 'opt' tool, which does
|
|
|
|
// not do the ThinLink that would normally determine what values to promote.
|
|
|
|
for (auto &I : *Index) {
|
2017-05-05 02:03:25 +08:00
|
|
|
for (auto &S : I.second.SummaryList) {
|
2016-11-15 03:21:41 +08:00
|
|
|
if (GlobalValue::isLocalLinkage(S->linkage()))
|
|
|
|
S->setLinkage(GlobalValue::ExternalLinkage);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-07-19 05:22:24 +08:00
|
|
|
// Next we need to promote to global scope and rename any local values that
|
|
|
|
// are potentially exported to other modules.
|
|
|
|
if (renameModuleForThinLTO(M, *Index, nullptr)) {
|
|
|
|
errs() << "Error renaming module\n";
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Perform the import now.
|
|
|
|
auto ModuleLoader = [&M](StringRef Identifier) {
|
2020-01-29 03:23:46 +08:00
|
|
|
return loadFile(std::string(Identifier), M.getContext());
|
2016-07-19 05:22:24 +08:00
|
|
|
};
|
|
|
|
FunctionImporter Importer(*Index, ModuleLoader);
|
2017-02-03 02:42:25 +08:00
|
|
|
Expected<bool> Result = Importer.importFunctions(M, ImportList);
|
2016-11-10 01:49:19 +08:00
|
|
|
|
|
|
|
// FIXME: Probably need to propagate Errors through the pass manager.
|
|
|
|
if (!Result) {
|
|
|
|
logAllUnhandledErrors(Result.takeError(), errs(),
|
|
|
|
"Error importing module: ");
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
return *Result;
|
2016-07-19 05:22:24 +08:00
|
|
|
}
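For reference, this testing-only path corresponds to an invocation along the lines of opt -function-import -summary-file combined.thinlto.bc module.bc (file names hypothetical), since the import list is computed from the pre-built combined index named by -summary-file.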
|
|
|
|
|
2015-12-24 18:03:35 +08:00
|
|
|
namespace {
|
2017-10-11 06:49:55 +08:00
|
|
|
|
2015-11-24 14:07:49 +08:00
|
|
|
/// Pass that performs cross-module function import provided a summary file.
|
2016-07-19 05:22:24 +08:00
|
|
|
class FunctionImportLegacyPass : public ModulePass {
|
2015-11-24 14:07:49 +08:00
|
|
|
public:
|
|
|
|
/// Pass identification, replacement for typeid
|
|
|
|
static char ID;
|
|
|
|
|
2017-10-11 06:49:55 +08:00
|
|
|
explicit FunctionImportLegacyPass() : ModulePass(ID) {}
|
|
|
|
|
2015-12-08 03:21:11 +08:00
|
|
|
/// Specify pass name for debug output
|
2016-10-01 10:56:57 +08:00
|
|
|
StringRef getPassName() const override { return "Function Importing"; }
|
2015-12-08 03:21:11 +08:00
|
|
|
|
2015-11-24 14:07:49 +08:00
|
|
|
bool runOnModule(Module &M) override {
|
2016-04-23 06:06:11 +08:00
|
|
|
if (skipModule(M))
|
|
|
|
return false;
|
|
|
|
|
2016-12-21 08:50:12 +08:00
|
|
|
return doImportingForModule(M);
|
2015-11-24 14:07:49 +08:00
|
|
|
}
|
|
|
|
};
|
2017-10-11 06:49:55 +08:00
|
|
|
|
|
|
|
} // end anonymous namespace
|
2015-11-24 14:07:49 +08:00
|
|
|
|
2016-07-19 05:22:24 +08:00
|
|
|
PreservedAnalyses FunctionImportPass::run(Module &M,
|
2016-08-09 08:28:38 +08:00
|
|
|
ModuleAnalysisManager &AM) {
|
2016-12-21 08:50:12 +08:00
|
|
|
if (!doImportingForModule(M))
|
2016-07-19 05:22:24 +08:00
|
|
|
return PreservedAnalyses::all();
|
|
|
|
|
|
|
|
return PreservedAnalyses::none();
|
|
|
|
}
|
|
|
|
|
|
|
|
char FunctionImportLegacyPass::ID = 0;
|
|
|
|
INITIALIZE_PASS(FunctionImportLegacyPass, "function-import",
|
|
|
|
"Summary Based Function Import", false, false)
|
2015-11-24 14:07:49 +08:00
|
|
|
|
|
|
|
namespace llvm {
|
2017-10-11 06:49:55 +08:00
|
|
|
|
2016-12-21 08:50:12 +08:00
|
|
|
Pass *createFunctionImportPass() {
|
|
|
|
return new FunctionImportLegacyPass();
|
2015-12-08 03:21:11 +08:00
|
|
|
}
|
2017-10-11 06:49:55 +08:00
|
|
|
|
|
|
|
} // end namespace llvm
|