llvm-project/llvm/examples/ThinLtoJIT/ThinLtoModuleIndex.h

Add ThinLtoJIT example (committed 2020-01-11 08:09:42 +08:00)

Summary: Prototype of a JIT compiler that uses ThinLTO summaries to compile modules ahead of time. This is an implementation of the concept I presented in my "ThinLTO Summaries in JIT Compilation" talk at the 2018 Developers' Meeting: http://llvm.org/devmtg/2018-10/talk-abstracts.html#lt8

Up front, the JIT populates the *combined ThinLTO module index*, which provides fast access to the global call graph and to module paths by function. Next, it loads the main function's module and compiles it. All functions in the module are emitted with prolog instructions that *fire a discovery flag* once execution reaches them. In parallel, the *discovery thread* busy-watches the existing flags. Once it detects that one has fired, it uses the module index to find all functions reachable from it within a given number of calls and submits their defining modules to the compilation pipeline. While execution continues, more flags fire and further modules are added. Ideally, the JIT can be tuned so that, in the majority of cases, the code on the execution path has already been compiled ahead of time. Where that fails, a *definition generator* loads the defining module on demand when a missing function is reached.

Reviewers: lhames, dblaikie, jfb, tejohnson, pree-jackie, AlexDenisov, kavon

Subscribers: mgorny, mehdi_amini, inglorion, hiraditya, steven_wu, dexonsmith, arphaman, jfb, merge_guards_bot, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D72486
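To make the two core index operations concrete, here is a rough sketch (not part of the example's sources): merging per-module ThinLTO summaries into a combined index, and walking the summary call graph a bounded number of levels to collect the module paths of reachable callees. The helper names addSummaryFile and collectCalleeModulePaths are invented for illustration, the lookahead walk only approximates what the example's discoverCalleeModulePaths() does, and aliases are skipped for brevity; the LLVM APIs used (readModuleSummaryIndex, ModuleSummaryIndex, ValueInfo, FunctionSummary::calls) are the real ones.

// Sketch only: combined-index population and bounded lookahead discovery.
#include "llvm/ADT/ArrayRef.h"
#include "llvm/Bitcode/BitcodeReader.h"
#include "llvm/IR/ModuleSummaryIndex.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/MemoryBuffer.h"
#include <memory>
#include <set>
#include <string>
#include <vector>

using namespace llvm;

// Merge the per-module summary stored at Path into the combined index,
// registering it under ModuleId.
static Error addSummaryFile(ModuleSummaryIndex &Combined, StringRef Path,
                            uint64_t ModuleId) {
  ErrorOr<std::unique_ptr<MemoryBuffer>> Buffer = MemoryBuffer::getFile(Path);
  if (!Buffer)
    return errorCodeToError(Buffer.getError());
  return readModuleSummaryIndex((*Buffer)->getMemBufferRef(), Combined,
                                ModuleId);
}

// Starting from one function summary, follow call edges for a fixed number
// of levels and record the module path of every callee definition found.
// This approximates what discoverCalleeModulePaths() does for the discovery
// thread; alias summaries are ignored here.
static std::set<std::string>
collectCalleeModulePaths(FunctionSummary *Root, unsigned LookaheadLevels) {
  std::set<std::string> Paths;
  std::vector<FunctionSummary *> Worklist{Root};
  for (unsigned Level = 0; Level < LookaheadLevels && !Worklist.empty();
       ++Level) {
    std::vector<FunctionSummary *> Next;
    for (FunctionSummary *FS : Worklist) {
      for (const FunctionSummary::EdgeTy &Edge : FS->calls()) {
        ValueInfo Callee = Edge.first;
        for (const std::unique_ptr<GlobalValueSummary> &GVS :
             Callee.getSummaryList()) {
          Paths.insert(std::string(GVS->modulePath()));
          if (auto *CalleeFS = dyn_cast<FunctionSummary>(GVS.get()))
            Next.push_back(CalleeFS);
        }
      }
    }
    Worklist = std::move(Next);
  }
  return Paths;
}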
#ifndef LLVM_EXAMPLES_THINLTOJIT_THINLTOJITMODULEINDEX_H
#define LLVM_EXAMPLES_THINLTOJIT_THINLTOJITMODULEINDEX_H
#include "llvm/ADT/Optional.h"
#include "llvm/ExecutionEngine/Orc/Core.h"
#include "llvm/ExecutionEngine/Orc/ThreadSafeModule.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/ModuleSummaryIndex.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/ThreadPool.h"
#include <cstdint>
#include <future>
#include <mutex>
#include <set>
#include <vector>

namespace llvm {
namespace orc {

class SymbolStringPtr;

class ThinLtoModuleIndex {
  static constexpr bool HaveGVs = false;

public:
  ThinLtoModuleIndex(ExecutionSession &ES, unsigned ParseModuleThreads)
      : ES(ES), CombinedSummaryIndex(HaveGVs),
        ParseModuleWorkers(llvm::hardware_concurrency(ParseModuleThreads)),
        NumParseModuleThreads(ParseModuleThreads) {}

  Error add(StringRef InputPath);
  GlobalValueSummary *getSummary(GlobalValue::GUID Function) const;
  std::vector<StringRef> getAllModulePaths() const;
  Optional<StringRef> getModulePathForSymbol(StringRef Name) const;

  template <typename RangeT> void scheduleModuleParsing(const RangeT &Paths);
  ThreadSafeModule takeModule(StringRef Path);

  // Blocking module parsing; returns a null module on error.
  // Only used for the main module.
  ThreadSafeModule parseModuleFromFile(StringRef Path);

  std::vector<std::string> selectNextPaths();
  unsigned getNumDiscoveredModules() const;
  void discoverCalleeModulePaths(FunctionSummary *S, unsigned LookaheadLevels);

  VModuleKey getModuleId(StringRef Path) const {
    return CombinedSummaryIndex.getModuleId(Path);
  }

private:
  ExecutionSession &ES;
  ModuleSummaryIndex CombinedSummaryIndex;
  uint64_t NextModuleId{0};

  struct PathRankEntry {
    uint32_t Count{0};
    uint32_t MinDist{100};
  };
  StringMap<PathRankEntry> PathRank;

  ThreadPool ParseModuleWorkers;
  unsigned NumParseModuleThreads;

  std::mutex ScheduledModulesLock;
  StringMap<std::shared_future<void>> ScheduledModules;

  std::mutex ParsedModulesLock;
  StringMap<ThreadSafeModule> ParsedModules;

  void updatePathRank(StringRef Path, unsigned Distance);
  void addToWorklist(std::vector<FunctionSummary *> &List,
                     ArrayRef<FunctionSummary::EdgeTy> Calls);

  std::vector<StringRef> selectAllPaths();
  std::vector<StringRef> selectHotPaths(unsigned Count);

  void scheduleModuleParsingPrelocked(StringRef Path);
  Expected<ThreadSafeModule> doParseModule(StringRef Path);
};

template <typename RangeT>
inline void ThinLtoModuleIndex::scheduleModuleParsing(const RangeT &Paths) {
  std::lock_guard<std::mutex> Lock(ScheduledModulesLock);
  for (const auto &Path : Paths) {
    scheduleModuleParsingPrelocked(Path);
  }
}
} // namespace orc
} // namespace llvm
#endif
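For orientation, a minimal usage sketch of this index, assuming an LLVM of the same vintage as the example (where ExecutionSession is still default-constructible); the bitcode paths and the symbol name are placeholders, this is not the example's own driver code, and error handling is collapsed into ExitOnError.

// Sketch only: how a driver might exercise ThinLtoModuleIndex.
#include "ThinLtoModuleIndex.h"

#include "llvm/ExecutionEngine/Orc/Core.h"
#include "llvm/Support/Error.h"
#include <string>
#include <vector>

using namespace llvm;
using namespace llvm::orc;

int main() {
  ExitOnError ExitOnErr;
  ExecutionSession ES;
  ThinLtoModuleIndex Index(ES, /*ParseModuleThreads=*/4);

  // Populate the combined index from per-module summary files
  // (placeholder paths).
  ExitOnErr(Index.add("main.bc"));
  ExitOnErr(Index.add("util.bc"));

  // Resolve and synchronously parse the module that defines main().
  if (Optional<StringRef> MainPath = Index.getModulePathForSymbol("main")) {
    ThreadSafeModule MainTSM = Index.parseModuleFromFile(*MainPath);
    // ... hand MainTSM to the JIT's compile layer ...
  }

  // Later, a discovery pass would ask for the next hot paths, schedule
  // their parsing on the index's thread pool, and collect the results.
  std::vector<std::string> Next = Index.selectNextPaths();
  Index.scheduleModuleParsing(Next);
  for (const std::string &Path : Next) {
    // Presumably waits on the background parse recorded for Path.
    ThreadSafeModule TSM = Index.takeModule(Path);
    // ... submit TSM for ahead-of-time compilation ...
  }
  return 0;
}

The split between scheduleModuleParsing() and takeModule() matches the data members above: scheduled parses are tracked as shared_future entries on the thread pool, and parsed modules are handed back as ThreadSafeModules keyed by path.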