//===--------- LLJIT.cpp - An ORC-based JIT for compiling LLVM IR ---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "llvm/ExecutionEngine/Orc/LLJIT.h"
#include "llvm/ExecutionEngine/Orc/OrcError.h"
#include "llvm/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.h"
#include "llvm/ExecutionEngine/SectionMemoryManager.h"
#include "llvm/IR/Mangler.h"
namespace llvm {
namespace orc {
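
// If no JITTargetMachineBuilder was supplied, detect one from the host
// process.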
Error LLJITBuilderState::prepareForConstruction() {
  if (!JTMB) {
    if (auto JTMBOrErr = JITTargetMachineBuilder::detectHost())
      JTMB = std::move(*JTMBOrErr);
    else
      return JTMBOrErr.takeError();
  }

  return Error::success();
}
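
// Wait for any in-flight compile work to finish before tearing down the JIT.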
LLJIT::~LLJIT() {
  if (CompileThreads)
    CompileThreads->wait();
}
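
// Define an absolute symbol in the main JITDylib under the given name. The
// name is used verbatim; no mangling is applied.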
Error LLJIT::defineAbsolute(StringRef Name, JITEvaluatedSymbol Sym) {
  auto InternedName = ES->intern(Name);
  SymbolMap Symbols({{InternedName, Sym}});
  return Main.define(absoluteSymbols(std::move(Symbols)));
}
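
// Add an IR module to the given JITDylib. The module's data layout must match
// the JIT's (or be unset, in which case the JIT's layout is applied).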
Error LLJIT::addIRModule(JITDylib &JD, ThreadSafeModule TSM) {
  assert(TSM && "Can not add null module");

  if (auto Err =
          TSM.withModuleDo([&](Module &M) { return applyDataLayout(M); }))
    return Err;

  return CompileLayer->add(JD, std::move(TSM), ES->allocateVModule());
}
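
// Add a relocatable object file to the given JITDylib.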
Error LLJIT::addObjectFile(JITDylib &JD, std::unique_ptr<MemoryBuffer> Obj) {
  assert(Obj && "Can not add null object");

  return ObjLinkingLayer->add(JD, std::move(Obj), ES->allocateVModule());
}
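
// Look up a linker-mangled symbol name in the given JITDylib.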
Expected<JITEvaluatedSymbol> LLJIT::lookupLinkerMangled(JITDylib &JD,
                                                         StringRef Name) {
  return ES->lookup(JITDylibSearchList({{&JD, true}}), ES->intern(Name));
}
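
// Create the object linking layer, preferring a client-supplied factory if
// one was set on the builder.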
std::unique_ptr<ObjectLayer>
LLJIT::createObjectLinkingLayer(LLJITBuilderState &S, ExecutionSession &ES) {
  // If the config state provided an ObjectLinkingLayer factory then use it.
  if (S.CreateObjectLinkingLayer)
    return S.CreateObjectLinkingLayer(ES, S.JTMB->getTargetTriple());

  // Otherwise default to creating an RTDyldObjectLinkingLayer that constructs
  // a new SectionMemoryManager for each object.
  auto GetMemMgr = []() { return std::make_unique<SectionMemoryManager>(); };
  auto ObjLinkingLayer =
      std::make_unique<RTDyldObjectLinkingLayer>(ES, std::move(GetMemMgr));

  if (S.JTMB->getTargetTriple().isOSBinFormatCOFF())
    ObjLinkingLayer->setOverrideObjectFlagsWithResponsibilityFlags(true);

  // FIXME: Explicit conversion to std::unique_ptr<ObjectLayer> added to
  // silence errors from some GCC / libstdc++ bots. Remove this conversion
  // (i.e. just return ObjLinkingLayer) once those bots are upgraded.
  return std::unique_ptr<ObjectLayer>(std::move(ObjLinkingLayer));
}
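
// Create the compile function for the IRCompileLayer, preferring a
// client-supplied factory if one was set on the builder.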
Expected<IRCompileLayer::CompileFunction>
LLJIT::createCompileFunction(LLJITBuilderState &S,
                             JITTargetMachineBuilder JTMB) {
  // If there is a custom compile function creator set then use it.
  if (S.CreateCompileFunction)
    return S.CreateCompileFunction(std::move(JTMB));

  // Otherwise default to creating a SimpleCompiler, or ConcurrentIRCompiler,
  // depending on the number of threads requested.
  if (S.NumCompileThreads > 0)
    return ConcurrentIRCompiler(std::move(JTMB));

  auto TM = JTMB.createTargetMachine();
  if (!TM)
    return TM.takeError();

  return TMOwningSimpleCompiler(std::move(*TM));
}
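
// Construct an LLJIT instance: create the object linking layer, fetch the
// target data layout, build the compile layer, and set up a compile thread
// pool if concurrent compilation was requested.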
LLJIT::LLJIT(LLJITBuilderState &S, Error &Err)
    : ES(S.ES ? std::move(S.ES) : std::make_unique<ExecutionSession>()),
      Main(this->ES->getMainJITDylib()), DL(""), CtorRunner(Main),
      DtorRunner(Main) {

  ErrorAsOutParameter _(&Err);

  ObjLinkingLayer = createObjectLinkingLayer(S, *ES);

  if (auto DLOrErr = S.JTMB->getDefaultDataLayoutForTarget())
    DL = std::move(*DLOrErr);
  else {
    Err = DLOrErr.takeError();
    return;
  }

  {
    auto CompileFunction = createCompileFunction(S, std::move(*S.JTMB));
    if (!CompileFunction) {
      Err = CompileFunction.takeError();
      return;
    }
    CompileLayer = std::make_unique<IRCompileLayer>(
        *ES, *ObjLinkingLayer, std::move(*CompileFunction));
  }

  if (S.NumCompileThreads > 0) {
    // Concurrent compilation was requested: have the compile layer clone
    // modules onto new contexts before emitting, create the thread pool, and
    // dispatch materialization work to it.
    CompileLayer->setCloneToNewContextOnEmit(true);
    CompileThreads = std::make_unique<ThreadPool>(S.NumCompileThreads);
    ES->setDispatchMaterialization(
        [this](JITDylib &JD, std::unique_ptr<MaterializationUnit> MU) {
          // Each materialization unit runs as an independent task on the
          // compile thread pool.
          auto Work = [MU = std::move(MU), &JD] { MU->doMaterialize(JD); };
          CompileThreads->async(std::move(Work));
        });
  }
}
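
// Apply linker mangling (e.g. the data layout's global prefix) to the given
// name.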
std::string LLJIT::mangle(StringRef UnmangledName) {
  std::string MangledName;
  {
    raw_string_ostream MangledNameStream(MangledName);
    Mangler::getNameWithPrefix(MangledNameStream, UnmangledName, DL);
  }
  return MangledName;
}
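
// Set the module's data layout to the JIT's default if it is unset; reject
// modules whose data layout does not match the JIT's.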
Error LLJIT::applyDataLayout(Module &M) {
  if (M.getDataLayout().isDefault())
    M.setDataLayout(DL);

  if (M.getDataLayout() != DL)
    return make_error<StringError>(
        "Added modules have incompatible data layouts",
        inconvertibleErrorCode());

  return Error::success();
}
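
// Record the module's static constructors and destructors so they can be run
// by the ctor/dtor runners.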
void LLJIT::recordCtorDtors(Module &M) {
  CtorRunner.add(getConstructors(M));
  DtorRunner.add(getDestructors(M));
}
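
// In addition to the base class setup, capture the target triple; it is
// needed later to build the call-through and indirect stubs managers.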
Error LLLazyJITBuilderState::prepareForConstruction() {
  if (auto Err = LLJITBuilderState::prepareForConstruction())
    return Err;
  TT = JTMB->getTargetTriple();
  return Error::success();
}
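
// Add an IR module for lazy compilation: apply the JIT's data layout, record
// the module's ctors/dtors, and hand it to the compile-on-demand layer.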
Error LLLazyJIT::addLazyIRModule(JITDylib &JD, ThreadSafeModule TSM) {
  assert(TSM && "Can not add null module");

  if (auto Err = TSM.withModuleDo([&](Module &M) -> Error {
        if (auto Err = applyDataLayout(M))
          return Err;
        recordCtorDtors(M);
        return Error::success();
      }))
    return Err;

  return CODLayer->add(JD, std::move(TSM), ES->allocateVModule());
}
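
// Construct an LLLazyJIT on top of LLJIT: set up the lazy call-through
// manager, indirect stubs manager builder, transform layer, and
// compile-on-demand layer.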
LLLazyJIT::LLLazyJIT(LLLazyJITBuilderState &S, Error &Err) : LLJIT(S, Err) {
  // If LLJIT construction failed then bail out.
  if (Err)
    return;

  ErrorAsOutParameter _(&Err);

  // Take/Create the lazy-compile callthrough manager.
  if (S.LCTMgr)
    LCTMgr = std::move(S.LCTMgr);
  else {
    if (auto LCTMgrOrErr = createLocalLazyCallThroughManager(
            S.TT, *ES, S.LazyCompileFailureAddr))
      LCTMgr = std::move(*LCTMgrOrErr);
    else {
      Err = LCTMgrOrErr.takeError();
      return;
    }
  }

  // Take/Create the indirect stubs manager builder.
  auto ISMBuilder = std::move(S.ISMBuilder);

  // If none was provided, try to build one.
  if (!ISMBuilder)
    ISMBuilder = createLocalIndirectStubsManagerBuilder(S.TT);

  // No luck. Bail out.
  if (!ISMBuilder) {
    Err = make_error<StringError>("Could not construct "
                                  "IndirectStubsManagerBuilder for target " +
                                      S.TT.str(),
                                  inconvertibleErrorCode());
    return;
  }

  // Create the transform layer.
  TransformLayer = std::make_unique<IRTransformLayer>(*ES, *CompileLayer);

  // Create the COD layer.
  CODLayer = std::make_unique<CompileOnDemandLayer>(
      *ES, *TransformLayer, *LCTMgr, std::move(ISMBuilder));

  if (S.NumCompileThreads > 0)
    CODLayer->setCloneToNewContextOnEmit(true);
}

} // End namespace orc.
} // End namespace llvm.