//===----- CompileOnDemandLayer.cpp - Lazily emit IR on first call --------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "llvm/ExecutionEngine/Orc/CompileOnDemandLayer.h"
#include "llvm/IR/Mangler.h"
#include "llvm/IR/Module.h"
using namespace llvm;
using namespace llvm::orc;
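
/// Clone the globals matched by ShouldExtract into a new ThreadSafeModule (on
/// its own context) and strip their definitions from the source module, so
/// that each definition ends up in exactly one module. The new module's
/// identifier is the source identifier with Suffix appended.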
static ThreadSafeModule extractSubModule(ThreadSafeModule &TSM,
                                         StringRef Suffix,
                                         GVPredicate ShouldExtract) {

  auto DeleteExtractedDefs = [](GlobalValue &GV) {
    // Bump the linkage: this global will be provided by the external module.
    GV.setLinkage(GlobalValue::ExternalLinkage);

    // Delete the definition in the source module.
    if (isa<Function>(GV)) {
      auto &F = cast<Function>(GV);
      F.deleteBody();
      F.setPersonalityFn(nullptr);
    } else if (isa<GlobalVariable>(GV)) {
      cast<GlobalVariable>(GV).setInitializer(nullptr);
    } else if (isa<GlobalAlias>(GV)) {
      // We need to turn deleted aliases into function or variable decls based
      // on the type of their aliasee.
      auto &A = cast<GlobalAlias>(GV);
      Constant *Aliasee = A.getAliasee();
      assert(A.hasName() && "Anonymous alias?");
      assert(Aliasee->hasName() && "Anonymous aliasee");
      std::string AliasName = A.getName();

      if (isa<Function>(Aliasee)) {
        auto *F = cloneFunctionDecl(*A.getParent(), *cast<Function>(Aliasee));
        A.replaceAllUsesWith(F);
        A.eraseFromParent();
        F->setName(AliasName);
      } else if (isa<GlobalVariable>(Aliasee)) {
        auto *G = cloneGlobalVariableDecl(*A.getParent(),
                                          *cast<GlobalVariable>(Aliasee));
        A.replaceAllUsesWith(G);
        A.eraseFromParent();
        G->setName(AliasName);
      } else
        llvm_unreachable("Alias to unsupported type");
    } else
      llvm_unreachable("Unsupported global type");
  };

  auto NewTSMod = cloneToNewContext(TSM, ShouldExtract, DeleteExtractedDefs);
  auto &M = *NewTSMod.getModule();
  M.setModuleIdentifier((M.getModuleIdentifier() + Suffix).str());

  return NewTSMod;
}
namespace llvm {
namespace orc {
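
/// A materialization unit whose materialize action defers back to the owning
/// CompileOnDemandLayer2: when any of its symbols is requested it hands the
/// module (and its symbol-to-definition map) to emitPartition, which decides
/// how much of the module to actually compile.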
class PartitioningIRMaterializationUnit : public IRMaterializationUnit {
public:
  PartitioningIRMaterializationUnit(ExecutionSession &ES, ThreadSafeModule TSM,
                                    CompileOnDemandLayer2 &Parent)
      : IRMaterializationUnit(ES, std::move(TSM)), Parent(Parent) {}

  PartitioningIRMaterializationUnit(
      ThreadSafeModule TSM, SymbolFlagsMap SymbolFlags,
      SymbolNameToDefinitionMap SymbolToDefinition,
      CompileOnDemandLayer2 &Parent)
      : IRMaterializationUnit(std::move(TSM), std::move(SymbolFlags),
                              std::move(SymbolToDefinition)),
        Parent(Parent) {}

private:
  void materialize(MaterializationResponsibility R) override {
    Parent.emitPartition(std::move(R), std::move(TSM),
                         std::move(SymbolToDefinition));
  }

  void discard(const JITDylib &V, const SymbolStringPtr &Name) override {
    // All original symbols were materialized by the CODLayer and should be
    // final. The function bodies provided by M should never be overridden.
    llvm_unreachable("Discard should never be called on a "
                     "PartitioningIRMaterializationUnit");
  }

  mutable std::mutex SourceModuleMutex;
  CompileOnDemandLayer2 &Parent;
};
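
// Partition function that compiles exactly the set of globals that was
// requested, and nothing else.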
Optional<CompileOnDemandLayer2::GlobalValueSet>
CompileOnDemandLayer2::compileRequested(GlobalValueSet Requested) {
  return std::move(Requested);
}
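
// Partition function that always compiles the whole module: returning None
// tells emitPartition to pass the module through to the base layer unmodified.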
Optional<CompileOnDemandLayer2::GlobalValueSet>
CompileOnDemandLayer2::compileWholeModule(GlobalValueSet Requested) {
  return None;
}
CompileOnDemandLayer2::CompileOnDemandLayer2(
    ExecutionSession &ES, IRLayer &BaseLayer, LazyCallThroughManager &LCTMgr,
    IndirectStubsManagerBuilder BuildIndirectStubsManager)
    : IRLayer(ES), BaseLayer(BaseLayer), LCTMgr(LCTMgr),
      BuildIndirectStubsManager(std::move(BuildIndirectStubsManager)) {}
void CompileOnDemandLayer2::setPartitionFunction(PartitionFunction Partition) {
  this->Partition = std::move(Partition);
}
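
// Rather than compiling the module here, emit() lodges it (wrapped in a
// PartitioningIRMaterializationUnit) in a per-dylib implementation JITDylib,
// and replaces the module's symbols in the target dylib with reexports: plain
// reexports for data, lazy reexports (call-through stubs) for functions, so
// that compilation is deferred until a function is first called.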
void CompileOnDemandLayer2::emit(MaterializationResponsibility R, VModuleKey K,
                                 ThreadSafeModule TSM) {
  assert(TSM.getModule() && "Null module");

  auto &ES = getExecutionSession();
  auto &M = *TSM.getModule();

  // First, do some cleanup on the module:
  cleanUpModule(M);

  // Now sort the callables and non-callables, build re-exports and lodge the
  // actual module with the implementation dylib.
  auto &PDR = getPerDylibResources(R.getTargetJITDylib());

  MangleAndInterner Mangle(ES, M.getDataLayout());
  SymbolAliasMap NonCallables;
  SymbolAliasMap Callables;

  for (auto &GV : M.global_values()) {
    assert(GV.hasName() && !GV.hasLocalLinkage() &&
           "GlobalValues must have been promoted before adding to "
           "CompileOnDemandLayer");
    if (GV.isDeclaration() || GV.hasAppendingLinkage())
      continue;

    auto Name = Mangle(GV.getName());
    auto Flags = JITSymbolFlags::fromGlobalValue(GV);
    if (Flags.isCallable())
      Callables[Name] = SymbolAliasMapEntry(Name, Flags);
    else
      NonCallables[Name] = SymbolAliasMapEntry(Name, Flags);
  }

  // Create a partitioning materialization unit and lodge it with the
  // implementation dylib.
  if (auto Err = PDR.getImplDylib().define(
          llvm::make_unique<PartitioningIRMaterializationUnit>(
              ES, std::move(TSM), *this))) {
    ES.reportError(std::move(Err));
    R.failMaterialization();
    return;
  }

  R.replace(reexports(PDR.getImplDylib(), std::move(NonCallables)));
  R.replace(lazyReexports(LCTMgr, PDR.getISManager(), PDR.getImplDylib(),
                          std::move(Callables)));
}
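
// Get (creating on first use) the resources associated with a target JITDylib:
// a hidden "<name>.impl" JITDylib that shares the target's search order and
// holds the not-yet-compiled module bodies, plus an IndirectStubsManager for
// its lazy call-through stubs.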
CompileOnDemandLayer2::PerDylibResources &
CompileOnDemandLayer2::getPerDylibResources(JITDylib &TargetD) {
  auto I = DylibResources.find(&TargetD);

  if (I == DylibResources.end()) {
    auto &ImplD =
        getExecutionSession().createJITDylib(TargetD.getName() + ".impl");
    TargetD.withSearchOrderDo([&](const JITDylibList &TargetSearchOrder) {
      ImplD.setSearchOrder(TargetSearchOrder, false);
    });
    PerDylibResources PDR(ImplD, BuildIndirectStubsManager());
    I = DylibResources.insert(std::make_pair(&TargetD, std::move(PDR))).first;
  }

  return I->second;
}
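
// Drop bodies of available_externally functions: their definitions are
// guaranteed to exist elsewhere, so there is no need to codegen them here.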
void CompileOnDemandLayer2::cleanUpModule(Module &M) {
  for (auto &F : M.functions()) {
    if (F.isDeclaration())
      continue;

    if (F.hasAvailableExternallyLinkage()) {
      F.deleteBody();
      F.setPersonalityFn(nullptr);
      continue;
    }
  }
}
void CompileOnDemandLayer2::expandPartition(GlobalValueSet &Partition) {
  // Expands the partition to ensure the following rules hold:
  // (1) If any alias is in the partition, its aliasee is also in the
  //     partition.
  // (2) If any aliasee is in the partition, its aliases are also in the
  //     partition.
  // (3) If any global variable is in the partition then all global variables
  //     are in the partition.
  assert(!Partition.empty() && "Unexpected empty partition");

  const Module &M = *(*Partition.begin())->getParent();
  bool ContainsGlobalVariables = false;
  std::vector<const GlobalValue *> GVsToAdd;

  for (auto *GV : Partition)
    if (isa<GlobalAlias>(GV))
      GVsToAdd.push_back(
          cast<GlobalValue>(cast<GlobalAlias>(GV)->getAliasee()));
    else if (isa<GlobalVariable>(GV))
      ContainsGlobalVariables = true;

  for (auto &A : M.aliases())
    if (Partition.count(cast<GlobalValue>(A.getAliasee())))
      GVsToAdd.push_back(&A);

  if (ContainsGlobalVariables)
    for (auto &G : M.globals())
      GVsToAdd.push_back(&G);

  for (auto *GV : GVsToAdd)
    Partition.insert(GV);
}
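
// Called back from PartitioningIRMaterializationUnit::materialize. Consults
// the partition function to decide which of the requested globals to compile
// now; the three possible outcomes (whole module, nothing, or an extracted
// sub-module) are handled in turn below.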
void CompileOnDemandLayer2::emitPartition(
    MaterializationResponsibility R, ThreadSafeModule TSM,
    IRMaterializationUnit::SymbolNameToDefinitionMap Defs) {

  // FIXME: Need a 'notify lazy-extracting/emitting' callback to tie the
  //        extracted module key, extracted module, and source module key
  //        together. This could be used, for example, to provide a specific
  //        memory manager instance to the linking layer.

  auto &ES = getExecutionSession();

  GlobalValueSet RequestedGVs;
  for (auto &Name : R.getRequestedSymbols()) {
    assert(Defs.count(Name) && "No definition for symbol");
    RequestedGVs.insert(Defs[Name]);
  }

  auto GVsToExtract = Partition(RequestedGVs);

  // Take a 'None' partition to mean the whole module (as opposed to an empty
  // partition, which means "materialize nothing"). Emit the whole module
  // unmodified to the base layer.
  if (GVsToExtract == None) {
    Defs.clear();
    BaseLayer.emit(std::move(R), ES.allocateVModule(), std::move(TSM));
    return;
  }

  // If the partition is empty, return the whole module to the symbol table.
  if (GVsToExtract->empty()) {
    R.replace(llvm::make_unique<PartitioningIRMaterializationUnit>(
        std::move(TSM), R.getSymbols(), std::move(Defs), *this));
    return;
  }

  expandPartition(*GVsToExtract);

  // Extract the requested partition (plus any necessary aliases) and
  // put the rest back into the impl dylib.
  auto ShouldExtract = [&](const GlobalValue &GV) -> bool {
    return GVsToExtract->count(&GV);
  };

  auto ExtractedTSM = extractSubModule(TSM, ".submodule", ShouldExtract);
  R.replace(llvm::make_unique<PartitioningIRMaterializationUnit>(
      ES, std::move(TSM), *this));

  BaseLayer.emit(std::move(R), ES.allocateVModule(), std::move(ExtractedTSM));
}
} // end namespace orc
} // end namespace llvm