Kill ModuleProvider and ghost linkage by inverting the relationship between
Modules and ModuleProviders. Because the "ModuleProvider" simply materializes
GlobalValues now, and doesn't provide modules, it's renamed to
"GVMaterializer". Code that used to need a ModuleProvider to materialize
Functions can now materialize the Functions directly. Functions no longer use a
magic linkage to record that they're materializable; they simply ask the
GVMaterializer.
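
A minimal caller-side sketch of the new pattern (illustrative only, not part
of the diff; materializeIfNeeded is a hypothetical helper built on the
GlobalValue API added in this commit):

// Illustrative sketch: lazily materialize a function through the new
// GlobalValue API instead of checking for GhostLinkage and going through
// a ModuleProvider.
#include "llvm/Function.h"
#include <string>

bool materializeIfNeeded(llvm::Function *F, std::string *Err) {
  if (F->isMaterializable())     // body still sitting in the backing store?
    return F->Materialize(Err);  // true on error, false on success
  return false;                  // already material; nothing to do
}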

Because the C ABI must never change, we can't remove LLVMModuleProviderRef or
the functions that refer to it. Instead, because Module now exposes the same
functionality ModuleProvider used to, we store a Module* in any
LLVMModuleProviderRef and translate in the wrapper methods.  The bindings to
other languages still use the ModuleProvider concept.  It would probably be
worth some time to update them to follow the C++ more closely, but I don't
intend to do it.
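
A hedged sketch of what that translation looks like (not the verbatim
Core.cpp change; it only illustrates the reinterpret_cast round-trip that the
new unwrap(LLVMModuleProviderRef) overload below makes possible):

#include "llvm-c/Core.h"
#include "llvm/Module.h"
using namespace llvm;

/* Sketch only: the C functions keep their old signatures but now shuttle a
 * Module* through the opaque LLVMModuleProviderRef. */
LLVMModuleProviderRef
LLVMCreateModuleProviderForExistingModule(LLVMModuleRef M) {
  /* No ExistingModuleProvider is allocated; the Module itself travels under
   * the legacy typedef. */
  return reinterpret_cast<LLVMModuleProviderRef>(unwrap(M));
}

void LLVMDisposeModuleProvider(LLVMModuleProviderRef M) {
  delete unwrap(M); /* now simply deletes the wrapped Module */
}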

Fixes http://llvm.org/PR5737 and http://llvm.org/PR5735.

llvm-svn: 94686
Jeffrey Yasskin 2010-01-27 20:34:15 +00:00
parent 377dc2f91f
commit 091217be6f
62 changed files with 624 additions and 720 deletions

View File

@ -171,10 +171,7 @@ add a set of optimizations to run. The code looks like this:</p>
<div class="doc_code">
<pre>
ExistingModuleProvider *OurModuleProvider =
new ExistingModuleProvider(TheModule);
FunctionPassManager OurFPM(OurModuleProvider);
FunctionPassManager OurFPM(TheModule);
// Set up the optimizer pipeline. Start with registering info about how the
// target lays out data structures.
@ -198,19 +195,13 @@ add a set of optimizations to run. The code looks like this:</p>
</pre>
</div>
<p>This code defines two objects, an <tt>ExistingModuleProvider</tt> and a
<tt>FunctionPassManager</tt>. The former is basically a wrapper around our
<tt>Module</tt> that the PassManager requires. It provides certain flexibility
that we're not going to take advantage of here, so I won't dive into any details
about it.</p>
<p>The meat of the matter here, is the definition of "<tt>OurFPM</tt>". It
requires a pointer to the <tt>Module</tt> (through the <tt>ModuleProvider</tt>)
to construct itself. Once it is set up, we use a series of "add" calls to add
a bunch of LLVM passes. The first pass is basically boilerplate, it adds a pass
so that later optimizations know how the data structures in the program are
laid out. The "<tt>TheExecutionEngine</tt>" variable is related to the JIT,
which we will get to in the next section.</p>
<p>This code defines a <tt>FunctionPassManager</tt>, "<tt>OurFPM</tt>". It
requires a pointer to the <tt>Module</tt> to construct itself. Once it is set
up, we use a series of "add" calls to add a bunch of LLVM passes. The first
pass is basically boilerplate, it adds a pass so that later optimizations know
how the data structures in the program are laid out. The
"<tt>TheExecutionEngine</tt>" variable is related to the JIT, which we will get
to in the next section.</p>
<p>In this case, we choose to add 4 optimization passes. The passes we chose
here are a pretty standard set of "cleanup" optimizations that are useful for
@ -302,8 +293,8 @@ by adding a global variable and a call in <tt>main</tt>:</p>
...
int main() {
..
<b>// Create the JIT. This takes ownership of the module and module provider.
TheExecutionEngine = EngineBuilder(OurModuleProvider).create();</b>
<b>// Create the JIT. This takes ownership of the module.
TheExecutionEngine = EngineBuilder(TheModule).create();</b>
..
}
</pre>
@ -515,7 +506,6 @@ at runtime.</p>
#include "llvm/ExecutionEngine/JIT.h"
#include "llvm/LLVMContext.h"
#include "llvm/Module.h"
#include "llvm/ModuleProvider.h"
#include "llvm/PassManager.h"
#include "llvm/Analysis/Verifier.h"
#include "llvm/Target/TargetData.h"
@ -1084,13 +1074,10 @@ int main() {
// Make the module, which holds all the code.
TheModule = new Module("my cool jit", Context);
ExistingModuleProvider *OurModuleProvider =
new ExistingModuleProvider(TheModule);
// Create the JIT. This takes ownership of the module.
TheExecutionEngine = EngineBuilder(TheModule).create();
// Create the JIT. This takes ownership of the module and module provider.
TheExecutionEngine = EngineBuilder(OurModuleProvider).create();
FunctionPassManager OurFPM(OurModuleProvider);
FunctionPassManager OurFPM(TheModule);
// Set up the optimizer pipeline. Start with registering info about how the
// target lays out data structures.
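
Because the +/- markers were stripped from the hunks above, here is a
consolidated sketch of how the updated tutorial setup reads after this change
(illustrative only; the rest of the tutorial's driver loop is elided):

#include "llvm/LLVMContext.h"
#include "llvm/Module.h"
#include "llvm/PassManager.h"
#include "llvm/ExecutionEngine/ExecutionEngine.h"
#include "llvm/ExecutionEngine/JIT.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetSelect.h"
using namespace llvm;

static Module *TheModule;
static ExecutionEngine *TheExecutionEngine;

int main() {
  InitializeNativeTarget();
  LLVMContext &Context = getGlobalContext();

  // Make the module, which holds all the code. No ExistingModuleProvider
  // wrapper is created any more.
  TheModule = new Module("my cool jit", Context);

  // Create the JIT. This takes ownership of the module.
  TheExecutionEngine = EngineBuilder(TheModule).create();

  // The FunctionPassManager is likewise constructed straight from the Module.
  FunctionPassManager OurFPM(TheModule);
  OurFPM.add(new TargetData(*TheExecutionEngine->getTargetData()));
  OurFPM.doInitialization();

  // ... rest of the tutorial's driver loop is unchanged ...
  return 0;
}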

View File

@ -906,7 +906,6 @@ if/then/else and for expressions.. To build this example, use:
#include "llvm/ExecutionEngine/JIT.h"
#include "llvm/LLVMContext.h"
#include "llvm/Module.h"
#include "llvm/ModuleProvider.h"
#include "llvm/PassManager.h"
#include "llvm/Analysis/Verifier.h"
#include "llvm/Target/TargetData.h"
@ -1720,13 +1719,10 @@ int main() {
// Make the module, which holds all the code.
TheModule = new Module("my cool jit", Context);
ExistingModuleProvider *OurModuleProvider =
new ExistingModuleProvider(TheModule);
// Create the JIT. This takes ownership of the module.
TheExecutionEngine = EngineBuilder(TheModule).create();
// Create the JIT. This takes ownership of the module and module provider.
TheExecutionEngine = EngineBuilder(OurModuleProvider).create();
FunctionPassManager OurFPM(OurModuleProvider);
FunctionPassManager OurFPM(TheModule);
// Set up the optimizer pipeline. Start with registering info about how the
// target lays out data structures.

View File

@ -825,7 +825,6 @@ if/then/else and for expressions.. To build this example, use:
#include "llvm/ExecutionEngine/JIT.h"
#include "llvm/LLVMContext.h"
#include "llvm/Module.h"
#include "llvm/ModuleProvider.h"
#include "llvm/PassManager.h"
#include "llvm/Analysis/Verifier.h"
#include "llvm/Target/TargetData.h"
@ -1757,13 +1756,10 @@ int main() {
// Make the module, which holds all the code.
TheModule = new Module("my cool jit", Context);
ExistingModuleProvider *OurModuleProvider =
new ExistingModuleProvider(TheModule);
// Create the JIT. This takes ownership of the module.
TheExecutionEngine = EngineBuilder(TheModule).create();
// Create the JIT. This takes ownership of the module and module provider.
TheExecutionEngine = EngineBuilder(OurModuleProvider).create();
FunctionPassManager OurFPM(OurModuleProvider);
FunctionPassManager OurFPM(TheModule);
// Set up the optimizer pipeline. Start with registering info about how the
// target lays out data structures.

View File

@ -1008,7 +1008,6 @@ variables and var/in support. To build this example, use:
#include "llvm/ExecutionEngine/JIT.h"
#include "llvm/LLVMContext.h"
#include "llvm/Module.h"
#include "llvm/ModuleProvider.h"
#include "llvm/PassManager.h"
#include "llvm/Analysis/Verifier.h"
#include "llvm/Target/TargetData.h"
@ -2105,13 +2104,10 @@ int main() {
// Make the module, which holds all the code.
TheModule = new Module("my cool jit", Context);
ExistingModuleProvider *OurModuleProvider =
new ExistingModuleProvider(TheModule);
// Create the JIT. This takes ownership of the module.
TheExecutionEngine = EngineBuilder(TheModule).create();
// Create the JIT. This takes ownership of the module and module provider.
TheExecutionEngine = EngineBuilder(OurModuleProvider).create();
FunctionPassManager OurFPM(OurModuleProvider);
FunctionPassManager OurFPM(TheModule);
// Set up the optimizer pipeline. Start with registering info about how the
// target lays out data structures.

View File

@ -27,7 +27,6 @@
#include "BrainF.h"
#include "llvm/Constants.h"
#include "llvm/ModuleProvider.h"
#include "llvm/Analysis/Verifier.h"
#include "llvm/Bitcode/ReaderWriter.h"
#include "llvm/ExecutionEngine/GenericValue.h"

View File

@ -28,7 +28,6 @@
#include "llvm/DerivedTypes.h"
#include "llvm/Constants.h"
#include "llvm/Instructions.h"
#include "llvm/ModuleProvider.h"
#include "llvm/Analysis/Verifier.h"
#include "llvm/ExecutionEngine/JIT.h"
#include "llvm/ExecutionEngine/Interpreter.h"

View File

@ -39,7 +39,6 @@
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Instructions.h"
#include "llvm/ModuleProvider.h"
#include "llvm/ExecutionEngine/JIT.h"
#include "llvm/ExecutionEngine/Interpreter.h"
#include "llvm/ExecutionEngine/GenericValue.h"

View File

@ -4,7 +4,6 @@
#include "llvm/ExecutionEngine/JIT.h"
#include "llvm/LLVMContext.h"
#include "llvm/Module.h"
#include "llvm/ModuleProvider.h"
#include "llvm/PassManager.h"
#include "llvm/Analysis/Verifier.h"
#include "llvm/Target/TargetData.h"
@ -573,13 +572,10 @@ int main() {
// Make the module, which holds all the code.
TheModule = new Module("my cool jit", Context);
ExistingModuleProvider *OurModuleProvider =
new ExistingModuleProvider(TheModule);
// Create the JIT. This takes ownership of the module.
TheExecutionEngine = EngineBuilder(TheModule).create();
// Create the JIT. This takes ownership of the module and module provider.
TheExecutionEngine = EngineBuilder(OurModuleProvider).create();
FunctionPassManager OurFPM(OurModuleProvider);
FunctionPassManager OurFPM(TheModule);
// Set up the optimizer pipeline. Start with registering info about how the
// target lays out data structures.

View File

@ -4,7 +4,6 @@
#include "llvm/ExecutionEngine/JIT.h"
#include "llvm/LLVMContext.h"
#include "llvm/Module.h"
#include "llvm/ModuleProvider.h"
#include "llvm/PassManager.h"
#include "llvm/Analysis/Verifier.h"
#include "llvm/Target/TargetData.h"
@ -818,13 +817,10 @@ int main() {
// Make the module, which holds all the code.
TheModule = new Module("my cool jit", Context);
ExistingModuleProvider *OurModuleProvider =
new ExistingModuleProvider(TheModule);
// Create the JIT. This takes ownership of the module.
TheExecutionEngine = EngineBuilder(TheModule).create();
// Create the JIT. This takes ownership of the module and module provider.
TheExecutionEngine = EngineBuilder(OurModuleProvider).create();
FunctionPassManager OurFPM(OurModuleProvider);
FunctionPassManager OurFPM(TheModule);
// Set up the optimizer pipeline. Start with registering info about how the
// target lays out data structures.

View File

@ -4,7 +4,6 @@
#include "llvm/ExecutionEngine/JIT.h"
#include "llvm/LLVMContext.h"
#include "llvm/Module.h"
#include "llvm/ModuleProvider.h"
#include "llvm/PassManager.h"
#include "llvm/Analysis/Verifier.h"
#include "llvm/Target/TargetData.h"
@ -936,13 +935,10 @@ int main() {
// Make the module, which holds all the code.
TheModule = new Module("my cool jit", Context);
ExistingModuleProvider *OurModuleProvider =
new ExistingModuleProvider(TheModule);
// Create the JIT. This takes ownership of the module.
TheExecutionEngine = EngineBuilder(TheModule).create();
// Create the JIT. This takes ownership of the module and module provider.
TheExecutionEngine = EngineBuilder(OurModuleProvider).create();
FunctionPassManager OurFPM(OurModuleProvider);
FunctionPassManager OurFPM(TheModule);
// Set up the optimizer pipeline. Start with registering info about how the
// target lays out data structures.

View File

@ -4,7 +4,6 @@
#include "llvm/ExecutionEngine/JIT.h"
#include "llvm/LLVMContext.h"
#include "llvm/Module.h"
#include "llvm/ModuleProvider.h"
#include "llvm/PassManager.h"
#include "llvm/Analysis/Verifier.h"
#include "llvm/Target/TargetData.h"
@ -1100,13 +1099,10 @@ int main() {
// Make the module, which holds all the code.
TheModule = new Module("my cool jit", Context);
ExistingModuleProvider *OurModuleProvider =
new ExistingModuleProvider(TheModule);
// Create the JIT. This takes ownership of the module.
TheExecutionEngine = EngineBuilder(TheModule).create();
// Create the JIT. This takes ownership of the module and module provider.
TheExecutionEngine = EngineBuilder(OurModuleProvider).create();
FunctionPassManager OurFPM(OurModuleProvider);
FunctionPassManager OurFPM(TheModule);
// Set up the optimizer pipeline. Start with registering info about how the
// target lays out data structures.

View File

@ -23,7 +23,6 @@
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Instructions.h"
#include "llvm/ModuleProvider.h"
#include "llvm/ExecutionEngine/JIT.h"
#include "llvm/ExecutionEngine/Interpreter.h"
#include "llvm/ExecutionEngine/GenericValue.h"

View File

@ -78,8 +78,9 @@ typedef struct LLVMOpaqueValue *LLVMValueRef;
typedef struct LLVMOpaqueBasicBlock *LLVMBasicBlockRef;
typedef struct LLVMOpaqueBuilder *LLVMBuilderRef;
/* Used to provide a module to JIT or interpreter.
* See the llvm::ModuleProvider class.
/* Interface used to provide a module to JIT or interpreter. This is now just a
* synonym for llvm::Module, but we have to keep using the different type to
* keep binary compatibility.
*/
typedef struct LLVMOpaqueModuleProvider *LLVMModuleProviderRef;
@ -210,8 +211,7 @@ typedef enum {
LLVMDLLImportLinkage, /**< Function to be imported from DLL */
LLVMDLLExportLinkage, /**< Function to be accessible from DLL */
LLVMExternalWeakLinkage,/**< ExternalWeak linkage description */
LLVMGhostLinkage, /**< Stand-in functions for streaming fns from
bitcode */
LLVMGhostLinkage, /**< Obsolete */
LLVMCommonLinkage, /**< Tentative definitions */
LLVMLinkerPrivateLinkage /**< Like Private, but linker removes. */
} LLVMLinkage;
@ -914,17 +914,15 @@ LLVMValueRef LLVMBuildPtrDiff(LLVMBuilderRef, LLVMValueRef LHS,
/*===-- Module providers --------------------------------------------------===*/
/* Encapsulates the module M in a module provider, taking ownership of the
* module.
* See the constructor llvm::ExistingModuleProvider::ExistingModuleProvider.
/* Changes the type of M so it can be passed to FunctionPassManagers and the
* JIT. They take ModuleProviders for historical reasons.
*/
LLVMModuleProviderRef
LLVMCreateModuleProviderForExistingModule(LLVMModuleRef M);
/* Destroys the module provider MP as well as the contained module.
* See the destructor llvm::ModuleProvider::~ModuleProvider.
/* Destroys the module M.
*/
void LLVMDisposeModuleProvider(LLVMModuleProviderRef MP);
void LLVMDisposeModuleProvider(LLVMModuleProviderRef M);
/*===-- Memory buffers ----------------------------------------------------===*/
@ -981,7 +979,6 @@ void LLVMDisposePassManager(LLVMPassManagerRef PM);
}
namespace llvm {
class ModuleProvider;
class MemoryBuffer;
class PassManagerBase;
@ -1018,11 +1015,16 @@ namespace llvm {
DEFINE_SIMPLE_CONVERSION_FUNCTIONS(BasicBlock, LLVMBasicBlockRef )
DEFINE_SIMPLE_CONVERSION_FUNCTIONS(IRBuilder<>, LLVMBuilderRef )
DEFINE_SIMPLE_CONVERSION_FUNCTIONS(PATypeHolder, LLVMTypeHandleRef )
DEFINE_SIMPLE_CONVERSION_FUNCTIONS(ModuleProvider, LLVMModuleProviderRef)
DEFINE_SIMPLE_CONVERSION_FUNCTIONS(MemoryBuffer, LLVMMemoryBufferRef )
DEFINE_SIMPLE_CONVERSION_FUNCTIONS(LLVMContext, LLVMContextRef )
DEFINE_SIMPLE_CONVERSION_FUNCTIONS(Use, LLVMUseIteratorRef )
DEFINE_STDCXX_CONVERSION_FUNCTIONS(PassManagerBase, LLVMPassManagerRef )
/* LLVMModuleProviderRef exists for historical reasons, but now just holds a
* Module.
*/
inline Module *unwrap(LLVMModuleProviderRef MP) {
return reinterpret_cast<Module*>(MP);
}
#undef DEFINE_STDCXX_CONVERSION_FUNCTIONS
#undef DEFINE_ISA_CONVERSION_FUNCTIONS

View File

@ -27,7 +27,6 @@ namespace llvm {
class MemoryBuffer;
// Forward declare classes
class ModuleProvider; // From VMCore
class Module; // From VMCore
class Archive; // Declared below
class ArchiveMemberHeader; // Internal implementation class
@ -374,14 +373,14 @@ class Archive {
/// returns the associated module that defines that symbol. This method can
/// be called as many times as necessary. This is handy for linking the
/// archive into another module based on unresolved symbols. Note that the
/// ModuleProvider returned by this accessor should not be deleted by the
/// caller. It is managed internally by the Archive class. It is possible
/// that multiple calls to this accessor will return the same ModuleProvider
/// instance because the associated module defines multiple symbols.
/// @returns The ModuleProvider* found or null if the archive does not
/// contain a module that defines the \p symbol.
/// Module returned by this accessor should not be deleted by the caller. It
/// is managed internally by the Archive class. It is possible that multiple
/// calls to this accessor will return the same Module instance because the
/// associated module defines multiple symbols.
/// @returns The Module* found or null if the archive does not contain a
/// module that defines the \p symbol.
/// @brief Look up a module by symbol name.
ModuleProvider* findModuleDefiningSymbol(
Module* findModuleDefiningSymbol(
const std::string& symbol, ///< Symbol to be sought
std::string* ErrMessage ///< Error message storage, if non-zero
);
@ -397,7 +396,7 @@ class Archive {
/// @brief Look up multiple symbols in the archive.
bool findModulesDefiningSymbols(
std::set<std::string>& symbols, ///< Symbols to be sought
std::set<ModuleProvider*>& modules, ///< The modules matching \p symbols
std::set<Module*>& modules, ///< The modules matching \p symbols
std::string* ErrMessage ///< Error msg storage, if non-zero
);
@ -513,9 +512,9 @@ class Archive {
/// This type is used to keep track of bitcode modules loaded from the
/// symbol table. It maps the file offset to a pair that consists of the
/// associated ArchiveMember and the ModuleProvider.
/// associated ArchiveMember and the Module.
/// @brief Module mapping type
typedef std::map<unsigned,std::pair<ModuleProvider*,ArchiveMember*> >
typedef std::map<unsigned,std::pair<Module*,ArchiveMember*> >
ModuleMap;

View File

@ -18,21 +18,20 @@
namespace llvm {
class Module;
class ModuleProvider;
class MemoryBuffer;
class ModulePass;
class BitstreamWriter;
class LLVMContext;
class raw_ostream;
/// getBitcodeModuleProvider - Read the header of the specified bitcode buffer
/// getLazyBitcodeModule - Read the header of the specified bitcode buffer
/// and prepare for lazy deserialization of function bodies. If successful,
/// this takes ownership of 'buffer' and returns a non-null pointer. On
/// error, this returns null, *does not* take ownership of Buffer, and fills
/// in *ErrMsg with an error description if ErrMsg is non-null.
ModuleProvider *getBitcodeModuleProvider(MemoryBuffer *Buffer,
LLVMContext& Context,
std::string *ErrMsg = 0);
Module *getLazyBitcodeModule(MemoryBuffer *Buffer,
LLVMContext& Context,
std::string *ErrMsg = 0);
/// ParseBitcodeFile - Read the specified bitcode file, returning the module.
/// If an error occurs, this returns null and fills in *ErrMsg if it is

View File

@ -36,7 +36,6 @@ class JITEventListener;
class JITMemoryManager;
class MachineCodeInfo;
class Module;
class ModuleProvider;
class MutexGuard;
class TargetData;
class Type;
@ -95,9 +94,9 @@ class ExecutionEngine {
friend class EngineBuilder; // To allow access to JITCtor and InterpCtor.
protected:
/// Modules - This is a list of ModuleProvider's that we are JIT'ing from. We
/// use a smallvector to optimize for the case where there is only one module.
SmallVector<ModuleProvider*, 1> Modules;
/// Modules - This is a list of Modules that we are JIT'ing from. We use a
/// smallvector to optimize for the case where there is only one module.
SmallVector<Module*, 1> Modules;
void setTargetData(const TargetData *td) {
TD = td;
@ -109,13 +108,13 @@ protected:
// To avoid having libexecutionengine depend on the JIT and interpreter
// libraries, the JIT and Interpreter set these functions to ctor pointers
// at startup time if they are linked in.
static ExecutionEngine *(*JITCtor)(ModuleProvider *MP,
static ExecutionEngine *(*JITCtor)(Module *M,
std::string *ErrorStr,
JITMemoryManager *JMM,
CodeGenOpt::Level OptLevel,
bool GVsWithCode,
CodeModel::Model CMM);
static ExecutionEngine *(*InterpCtor)(ModuleProvider *MP,
static ExecutionEngine *(*InterpCtor)(Module *M,
std::string *ErrorStr);
/// LazyFunctionCreator - If an unknown function is needed, this function
@ -141,8 +140,8 @@ public:
/// create - This is the factory method for creating an execution engine which
/// is appropriate for the current machine. This takes ownership of the
/// module provider.
static ExecutionEngine *create(ModuleProvider *MP,
/// module.
static ExecutionEngine *create(Module *M,
bool ForceInterpreter = false,
std::string *ErrorStr = 0,
CodeGenOpt::Level OptLevel =
@ -165,11 +164,11 @@ public:
/// createJIT - This is the factory method for creating a JIT for the current
/// machine, it does not fall back to the interpreter. This takes ownership
/// of the ModuleProvider and JITMemoryManager if successful.
/// of the Module and JITMemoryManager if successful.
///
/// Clients should make sure to initialize targets prior to calling this
/// function.
static ExecutionEngine *createJIT(ModuleProvider *MP,
static ExecutionEngine *createJIT(Module *M,
std::string *ErrorStr = 0,
JITMemoryManager *JMM = 0,
CodeGenOpt::Level OptLevel =
@ -178,11 +177,11 @@ public:
CodeModel::Model CMM =
CodeModel::Default);
/// addModuleProvider - Add a ModuleProvider to the list of modules that we
/// can JIT from. Note that this takes ownership of the ModuleProvider: when
/// the ExecutionEngine is destroyed, it destroys the MP as well.
virtual void addModuleProvider(ModuleProvider *P) {
Modules.push_back(P);
/// addModule - Add a Module to the list of modules that we can JIT from.
/// Note that this takes ownership of the Module: when the ExecutionEngine is
/// destroyed, it destroys the Module as well.
virtual void addModule(Module *M) {
Modules.push_back(M);
}
//===----------------------------------------------------------------------===//
@ -190,16 +189,9 @@ public:
const TargetData *getTargetData() const { return TD; }
/// removeModuleProvider - Remove a ModuleProvider from the list of modules.
/// Relases the Module from the ModuleProvider, materializing it in the
/// process, and returns the materialized Module.
virtual Module* removeModuleProvider(ModuleProvider *P,
std::string *ErrInfo = 0);
/// deleteModuleProvider - Remove a ModuleProvider from the list of modules,
/// and deletes the ModuleProvider and owned Module. Avoids materializing
/// the underlying module.
virtual void deleteModuleProvider(ModuleProvider *P,std::string *ErrInfo = 0);
/// removeModule - Remove a Module from the list of modules. Returns true if
/// M is found.
virtual bool removeModule(Module *M);
/// FindFunctionNamed - Search all of the active modules to find the one that
/// defines FnName. This is very slow operation and shouldn't be used for
@ -393,7 +385,7 @@ public:
}
protected:
explicit ExecutionEngine(ModuleProvider *P);
explicit ExecutionEngine(Module *M);
void emitGlobals();
@ -422,7 +414,7 @@ namespace EngineKind {
class EngineBuilder {
private:
ModuleProvider *MP;
Module *M;
EngineKind::Kind WhichEngine;
std::string *ErrorStr;
CodeGenOpt::Level OptLevel;
@ -443,16 +435,11 @@ class EngineBuilder {
public:
/// EngineBuilder - Constructor for EngineBuilder. If create() is called and
/// is successful, the created engine takes ownership of the module
/// provider.
EngineBuilder(ModuleProvider *mp) : MP(mp) {
/// is successful, the created engine takes ownership of the module.
EngineBuilder(Module *m) : M(m) {
InitEngine();
}
/// EngineBuilder - Overloaded constructor that automatically creates an
/// ExistingModuleProvider for an existing module.
EngineBuilder(Module *m);
/// setEngineKind - Controls whether the user wants the interpreter, the JIT,
/// or whichever engine works. This option defaults to EngineKind::Either.
EngineBuilder &setEngineKind(EngineKind::Kind w) {

View File

@ -0,0 +1,66 @@
//===-- llvm/GVMaterializer.h - Interface for GV materializers --*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file provides an abstract interface for loading a module from some
// place. This interface allows incremental or random access loading of
// functions from the file. This is useful for applications like JIT compilers
// or interprocedural optimizers that do not need the entire program in memory
// at the same time.
//
//===----------------------------------------------------------------------===//
#ifndef GVMATERIALIZER_H
#define GVMATERIALIZER_H
#include <string>
namespace llvm {
class Function;
class GlobalValue;
class Module;
class GVMaterializer {
protected:
GVMaterializer() {}
public:
virtual ~GVMaterializer();
/// isMaterializable - True if GV can be materialized from whatever backing
/// store this GVMaterializer uses and has not been materialized yet.
virtual bool isMaterializable(const GlobalValue *GV) const = 0;
/// isDematerializable - True if GV has been materialized and can be
/// dematerialized back to whatever backing store this GVMaterializer uses.
virtual bool isDematerializable(const GlobalValue *GV) const = 0;
/// Materialize - make sure the given GlobalValue is fully read. If the
/// module is corrupt, this returns true and fills in the optional string with
/// information about the problem. If successful, this returns false.
///
virtual bool Materialize(GlobalValue *GV, std::string *ErrInfo = 0) = 0;
/// Dematerialize - If the given GlobalValue is read in, and if the
/// GVMaterializer supports it, release the memory for the GV, and set it up
/// to be materialized lazily. If the Materializer doesn't support this
/// capability, this method is a noop.
///
virtual void Dematerialize(GlobalValue *) {}
/// MaterializeModule - make sure the entire Module has been completely read.
/// On error, this returns true and fills in the optional string with
/// information about the problem. If successful, this returns false.
///
virtual bool MaterializeModule(Module *M, std::string *ErrInfo = 0) = 0;
};
} // End llvm namespace
#endif
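
A minimal sketch of how a materializer attaches to a Module, assuming only
the interface above (EagerMaterializer is a hypothetical no-op implementation,
not something this commit adds):

#include "llvm/GVMaterializer.h"
#include "llvm/Module.h"
#include <string>

namespace {
// Hypothetical materializer whose backing store has nothing left to stream
// in; every query reports the "already materialized" state.
class EagerMaterializer : public llvm::GVMaterializer {
public:
  virtual bool isMaterializable(const llvm::GlobalValue *) const { return false; }
  virtual bool isDematerializable(const llvm::GlobalValue *) const { return false; }
  virtual bool Materialize(llvm::GlobalValue *, std::string * = 0) { return false; }
  virtual bool MaterializeModule(llvm::Module *, std::string * = 0) { return false; }
};
}

void attachMaterializer(llvm::Module &M) {
  // The Module owns its materializer (OwningPtr) and consults it from
  // GlobalValue::isMaterializable()/Materialize().
  M.setMaterializer(new EagerMaterializer());
}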

View File

@ -43,7 +43,6 @@ public:
DLLImportLinkage, ///< Function to be imported from DLL
DLLExportLinkage, ///< Function to be accessible from DLL.
ExternalWeakLinkage,///< ExternalWeak linkage description.
GhostLinkage, ///< Stand-in functions for streaming fns from BC files.
CommonLinkage ///< Tentative definitions.
};
@ -132,7 +131,6 @@ public:
bool hasDLLImportLinkage() const { return Linkage == DLLImportLinkage; }
bool hasDLLExportLinkage() const { return Linkage == DLLExportLinkage; }
bool hasExternalWeakLinkage() const { return Linkage == ExternalWeakLinkage; }
bool hasGhostLinkage() const { return Linkage == GhostLinkage; }
bool hasCommonLinkage() const { return Linkage == CommonLinkage; }
void setLinkage(LinkageTypes LT) { Linkage = LT; }
@ -164,12 +162,33 @@ public:
/// create a GlobalValue) from the GlobalValue Src to this one.
virtual void copyAttributesFrom(const GlobalValue *Src);
/// hasNotBeenReadFromBitcode - If a module provider is being used to lazily
/// stream in functions from disk, this method can be used to check to see if
/// the function has been read in yet or not. Unless you are working on the
/// JIT or something else that streams stuff in lazily, you don't need to
/// worry about this.
bool hasNotBeenReadFromBitcode() const { return Linkage == GhostLinkage; }
/// @name Materialization
/// Materialization is used to construct functions only as they're needed. This
/// is useful to reduce memory usage in LLVM or parsing work done by the
/// BitcodeReader to load the Module.
/// @{
/// isMaterializable - If this function's Module is being lazily streamed in
/// functions from disk or some other source, this method can be used to check
/// to see if the function has been read in yet or not.
bool isMaterializable() const;
/// isDematerializable - Returns true if this function was loaded from a
/// GVMaterializer that's still attached to its Module and that knows how to
/// dematerialize the function.
bool isDematerializable() const;
/// Materialize - make sure this GlobalValue is fully read. If the module is
/// corrupt, this returns true and fills in the optional string with
/// information about the problem. If successful, this returns false.
bool Materialize(std::string *ErrInfo = 0);
/// Dematerialize - If this GlobalValue is read in, and if the GVMaterializer
/// supports it, release the memory for the function, and set it up to be
/// materialized lazily. If !isDematerializable(), this method is a noop.
void Dematerialize();
/// @}
/// Override from Constant class. No GlobalValue's are null values so this
/// always returns false.

View File

@ -19,12 +19,14 @@
#include "llvm/GlobalVariable.h"
#include "llvm/GlobalAlias.h"
#include "llvm/Metadata.h"
#include "llvm/ADT/OwningPtr.h"
#include "llvm/System/DataTypes.h"
#include <vector>
namespace llvm {
class FunctionType;
class GVMaterializer;
class LLVMContext;
class MDSymbolTable;
@ -145,6 +147,7 @@ private:
std::string GlobalScopeAsm; ///< Inline Asm at global scope.
ValueSymbolTable *ValSymTab; ///< Symbol table for values
TypeSymbolTable *TypeSymTab; ///< Symbol table for types
OwningPtr<GVMaterializer> Materializer; ///< Used to materialize GlobalValues
std::string ModuleID; ///< Human readable identifier for the module
std::string TargetTriple; ///< Platform target triple Module compiled on
std::string DataLayout; ///< Target data description
@ -346,6 +349,50 @@ public:
/// null if there is none by that name.
const Type *getTypeByName(StringRef Name) const;
/// @}
/// @name Materialization
/// @{
/// setMaterializer - Sets the GVMaterializer to GVM. This module must not
/// yet have a Materializer. To reset the materializer for a module that
/// already has one, call MaterializeAllPermanently first. Destroying this
/// module will destroy its materializer without materializing any more
/// GlobalValues. Without destroying the Module, there is no way to detach or
/// destroy a materializer without materializing all the GVs it controls, to
/// avoid leaving orphan unmaterialized GVs.
void setMaterializer(GVMaterializer *GVM);
/// getMaterializer - Retrieves the GVMaterializer, if any, for this Module.
GVMaterializer *getMaterializer() const { return Materializer.get(); }
/// isMaterializable - True if the definition of GV has yet to be materialized
/// from the GVMaterializer.
bool isMaterializable(const GlobalValue *GV) const;
/// isDematerializable - Returns true if this GV was loaded from this Module's
/// GVMaterializer and the GVMaterializer knows how to dematerialize the GV.
bool isDematerializable(const GlobalValue *GV) const;
/// Materialize - Make sure the GlobalValue is fully read. If the module is
/// corrupt, this returns true and fills in the optional string with
/// information about the problem. If successful, this returns false.
bool Materialize(GlobalValue *GV, std::string *ErrInfo = 0);
/// Dematerialize - If the GlobalValue is read in, and if the GVMaterializer
/// supports it, release the memory for the function, and set it up to be
/// materialized lazily. If !isDematerializable(), this method is a noop.
void Dematerialize(GlobalValue *GV);
/// MaterializeAll - Make sure all GlobalValues in this Module are fully read.
/// If the module is corrupt, this returns true and fills in the optional
/// string with information about the problem. If successful, this returns
/// false.
bool MaterializeAll(std::string *ErrInfo = 0);
/// MaterializeAllPermanently - Make sure all GlobalValues in this Module are
/// fully read and clear the Materializer. If the module is corrupt, this
/// returns true, fills in the optional string with information about the
/// problem, and DOES NOT clear the old Materializer. If successful, this
/// returns false.
bool MaterializeAllPermanently(std::string *ErrInfo = 0);
/// @}
/// @name Direct access to the globals list, functions list, and symbol table
/// @{

View File

@ -1,88 +0,0 @@
//===-- llvm/ModuleProvider.h - Interface for module providers --*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file provides an abstract interface for loading a module from some
// place. This interface allows incremental or random access loading of
// functions from the file. This is useful for applications like JIT compilers
// or interprocedural optimizers that do not need the entire program in memory
// at the same time.
//
//===----------------------------------------------------------------------===//
#ifndef MODULEPROVIDER_H
#define MODULEPROVIDER_H
#include <string>
namespace llvm {
class Function;
class Module;
class ModuleProvider {
protected:
Module *TheModule;
ModuleProvider();
public:
virtual ~ModuleProvider();
/// getModule - returns the module this provider is encapsulating.
///
Module* getModule() { return TheModule; }
/// materializeFunction - make sure the given function is fully read. If the
/// module is corrupt, this returns true and fills in the optional string
/// with information about the problem. If successful, this returns false.
///
virtual bool materializeFunction(Function *F, std::string *ErrInfo = 0) = 0;
/// dematerializeFunction - If the given function is read in, and if the
/// module provider supports it, release the memory for the function, and set
/// it up to be materialized lazily. If the provider doesn't support this
/// capability, this method is a noop.
///
virtual void dematerializeFunction(Function *) {}
/// materializeModule - make sure the entire Module has been completely read.
/// On error, return null and fill in the error string if specified.
///
virtual Module* materializeModule(std::string *ErrInfo = 0) = 0;
/// releaseModule - no longer delete the Module* when provider is destroyed.
/// On error, return null and fill in the error string if specified.
///
virtual Module* releaseModule(std::string *ErrInfo = 0) {
// Since we're losing control of this Module, we must hand it back complete
if (!materializeModule(ErrInfo))
return 0;
Module *tempM = TheModule;
TheModule = 0;
return tempM;
}
};
/// ExistingModuleProvider - Allow conversion from a fully materialized Module
/// into a ModuleProvider, allowing code that expects a ModuleProvider to work
/// if we just have a Module. Note that the ModuleProvider takes ownership of
/// the Module specified.
struct ExistingModuleProvider : public ModuleProvider {
explicit ExistingModuleProvider(Module *M) {
TheModule = M;
}
bool materializeFunction(Function *, std::string * = 0) {
return false;
}
Module* materializeModule(std::string * = 0) { return TheModule; }
};
} // End llvm namespace
#endif

View File

@ -24,7 +24,6 @@ namespace llvm {
class Pass;
class ModulePass;
class Module;
class ModuleProvider;
class PassManagerImpl;
class FunctionPassManagerImpl;
@ -71,8 +70,8 @@ private:
class FunctionPassManager : public PassManagerBase {
public:
/// FunctionPassManager ctor - This initializes the pass manager. It needs,
/// but does not take ownership of, the specified module provider.
explicit FunctionPassManager(ModuleProvider *P);
/// but does not take ownership of, the specified Module.
explicit FunctionPassManager(Module *M);
~FunctionPassManager();
/// add - Add a pass to the queue of passes to run. This passes
@ -96,15 +95,9 @@ public:
///
bool doFinalization();
/// getModuleProvider - Return the module provider that this passmanager is
/// currently using. This is the module provider that it uses when a function
/// is optimized that is non-resident in the module.
ModuleProvider *getModuleProvider() const { return MP; }
void setModuleProvider(ModuleProvider *NewMP) { MP = NewMP; }
private:
FunctionPassManagerImpl *FPM;
ModuleProvider *MP;
Module *M;
};
} // End llvm namespace

View File

@ -23,44 +23,39 @@
#include "llvm/Bitcode/ReaderWriter.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/ModuleProvider.h"
namespace llvm {
/// If the given MemoryBuffer holds a bitcode image, return a ModuleProvider
/// for it which does lazy deserialization of function bodies. Otherwise,
/// attempt to parse it as LLVM Assembly and return a fully populated
/// ModuleProvider. This function *always* takes ownership of the given
/// MemoryBuffer.
inline ModuleProvider *getIRModuleProvider(MemoryBuffer *Buffer,
SMDiagnostic &Err,
LLVMContext &Context) {
/// If the given MemoryBuffer holds a bitcode image, return a Module for it
/// which does lazy deserialization of function bodies. Otherwise, attempt to
/// parse it as LLVM Assembly and return a fully populated Module. This
/// function *always* takes ownership of the given MemoryBuffer.
inline Module *getIRModule(MemoryBuffer *Buffer,
SMDiagnostic &Err,
LLVMContext &Context) {
if (isBitcode((const unsigned char *)Buffer->getBufferStart(),
(const unsigned char *)Buffer->getBufferEnd())) {
std::string ErrMsg;
ModuleProvider *MP = getBitcodeModuleProvider(Buffer, Context, &ErrMsg);
if (MP == 0) {
Module *M = getLazyBitcodeModule(Buffer, Context, &ErrMsg);
if (M == 0) {
Err = SMDiagnostic(Buffer->getBufferIdentifier(), -1, -1, ErrMsg, "");
// ParseBitcodeFile does not take ownership of the Buffer in the
// case of an error.
delete Buffer;
}
return MP;
return M;
}
Module *M = ParseAssembly(Buffer, 0, Err, Context);
if (M == 0)
return 0;
return new ExistingModuleProvider(M);
return ParseAssembly(Buffer, 0, Err, Context);
}
/// If the given file holds a bitcode image, return a ModuleProvider
/// If the given file holds a bitcode image, return a Module
/// for it which does lazy deserialization of function bodies. Otherwise,
/// attempt to parse it as LLVM Assembly and return a fully populated
/// ModuleProvider.
inline ModuleProvider *getIRFileModuleProvider(const std::string &Filename,
SMDiagnostic &Err,
LLVMContext &Context) {
/// Module.
inline Module *getIRFileModule(const std::string &Filename,
SMDiagnostic &Err,
LLVMContext &Context) {
std::string ErrMsg;
MemoryBuffer *F = MemoryBuffer::getFileOrSTDIN(Filename.c_str(), &ErrMsg);
if (F == 0) {
@ -69,7 +64,7 @@ namespace llvm {
return 0;
}
return getIRModuleProvider(F, Err, Context);
return getIRModule(F, Err, Context);
}
/// If the given MemoryBuffer holds a bitcode image, return a Module

View File

@ -14,7 +14,6 @@
#include "ArchiveInternals.h"
#include "llvm/Bitcode/ReaderWriter.h"
#include "llvm/ModuleProvider.h"
#include "llvm/Module.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/System/Process.h"
@ -173,8 +172,8 @@ void Archive::cleanUpMemory() {
foreignST = 0;
}
// Delete any ModuleProviders and ArchiveMember's we've allocated as a result
// of symbol table searches.
// Delete any Modules and ArchiveMember's we've allocated as a result of
// symbol table searches.
for (ModuleMap::iterator I=modules.begin(), E=modules.end(); I != E; ++I ) {
delete I->second.first;
delete I->second.second;
@ -221,51 +220,37 @@ bool llvm::GetBitcodeSymbols(const sys::Path& fName,
return true;
}
ModuleProvider *MP = getBitcodeModuleProvider(Buffer.get(), Context, ErrMsg);
if (!MP)
Module *M = ParseBitcodeFile(Buffer.get(), Context, ErrMsg);
if (!M)
return true;
// Get the module from the provider
Module* M = MP->materializeModule();
if (M == 0) {
delete MP;
return true;
}
// Get the symbols
getSymbols(M, symbols);
// Done with the module.
delete MP;
delete M;
return true;
}
ModuleProvider*
Module*
llvm::GetBitcodeSymbols(const unsigned char *BufPtr, unsigned Length,
const std::string& ModuleID,
LLVMContext& Context,
std::vector<std::string>& symbols,
std::string* ErrMsg) {
// Get the module provider
MemoryBuffer *Buffer =MemoryBuffer::getNewMemBuffer(Length, ModuleID.c_str());
// Get the module.
std::auto_ptr<MemoryBuffer> Buffer(
MemoryBuffer::getNewMemBuffer(Length, ModuleID.c_str()));
memcpy((char*)Buffer->getBufferStart(), BufPtr, Length);
ModuleProvider *MP = getBitcodeModuleProvider(Buffer, Context, ErrMsg);
if (!MP)
Module *M = ParseBitcodeFile(Buffer.get(), Context, ErrMsg);
if (!M)
return 0;
// Get the module from the provider
Module* M = MP->materializeModule();
if (M == 0) {
delete MP;
return 0;
}
// Get the symbols
getSymbols(M, symbols);
// Done with the module. Note that ModuleProvider will delete the
// Module when it is deleted. Also note that its the caller's responsibility
// to delete the ModuleProvider.
return MP;
// Done with the module. Note that it's the caller's responsibility to delete
// the Module.
return M;
}

View File

@ -77,11 +77,11 @@ namespace llvm {
std::vector<std::string>& symbols,
std::string* ErrMsg);
ModuleProvider* GetBitcodeSymbols(const unsigned char*Buffer,unsigned Length,
const std::string& ModuleID,
LLVMContext& Context,
std::vector<std::string>& symbols,
std::string* ErrMsg);
Module* GetBitcodeSymbols(const unsigned char*Buffer,unsigned Length,
const std::string& ModuleID,
LLVMContext& Context,
std::vector<std::string>& symbols,
std::string* ErrMsg);
}
#endif

View File

@ -452,9 +452,9 @@ Archive* Archive::OpenAndLoadSymbols(const sys::Path& file,
return result.release();
}
// Look up one symbol in the symbol table and return a ModuleProvider for the
// module that defines that symbol.
ModuleProvider*
// Look up one symbol in the symbol table and return the module that defines
// that symbol.
Module*
Archive::findModuleDefiningSymbol(const std::string& symbol,
std::string* ErrMsg) {
SymTabType::iterator SI = symTab.find(symbol);
@ -483,27 +483,27 @@ Archive::findModuleDefiningSymbol(const std::string& symbol,
if (!mbr)
return 0;
// Now, load the bitcode module to get the ModuleProvider
// Now, load the bitcode module to get the Module.
std::string FullMemberName = archPath.str() + "(" +
mbr->getPath().str() + ")";
MemoryBuffer *Buffer =MemoryBuffer::getNewMemBuffer(mbr->getSize(),
FullMemberName.c_str());
memcpy((char*)Buffer->getBufferStart(), mbr->getData(), mbr->getSize());
ModuleProvider *mp = getBitcodeModuleProvider(Buffer, Context, ErrMsg);
if (!mp)
Module *m = getLazyBitcodeModule(Buffer, Context, ErrMsg);
if (!m)
return 0;
modules.insert(std::make_pair(fileOffset, std::make_pair(mp, mbr)));
modules.insert(std::make_pair(fileOffset, std::make_pair(m, mbr)));
return mp;
return m;
}
// Look up multiple symbols in the symbol table and return a set of
// ModuleProviders that define those symbols.
// Modules that define those symbols.
bool
Archive::findModulesDefiningSymbols(std::set<std::string>& symbols,
std::set<ModuleProvider*>& result,
std::set<Module*>& result,
std::string* error) {
if (!mapfile || !base) {
if (error)
@ -536,19 +536,19 @@ Archive::findModulesDefiningSymbols(std::set<std::string>& symbols,
std::vector<std::string> symbols;
std::string FullMemberName = archPath.str() + "(" +
mbr->getPath().str() + ")";
ModuleProvider* MP =
Module* M =
GetBitcodeSymbols((const unsigned char*)At, mbr->getSize(),
FullMemberName, Context, symbols, error);
if (MP) {
if (M) {
// Insert the module's symbols into the symbol table
for (std::vector<std::string>::iterator I = symbols.begin(),
E=symbols.end(); I != E; ++I ) {
symTab.insert(std::make_pair(*I, offset));
}
// Insert the ModuleProvider and the ArchiveMember into the table of
// Insert the Module and the ArchiveMember into the table of
// modules.
modules.insert(std::make_pair(offset, std::make_pair(MP, mbr)));
modules.insert(std::make_pair(offset, std::make_pair(M, mbr)));
} else {
if (error)
*error = "Can't parse bitcode member: " +
@ -571,11 +571,11 @@ Archive::findModulesDefiningSymbols(std::set<std::string>& symbols,
for (std::set<std::string>::iterator I=symbols.begin(),
E=symbols.end(); I != E;) {
// See if this symbol exists
ModuleProvider* mp = findModuleDefiningSymbol(*I,error);
if (mp) {
// The symbol exists, insert the ModuleProvider into our result,
// duplicates wil be ignored
result.insert(mp);
Module* m = findModuleDefiningSymbol(*I,error);
if (m) {
// The symbol exists, insert the Module into our result, duplicates will
// be ignored.
result.insert(m);
// Remove the symbol now that its been resolved, being careful to
// post-increment the iterator.

View File

@ -12,12 +12,12 @@
//===----------------------------------------------------------------------===//
#include "ArchiveInternals.h"
#include "llvm/Bitcode/ReaderWriter.h"
#include "llvm/Module.h"
#include "llvm/ADT/OwningPtr.h"
#include "llvm/Bitcode/ReaderWriter.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/System/Signals.h"
#include "llvm/System/Process.h"
#include "llvm/ModuleProvider.h"
#include "llvm/System/Signals.h"
#include <fstream>
#include <ostream>
#include <iomanip>
@ -225,12 +225,12 @@ Archive::writeMember(
std::vector<std::string> symbols;
std::string FullMemberName = archPath.str() + "(" + member.getPath().str()
+ ")";
ModuleProvider* MP =
Module* M =
GetBitcodeSymbols((const unsigned char*)data,fSize,
FullMemberName, Context, symbols, ErrMsg);
// If the bitcode parsed successfully
if ( MP ) {
if ( M ) {
for (std::vector<std::string>::iterator SI = symbols.begin(),
SE = symbols.end(); SI != SE; ++SI) {
@ -244,7 +244,7 @@ Archive::writeMember(
}
}
// We don't need this module any more.
delete MP;
delete M;
} else {
delete mFile;
if (ErrMsg)

View File

@ -2566,7 +2566,6 @@ bool LLParser::ParseFunctionHeader(Function *&Fn, bool isDefine) {
return Error(LinkageLoc, "invalid linkage for function declaration");
break;
case GlobalValue::AppendingLinkage:
case GlobalValue::GhostLinkage:
case GlobalValue::CommonLinkage:
return Error(LinkageLoc, "invalid function linkage type");
}

View File

@ -59,8 +59,8 @@ LLVMBool LLVMGetBitcodeModuleProvider(LLVMMemoryBufferRef MemBuf,
char **OutMessage) {
std::string Message;
*OutMP = wrap(getBitcodeModuleProvider(unwrap(MemBuf), getGlobalContext(),
&Message));
*OutMP = reinterpret_cast<LLVMModuleProviderRef>(
getLazyBitcodeModule(unwrap(MemBuf), getGlobalContext(), &Message));
if (!*OutMP) {
if (OutMessage)
@ -77,8 +77,8 @@ LLVMBool LLVMGetBitcodeModuleProviderInContext(LLVMContextRef ContextRef,
char **OutMessage) {
std::string Message;
*OutMP = wrap(getBitcodeModuleProvider(unwrap(MemBuf), *unwrap(ContextRef),
&Message));
*OutMP = reinterpret_cast<LLVMModuleProviderRef>(
getLazyBitcodeModule(unwrap(MemBuf), *unwrap(ContextRef), &Message));
if (!*OutMP) {
if (OutMessage)
*OutMessage = strdup(Message.c_str());

View File

@ -28,7 +28,8 @@
using namespace llvm;
void BitcodeReader::FreeState() {
delete Buffer;
if (BufferOwned)
delete Buffer;
Buffer = 0;
std::vector<PATypeHolder>().swap(TypeList);
ValueList.clear();
@ -1241,11 +1242,7 @@ bool BitcodeReader::RememberAndSkipFunctionBody() {
// Save the current stream state.
uint64_t CurBit = Stream.GetCurrentBitNo();
DeferredFunctionInfo[Fn] = std::make_pair(CurBit, Fn->getLinkage());
// Set the functions linkage to GhostLinkage so we know it is lazily
// deserialized.
Fn->setLinkage(GlobalValue::GhostLinkage);
DeferredFunctionInfo[Fn] = CurBit;
// Skip over the function block for now.
if (Stream.SkipBlock())
@ -1253,17 +1250,10 @@ bool BitcodeReader::RememberAndSkipFunctionBody() {
return false;
}
bool BitcodeReader::ParseModule(const std::string &ModuleID) {
// Reject multiple MODULE_BLOCK's in a single bitstream.
if (TheModule)
return Error("Multiple MODULE_BLOCKs in same stream");
bool BitcodeReader::ParseModule() {
if (Stream.EnterSubBlock(bitc::MODULE_BLOCK_ID))
return Error("Malformed block record");
// Otherwise, create the module.
TheModule = new Module(ModuleID, Context);
SmallVector<uint64_t, 64> Record;
std::vector<std::string> SectionTable;
std::vector<std::string> GCTable;
@ -1520,7 +1510,7 @@ bool BitcodeReader::ParseModule(const std::string &ModuleID) {
return Error("Premature end of bitstream");
}
bool BitcodeReader::ParseBitcode() {
bool BitcodeReader::ParseBitcodeInto(Module *M) {
TheModule = 0;
if (Buffer->getBufferSize() & 3)
@ -1564,7 +1554,11 @@ bool BitcodeReader::ParseBitcode() {
return Error("Malformed BlockInfoBlock");
break;
case bitc::MODULE_BLOCK_ID:
if (ParseModule(Buffer->getBufferIdentifier()))
// Reject multiple MODULE_BLOCK's in a single bitstream.
if (TheModule)
return Error("Multiple MODULE_BLOCKs in same stream");
TheModule = M;
if (ParseModule())
return true;
break;
default:
@ -2299,22 +2293,28 @@ bool BitcodeReader::ParseFunctionBody(Function *F) {
}
//===----------------------------------------------------------------------===//
// ModuleProvider implementation
// GVMaterializer implementation
//===----------------------------------------------------------------------===//
bool BitcodeReader::materializeFunction(Function *F, std::string *ErrInfo) {
// If it already is material, ignore the request.
if (!F->hasNotBeenReadFromBitcode()) return false;
bool BitcodeReader::isMaterializable(const GlobalValue *GV) const {
if (const Function *F = dyn_cast<Function>(GV)) {
return F->isDeclaration() &&
DeferredFunctionInfo.count(const_cast<Function*>(F));
}
return false;
}
DenseMap<Function*, std::pair<uint64_t, unsigned> >::iterator DFII =
DeferredFunctionInfo.find(F);
bool BitcodeReader::Materialize(GlobalValue *GV, std::string *ErrInfo) {
Function *F = dyn_cast<Function>(GV);
// If it's not a function or is already material, ignore the request.
if (!F || !F->isMaterializable()) return false;
DenseMap<Function*, uint64_t>::iterator DFII = DeferredFunctionInfo.find(F);
assert(DFII != DeferredFunctionInfo.end() && "Deferred function not found!");
// Move the bit stream to the saved position of the deferred function body and
// restore the real linkage type for the function.
Stream.JumpToBit(DFII->second.first);
F->setLinkage((GlobalValue::LinkageTypes)DFII->second.second);
// Move the bit stream to the saved position of the deferred function body.
Stream.JumpToBit(DFII->second);
if (ParseFunctionBody(F)) {
if (ErrInfo) *ErrInfo = ErrorString;
@ -2336,27 +2336,36 @@ bool BitcodeReader::materializeFunction(Function *F, std::string *ErrInfo) {
return false;
}
void BitcodeReader::dematerializeFunction(Function *F) {
// If this function isn't materialized, or if it is a proto, this is a noop.
if (F->hasNotBeenReadFromBitcode() || F->isDeclaration())
bool BitcodeReader::isDematerializable(const GlobalValue *GV) const {
const Function *F = dyn_cast<Function>(GV);
if (!F || F->isDeclaration())
return false;
return DeferredFunctionInfo.count(const_cast<Function*>(F));
}
void BitcodeReader::Dematerialize(GlobalValue *GV) {
Function *F = dyn_cast<Function>(GV);
// If this function isn't dematerializable, this is a noop.
if (!F || !isDematerializable(F))
return;
assert(DeferredFunctionInfo.count(F) && "No info to read function later?");
// Just forget the function body, we can remat it later.
F->deleteBody();
F->setLinkage(GlobalValue::GhostLinkage);
}
Module *BitcodeReader::materializeModule(std::string *ErrInfo) {
bool BitcodeReader::MaterializeModule(Module *M, std::string *ErrInfo) {
assert(M == TheModule &&
"Can only Materialize the Module this BitcodeReader is attached to.");
// Iterate over the module, deserializing any functions that are still on
// disk.
for (Module::iterator F = TheModule->begin(), E = TheModule->end();
F != E; ++F)
if (F->hasNotBeenReadFromBitcode() &&
materializeFunction(F, ErrInfo))
return 0;
if (F->isMaterializable() &&
Materialize(F, ErrInfo))
return true;
// Upgrade any intrinsic calls that slipped through (should not happen!) and
// delete the old functions to clean up. We can't do this unless the entire
@ -2380,19 +2389,7 @@ Module *BitcodeReader::materializeModule(std::string *ErrInfo) {
// Check debug info intrinsics.
CheckDebugInfoIntrinsics(TheModule);
return TheModule;
}
/// This method is provided by the parent ModuleProvde class and overriden
/// here. It simply releases the module from its provided and frees up our
/// state.
/// @brief Release our hold on the generated module
Module *BitcodeReader::releaseModule(std::string *ErrInfo) {
// Since we're losing control of this Module, we must hand it back complete
Module *M = ModuleProvider::releaseModule(ErrInfo);
FreeState();
return M;
return false;
}
@ -2400,45 +2397,41 @@ Module *BitcodeReader::releaseModule(std::string *ErrInfo) {
// External interface
//===----------------------------------------------------------------------===//
/// getBitcodeModuleProvider - lazy function-at-a-time loading from a file.
/// getLazyBitcodeModule - lazy function-at-a-time loading from a file.
///
ModuleProvider *llvm::getBitcodeModuleProvider(MemoryBuffer *Buffer,
LLVMContext& Context,
std::string *ErrMsg) {
Module *llvm::getLazyBitcodeModule(MemoryBuffer *Buffer,
LLVMContext& Context,
std::string *ErrMsg) {
Module *M = new Module(Buffer->getBufferIdentifier(), Context);
BitcodeReader *R = new BitcodeReader(Buffer, Context);
if (R->ParseBitcode()) {
M->setMaterializer(R);
if (R->ParseBitcodeInto(M)) {
if (ErrMsg)
*ErrMsg = R->getErrorString();
// Don't let the BitcodeReader dtor delete 'Buffer'.
R->releaseMemoryBuffer();
delete R;
delete M; // Also deletes R.
return 0;
}
return R;
// Have the BitcodeReader dtor delete 'Buffer'.
R->setBufferOwned(true);
return M;
}
/// ParseBitcodeFile - Read the specified bitcode file, returning the module.
/// If an error occurs, return null and fill in *ErrMsg if non-null.
Module *llvm::ParseBitcodeFile(MemoryBuffer *Buffer, LLVMContext& Context,
std::string *ErrMsg){
BitcodeReader *R;
R = static_cast<BitcodeReader*>(getBitcodeModuleProvider(Buffer, Context,
ErrMsg));
if (!R) return 0;
// Read in the entire module.
Module *M = R->materializeModule(ErrMsg);
Module *M = getLazyBitcodeModule(Buffer, Context, ErrMsg);
if (!M) return 0;
// Don't let the BitcodeReader dtor delete 'Buffer', regardless of whether
// there was an error.
R->releaseMemoryBuffer();
static_cast<BitcodeReader*>(M->getMaterializer())->setBufferOwned(false);
// If there was no error, tell ModuleProvider not to delete it when its dtor
// is run.
if (M)
M = R->releaseModule(ErrMsg);
delete R;
// Read in the entire module, and destroy the BitcodeReader.
if (M->MaterializeAllPermanently(ErrMsg)) {
delete M;
return NULL;
}
return M;
}
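
For comparison with the old getBitcodeModuleProvider flow, a caller-side
sketch of the new entry point (illustrative only; loadLazily is a hypothetical
helper, and error handling is trimmed to the essentials):

#include "llvm/Bitcode/ReaderWriter.h"
#include "llvm/LLVMContext.h"
#include "llvm/Module.h"
#include "llvm/Support/MemoryBuffer.h"
#include <string>
using namespace llvm;

Module *loadLazily(const char *Path, std::string &Err) {
  MemoryBuffer *Buf = MemoryBuffer::getFileOrSTDIN(Path, &Err);
  if (!Buf) return 0;

  // On success the Module owns a BitcodeReader as its GVMaterializer (and
  // the buffer); function bodies stream in on demand.
  Module *M = getLazyBitcodeModule(Buf, getGlobalContext(), &Err);
  if (!M) {
    delete Buf; // on error, ownership of the buffer is not taken
    return 0;
  }

  // Bodies materialize via GlobalValue::Materialize(), or all at once with
  // M->MaterializeAll(&Err) / M->MaterializeAllPermanently(&Err).
  return M;
}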

View File

@ -14,7 +14,7 @@
#ifndef BITCODE_READER_H
#define BITCODE_READER_H
#include "llvm/ModuleProvider.h"
#include "llvm/GVMaterializer.h"
#include "llvm/Attributes.h"
#include "llvm/Type.h"
#include "llvm/OperandTraits.h"
@ -121,9 +121,11 @@ public:
void AssignValue(Value *V, unsigned Idx);
};
class BitcodeReader : public ModuleProvider {
class BitcodeReader : public GVMaterializer {
LLVMContext &Context;
Module *TheModule;
MemoryBuffer *Buffer;
bool BufferOwned;
BitstreamReader StreamFile;
BitstreamCursor Stream;
@ -160,9 +162,9 @@ class BitcodeReader : public ModuleProvider {
bool HasReversedFunctionsWithBodies;
/// DeferredFunctionInfo - When function bodies are initially scanned, this
/// map contains info about where to find deferred function body (in the
/// stream) and what linkage the original function had.
DenseMap<Function*, std::pair<uint64_t, unsigned> > DeferredFunctionInfo;
/// map contains info about where to find deferred function body in the
/// stream.
DenseMap<Function*, uint64_t> DeferredFunctionInfo;
/// BlockAddrFwdRefs - These are blockaddr references to basic blocks. These
/// are resolved lazily when functions are loaded.
@ -171,7 +173,8 @@ class BitcodeReader : public ModuleProvider {
public:
explicit BitcodeReader(MemoryBuffer *buffer, LLVMContext &C)
: Context(C), Buffer(buffer), ErrorString(0), ValueList(C), MDValueList(C) {
: Context(C), TheModule(0), Buffer(buffer), BufferOwned(false),
ErrorString(0), ValueList(C), MDValueList(C) {
HasReversedFunctionsWithBodies = false;
}
~BitcodeReader() {
@ -180,17 +183,15 @@ public:
void FreeState();
/// releaseMemoryBuffer - This causes the reader to completely forget about
/// the memory buffer it contains, which prevents the buffer from being
/// destroyed when it is deleted.
void releaseMemoryBuffer() {
Buffer = 0;
}
/// setBufferOwned - If this is true, the reader will destroy the MemoryBuffer
/// when the reader is destroyed.
void setBufferOwned(bool Owned) { BufferOwned = Owned; }
virtual bool materializeFunction(Function *F, std::string *ErrInfo = 0);
virtual Module *materializeModule(std::string *ErrInfo = 0);
virtual void dematerializeFunction(Function *F);
virtual Module *releaseModule(std::string *ErrInfo = 0);
virtual bool isMaterializable(const GlobalValue *GV) const;
virtual bool isDematerializable(const GlobalValue *GV) const;
virtual bool Materialize(GlobalValue *GV, std::string *ErrInfo = 0);
virtual bool MaterializeModule(Module *M, std::string *ErrInfo = 0);
virtual void Dematerialize(GlobalValue *GV);
bool Error(const char *Str) {
ErrorString = Str;
@ -200,7 +201,7 @@ public:
/// @brief Main interface to parsing a bitcode buffer.
/// @returns true if an error occurred.
bool ParseBitcode();
bool ParseBitcodeInto(Module *M);
private:
const Type *getTypeByID(unsigned ID, bool isTypeTable = false);
Value *getFnValueByID(unsigned ID, const Type *Ty) {
@ -248,7 +249,7 @@ private:
}
bool ParseModule(const std::string &ModuleID);
bool ParseModule();
bool ParseAttributeBlock();
bool ParseTypeTable();
bool ParseTypeSymbolTable();

View File

@ -280,7 +280,6 @@ static void WriteTypeTable(const ValueEnumerator &VE, BitstreamWriter &Stream) {
static unsigned getEncodedLinkage(const GlobalValue *GV) {
switch (GV->getLinkage()) {
default: llvm_unreachable("Invalid linkage!");
case GlobalValue::GhostLinkage: // Map ghost linkage onto external.
case GlobalValue::ExternalLinkage: return 0;
case GlobalValue::WeakAnyLinkage: return 1;
case GlobalValue::AppendingLinkage: return 2;


@ -18,7 +18,6 @@
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Module.h"
#include "llvm/ModuleProvider.h"
#include "llvm/ExecutionEngine/GenericValue.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/Debug.h"
@ -36,25 +35,25 @@ using namespace llvm;
STATISTIC(NumInitBytes, "Number of bytes of global vars initialized");
STATISTIC(NumGlobals , "Number of global vars initialized");
ExecutionEngine *(*ExecutionEngine::JITCtor)(ModuleProvider *MP,
ExecutionEngine *(*ExecutionEngine::JITCtor)(Module *M,
std::string *ErrorStr,
JITMemoryManager *JMM,
CodeGenOpt::Level OptLevel,
bool GVsWithCode,
CodeModel::Model CMM) = 0;
ExecutionEngine *(*ExecutionEngine::InterpCtor)(ModuleProvider *MP,
ExecutionEngine *(*ExecutionEngine::InterpCtor)(Module *M,
std::string *ErrorStr) = 0;
ExecutionEngine::EERegisterFn ExecutionEngine::ExceptionTableRegister = 0;
ExecutionEngine::ExecutionEngine(ModuleProvider *P)
ExecutionEngine::ExecutionEngine(Module *M)
: EEState(*this),
LazyFunctionCreator(0) {
CompilingLazily = false;
GVCompilationDisabled = false;
SymbolSearchingDisabled = false;
Modules.push_back(P);
assert(P && "ModuleProvider is null?");
Modules.push_back(M);
assert(M && "Module is null?");
}
ExecutionEngine::~ExecutionEngine() {
@ -69,38 +68,18 @@ char* ExecutionEngine::getMemoryForGV(const GlobalVariable* GV) {
return new char[GVSize];
}
/// removeModuleProvider - Remove a ModuleProvider from the list of modules.
/// Releases the Module from the ModuleProvider, materializing it in the
/// process, and returns the materialized Module.
Module* ExecutionEngine::removeModuleProvider(ModuleProvider *P,
std::string *ErrInfo) {
for(SmallVector<ModuleProvider *, 1>::iterator I = Modules.begin(),
/// removeModule - Remove a Module from the list of modules.
bool ExecutionEngine::removeModule(Module *M) {
for(SmallVector<Module *, 1>::iterator I = Modules.begin(),
E = Modules.end(); I != E; ++I) {
ModuleProvider *MP = *I;
if (MP == P) {
Module *Found = *I;
if (Found == M) {
Modules.erase(I);
clearGlobalMappingsFromModule(MP->getModule());
return MP->releaseModule(ErrInfo);
}
}
return NULL;
}
/// deleteModuleProvider - Remove a ModuleProvider from the list of modules,
/// and deletes the ModuleProvider and owned Module. Avoids materializing
/// the underlying module.
void ExecutionEngine::deleteModuleProvider(ModuleProvider *P,
std::string *ErrInfo) {
for(SmallVector<ModuleProvider *, 1>::iterator I = Modules.begin(),
E = Modules.end(); I != E; ++I) {
ModuleProvider *MP = *I;
if (MP == P) {
Modules.erase(I);
clearGlobalMappingsFromModule(MP->getModule());
delete MP;
return;
clearGlobalMappingsFromModule(M);
return true;
}
}
return false;
}
/// FindFunctionNamed - Search all of the active modules to find the one that
@ -108,7 +87,7 @@ void ExecutionEngine::deleteModuleProvider(ModuleProvider *P,
/// general code.
Function *ExecutionEngine::FindFunctionNamed(const char *FnName) {
for (unsigned i = 0, e = Modules.size(); i != e; ++i) {
if (Function *F = Modules[i]->getModule()->getFunction(FnName))
if (Function *F = Modules[i]->getFunction(FnName))
return F;
}
return 0;
@ -316,7 +295,7 @@ void ExecutionEngine::runStaticConstructorsDestructors(Module *module,
void ExecutionEngine::runStaticConstructorsDestructors(bool isDtors) {
// Execute global ctors/dtors for each module in the program.
for (unsigned m = 0, e = Modules.size(); m != e; ++m)
runStaticConstructorsDestructors(Modules[m]->getModule(), isDtors);
runStaticConstructorsDestructors(Modules[m], isDtors);
}
#ifndef NDEBUG
@ -393,12 +372,12 @@ int ExecutionEngine::runFunctionAsMain(Function *Fn,
/// Interpreter or there's an error. If even an Interpreter cannot be created,
/// NULL is returned.
///
ExecutionEngine *ExecutionEngine::create(ModuleProvider *MP,
ExecutionEngine *ExecutionEngine::create(Module *M,
bool ForceInterpreter,
std::string *ErrorStr,
CodeGenOpt::Level OptLevel,
bool GVsWithCode) {
return EngineBuilder(MP)
return EngineBuilder(M)
.setEngineKind(ForceInterpreter
? EngineKind::Interpreter
: EngineKind::JIT)
@ -412,12 +391,6 @@ ExecutionEngine *ExecutionEngine::create(Module *M) {
return EngineBuilder(M).create();
}
/// EngineBuilder - Overloaded constructor that automatically creates an
/// ExistingModuleProvider for an existing module.
EngineBuilder::EngineBuilder(Module *m) : MP(new ExistingModuleProvider(m)) {
InitEngine();
}
ExecutionEngine *EngineBuilder::create() {
// Make sure we can resolve symbols in the program as well. The zero arg
// to the function tells DynamicLibrary to load the program, not a library.
@ -442,7 +415,7 @@ ExecutionEngine *EngineBuilder::create() {
if (WhichEngine & EngineKind::JIT) {
if (ExecutionEngine::JITCtor) {
ExecutionEngine *EE =
ExecutionEngine::JITCtor(MP, ErrorStr, JMM, OptLevel,
ExecutionEngine::JITCtor(M, ErrorStr, JMM, OptLevel,
AllocateGVsWithCode, CMModel);
if (EE) return EE;
}
@ -452,7 +425,7 @@ ExecutionEngine *EngineBuilder::create() {
// an interpreter instead.
if (WhichEngine & EngineKind::Interpreter) {
if (ExecutionEngine::InterpCtor)
return ExecutionEngine::InterpCtor(MP, ErrorStr);
return ExecutionEngine::InterpCtor(M, ErrorStr);
if (ErrorStr)
*ErrorStr = "Interpreter has not been linked in.";
return 0;
@ -968,7 +941,7 @@ void ExecutionEngine::emitGlobals() {
if (Modules.size() != 1) {
for (unsigned m = 0, e = Modules.size(); m != e; ++m) {
Module &M = *Modules[m]->getModule();
Module &M = *Modules[m];
for (Module::const_global_iterator I = M.global_begin(),
E = M.global_end(); I != E; ++I) {
const GlobalValue *GV = I;
@ -1002,7 +975,7 @@ void ExecutionEngine::emitGlobals() {
std::vector<const GlobalValue*> NonCanonicalGlobals;
for (unsigned m = 0, e = Modules.size(); m != e; ++m) {
Module &M = *Modules[m]->getModule();
Module &M = *Modules[m];
for (Module::const_global_iterator I = M.global_begin(), E = M.global_end();
I != E; ++I) {
// In the multi-module case, see what this global maps to.
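A short usage sketch of the replacement API, assuming EE is an existing ExecutionEngine* and M a Module* it currently owns; the helper name dropModule is made up. Unlike the old removeModuleProvider(), removeModule() materializes nothing: it only unregisters M, clears its global mappings, and hands ownership back to the caller.

    #include "llvm/ExecutionEngine/ExecutionEngine.h"
    #include "llvm/Module.h"
    using namespace llvm;

    void dropModule(ExecutionEngine *EE, Module *M) {
      if (EE->removeModule(M)) {
        // EE no longer owns M: the caller may delete it or hand it to another
        // engine. Nothing was materialized in the process.
        delete M;
      }
    }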


@ -174,20 +174,16 @@ void LLVMFreeMachineCodeForFunction(LLVMExecutionEngineRef EE, LLVMValueRef F) {
}
void LLVMAddModuleProvider(LLVMExecutionEngineRef EE, LLVMModuleProviderRef MP){
unwrap(EE)->addModuleProvider(unwrap(MP));
unwrap(EE)->addModule(unwrap(MP));
}
LLVMBool LLVMRemoveModuleProvider(LLVMExecutionEngineRef EE,
LLVMModuleProviderRef MP,
LLVMModuleRef *OutMod, char **OutError) {
std::string Error;
if (Module *Gone = unwrap(EE)->removeModuleProvider(unwrap(MP), &Error)) {
*OutMod = wrap(Gone);
return 0;
}
if (OutError)
*OutError = strdup(Error.c_str());
return 1;
Module *M = unwrap(MP);
unwrap(EE)->removeModule(M);
*OutMod = wrap(M);
return 0;
}
LLVMBool LLVMFindFunction(LLVMExecutionEngineRef EE, const char *Name,
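Because these wrappers now operate on the Module stored inside the LLVMModuleProviderRef (see the LLVMCreateModuleProviderForExistingModule change further down), a pre-existing C client keeps working unchanged. A hedged sketch, assuming EE is an already-created LLVMExecutionEngineRef and M an LLVMModuleRef; the helper name roundTrip is made up.

    #include "llvm-c/Core.h"
    #include "llvm-c/ExecutionEngine.h"

    static void roundTrip(LLVMExecutionEngineRef EE, LLVMModuleRef M) {
      LLVMModuleProviderRef MP = LLVMCreateModuleProviderForExistingModule(M);
      LLVMAddModuleProvider(EE, MP);

      LLVMModuleRef Removed;
      char *Error = 0;
      // Always succeeds now; Removed is the same module M, owned by the caller.
      LLVMRemoveModuleProvider(EE, MP, &Removed, &Error);
      LLVMDisposeModule(Removed);
    }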


@ -17,7 +17,6 @@
#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Module.h"
#include "llvm/ModuleProvider.h"
#include <cstring>
using namespace llvm;
@ -33,20 +32,20 @@ extern "C" void LLVMLinkInInterpreter() { }
/// create - Create a new interpreter object. This can never fail.
///
ExecutionEngine *Interpreter::create(ModuleProvider *MP, std::string* ErrStr) {
// Tell this ModuleProvider to materialize and release the module
if (!MP->materializeModule(ErrStr))
ExecutionEngine *Interpreter::create(Module *M, std::string* ErrStr) {
// Tell this Module to materialize everything and release the GVMaterializer.
if (M->MaterializeAllPermanently(ErrStr))
// We got an error, just return 0
return 0;
return new Interpreter(MP);
return new Interpreter(M);
}
//===----------------------------------------------------------------------===//
// Interpreter ctor - Initialize stuff
//
Interpreter::Interpreter(ModuleProvider *M)
: ExecutionEngine(M), TD(M->getModule()) {
Interpreter::Interpreter(Module *M)
: ExecutionEngine(M), TD(M) {
memset(&ExitValue.Untyped, 0, sizeof(ExitValue.Untyped));
setTargetData(&TD);


@ -94,7 +94,7 @@ class Interpreter : public ExecutionEngine, public InstVisitor<Interpreter> {
std::vector<Function*> AtExitHandlers;
public:
explicit Interpreter(ModuleProvider *M);
explicit Interpreter(Module *M);
~Interpreter();
/// runAtExitHandlers - Run any functions registered by the program's calls to
@ -108,7 +108,7 @@ public:
/// create - Create an interpreter ExecutionEngine. This can never fail.
///
static ExecutionEngine *create(ModuleProvider *M, std::string *ErrorStr = 0);
static ExecutionEngine *create(Module *M, std::string *ErrorStr = 0);
/// run - Start execution with the specified function and arguments.
///


@ -18,7 +18,6 @@
#include "llvm/Function.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/ModuleProvider.h"
#include "llvm/CodeGen/JITCodeEmitter.h"
#include "llvm/CodeGen/MachineCodeInfo.h"
#include "llvm/ExecutionEngine/GenericValue.h"
@ -193,17 +192,17 @@ void DarwinRegisterFrame(void* FrameBegin) {
/// createJIT - This is the factory method for creating a JIT for the current
/// machine, it does not fall back to the interpreter. This takes ownership
/// of the module provider.
ExecutionEngine *ExecutionEngine::createJIT(ModuleProvider *MP,
/// of the module.
ExecutionEngine *ExecutionEngine::createJIT(Module *M,
std::string *ErrorStr,
JITMemoryManager *JMM,
CodeGenOpt::Level OptLevel,
bool GVsWithCode,
CodeModel::Model CMM) {
return JIT::createJIT(MP, ErrorStr, JMM, OptLevel, GVsWithCode, CMM);
return JIT::createJIT(M, ErrorStr, JMM, OptLevel, GVsWithCode, CMM);
}
ExecutionEngine *JIT::createJIT(ModuleProvider *MP,
ExecutionEngine *JIT::createJIT(Module *M,
std::string *ErrorStr,
JITMemoryManager *JMM,
CodeGenOpt::Level OptLevel,
@ -215,13 +214,13 @@ ExecutionEngine *JIT::createJIT(ModuleProvider *MP,
return 0;
// Pick a target either via -march or by guessing the native arch.
TargetMachine *TM = JIT::selectTarget(MP, ErrorStr);
TargetMachine *TM = JIT::selectTarget(M, ErrorStr);
if (!TM || (ErrorStr && ErrorStr->length() > 0)) return 0;
TM->setCodeModel(CMM);
// If the target supports JIT code generation, create the JIT.
if (TargetJITInfo *TJ = TM->getJITInfo()) {
return new JIT(MP, *TM, *TJ, JMM, OptLevel, GVsWithCode);
return new JIT(M, *TM, *TJ, JMM, OptLevel, GVsWithCode);
} else {
if (ErrorStr)
*ErrorStr = "target does not support JIT code generation";
@ -229,12 +228,12 @@ ExecutionEngine *JIT::createJIT(ModuleProvider *MP,
}
}
JIT::JIT(ModuleProvider *MP, TargetMachine &tm, TargetJITInfo &tji,
JIT::JIT(Module *M, TargetMachine &tm, TargetJITInfo &tji,
JITMemoryManager *JMM, CodeGenOpt::Level OptLevel, bool GVsWithCode)
: ExecutionEngine(MP), TM(tm), TJI(tji), AllocateGVsWithCode(GVsWithCode) {
: ExecutionEngine(M), TM(tm), TJI(tji), AllocateGVsWithCode(GVsWithCode) {
setTargetData(TM.getTargetData());
jitstate = new JITState(MP);
jitstate = new JITState(M);
// Initialize JCE
JCE = createEmitter(*this, JMM, TM);
@ -278,16 +277,15 @@ JIT::~JIT() {
delete &TM;
}
/// addModuleProvider - Add a new ModuleProvider to the JIT. If we previously
/// removed the last ModuleProvider, we need to re-initialize jitstate with a valid
/// ModuleProvider.
void JIT::addModuleProvider(ModuleProvider *MP) {
/// addModule - Add a new Module to the JIT. If we previously removed the last
/// Module, we need to re-initialize jitstate with a valid Module.
void JIT::addModule(Module *M) {
MutexGuard locked(lock);
if (Modules.empty()) {
assert(!jitstate && "jitstate should be NULL if Modules vector is empty!");
jitstate = new JITState(MP);
jitstate = new JITState(M);
FunctionPassManager &PM = jitstate->getPM(locked);
PM.add(new TargetData(*TM.getTargetData()));
@ -302,18 +300,17 @@ void JIT::addModuleProvider(ModuleProvider *MP) {
PM.doInitialization();
}
ExecutionEngine::addModuleProvider(MP);
ExecutionEngine::addModule(M);
}
/// removeModuleProvider - If we are removing the last ModuleProvider,
/// invalidate the jitstate since the PassManager it contains references a
/// released ModuleProvider.
Module *JIT::removeModuleProvider(ModuleProvider *MP, std::string *E) {
Module *result = ExecutionEngine::removeModuleProvider(MP, E);
/// removeModule - If we are removing the last Module, invalidate the jitstate
/// since the PassManager it contains references a released Module.
bool JIT::removeModule(Module *M) {
bool result = ExecutionEngine::removeModule(M);
MutexGuard locked(lock);
if (jitstate->getMP() == MP) {
if (jitstate->getModule() == M) {
delete jitstate;
jitstate = 0;
}
@ -336,62 +333,6 @@ Module *JIT::removeModuleProvider(ModuleProvider *MP, std::string *E) {
return result;
}
/// deleteModuleProvider - Remove a ModuleProvider from the list of modules,
/// and deletes the ModuleProvider and owned Module. Avoids materializing
/// the underlying module.
void JIT::deleteModuleProvider(ModuleProvider *MP, std::string *E) {
ExecutionEngine::deleteModuleProvider(MP, E);
MutexGuard locked(lock);
if (jitstate->getMP() == MP) {
delete jitstate;
jitstate = 0;
}
if (!jitstate && !Modules.empty()) {
jitstate = new JITState(Modules[0]);
FunctionPassManager &PM = jitstate->getPM(locked);
PM.add(new TargetData(*TM.getTargetData()));
// Turn the machine code intermediate representation into bytes in memory
// that may be executed.
if (TM.addPassesToEmitMachineCode(PM, *JCE, CodeGenOpt::Default)) {
llvm_report_error("Target does not support machine code emission!");
}
// Initialize passes.
PM.doInitialization();
}
}
/// materializeFunction - make sure the given function is fully read. If the
/// module is corrupt, this returns true and fills in the optional string with
/// information about the problem. If successful, this returns false.
bool JIT::materializeFunction(Function *F, std::string *ErrInfo) {
// Read in the function if it exists in this Module.
if (F->hasNotBeenReadFromBitcode()) {
// Determine the module provider this function is provided by.
Module *M = F->getParent();
ModuleProvider *MP = 0;
for (unsigned i = 0, e = Modules.size(); i != e; ++i) {
if (Modules[i]->getModule() == M) {
MP = Modules[i];
break;
}
}
if (MP)
return MP->materializeFunction(F, ErrInfo);
if (ErrInfo)
*ErrInfo = "Function isn't in a module we know about!";
return true;
}
// Succeed if the function is already read.
return false;
}
/// run - Start execution with the specified function and arguments.
///
GenericValue JIT::runFunction(Function *F,
@ -661,7 +602,7 @@ void *JIT::getPointerToFunction(Function *F) {
// Now that this thread owns the lock, make sure we read in the function if it
// exists in this Module.
std::string ErrorMsg;
if (materializeFunction(F, &ErrorMsg)) {
if (F->Materialize(&ErrorMsg)) {
llvm_report_error("Error reading function '" + F->getName()+
"' from bitcode file: " + ErrorMsg);
}


@ -30,20 +30,20 @@ class TargetMachine;
class JITState {
private:
FunctionPassManager PM; // Passes to compile a function
ModuleProvider *MP; // ModuleProvider used to create the PM
Module *M; // Module used to create the PM
/// PendingFunctions - Functions which have not been code generated yet, but
/// were called from a function being code generated.
std::vector<AssertingVH<Function> > PendingFunctions;
public:
explicit JITState(ModuleProvider *MP) : PM(MP), MP(MP) {}
explicit JITState(Module *M) : PM(M), M(M) {}
FunctionPassManager &getPM(const MutexGuard &L) {
return PM;
}
ModuleProvider *getMP() const { return MP; }
Module *getModule() const { return M; }
std::vector<AssertingVH<Function> > &getPendingFunctions(const MutexGuard &L){
return PendingFunctions;
}
@ -63,7 +63,7 @@ class JIT : public ExecutionEngine {
JITState *jitstate;
JIT(ModuleProvider *MP, TargetMachine &tm, TargetJITInfo &tji,
JIT(Module *M, TargetMachine &tm, TargetJITInfo &tji,
JITMemoryManager *JMM, CodeGenOpt::Level OptLevel,
bool AllocateGVsWithCode);
public:
@ -80,35 +80,22 @@ public:
/// create - Create and return a new JIT compiler if there is one available
/// for the current target. Otherwise, return null.
///
static ExecutionEngine *create(ModuleProvider *MP,
static ExecutionEngine *create(Module *M,
std::string *Err,
JITMemoryManager *JMM,
CodeGenOpt::Level OptLevel =
CodeGenOpt::Default,
bool GVsWithCode = true,
CodeModel::Model CMM = CodeModel::Default) {
return ExecutionEngine::createJIT(MP, Err, JMM, OptLevel, GVsWithCode,
return ExecutionEngine::createJIT(M, Err, JMM, OptLevel, GVsWithCode,
CMM);
}
virtual void addModuleProvider(ModuleProvider *MP);
virtual void addModule(Module *M);
/// removeModuleProvider - Remove a ModuleProvider from the list of modules.
/// Releases the Module from the ModuleProvider, materializing it in the
/// process, and returns the materialized Module.
virtual Module *removeModuleProvider(ModuleProvider *MP,
std::string *ErrInfo = 0);
/// deleteModuleProvider - Remove a ModuleProvider from the list of modules,
/// and deletes the ModuleProvider and owned Module. Avoids materializing
/// the underlying module.
virtual void deleteModuleProvider(ModuleProvider *P,std::string *ErrInfo = 0);
/// materializeFunction - make sure the given function is fully read. If the
/// module is corrupt, this returns true and fills in the optional string with
/// information about the problem. If successful, this returns false.
///
bool materializeFunction(Function *F, std::string *ErrInfo = 0);
/// removeModule - Remove a Module from the list of modules. Returns true if
/// M is found.
virtual bool removeModule(Module *M);
/// runFunction - Start execution with the specified function and arguments.
///
@ -177,9 +164,9 @@ public:
/// selectTarget - Pick a target either via -march or by guessing the native
/// arch. Add any CPU features specified via -mcpu or -mattr.
static TargetMachine *selectTarget(ModuleProvider *MP, std::string *Err);
static TargetMachine *selectTarget(Module *M, std::string *Err);
static ExecutionEngine *createJIT(ModuleProvider *MP,
static ExecutionEngine *createJIT(Module *M,
std::string *ErrorStr,
JITMemoryManager *JMM,
CodeGenOpt::Level OptLevel,


@ -63,7 +63,7 @@ static JIT *TheJIT = 0;
// A declaration may stop being a declaration once it's fully read from bitcode.
// This function returns true if F is fully read and is still a declaration.
static bool isNonGhostDeclaration(const Function *F) {
return F->isDeclaration() && !F->hasNotBeenReadFromBitcode();
return F->isDeclaration() && !F->isMaterializable();
}
//===----------------------------------------------------------------------===//


@ -15,7 +15,6 @@
#include "JIT.h"
#include "llvm/Module.h"
#include "llvm/ModuleProvider.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/raw_ostream.h"
@ -43,10 +42,8 @@ MAttrs("mattr",
/// selectTarget - Pick a target either via -march or by guessing the native
/// arch. Add any CPU features specified via -mcpu or -mattr.
TargetMachine *JIT::selectTarget(ModuleProvider *MP, std::string *ErrorStr) {
Module &Mod = *MP->getModule();
Triple TheTriple(Mod.getTargetTriple());
TargetMachine *JIT::selectTarget(Module *Mod, std::string *ErrorStr) {
Triple TheTriple(Mod->getTargetTriple());
if (TheTriple.getTriple().empty())
TheTriple.setTriple(sys::getHostTriple());


@ -14,7 +14,6 @@
#include "llvm/Linker.h"
#include "llvm/Module.h"
#include "llvm/ModuleProvider.h"
#include "llvm/ADT/SetOperations.h"
#include "llvm/Bitcode/Archive.h"
#include "llvm/Config/config.h"
@ -139,8 +138,10 @@ Linker::LinkInArchive(const sys::Path &Filename, bool &is_native) {
do {
CurrentlyUndefinedSymbols = UndefinedSymbols;
// Find the modules we need to link into the target module
std::set<ModuleProvider*> Modules;
// Find the modules we need to link into the target module. Note that arch
// keeps ownership of these modules and may return the same Module* from a
// subsequent call.
std::set<Module*> Modules;
if (!arch->findModulesDefiningSymbols(UndefinedSymbols, Modules, &ErrMsg))
return error("Cannot find symbols in '" + Filename.str() +
"': " + ErrMsg);
@ -156,19 +157,17 @@ Linker::LinkInArchive(const sys::Path &Filename, bool &is_native) {
NotDefinedByArchive.insert(UndefinedSymbols.begin(),
UndefinedSymbols.end());
// Loop over all the ModuleProviders that we got back from the archive
for (std::set<ModuleProvider*>::iterator I=Modules.begin(), E=Modules.end();
// Loop over all the Modules that we got back from the archive
for (std::set<Module*>::iterator I=Modules.begin(), E=Modules.end();
I != E; ++I) {
// Get the module we must link in.
std::string moduleErrorMsg;
std::auto_ptr<Module> AutoModule((*I)->releaseModule( &moduleErrorMsg ));
if (!moduleErrorMsg.empty())
return error("Could not load a module: " + moduleErrorMsg);
Module* aModule = AutoModule.get();
Module* aModule = *I;
if (aModule != NULL) {
if (aModule->MaterializeAll(&moduleErrorMsg))
return error("Could not load a module: " + moduleErrorMsg);
verbose(" Linking in module: " + aModule->getModuleIdentifier());
// Link it in


@ -122,9 +122,9 @@ ARMSubtarget::GVIsIndirectSymbol(GlobalValue *GV, Reloc::Model RelocM) const {
if (RelocM == Reloc::Static)
return false;
// GV with ghost linkage (in JIT lazy compilation mode) do not require an
// extra load from stub.
bool isDecl = GV->isDeclaration() && !GV->hasNotBeenReadFromBitcode();
// Materializable GVs (in JIT lazy compilation mode) do not require an extra
// load from stub.
bool isDecl = GV->isDeclaration() && !GV->isMaterializable();
if (!isTargetDarwin()) {
// Extra load is needed for all externally visible.


@ -307,8 +307,6 @@ namespace {
Out << "GlobalValue::DLLExportLinkage"; break;
case GlobalValue::ExternalWeakLinkage:
Out << "GlobalValue::ExternalWeakLinkage"; break;
case GlobalValue::GhostLinkage:
Out << "GlobalValue::GhostLinkage"; break;
case GlobalValue::CommonLinkage:
Out << "GlobalValue::CommonLinkage"; break;
}


@ -130,7 +130,7 @@ bool PPCSubtarget::hasLazyResolverStub(const GlobalValue *GV,
return false;
// If symbol visibility is hidden, the extra load is not needed if
// the symbol is definitely defined in the current translation unit.
bool isDecl = GV->isDeclaration() && !GV->hasNotBeenReadFromBitcode();
bool isDecl = GV->isDeclaration() && !GV->isMaterializable();
if (GV->hasHiddenVisibility() && !isDecl && !GV->hasCommonLinkage())
return false;
return GV->hasWeakLinkage() || GV->hasLinkOnceLinkage() ||


@ -53,9 +53,9 @@ ClassifyGlobalReference(const GlobalValue *GV, const TargetMachine &TM) const {
if (GV->hasDLLImportLinkage())
return X86II::MO_DLLIMPORT;
// GV with ghost linkage (in JIT lazy compilation mode) do not require an
// Materializable GVs (in JIT lazy compilation mode) do not require an
// extra load from stub.
bool isDecl = GV->isDeclaration() && !GV->hasNotBeenReadFromBitcode();
bool isDecl = GV->isDeclaration() && !GV->isMaterializable();
// X86-64 in PIC mode.
if (isPICStyleRIPRel()) {


@ -150,8 +150,6 @@ void XCoreAsmPrinter::EmitGlobalVariable(const GlobalVariable *GV) {
case GlobalValue::PrivateLinkage:
case GlobalValue::LinkerPrivateLinkage:
break;
case GlobalValue::GhostLinkage:
llvm_unreachable("Should not have any unmaterialized functions!");
case GlobalValue::DLLImportLinkage:
llvm_unreachable("DLLImport linkage is not supported by this target!");
case GlobalValue::DLLExportLinkage:


@ -467,7 +467,6 @@ static LinkageCategory categorize(const Function *F) {
case GlobalValue::AppendingLinkage:
case GlobalValue::DLLImportLinkage:
case GlobalValue::DLLExportLinkage:
case GlobalValue::GhostLinkage:
case GlobalValue::CommonLinkage:
return ExternalStrong;
}


@ -1402,8 +1402,6 @@ static void PrintLinkage(GlobalValue::LinkageTypes LT,
case GlobalValue::AvailableExternallyLinkage:
Out << "available_externally ";
break;
// This is invalid syntax and just a debugging aid.
case GlobalValue::GhostLinkage: Out << "ghost "; break;
}
}


@ -20,12 +20,13 @@
#include "llvm/GlobalAlias.h"
#include "llvm/LLVMContext.h"
#include "llvm/TypeSymbolTable.h"
#include "llvm/ModuleProvider.h"
#include "llvm/InlineAsm.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <cstdlib>
#include <cstring>
@ -932,8 +933,6 @@ LLVMLinkage LLVMGetLinkage(LLVMValueRef Global) {
return LLVMDLLExportLinkage;
case GlobalValue::ExternalWeakLinkage:
return LLVMExternalWeakLinkage;
case GlobalValue::GhostLinkage:
return LLVMGhostLinkage;
case GlobalValue::CommonLinkage:
return LLVMCommonLinkage;
}
@ -988,7 +987,8 @@ void LLVMSetLinkage(LLVMValueRef Global, LLVMLinkage Linkage) {
GV->setLinkage(GlobalValue::ExternalWeakLinkage);
break;
case LLVMGhostLinkage:
GV->setLinkage(GlobalValue::GhostLinkage);
DEBUG(errs()
<< "LLVMSetLinkage(): LLVMGhostLinkage is no longer supported.");
break;
case LLVMCommonLinkage:
GV->setLinkage(GlobalValue::CommonLinkage);
@ -1965,7 +1965,7 @@ LLVMValueRef LLVMBuildPtrDiff(LLVMBuilderRef B, LLVMValueRef LHS,
LLVMModuleProviderRef
LLVMCreateModuleProviderForExistingModule(LLVMModuleRef M) {
return wrap(new ExistingModuleProvider(unwrap(M)));
return reinterpret_cast<LLVMModuleProviderRef>(M);
}
void LLVMDisposeModuleProvider(LLVMModuleProviderRef MP) {


@ -0,0 +1,18 @@
//===-- GVMaterializer.cpp - Base implementation for GV materializers -----===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Minimal implementation of the abstract interface for materializing
// GlobalValues.
//
//===----------------------------------------------------------------------===//
#include "llvm/GVMaterializer.h"
using namespace llvm;
GVMaterializer::~GVMaterializer() {}


@ -43,6 +43,19 @@ static bool removeDeadUsersOfConstant(const Constant *C) {
return true;
}
bool GlobalValue::isMaterializable() const {
return getParent()->isMaterializable(this);
}
bool GlobalValue::isDematerializable() const {
return getParent()->isDematerializable(this);
}
bool GlobalValue::Materialize(std::string *ErrInfo) {
return getParent()->Materialize(this, ErrInfo);
}
void GlobalValue::Dematerialize() {
getParent()->Dematerialize(this);
}
/// removeDeadConstantUsers - If there are any dead constant users dangling
/// off of this global value, remove them. This method is useful for clients
/// that want to check to see if a global is unused, but don't want to deal
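These forwarders give callers a per-value interface. Below is a minimal caller-side sketch, assuming F is a Function* living in a lazily loaded module; the helper name ensureBody and its error message are illustrative. It mirrors the JIT::getPointerToFunction change earlier in this commit.

    #include "llvm/Function.h"
    #include "llvm/Support/ErrorHandling.h"
    using namespace llvm;

    void ensureBody(Function *F) {
      std::string ErrMsg;
      // Materialize() returns true on error, false on success or no-op.
      if (F->isMaterializable() && F->Materialize(&ErrMsg))
        llvm_report_error("could not read function body: " + ErrMsg);
      // ... use F's basic blocks here ...
      // Give the body's memory back if the materializer supports it.
      if (F->isDematerializable())
        F->Dematerialize();
    }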


@ -15,6 +15,7 @@
#include "llvm/InstrTypes.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GVMaterializer.h"
#include "llvm/LLVMContext.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringExtras.h"
@ -56,7 +57,7 @@ template class llvm::SymbolTableListTraits<GlobalAlias, Module>;
//
Module::Module(StringRef MID, LLVMContext& C)
: Context(C), ModuleID(MID), DataLayout("") {
: Context(C), Materializer(NULL), ModuleID(MID), DataLayout("") {
ValSymTab = new ValueSymbolTable();
TypeSymTab = new TypeSymbolTable();
NamedMDSymTab = new MDSymbolTable();
@ -371,6 +372,52 @@ std::string Module::getTypeName(const Type *Ty) const {
return ""; // Must not have found anything...
}
//===----------------------------------------------------------------------===//
// Methods to control the materialization of GlobalValues in the Module.
//
void Module::setMaterializer(GVMaterializer *GVM) {
assert(!Materializer &&
"Module already has a GVMaterializer. Call MaterializeAllPermanently"
" to clear it out before setting another one.");
Materializer.reset(GVM);
}
bool Module::isMaterializable(const GlobalValue *GV) const {
if (Materializer)
return Materializer->isMaterializable(GV);
return false;
}
bool Module::isDematerializable(const GlobalValue *GV) const {
if (Materializer)
return Materializer->isDematerializable(GV);
return false;
}
bool Module::Materialize(GlobalValue *GV, std::string *ErrInfo) {
if (Materializer)
return Materializer->Materialize(GV, ErrInfo);
return false;
}
void Module::Dematerialize(GlobalValue *GV) {
if (Materializer)
return Materializer->Dematerialize(GV);
}
bool Module::MaterializeAll(std::string *ErrInfo) {
if (!Materializer)
return false;
return Materializer->MaterializeModule(this, ErrInfo);
}
bool Module::MaterializeAllPermanently(std::string *ErrInfo) {
if (MaterializeAll(ErrInfo))
return true;
Materializer.reset();
return false;
}
//===----------------------------------------------------------------------===//
// Other module related stuff.
//


@ -1,26 +0,0 @@
//===-- ModuleProvider.cpp - Base implementation for module providers -----===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Minimal implementation of the abstract interface for providing a module.
//
//===----------------------------------------------------------------------===//
#include "llvm/ModuleProvider.h"
#include "llvm/Module.h"
using namespace llvm;
/// ctor - always have a valid Module
///
ModuleProvider::ModuleProvider() : TheModule(0) { }
/// dtor - when we leave, we take our Module with us
///
ModuleProvider::~ModuleProvider() {
delete TheModule;
}


@ -16,7 +16,6 @@
#include "llvm/Pass.h"
#include "llvm/PassManager.h"
#include "llvm/Module.h"
#include "llvm/ModuleProvider.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/Support/Debug.h"


@ -18,7 +18,6 @@
#include "llvm/Support/Debug.h"
#include "llvm/Support/Timer.h"
#include "llvm/Module.h"
#include "llvm/ModuleProvider.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/raw_ostream.h"
@ -1194,15 +1193,13 @@ bool BBPassManager::doFinalization(Function &F) {
// FunctionPassManager implementation
/// Create new Function pass manager
FunctionPassManager::FunctionPassManager(ModuleProvider *P) {
FunctionPassManager::FunctionPassManager(Module *m) : M(m) {
FPM = new FunctionPassManagerImpl(0);
// FPM is the top level manager.
FPM->setTopLevelManager(FPM);
AnalysisResolver *AR = new AnalysisResolver(*FPM);
FPM->setResolver(AR);
MP = P;
}
FunctionPassManager::~FunctionPassManager() {
@ -1224,7 +1221,7 @@ void FunctionPassManager::add(Pass *P) {
///
bool FunctionPassManager::run(Function &F) {
std::string errstr;
if (MP->materializeFunction(&F, &errstr)) {
if (F.Materialize(&errstr)) {
llvm_report_error("Error reading bitcode file: " + errstr);
}
return FPM->run(F);
@ -1234,13 +1231,13 @@ bool FunctionPassManager::run(Function &F) {
/// doInitialization - Run all of the initializers for the function passes.
///
bool FunctionPassManager::doInitialization() {
return FPM->doInitialization(*MP->getModule());
return FPM->doInitialization(*M);
}
/// doFinalization - Run all of the finalizers for the function passes.
///
bool FunctionPassManager::doFinalization() {
return FPM->doFinalization(*MP->getModule());
return FPM->doFinalization(*M);
}
//===----------------------------------------------------------------------===//
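A sketch of driving the reworked FunctionPassManager, assuming M is a Module* (possibly still backed by a GVMaterializer) and with the pass list elided; runFunctionPasses and createSomePass are illustrative names, not real APIs. run() now materializes each function itself, so no provider object is involved.

    #include "llvm/Module.h"
    #include "llvm/PassManager.h"
    using namespace llvm;

    bool runFunctionPasses(Module *M) {
      FunctionPassManager FPM(M);
      // FPM.add(createSomePass());  // hypothetical passes go here
      bool Changed = FPM.doInitialization();
      for (Module::iterator F = M->begin(), E = M->end(); F != E; ++F)
        // Run over real bodies and over functions that can still be read in;
        // run() will materialize the latter before running the passes.
        if (!F->isDeclaration() || F->isMaterializable())
          Changed |= FPM.run(*F);
      Changed |= FPM.doFinalization();
      return Changed;
    }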


@ -47,7 +47,6 @@
#include "llvm/IntrinsicInst.h"
#include "llvm/Metadata.h"
#include "llvm/Module.h"
#include "llvm/ModuleProvider.h"
#include "llvm/Pass.h"
#include "llvm/PassManager.h"
#include "llvm/TypeSymbolTable.h"
@ -413,10 +412,10 @@ void Verifier::visit(Instruction &I) {
void Verifier::visitGlobalValue(GlobalValue &GV) {
Assert1(!GV.isDeclaration() ||
GV.isMaterializable() ||
GV.hasExternalLinkage() ||
GV.hasDLLImportLinkage() ||
GV.hasExternalWeakLinkage() ||
GV.hasGhostLinkage() ||
(isa<GlobalAlias>(GV) &&
(GV.hasLocalLinkage() || GV.hasWeakLinkage())),
"Global is external, but doesn't have external or dllimport or weak linkage!",
@ -648,9 +647,11 @@ void Verifier::visitFunction(Function &F) {
"Function takes metadata but isn't an intrinsic", I, &F);
}
if (F.isDeclaration()) {
if (F.isMaterializable()) {
// Function has a body somewhere we can't see.
} else if (F.isDeclaration()) {
Assert1(F.hasExternalLinkage() || F.hasDLLImportLinkage() ||
F.hasExternalWeakLinkage() || F.hasGhostLinkage(),
F.hasExternalWeakLinkage(),
"invalid linkage type for function declaration", &F);
} else {
// Verify that this function (which has a body) is not named "llvm.*". It
@ -1913,12 +1914,10 @@ bool llvm::verifyFunction(const Function &f, VerifierFailureAction action) {
Function &F = const_cast<Function&>(f);
assert(!F.isDeclaration() && "Cannot verify external functions");
ExistingModuleProvider MP(F.getParent());
FunctionPassManager FPM(&MP);
FunctionPassManager FPM(F.getParent());
Verifier *V = new Verifier(action);
FPM.add(V);
FPM.run(F);
MP.releaseModule();
return V->Broken;
}


@ -15,7 +15,6 @@
#include "llvm/LLVMContext.h"
#include "llvm/Module.h"
#include "llvm/ModuleProvider.h"
#include "llvm/PassManager.h"
#include "llvm/Pass.h"
#include "llvm/ADT/Triple.h"
@ -334,8 +333,7 @@ int main(int argc, char **argv) {
PM.run(mod);
} else {
// Build up all of the passes that we want to do to the module.
ExistingModuleProvider Provider(M.release());
FunctionPassManager Passes(&Provider);
FunctionPassManager Passes(M.get());
// Add the target data from the target machine, if it exists, or the module.
if (const TargetData *TD = Target.getTargetData())


@ -15,7 +15,6 @@
#include "llvm/LLVMContext.h"
#include "llvm/Module.h"
#include "llvm/ModuleProvider.h"
#include "llvm/Type.h"
#include "llvm/Bitcode/ReaderWriter.h"
#include "llvm/CodeGen/LinkAllCodegenComponents.h"
@ -110,28 +109,28 @@ int main(int argc, char **argv, char * const *envp) {
// Load the bitcode...
std::string ErrorMsg;
ModuleProvider *MP = NULL;
Module *Mod = NULL;
if (MemoryBuffer *Buffer = MemoryBuffer::getFileOrSTDIN(InputFile,&ErrorMsg)){
MP = getBitcodeModuleProvider(Buffer, Context, &ErrorMsg);
if (!MP) delete Buffer;
Mod = getLazyBitcodeModule(Buffer, Context, &ErrorMsg);
if (!Mod) delete Buffer;
}
if (!MP) {
if (!Mod) {
errs() << argv[0] << ": error loading program '" << InputFile << "': "
<< ErrorMsg << "\n";
exit(1);
}
// Get the module as the MP could go away once EE takes over.
Module *Mod = NoLazyCompilation
? MP->materializeModule(&ErrorMsg) : MP->getModule();
if (!Mod) {
errs() << argv[0] << ": bitcode didn't read correctly.\n";
errs() << "Reason: " << ErrorMsg << "\n";
exit(1);
// If not jitting lazily, load the whole bitcode file eagerly too.
if (NoLazyCompilation) {
if (Mod->MaterializeAllPermanently(&ErrorMsg)) {
errs() << argv[0] << ": bitcode didn't read correctly.\n";
errs() << "Reason: " << ErrorMsg << "\n";
exit(1);
}
}
EngineBuilder builder(MP);
EngineBuilder builder(Mod);
builder.setErrorStr(&ErrorMsg);
builder.setEngineKind(ForceInterpreter
? EngineKind::Interpreter


@ -21,7 +21,6 @@
#include "llvm/Linker.h"
#include "llvm/LLVMContext.h"
#include "llvm/Module.h"
#include "llvm/ModuleProvider.h"
#include "llvm/PassManager.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Triple.h"
@ -392,8 +391,7 @@ bool LTOCodeGenerator::generateAssemblyCode(formatted_raw_ostream& out,
// Make sure everything is still good.
passes.add(createVerifierPass());
FunctionPassManager* codeGenPasses =
new FunctionPassManager(new ExistingModuleProvider(mergedModule));
FunctionPassManager* codeGenPasses = new FunctionPassManager(mergedModule);
codeGenPasses->add(new TargetData(*_target->getTargetData()));


@ -17,7 +17,6 @@
#include "llvm/Constants.h"
#include "llvm/LLVMContext.h"
#include "llvm/Module.h"
#include "llvm/ModuleProvider.h"
#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Bitcode/ReaderWriter.h"
@ -69,14 +68,13 @@ bool LTOModule::isBitcodeFileForTarget(const char* path,
// takes ownership of buffer
bool LTOModule::isTargetMatch(MemoryBuffer* buffer, const char* triplePrefix)
{
OwningPtr<ModuleProvider> mp(getBitcodeModuleProvider(buffer,
getGlobalContext()));
// on success, mp owns buffer and both are deleted at end of this method
if (!mp) {
OwningPtr<Module> m(getLazyBitcodeModule(buffer, getGlobalContext()));
// on success, m owns buffer and both are deleted at end of this method
if (!m) {
delete buffer;
return false;
}
std::string actualTarget = mp->getModule()->getTargetTriple();
std::string actualTarget = m->getTargetTriple();
return (strncmp(actualTarget.c_str(), triplePrefix,
strlen(triplePrefix)) == 0);
}


@ -14,7 +14,6 @@
#include "llvm/LLVMContext.h"
#include "llvm/Module.h"
#include "llvm/ModuleProvider.h"
#include "llvm/PassManager.h"
#include "llvm/CallGraphSCCPass.h"
#include "llvm/Bitcode/ReaderWriter.h"
@ -427,7 +426,7 @@ int main(int argc, char **argv) {
FunctionPassManager *FPasses = NULL;
if (OptLevelO1 || OptLevelO2 || OptLevelO3) {
FPasses = new FunctionPassManager(new ExistingModuleProvider(M.get()));
FPasses = new FunctionPassManager(M.get());
if (TD)
FPasses->add(new TargetData(*TD));
}


@ -12,7 +12,6 @@
#include "llvm/LLVMContext.h"
#include "llvm/Instructions.h"
#include "llvm/Module.h"
#include "llvm/ModuleProvider.h"
#include "llvm/ADT/OwningPtr.h"
#include "llvm/CodeGen/MachineCodeInfo.h"
#include "llvm/ExecutionEngine/JIT.h"


@ -23,7 +23,6 @@
#include "llvm/GlobalVariable.h"
#include "llvm/LLVMContext.h"
#include "llvm/Module.h"
#include "llvm/ModuleProvider.h"
#include "llvm/Support/IRBuilder.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/SourceMgr.h"
@ -194,11 +193,10 @@ class JITTest : public testing::Test {
protected:
virtual void SetUp() {
M = new Module("<main>", Context);
MP = new ExistingModuleProvider(M);
RJMM = new RecordingJITMemoryManager;
RJMM->setPoisonMemory(true);
std::string Error;
TheJIT.reset(EngineBuilder(MP).setEngineKind(EngineKind::JIT)
TheJIT.reset(EngineBuilder(M).setEngineKind(EngineKind::JIT)
.setJITMemoryManager(RJMM)
.setErrorStr(&Error).create());
ASSERT_TRUE(TheJIT.get() != NULL) << Error;
@ -209,8 +207,7 @@ class JITTest : public testing::Test {
}
LLVMContext Context;
Module *M; // Owned by MP.
ModuleProvider *MP; // Owned by ExecutionEngine.
Module *M; // Owned by ExecutionEngine.
RecordingJITMemoryManager *RJMM;
OwningPtr<ExecutionEngine> TheJIT;
};
@ -223,14 +220,13 @@ class JITTest : public testing::Test {
TEST(JIT, GlobalInFunction) {
LLVMContext context;
Module *M = new Module("<main>", context);
ExistingModuleProvider *MP = new ExistingModuleProvider(M);
JITMemoryManager *MemMgr = JITMemoryManager::CreateDefaultMemManager();
// Tell the memory manager to poison freed memory so that accessing freed
// memory is more easily tested.
MemMgr->setPoisonMemory(true);
std::string Error;
OwningPtr<ExecutionEngine> JIT(EngineBuilder(MP)
OwningPtr<ExecutionEngine> JIT(EngineBuilder(M)
.setEngineKind(EngineKind::JIT)
.setErrorStr(&Error)
.setJITMemoryManager(MemMgr)
@ -428,7 +424,8 @@ TEST_F(JITTest, ModuleDeletion) {
"} ");
Function *func = M->getFunction("main");
TheJIT->getPointerToFunction(func);
TheJIT->deleteModuleProvider(MP);
TheJIT->removeModule(M);
delete M;
SmallPtrSet<const void*, 2> FunctionsDeallocated;
for (unsigned i = 0, e = RJMM->deallocateFunctionBodyCalls.size();
@ -647,36 +644,70 @@ std::string AssembleToBitcode(LLVMContext &Context, const char *Assembly) {
}
// Returns a newly-created ExecutionEngine that reads the bitcode in 'Bitcode'
// lazily. The associated ModuleProvider (owned by the ExecutionEngine) is
// returned in MP. Both will be NULL on an error. Bitcode must live at least
// as long as the ExecutionEngine.
// lazily. The associated Module (owned by the ExecutionEngine) is returned in
// M. Both will be NULL on an error. Bitcode must live at least as long as the
// ExecutionEngine.
ExecutionEngine *getJITFromBitcode(
LLVMContext &Context, const std::string &Bitcode, ModuleProvider *&MP) {
LLVMContext &Context, const std::string &Bitcode, Module *&M) {
// c_str() is null-terminated like MemoryBuffer::getMemBuffer requires.
MemoryBuffer *BitcodeBuffer =
MemoryBuffer::getMemBuffer(Bitcode.c_str(),
Bitcode.c_str() + Bitcode.size(),
"Bitcode for test");
std::string errMsg;
MP = getBitcodeModuleProvider(BitcodeBuffer, Context, &errMsg);
if (MP == NULL) {
M = getLazyBitcodeModule(BitcodeBuffer, Context, &errMsg);
if (M == NULL) {
ADD_FAILURE() << errMsg;
delete BitcodeBuffer;
return NULL;
}
ExecutionEngine *TheJIT = EngineBuilder(MP)
ExecutionEngine *TheJIT = EngineBuilder(M)
.setEngineKind(EngineKind::JIT)
.setErrorStr(&errMsg)
.create();
if (TheJIT == NULL) {
ADD_FAILURE() << errMsg;
delete MP;
MP = NULL;
delete M;
M = NULL;
return NULL;
}
return TheJIT;
}
TEST(LazyLoadedJITTest, MaterializableAvailableExternallyFunctionIsntCompiled) {
LLVMContext Context;
const std::string Bitcode =
AssembleToBitcode(Context,
"define available_externally i32 "
" @JITTest_AvailableExternallyFunction() { "
" ret i32 7 "
"} "
" "
"define i32 @func() { "
" %result = tail call i32 "
" @JITTest_AvailableExternallyFunction() "
" ret i32 %result "
"} ");
ASSERT_FALSE(Bitcode.empty()) << "Assembling failed";
Module *M;
OwningPtr<ExecutionEngine> TheJIT(getJITFromBitcode(Context, Bitcode, M));
ASSERT_TRUE(TheJIT.get()) << "Failed to create JIT.";
TheJIT->DisableLazyCompilation(true);
Function *funcIR = M->getFunction("func");
Function *availableFunctionIR =
M->getFunction("JITTest_AvailableExternallyFunction");
// Double-check that the available_externally function is still unmaterialized
// when getPointerToFunction needs to find out if it's available_externally.
EXPECT_TRUE(availableFunctionIR->isMaterializable());
int32_t (*func)() = reinterpret_cast<int32_t(*)()>(
(intptr_t)TheJIT->getPointerToFunction(funcIR));
EXPECT_EQ(42, func()) << "func should return 42 from the static version,"
<< " not 7 from the IR version.";
}
TEST(LazyLoadedJITTest, EagerCompiledRecursionThroughGhost) {
LLVMContext Context;
const std::string Bitcode =
@ -697,16 +728,15 @@ TEST(LazyLoadedJITTest, EagerCompiledRecursionThroughGhost) {
" ret i32 %result "
"} ");
ASSERT_FALSE(Bitcode.empty()) << "Assembling failed";
ModuleProvider *MP;
OwningPtr<ExecutionEngine> TheJIT(getJITFromBitcode(Context, Bitcode, MP));
Module *M;
OwningPtr<ExecutionEngine> TheJIT(getJITFromBitcode(Context, Bitcode, M));
ASSERT_TRUE(TheJIT.get()) << "Failed to create JIT.";
TheJIT->DisableLazyCompilation(true);
Module *M = MP->getModule();
Function *recur1IR = M->getFunction("recur1");
Function *recur2IR = M->getFunction("recur2");
EXPECT_TRUE(recur1IR->hasNotBeenReadFromBitcode());
EXPECT_TRUE(recur2IR->hasNotBeenReadFromBitcode());
EXPECT_TRUE(recur1IR->isMaterializable());
EXPECT_TRUE(recur2IR->isMaterializable());
int32_t (*recur1)(int32_t) = reinterpret_cast<int32_t(*)(int32_t)>(
(intptr_t)TheJIT->getPointerToFunction(recur1IR));