Merge branch 'master' into feature-fix-force-recovery

# Conflicts:
#	fdbclient/ManagementAPI.actor.cpp
#	fdbserver/ClusterController.actor.cpp
#	fdbserver/workloads/KillRegion.actor.cpp
This commit is contained in:
Evan Tschannen 2019-02-18 17:09:06 -08:00
commit 065a45e05f
222 changed files with 6830 additions and 2763 deletions

7
.gitignore vendored
View File

@ -16,7 +16,9 @@ packaging/msi/FDBInstaller.msi
*.pom
bindings/java/pom*.xml
bindings/java/src*/main/overview.html
bindings/java/src*/main/com/apple/foundationdb/*Options.java
bindings/java/src*/main/com/apple/foundationdb/NetworkOptions.java
bindings/java/src*/main/com/apple/foundationdb/DatabaseOptions.java
bindings/java/src*/main/com/apple/foundationdb/TransactionOptions.java
bindings/java/src*/main/com/apple/foundationdb/StreamingMode.java
bindings/java/src*/main/com/apple/foundationdb/MutationType.java
bindings/java/src*/main/com/apple/foundationdb/ConflictRangeType.java
@ -49,6 +51,7 @@ bindings/go/godoc
bindings/java/.classstamp*
bindings/java/classes*/
bindings/java/javadoc*/
packaging/docker/website
# Testing and logging
packaging/msi/*.log
@ -73,6 +76,8 @@ foundationdb.VC.db
foundationdb.VC.VC.opendb
ipch/
compile_commands.json
flow/actorcompiler/obj
flow/coveragetool/obj
# Temporary and user configuration files
*~

View File

@ -17,11 +17,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.
cmake_minimum_required(VERSION 3.12)
project(fdb
project(foundationdb
VERSION 6.1.0
DESCRIPTION "FoundationDB is a scalable, fault-tolerant, ordered key-value store with full ACID transactions."
HOMEPAGE_URL "http://www.foundationdb.org/"
LANGUAGES C CXX ASM Java)
LANGUAGES ASM C CXX)
set(CMAKE_MODULE_PATH "${CMAKE_MODULE_PATH};${PROJECT_SOURCE_DIR}/cmake")
message (STATUS "${PROJECT_SOURCE_DIR} ${PROJECT_BINARY_DIR}")
@ -39,23 +39,26 @@ endif()
set(EXECUTABLE_OUTPUT_PATH ${PROJECT_BINARY_DIR}/bin)
set(LIBRARY_OUTPUT_PATH ${PROJECT_BINARY_DIR}/lib)
set(OPEN_FOR_IDE OFF CACHE BOOL "Open this in an IDE (won't compile/link)")
################################################################################
# Packages used for bindings
################################################################################
set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_CURRENT_SOURCE_DIR}/cmake/")
find_package(PythonInterp 3.4 REQUIRED)
set(Python_ADDITIONAL_VERSIONS 3.4 3.5 3.5)
find_package(PythonLibs 3.4 REQUIRED)
################################################################################
# Compiler configuration
################################################################################
include(ConfigureCompiler)
################################################################################
# Compiler configuration
################################################################################
include(FDBComponents)
################################################################################
# Get repository information
################################################################################
@ -102,10 +105,15 @@ configure_file(${CMAKE_CURRENT_SOURCE_DIR}/fdbclient/versions.h.cmake ${CMAKE_CU
# Flow
################################################################################
# Flow and other tools are written in C# - so we need that dependency
include(EnableCsharp)
# First thing we need is the actor compiler - and to compile and run the
# actor compiler, we need mono
include(CompileActorCompiler)
include(CompileCoverageTool)
# with the actor compiler, we can now make the flow commands available
include(FlowCommands)
@ -115,50 +123,6 @@ include(FlowCommands)
include(CompileVexillographer)
# This macro can be used to install symlinks, which turns out to be
# non-trivial due to CMake version differences and limitations on how
# files can be installed when building binary packages.
#
# The rule for binary packaging is that files (including symlinks) must
# be installed with the standard CMake install() macro.
#
# The rule for non-binary packaging is that CMake 2.6 cannot install()
# symlinks, but can create the symlink at install-time via scripting.
# Though, we assume that CMake 2.6 isn't going to be used to generate
# packages because versions later than 2.8.3 are superior for that purpose.
#
# _filepath: the absolute path to the file to symlink
# _sympath: absolute path of the installed symlink
macro(InstallSymlink _filepath _sympath)
get_filename_component(_symname ${_sympath} NAME)
get_filename_component(_installdir ${_sympath} PATH)
if (BINARY_PACKAGING_MODE)
execute_process(COMMAND "${CMAKE_COMMAND}" -E create_symlink
${_filepath}
${CMAKE_CURRENT_BINARY_DIR}/${_symname})
install(FILES ${CMAKE_CURRENT_BINARY_DIR}/${_symname}
DESTINATION ${_installdir}
COMPONENT clients)
else ()
# scripting the symlink installation at install time should work
# for CMake 2.6.x and 2.8.x
install(CODE "
if (\"\$ENV{DESTDIR}\" STREQUAL \"\")
execute_process(COMMAND \"${CMAKE_COMMAND}\" -E create_symlink
${_filepath}
${_installdir}/${_symname})
else ()
execute_process(COMMAND \"${CMAKE_COMMAND}\" -E create_symlink
${_filepath}
\$ENV{DESTDIR}/${_installdir}/${_symname})
endif ()
"
COMPONENT clients)
endif ()
endmacro(InstallSymlink)
################################################################################
# Generate config file
################################################################################
@ -194,16 +158,31 @@ set(SEED "0x${SEED_}" CACHE STRING "Random seed for testing")
################################################################################
include(CompileBoost)
if(WITH_TLS)
add_subdirectory(FDBLibTLS)
endif()
add_subdirectory(flow)
add_subdirectory(fdbrpc)
add_subdirectory(fdbclient)
add_subdirectory(fdbserver)
add_subdirectory(fdbcli)
add_subdirectory(fdbmonitor)
if(NOT WIN32)
add_subdirectory(fdbmonitor)
else()
add_subdirectory(fdbservice)
endif()
add_subdirectory(bindings)
add_subdirectory(fdbbackup)
add_subdirectory(tests)
if(WITH_DOCUMENTATION)
add_subdirectory(documentation)
endif()
include(CPack)
if(WIN32)
add_subdirectory(packaging/msi)
else()
include(CPack)
endif()
################################################################################
# process compile commands for IDE
@ -219,3 +198,11 @@ if (CMAKE_EXPORT_COMPILE_COMMANDS)
)
add_custom_target(procossed_compile_commands ALL DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/compile_commands.json ${CMAKE_CURRENT_BINARY_DIR}/compile_commands.json)
endif()
################################################################################
# Inform user which components we are going to build
################################################################################
print_components()
message(STATUS "CPACK_COMPONENTS_ALL ${CPACK_COMPONENTS_ALL}")

13
FDBLibTLS/CMakeLists.txt Normal file
View File

@ -0,0 +1,13 @@
# FDBLibTLS: the TLS plugin library, built on LibreSSL.
# Sources (and headers, so IDEs pick them up) are listed directly on the target.
add_library(FDBLibTLS
  FDBLibTLSPlugin.cpp
  FDBLibTLSPlugin.h
  FDBLibTLSPolicy.cpp
  FDBLibTLSPolicy.h
  FDBLibTLSSession.cpp
  FDBLibTLSSession.h
  FDBLibTLSVerify.cpp
  FDBLibTLSVerify.h
  ReferenceCounted.h)
# PUBLIC: anything linking FDBLibTLS also needs the LibreSSL and boost
# usage requirements (headers / libraries) propagated to it.
target_link_libraries(FDBLibTLS PUBLIC LibreSSL boost_target)

166
README.md
View File

@ -27,7 +27,13 @@ Developers interested in using the FoundationDB store for an application can get
Developers on an OS for which there is no binary package, or who would like to start hacking on the code, can get started by compiling from source.
#### macOS
Currently there are two build systems: a collection of Makefiles and a
CMake-based one. Both of them should work for most users, and CMake will
eventually become the only build system available.
## Makefile
#### MacOS
1. Check out this repo on your Mac.
1. Install the Xcode command-line tools.
@ -63,3 +69,161 @@ Developers on a OS for which there is no binary package, or who would like to st
1. Run `make`.
This will build the fdbserver binary and the python bindings. If you want to build our other bindings, you will need to install a runtime for the language whose binding you want to build. Each binding has an `.mk` file which provides specific targets for that binding.
## CMake
FoundationDB is currently in the process of migrating the build system to cmake.
The CMake build system is currently used by several developers. However, most of
the testing and packaging infrastructure still uses the old VisualStudio+Make
based build system.
To build with CMake, generally the following is required (works on Linux and
Mac OS - for Windows see below):
1. Check out this repository.
2. Install cmake Version 3.12 or higher [CMake](https://cmake.org/)
1. Download version 1.67 of [Boost](https://sourceforge.net/projects/boost/files/boost/1.67.0/).
1. Unpack boost (you don't need to compile it)
1. Install [Mono](http://www.mono-project.com/download/stable/).
1. Install a [JDK](http://www.oracle.com/technetwork/java/javase/downloads/index.html). FoundationDB currently builds with Java 8.
1. Create a build directory (you can have the build directory anywhere you
like): `mkdir build`
1. `cd build`
1. `cmake -DBOOST_ROOT=<PATH_TO_BOOST> <PATH_TO_FOUNDATIONDB_DIRECTORY>`
1. `make`
CMake will try to find its dependencies. However, for LibreSSL this can often be
problematic (especially if OpenSSL is installed as well). For that we recommend
passing the argument `-DLibreSSL_ROOT` to cmake. So, for example, if
LibreSSL is installed under /usr/local/libressl-2.8.3, you should call cmake like
this:
```
cmake -DLibreSSL_ROOT=/usr/local/libressl-2.8.3/ ../foundationdb
```
FoundationDB will build just fine without LibreSSL, however, the resulting
binaries won't support TLS connections.
### Language Bindings
The language bindings that are supported by cmake will have a corresponding
`README.md` file in the corresponding `bindings/lang` directory.
Generally, cmake will build all language bindings for which it can find all
necessary dependencies. After each successful cmake run, cmake will tell you
which language bindings it is going to build.
### Generating compile_commands.json
CMake can build a compilation database for you. However, the default generated
one is not too useful as it operates on the generated files. When running make,
the build system will create another `compile_commands.json` file in the source
directory. This can then be used for tools like
[CCLS](https://github.com/MaskRay/ccls),
[CQuery](https://github.com/cquery-project/cquery), etc. This way you can get
code-completion and code navigation in flow. It is not yet perfect (it will show
a few errors) but we are constantly working on improving the development experience.
### Using IDEs
CMake has built in support for a number of popular IDEs. However, because flow
files are precompiled with the actor compiler, an IDE will not be very useful as
a user will only be presented with the generated code - which is not what she
wants to edit and get IDE features for.
The good news is, that it is possible to generate project files for editing
flow with a supported IDE. There is a cmake option called `OPEN_FOR_IDE` which
will generate a project which can be opened in an IDE for editing. You won't be
able to build this project, but you will be able to edit the files and get most
edit and navigation features your IDE supports.
For example, if you want to use Xcode to make changes to FoundationDB you can
create an Xcode project with the following command:
```
cmake -G Xcode -DOPEN_FOR_IDE=ON <FDB_SOURCE_DIRECTORY>
```
You should create a second build-directory which you will use for building
(probably with make or ninja) and debugging.
### Linux
There are no special requirements for Linux. However, we are currently working
on a Docker-based build as well.
If you want to create a package you have to tell cmake what platform it is for.
And then you can build by simply calling `cpack`. So for debian, call:
```
cmake -DINSTALL_LAYOUT=DEB <FDB_SOURCE_DIR>
make
cpack
```
For RPM simply replace `DEB` with `RPM`.
### MacOS
The build under macOS will work the same way as on Linux. To get LibreSSL and boost you
can use [Homebrew](https://brew.sh/). LibreSSL will not be installed in
`/usr/local`; instead it will stay in `/usr/local/Cellar`. So the cmake command
will look something like this:
```
cmake -DLibreSSL_ROOT=/usr/local/Cellar/libressl/2.8.3 <PATH_TO_FOUNDATIONDB_SOURCE>
```
To generate an installable package, you have to call CMake with the corresponding
arguments and then use cpack to generate the package:
```
cmake -DINSTALL_LAYOUT=OSX <FDB_SOURCE_DIR>
make
cpack
```
### Windows
Under Windows, the build instructions are very similar, with the main difference
that Visual Studio is used to compile.
1. Install Visual Studio 2017 (Community Edition is tested)
2. Install cmake Version 3.12 or higher [CMake](https://cmake.org/)
1. Download version 1.67 of [Boost](https://sourceforge.net/projects/boost/files/boost/1.67.0/).
1. Unpack boost (you don't need to compile it)
1. Install [Mono](http://www.mono-project.com/download/stable/).
1. Install a [JDK](http://www.oracle.com/technetwork/java/javase/downloads/index.html). FoundationDB currently builds with Java 8.
1. Set JAVA_HOME to the unpacked location and JAVA_COMPILE to
$JAVA_HOME/bin/javac
1. (Optional) Install [WIX](http://wixtoolset.org/). Without it Visual Studio
won't build the Windows installer.
1. Create a build directory (you can have the build directory anywhere you
like): `mkdir build`
1. `cd build`
1. `cmake -G "Visual Studio 15 2017 Win64" -DBOOST_ROOT=<PATH_TO_BOOST> <PATH_TO_FOUNDATIONDB_DIRECTORY>`
1. This should succeed. In which case you can build using msbuild:
`msbuild /p:Configuration=Release fdb.sln`. You can also open the resulting
solution in Visual Studio and compile from there. However, be aware that
using Visual Studio for development is currently not supported as Visual
Studio will only know about the generated files.
If you want TLS support to be enabled under Windows you currently have to build
and install LibreSSL yourself as the newer LibreSSL versions are not provided
for download from the LibreSSL homepage. To build LibreSSL:
1. Download and unpack libressl (>= 2.8.2)
2. `cd libressl-2.8.2`
3. `mkdir build`
4. `cd build`
5. `cmake -G "Visual Studio 15 2017 Win64" ..`
6. Open the generated `LibreSSL.sln` in Visual Studio as administrator (this is
necessary for the install)
7. Build the `INSTALL` project in `Release` mode
This will install LibreSSL under `C:\Program Files\LibreSSL`. After that `cmake`
will automatically find it and build with TLS support.
If you installed WIX before running `cmake` you should find the
`FDBInstaller.msi` in your build directory under `packaging/msi`.

View File

@ -1,3 +1,12 @@
add_subdirectory(c)
add_subdirectory(flow)
add_subdirectory(python)
add_subdirectory(java)
if(WITH_JAVA)
add_subdirectory(java)
endif()
if(WITH_GO)
add_subdirectory(go)
endif()
if(WITH_RUBY)
add_subdirectory(ruby)
endif()

View File

@ -5,39 +5,51 @@ set(FDB_C_SRCS
file(MAKE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/foundationdb)
set(platform)
set(asm_file ${CMAKE_CURRENT_BINARY_DIR}/fdb_c.g.S)
set(platform "linux")
if(APPLE)
set(platform "osx")
else()
set(platform "linux")
elseif(WIN32)
set(platform "windows")
set(asm_file ${CMAKE_CURRENT_BINARY_DIR}/fdb_c.g.asm)
endif()
add_custom_command(OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/fdb_c.g.S
${CMAKE_CURRENT_BINARY_DIR}/fdb_c_function_pointers.g.h
COMMAND ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/generate_asm.py ${platform}
add_custom_command(OUTPUT ${asm_file} ${CMAKE_CURRENT_BINARY_DIR}/fdb_c_function_pointers.g.h
COMMAND $<TARGET_FILE:Python::Interpreter> ${CMAKE_CURRENT_SOURCE_DIR}/generate_asm.py ${platform}
${CMAKE_CURRENT_SOURCE_DIR}/fdb_c.cpp
${CMAKE_CURRENT_BINARY_DIR}/fdb_c.g.S
${asm_file}
${CMAKE_CURRENT_BINARY_DIR}/fdb_c_function_pointers.g.h
DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/generate_asm.py ${CMAKE_CURRENT_SOURCE_DIR}/fdb_c.cpp
COMMENT "Generate C bindings")
add_custom_target(fdb_c_generated DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/fdb_c.g.S
add_custom_target(fdb_c_generated DEPENDS ${asm_file}
${CMAKE_CURRENT_BINARY_DIR}/fdb_c_function_pointers.g.h)
add_custom_command(OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/foundationdb/fdb_c_options.g.h
COMMAND ${MONO_EXECUTABLE} ${VEXILLOGRAPHER_EXE} ${CMAKE_SOURCE_DIR}/fdbclient/vexillographer/fdb.options c ${CMAKE_CURRENT_BINARY_DIR}/foundationdb/fdb_c_options.g.h
DEPENDS ${CMAKE_SOURCE_DIR}/fdbclient/vexillographer/fdb.options vexillographer
COMMENT "Generate C options")
add_custom_target(fdb_c_options DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/foundationdb/fdb_c_options.g.h)
vexillographer_compile(TARGET fdb_c_options LANG c OUT ${CMAKE_CURRENT_BINARY_DIR}/foundationdb/fdb_c_options.g.h
OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/foundationdb/fdb_c_options.g.h)
include(GenerateExportHeader)
add_library(fdb_c SHARED ${FDB_C_SRCS} ${CMAKE_CURRENT_BINARY_DIR}/fdb_c.g.S)
add_library(fdb_c SHARED ${FDB_C_SRCS} ${asm_file})
add_dependencies(fdb_c fdb_c_generated fdb_c_options)
target_link_libraries(fdb_c PUBLIC fdbclient)
target_include_directories(fdb_c PUBLIC
${CMAKE_CURRENT_BINARY_DIR}
${CMAKE_CURRENT_SOURCE_DIR}
${CMAKE_CURRENT_BINARY_DIR}/foundationdb)
if(WIN32)
enable_language(ASM_MASM)
set_property(SOURCE ${asm_file} PROPERTY LANGUAGE ASM_MASM)
endif()
# The tests don't build on windows
if(NOT WIN32)
add_executable(fdb_c_performance_test test/performance_test.c test/test.h)
target_link_libraries(fdb_c_performance_test PRIVATE fdb_c)
add_executable(fdb_c_ryw_benchmark test/ryw_benchmark.c test/test.h)
target_link_libraries(fdb_c_ryw_benchmark PRIVATE fdb_c)
endif()
# TODO: re-enable once the old vcxproj-based build system is removed.
#generate_export_header(fdb_c EXPORT_MACRO_NAME "DLLEXPORT"
# EXPORT_FILE_NAME ${CMAKE_CURRENT_BINARY_DIR}/foundationdb/fdb_c_export.h)

View File

@ -19,6 +19,7 @@
*/
#define FDB_API_VERSION 610
#define FDB_INCLUDE_LEGACY_TYPES
#include "fdbclient/MultiVersionTransaction.h"
#include "foundationdb/fdb_c.h"
@ -31,17 +32,18 @@ int g_api_version = 0;
*
* type mapping:
* FDBFuture -> ThreadSingleAssignmentVarBase
* FDBCluster -> char
* FDBDatabase -> IDatabase
* FDBTransaction -> ITransaction
*/
#define TSAVB(f) ((ThreadSingleAssignmentVarBase*)(f))
#define TSAV(T, f) ((ThreadSingleAssignmentVar<T>*)(f))
#define CLUSTER(c) ((char*)c)
#define DB(d) ((IDatabase*)d)
#define TXN(t) ((ITransaction*)t)
// Legacy (pre API version 610)
#define CLUSTER(c) ((char*)c)
/*
* While we could just use the MultiVersionApi instance directly, this #define allows us to swap in any other IClientApi instance (e.g. from ThreadSafeApi)
*/
@ -132,16 +134,6 @@ fdb_error_t fdb_add_network_thread_completion_hook(void (*hook)(void*), void *ho
CATCH_AND_RETURN( API->addNetworkThreadCompletionHook(hook, hook_parameter); );
}
extern "C" DLLEXPORT
FDBFuture* fdb_cluster_configure_database( FDBCluster* c, int config_type,
int config_mode, uint8_t const* db_name,
int db_name_length )
{
// Obsolete, but needed for linker compatibility with api version 12 and below
return (FDBFuture*)ThreadFuture<Void>(client_invalid_operation()).extractPtr();
}
extern "C" DLLEXPORT
void fdb_future_cancel( FDBFuture* f ) {
CATCH_AND_DIE(
@ -235,14 +227,14 @@ fdb_error_t fdb_future_get_key( FDBFuture* f, uint8_t const** out_key,
}
extern "C" DLLEXPORT
fdb_error_t fdb_future_get_cluster( FDBFuture* f, FDBCluster** out_cluster ) {
fdb_error_t fdb_future_get_cluster_v609( FDBFuture* f, FDBCluster** out_cluster ) {
CATCH_AND_RETURN(
*out_cluster = (FDBCluster*)
( (TSAV( char*, f )->get() ) ); );
}
extern "C" DLLEXPORT
fdb_error_t fdb_future_get_database( FDBFuture* f, FDBDatabase** out_database ) {
fdb_error_t fdb_future_get_database_v609( FDBFuture* f, FDBDatabase** out_database ) {
CATCH_AND_RETURN(
*out_database = (FDBDatabase*)
( (TSAV( Reference<IDatabase>, f )->get() ).extractPtr() ); );
@ -294,7 +286,7 @@ fdb_error_t fdb_future_get_string_array(
}
extern "C" DLLEXPORT
FDBFuture* fdb_create_cluster( const char* cluster_file_path ) {
FDBFuture* fdb_create_cluster_v609( const char* cluster_file_path ) {
char *path;
if(cluster_file_path) {
path = new char[strlen(cluster_file_path) + 1];
@ -308,7 +300,7 @@ FDBFuture* fdb_create_cluster( const char* cluster_file_path ) {
}
extern "C" DLLEXPORT
fdb_error_t fdb_cluster_set_option( FDBCluster* c,
fdb_error_t fdb_cluster_set_option_v609( FDBCluster* c,
FDBClusterOption option,
uint8_t const* value,
int value_length )
@ -318,19 +310,32 @@ fdb_error_t fdb_cluster_set_option( FDBCluster* c,
}
extern "C" DLLEXPORT
void fdb_cluster_destroy( FDBCluster* c ) {
void fdb_cluster_destroy_v609( FDBCluster* c ) {
CATCH_AND_DIE( delete[] CLUSTER(c); );
}
extern "C" DLLEXPORT
FDBFuture* fdb_cluster_create_database( FDBCluster* c, uint8_t const* db_name,
FDBFuture* fdb_cluster_create_database_v609( FDBCluster* c, uint8_t const* db_name,
int db_name_length )
{
if(strncmp((const char*)db_name, "DB", db_name_length) != 0) {
return (FDBFuture*)ThreadFuture<Reference<IDatabase>>(invalid_database_name()).extractPtr();
}
return (FDBFuture*) API->createDatabase(CLUSTER(c)).extractPtr();
FDBDatabase *db;
fdb_error_t err = fdb_create_database(CLUSTER(c), &db);
if(err) {
return (FDBFuture*)ThreadFuture<Reference<IDatabase>>(Error(err)).extractPtr();
}
return (FDBFuture*)ThreadFuture<Reference<IDatabase>>(Reference<IDatabase>(DB(db))).extractPtr();
}
extern "C" DLLEXPORT
fdb_error_t fdb_create_database( const char* cluster_file_path, FDBDatabase** out_database ) {
CATCH_AND_RETURN(
*out_database = (FDBDatabase*)API->createDatabase( cluster_file_path ? cluster_file_path : "" ).extractPtr();
);
}
extern "C" DLLEXPORT
@ -663,6 +668,12 @@ fdb_error_t fdb_select_api_version_impl( int runtime_version, int header_version
// Versioned API changes -- descending order by version (new changes at top)
// FDB_API_CHANGED( function, ver ) means there is a new implementation as of ver, and a function function_(ver-1) is the old implementation
// FDB_API_REMOVED( function, ver ) means the function was removed as of ver, and function_(ver-1) is the old implementation
FDB_API_REMOVED( fdb_create_cluster, 610 );
FDB_API_REMOVED( fdb_cluster_create_database, 610 );
FDB_API_REMOVED( fdb_cluster_set_option, 610 );
FDB_API_REMOVED( fdb_cluster_destroy, 610 );
FDB_API_REMOVED( fdb_future_get_cluster, 610 );
FDB_API_REMOVED( fdb_future_get_database, 610 );
FDB_API_CHANGED( fdb_future_get_error, 23 );
FDB_API_REMOVED( fdb_future_is_error, 23 );
FDB_API_CHANGED( fdb_future_get_keyvalue_array, 14 );

View File

@ -62,7 +62,6 @@ extern "C" {
/* Pointers to these opaque types represent objects in the FDB API */
typedef struct FDB_future FDBFuture;
typedef struct FDB_cluster FDBCluster;
typedef struct FDB_database FDBDatabase;
typedef struct FDB_transaction FDBTransaction;
@ -100,7 +99,7 @@ extern "C" {
} FDBKeyValue;
#pragma pack(pop)
DLLEXPORT void fdb_future_cancel( FDBFuture *f );
DLLEXPORT void fdb_future_cancel( FDBFuture* f );
DLLEXPORT void fdb_future_release_memory( FDBFuture* f );
@ -128,12 +127,6 @@ extern "C" {
fdb_future_get_key( FDBFuture* f, uint8_t const** out_key,
int* out_key_length );
DLLEXPORT WARN_UNUSED_RESULT fdb_error_t
fdb_future_get_cluster( FDBFuture* f, FDBCluster** out_cluster );
DLLEXPORT WARN_UNUSED_RESULT fdb_error_t
fdb_future_get_database( FDBFuture* f, FDBDatabase** out_database );
DLLEXPORT WARN_UNUSED_RESULT fdb_error_t
fdb_future_get_value( FDBFuture* f, fdb_bool_t *out_present,
uint8_t const** out_value,
@ -148,17 +141,8 @@ extern "C" {
DLLEXPORT WARN_UNUSED_RESULT fdb_error_t fdb_future_get_string_array(FDBFuture* f,
const char*** out_strings, int* out_count);
DLLEXPORT WARN_UNUSED_RESULT FDBFuture* fdb_create_cluster( const char* cluster_file_path );
DLLEXPORT void fdb_cluster_destroy( FDBCluster* c );
DLLEXPORT WARN_UNUSED_RESULT fdb_error_t
fdb_cluster_set_option( FDBCluster* c, FDBClusterOption option,
uint8_t const* value, int value_length );
DLLEXPORT WARN_UNUSED_RESULT FDBFuture*
fdb_cluster_create_database( FDBCluster* c, uint8_t const* db_name,
int db_name_length );
fdb_create_database( const char* cluster_file_path, FDBDatabase** out_database );
DLLEXPORT void fdb_database_destroy( FDBDatabase* d );
@ -269,6 +253,35 @@ extern "C" {
/* LEGACY API VERSIONS */
#if FDB_API_VERSION < 610 || defined FDB_INCLUDE_LEGACY_TYPES
typedef struct FDB_cluster FDBCluster;
typedef enum {
// This option is only a placeholder for C compatibility and should not be used
FDB_CLUSTER_OPTION_DUMMY_DO_NOT_USE=-1
} FDBClusterOption;
#endif
#if FDB_API_VERSION < 610
DLLEXPORT WARN_UNUSED_RESULT fdb_error_t
fdb_future_get_cluster( FDBFuture* f, FDBCluster** out_cluster );
DLLEXPORT WARN_UNUSED_RESULT fdb_error_t
fdb_future_get_database( FDBFuture* f, FDBDatabase** out_database );
DLLEXPORT WARN_UNUSED_RESULT FDBFuture* fdb_create_cluster( const char* cluster_file_path );
DLLEXPORT void fdb_cluster_destroy( FDBCluster* c );
DLLEXPORT WARN_UNUSED_RESULT fdb_error_t
fdb_cluster_set_option( FDBCluster* c, FDBClusterOption option,
uint8_t const* value, int value_length );
DLLEXPORT WARN_UNUSED_RESULT FDBFuture*
fdb_cluster_create_database( FDBCluster* c, uint8_t const* db_name,
int db_name_length );
#endif
#if FDB_API_VERSION < 23
DLLEXPORT WARN_UNUSED_RESULT fdb_error_t
fdb_future_get_error( FDBFuture* f,

View File

@ -236,22 +236,8 @@ FDBDatabase* openDatabase(struct ResultSet *rs, pthread_t *netThread) {
checkError(fdb_setup_network(), "setup network", rs);
pthread_create(netThread, NULL, &runNetwork, NULL);
FDBFuture *f = fdb_create_cluster(NULL);
checkError(fdb_future_block_until_ready(f), "block for cluster", rs);
FDBCluster *cluster;
checkError(fdb_future_get_cluster(f, &cluster), "get cluster", rs);
fdb_future_destroy(f);
f = fdb_cluster_create_database(cluster, (uint8_t*)"DB", 2);
checkError(fdb_future_block_until_ready(f), "block for database", rs);
FDBDatabase *db;
checkError(fdb_future_get_database(f, &db), "get database", rs);
fdb_future_destroy(f);
fdb_cluster_destroy(cluster);
checkError(fdb_create_database(NULL, &db), "create database", rs);
return db;
}

View File

@ -0,0 +1,42 @@
# Build the FDB Flow (C++ actor) binding as a static library and bundle it,
# together with its public headers, into a distributable tarball.
set(SRCS
DirectoryLayer.actor.cpp
DirectoryLayer.h
DirectoryPartition.h
DirectorySubspace.cpp
DirectorySubspace.h
FDBLoanerTypes.h
HighContentionAllocator.actor.cpp
HighContentionAllocator.h
IDirectory.h
Node.actor.cpp
Subspace.cpp
Subspace.h
Tuple.cpp
Tuple.h
fdb_flow.actor.cpp
fdb_flow.h)
# add_flow_target is a project helper (brought in via FlowCommands) —
# presumably it runs the actor compiler over *.actor.cpp before compiling;
# confirm in cmake/FlowCommands.cmake.
add_flow_target(NAME fdb_flow SRCS ${SRCS} STATIC_LIBRARY)
target_link_libraries(fdb_flow PUBLIC fdb_c)
add_subdirectory(tester)
# generate flow-package
# Collect the public headers from SRCS so they can be shipped with the library.
foreach(f IN LISTS SRCS)
if(f MATCHES ".*\\.h$")
list(APPEND headers ${CMAKE_CURRENT_SOURCE_DIR}/${f})
endif()
endforeach()
# Staging directory lives in this directory's binary dir; the finished tarball
# goes into the top-level build's packages/ directory.
file(MAKE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/packages)
file(MAKE_DIRECTORY ${CMAKE_BINARY_DIR}/packages)
set(package_dir ${CMAKE_CURRENT_BINARY_DIR}/packages/fdb-flow-${CMAKE_PROJECT_VERSION})
set(tar_file ${CMAKE_BINARY_DIR}/packages/fdb-flow-${CMAKE_PROJECT_VERSION}.tar.gz)
# Stage the built archive plus headers, then tar them up. Uses `cmake -E` for
# portable file operations; the `&&` chaining relies on the shell used by the
# generator. NOTE(review): the command has no VERBATIM and does not list the
# headers in DEPENDS, so header edits won't retrigger the package — consider
# tightening if the package goes stale.
add_custom_command(OUTPUT ${tar_file}
COMMAND
${CMAKE_COMMAND} -E make_directory ${package_dir} &&
${CMAKE_COMMAND} -E copy $<TARGET_FILE:fdb_flow> ${headers} ${package_dir} &&
${CMAKE_COMMAND} -E tar czf ${tar_file} ${package_dir}
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/packages
COMMENT "Build fdb_flow package")
# Named target so the tarball can be requested explicitly, and hooked into the
# umbrella `packages` target (defined elsewhere in the project).
add_custom_target(package_flow DEPENDS ${tar_file})
add_dependencies(packages package_flow)

View File

@ -34,8 +34,7 @@ THREAD_FUNC networkThread(void* fdb) {
ACTOR Future<Void> _test() {
API *fdb = FDB::API::selectAPIVersion(610);
auto c = fdb->createCluster( std::string() );
auto db = c->createDatabase();
auto db = fdb->createDatabase();
state Reference<Transaction> tr( new Transaction(db) );
// tr->setVersion(1);
@ -189,13 +188,13 @@ namespace FDB {
}
Reference<Cluster> API::createCluster( std::string const& connFilename ) {
CFuture f( fdb_create_cluster( connFilename.c_str() ) );
f.blockUntilReady();
return Reference<Cluster>(new Cluster(connFilename));
}
FDBCluster* c;
throw_on_error( fdb_future_get_cluster( f.f, &c ) );
return Reference<Cluster>( new Cluster(c) );
Reference<DatabaseContext> API::createDatabase(std::string const& connFilename) {
FDBDatabase *db;
throw_on_error(fdb_create_database(connFilename.c_str(), &db));
return Reference<DatabaseContext>(new DatabaseContext(db));
}
int API::getAPIVersion() const {
@ -203,14 +202,7 @@ namespace FDB {
}
Reference<DatabaseContext> Cluster::createDatabase() {
const char *dbName = "DB";
CFuture f( fdb_cluster_create_database( c, (uint8_t*)dbName, (int)strlen(dbName) ) );
f.blockUntilReady();
FDBDatabase* db;
throw_on_error( fdb_future_get_database( f.f, &db ) );
return Reference<DatabaseContext>( new DatabaseContext(db) );
return API::getInstance()->createDatabase(connFilename.c_str());
}
void DatabaseContext::setDatabaseOption(FDBDatabaseOption option, Optional<StringRef> value) {

View File

@ -44,20 +44,21 @@ namespace FDB {
private:
FDBDatabase* db;
explicit DatabaseContext( FDBDatabase* db ) : db(db) {}
friend class API;
};
// Deprecated: Use createDatabase instead.
class Cluster : public ReferenceCounted<Cluster>, NonCopyable {
public:
~Cluster() {
fdb_cluster_destroy( c );
}
~Cluster() {}
Reference<DatabaseContext> createDatabase();
private:
explicit Cluster( FDBCluster* c ) : c(c) {}
FDBCluster* c;
explicit Cluster( std::string connFilename ) : connFilename(connFilename) {}
std::string connFilename;
friend class API;
};
@ -73,8 +74,11 @@ namespace FDB {
void runNetwork();
void stopNetwork();
// Deprecated: Use createDatabase instead.
Reference<Cluster> createCluster( std::string const& connFilename );
Reference<DatabaseContext> createDatabase( std::string const& connFilename="" );
bool evaluatePredicate(FDBErrorPredicate pred, Error const& e);
int getAPIVersion() const;

View File

@ -0,0 +1,6 @@
# Stack tester executable for the FDB Flow bindings.
set(TEST_SRCS
DirectoryTester.actor.cpp
Tester.actor.cpp
Tester.actor.h)
add_flow_target(NAME fdb_flow_tester EXECUTABLE SRCS ${TEST_SRCS})
# PRIVATE: fix for the keyword-less legacy signature — the tester is an
# executable and exposes no usage requirements to consumers.
target_link_libraries(fdb_flow_tester PRIVATE fdb_flow)

View File

@ -334,17 +334,17 @@ struct DirectoryRemoveIfExistsFunc : InstructionFunc {
if(count.getInt(0) == 0) {
logOp(format("remove_if_exists %s", pathToString(directory->getPath()).c_str()));
bool _ = wait(executeMutation(instruction, [this] () {
wait(success(executeMutation(instruction, [this] () {
return directory->removeIfExists(instruction->tr);
}));
})));
}
else {
IDirectory::Path path = wait(popPath(data));
logOp(format("remove_if_exists %s", pathToString(combinePaths(directory->getPath(), path)).c_str()));
bool _ = wait(executeMutation(instruction, [this, path] () {
wait(success(executeMutation(instruction, [this, path] () {
return directory->removeIfExists(instruction->tr, path);
}));
})));
}
return Void();

View File

@ -28,7 +28,7 @@
#include <string.h>
#endif
// Otherwise we have to type setupNetwork(), Cluster::createCluster(), etc.
// Otherwise we have to type setupNetwork(), FDB::open(), etc.
using namespace FDB;
std::map<std::string, FDBMutationType> optionInfo;
@ -353,7 +353,7 @@ struct PopFunc : InstructionFunc {
ACTOR static Future<Void> call(Reference<FlowTesterData> data, Reference<InstructionData> instruction) {
state std::vector<StackItem> items = data->stack.pop();
for(StackItem item : items) {
Standalone<StringRef> _ = wait(item.value);
wait(success(item.value));
}
return Void();
}
@ -1714,8 +1714,7 @@ ACTOR void startTest(std::string clusterFilename, StringRef prefix, int apiVersi
startThread(networkThread, fdb);
// Connect to the default cluster/database, and create a transaction
auto cluster = fdb->createCluster(clusterFilename);
Reference<DatabaseContext> db = cluster->createDatabase();
auto db = fdb->createDatabase(clusterFilename);
Reference<FlowTesterData> data = Reference<FlowTesterData>(new FlowTesterData(fdb));
wait(runTest(data, db, prefix));
@ -1744,8 +1743,7 @@ ACTOR void _test_versionstamp() {
fdb->setupNetwork();
startThread(networkThread, fdb);
auto c = fdb->createCluster(std::string());
auto db = c->createDatabase();
auto db = fdb->createDatabase();
state Reference<Transaction> tr(new Transaction(db));
state Future<FDBStandalone<StringRef>> ftrVersion = tr->getVersionstamp();

121
bindings/go/CMakeLists.txt Normal file
View File

@ -0,0 +1,121 @@
# Go sources making up the fdb binding packages and the stacktester.
# Listed explicitly (rather than with file(GLOB)) so additions show up
# in reviews and reliably re-trigger the per-file copy rules below.
set(SRCS
src/_stacktester/directory.go
src/fdb/directory/allocator.go
src/fdb/directory/node.go
src/fdb/futures.go
src/fdb/subspace/subspace.go
src/_stacktester/stacktester.go
src/fdb/directory/directory.go
src/fdb/doc.go
src/fdb/transaction.go
src/fdb/directory/directoryLayer.go
src/fdb/errors.go
src/fdb/keyselector.go
src/fdb/tuple/tuple.go
src/fdb/cluster.go
src/fdb/directory/directoryPartition.go
src/fdb/fdb.go
src/fdb/range.go
src/fdb/tuple/tuple_test.go
src/fdb/database.go
src/fdb/directory/directorySubspace.go
src/fdb/fdb_test.go
src/fdb/snapshot.go)
# Emulate a GOPATH-style workspace inside the build directory; the go
# tool expects sources under $GOPATH/src/<import path>.
set(GOPATH ${CMAKE_CURRENT_BINARY_DIR})
set(GO_PACKAGE_ROOT github.com/apple/foundationdb/bindings/go)
set(GO_IMPORT_PATH ${GO_PACKAGE_ROOT}/src)
set(GO_DEST ${GOPATH}/src/${GO_PACKAGE_ROOT})
# Platform directory used by `go install` for compiled package objects.
# NOTE(review): only amd64 is handled here — confirm no other
# architectures are expected to build the Go bindings.
if(APPLE)
set(GOPLATFORM darwin_amd64)
elseif(WIN32)
set(GOPLATFORM windows_amd64)
else()
set(GOPLATFORM linux_amd64)
endif()
set(GO_PACKAGE_OUTDIR ${GOPATH}/pkg/${GOPLATFORM}/${GO_IMPORT_PATH})
file(MAKE_DIRECTORY ${GOPATH}
${GO_DEST})
# generated.go is produced from fdb.options by the rule further below.
set(go_options_file ${GO_DEST}/src/fdb/generated.go)
# Environment for cgo builds: fdb_c headers from the source/build trees,
# client libraries from the build's lib directory.
set(go_env GOPATH=${GOPATH}
C_INCLUDE_PATH=${CMAKE_BINARY_DIR}/bindings/c/foundationdb:${CMAKE_SOURCE_DIR}/bindings/c
CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/lib)
# Mirror each Go source file into the GOPATH workspace so the `go` tool
# sees the expected package layout.  One custom command per file keeps
# incremental builds cheap: only changed files are re-copied.
foreach(src_file IN LISTS SRCS)
  set(dest_file ${GO_DEST}/${src_file})
  get_filename_component(dest_dir ${dest_file} DIRECTORY)
  list(APPEND SRCS_OUT ${dest_file})
  add_custom_command(OUTPUT ${dest_file}
    # Two COMMAND clauses instead of a single shell `&&` chain: `&&` is
    # only honored by shell-based generators, and VERBATIM would escape
    # it as a literal argument.
    COMMAND ${CMAKE_COMMAND} -E make_directory ${dest_dir}
    COMMAND ${CMAKE_COMMAND} -E copy ${src_file} ${dest_file}
    DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/${src_file}
    WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
    VERBATIM
    COMMENT "Creating fdb_go_path")
endforeach()
# Aggregate target other rules can depend on to ensure the full source
# tree has been mirrored.
add_custom_target(copy_go_sources DEPENDS ${SRCS_OUT})
# Generate src/fdb/generated.go (option setters, mutation types, enums)
# from the canonical fdb.options description.
add_custom_command(OUTPUT ${go_options_file}
  COMMAND ${GO_EXECUTABLE} run ${CMAKE_CURRENT_SOURCE_DIR}/src/_util/translate_fdb_options.go
          -in ${CMAKE_SOURCE_DIR}/fdbclient/vexillographer/fdb.options
          -out ${go_options_file}
  DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/src/_util/translate_fdb_options.go
          ${CMAKE_SOURCE_DIR}/fdbclient/vexillographer/fdb.options
  VERBATIM
  COMMENT "Generate FDBOptions for GO")
add_custom_target(go_options_file DEPENDS ${go_options_file})
# The generated file lives inside the mirrored source tree, so the copy
# step has to run first.
add_dependencies(go_options_file copy_go_sources)
# build_go_package(LIBRARY|EXECUTABLE NAME <target> PATH <subpath>)
#
# Registers a custom target <target> that runs `go install` on the
# package ${GO_IMPORT_PATH}/<subpath> inside the GOPATH workspace.
# LIBRARY packages produce a .a/.lib under the pkg directory;
# EXECUTABLE packages produce a binary under ${GOPATH}/bin.
function(build_go_package)
  set(options LIBRARY EXECUTABLE)
  set(oneValueArgs NAME PATH)
  set(multiValueArgs)
  cmake_parse_arguments(BGP "${options}" "${oneValueArgs}" "${multiValueArgs}" "${ARGN}")
  # Fail loudly on malformed calls instead of generating broken rules.
  if(NOT BGP_NAME OR NOT BGP_PATH)
    message(FATAL_ERROR "NAME and PATH arguments are missing")
  endif()
  if(BGP_LIBRARY AND BGP_EXECUTABLE)
    message(FATAL_ERROR "Package can't be a library and an executable")
  endif()
  if(NOT BGP_LIBRARY AND NOT BGP_EXECUTABLE)
    message(FATAL_ERROR "Missing type")
  endif()
  # Compute the artifact `go install` will produce so the custom command
  # has a concrete OUTPUT for the build graph to track.
  if(BGP_LIBRARY)
    if(WIN32)
      set(outfile ${GO_PACKAGE_OUTDIR}/${BGP_PATH}.lib)
    else()
      set(outfile ${GO_PACKAGE_OUTDIR}/${BGP_PATH}.a)
    endif()
  else()
    get_filename_component(exec_filename ${BGP_PATH} NAME)
    if(WIN32)
      set(outfile ${GOPATH}/bin/${exec_filename}.exe)
    else()
      set(outfile ${GOPATH}/bin/${exec_filename})
    endif()
  endif()
  add_custom_command(OUTPUT ${outfile}
    COMMAND ${CMAKE_COMMAND} -E env ${go_env}
            ${GO_EXECUTABLE} install ${GO_IMPORT_PATH}/${BGP_PATH}
    # BUGFIX: previously `DEPENDS ${fdb_options_file}`, a variable that
    # is never set in this file (the generated file is
    # ${go_options_file}), so the dependency list was silently empty and
    # changes to generated.go did not trigger a rebuild.
    DEPENDS ${go_options_file}
    VERBATIM
    COMMENT "Building ${BGP_NAME}")
  add_custom_target(${BGP_NAME} ALL DEPENDS ${outfile})
endfunction()
# Package build order: fdb (cgo core) -> tuple -> subspace/directory -> stacktester.
build_go_package(LIBRARY NAME fdb_go PATH fdb)
# The core package needs the fdb_c client library and the generated options file.
add_dependencies(fdb_go fdb_c go_options_file)
build_go_package(LIBRARY NAME tuple_go PATH fdb/tuple)
add_dependencies(tuple_go fdb_go)
build_go_package(LIBRARY NAME subspace_go PATH fdb/subspace)
add_dependencies(subspace_go tuple_go)
build_go_package(LIBRARY NAME directory_go PATH fdb/directory)
# NOTE(review): directory_go depends only on tuple_go, not subspace_go —
# confirm that is intentional.
add_dependencies(directory_go tuple_go)
build_go_package(EXECUTABLE NAME fdb_go_tester PATH _stacktester)
add_dependencies(fdb_go_tester directory_go)

View File

@ -1,9 +0,0 @@
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
inputs-digest = "ab4fef131ee828e96ba67d31a7d690bd5f2f42040c6766b1b12fe856f87e0ff7"
solver-name = "gps-cdcl"
solver-version = 1

View File

@ -1,2 +0,0 @@
# The FoundationDB go bindings currently have no external golang dependencies outside of
# the go standard library.

View File

@ -31,3 +31,21 @@ Documentation
* [API documentation](https://godoc.org/github.com/apple/foundationdb/bindings/go/src/fdb)
* [Tutorial](https://apple.github.io/foundationdb/class-scheduling.html)
Modules
-------
If you used the bindings with modules before the addition of the `go.mod` file in the foundationdb repo,
it may be necessary to update the import path in your `go.mod`.
By default, a module-enabled `go get` will add something like this to your `go.mod`:
github.com/apple/foundationdb vx.x.x-xxxxxxxxxxxxxx-xxxxxxxxxxxx
You will need to delete that line, then run `go get github.com/apple/foundationdb/bindings/go@version`.
You should now have a line like this in your `go.mod`:
github.com/apple/foundationdb/bindings/go vx.x.x-xxxxxxxxxxxxxx-xxxxxxxxxxxx
Note: `@version` is only necessary if you previously locked to a
specific version or commit, in which case you'd replace `version` with a commit hash or tag.

View File

@ -11,6 +11,9 @@
# library.
#
# Currently, this script doesn't work with modules enabled.
GO111MODULE=off
DESTDIR="${DESTDIR:-}"
FDBVER="${FDBVER:-}"
REMOTE="${REMOTE:-github.com}"

4
bindings/go/go.mod Normal file
View File

@ -0,0 +1,4 @@
module github.com/apple/foundationdb/bindings/go
// The FoundationDB go bindings currently have no external golang dependencies outside of
// the go standard library.

View File

@ -893,7 +893,7 @@ func main() {
log.Fatal("API version not equal to value selected")
}
db, e = fdb.Open(clusterFile, []byte("DB"))
db, e = fdb.OpenDatabase(clusterFile)
if e != nil {
log.Fatal(e)
}

View File

@ -24,8 +24,10 @@ package main
import (
"encoding/xml"
"flag"
"fmt"
"go/doc"
"io"
"io/ioutil"
"log"
"os"
@ -48,22 +50,22 @@ type Options struct {
Scope []Scope
}
func writeOptString(receiver string, function string, opt Option) {
fmt.Printf(`func (o %s) %s(param string) error {
func writeOptString(w io.Writer, receiver string, function string, opt Option) {
fmt.Fprintf(w, `func (o %s) %s(param string) error {
return o.setOpt(%d, []byte(param))
}
`, receiver, function, opt.Code)
}
func writeOptBytes(receiver string, function string, opt Option) {
fmt.Printf(`func (o %s) %s(param []byte) error {
func writeOptBytes(w io.Writer, receiver string, function string, opt Option) {
fmt.Fprintf(w, `func (o %s) %s(param []byte) error {
return o.setOpt(%d, param)
}
`, receiver, function, opt.Code)
}
func writeOptInt(receiver string, function string, opt Option) {
fmt.Printf(`func (o %s) %s(param int64) error {
func writeOptInt(w io.Writer, receiver string, function string, opt Option) {
fmt.Fprintf(w, `func (o %s) %s(param int64) error {
b, e := int64ToBytes(param)
if e != nil {
return e
@ -73,36 +75,36 @@ func writeOptInt(receiver string, function string, opt Option) {
`, receiver, function, opt.Code)
}
func writeOptNone(receiver string, function string, opt Option) {
fmt.Printf(`func (o %s) %s() error {
func writeOptNone(w io.Writer, receiver string, function string, opt Option) {
fmt.Fprintf(w, `func (o %s) %s() error {
return o.setOpt(%d, nil)
}
`, receiver, function, opt.Code)
}
func writeOpt(receiver string, opt Option) {
func writeOpt(w io.Writer, receiver string, opt Option) {
function := "Set" + translateName(opt.Name)
fmt.Println()
fmt.Fprintln(w)
if opt.Description != "" {
fmt.Printf("// %s\n", opt.Description)
fmt.Fprintf(w, "// %s\n", opt.Description)
if opt.ParamDesc != "" {
fmt.Printf("//\n// Parameter: %s\n", opt.ParamDesc)
fmt.Fprintf(w, "//\n// Parameter: %s\n", opt.ParamDesc)
}
} else {
fmt.Printf("// Not yet implemented.\n")
fmt.Fprintf(w, "// Not yet implemented.\n")
}
switch opt.ParamType {
case "String":
writeOptString(receiver, function, opt)
writeOptString(w, receiver, function, opt)
case "Bytes":
writeOptBytes(receiver, function, opt)
writeOptBytes(w, receiver, function, opt)
case "Int":
writeOptInt(receiver, function, opt)
writeOptInt(w, receiver, function, opt)
case "":
writeOptNone(receiver, function, opt)
writeOptNone(w, receiver, function, opt)
default:
log.Fatalf("Totally unexpected ParamType %s", opt.ParamType)
}
@ -112,9 +114,9 @@ func translateName(old string) string {
return strings.Replace(strings.Title(strings.Replace(old, "_", " ", -1)), " ", "", -1)
}
func writeMutation(opt Option) {
func writeMutation(w io.Writer, opt Option) {
tname := translateName(opt.Name)
fmt.Printf(`
fmt.Fprintf(w, `
// %s
func (t Transaction) %s(key KeyConvertible, param []byte) {
t.atomicOp(key.FDBKey(), param, %d)
@ -122,23 +124,38 @@ func (t Transaction) %s(key KeyConvertible, param []byte) {
`, opt.Description, tname, opt.Code)
}
func writeEnum(scope Scope, opt Option, delta int) {
fmt.Println()
func writeEnum(w io.Writer, scope Scope, opt Option, delta int) {
fmt.Fprintln(w)
if opt.Description != "" {
doc.ToText(os.Stdout, opt.Description, "\t// ", "", 73)
doc.ToText(w, opt.Description, "\t// ", "", 73)
// fmt.Printf(" // %s\n", opt.Description)
}
fmt.Printf(" %s %s = %d\n", scope.Name+translateName(opt.Name), scope.Name, opt.Code+delta)
fmt.Fprintf(w, " %s %s = %d\n", scope.Name+translateName(opt.Name), scope.Name, opt.Code+delta)
}
func main() {
var inFile string
var outFile string
flag.StringVar(&inFile, "in", "stdin", "Input file")
flag.StringVar(&outFile, "out", "stdout", "Output file")
flag.Parse()
var err error
v := Options{}
data, err := ioutil.ReadAll(os.Stdin)
if err != nil {
log.Fatal(err)
var data []byte
if inFile == "stdin" {
data, err = ioutil.ReadAll(os.Stdin)
if err != nil {
log.Fatal(err)
}
} else {
data, err = ioutil.ReadFile(inFile)
if err != nil {
log.Fatal(err)
}
}
err = xml.Unmarshal(data, &v)
@ -146,7 +163,17 @@ func main() {
log.Fatal(err)
}
fmt.Print(`/*
var out *os.File
if outFile == "stdout" {
out = os.Stdout
} else {
out, err = os.Create(outFile)
if err != nil {
log.Fatal(err)
}
}
fmt.Fprint(out, `/*
* generated.go
*
* This source file is part of the FoundationDB open source project
@ -197,7 +224,7 @@ func int64ToBytes(i int64) ([]byte, error) {
for _, opt := range scope.Option {
if !opt.Hidden {
writeOpt(receiver, opt)
writeOpt(out, receiver, opt)
}
}
continue
@ -206,7 +233,7 @@ func int64ToBytes(i int64) ([]byte, error) {
if scope.Name == "MutationType" {
for _, opt := range scope.Option {
if !opt.Hidden {
writeMutation(opt)
writeMutation(out, opt)
}
}
continue
@ -223,16 +250,17 @@ func int64ToBytes(i int64) ([]byte, error) {
scope.Name = "conflictRangeType"
}
fmt.Printf(`
fmt.Fprintf(out, `
type %s int
const (
`, scope.Name)
for _, opt := range scope.Option {
if !opt.Hidden {
writeEnum(scope, opt, d)
writeEnum(out, scope, opt, d)
}
}
fmt.Println(")")
fmt.Fprintln(out, ")")
}
out.Close()
}

View File

@ -28,47 +28,18 @@ package fdb
*/
import "C"
import (
"runtime"
)
// Deprecated: Use OpenDatabase or OpenDefault to obtain a database handle directly
// Cluster is a handle to a FoundationDB cluster. Cluster is a lightweight
// object that may be efficiently copied, and is safe for concurrent use by
// multiple goroutines.
//
// It is generally preferable to use Open or OpenDefault to obtain a database
// handle directly.
type Cluster struct {
*cluster
clusterFileName string
}
type cluster struct {
ptr *C.FDBCluster
}
func (c *cluster) destroy() {
C.fdb_cluster_destroy(c.ptr)
}
// OpenDatabase returns a database handle from the FoundationDB cluster. It is
// generally preferable to use Open or OpenDefault to obtain a database handle
// directly.
// Deprecated: Use OpenDatabase or OpenDefault to obtain a database handle directly
// OpenDatabase returns a database handle from the FoundationDB cluster.
//
// In the current release, the database name must be []byte("DB").
// The database name must be []byte("DB").
func (c Cluster) OpenDatabase(dbName []byte) (Database, error) {
f := C.fdb_cluster_create_database(c.ptr, byteSliceToPtr(dbName), C.int(len(dbName)))
fdb_future_block_until_ready(f)
var outd *C.FDBDatabase
if err := C.fdb_future_get_database(f, &outd); err != 0 {
return Database{}, Error{int(err)}
}
C.fdb_future_destroy(f)
d := &database{outd}
runtime.SetFinalizer(d, (*database).destroy)
return Database{d}, nil
return Open(c.clusterFileName, dbName)
}

View File

@ -30,6 +30,7 @@ package fdb
import "C"
import (
"bytes"
"fmt"
"log"
"runtime"
@ -138,7 +139,7 @@ func APIVersion(version int) error {
if e == 2203 {
maxSupportedVersion := C.fdb_get_max_api_version()
if headerVersion > int(maxSupportedVersion) {
return fmt.Errorf("This version of the FoundationDB Go binding is not supported by the installed FoundationDB C library. The binding requires a library that supports API version %d, but the installed library supports a maximum version of %d.", version, maxSupportedVersion)
return fmt.Errorf("This version of the FoundationDB Go binding is not supported by the installed FoundationDB C library. The binding requires a library that supports API version %d, but the installed library supports a maximum version of %d.", headerVersion, maxSupportedVersion)
}
return fmt.Errorf("API version %d is not supported by the installed FoundationDB C library.", version)
}
@ -192,17 +193,10 @@ var apiVersion int
var networkStarted bool
var networkMutex sync.Mutex
type DatabaseId struct {
clusterFile string
dbName string
}
var openClusters map[string]Cluster
var openDatabases map[DatabaseId]Database
var openDatabases map[string]Database
func init() {
openClusters = make(map[string]Cluster)
openDatabases = make(map[DatabaseId]Database)
openDatabases = make(map[string]Database)
}
func startNetwork() error {
@ -222,10 +216,9 @@ func startNetwork() error {
return nil
}
// StartNetwork initializes the FoundationDB client networking engine. It is not
// necessary to call StartNetwork when using the fdb.Open or fdb.OpenDefault
// functions to obtain a database handle. StartNetwork must not be called more
// than once.
// Deprecated: the network is started automatically when a database is opened.
// StartNetwork initializes the FoundationDB client networking engine. StartNetwork
// must not be called more than once.
func StartNetwork() error {
networkMutex.Lock()
defer networkMutex.Unlock()
@ -237,17 +230,15 @@ func StartNetwork() error {
return startNetwork()
}
// DefaultClusterFile should be passed to fdb.Open or fdb.CreateCluster to allow
// the FoundationDB C library to select the platform-appropriate default cluster
// file on the current machine.
// DefaultClusterFile should be passed to fdb.Open to allow the FoundationDB C
// library to select the platform-appropriate default cluster file on the current machine.
const DefaultClusterFile string = ""
// OpenDefault returns a database handle to the default database from the
// FoundationDB cluster identified by the DefaultClusterFile on the current
// machine. The FoundationDB client networking engine will be initialized first,
// if necessary.
// OpenDefault returns a database handle to the FoundationDB cluster identified
// by the DefaultClusterFile on the current machine. The FoundationDB client
// networking engine will be initialized first, if necessary.
func OpenDefault() (Database, error) {
return Open(DefaultClusterFile, []byte("DB"))
return OpenDatabase(DefaultClusterFile)
}
// MustOpenDefault is like OpenDefault but panics if the default database cannot
@ -260,13 +251,9 @@ func MustOpenDefault() Database {
return db
}
// Open returns a database handle to the named database from the FoundationDB
// cluster identified by the provided cluster file and database name. The
// FoundationDB client networking engine will be initialized first, if
// necessary.
//
// In the current release, the database name must be []byte("DB").
func Open(clusterFile string, dbName []byte) (Database, error) {
// Open returns a database handle to the FoundationDB cluster identified
// by the provided cluster file and database name.
func OpenDatabase(clusterFile string) (Database, error) {
networkMutex.Lock()
defer networkMutex.Unlock()
@ -283,27 +270,36 @@ func Open(clusterFile string, dbName []byte) (Database, error) {
}
}
cluster, ok := openClusters[clusterFile]
db, ok := openDatabases[clusterFile]
if !ok {
cluster, e = createCluster(clusterFile)
db, e = createDatabase(clusterFile)
if e != nil {
return Database{}, e
}
openClusters[clusterFile] = cluster
}
db, ok := openDatabases[DatabaseId{clusterFile, string(dbName)}]
if !ok {
db, e = cluster.OpenDatabase(dbName)
if e != nil {
return Database{}, e
}
openDatabases[DatabaseId{clusterFile, string(dbName)}] = db
openDatabases[clusterFile] = db
}
return db, nil
}
func MustOpenDatabase(clusterFile string) Database {
db, err := OpenDatabase(clusterFile)
if err != nil {
panic(err)
}
return db
}
// Deprecated: Use OpenDatabase instead
// The database name must be []byte("DB").
func Open(clusterFile string, dbName []byte) (Database, error) {
if bytes.Compare(dbName, []byte("DB")) != 0 {
return Database{}, Error{2013} // invalid_database_name
}
return OpenDatabase(clusterFile)
}
// Deprecated: Use MustOpenDatabase instead
// MustOpen is like Open but panics if the database cannot be opened.
func MustOpen(clusterFile string, dbName []byte) Database {
db, err := Open(clusterFile, dbName)
@ -313,7 +309,7 @@ func MustOpen(clusterFile string, dbName []byte) Database {
return db
}
func createCluster(clusterFile string) (Cluster, error) {
func createDatabase(clusterFile string) (Database, error) {
var cf *C.char
if len(clusterFile) != 0 {
@ -321,23 +317,18 @@ func createCluster(clusterFile string) (Cluster, error) {
defer C.free(unsafe.Pointer(cf))
}
f := C.fdb_create_cluster(cf)
fdb_future_block_until_ready(f)
var outc *C.FDBCluster
if err := C.fdb_future_get_cluster(f, &outc); err != 0 {
return Cluster{}, Error{int(err)}
var outdb *C.FDBDatabase
if err := C.fdb_create_database(cf, &outdb); err != 0 {
return Database{}, Error{int(err)}
}
C.fdb_future_destroy(f)
db := &database{outdb}
runtime.SetFinalizer(db, (*database).destroy)
c := &cluster{outc}
runtime.SetFinalizer(c, (*cluster).destroy)
return Cluster{c}, nil
return Database{db}, nil
}
// Deprecated: Use OpenDatabase instead.
// CreateCluster returns a cluster handle to the FoundationDB cluster identified
// by the provided cluster file.
func CreateCluster(clusterFile string) (Cluster, error) {
@ -352,7 +343,7 @@ func CreateCluster(clusterFile string) (Cluster, error) {
return Cluster{}, errNetworkNotSetup
}
return createCluster(clusterFile)
return Cluster{clusterFile}, nil
}
func byteSliceToPtr(b []byte) *C.uint8_t {
@ -377,6 +368,30 @@ func (k Key) FDBKey() Key {
return k
}
// String describes the key as a human readable string.
func (k Key) String() string {
return Printable(k)
}
// Printable renders a byte slice as a human readable string: bytes in
// the printable ASCII range [32-127), other than backslash, are passed
// through unchanged; a backslash is doubled; every other byte is
// emitted as \x followed by a two character zero-padded hex code.
func Printable(d []byte) string {
	var out bytes.Buffer
	for _, c := range d {
		switch {
		case c == '\\':
			// Escape the escape character itself.
			out.WriteString("\\\\")
		case c >= 32 && c < 127:
			// Printable ASCII passes through unchanged.
			out.WriteByte(c)
		default:
			// Everything else becomes a zero-padded hex escape.
			fmt.Fprintf(&out, "\\x%02x", c)
		}
	}
	return out.String()
}
func panicToError(e *error) {
if r := recover(); r != nil {
fe, ok := r.(Error)

View File

@ -24,8 +24,9 @@ package fdb_test
import (
"fmt"
"github.com/apple/foundationdb/bindings/go/src/fdb"
"testing"
"github.com/apple/foundationdb/bindings/go/src/fdb"
)
func ExampleOpenDefault() {
@ -261,3 +262,25 @@ func ExampleRangeIterator() {
// banana is bar
// cherry is baz
}
// TestKeyToString verifies that Key.String() passes printable ASCII
// through untouched and hex-escapes non-printable and non-ASCII bytes.
func TestKeyToString(t *testing.T) {
	for i, tc := range []struct {
		key    fdb.Key
		expect string
	}{
		{fdb.Key([]byte{0}), "\\x00"},
		{fdb.Key("plain-text"), "plain-text"},
		{fdb.Key("\xbdascii☻☺"), "\\xbdascii\\xe2\\x98\\xbb\\xe2\\x98\\xba"},
	} {
		if got := tc.key.String(); got != tc.expect {
			t.Errorf("got '%v', want '%v' at case %v", got, tc.expect, i)
		}
	}
}
func ExamplePrintable() {
fmt.Println(fdb.Printable([]byte{0, 1, 2, 'a', 'b', 'c', '1', '2', '3', '!', '?', 255}))
// Output: \x00\x01\x02abc123!?\xff
}

View File

@ -92,6 +92,13 @@ func (o NetworkOptions) SetTraceLogGroup(param string) error {
return o.setOpt(33, []byte(param))
}
// Selects trace output format for this client. xml (the default) and json are supported.
//
// Parameter: trace format
func (o NetworkOptions) SetTraceFormat(param string) error {
return o.setOpt(34, []byte(param))
}
// Set internal tuning or debugging knobs
//
// Parameter: knob_name=knob_value

View File

@ -1,7 +1,3 @@
include(UseJava)
find_package(JNI 1.8 REQUIRED)
find_package(Java 1.8 COMPONENTS Development REQUIRED)
set(JAVA_BINDING_SRCS
src/main/com/apple/foundationdb/async/AsyncIterable.java
src/main/com/apple/foundationdb/async/AsyncIterator.java
@ -11,6 +7,7 @@ set(JAVA_BINDING_SRCS
src/main/com/apple/foundationdb/async/CloseableAsyncIterator.java
src/main/com/apple/foundationdb/async/package-info.java
src/main/com/apple/foundationdb/Cluster.java
src/main/com/apple/foundationdb/ClusterOptions.java
src/main/com/apple/foundationdb/Database.java
src/main/com/apple/foundationdb/directory/Directory.java
src/main/com/apple/foundationdb/directory/DirectoryAlreadyExistsException.java
@ -28,8 +25,6 @@ set(JAVA_BINDING_SRCS
src/main/com/apple/foundationdb/FDB.java
src/main/com/apple/foundationdb/FDBDatabase.java
src/main/com/apple/foundationdb/FDBTransaction.java
src/main/com/apple/foundationdb/FutureCluster.java
src/main/com/apple/foundationdb/FutureDatabase.java
src/main/com/apple/foundationdb/FutureKey.java
src/main/com/apple/foundationdb/FutureResult.java
src/main/com/apple/foundationdb/FutureResults.java
@ -98,11 +93,10 @@ set(JAVA_TESTS_SRCS
src/test/com/apple/foundationdb/test/WatchTest.java
src/test/com/apple/foundationdb/test/WhileTrueTest.java)
set(GENERATED_JAVA_DIR ${CMAKE_CURRENT_BINARY_DIR}/src/main/com/foundationdb)
set(GENERATED_JAVA_DIR ${CMAKE_CURRENT_BINARY_DIR}/src/main/com/apple/foundationdb)
file(MAKE_DIRECTORY ${GENERATED_JAVA_DIR})
set(GENERATED_JAVA_FILES
${GENERATED_JAVA_DIR}/ClusterOptions.java
${GENERATED_JAVA_DIR}/ConflictRangeType.java
${GENERATED_JAVA_DIR}/DatabaseOptions.java
${GENERATED_JAVA_DIR}/MutationType.java
@ -111,11 +105,8 @@ set(GENERATED_JAVA_FILES
${GENERATED_JAVA_DIR}/TransactionOptions.java
${GENERATED_JAVA_DIR}/FDBException.java)
add_custom_command(OUTPUT ${GENERATED_JAVA_FILES}
COMMAND ${MONO_EXECUTABLE} ${VEXILLOGRAPHER_EXE} ${CMAKE_SOURCE_DIR}/fdbclient/vexillographer/fdb.options java ${GENERATED_JAVA_DIR}
DEPENDS ${CMAKE_SOURCE_DIR}/fdbclient/vexillographer/fdb.options vexillographer
COMMENT "Generate Java options")
add_custom_target(fdb_java_options DEPENDS ${GENERATED_JAVA_DIR}/StreamingMode.java)
vexillographer_compile(TARGET fdb_java_options LANG java OUT ${GENERATED_JAVA_DIR}
OUTPUT ${GENERATED_JAVA_FILES})
set(SYSTEM_NAME "linux")
if (APPLE)
@ -134,11 +125,67 @@ set_target_properties(fdb_java PROPERTIES
set(CMAKE_JAVA_COMPILE_FLAGS "-source" "1.8" "-target" "1.8")
set(CMAKE_JNI_TARGET TRUE)
set(JAR_VERSION "${FDB_MAJOR}.${FDB_MINOR}.${FDB_REVISION}")
add_jar(fdb-java ${JAVA_BINDING_SRCS} ${GENERATED_JAVA_FILES}
OUTPUT_DIR ${PROJECT_BINARY_DIR}/lib)
add_jar(fdb-java ${JAVA_BINDING_SRCS} ${GENERATED_JAVA_FILES} ${CMAKE_SOURCE_DIR}/LICENSE
OUTPUT_DIR ${PROJECT_BINARY_DIR}/lib VERSION ${CMAKE_PROJECT_VERSION})
add_dependencies(fdb-java fdb_java_options fdb_java)
add_jar(foundationdb-tests SOURCES ${JAVA_TESTS_SRCS} INCLUDE_JARS fdb-java)
add_dependencies(foundationdb-tests fdb_java_options)
install_jar(fdb-java DESTINATION ${FDB_SHARE_DIR}/java COMPONENT clients)
install(TARGETS fdb_java DESTINATION ${FDB_LIB_DIR} COMPONENT clients)
# TODO[mpilman]: The java RPM will require some more effort (mostly on debian). However,
# most people will use the fat-jar, so it is not clear how high this priority is.
#install_jar(fdb-java DESTINATION ${FDB_SHARE_DIR}/java COMPONENT java)
#install(TARGETS fdb_java DESTINATION ${FDB_LIB_DIR} COMPONENT java)
set(FAT_JAR_BINARIES "NOTFOUND" CACHE STRING
"Path of a directory structure with libraries to include in fat jar (a lib directory)")
set(jar_destination ${CMAKE_BINARY_DIR}/packages)
set(unpack_dir ${CMAKE_CURRENT_BINARY_DIR}/fat_jar)
file(MAKE_DIRECTORY ${jar_destination})
file(MAKE_DIRECTORY ${unpack_dir})
message(STATUS "Building fat jar to ${jar_destination}")
get_property(jar_path TARGET fdb-java PROPERTY JAR_FILE)
add_custom_command(OUTPUT ${unpack_dir}/META-INF/MANIFEST.MF
COMMAND ${Java_JAR_EXECUTABLE} xf ${jar_path}
WORKING_DIRECTORY ${unpack_dir}
DEPENDS "${jar_path}"
COMMENT "Unpack jar-file")
add_custom_target(unpack_jar DEPENDS ${unpack_dir}/META-INF/MANIFEST.MF)
add_dependencies(unpack_jar fdb-java)
add_custom_command(OUTPUT ${unpack_dir}/LICENSE
COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_SOURCE_DIR}/LICENSE ${unpack_dir}
COMMENT "copy license")
add_custom_target(copy_license DEPENDS ${unpack_dir}/LICENSE)
add_dependencies(unpack_jar copy_license)
if(FAT_JAR_BINARIES)
add_custom_command(OUTPUT ${unpack_dir}/lib
COMMAND ${CMAKE_COMMAND} -E copy_directory ${FAT_JAR_BINARIES} ${unpack_dir}
COMMENT "copy additional libraries"
DEPENDS ${unpack_dir}/META-INF/MANIFEST.MF)
add_custom_target(copy_libs DEPENDS ${unpack_dir}/lib)
add_dependencies(unpack_jar copy_libs)
endif()
if(WIN32)
set(lib_destination "windows/amd64")
elseif(APPLE)
set(lib_destination "osx/x86_64")
else()
set(lib_destination "linux/amd64")
endif()
set(lib_destination "${unpack_dir}/lib/${lib_destination}")
file(MAKE_DIRECTORY ${lib_destination})
add_custom_command(OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/lib_copied
COMMAND ${CMAKE_COMMAND} -E copy $<TARGET_FILE:fdb_java> ${lib_destination} &&
${CMAKE_COMMAND} -E touch ${CMAKE_CURRENT_BINARY_DIR}/lib_copied
COMMENT "Copy library")
add_custom_target(copy_lib DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/lib_copied)
add_dependencies(copy_lib unpack_jar)
set(target_jar ${jar_destination}/fdb-java-${CMAKE_PROJECT_VERSION}.jar)
add_custom_command(OUTPUT ${target_jar}
COMMAND ${Java_JAR_EXECUTABLE} cf ${target_jar} .
WORKING_DIRECTORY ${unpack_dir}
COMMENT "Build ${jar_destination}/fdb-java-${CMAKE_PROJECT_VERSION}.jar")
add_custom_target(fat-jar DEPENDS ${target_jar})
add_dependencies(fat-jar copy_lib)
add_dependencies(packages fat-jar)

53
bindings/java/README.md Normal file
View File

@ -0,0 +1,53 @@
<img alt="FoundationDB logo" src="documentation/FDB_logo.png?raw=true" width="400">
FoundationDB is a distributed database designed to handle large volumes of structured data across clusters of commodity servers. It organizes data as an ordered key-value store and employs ACID transactions for all operations. It is especially well-suited for read/write workloads but also has excellent performance for write-intensive workloads. Users interact with the database using an API language binding.
To learn more about FoundationDB, visit [foundationdb.org](https://www.foundationdb.org/)
## FoundationDB Java Bindings
In order to build the java bindings,
[JDK](http://www.oracle.com/technetwork/java/javase/downloads/index.html) >= 8
has to be installed. CMake will try to find a JDK installation, if it can find
one it will automatically build the java bindings.
If you have Java installed but CMake fails to find it, set the
`JAVA_HOME` environment variable.
### Fat Jar
By default, the generated jar file will depend on an installed libfdb_java
(provided with the generated RPM/DEB file on Linux). However, users usually find
a Jar-file that contains this library more convenient. This is also what you
will get if you download the jar file from Maven.
This file can be generated by compiling the `packages` target. For example with
make, you can run:
``` shell
make packages
```
#### Multi-Platform Jar-File
If you want to create a jar file that can run on more than one supported
architecture (the official one supports MacOS, Linux, and Windows), you can do
that by executing the following steps:
1. Create a directory called `lib` somewhere on your file system.
1. Create a subdirectory for each *additional* platform you want to support
(`windows` for windows, `osx` for MacOS, and `linux` for Linux).
1. Under each of those create a subdirectory with the name of the architecture
(currently only `amd64` is supported - on MacOS this has to be called
`x86_64` - `amd64` on all others).
1. Set the cmake variable `FAT_JAR_BINARIES` to this `lib` directory. For
example, if you created this directory structure under `/foo/bar`, the
corresponding cmake command would be:
```
cmake -DFAT_JAR_BINARIES=/foo/bar/lib <PATH_TO_FDB_SOURCE>
```
After building the packages (with `make packages` or the `packages`
target in `Visual Studio`) you will find a jar-file in the `packages`
directory in your build directory.

View File

@ -25,7 +25,7 @@
#include <foundationdb/fdb_c.h>
#define JNI_NULL 0
#define JNI_NULL nullptr
#if defined(__GNUG__)
#define thread_local __thread
@ -38,15 +38,15 @@
#error Missing thread local storage
#endif
static JavaVM* g_jvm = 0;
static thread_local JNIEnv* g_thread_jenv = 0; // Defined for the network thread once it is running, and for any thread that has called registerCallback
static thread_local jmethodID g_IFutureCallback_call_methodID = 0;
static JavaVM* g_jvm = nullptr;
static thread_local JNIEnv* g_thread_jenv = nullptr; // Defined for the network thread once it is running, and for any thread that has called registerCallback
static thread_local jmethodID g_IFutureCallback_call_methodID = JNI_NULL;
static thread_local bool is_external = false;
void detachIfExternalThread(void *ignore) {
if(is_external && g_thread_jenv != 0) {
g_thread_jenv = 0;
g_IFutureCallback_call_methodID = 0;
if(is_external && g_thread_jenv != nullptr) {
g_thread_jenv = nullptr;
g_IFutureCallback_call_methodID = JNI_NULL;
g_jvm->DetachCurrentThread();
}
}
@ -58,7 +58,7 @@ void throwOutOfMem(JNIEnv *jenv) {
if(jenv->ExceptionOccurred())
return;
if( jenv->ThrowNew( illegalArgClass, NULL ) != 0 ) {
if( jenv->ThrowNew( illegalArgClass, nullptr ) != 0 ) {
if( !jenv->ExceptionOccurred() ) {
jenv->FatalError("Could not throw OutOfMemoryError");
} else {
@ -68,7 +68,7 @@ void throwOutOfMem(JNIEnv *jenv) {
}
}
static jthrowable getThrowable(JNIEnv *jenv, fdb_error_t e, const char* msg = NULL) {
static jthrowable getThrowable(JNIEnv *jenv, fdb_error_t e, const char* msg = nullptr) {
jclass excepClass = jenv->FindClass("com/apple/foundationdb/FDBException");
if(jenv->ExceptionOccurred())
return JNI_NULL;
@ -128,11 +128,11 @@ static bool findCallbackMethods(JNIEnv *jenv) {
}
static void callCallback( FDBFuture* f, void* data ) {
if (g_thread_jenv == 0) {
if (g_thread_jenv == nullptr) {
// We are on an external thread and must attach to the JVM.
// The shutdown hook will later detach this thread.
is_external = true;
if( g_jvm != 0 && g_jvm->AttachCurrentThreadAsDaemon((void **) &g_thread_jenv, JNI_NULL) == JNI_OK ) {
if( g_jvm != nullptr && g_jvm->AttachCurrentThreadAsDaemon((void **) &g_thread_jenv, nullptr) == JNI_OK ) {
if( !findCallbackMethods( g_thread_jenv ) ) {
g_thread_jenv->FatalError("FDB: Could not find callback method.\n");
}
@ -169,9 +169,9 @@ JNIEXPORT void JNICALL Java_com_apple_foundationdb_NativeFuture_Future_1register
}
FDBFuture *f = (FDBFuture *)future;
// This is documented as not throwing, but simply returning NULL on OMM.
// This is documented as not throwing, but simply returning null on OOM.
// As belt and suspenders, we will check for pending exceptions and then,
// if there are none and the result is NULL, we'll throw our own OMM.
// if there are none and the result is null, we'll throw our own OOM.
callback = jenv->NewGlobalRef( callback );
if( !callback ) {
if( !jenv->ExceptionOccurred() )
@ -280,7 +280,7 @@ JNIEXPORT jobject JNICALL Java_com_apple_foundationdb_FutureStrings_FutureString
jclass str_clazz = jenv->FindClass("java/lang/String");
if( jenv->ExceptionOccurred() )
return JNI_NULL;
jobjectArray arr = jenv->NewObjectArray(count, str_clazz, NULL);
jobjectArray arr = jenv->NewObjectArray(count, str_clazz, JNI_NULL);
if( !arr ) {
if( !jenv->ExceptionOccurred() )
throwOutOfMem(jenv);
@ -327,7 +327,7 @@ JNIEXPORT jobject JNICALL Java_com_apple_foundationdb_FutureResults_FutureResult
return JNI_NULL;
}
jbyteArray lastKey = NULL;
jbyteArray lastKey = JNI_NULL;
if(count) {
lastKey = jenv->NewByteArray(kvs[count - 1].key_length);
if( !lastKey ) {
@ -378,7 +378,7 @@ JNIEXPORT jobject JNICALL Java_com_apple_foundationdb_FutureResults_FutureResult
throwOutOfMem(jenv);
return JNI_NULL;
}
uint8_t *keyvalues_barr = (uint8_t *)jenv->GetByteArrayElements(keyValueArray, NULL);
uint8_t *keyvalues_barr = (uint8_t *)jenv->GetByteArrayElements(keyValueArray, JNI_NULL);
if (!keyvalues_barr) {
throwRuntimeEx( jenv, "Error getting handle to native resources" );
return JNI_NULL;
@ -393,7 +393,7 @@ JNIEXPORT jobject JNICALL Java_com_apple_foundationdb_FutureResults_FutureResult
return JNI_NULL;
}
jint *length_barr = jenv->GetIntArrayElements(lengthArray, NULL);
jint *length_barr = jenv->GetIntArrayElements(lengthArray, JNI_NULL);
if( !length_barr ) {
if( !jenv->ExceptionOccurred() )
throwOutOfMem(jenv);
@ -480,38 +480,6 @@ JNIEXPORT jbyteArray JNICALL Java_com_apple_foundationdb_FutureKey_FutureKey_1ge
return result;
}
JNIEXPORT jlong JNICALL Java_com_apple_foundationdb_FutureCluster_FutureCluster_1get(JNIEnv *jenv, jobject, jlong future) {
if( !future ) {
throwParamNotNull(jenv);
return 0;
}
FDBFuture *f = (FDBFuture *)future;
FDBCluster *cluster;
fdb_error_t err = fdb_future_get_cluster(f, &cluster);
if( err ) {
safeThrow( jenv, getThrowable( jenv, err ) );
return 0;
}
return (jlong)cluster;
}
JNIEXPORT jlong JNICALL Java_com_apple_foundationdb_FutureDatabase_FutureDatabase_1get(JNIEnv *jenv, jobject, jlong future) {
if( !future ) {
throwParamNotNull(jenv);
return 0;
}
FDBFuture *f = (FDBFuture *)future;
FDBDatabase *database;
fdb_error_t err = fdb_future_get_database(f, &database);
if( err ) {
safeThrow( jenv, getThrowable( jenv, err ) );
return 0;
}
return (jlong)database;
}
JNIEXPORT jlong JNICALL Java_com_apple_foundationdb_FDBDatabase_Database_1createTransaction(JNIEnv *jenv, jobject, jlong dbPtr) {
if( !dbPtr ) {
throwParamNotNull(jenv);
@ -541,11 +509,11 @@ JNIEXPORT void JNICALL Java_com_apple_foundationdb_FDBDatabase_Database_1setOpti
return;
}
FDBDatabase *c = (FDBDatabase *)dPtr;
uint8_t *barr = NULL;
uint8_t *barr = nullptr;
int size = 0;
if(value != 0) {
barr = (uint8_t *)jenv->GetByteArrayElements( value, NULL );
if(value != JNI_NULL) {
barr = (uint8_t *)jenv->GetByteArrayElements( value, JNI_NULL );
if (!barr) {
throwRuntimeEx( jenv, "Error getting handle to native resources" );
return;
@ -553,7 +521,7 @@ JNIEXPORT void JNICALL Java_com_apple_foundationdb_FDBDatabase_Database_1setOpti
size = jenv->GetArrayLength( value );
}
fdb_error_t err = fdb_database_set_option( c, (FDBDatabaseOption)code, barr, size );
if(value != 0)
if(value != JNI_NULL)
jenv->ReleaseByteArrayElements( value, (jbyte *)barr, JNI_ABORT );
if( err ) {
safeThrow( jenv, getThrowable( jenv, err ) );
@ -564,69 +532,28 @@ JNIEXPORT jboolean JNICALL Java_com_apple_foundationdb_FDB_Error_1predicate(JNIE
return (jboolean)fdb_error_predicate(predicate, code);
}
JNIEXPORT jlong JNICALL Java_com_apple_foundationdb_FDB_Cluster_1create(JNIEnv *jenv, jobject, jstring clusterFileName) {
const char* fileName = 0;
if(clusterFileName != 0) {
fileName = jenv->GetStringUTFChars(clusterFileName, 0);
if( jenv->ExceptionOccurred() )
JNIEXPORT jlong JNICALL Java_com_apple_foundationdb_FDB_Database_1create(JNIEnv *jenv, jobject, jstring clusterFileName) {
const char* fileName = nullptr;
if(clusterFileName != JNI_NULL) {
fileName = jenv->GetStringUTFChars(clusterFileName, JNI_NULL);
if(jenv->ExceptionOccurred()) {
return 0;
}
FDBFuture *cluster = fdb_create_cluster( fileName );
if(clusterFileName != 0)
jenv->ReleaseStringUTFChars( clusterFileName, fileName );
return (jlong)cluster;
}
JNIEXPORT void JNICALL Java_com_apple_foundationdb_Cluster_Cluster_1setOption(JNIEnv *jenv, jobject, jlong cPtr, jint code, jbyteArray value) {
if( !cPtr ) {
throwParamNotNull(jenv);
return;
}
FDBCluster *c = (FDBCluster *)cPtr;
uint8_t *barr = NULL;
int size = 0;
if(value != 0) {
barr = (uint8_t *)jenv->GetByteArrayElements( value, NULL );
if (!barr) {
throwRuntimeEx( jenv, "Error getting handle to native resources" );
return;
}
size = jenv->GetArrayLength( value );
}
fdb_error_t err = fdb_cluster_set_option( c, (FDBClusterOption)code, barr, size );
if(value != 0)
jenv->ReleaseByteArrayElements( value, (jbyte *)barr, JNI_ABORT );
if( err ) {
safeThrow( jenv, getThrowable( jenv, err ) );
}
}
JNIEXPORT void JNICALL Java_com_apple_foundationdb_Cluster_Cluster_1dispose(JNIEnv *jenv, jobject, jlong cPtr) {
if( !cPtr ) {
throwParamNotNull(jenv);
return;
}
fdb_cluster_destroy( (FDBCluster *)cPtr );
}
FDBDatabase *db;
fdb_error_t err = fdb_create_database(fileName, &db);
JNIEXPORT jlong JNICALL Java_com_apple_foundationdb_Cluster_Cluster_1createDatabase(JNIEnv *jenv, jobject, jlong cPtr, jbyteArray dbNameBytes) {
if( !cPtr || !dbNameBytes ) {
throwParamNotNull(jenv);
return 0;
if(clusterFileName != JNI_NULL) {
jenv->ReleaseStringUTFChars(clusterFileName, fileName);
}
FDBCluster *cluster = (FDBCluster *)cPtr;
uint8_t *barr = (uint8_t *)jenv->GetByteArrayElements( dbNameBytes, NULL );
if (!barr) {
throwRuntimeEx( jenv, "Error getting handle to native resources" );
if(err) {
safeThrow(jenv, getThrowable(jenv, err));
return 0;
}
int size = jenv->GetArrayLength( dbNameBytes );
FDBFuture * f = fdb_cluster_create_database( cluster, barr, size );
jenv->ReleaseByteArrayElements( dbNameBytes, (jbyte *)barr, JNI_ABORT );
return (jlong)f;
return (jlong)db;
}
JNIEXPORT void JNICALL Java_com_apple_foundationdb_FDBTransaction_Transaction_1setVersion(JNIEnv *jenv, jobject, jlong tPtr, jlong version) {
@ -655,7 +582,7 @@ JNIEXPORT jlong JNICALL Java_com_apple_foundationdb_FDBTransaction_Transaction_1
}
FDBTransaction *tr = (FDBTransaction *)tPtr;
uint8_t *barr = (uint8_t *)jenv->GetByteArrayElements( keyBytes, NULL );
uint8_t *barr = (uint8_t *)jenv->GetByteArrayElements( keyBytes, JNI_NULL );
if(!barr) {
if( !jenv->ExceptionOccurred() )
throwRuntimeEx( jenv, "Error getting handle to native resources" );
@ -675,7 +602,7 @@ JNIEXPORT jlong JNICALL Java_com_apple_foundationdb_FDBTransaction_Transaction_1
}
FDBTransaction *tr = (FDBTransaction *)tPtr;
uint8_t *barr = (uint8_t *)jenv->GetByteArrayElements( keyBytes, NULL );
uint8_t *barr = (uint8_t *)jenv->GetByteArrayElements( keyBytes, JNI_NULL );
if(!barr) {
if( !jenv->ExceptionOccurred() )
throwRuntimeEx( jenv, "Error getting handle to native resources" );
@ -697,14 +624,14 @@ JNIEXPORT jlong JNICALL Java_com_apple_foundationdb_FDBTransaction_Transaction_1
}
FDBTransaction *tr = (FDBTransaction *)tPtr;
uint8_t *barrBegin = (uint8_t *)jenv->GetByteArrayElements( keyBeginBytes, NULL );
uint8_t *barrBegin = (uint8_t *)jenv->GetByteArrayElements( keyBeginBytes, JNI_NULL );
if (!barrBegin) {
if( !jenv->ExceptionOccurred() )
throwRuntimeEx( jenv, "Error getting handle to native resources" );
return 0;
}
uint8_t *barrEnd = (uint8_t *)jenv->GetByteArrayElements( keyEndBytes, NULL );
uint8_t *barrEnd = (uint8_t *)jenv->GetByteArrayElements( keyEndBytes, JNI_NULL );
if (!barrEnd) {
jenv->ReleaseByteArrayElements( keyBeginBytes, (jbyte *)barrBegin, JNI_ABORT );
if( !jenv->ExceptionOccurred() )
@ -728,14 +655,14 @@ JNIEXPORT void JNICALL Java_com_apple_foundationdb_FDBTransaction_Transaction_1s
}
FDBTransaction *tr = (FDBTransaction *)tPtr;
uint8_t *barrKey = (uint8_t *)jenv->GetByteArrayElements( keyBytes, NULL );
uint8_t *barrKey = (uint8_t *)jenv->GetByteArrayElements( keyBytes, JNI_NULL );
if (!barrKey) {
if( !jenv->ExceptionOccurred() )
throwRuntimeEx( jenv, "Error getting handle to native resources" );
return;
}
uint8_t *barrValue = (uint8_t *)jenv->GetByteArrayElements( valueBytes, NULL );
uint8_t *barrValue = (uint8_t *)jenv->GetByteArrayElements( valueBytes, JNI_NULL );
if (!barrValue) {
jenv->ReleaseByteArrayElements( keyBytes, (jbyte *)barrKey, JNI_ABORT );
if( !jenv->ExceptionOccurred() )
@ -757,7 +684,7 @@ JNIEXPORT void JNICALL Java_com_apple_foundationdb_FDBTransaction_Transaction_1c
}
FDBTransaction *tr = (FDBTransaction *)tPtr;
uint8_t *barr = (uint8_t *)jenv->GetByteArrayElements( keyBytes, NULL );
uint8_t *barr = (uint8_t *)jenv->GetByteArrayElements( keyBytes, JNI_NULL );
if (!barr) {
if( !jenv->ExceptionOccurred() )
throwRuntimeEx( jenv, "Error getting handle to native resources" );
@ -775,14 +702,14 @@ JNIEXPORT void JNICALL Java_com_apple_foundationdb_FDBTransaction_Transaction_1c
}
FDBTransaction *tr = (FDBTransaction *)tPtr;
uint8_t *barrKeyBegin = (uint8_t *)jenv->GetByteArrayElements( keyBeginBytes, NULL );
uint8_t *barrKeyBegin = (uint8_t *)jenv->GetByteArrayElements( keyBeginBytes, JNI_NULL );
if (!barrKeyBegin) {
if( !jenv->ExceptionOccurred() )
throwRuntimeEx( jenv, "Error getting handle to native resources" );
return;
}
uint8_t *barrKeyEnd = (uint8_t *)jenv->GetByteArrayElements( keyEndBytes, NULL );
uint8_t *barrKeyEnd = (uint8_t *)jenv->GetByteArrayElements( keyEndBytes, JNI_NULL );
if (!barrKeyEnd) {
jenv->ReleaseByteArrayElements( keyBeginBytes, (jbyte *)barrKeyBegin, JNI_ABORT );
if( !jenv->ExceptionOccurred() )
@ -805,14 +732,14 @@ JNIEXPORT void JNICALL Java_com_apple_foundationdb_FDBTransaction_Transaction_1m
}
FDBTransaction *tr = (FDBTransaction *)tPtr;
uint8_t *barrKey = (uint8_t *)jenv->GetByteArrayElements( key, NULL );
uint8_t *barrKey = (uint8_t *)jenv->GetByteArrayElements( key, JNI_NULL );
if (!barrKey) {
if( !jenv->ExceptionOccurred() )
throwRuntimeEx( jenv, "Error getting handle to native resources" );
return;
}
uint8_t *barrValue = (uint8_t *)jenv->GetByteArrayElements( value, NULL );
uint8_t *barrValue = (uint8_t *)jenv->GetByteArrayElements( value, JNI_NULL );
if (!barrValue) {
jenv->ReleaseByteArrayElements( key, (jbyte *)barrKey, JNI_ABORT );
if( !jenv->ExceptionOccurred() )
@ -845,11 +772,11 @@ JNIEXPORT void JNICALL Java_com_apple_foundationdb_FDBTransaction_Transaction_1s
return;
}
FDBTransaction *tr = (FDBTransaction *)tPtr;
uint8_t *barr = NULL;
uint8_t *barr = nullptr;
int size = 0;
if(value != 0) {
barr = (uint8_t *)jenv->GetByteArrayElements( value, NULL );
if(value != JNI_NULL) {
barr = (uint8_t *)jenv->GetByteArrayElements( value, JNI_NULL );
if (!barr) {
if( !jenv->ExceptionOccurred() )
throwRuntimeEx( jenv, "Error getting handle to native resources" );
@ -858,7 +785,7 @@ JNIEXPORT void JNICALL Java_com_apple_foundationdb_FDBTransaction_Transaction_1s
size = jenv->GetArrayLength( value );
}
fdb_error_t err = fdb_transaction_set_option( tr, (FDBTransactionOption)code, barr, size );
if(value != 0)
if(value != JNI_NULL)
jenv->ReleaseByteArrayElements( value, (jbyte *)barr, JNI_ABORT );
if( err ) {
safeThrow( jenv, getThrowable( jenv, err ) );
@ -897,7 +824,7 @@ JNIEXPORT jlong JNICALL Java_com_apple_foundationdb_FDBTransaction_Transaction_1
}
FDBTransaction *tr = (FDBTransaction *)tPtr;
uint8_t *barr = (uint8_t *)jenv->GetByteArrayElements( key, NULL );
uint8_t *barr = (uint8_t *)jenv->GetByteArrayElements( key, JNI_NULL );
if (!barr) {
if( !jenv->ExceptionOccurred() )
throwRuntimeEx( jenv, "Error getting handle to native resources" );
@ -944,7 +871,7 @@ JNIEXPORT jlong JNICALL Java_com_apple_foundationdb_FDBTransaction_Transaction_1
}
FDBTransaction *tr = (FDBTransaction *)tPtr;
uint8_t *barr = (uint8_t *)jenv->GetByteArrayElements( key, NULL );
uint8_t *barr = (uint8_t *)jenv->GetByteArrayElements( key, JNI_NULL );
if (!barr) {
if( !jenv->ExceptionOccurred() )
throwRuntimeEx( jenv, "Error getting handle to native resources" );
@ -973,7 +900,7 @@ JNIEXPORT void JNICALL Java_com_apple_foundationdb_FDBTransaction_Transaction_1a
}
FDBTransaction *tr = (FDBTransaction *)tPtr;
uint8_t *begin_barr = (uint8_t *)jenv->GetByteArrayElements( keyBegin, NULL );
uint8_t *begin_barr = (uint8_t *)jenv->GetByteArrayElements( keyBegin, JNI_NULL );
if (!begin_barr) {
if( !jenv->ExceptionOccurred() )
throwRuntimeEx( jenv, "Error getting handle to native resources" );
@ -981,7 +908,7 @@ JNIEXPORT void JNICALL Java_com_apple_foundationdb_FDBTransaction_Transaction_1a
}
int begin_size = jenv->GetArrayLength( keyBegin );
uint8_t *end_barr = (uint8_t *)jenv->GetByteArrayElements( keyEnd, NULL );
uint8_t *end_barr = (uint8_t *)jenv->GetByteArrayElements( keyEnd, JNI_NULL );
if (!end_barr) {
jenv->ReleaseByteArrayElements( keyBegin, (jbyte *)begin_barr, JNI_ABORT );
if( !jenv->ExceptionOccurred() )
@ -1026,10 +953,10 @@ JNIEXPORT void JNICALL Java_com_apple_foundationdb_FDB_Select_1API_1version(JNIE
}
JNIEXPORT void JNICALL Java_com_apple_foundationdb_FDB_Network_1setOption(JNIEnv *jenv, jobject, jint code, jbyteArray value) {
uint8_t *barr = NULL;
uint8_t *barr = nullptr;
int size = 0;
if(value != 0) {
barr = (uint8_t *)jenv->GetByteArrayElements( value, NULL );
if(value != JNI_NULL) {
barr = (uint8_t *)jenv->GetByteArrayElements( value, JNI_NULL );
if (!barr) {
if( !jenv->ExceptionOccurred() )
throwRuntimeEx( jenv, "Error getting handle to native resources" );
@ -1038,7 +965,7 @@ JNIEXPORT void JNICALL Java_com_apple_foundationdb_FDB_Network_1setOption(JNIEnv
size = jenv->GetArrayLength( value );
}
fdb_error_t err = fdb_network_set_option((FDBNetworkOption)code, barr, size);
if(value != 0)
if(value != JNI_NULL)
jenv->ReleaseByteArrayElements( value, (jbyte *)barr, JNI_ABORT );
if( err ) {
safeThrow( jenv, getThrowable( jenv, err ) );
@ -1060,7 +987,7 @@ JNIEXPORT void JNICALL Java_com_apple_foundationdb_FDB_Network_1run(JNIEnv *jenv
return;
}
fdb_error_t hookErr = fdb_add_network_thread_completion_hook( &detachIfExternalThread, NULL );
fdb_error_t hookErr = fdb_add_network_thread_completion_hook( &detachIfExternalThread, nullptr );
if( hookErr ) {
safeThrow( jenv, getThrowable( jenv, hookErr ) );
}

View File

@ -50,7 +50,7 @@ else ifeq ($(PLATFORM),osx)
java_ARCH := x86_64
endif
JAVA_GENERATED_SOURCES := bindings/java/src/main/com/apple/foundationdb/NetworkOptions.java bindings/java/src/main/com/apple/foundationdb/ClusterOptions.java bindings/java/src/main/com/apple/foundationdb/DatabaseOptions.java bindings/java/src/main/com/apple/foundationdb/TransactionOptions.java bindings/java/src/main/com/apple/foundationdb/StreamingMode.java bindings/java/src/main/com/apple/foundationdb/ConflictRangeType.java bindings/java/src/main/com/apple/foundationdb/MutationType.java bindings/java/src/main/com/apple/foundationdb/FDBException.java
JAVA_GENERATED_SOURCES := bindings/java/src/main/com/apple/foundationdb/NetworkOptions.java bindings/java/src/main/com/apple/foundationdb/DatabaseOptions.java bindings/java/src/main/com/apple/foundationdb/TransactionOptions.java bindings/java/src/main/com/apple/foundationdb/StreamingMode.java bindings/java/src/main/com/apple/foundationdb/ConflictRangeType.java bindings/java/src/main/com/apple/foundationdb/MutationType.java bindings/java/src/main/com/apple/foundationdb/FDBException.java
JAVA_SOURCES := $(JAVA_GENERATED_SOURCES) bindings/java/src/main/com/apple/foundationdb/*.java bindings/java/src/main/com/apple/foundationdb/async/*.java bindings/java/src/main/com/apple/foundationdb/tuple/*.java bindings/java/src/main/com/apple/foundationdb/directory/*.java bindings/java/src/main/com/apple/foundationdb/subspace/*.java bindings/java/src/test/com/apple/foundationdb/test/*.java

View File

@ -20,33 +20,29 @@
package com.apple.foundationdb;
import java.nio.charset.Charset;
import java.util.concurrent.Executor;
/**
* The {@code Cluster} represents a connection to a physical set of cooperating machines
* running FoundationDB. A {@code Cluster} is opened with a reference to a cluster file.<br>
* running FoundationDB. A {@code Cluster} is opened with a reference to a cluster file.
*
* This class is deprecated. Use {@link FDB#open} to open a {@link Database} directly<br>
* <br>
* <b>Note:</b> {@code Cluster} objects must be {@link #close closed} when no longer in use
* in order to free any associated resources.
*/
@Deprecated
public class Cluster extends NativeObjectWrapper {
private ClusterOptions options;
private final Executor executor;
private final String clusterFile;
private static final Charset UTF8 = Charset.forName("UTF-8");
protected Cluster(String clusterFile, Executor executor) {
super(0);
protected Cluster(long cPtr, Executor executor) {
super(cPtr);
this.executor = executor;
this.options = new ClusterOptions((code, parameter) -> {
pointerReadLock.lock();
try {
Cluster_setOption(getPtr(), code, parameter);
} finally {
pointerReadLock.unlock();
}
});
this.options = new ClusterOptions((code, parameter) -> {});
this.clusterFile = clusterFile;
}
/**
@ -59,19 +55,8 @@ public class Cluster extends NativeObjectWrapper {
return options;
}
@Override
protected void finalize() throws Throwable {
try {
checkUnclosed("Cluster");
close();
}
finally {
super.finalize();
}
}
/**
* Creates a connection to a specific database on an <i>FDB</i> cluster.
* Creates a connection to the database on an <i>FDB</i> cluster.
*
* @return a {@code Future} that will be set to a {@code Database} upon
* successful connection.
@ -81,7 +66,7 @@ public class Cluster extends NativeObjectWrapper {
}
/**
* Creates a connection to a specific database on an <i>FDB</i> cluster.
* Creates a connection to the database on an <i>FDB</i> cluster.
*
* @param e the {@link Executor} to use when executing asynchronous callbacks for the database
*
@ -89,22 +74,9 @@ public class Cluster extends NativeObjectWrapper {
* successful connection.
*/
public Database openDatabase(Executor e) throws FDBException {
FutureDatabase futureDatabase;
pointerReadLock.lock();
try {
futureDatabase = new FutureDatabase(Cluster_createDatabase(getPtr(), "DB".getBytes(UTF8)), e);
} finally {
pointerReadLock.unlock();
}
return futureDatabase.join();
return FDB.instance().open(clusterFile, e);
}
@Override
protected void closeInternal(long cPtr) {
Cluster_dispose(cPtr);
}
private native void Cluster_dispose(long cPtr);
private native long Cluster_createDatabase(long cPtr, byte[] dbName);
private native void Cluster_setOption(long cPtr, int code, byte[] value) throws FDBException;
protected void closeInternal(long cPtr) {}
}

View File

@ -1,9 +1,9 @@
/*
* FutureCluster.java
* ClusterOptions.java
*
* This source file is part of the FoundationDB open source project
*
* Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
* Copyright 2013-2019 Apple Inc. and the FoundationDB project authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -20,21 +20,14 @@
package com.apple.foundationdb;
import java.util.concurrent.Executor;
class FutureCluster extends NativeFuture<Cluster> {
private final Executor executor;
protected FutureCluster(long cPtr, Executor executor) {
super(cPtr);
this.executor = executor;
registerMarshalCallback(executor);
/**
* A set of options that can be set on a {@link Cluster}.
*
* @deprecated There are no cluster options.
*/
@Deprecated
public class ClusterOptions extends OptionsSet {
public ClusterOptions( OptionConsumer consumer ) {
super(consumer);
}
@Override
protected Cluster getIfDone_internal(long cPtr) throws FDBException {
return new Cluster(FutureCluster_get(cPtr), executor);
}
private native long FutureCluster_get(long cPtr) throws FDBException;
}

View File

@ -26,7 +26,6 @@ import java.util.function.Function;
/**
* A mutable, lexicographically ordered mapping from binary keys to binary values.
* A {@code Database} is stored on a FoundationDB {@link Cluster}.
* {@link Transaction}s are used to manipulate data within a single
* {@code Database} -- multiple, concurrent
* {@code Transaction}s on a {@code Database} enforce <b>ACID</b> properties.<br>

View File

@ -54,8 +54,8 @@ import java.util.concurrent.atomic.AtomicInteger;
* to call {@link #open}.
* <br>
* <h3>Client networking</h3>
* The network is started either implicitly with a call to a variant of {@link #open()} or
* {@link #createCluster()}, or started explicitly with a call to {@link #startNetwork()}.
* The network is started either implicitly with a call to a variant of {@link #open()}
* or started explicitly with a call to {@link #startNetwork()}.
* <br>
*
*/
@ -114,8 +114,8 @@ public class FDB {
* Returns a set of options that can be set on a the FoundationDB API. Generally,
* these options to the top level of the API affect the networking engine and
* therefore must be set before the network engine is started. The network is started
* by calls to {@link #startNetwork()} and implicitly by calls to {@link #open()} and
* {@link #createCluster()} (and their respective variants).
* by calls to {@link #startNetwork()} or implicitly by a call to {@link #open()} and
* and its variants.
*
* @return a set of options affecting this instance of the FoundationDB API
*/
@ -218,11 +218,14 @@ public class FDB {
* If the FoundationDB network has not been started, it will be started in the course of this call
* as if {@link FDB#startNetwork()} had been called.
*
* @deprecated Use {@link #open()} instead.
*
* @return a {@code CompletableFuture} that will be set to a FoundationDB {@code Cluster}.
*
* @throws FDBException on errors encountered starting the FoundationDB networking engine
* @throws IllegalStateException if the network had been previously stopped
*/
@Deprecated
public Cluster createCluster() throws IllegalStateException, FDBException {
return createCluster(null);
}
@ -232,6 +235,8 @@ public class FDB {
* has not been started, it will be started in the course of this call as if
* {@link #startNetwork()} had been called.
*
* @deprecated Use {@link #open(String)} instead.
*
* @param clusterFilePath the
* <a href="/foundationdb/administration.html#foundationdb-cluster-file" target="_blank">cluster file</a>
* defining the FoundationDB cluster. This can be {@code null} if the
@ -243,6 +248,7 @@ public class FDB {
* @throws FDBException on errors encountered starting the FoundationDB networking engine
* @throws IllegalStateException if the network had been previously stopped
*/
@Deprecated
public Cluster createCluster(String clusterFilePath) throws IllegalStateException, FDBException {
return createCluster(clusterFilePath, DEFAULT_EXECUTOR);
}
@ -253,6 +259,8 @@ public class FDB {
* {@link Executor} will be used as the default for the execution of all callbacks that
* are produced from using the resulting {@link Cluster}.
*
* @deprecated Use {@link #open(String, Executor)} instead.
*
* @param clusterFilePath the
* <a href="/foundationdb/administration.html#foundationdb-cluster-file" target="_blank">cluster file</a>
* defining the FoundationDB cluster. This can be {@code null} if the
@ -265,16 +273,10 @@ public class FDB {
* @throws FDBException on errors encountered starting the FoundationDB networking engine
* @throws IllegalStateException if the network had been previously stopped
*/
@Deprecated
public Cluster createCluster(String clusterFilePath, Executor e)
throws FDBException, IllegalStateException {
FutureCluster f;
synchronized (this) {
if (!isConnected()) {
startNetwork();
}
f = new FutureCluster(Cluster_create(clusterFilePath), e);
}
return f.join();
return new Cluster(clusterFilePath, e);
}
/**
@ -318,26 +320,21 @@ public class FDB {
* @return a {@code CompletableFuture} that will be set to a FoundationDB {@link Database}
*/
public Database open(String clusterFilePath, Executor e) throws FDBException {
FutureCluster f;
synchronized (this) {
if (!isConnected()) {
synchronized(this) {
if(!isConnected()) {
startNetwork();
}
f = new FutureCluster(Cluster_create(clusterFilePath), e);
}
Cluster c = f.join();
Database db = c.openDatabase(e);
c.close();
return db;
return new FDBDatabase(Database_create(clusterFilePath), e);
}
/**
* Initializes networking. Can only be called once. This version of
* {@code startNetwork()} will create a new thread and execute the networking
* event loop on that thread. This method is called upon {@link Database} or
* {@link Cluster} creation by default if the network has not yet
* been started. If one wishes to control what thread the network runs on,
* event loop on that thread. This method is called upon {@link Database}
* creation by default if the network has not yet been started. If one
* wishes to control what thread the network runs on,
* one should use the version of {@link #startNetwork(Executor) startNetwork()}
* that takes an {@link Executor}.<br>
* <br>
@ -472,5 +469,5 @@ public class FDB {
private native boolean Error_predicate(int predicate, int code);
private native long Cluster_create(String clusterFileName);
private native long Database_create(String clusterFilePath) throws FDBException;
}

View File

@ -1,40 +0,0 @@
/*
* FutureDatabase.java
*
* This source file is part of the FoundationDB open source project
*
* Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.apple.foundationdb;
import java.util.concurrent.Executor;
class FutureDatabase extends NativeFuture<Database> {
private final Executor executor;
FutureDatabase(long cPtr, Executor executor) {
super(cPtr);
this.executor = executor;
registerMarshalCallback(executor);
}
@Override
protected Database getIfDone_internal(long cPtr) throws FDBException {
return new FDBDatabase(FutureDatabase_get(cPtr), executor);
}
private native long FutureDatabase_get(long cPtr) throws FDBException;
}

View File

@ -33,7 +33,6 @@ import java.util.Map;
import java.util.concurrent.CompletableFuture;
import java.util.function.Function;
import com.apple.foundationdb.Cluster;
import com.apple.foundationdb.Database;
import com.apple.foundationdb.FDB;
import com.apple.foundationdb.FDBException;
@ -723,9 +722,7 @@ public class AsyncStackTester {
throw new IllegalStateException("API version not correctly set to " + apiVersion);
}
//ExecutorService executor = Executors.newFixedThreadPool(2);
Cluster cl = fdb.createCluster(args.length > 2 ? args[2] : null);
Database db = cl.openDatabase();
Database db = fdb.open(args.length > 2 ? args[2] : null);
Context c = new AsynchronousContext(db, prefix);
//System.out.println("Starting test...");

View File

@ -5,7 +5,9 @@ set(SRCS
fdb/locality.py
fdb/six.py
fdb/subspace_impl.py
fdb/tuple.py)
fdb/tuple.py
README.rst
MANIFEST.in)
if(APPLE)
list(APPEND SRCS fdb/libfdb_c.dylib.pth)
@ -17,28 +19,53 @@ set(out_files "")
foreach(src ${SRCS})
get_filename_component(dirname ${src} DIRECTORY)
get_filename_component(extname ${src} EXT)
if(NOT EXISTS ${dirname})
file(MAKE_DIRECTORY ${PROJECT_BINARY_DIR}/bindings/python/${dirname})
endif()
set(from_path ${CMAKE_CURRENT_SOURCE_DIR}/${src})
set(to_path ${CMAKE_CURRENT_BINARY_DIR}/${src})
add_custom_command(OUTPUT ${PROJECT_BINARY_DIR}/bindings/python/${src}
COMMAND mkdir -p ${PROJECT_BINARY_DIR}/bindings/python/${dirname}
COMMAND cp ${src} ${PROJECT_BINARY_DIR}/bindings/python/${dirname}/
DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/${src}
WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
COMMENT "copy ${src}")
COMMAND ${CMAKE_COMMAND} -E copy ${from_path} ${to_path}
DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/${src}
WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
COMMENT "copy ${src}")
set(out_files "${out_files};${PROJECT_BINARY_DIR}/bindings/python/${src}")
endforeach()
add_custom_target(python_binding ALL DEPENDS ${out_files})
file(MAKE_DIRECTORY ${PROJECT_BINARY_DIR}/bindings/python/fdb)
set(options_file ${PROJECT_BINARY_DIR}/bindings/python/fdb/fdboptions.py)
add_custom_command(OUTPUT ${options_file}
COMMAND ${MONO_EXECUTABLE} ${VEXILLOGRAPHER_EXE} ${CMAKE_SOURCE_DIR}/fdbclient/vexillographer/fdb.options python ${options_file}
DEPENDS ${PROJECT_SOURCE_DIR}/fdbclient/vexillographer/fdb.options vexillographer
COMMENT "Generate Python options")
add_custom_target(fdb_python_options DEPENDS
${options_file}
${PROJECT_SOURCE_DIR}/fdbclient/vexillographer/fdb.options
vexillographer)
vexillographer_compile(TARGET fdb_python_options LANG python OUT ${options_file}
OUTPUT ${options_file})
add_dependencies(python_binding fdb_python_options)
set(out_files "${out_files};${options_file}")
install(FILES ${out_files} DESTINATION ${FDB_PYTHON_INSTALL_DIR} COMPONENT clients)
# TODO[mpilman]: it is not clear whether we want to have rpms for python
#install(FILES ${out_files} DESTINATION ${FDB_PYTHON_INSTALL_DIR} COMPONENT python)
# Create sdist
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/setup.py.cmake ${CMAKE_CURRENT_BINARY_DIR}/setup.py)
configure_file(${CMAKE_SOURCE_DIR}/LICENSE ${CMAKE_CURRENT_BINARY_DIR}/LICENSE COPYONLY)
find_program(pycodestyle pycodestyle)
if (pycodestyle)
add_custom_command(OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/check_py_code_style
COMMAND ${pycodestyle} bindings/python --config=${CMAKE_CURRENT_SOURCE_DIR}/setup.cfg &&
${CMAKE_COMMAND} -E ${CMAKE_CURRENT_BINARY_DIR}/check_py_code_style
WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
DEPENDS ${out_files}
COMMENT "Check python code style")
add_custom_target(fdb_python_check DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/check_py_code_style)
else()
add_custom_target(fdb_python_check COMMAND ${CMAKE_COMMAND} -E echo "Skipped Python style check! Missing: pycodestyle")
endif()
set(package_file_name foundationdb-${FDB_VERSION}.tar.gz)
set(package_file ${CMAKE_BINARY_DIR}/packages/${package_file_name})
add_custom_command(OUTPUT ${package_file}
COMMAND $<TARGET_FILE:Python::Interpreter> setup.py sdist &&
${CMAKE_COMMAND} -E copy dist/${package_file_name} ${package_file}
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
COMMENT "Create Python sdist package")
add_custom_target(python_package DEPENDS ${package_file})
add_dependencies(python_package python_binding)
add_dependencies(packages python_package)

View File

@ -81,17 +81,16 @@ def api_version(ver):
elif err != 0:
raise RuntimeError('FoundationDB API error')
fdb.impl.init_c_api()
list = (
'FDBError',
'predicates',
'Future',
'Cluster',
'Database',
'Transaction',
'KeyValue',
'KeySelector',
'init',
'create_cluster',
'open',
'transactional',
'options',
@ -100,6 +99,12 @@ def api_version(ver):
_add_symbols(fdb.impl, list)
if ver < 610:
globals()["init"] = getattr(fdb.impl, "init")
globals()["open"] = getattr(fdb.impl, "open_v609")
globals()["create_cluster"] = getattr(fdb.impl, "create_cluster")
globals()["Cluster"] = getattr(fdb.impl, "Cluster")
if ver > 22:
import fdb.locality

View File

@ -30,6 +30,7 @@ import datetime
import platform
import os
import sys
import multiprocessing
from fdb import six
@ -38,6 +39,8 @@ _network_thread_reentrant_lock = threading.RLock()
_open_file = open
_thread_local_storage = threading.local()
import weakref
@ -51,11 +54,6 @@ class _ErrorPredicates(object):
self._parent = parent
class _ClusterOptions(object):
def __init__(self, cluster):
self._parent = weakref.proxy(cluster)
class _DatabaseOptions(object):
def __init__(self, db):
self._parent = weakref.proxy(db)
@ -158,7 +156,7 @@ def fill_operations():
add_operation("bit_" + fname, v)
for scope in ['ClusterOption', 'DatabaseOption', 'TransactionOption', 'NetworkOption']:
for scope in ['DatabaseOption', 'TransactionOption', 'NetworkOption']:
fill_options(scope)
fill_options('ErrorPredicate', True)
@ -598,12 +596,27 @@ class Future(_FDBBase):
return bool(self.capi.fdb_future_is_ready(self.fpointer))
def block_until_ready(self):
self.capi.fdb_future_block_until_ready(self.fpointer)
# Checking readiness is faster than using the callback, so it saves us time if we are already
# ready. It also doesn't add much to the cost of this function
if not self.is_ready():
# Blocking in the native client from the main thread prevents Python from handling signals.
# To avoid that behavior, we implement the blocking in Python using semaphores and on_ready.
# Using a Semaphore is faster than an Event, and we create only one per thread to avoid the
# cost of creating one every time.
semaphore = getattr(_thread_local_storage, 'future_block_semaphore', None)
if semaphore is None:
semaphore = multiprocessing.Semaphore(0)
_thread_local_storage.future_block_semaphore = semaphore
# Depending on the event_model, block_until_ready may be remapped to do something asynchronous or
# just fail. really_block_until_ready() is always fdb_future_block_until_ready() and is used e.g.
# for database and cluster futures that should always be available very quickly
really_block_until_ready = block_until_ready
self.on_ready(lambda self: semaphore.release())
try:
semaphore.acquire()
except:
# If this semaphore didn't actually get released, then we need to replace our thread-local
# copy so that later callers still function correctly
_thread_local_storage.future_block_semaphore = multiprocessing.Semaphore(0)
raise
def on_ready(self, callback):
def cb_and_delref(ignore):
@ -878,7 +891,7 @@ class FormerFuture(_FDBBase):
pass
class Database(FormerFuture):
class Database(_FDBBase):
def __init__(self, dpointer):
self.dpointer = dpointer
self.options = _DatabaseOptions(self)
@ -1097,33 +1110,25 @@ class Database(FormerFuture):
fill_operations()
class Cluster(FormerFuture):
def __init__(self, cpointer):
self.cpointer = cpointer
self.options = _ClusterOptions(self)
def __del__(self):
# print('Destroying cluster 0x%x' % self.cpointer)
self.capi.fdb_cluster_destroy(self.cpointer)
class Cluster(_FDBBase):
def __init__(self, cluster_file):
self.cluster_file = cluster_file
self.options = None
def open_database(self, name):
name = paramToBytes(name)
f = Future(self.capi.fdb_cluster_create_database(self.cpointer, name, len(name)))
f.really_block_until_ready()
dpointer = ctypes.c_void_p()
self.capi.fdb_future_get_database(f.fpointer, ctypes.byref(dpointer))
return Database(dpointer)
if name != b'DB':
raise FDBError(2013) # invalid_database_name
def _set_option(self, option, param, length):
self.capi.fdb_cluster_set_option(self.cpointer, option, param, length)
return create_database(self.cluster_file)
def create_database(cluster_file=None):
pointer = ctypes.c_void_p()
_FDBBase.capi.fdb_create_database(optionalParamToBytes(cluster_file)[0], ctypes.byref(pointer))
return Database(pointer)
def create_cluster(cluster_file=None):
f = Future(_FDBBase.capi.fdb_create_cluster(optionalParamToBytes(cluster_file)[0]))
cpointer = ctypes.c_void_p()
f.really_block_until_ready()
_FDBBase.capi.fdb_future_get_cluster(f.fpointer, ctypes.byref(cpointer))
return Cluster(cpointer)
return Cluster(cluster_file)
class KeySelector(object):
@ -1302,177 +1307,160 @@ def optionalParamToBytes(v):
_FDBBase.capi = _capi
_capi.fdb_select_api_version_impl.argtypes = [ctypes.c_int, ctypes.c_int]
_capi.fdb_select_api_version_impl.restype = ctypes.c_int
_capi.fdb_get_error.argtypes = [ctypes.c_int]
_capi.fdb_get_error.restype = ctypes.c_char_p
_capi.fdb_error_predicate.argtypes = [ctypes.c_int, ctypes.c_int]
_capi.fdb_error_predicate.restype = ctypes.c_int
_capi.fdb_setup_network.argtypes = []
_capi.fdb_setup_network.restype = ctypes.c_int
_capi.fdb_setup_network.errcheck = check_error_code
_capi.fdb_network_set_option.argtypes = [ctypes.c_int, ctypes.c_void_p, ctypes.c_int]
_capi.fdb_network_set_option.restype = ctypes.c_int
_capi.fdb_network_set_option.errcheck = check_error_code
_capi.fdb_run_network.argtypes = []
_capi.fdb_run_network.restype = ctypes.c_int
_capi.fdb_run_network.errcheck = check_error_code
_capi.fdb_stop_network.argtypes = []
_capi.fdb_stop_network.restype = ctypes.c_int
_capi.fdb_stop_network.errcheck = check_error_code
_capi.fdb_future_destroy.argtypes = [ctypes.c_void_p]
_capi.fdb_future_destroy.restype = None
_capi.fdb_future_release_memory.argtypes = [ctypes.c_void_p]
_capi.fdb_future_release_memory.restype = None
_capi.fdb_future_cancel.argtypes = [ctypes.c_void_p]
_capi.fdb_future_cancel.restype = None
_capi.fdb_future_block_until_ready.argtypes = [ctypes.c_void_p]
_capi.fdb_future_block_until_ready.restype = ctypes.c_int
_capi.fdb_future_block_until_ready.errcheck = check_error_code
_capi.fdb_future_is_ready.argtypes = [ctypes.c_void_p]
_capi.fdb_future_is_ready.restype = ctypes.c_int
_CBFUNC = ctypes.CFUNCTYPE(None, ctypes.c_void_p)
_capi.fdb_future_set_callback.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
_capi.fdb_future_set_callback.restype = int
_capi.fdb_future_set_callback.errcheck = check_error_code
def init_c_api():
_capi.fdb_select_api_version_impl.argtypes = [ctypes.c_int, ctypes.c_int]
_capi.fdb_select_api_version_impl.restype = ctypes.c_int
_capi.fdb_future_get_error.argtypes = [ctypes.c_void_p]
_capi.fdb_future_get_error.restype = int
_capi.fdb_future_get_error.errcheck = check_error_code
_capi.fdb_get_error.argtypes = [ctypes.c_int]
_capi.fdb_get_error.restype = ctypes.c_char_p
_capi.fdb_future_get_version.argtypes = [ctypes.c_void_p, ctypes.POINTER(ctypes.c_int64)]
_capi.fdb_future_get_version.restype = ctypes.c_int
_capi.fdb_future_get_version.errcheck = check_error_code
_capi.fdb_error_predicate.argtypes = [ctypes.c_int, ctypes.c_int]
_capi.fdb_error_predicate.restype = ctypes.c_int
_capi.fdb_future_get_key.argtypes = [ctypes.c_void_p, ctypes.POINTER(ctypes.POINTER(ctypes.c_byte)),
ctypes.POINTER(ctypes.c_int)]
_capi.fdb_future_get_key.restype = ctypes.c_int
_capi.fdb_future_get_key.errcheck = check_error_code
_capi.fdb_setup_network.argtypes = []
_capi.fdb_setup_network.restype = ctypes.c_int
_capi.fdb_setup_network.errcheck = check_error_code
_capi.fdb_future_get_cluster.argtypes = [ctypes.c_void_p, ctypes.POINTER(ctypes.c_void_p)]
_capi.fdb_future_get_cluster.restype = ctypes.c_int
_capi.fdb_future_get_cluster.errcheck = check_error_code
_capi.fdb_network_set_option.argtypes = [ctypes.c_int, ctypes.c_void_p, ctypes.c_int]
_capi.fdb_network_set_option.restype = ctypes.c_int
_capi.fdb_network_set_option.errcheck = check_error_code
_capi.fdb_future_get_database.argtypes = [ctypes.c_void_p, ctypes.POINTER(ctypes.c_void_p)]
_capi.fdb_future_get_database.restype = ctypes.c_int
_capi.fdb_future_get_database.errcheck = check_error_code
_capi.fdb_run_network.argtypes = []
_capi.fdb_run_network.restype = ctypes.c_int
_capi.fdb_run_network.errcheck = check_error_code
_capi.fdb_future_get_value.argtypes = [ctypes.c_void_p, ctypes.POINTER(ctypes.c_int),
ctypes.POINTER(ctypes.POINTER(ctypes.c_byte)), ctypes.POINTER(ctypes.c_int)]
_capi.fdb_future_get_value.restype = ctypes.c_int
_capi.fdb_future_get_value.errcheck = check_error_code
_capi.fdb_stop_network.argtypes = []
_capi.fdb_stop_network.restype = ctypes.c_int
_capi.fdb_stop_network.errcheck = check_error_code
_capi.fdb_future_get_keyvalue_array.argtypes = [ctypes.c_void_p, ctypes.POINTER(
ctypes.POINTER(KeyValueStruct)), ctypes.POINTER(ctypes.c_int), ctypes.POINTER(ctypes.c_int)]
_capi.fdb_future_get_keyvalue_array.restype = int
_capi.fdb_future_get_keyvalue_array.errcheck = check_error_code
_capi.fdb_future_destroy.argtypes = [ctypes.c_void_p]
_capi.fdb_future_destroy.restype = None
_capi.fdb_future_get_string_array.argtypes = [ctypes.c_void_p, ctypes.POINTER(ctypes.POINTER(ctypes.c_char_p)), ctypes.POINTER(ctypes.c_int)]
_capi.fdb_future_get_string_array.restype = int
_capi.fdb_future_get_string_array.errcheck = check_error_code
_capi.fdb_future_release_memory.argtypes = [ctypes.c_void_p]
_capi.fdb_future_release_memory.restype = None
_capi.fdb_create_cluster.argtypes = [ctypes.c_char_p]
_capi.fdb_create_cluster.restype = ctypes.c_void_p
_capi.fdb_future_cancel.argtypes = [ctypes.c_void_p]
_capi.fdb_future_cancel.restype = None
_capi.fdb_cluster_destroy.argtypes = [ctypes.c_void_p]
_capi.fdb_cluster_destroy.restype = None
_capi.fdb_future_block_until_ready.argtypes = [ctypes.c_void_p]
_capi.fdb_future_block_until_ready.restype = ctypes.c_int
_capi.fdb_future_block_until_ready.errcheck = check_error_code
_capi.fdb_cluster_create_database.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int]
_capi.fdb_cluster_create_database.restype = ctypes.c_void_p
_capi.fdb_future_is_ready.argtypes = [ctypes.c_void_p]
_capi.fdb_future_is_ready.restype = ctypes.c_int
_capi.fdb_cluster_set_option.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p, ctypes.c_int]
_capi.fdb_cluster_set_option.restype = ctypes.c_int
_capi.fdb_cluster_set_option.errcheck = check_error_code
_capi.fdb_future_set_callback.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
_capi.fdb_future_set_callback.restype = int
_capi.fdb_future_set_callback.errcheck = check_error_code
_capi.fdb_database_destroy.argtypes = [ctypes.c_void_p]
_capi.fdb_database_destroy.restype = None
_capi.fdb_future_get_error.argtypes = [ctypes.c_void_p]
_capi.fdb_future_get_error.restype = int
_capi.fdb_future_get_error.errcheck = check_error_code
_capi.fdb_database_create_transaction.argtypes = [ctypes.c_void_p, ctypes.POINTER(ctypes.c_void_p)]
_capi.fdb_database_create_transaction.restype = ctypes.c_int
_capi.fdb_database_create_transaction.errcheck = check_error_code
_capi.fdb_future_get_version.argtypes = [ctypes.c_void_p, ctypes.POINTER(ctypes.c_int64)]
_capi.fdb_future_get_version.restype = ctypes.c_int
_capi.fdb_future_get_version.errcheck = check_error_code
_capi.fdb_database_set_option.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p, ctypes.c_int]
_capi.fdb_database_set_option.restype = ctypes.c_int
_capi.fdb_database_set_option.errcheck = check_error_code
_capi.fdb_future_get_key.argtypes = [ctypes.c_void_p, ctypes.POINTER(ctypes.POINTER(ctypes.c_byte)),
ctypes.POINTER(ctypes.c_int)]
_capi.fdb_future_get_key.restype = ctypes.c_int
_capi.fdb_future_get_key.errcheck = check_error_code
_capi.fdb_transaction_destroy.argtypes = [ctypes.c_void_p]
_capi.fdb_transaction_destroy.restype = None
_capi.fdb_future_get_value.argtypes = [ctypes.c_void_p, ctypes.POINTER(ctypes.c_int),
ctypes.POINTER(ctypes.POINTER(ctypes.c_byte)), ctypes.POINTER(ctypes.c_int)]
_capi.fdb_future_get_value.restype = ctypes.c_int
_capi.fdb_future_get_value.errcheck = check_error_code
_capi.fdb_transaction_cancel.argtypes = [ctypes.c_void_p]
_capi.fdb_transaction_cancel.restype = None
_capi.fdb_future_get_keyvalue_array.argtypes = [ctypes.c_void_p, ctypes.POINTER(
ctypes.POINTER(KeyValueStruct)), ctypes.POINTER(ctypes.c_int), ctypes.POINTER(ctypes.c_int)]
_capi.fdb_future_get_keyvalue_array.restype = int
_capi.fdb_future_get_keyvalue_array.errcheck = check_error_code
_capi.fdb_transaction_set_read_version.argtypes = [ctypes.c_void_p, ctypes.c_int64]
_capi.fdb_transaction_set_read_version.restype = None
_capi.fdb_future_get_string_array.argtypes = [ctypes.c_void_p, ctypes.POINTER(ctypes.POINTER(ctypes.c_char_p)), ctypes.POINTER(ctypes.c_int)]
_capi.fdb_future_get_string_array.restype = int
_capi.fdb_future_get_string_array.errcheck = check_error_code
_capi.fdb_transaction_get_read_version.argtypes = [ctypes.c_void_p]
_capi.fdb_transaction_get_read_version.restype = ctypes.c_void_p
_capi.fdb_create_database.argtypes = [ctypes.c_char_p, ctypes.POINTER(ctypes.c_void_p)]
_capi.fdb_create_database.restype = ctypes.c_int
_capi.fdb_create_database.errcheck = check_error_code
_capi.fdb_transaction_get.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int, ctypes.c_int]
_capi.fdb_transaction_get.restype = ctypes.c_void_p
_capi.fdb_database_destroy.argtypes = [ctypes.c_void_p]
_capi.fdb_database_destroy.restype = None
_capi.fdb_transaction_get_key.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_int]
_capi.fdb_transaction_get_key.restype = ctypes.c_void_p
_capi.fdb_database_create_transaction.argtypes = [ctypes.c_void_p, ctypes.POINTER(ctypes.c_void_p)]
_capi.fdb_database_create_transaction.restype = ctypes.c_int
_capi.fdb_database_create_transaction.errcheck = check_error_code
_capi.fdb_transaction_get_range.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_void_p,
ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_int,
ctypes.c_int, ctypes.c_int]
_capi.fdb_transaction_get_range.restype = ctypes.c_void_p
_capi.fdb_database_set_option.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p, ctypes.c_int]
_capi.fdb_database_set_option.restype = ctypes.c_int
_capi.fdb_database_set_option.errcheck = check_error_code
_capi.fdb_transaction_add_conflict_range.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p, ctypes.c_int, ctypes.c_int]
_capi.fdb_transaction_add_conflict_range.restype = ctypes.c_int
_capi.fdb_transaction_add_conflict_range.errcheck = check_error_code
_capi.fdb_transaction_destroy.argtypes = [ctypes.c_void_p]
_capi.fdb_transaction_destroy.restype = None
_capi.fdb_transaction_get_addresses_for_key.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int]
_capi.fdb_transaction_get_addresses_for_key.restype = ctypes.c_void_p
_capi.fdb_transaction_cancel.argtypes = [ctypes.c_void_p]
_capi.fdb_transaction_cancel.restype = None
_capi.fdb_transaction_set_option.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p, ctypes.c_int]
_capi.fdb_transaction_set_option.restype = ctypes.c_int
_capi.fdb_transaction_set_option.errcheck = check_error_code
_capi.fdb_transaction_set_read_version.argtypes = [ctypes.c_void_p, ctypes.c_int64]
_capi.fdb_transaction_set_read_version.restype = None
_capi.fdb_transaction_atomic_op.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p, ctypes.c_int, ctypes.c_int]
_capi.fdb_transaction_atomic_op.restype = None
_capi.fdb_transaction_get_read_version.argtypes = [ctypes.c_void_p]
_capi.fdb_transaction_get_read_version.restype = ctypes.c_void_p
_capi.fdb_transaction_set.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p, ctypes.c_int]
_capi.fdb_transaction_set.restype = None
_capi.fdb_transaction_get.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int, ctypes.c_int]
_capi.fdb_transaction_get.restype = ctypes.c_void_p
_capi.fdb_transaction_clear.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int]
_capi.fdb_transaction_clear.restype = None
_capi.fdb_transaction_get_key.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_int]
_capi.fdb_transaction_get_key.restype = ctypes.c_void_p
_capi.fdb_transaction_clear_range.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p, ctypes.c_int]
_capi.fdb_transaction_clear_range.restype = None
_capi.fdb_transaction_get_range.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_void_p,
ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_int,
ctypes.c_int, ctypes.c_int]
_capi.fdb_transaction_get_range.restype = ctypes.c_void_p
_capi.fdb_transaction_watch.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int]
_capi.fdb_transaction_watch.restype = ctypes.c_void_p
_capi.fdb_transaction_add_conflict_range.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p, ctypes.c_int, ctypes.c_int]
_capi.fdb_transaction_add_conflict_range.restype = ctypes.c_int
_capi.fdb_transaction_add_conflict_range.errcheck = check_error_code
_capi.fdb_transaction_commit.argtypes = [ctypes.c_void_p]
_capi.fdb_transaction_commit.restype = ctypes.c_void_p
_capi.fdb_transaction_get_addresses_for_key.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int]
_capi.fdb_transaction_get_addresses_for_key.restype = ctypes.c_void_p
_capi.fdb_transaction_get_committed_version.argtypes = [ctypes.c_void_p, ctypes.POINTER(ctypes.c_int64)]
_capi.fdb_transaction_get_committed_version.restype = ctypes.c_int
_capi.fdb_transaction_get_committed_version.errcheck = check_error_code
_capi.fdb_transaction_set_option.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p, ctypes.c_int]
_capi.fdb_transaction_set_option.restype = ctypes.c_int
_capi.fdb_transaction_set_option.errcheck = check_error_code
_capi.fdb_transaction_get_versionstamp.argtypes = [ctypes.c_void_p]
_capi.fdb_transaction_get_versionstamp.restype = ctypes.c_void_p
_capi.fdb_transaction_atomic_op.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p, ctypes.c_int, ctypes.c_int]
_capi.fdb_transaction_atomic_op.restype = None
_capi.fdb_transaction_on_error.argtypes = [ctypes.c_void_p, ctypes.c_int]
_capi.fdb_transaction_on_error.restype = ctypes.c_void_p
_capi.fdb_transaction_set.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p, ctypes.c_int]
_capi.fdb_transaction_set.restype = None
_capi.fdb_transaction_reset.argtypes = [ctypes.c_void_p]
_capi.fdb_transaction_reset.restype = None
_capi.fdb_transaction_clear.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int]
_capi.fdb_transaction_clear.restype = None
_capi.fdb_transaction_clear_range.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p, ctypes.c_int]
_capi.fdb_transaction_clear_range.restype = None
_capi.fdb_transaction_watch.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int]
_capi.fdb_transaction_watch.restype = ctypes.c_void_p
_capi.fdb_transaction_commit.argtypes = [ctypes.c_void_p]
_capi.fdb_transaction_commit.restype = ctypes.c_void_p
_capi.fdb_transaction_get_committed_version.argtypes = [ctypes.c_void_p, ctypes.POINTER(ctypes.c_int64)]
_capi.fdb_transaction_get_committed_version.restype = ctypes.c_int
_capi.fdb_transaction_get_committed_version.errcheck = check_error_code
_capi.fdb_transaction_get_versionstamp.argtypes = [ctypes.c_void_p]
_capi.fdb_transaction_get_versionstamp.restype = ctypes.c_void_p
_capi.fdb_transaction_on_error.argtypes = [ctypes.c_void_p, ctypes.c_int]
_capi.fdb_transaction_on_error.restype = ctypes.c_void_p
_capi.fdb_transaction_reset.argtypes = [ctypes.c_void_p]
_capi.fdb_transaction_reset.restype = None
if hasattr(ctypes.pythonapi, 'Py_IncRef'):
def _pin_callback(cb):
@ -1660,13 +1648,12 @@ def init_v13(local_address, event_model=None):
return init(event_model)
open_clusters = {}
open_databases = {}
cacheLock = threading.Lock()
def open(cluster_file=None, database_name=b'DB', event_model=None):
def open(cluster_file=None, event_model=None):
"""Opens the given database (or the default database of the cluster indicated
by the fdb.cluster file in a platform-specific location, if no cluster_file
or database_name is provided). Initializes the FDB interface as required."""
@ -1676,17 +1663,21 @@ def open(cluster_file=None, database_name=b'DB', event_model=None):
init(event_model=event_model)
with cacheLock:
if cluster_file not in open_clusters:
open_clusters[cluster_file] = create_cluster(cluster_file)
if cluster_file not in open_databases:
open_databases[cluster_file] = create_database(cluster_file)
if (cluster_file, database_name) not in open_databases:
open_databases[(cluster_file, database_name)] = open_clusters[cluster_file].open_database(database_name)
return open_databases[(cluster_file)]
return open_databases[(cluster_file, database_name)]
def open_v609(cluster_file=None, database_name=b'DB', event_model=None):
if database_name != b'DB':
raise FDBError(2013) # invalid_database_name
return open(cluster_file, event_model)
def open_v13(cluster_id_path, database_name, local_address=None, event_model=None):
return open(cluster_id_path, database_name, event_model)
return open_v609(cluster_id_path, database_name, event_model)
import atexit

View File

@ -24,6 +24,7 @@ import ctypes
import uuid
import struct
import math
import sys
from bisect import bisect_left
from fdb import six
@ -306,6 +307,16 @@ def _reduce_children(child_values):
return bytes_list, version_pos
if sys.version_info < (2, 7):
def _bit_length(x):
s = bin(x) # binary representation: bin(-37) --> '-0b100101'
s = s.lstrip('-0b') # remove leading zeros and minus sign
return len(s)
else:
def _bit_length(x):
return x.bit_length()
def _encode(value, nested=False):
# returns [code][data] (code != 0xFF)
# encoded values are self-terminating
@ -324,7 +335,7 @@ def _encode(value, nested=False):
return b''.join([six.int2byte(INT_ZERO_CODE)]), -1
elif value > 0:
if value >= _size_limits[-1]:
length = (value.bit_length() + 7) // 8
length = (_bit_length(value) + 7) // 8
data = [six.int2byte(POS_INT_END), six.int2byte(length)]
for i in _range(length - 1, -1, -1):
data.append(six.int2byte((value >> (8 * i)) & 0xff))
@ -334,7 +345,7 @@ def _encode(value, nested=False):
return six.int2byte(INT_ZERO_CODE + n) + struct.pack(">Q", value)[-n:], -1
else:
if -value >= _size_limits[-1]:
length = (value.bit_length() + 7) // 8
length = (_bit_length(value) + 7) // 8
value += (1 << (length * 8)) - 1
data = [six.int2byte(NEG_INT_START), six.int2byte(length ^ 0xff)]
for i in _range(length - 1, -1, -1):

View File

@ -0,0 +1,38 @@
from distutils.core import setup
try:
with open("README.rst") as f:
long_desc = f.read()
except:
long_desc = ""
setup(name="foundationdb",
version="${FDB_VERSION}",
author="FoundationDB",
author_email="fdb-dist@apple.com",
description="Python bindings for the FoundationDB database",
url="https://www.foundationdb.org",
packages=['fdb'],
package_data={'fdb': ["fdb/*.py"]},
long_description=long_desc,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.0',
'Programming Language :: Python :: 3.1',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: Implementation :: CPython',
'Topic :: Database',
'Topic :: Database :: Front-Ends'
]
)

View File

@ -0,0 +1,16 @@
# we put this generated file into the src dir, as it
# greatly simplifies debugging
vexillographer_compile(TARGET ruby_options LANG ruby
OUT ${CMAKE_CURRENT_SOURCE_DIR}/lib/fdboptions.rb ALL)
configure_file(fdb.gemspec.cmake fdb.gemspec)
set(gem_file fdb-${FDB_VERSION}.gem)
set(gem_target ${CMAKE_BINARY_DIR}/packages/${gem_file})
add_custom_command(OUTPUT ${gem_target}
COMMAND ${GEM_COMMAND} build fdb.gemspec &&
${CMAKE_COMMAND} -E copy ${gem_file} ${gem_target}
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
COMMENT "Building ruby gem")
add_custom_target(gem_package DEPENDS ${gem_target})
add_dependencies(gem_package ruby_options)
add_dependencies(packages gem_package)

View File

@ -0,0 +1,22 @@
# -*- mode: ruby; -*-
Gem::Specification.new do |s|
s.name = 'fdb'
s.version = '${FDB_VERSION}'
s.date = Time.new.strftime '%Y-%m-%d'
s.summary = "Ruby bindings for the FoundationDB database"
s.description = <<-EOF
Ruby bindings for the FoundationDB database.
Complete documentation of the FoundationDB Ruby API can be found at:
https://apple.github.io/foundationdb/api-ruby.html.
EOF
s.authors = ["FoundationDB"]
s.email = 'fdb-dist@apple.com'
s.files = ["${CMAKE_SOURCE_DIR}/LICENSE", "${CMAKE_CURRENT_SOURCE_DIR}/lib/fdb.rb", "${CMAKE_CURRENT_SOURCE_DIR}/lib/fdbdirectory.rb", "${CMAKE_CURRENT_SOURCE_DIR}/lib/fdbimpl.rb", "${CMAKE_CURRENT_SOURCE_DIR}/lib/fdblocality.rb", "${CMAKE_CURRENT_SOURCE_DIR}/lib/fdboptions.rb", "${CMAKE_CURRENT_SOURCE_DIR}/lib/fdbsubspace.rb", "${CMAKE_CURRENT_SOURCE_DIR}/lib/fdbtuple.rb", "${CMAKE_CURRENT_SOURCE_DIR}/lib/fdbimpl_v609.rb"]
s.homepage = 'https://www.foundationdb.org'
s.license = 'Apache v2'
s.add_dependency('ffi', '>= 1.1.5')
s.required_ruby_version = '>= 1.9.3'
s.requirements << 'These bindings require the FoundationDB client. The client can be obtained from https://www.foundationdb.org/download/.'
end

View File

@ -13,7 +13,7 @@ https://apple.github.io/foundationdb/api-ruby.html.
EOF
s.authors = ["FoundationDB"]
s.email = 'fdb-dist@apple.com'
s.files = ["LICENSE", "lib/fdb.rb", "lib/fdbdirectory.rb", "lib/fdbimpl.rb", "lib/fdblocality.rb", "lib/fdboptions.rb", "lib/fdbsubspace.rb", "lib/fdbtuple.rb"]
s.files = ["LICENSE", "lib/fdb.rb", "lib/fdbdirectory.rb", "lib/fdbimpl.rb", "lib/fdblocality.rb", "lib/fdboptions.rb", "lib/fdbsubspace.rb", "lib/fdbtuple.rb", "lib/fdbimpl_v609.rb"]
s.homepage = 'https://www.foundationdb.org'
s.license = 'Apache v2'
s.add_dependency('ffi', '>= 1.1.5')

View File

@ -70,8 +70,15 @@ module FDB
raise "FoundationDB API version error"
end
FDBC.init_c_api()
require_relative 'fdbtuple'
require_relative 'fdbdirectory'
if version < 610
require_relative 'fdbimpl_v609'
end
if version > 22
require_relative 'fdblocality'
end

View File

@ -64,64 +64,61 @@ module FDB
typedef :int, :fdb_error
typedef :int, :fdb_bool
attach_function :fdb_get_error, [ :fdb_error ], :string
attach_function :fdb_network_set_option, [ :int, :pointer, :int ], :fdb_error
attach_function :fdb_setup_network, [ ], :fdb_error
attach_function :fdb_run_network, [ ], :fdb_error, :blocking => true
attach_function :fdb_stop_network, [ ], :fdb_error
attach_function :fdb_future_cancel, [ :pointer ], :void
attach_function :fdb_future_release_memory, [ :pointer ], :void
attach_function :fdb_future_destroy, [ :pointer ], :void
attach_function :fdb_future_block_until_ready, [ :pointer ], :fdb_error, :blocking => true
attach_function :fdb_future_is_ready, [ :pointer ], :fdb_bool
callback :fdb_future_callback, [ :pointer, :pointer ], :void
attach_function :fdb_future_set_callback, [ :pointer, :fdb_future_callback, :pointer ], :fdb_error
attach_function :fdb_future_get_error, [ :pointer ], :fdb_error
attach_function :fdb_future_get_version, [ :pointer, :pointer ], :fdb_error
attach_function :fdb_future_get_key, [ :pointer, :pointer, :pointer ], :fdb_error
attach_function :fdb_future_get_cluster, [ :pointer, :pointer ], :fdb_error
attach_function :fdb_future_get_database, [ :pointer, :pointer ], :fdb_error
attach_function :fdb_future_get_value, [ :pointer, :pointer, :pointer, :pointer ], :fdb_error
attach_function :fdb_future_get_keyvalue_array, [ :pointer, :pointer, :pointer, :pointer ], :fdb_error
attach_function :fdb_future_get_string_array, [ :pointer, :pointer, :pointer ], :fdb_error
attach_function :fdb_create_cluster, [ :string ], :pointer
attach_function :fdb_cluster_destroy, [ :pointer ], :void
attach_function :fdb_cluster_set_option, [ :pointer, :int, :pointer, :int ], :fdb_error
attach_function :fdb_cluster_create_database, [ :pointer, :pointer, :int ], :pointer
attach_function :fdb_database_destroy, [ :pointer ], :void
attach_function :fdb_database_set_option, [ :pointer, :int, :pointer, :int ], :fdb_error
attach_function :fdb_database_create_transaction, [ :pointer, :pointer ], :fdb_error
attach_function :fdb_transaction_destroy, [ :pointer ], :void
attach_function :fdb_transaction_cancel, [ :pointer ], :void
attach_function :fdb_transaction_atomic_op, [ :pointer, :pointer, :int, :pointer, :int, :int ], :void
attach_function :fdb_transaction_add_conflict_range, [ :pointer, :pointer, :int, :pointer, :int, :int ], :int
attach_function :fdb_transaction_get_addresses_for_key, [ :pointer, :pointer, :int ], :pointer
attach_function :fdb_transaction_set_option, [ :pointer, :int, :pointer, :int ], :fdb_error
attach_function :fdb_transaction_set_read_version, [ :pointer, :int64 ], :void
attach_function :fdb_transaction_get_read_version, [ :pointer ], :pointer
attach_function :fdb_transaction_get, [ :pointer, :pointer, :int, :int ], :pointer
attach_function :fdb_transaction_get_key, [ :pointer, :pointer, :int, :int, :int, :int ], :pointer
attach_function :fdb_transaction_get_range, [ :pointer, :pointer, :int, :int, :int, :pointer, :int, :int, :int, :int, :int, :int, :int, :int, :int ], :pointer
attach_function :fdb_transaction_set, [ :pointer, :pointer, :int, :pointer, :int ], :void
attach_function :fdb_transaction_clear, [ :pointer, :pointer, :int ], :void
attach_function :fdb_transaction_clear_range, [ :pointer, :pointer, :int, :pointer, :int ], :void
attach_function :fdb_transaction_watch, [ :pointer, :pointer, :int ], :pointer
attach_function :fdb_transaction_commit, [ :pointer ], :pointer
attach_function :fdb_transaction_get_committed_version, [ :pointer, :pointer ], :fdb_error
attach_function :fdb_transaction_get_versionstamp, [ :pointer ], :pointer
attach_function :fdb_transaction_on_error, [ :pointer, :fdb_error ], :pointer
attach_function :fdb_transaction_reset, [ :pointer ], :void
attach_function :fdb_select_api_version_impl, [ :int, :int ], :fdb_error
attach_function :fdb_get_max_api_version, [ ], :int
def self.init_c_api
attach_function :fdb_get_error, [ :fdb_error ], :string
attach_function :fdb_network_set_option, [ :int, :pointer, :int ], :fdb_error
attach_function :fdb_setup_network, [ ], :fdb_error
attach_function :fdb_run_network, [ ], :fdb_error, :blocking => true
attach_function :fdb_stop_network, [ ], :fdb_error
attach_function :fdb_future_cancel, [ :pointer ], :void
attach_function :fdb_future_release_memory, [ :pointer ], :void
attach_function :fdb_future_destroy, [ :pointer ], :void
attach_function :fdb_future_block_until_ready, [ :pointer ], :fdb_error, :blocking => true
attach_function :fdb_future_is_ready, [ :pointer ], :fdb_bool
callback :fdb_future_callback, [ :pointer, :pointer ], :void
attach_function :fdb_future_set_callback, [ :pointer, :fdb_future_callback, :pointer ], :fdb_error
attach_function :fdb_future_get_error, [ :pointer ], :fdb_error
attach_function :fdb_future_get_version, [ :pointer, :pointer ], :fdb_error
attach_function :fdb_future_get_key, [ :pointer, :pointer, :pointer ], :fdb_error
attach_function :fdb_future_get_value, [ :pointer, :pointer, :pointer, :pointer ], :fdb_error
attach_function :fdb_future_get_keyvalue_array, [ :pointer, :pointer, :pointer, :pointer ], :fdb_error
attach_function :fdb_future_get_string_array, [ :pointer, :pointer, :pointer ], :fdb_error
attach_function :fdb_create_database, [ :string, :pointer ], :fdb_error
attach_function :fdb_database_destroy, [ :pointer ], :void
attach_function :fdb_database_set_option, [ :pointer, :int, :pointer, :int ], :fdb_error
attach_function :fdb_database_create_transaction, [ :pointer, :pointer ], :fdb_error
attach_function :fdb_transaction_destroy, [ :pointer ], :void
attach_function :fdb_transaction_cancel, [ :pointer ], :void
attach_function :fdb_transaction_atomic_op, [ :pointer, :pointer, :int, :pointer, :int, :int ], :void
attach_function :fdb_transaction_add_conflict_range, [ :pointer, :pointer, :int, :pointer, :int, :int ], :int
attach_function :fdb_transaction_get_addresses_for_key, [ :pointer, :pointer, :int ], :pointer
attach_function :fdb_transaction_set_option, [ :pointer, :int, :pointer, :int ], :fdb_error
attach_function :fdb_transaction_set_read_version, [ :pointer, :int64 ], :void
attach_function :fdb_transaction_get_read_version, [ :pointer ], :pointer
attach_function :fdb_transaction_get, [ :pointer, :pointer, :int, :int ], :pointer
attach_function :fdb_transaction_get_key, [ :pointer, :pointer, :int, :int, :int, :int ], :pointer
attach_function :fdb_transaction_get_range, [ :pointer, :pointer, :int, :int, :int, :pointer, :int, :int, :int, :int, :int, :int, :int, :int, :int ], :pointer
attach_function :fdb_transaction_set, [ :pointer, :pointer, :int, :pointer, :int ], :void
attach_function :fdb_transaction_clear, [ :pointer, :pointer, :int ], :void
attach_function :fdb_transaction_clear_range, [ :pointer, :pointer, :int, :pointer, :int ], :void
attach_function :fdb_transaction_watch, [ :pointer, :pointer, :int ], :pointer
attach_function :fdb_transaction_commit, [ :pointer ], :pointer
attach_function :fdb_transaction_get_committed_version, [ :pointer, :pointer ], :fdb_error
attach_function :fdb_transaction_get_versionstamp, [ :pointer ], :pointer
attach_function :fdb_transaction_on_error, [ :pointer, :fdb_error ], :pointer
attach_function :fdb_transaction_reset, [ :pointer ], :void
end
class KeyValueStruct < FFI::Struct
pack 4
layout :key, :pointer,
@ -156,7 +153,7 @@ module FDB
@@ffi_callbacks
end
[ "Network", "Cluster", "Database", "Transaction" ].each do |scope|
[ "Network", "Database", "Transaction" ].each do |scope|
klass = FDB.const_set("#{scope}Options", Class.new)
klass.class_eval do
define_method(:initialize) do |setfunc|
@ -242,6 +239,10 @@ module FDB
nil
end
class << self
private :init
end
def self.stop()
FDBC.check_error FDBC.fdb_stop_network
end
@ -254,11 +255,10 @@ module FDB
end
end
@@open_clusters = {}
@@open_databases = {}
@@cache_lock = Mutex.new
def self.open( cluster_file = nil, database_name = "DB" )
def self.open( cluster_file = nil )
@@network_thread_monitor.synchronize do
if ! @@network_thread
init
@ -266,15 +266,13 @@ module FDB
end
@@cache_lock.synchronize do
if ! @@open_clusters.has_key? cluster_file
@@open_clusters[cluster_file] = create_cluster( cluster_file )
if ! @@open_databases.has_key? [cluster_file]
dpointer = FFI::MemoryPointer.new :pointer
FDBC.check_error FDBC.fdb_create_database(cluster_file, dpointer)
@@open_databases[cluster_file] = Database.new dpointer.get_pointer(0)
end
if ! @@open_databases.has_key? [cluster_file, database_name]
@@open_databases[[cluster_file, database_name]] = @@open_clusters[cluster_file].open_database(database_name)
end
@@open_databases[[cluster_file, database_name]]
@@open_databases[cluster_file]
end
end
@ -503,41 +501,6 @@ module FDB
end
end
def self.create_cluster(cluster=nil)
f = FDBC.fdb_create_cluster(cluster)
cpointer = FFI::MemoryPointer.new :pointer
FDBC.check_error FDBC.fdb_future_block_until_ready(f)
FDBC.check_error FDBC.fdb_future_get_cluster(f, cpointer)
Cluster.new cpointer.get_pointer(0)
end
class Cluster < FormerFuture
attr_reader :options
def self.finalize(ptr)
proc do
# puts "Destroying cluster #{ptr}"
FDBC.fdb_cluster_destroy(ptr)
end
end
def initialize(cpointer)
@cpointer = cpointer
@options = ClusterOptions.new lambda { |code, param|
FDBC.check_error FDBC.fdb_cluster_set_option(cpointer, code, param, param.nil? ? 0 : param.bytesize)
}
ObjectSpace.define_finalizer(self, self.class.finalize(@cpointer))
end
def open_database(name="DB")
f = FDBC.fdb_cluster_create_database(@cpointer, name, name.bytesize)
dpointer = FFI::MemoryPointer.new :pointer
FDBC.check_error FDBC.fdb_future_block_until_ready(f)
FDBC.check_error FDBC.fdb_future_get_database(f, dpointer)
Database.new dpointer.get_pointer(0)
end
end
class Database < FormerFuture
attr_reader :options

View File

@ -0,0 +1,62 @@
#encoding: BINARY
#
# fdbimpl.rb
#
# This source file is part of the FoundationDB open source project
#
# Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# FoundationDB Ruby API
# Documentation for this API can be found at
# https://apple.github.io/foundationdb/api-ruby.html
# Backwards-compatibility shims for the removed "cluster" API: the C client
# now opens databases directly (see fdb_create_database in the FFI layer),
# so the cluster objects here are thin stand-ins kept for source
# compatibility with older callers.
module FDB
class << self
# Keep the real FDB.open reachable under a private name so the
# legacy-signature wrapper below can delegate to it.
alias_method :open_impl, :open
# Legacy signature: older callers passed a database name. Only "DB" was
# ever valid, so anything else raises invalid_database_name (2013).
def open( cluster_file = nil, database_name = "DB" )
if database_name != "DB"
raise Error.new(2013) # invalid_database_name
end
open_impl(cluster_file)
end
# Legacy API: returns a lightweight stand-in; no real cluster handle is
# created anymore.
def create_cluster(cluster_file_path=nil)
Cluster.new cluster_file_path
end
# init was made private in the new API; legacy code may still call it.
public :init
end
# Empty options container kept for source compatibility; the cluster-level
# options no longer exist.
class ClusterOptions
end
# Stand-in for the removed cluster object: it only remembers the cluster
# file path and opens databases through the new top-level FDB.open.
class Cluster < FormerFuture
attr_reader :options
def initialize(cluster_file_path)
@cluster_file_path = cluster_file_path
@options = ClusterOptions.new
end
# Legacy Cluster#open_database; delegates to the new FDB.open, which
# enforces that only the "DB" database name is accepted.
def open_database(name="DB")
FDB.open(@cluster_file_path, name)
end
end
end

View File

@ -1,9 +1,9 @@
FROM ubuntu:15.04
LABEL version=0.0.2
LABEL version=0.0.5
RUN sed -i -e 's/archive.ubuntu.com\|security.ubuntu.com/old-releases.ubuntu.com/g' -e 's/us\.old/old/g' /etc/apt/sources.list && apt-get clean
RUN apt-get update && apt-get --no-install-recommends install -y --force-yes bzip2 ca-certificates=20141019 adduser apt base-files base-passwd bash binutils build-essential cpp cpp-4.9 dpkg dos2unix fakeroot findutils g++=4:4.9.2-2ubuntu2 g++-4.9=4.9.2-10ubuntu13 gawk=1:4.1.1+dfsg-1 gcc-5-base gcc=4:4.9.2-2ubuntu2 gcc-4.9=4.9.2-10ubuntu13 gcc-4.9-base:amd64=4.9.2-10ubuntu13 gcc-5-base:amd64=5.1~rc1-0ubuntu1 gdb git golang golang-go golang-go-linux-amd64 golang-src grep gzip hostname java-common libasan1 liblsan0 libtsan0 libubsan0 libcilkrts5 libgcc-4.9-dev libstdc++-4.9-dev libgl1-mesa-dri libgl1-mesa-glx libmono-system-xml-linq4.0-cil libmono-system-data-datasetextensions4.0-cil libstdc++-4.9-pic locales login m4 make makedev mawk mono-dmcs npm openjdk-8-jdk passwd python-distlib python-gevent python-greenlet python-html5lib python-minimal python-pip python-pkg-resources python-requests python-setuptools python-six python-urllib3 python-yaml python2.7 python2.7-minimal rpm rpm2cpio ruby ruby2.1 rubygems-integration sed tar texinfo tzdata-java udev unzip util-linux valgrind vim wget golang-go.tools curl sphinx-common gnupg python-dev
RUN apt-get update && apt-get --no-install-recommends install -y --force-yes bzip2 ca-certificates=20141019 adduser apt base-files base-passwd bash binutils build-essential cpp cpp-4.9 dpkg dos2unix fakeroot findutils g++=4:4.9.2-2ubuntu2 g++-4.9=4.9.2-10ubuntu13 gawk=1:4.1.1+dfsg-1 gcc-5-base gcc=4:4.9.2-2ubuntu2 gcc-4.9=4.9.2-10ubuntu13 gcc-4.9-base:amd64=4.9.2-10ubuntu13 gcc-5-base:amd64=5.1~rc1-0ubuntu1 gdb git golang golang-go golang-go-linux-amd64 golang-src grep gzip hostname java-common libasan1 liblsan0 libtsan0 libubsan0 libcilkrts5 libgcc-4.9-dev libstdc++-4.9-dev libgl1-mesa-dri libgl1-mesa-glx libmono-system-xml-linq4.0-cil libmono-system-data-datasetextensions4.0-cil libstdc++-4.9-pic locales login m4 make makedev mawk mono-dmcs npm openjdk-8-jdk passwd python-distlib python-gevent python-greenlet python-html5lib python-minimal python-pip python-pkg-resources python-requests python-setuptools python-six python-urllib3 python-yaml python2.7 python2.7-minimal rpm rpm2cpio ruby ruby2.1 rubygems-integration sed tar texinfo tzdata-java udev unzip util-linux valgrind vim wget golang-go.tools curl sphinx-common gnupg python-dev python3 python3-dev
RUN adduser --disabled-password --gecos '' fdb && chown -R fdb /opt && chmod -R 0777 /opt
@ -31,6 +31,8 @@ RUN cd /opt/ && wget https://ftp.openbsd.org/pub/OpenBSD/LibreSSL/libressl-2.6.4
./configure CFLAGS="-fPIC -O3" && make -j4 && make install &&\
cd /opt/ && rm -r libressl-2.6.4/ libressl-2.6.4.tar.gz libressl-2.6.4.tar.gz.asc libressl.asc
RUN cd /opt && wget https://cmake.org/files/v3.12/cmake-3.12.1-Linux-x86_64.tar.gz -qO - | tar -xz
RUN LANGUAGE=en_US.UTF-8 LANG=en_US.UTF-8 LC_ALL=en_US.UTF-8 locale-gen en_US.UTF-8
RUN dpkg-reconfigure locales
@ -43,3 +45,4 @@ ENV CC=$CC
ARG LIBRARY_PATH=/usr/local/lib
ENV LIBRARY_PATH=$LD_FLAGS
ENV PATH=$PATH:/opt/cmake-3.12.1-Linux-x86_64/bin

44
build/cmake/Dockerfile Normal file
View File

@ -0,0 +1,44 @@
FROM centos:6
LABEL version=0.0.4

RUN yum install -y yum-utils
RUN yum-config-manager --enable rhel-server-rhscl-7-rpms
RUN yum -y install centos-release-scl
RUN yum install -y devtoolset-7

# install cmake
RUN curl -L https://github.com/Kitware/CMake/releases/download/v3.13.4/cmake-3.13.4-Linux-x86_64.tar.gz > /tmp/cmake.tar.gz &&\
echo "563a39e0a7c7368f81bfa1c3aff8b590a0617cdfe51177ddc808f66cc0866c76 /tmp/cmake.tar.gz" > /tmp/cmake-sha.txt &&\
sha256sum -c /tmp/cmake-sha.txt &&\
cd /tmp && tar xf cmake.tar.gz && cp -r cmake-3.13.4-Linux-x86_64/* /usr/local/

# install boost
RUN curl -L https://dl.bintray.com/boostorg/release/1.67.0/source/boost_1_67_0.tar.bz2 > /tmp/boost.tar.bz2 &&\
cd /tmp && echo "2684c972994ee57fc5632e03bf044746f6eb45d4920c343937a465fd67a5adba boost.tar.bz2" > boost-sha.txt &&\
sha256sum -c boost-sha.txt && tar xf boost.tar.bz2 && cp -r boost_1_67_0/boost /usr/local/include/ &&\
rm -rf boost.tar.bz2 boost_1_67_0

# install mono (for actorcompiler)
RUN yum install -y epel-release
RUN yum install -y mono-core

# install Java
RUN yum install -y java-1.8.0-openjdk-devel

# install LibreSSL
# (cleaned up: the original chained a redundant relative `cd` before the
# absolute one and passed two conflicting --prefix flags, of which the last
# one - /usr/local - won; only the effective form is kept)
RUN curl https://ftp.openbsd.org/pub/OpenBSD/LibreSSL/libressl-2.8.2.tar.gz > /tmp/libressl.tar.gz &&\
cd /tmp && echo "b8cb31e59f1294557bfc80f2a662969bc064e83006ceef0574e2553a1c254fd5 libressl.tar.gz" > libressl-sha.txt &&\
sha256sum -c libressl-sha.txt && tar xf libressl.tar.gz &&\
cd /tmp/libressl-2.8.2 && scl enable devtoolset-7 -- ./configure CFLAGS="-fPIC -O3" --prefix=/usr/local &&\
cd /tmp/libressl-2.8.2 && scl enable devtoolset-7 -- make -j`nproc` install &&\
rm -rf /tmp/libressl-2.8.2 /tmp/libressl.tar.gz

# install dependencies for bindings and documentation
# python 2.7 is required for the documentation
RUN yum install -y rh-python36-python-devel rh-ruby24 golang python27

# install packaging tools
RUN yum install -y rpm-build debbuild

CMD scl enable devtoolset-7 python27 rh-python36 rh-ruby24 -- bash

236
build/cmake/build.sh Normal file
View File

@ -0,0 +1,236 @@
#!/usr/bin/env bash
# Print the usage text for build.sh to stdout (the heredoc below is the
# exact user-facing help output).
arguments_usage() {
cat <<EOF
usage: build.sh [-h] [commands]
-h: print this help message and
abort execution
Will execute the passed commands
in the order they were passed
EOF
}
# Parse the build.sh command line.
# Non-option arguments are collected into the global `commands` array, in
# the order they were given.
# Returns 0 on success, 1 on an unknown option, 2 when -h was given
# (usage already printed; the caller treats 2 as "exit without error").
# Note: the previous optstring ":ho:" declared a "-o <arg>" option that no
# case branch ever handled, so it was silently swallowed; it is removed.
arguments_parse() {
    local __res=0
    while getopts ":h" opt
    do
        case ${opt} in
            h )
                arguments_usage
                __res=2
                break
                ;;
            \? )
                # With a leading ":" in the optstring the offending
                # character is in OPTARG; ${opt} itself is just "?".
                echo "Unknown option -${OPTARG}"
                arguments_usage
                __res=1
                break
                ;;
        esac
    done
    shift $((OPTIND -1))
    commands=("$@")
    return ${__res}
}
# Run the cmake configure step against the source tree in ../src.
# Propagates cmake's exit status.
configure() {
    cmake ../src
    return $?
}
# Compile without re-running configure, using all available cores.
# Propagates make's exit status.
build_fast() {
    make -j"$(nproc)"
    return $?
}
# Configure, then compile. Stops at the first failing step and propagates
# its exit status.
build() {
    configure || return $?
    build_fast
    return $?
}
# Build the `packages` target without re-running configure.
# Propagates make's exit status.
package_fast() {
    make -j"$(nproc)" packages
    return $?
}
# Configure, then build the `packages` target. Stops at the first failing
# step and propagates its exit status.
package() {
    configure || return $?
    package_fast
    return $?
}
# Configure with the RPM install layout, compile, and produce RPM packages
# via cpack (fakeroot so file ownership inside the package is root).
# Propagates the exit status of the first failing step.
rpm() {
    cmake -DINSTALL_LAYOUT=RPM ../src || return $?
    build_fast || return $?
    fakeroot cpack
    return $?
}
# Configure with the DEB install layout, compile, and produce DEB packages
# via cpack (fakeroot so file ownership inside the package is root).
# Propagates the exit status of the first failing step.
deb() {
    cmake -DINSTALL_LAYOUT=DEB ../src || return $?
    build_fast || return $?
    fakeroot cpack
    return $?
}
# Entry point: parse the options, then execute each requested command in
# order. Recognized commands: configure, build, build/fast, package,
# package/fast, rpm, deb, linux-pkgs. Execution stops at the first failing
# command and its exit status is returned.
# BUGFIX: the rpm, deb, and linux-pkgs cases previously never captured
# `__res=$?`, so failures in those commands were silently ignored.
main() {
    local __res=0
    for _ in 1
    do
        arguments_parse "$@"
        __res=$?
        if [ ${__res} -ne 0 ]
        then
            if [ ${__res} -eq 2 ]
            then
                # -h was given: usage was already printed, exit cleanly
                __res=0
            fi
            break
        fi
        echo "Num commands ${#commands[@]}"
        for command in "${commands[@]}"
        do
            echo "Command: ${command}"
            case ${command} in
                configure )
                    configure
                    __res=$?
                    ;;
                build )
                    build
                    __res=$?
                    ;;
                build/fast )
                    build_fast
                    __res=$?
                    ;;
                package )
                    package
                    __res=$?
                    ;;
                package/fast )
                    package_fast
                    __res=$?
                    ;;
                rpm )
                    rpm
                    __res=$?
                    ;;
                deb )
                    deb
                    __res=$?
                    ;;
                linux-pkgs)
                    # rpm first, then deb; && stops on and reports the
                    # first failure
                    rpm && deb
                    __res=$?
                    ;;
                * )
                    echo "ERROR: Command not found ($command)"
                    __res=1
                    ;;
            esac
            if [ ${__res} -ne 0 ]
            then
                break
            fi
        done
    done
    return ${__res}
}
main "$@"

View File

@ -0,0 +1,3 @@
FROM centos:6
RUN yum install -y yum-utils

View File

@ -0,0 +1,3 @@
FROM ubuntu:16.04
RUN apt-get update

View File

@ -0,0 +1,57 @@
version: "3"
services:
common: &common
image: foundationdb-build:0.0.4
build:
context: .
dockerfile: Dockerfile
build-setup: &build-setup
<<: *common
depends_on: [common]
#debuginfo builds need the build path to be longer than
#the path where debuginfo sources are places. Crazy, yes,
#see the manual for CPACK_RPM_BUILD_SOURCE_DIRS_PREFIX.
volumes:
- ../..:/foundationdb/deep/directory/as/debuginfo/doesnt/work/otherwise/src
- ${BUILDDIR}:/foundationdb/deep/directory/as/debuginfo/doesnt/work/otherwise/build
working_dir: /foundationdb/deep/directory/as/debuginfo/doesnt/work/otherwise/build
configure: &configure
<<: *build-setup
command: scl enable devtoolset-7 rh-ruby24 rh-python36 python27 -- bash ../src/build/cmake/build.sh configure
build: &build
<<: *build-setup
command: scl enable devtoolset-7 rh-ruby24 rh-python36 python27 -- bash ../src/build/cmake/build.sh build
build-fast: &build-fast
<<: *build-setup
command: scl enable devtoolset-7 rh-ruby24 rh-python36 python27 -- bash ../src/build/cmake/build.sh build/fast
rpm: &rpm
<<: *build-setup
command: scl enable devtoolset-7 rh-ruby24 rh-python36 python27 -- bash ../src/build/cmake/build.sh rpm
deb: &deb
<<: *build-setup
command: scl enable devtoolset-7 rh-ruby24 rh-python36 python27 -- bash ../src/build/cmake/build.sh deb
linux-pkgs:
<<: *build-setup
command: scl enable devtoolset-7 rh-ruby24 rh-python36 python27 -- bash ../src/build/cmake/build.sh linux-pkgs
package: &package
<<: *build-setup
command: scl enable devtoolset-7 rh-ruby24 rh-python36 python27 -- bash ../src/build/cmake/build.sh package
package-fast: &package-fast
<<: *build-setup
command: scl enable devtoolset-7 rh-ruby24 rh-python36 python27 -- bash ../src/build/cmake/build.sh package/fast
shell:
<<: *build-setup
volumes:
- ..:/foundationdb

View File

@ -2,10 +2,7 @@ version: "3"
services:
common: &common
image: foundationdb-build:0.0.2
build:
context: .
dockerfile: Dockerfile
image: foundationdb/foundationdb-build:0.0.5
build-setup: &build-setup
<<: *common
@ -13,25 +10,72 @@ services:
volumes:
- ..:/foundationdb
working_dir: /foundationdb
environment:
- MAKEJOBS=1
- BUILD_DIR=./work
release-setup: &release-setup
<<: *build-setup
environment:
- MAKEJOBS=1
- RELEASE=true
- BUILD_DIR=./work
snapshot-setup: &snapshot-setup
<<: *build-setup
build-docs:
<<: *build-setup
command: make docpackage
command: bash -c 'make -j "$${MAKEJOBS}" docpackage'
build-release: &build-release
release-packages: &release-packages
<<: *release-setup
command: bash -c 'make -j "$${MAKEJOBS}" packages'
snapshot-packages: &snapshot-packages
<<: *build-setup
environment:
- RELEASE=true
command: make packages
command: bash -c 'make -j "$${MAKEJOBS}" packages'
build-snapshot: &build-snapshot
prb-packages:
<<: *snapshot-packages
release-bindings: &release-bindings
<<: *release-setup
command: bash -c 'make -j "$${MAKEJOBS}" bindings'
snapshot-bindings: &snapshot-bindings
<<: *build-setup
environment:
- RELEASE=false
command: make packages
command: bash -c 'make -j "$${MAKEJOBS}" bindings'
prb-bindings:
<<: *snapshot-bindings
snapshot-cmake: &snapshot-cmake
<<: *build-setup
command: bash -c 'if [ -f CMakeLists.txt ]; then mkdir -p "$${BUILD_DIR}" && cd "$${BUILD_DIR}" && cmake .. && make -j "$${MAKEJOBS}"; fi'
prb-cmake:
<<: *snapshot-cmake
snapshot-ctest: &snapshot-ctest
<<: *build-setup
command: bash -c 'if [ -f CMakeLists.txt ]; then mkdir -p "$${BUILD_DIR}" && cd "$${BUILD_DIR}" && cmake .. && make -j "$${MAKEJOBS}" && ctest -L fast -j "$${MAKEJOBS}" --output-on-failure; fi'
prb-ctest:
<<: *snapshot-ctest
snapshot-correctness: &snapshot-correctness
<<: *build-setup
command: bash -c 'if [ -f CMakeLists.txt ]; then mkdir -p "$${BUILD_DIR}" && cd "$${BUILD_DIR}" && cmake .. && make -j "$${MAKEJOBS}" && ctest -j "$${MAKEJOBS}" --output-on-failure; fi'
prb-correctness:
<<: *snapshot-correctness
build-prb:
<<: *build-snapshot
shell:
<<: *build-setup

View File

@ -22,9 +22,10 @@ TARGETS += packages
CLEAN_TARGETS += packages_clean
PACKAGE_BINARIES = fdbcli fdbserver fdbbackup fdbmonitor fdbrestore fdbdr dr_agent backup_agent
PACKAGE_CONTENTS := $(addprefix bin/, $(PACKAGE_BINARIES)) $(addprefix bin/, $(addsuffix .debug, $(PACKAGE_BINARIES))) lib/libfdb_c.$(DLEXT) bindings/python/fdb/fdboptions.py bindings/c/foundationdb/fdb_c_options.g.h
PROJECT_BINARIES = $(addprefix bin/, $(PACKAGE_BINARIES))
PACKAGE_CONTENTS := $(PROJECT_BINARIES) $(addprefix bin/, $(addsuffix .debug, $(PACKAGE_BINARIES))) lib/libfdb_c.$(DLEXT) bindings/python/fdb/fdboptions.py bindings/c/foundationdb/fdb_c_options.g.h
packages: TGZ FDBSERVERAPI
packages: TGZ BINS FDBSERVERAPI
TGZ: $(PACKAGE_CONTENTS) versions.target lib/libfdb_java.$(DLEXT)
@echo "Archiving tgz"
@ -32,9 +33,17 @@ TGZ: $(PACKAGE_CONTENTS) versions.target lib/libfdb_java.$(DLEXT)
@rm -f packages/FoundationDB-$(PLATFORM)-*.tar.gz
@bash -c "tar -czf packages/FoundationDB-$(PLATFORM)-$(VERSION)-$(PKGRELEASE).tar.gz bin/{fdbmonitor{,.debug},fdbcli{,.debug},fdbserver{,.debug},fdbbackup{,.debug},fdbdr{,.debug},fdbrestore{,.debug},dr_agent{,.debug},coverage.{fdbclient,fdbserver,fdbrpc,flow}.xml} lib/libfdb_c.$(DLEXT){,-debug} lib/libfdb_java.$(DLEXT)* bindings/python/fdb/*.py bindings/c/*.h"
BINS: packages/foundationdb-binaries-$(VERSION)-$(PLATFORM).tar.gz
packages_clean:
@echo "Cleaning packages"
@rm -f packages/FoundationDB-$(PLATFORM)-*.tar.gz packages/fdb-tests-$(VERSION).tar.gz packages/fdb-headers-$(VERSION).tar.gz packages/fdb-bindings-$(VERSION).tar.gz packages/fdb-server-$(VERSION)-$(PLATFORM).tar.gz
@rm -f packages/FoundationDB-$(PLATFORM)-*.tar.gz packages/foundationdb-binaries-$(VERSION)-$(PLATFORM).tar.gz packages/fdb-tests-$(VERSION).tar.gz packages/fdb-headers-$(VERSION).tar.gz packages/fdb-bindings-$(VERSION).tar.gz packages/fdb-server-$(VERSION)-$(PLATFORM).tar.gz
packages/foundationdb-binaries-$(VERSION)-$(PLATFORM).tar.gz: $(PROJECT_BINARIES) versions.target
@echo "Packaging binaries"
@mkdir -p packages
@rm -f packages/foundationdb-binaries-$(VERSION)-$(PLATFORM).tar.gz
@bash -c "tar -czf packages/foundationdb-binaries-$(VERSION)-$(PLATFORM).tar.gz $(PROJECT_BINARIES)"
packages/fdb-server-$(VERSION)-$(PLATFORM).tar.gz: bin/fdbserver bin/fdbcli lib/libfdb_c.$(DLEXT)
@echo "Packaging fdb server api"

119
cmake/AddFdbTest.cmake Normal file
View File

@ -0,0 +1,119 @@
# This configures the fdb testing system in cmake. Currently this simply means
# that it will get a list of all test files and store this list in a parent scope
# so that we can later verify that all of them were assigned to a test.
#
# - TEST_DIRECTORY The directory where all the tests are
# - ERROR_ON_ADDITIONAL_FILES if this is passed verify_fdb_tests will print
# an error if there are any .txt files in the test directory that do not
# correspond to a test or are not ignore by a pattern
# - IGNORE_PATTERNS regular expressions. All files that match any of those
# experessions don't need to be associated with a test
# Collect candidate test files for later verification.
#
# When ERROR_ON_ADDITIONAL_FILES is given, every *.txt file below
# TEST_DIRECTORY that matches none of the IGNORE_PATTERNS regular
# expressions is recorded in the parent-scope list `fdb_test_files`.
# add_fdb_test() removes each file it consumes from that list, and
# verify_testing() reports whatever is left over.
function(configure_testing)
  set(options ERROR_ON_ADDITIONAL_FILES)
  set(oneValueArgs TEST_DIRECTORY)
  set(multiValueArgs IGNORE_PATTERNS)
  cmake_parse_arguments(CONFIGURE_TESTING "${options}" "${oneValueArgs}" "${multiValueArgs}" "${ARGN}")
  if(CONFIGURE_TESTING_ERROR_ON_ADDITIONAL_FILES)
    file(GLOB_RECURSE candidates "${CONFIGURE_TESTING_TEST_DIRECTORY}/*.txt")
    set(collected_tests "")
    foreach(candidate IN LISTS candidates)
      # A candidate matching any ignore pattern is not a test file.
      set(is_ignored NO)
      foreach(pattern IN LISTS CONFIGURE_TESTING_IGNORE_PATTERNS)
        if("${candidate}" MATCHES "${pattern}")
          set(is_ignored YES)
        endif()
      endforeach()
      if(NOT is_ignored)
        list(APPEND collected_tests "${candidate}")
      endif()
    endforeach()
    set(fdb_test_files "${collected_tests}" PARENT_SCOPE)
  endif()
endfunction()
# Emit an error for every test file configure_testing() discovered that no
# add_fdb_test() call consumed.
function(verify_testing)
  foreach(remaining_file IN LISTS fdb_test_files)
    message(SEND_ERROR "${remaining_file} found but it is not associated with a test")
  endforeach()
endfunction()
# This will add a test that can be run by ctest. This macro can be called
# with the following arguments:
#
# - UNIT will run the test as a unit test (it won't bring up a whole simulated system)
# - TEST_NAME followed the name of the test
# - TIMEOUT followed by a timeout - reaching the timeout makes the test fail (default is
# 3600 seconds). The timeout will be reached whenever it ran either too long in simulated
# time or in real time - whatever is smaller.
# - TEST_FILES followed by typically one test file. The test runner will run
# all these tests in serialized order and within the same directory. This is
# useful for restart tests
# Register a correctness test with ctest, driven by the TestRunner script.
#
# Options/arguments:
#  - UNIT: run as a unit test instead of bringing up a simulated cluster
#  - IGNORE: skip this test unless RUN_IGNORED_TESTS is set
#  - TEST_NAME: explicit test name (default: first test file minus ".txt")
#  - TIMEOUT: seconds until the test is considered failed (default 3600)
#  - TEST_FILES: one or more test files; several files run serialized in
#    the same directory (used for restart tests)
# Consumed files are removed from the parent-scope `fdb_test_files` list so
# verify_testing() can report test files never assigned to a test.
function(add_fdb_test)
  set(options UNIT IGNORE)
  set(oneValueArgs TEST_NAME TIMEOUT)
  set(multiValueArgs TEST_FILES)
  cmake_parse_arguments(ADD_FDB_TEST "${options}" "${oneValueArgs}" "${multiValueArgs}" "${ARGN}")
  set(this_test_timeout ${ADD_FDB_TEST_TIMEOUT})
  if(NOT this_test_timeout)
    set(this_test_timeout 3600)
  endif()
  set(test_type "simulation")
  # Mark all given files as assigned to a test (for verify_testing()).
  set(fdb_test_files_ "${fdb_test_files}")
  foreach(test_file IN LISTS ADD_FDB_TEST_TEST_FILES)
    list(REMOVE_ITEM fdb_test_files_ "${CMAKE_CURRENT_SOURCE_DIR}/${test_file}")
  endforeach()
  set(fdb_test_files "${fdb_test_files_}" PARENT_SCOPE)
  list(LENGTH ADD_FDB_TEST_TEST_FILES NUM_TEST_FILES)
  if(ADD_FDB_TEST_IGNORE AND NOT RUN_IGNORED_TESTS)
    return()
  endif()
  if(ADD_FDB_TEST_UNIT)
    set(test_type "test")
  endif()
  list(GET ADD_FDB_TEST_TEST_FILES 0 first_file)
  string(REGEX REPLACE "^(.*)\\.txt$" "\\1" test_name ${first_file})
  # BUGFIX: CMake regular expressions have no "\d" class (it matched a
  # literal "d"); use [0-9] to detect a numeric "-N" suffix as intended.
  if("${test_name}" MATCHES "(-[0-9])$")
    string(REGEX REPLACE "(.*)(-[0-9])$" "\\1" test_name_1 ${test_name})
    message(STATUS "new testname ${test_name_1}")
  endif()
  if (NOT "${ADD_FDB_TEST_TEST_NAME}" STREQUAL "")
    set(test_name ${ADD_FDB_TEST_TEST_NAME})
  endif()
  if(ADD_FDB_TEST_UNIT)
    message(STATUS
      "ADDING UNIT TEST ${test_name}")
  else()
    message(STATUS
      "ADDING SIMULATOR TEST ${test_name}")
  endif()
  # (A space-joined `test_files` string was built here before but never
  # used anywhere - removed.)
  set(BUGGIFY_OPTION "")
  if (ENABLE_BUGGIFY)
    set(BUGGIFY_OPTION "-B")
  endif()
  list(TRANSFORM ADD_FDB_TEST_TEST_FILES PREPEND "${CMAKE_CURRENT_SOURCE_DIR}/")
  add_test(NAME ${test_name}
    COMMAND $<TARGET_FILE:Python::Interpreter> ${TestRunner}
    -n ${test_name}
    -b ${PROJECT_BINARY_DIR}
    -t ${test_type}
    -O ${OLD_FDBSERVER_BINARY}
    --aggregate-traces ${TEST_AGGREGATE_TRACES}
    --keep-logs ${TEST_KEEP_LOGS}
    --keep-simdirs ${TEST_KEEP_SIMDIR}
    --seed ${SEED}
    ${BUGGIFY_OPTION}
    ${ADD_FDB_TEST_TEST_FILES}
    WORKING_DIRECTORY ${PROJECT_BINARY_DIR})
  # Label the test with the name of the directory its first file lives in.
  get_filename_component(test_dir_full ${first_file} DIRECTORY)
  get_filename_component(test_dir ${test_dir_full} NAME)
  set_tests_properties(${test_name} PROPERTIES TIMEOUT ${this_test_timeout} LABELS "${test_dir}")
endfunction()

View File

@ -1,36 +1,28 @@
find_program(MONO_EXECUTABLE mono)
find_program(MCS_EXECUTABLE dmcs)
if (NOT MCS_EXECUTABLE)
find_program(MCS_EXECUTABLE mcs)
endif()
set(MONO_FOUND FALSE CACHE INTERNAL "")
if (NOT MCS_EXECUTABLE)
find_program(MCS_EXECUTABLE mcs)
endif()
if (MONO_EXECUTABLE AND MCS_EXECUTABLE)
set(MONO_FOUND True CACHE INTERNAL "")
endif()
if (NOT MONO_FOUND)
message(FATAL_ERROR "Could not find mono")
endif()
set(ACTORCOMPILER_SRCS
${CMAKE_CURRENT_SOURCE_DIR}/flow/actorcompiler/ActorCompiler.cs
${CMAKE_CURRENT_SOURCE_DIR}/flow/actorcompiler/ActorParser.cs
${CMAKE_CURRENT_SOURCE_DIR}/flow/actorcompiler/ParseTree.cs
${CMAKE_CURRENT_SOURCE_DIR}/flow/actorcompiler/Program.cs
${CMAKE_CURRENT_SOURCE_DIR}/flow/actorcompiler/Properties/AssemblyInfo.cs)
set(ACTOR_COMPILER_REFERENCES
"-r:System,System.Core,System.Xml.Linq,System.Data.DataSetExtensions,Microsoft.CSharp,System.Data,System.Xml")
if(WIN32)
add_executable(actorcompiler ${ACTORCOMPILER_SRCS})
target_compile_options(actorcompiler PRIVATE "/langversion:6")
set_property(TARGET actorcompiler PROPERTY VS_DOTNET_REFERENCES
"System"
"System.Core"
"System.Xml.Linq"
"System.Data.DataSetExtensions"
"Microsoft.CSharp"
"System.Data"
"System.Xml")
else()
set(ACTOR_COMPILER_REFERENCES
"-r:System,System.Core,System.Xml.Linq,System.Data.DataSetExtensions,Microsoft.CSharp,System.Data,System.Xml")
add_custom_command(OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/actorcompiler.exe
COMMAND ${MCS_EXECUTABLE} ARGS ${ACTOR_COMPILER_REFERENCES} ${ACTORCOMPILER_SRCS} "-target:exe" "-out:actorcompiler.exe"
DEPENDS ${ACTORCOMPILER_SRCS}
COMMENT "Compile actor compiler" VERBATIM)
add_custom_target(actorcompiler DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/actorcompiler.exe)
set(actor_exe "${CMAKE_CURRENT_BINARY_DIR}/actorcompiler.exe")
add_custom_command(OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/actorcompiler.exe
COMMAND ${MCS_EXECUTABLE} ARGS ${ACTOR_COMPILER_REFERENCES} ${ACTORCOMPILER_SRCS} "-target:exe" "-out:actorcompiler.exe"
DEPENDS ${ACTORCOMPILER_SRCS}
COMMENT "Compile actor compiler" VERBATIM)
add_custom_target(actorcompiler DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/actorcompiler.exe)
set(actor_exe "${CMAKE_CURRENT_BINARY_DIR}/actorcompiler.exe")
endif()

View File

@ -0,0 +1,25 @@
# Build the C# coveragetool. On Windows it is compiled as a native C#
# project; elsewhere it is compiled to coveragetool.exe with mono's mcs.
set(COVERAGETOOL_SRCS
  ${CMAKE_CURRENT_SOURCE_DIR}/flow/coveragetool/Program.cs
  ${CMAKE_CURRENT_SOURCE_DIR}/flow/coveragetool/Properties/AssemblyInfo.cs)
if(WIN32)
  add_executable(coveragetool ${COVERAGETOOL_SRCS})
  target_compile_options(coveragetool PRIVATE "/langversion:6")
  # BUGFIX: "ystem.Core", "ystem.Data.DataSetExtensions" and "ystem.Data"
  # were missing their leading "S" (inconsistent with the actorcompiler
  # reference list and the mcs reference string below).
  set_property(TARGET coveragetool PROPERTY VS_DOTNET_REFERENCES
    "System"
    "System.Core"
    "System.Xml.Linq"
    "System.Data.DataSetExtensions"
    "Microsoft.CSharp"
    "System.Data"
    "System.Xml")
else()
  set(COVERAGETOOL_COMPILER_REFERENCES
    "-r:System,System.Core,System.Xml.Linq,System.Data.DataSetExtensions,Microsoft.CSharp,System.Data,System.Xml")
  add_custom_command(OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/coveragetool.exe
    COMMAND ${MCS_EXECUTABLE} ARGS ${COVERAGETOOL_COMPILER_REFERENCES} ${COVERAGETOOL_SRCS} "-target:exe" "-out:coveragetool.exe"
    DEPENDS ${COVERAGETOOL_SRCS}
    COMMENT "Compile coveragetool" VERBATIM)
  add_custom_target(coveragetool DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/coveragetool.exe)
  set(coveragetool_exe "${CMAKE_CURRENT_BINARY_DIR}/coveragetool.exe")
endif()

View File

@ -6,20 +6,49 @@ set(VEXILLOGRAPHER_SRCS
${CMAKE_CURRENT_SOURCE_DIR}/fdbclient/vexillographer/ruby.cs
${CMAKE_CURRENT_SOURCE_DIR}/fdbclient/vexillographer/vexillographer.cs)
set(VEXILLOGRAPHER_REFERENCES "-r:System,System.Core,System.Data,System.Xml,System.Xml.Linq")
set(VEXILLOGRAPHER_EXE "${CMAKE_CURRENT_BINARY_DIR}/vexillographer.exe")
add_custom_command(OUTPUT ${VEXILLOGRAPHER_EXE}
COMMAND ${MCS_EXECUTABLE} ARGS ${VEXILLOGRAPHER_REFERENCES} ${VEXILLOGRAPHER_SRCS} -target:exe -out:${VEXILLOGRAPHER_EXE}
DEPENDS ${VEXILLOGRAPHER_SRCS}
COMMENT "Compile Vexillographer")
add_custom_target(vexillographer DEPENDS ${VEXILLOGRAPHER_EXE})
if(WIN32)
add_executable(vexillographer ${VEXILLOGRAPHER_SRCS})
target_compile_options(vexillographer PRIVATE "/langversion:6")
set_property(TARGET vexillographer PROPERTY VS_DOTNET_REFERENCES
"System"
"System.Core"
"System.Data"
"System.Xml"
"System.Xml.Linq")
else()
set(VEXILLOGRAPHER_REFERENCES "-r:System,System.Core,System.Data,System.Xml,System.Xml.Linq")
set(VEXILLOGRAPHER_EXE "${CMAKE_CURRENT_BINARY_DIR}/vexillographer.exe")
add_custom_command(OUTPUT ${VEXILLOGRAPHER_EXE}
COMMAND ${MCS_EXECUTABLE} ARGS ${VEXILLOGRAPHER_REFERENCES} ${VEXILLOGRAPHER_SRCS} -target:exe -out:${VEXILLOGRAPHER_EXE}
DEPENDS ${VEXILLOGRAPHER_SRCS}
COMMENT "Compile Vexillographer")
add_custom_target(vexillographer DEPENDS ${VEXILLOGRAPHER_EXE})
endif()
set(ERROR_GEN_SRCS
${CMAKE_CURRENT_SOURCE_DIR}/flow/error_gen.cs)
set(ERROR_GEN_REFERENCES "-r:System,System.Core,System.Data,System.Xml,System.Xml.Linq")
set(ERROR_GEN_EXE "${CMAKE_CURRENT_BINARY_DIR}/error_gen.exe")
add_custom_command (OUTPUT ${ERROR_GEN_EXE}
COMMAND ${MCS_EXECUTABLE} ARGS ${ERROR_GEN_REFERENCES} ${ERROR_GEN_SRCS} -target:exe -out:${ERROR_GEN_EXE}
DEPENDS ${ERROR_GEN_SRCS}
COMMENT "Compile error_gen")
add_custom_target(error_gen DEPENDS ${ERROR_GEN_EXE})
# Generate FDBOptions sources from fdb.options via the vexillographer tool.
#
# Options/arguments:
#  - TARGET: name of the custom target to create
#  - LANG: target language, passed through to vexillographer
#  - OUT: output path passed to vexillographer
#  - OUTPUT: explicit list of generated files (defaults to OUT)
#  - ALL: when given, the target is part of the default build
function(vexillographer_compile)
  set(CX_OPTIONS ALL)
  set(CX_ONE_VALUE_ARGS TARGET LANG OUT)
  set(CX_MULTI_VALUE_ARGS OUTPUT)
  cmake_parse_arguments(VX "${CX_OPTIONS}" "${CX_ONE_VALUE_ARGS}" "${CX_MULTI_VALUE_ARGS}" "${ARGN}")
  if(NOT VX_OUTPUT)
    set(VX_OUTPUT ${VX_OUT})
  endif()
  # VERBATIM added on both commands so argument escaping is platform
  # independent (recommended for every add_custom_command).
  if(WIN32)
    # On Windows vexillographer is built as a native executable target.
    add_custom_command(
      OUTPUT ${VX_OUTPUT}
      COMMAND $<TARGET_FILE:vexillographer> ${CMAKE_SOURCE_DIR}/fdbclient/vexillographer/fdb.options ${VX_LANG} ${VX_OUT}
      DEPENDS ${CMAKE_SOURCE_DIR}/fdbclient/vexillographer/fdb.options vexillographer
      COMMENT "Generate FDBOptions ${VX_LANG} files"
      VERBATIM)
  else()
    # Elsewhere the compiled CLI assembly is executed through mono.
    add_custom_command(
      OUTPUT ${VX_OUTPUT}
      COMMAND ${MONO_EXECUTABLE} ${VEXILLOGRAPHER_EXE} ${CMAKE_SOURCE_DIR}/fdbclient/vexillographer/fdb.options ${VX_LANG} ${VX_OUT}
      DEPENDS ${CMAKE_SOURCE_DIR}/fdbclient/vexillographer/fdb.options vexillographer
      COMMENT "Generate FDBOptions ${VX_LANG} files"
      VERBATIM)
  endif()
  if(VX_ALL)
    add_custom_target(${VX_TARGET} ALL DEPENDS ${VX_OUTPUT})
  else()
    add_custom_target(${VX_TARGET} DEPENDS ${VX_OUTPUT})
  endif()
endfunction()

View File

@ -4,9 +4,10 @@ set(USE_VALGRIND OFF CACHE BOOL "Compile for valgrind usage")
set(USE_GOLD_LINKER OFF CACHE BOOL "Use gold linker")
set(ALLOC_INSTRUMENTATION OFF CACHE BOOL "Instrument alloc")
set(WITH_UNDODB OFF CACHE BOOL "Use rr or undodb")
set(OPEN_FOR_IDE OFF CACHE BOOL "Open this in an IDE (won't compile/link)")
set(FDB_RELEASE OFF CACHE BOOL "This is a building of a final release")
add_compile_options(-DCMAKE_BUILD)
find_package(Threads REQUIRED)
if(ALLOC_INSTRUMENTATION)
add_compile_options(-DALLOC_INSTRUMENTATION)
@ -31,8 +32,11 @@ include_directories(${CMAKE_CURRENT_BINARY_DIR})
if (NOT OPEN_FOR_IDE)
add_definitions(-DNO_INTELLISENSE)
endif()
add_definitions(-DUSE_UCONTEXT)
enable_language(ASM)
if(WIN32)
add_definitions(-DUSE_USEFIBERS)
else()
add_definitions(-DUSE_UCONTEXT)
endif()
include(CheckFunctionExists)
set(CMAKE_REQUIRED_INCLUDES stdlib.h malloc.h)
@ -40,7 +44,7 @@ set(CMAKE_REQUIRED_LIBRARIES c)
if(WIN32)
add_compile_options(/W3 /EHsc)
add_compile_options(/W3 /EHsc /std:c++14 /bigobj)
else()
if(USE_GOLD_LINKER)
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fuse-ld=gold -Wl,--disable-new-dtags")

27
cmake/EnableCsharp.cmake Normal file
View File

@ -0,0 +1,27 @@
if(WIN32)
  # C# is currently only supported on Windows.
  # On other platforms we find mono manually
  enable_language(CSharp)
else()
  # for other platforms we currently use mono
  find_program(MONO_EXECUTABLE mono)
  # Prefer dmcs, fall back to mcs. (This fallback block was previously
  # duplicated verbatim - the second copy was a no-op and is removed.)
  find_program(MCS_EXECUTABLE dmcs)
  if (NOT MCS_EXECUTABLE)
    find_program(MCS_EXECUTABLE mcs)
  endif()
  set(MONO_FOUND FALSE CACHE INTERNAL "")
  if (MONO_EXECUTABLE AND MCS_EXECUTABLE)
    set(MONO_FOUND True CACHE INTERNAL "")
  endif()
  if (NOT MONO_FOUND)
    message(FATAL_ERROR "Could not find mono")
  endif()
endif()

105
cmake/FDBComponents.cmake Normal file
View File

@ -0,0 +1,105 @@
set(FORCE_ALL_COMPONENTS OFF CACHE BOOL "Fails cmake if not all dependencies are found")

################################################################################
# LibreSSL
################################################################################

set(DISABLE_TLS OFF CACHE BOOL "Don't try to find LibreSSL and always build without TLS support")
if(DISABLE_TLS)
  set(WITH_TLS OFF)
else()
  set(LIBRESSL_USE_STATIC_LIBS TRUE)
  find_package(LibreSSL)
  if(LibreSSL_FOUND)
    set(WITH_TLS ON)
    add_compile_options(-DHAVE_OPENSSL)
  else()
    message(STATUS "LibreSSL NOT Found - Will compile without TLS Support")
    message(STATUS "You can set LibreSSL_ROOT to the LibreSSL install directory to help cmake find it")
    set(WITH_TLS OFF)
  endif()
endif()

################################################################################
# Java Bindings
################################################################################

set(WITH_JAVA OFF)
# BUG FIX: JNI was searched with REQUIRED, which aborts configuration when JNI
# is missing and made the WITH_JAVA=OFF fallback below unreachable. Java is an
# optional component, so the search must be allowed to fail.
find_package(JNI 1.8)
find_package(Java 1.8 COMPONENTS Development)
if(JNI_FOUND AND Java_FOUND AND Java_Development_FOUND)
  set(WITH_JAVA ON)
  include(UseJava)
  enable_language(Java)
else()
  set(WITH_JAVA OFF)
endif()

################################################################################
# Python Bindings
################################################################################

find_package(Python COMPONENTS Interpreter)
if(Python_Interpreter_FOUND)
  set(WITH_PYTHON ON)
else()
  # Python is mandatory for the build; stop configuration here.
  # (BUG FIX: grammar in the message, and a dead `set(WITH_PYTHON OFF)` after
  # FATAL_ERROR was removed.)
  message(FATAL_ERROR "Could not find a suitable python interpreter")
endif()

################################################################################
# Pip
################################################################################

find_package(Virtualenv)
if(Virtualenv_FOUND)
  set(WITH_DOCUMENTATION ON)
else()
  set(WITH_DOCUMENTATION OFF)
endif()

################################################################################
# GO
################################################################################

find_program(GO_EXECUTABLE go)
# building the go binaries is currently not supported on Windows
if(GO_EXECUTABLE AND NOT WIN32)
  set(WITH_GO ON)
else()
  set(WITH_GO OFF)
endif()

################################################################################
# Ruby
################################################################################

find_program(GEM_EXECUTABLE gem)
set(WITH_RUBY OFF)
if(GEM_EXECUTABLE)
  # NOTE(review): RUBY_EXECUTABLE is not set anywhere in this file -- it is
  # presumably provided by a find_package(Ruby) elsewhere; confirm, otherwise
  # GEM_COMMAND starts with an empty element.
  set(GEM_COMMAND ${RUBY_EXECUTABLE} ${GEM_EXECUTABLE})
  set(WITH_RUBY ON)
endif()

# staging area for all installer/package artifacts produced by `make packages`
file(MAKE_DIRECTORY ${CMAKE_BINARY_DIR}/packages)
add_custom_target(packages)
# Print a short summary of which optional components will be built.
# Reads the WITH_* flags computed above; produces STATUS output only.
function(print_components)
  foreach(summary_line IN ITEMS
      "========================================="
      " Components Build Overview "
      "========================================="
      "Build Java Bindings: ${WITH_JAVA}"
      "Build with TLS support: ${WITH_TLS}"
      "Build Go bindings: ${WITH_GO}"
      "Build Ruby bindings: ${WITH_RUBY}"
      "Build Python sdist (make package): ${WITH_PYTHON}"
      "Build Documentation (make html): ${WITH_DOCUMENTATION}"
      "=========================================")
    message(STATUS "${summary_line}")
  endforeach()
endfunction()
# With FORCE_ALL_COMPONENTS enabled, configuration fails unless every optional
# component was detected; the summary is printed first so the user can see
# which one is missing.
if(FORCE_ALL_COMPONENTS)
  set(_fdb_all_components_found TRUE)
  foreach(_fdb_flag IN ITEMS WITH_JAVA WITH_TLS WITH_GO WITH_RUBY WITH_PYTHON WITH_DOCUMENTATION)
    if(NOT ${_fdb_flag})
      set(_fdb_all_components_found FALSE)
    endif()
  endforeach()
  if(NOT _fdb_all_components_found)
    print_components()
    message(FATAL_ERROR "FORCE_ALL_COMPONENTS is set but not all dependencies could be found")
  endif()
endif()

68
cmake/FindLibreSSL.cmake Normal file
View File

@ -0,0 +1,68 @@
# FindLibreSSL
# Locates the LibreSSL tls/ssl/crypto libraries and the tls.h header. On
# success this defines LIBRESSL_FOUND and an INTERFACE target `LibreSSL`
# that carries the include directory and all three libraries.
include(FindPackageHandleStandardArgs)

# Support preference of static libs by adjusting CMAKE_FIND_LIBRARY_SUFFIXES
if(LIBRESSL_USE_STATIC_LIBS)
  set(_libressl_ORIG_CMAKE_FIND_LIBRARY_SUFFIXES ${CMAKE_FIND_LIBRARY_SUFFIXES})
  if(WIN32)
    set(CMAKE_FIND_LIBRARY_SUFFIXES .lib .a ${CMAKE_FIND_LIBRARY_SUFFIXES})
  else()
    set(CMAKE_FIND_LIBRARY_SUFFIXES .a)
  endif()
endif()

set(_LibreSSL_HINTS "")
if(WIN32)
  # default install location of the LibreSSL Windows package
  set(_LibreSSL_HINTS "C:\\Program Files\\LibreSSL")
endif()

find_path(LIBRESSL_INCLUDE_DIR
  NAMES
    tls.h
  PATH_SUFFIXES
    include
  HINTS
    "${_LibreSSL_HINTS}"
)

# BUG FIX: the crypto lookup referenced the undefined variable
# ${_LIBRESSL_HINTS_AND_PATHS} (wrong case/name); all three libraries now use
# the same HINTS as the header search.
find_library(LIBRESSL_CRYPTO_LIBRARY
  NAMES crypto
  PATH_SUFFIXES lib
  HINTS "${_LibreSSL_HINTS}")

find_library(LIBRESSL_SSL_LIBRARY
  NAMES ssl
  PATH_SUFFIXES lib
  HINTS "${_LibreSSL_HINTS}")

find_library(LIBRESSL_TLS_LIBRARY
  NAMES tls
  PATH_SUFFIXES lib
  HINTS "${_LibreSSL_HINTS}")

mark_as_advanced(LIBRESSL_CRYPTO_LIBRARY LIBRESSL_SSL_LIBRARY LIBRESSL_TLS_LIBRARY)

find_package_handle_standard_args(LibreSSL
  REQUIRED_VARS
    LIBRESSL_CRYPTO_LIBRARY
    LIBRESSL_SSL_LIBRARY
    LIBRESSL_TLS_LIBRARY
    LIBRESSL_INCLUDE_DIR
  FAIL_MESSAGE
    "Could NOT find LibreSSL, try to set the path to LibreSSL root folder in the system variable LibreSSL_ROOT"
)

if(LIBRESSL_FOUND)
  add_library(LibreSSL INTERFACE)
  target_include_directories(LibreSSL INTERFACE "${LIBRESSL_INCLUDE_DIR}")
  # in theory we could make those components. However there are good reasons not to do that:
  # 1. FDB links against all of them anyways
  # 2. The order in which we link them is important and the dependency graph would become kind of complex...
  # So if this module should ever be reused to allow to only link against some of the libraries, this
  # should probably be rewritten
  target_link_libraries(LibreSSL INTERFACE "${LIBRESSL_TLS_LIBRARY}" "${LIBRESSL_SSL_LIBRARY}" "${LIBRESSL_CRYPTO_LIBRARY}")
endif()

# restore the caller's original library-suffix preference
if(LIBRESSL_USE_STATIC_LIBS)
  set(CMAKE_FIND_LIBRARY_SUFFIXES ${_libressl_ORIG_CMAKE_FIND_LIBRARY_SUFFIXES})
endif()

7
cmake/FindSphinx.cmake Normal file
View File

@ -0,0 +1,7 @@
# FindSphinx
# Locates the sphinx-build executable used by the documentation targets.
# Defines SPHINX_FOUND and the cache variable SPHINXBUILD.
include(FindPackageHandleStandardArgs)
find_program(SPHINXBUILD
  sphinx-build
  DOC "Sphinx-build tool")
find_package_handle_standard_args(Sphinx
  FOUND_VAR SPHINX_FOUND
  REQUIRED_VARS SPHINXBUILD)
mark_as_advanced(SPHINXBUILD)

View File

@ -0,0 +1,20 @@
# FindVirtualenv
# Locates a working `virtualenv` executable. Defines Virtualenv_FOUND,
# VIRTUALENV_EXE and VIRTUALENV_VERSION.
include(FindPackageHandleStandardArgs)
find_program(_VIRTUALENV_EXE virtualenv)
# get version and test that program actually works
if(_VIRTUALENV_EXE)
  execute_process(
    COMMAND ${_VIRTUALENV_EXE} --version
    RESULT_VARIABLE ret_code
    OUTPUT_VARIABLE version_string
    ERROR_VARIABLE error_output
    OUTPUT_STRIP_TRAILING_WHITESPACE)
  # BUG FIX: the original tested `NOT ERROR_VARIABLE` -- that is the keyword
  # name, an undefined variable which is always falsy-negated to TRUE, not the
  # captured stderr. A zero exit code is the reliable "it works" signal
  # (some virtualenv versions print the version to stderr).
  if(ret_code EQUAL 0)
    # we found a working virtualenv
    set(VIRTUALENV_EXE ${_VIRTUALENV_EXE})
    # BUG FIX: was `set(VIRTUALENV_VERSION version_string)`, storing the
    # literal string "version_string" instead of the captured output.
    set(VIRTUALENV_VERSION "${version_string}")
  endif()
endif()
# BUG FIX: VERSION_VAR takes the *name* of the version variable, not its value.
find_package_handle_standard_args(Virtualenv
  REQUIRED_VARS VIRTUALENV_EXE
  VERSION_VAR VIRTUALENV_VERSION)

18
cmake/FindWIX.cmake Normal file
View File

@ -0,0 +1,18 @@
# Find WIX
# Locates the WiX toolset compiler (candle) and linker (light), using the
# WIX environment variable set by the WiX installer as a hint.
# Defines WIX_FOUND, WIX_CANDLE and WIX_LIGHT.
include(FindPackageHandleStandardArgs)
set(WIX_INSTALL_DIR $ENV{WIX})
find_program(WIX_CANDLE
  candle
  HINTS ${WIX_INSTALL_DIR}/bin)
find_program(WIX_LIGHT
  light
  HINTS ${WIX_INSTALL_DIR}/bin)
find_package_handle_standard_args(WIX
  REQUIRED_VARS
    WIX_CANDLE
    WIX_LIGHT
  FAIL_MESSAGE
    "Could not find WIX installation - try setting WIX_ROOT or the WIX environment variable")
mark_as_advanced(WIX_CANDLE WIX_LIGHT)

View File

@ -1,46 +1,159 @@
# actor_set(<varname> <srcs>)
#
# Maps a list of source files to the list that should actually be compiled:
# header files are dropped, *.actor.cpp files are replaced by their generated
# *.actor.g.cpp counterpart in the current binary directory, and everything
# else passes through unchanged. The result is stored in <varname> in the
# caller's scope (this is a macro, so no PARENT_SCOPE is needed).
macro(actor_set varname srcs)
# start with an empty result
set(${varname})
foreach(src ${srcs})
set(tmp "${src}")
if(${src} MATCHES ".*\\.h")
# headers are not compiled directly; skip them
continue()
elseif(${src} MATCHES ".*\\.actor\\.cpp")
# actor sources are compiled from their generated *.actor.g.cpp file
string(REPLACE ".actor.cpp" ".actor.g.cpp" tmp ${src})
set(tmp "${CMAKE_CURRENT_BINARY_DIR}/${tmp}")
endif()
# NOTE(review): appending this way leaves a leading ";" before the first
# element, i.e. the list starts with an empty entry -- kept as-is because
# downstream consumers may tolerate or depend on it.
set(${varname} "${${varname}};${tmp}")
endforeach()
endmacro()
define_property(TARGET PROPERTY SOURCE_FILES
BRIEF_DOCS "Source files a flow target is built off"
FULL_DOCS "When compiling a flow target, this property contains a list of the non-generated source files. \
This property is set by the add_flow_target function")
set(ACTOR_TARGET_COUNTER "0")
macro(actor_compile target srcs)
set(options DISABLE_ACTOR_WITHOUT_WAIT)
set(oneValueArg)
set(multiValueArgs)
cmake_parse_arguments(ACTOR_COMPILE "${options}" "${oneValueArgs}" "${multiValueArgs}" "${ARGN}")
set(_tmp_out "")
foreach(src ${srcs})
set(tmp "")
if(${src} MATCHES ".*\\.actor\\.h")
string(REPLACE ".actor.h" ".actor.g.h" tmp ${src})
elseif(${src} MATCHES ".*\\.actor\\.cpp")
string(REPLACE ".actor.cpp" ".actor.g.cpp" tmp ${src})
endif()
set(actor_compiler_flags "")
if(ACTOR_COMPILE_DISABLE_ACTOR_WITHOUT_WAIT)
set(actor_compiler_flags "--disable-actor-without-wait-error")
endif()
if(tmp)
add_custom_command(OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/${tmp}"
COMMAND ${MONO_EXECUTABLE} ${actor_exe} "${CMAKE_CURRENT_SOURCE_DIR}/${src}" "${CMAKE_CURRENT_BINARY_DIR}/${tmp}" ${actor_compiler_flags} > /dev/null
DEPENDS "${CMAKE_CURRENT_SOURCE_DIR}/${src}" actorcompiler ${actor_exe}
COMMENT "Compile actor: ${src}")
set(_tmp_out "${_tmp_out};${CMAKE_CURRENT_BINARY_DIR}/${tmp}")
define_property(TARGET PROPERTY COVERAGE_FILTERS
BRIEF_DOCS "List of filters for the coverage tool"
FULL_DOCS "Holds a list of regular expressions. All filenames matching any regular \
expression in this list will be ignored when the coverage.target.xml file is \
generated. This property is set through the add_flow_target function.")
function(generate_coverage_xml)
if(NOT (${ARGC} EQUAL "1"))
message(FATAL_ERROR "generate_coverage_xml expects one argument")
endif()
set(target_name ${ARGV0})
get_target_property(sources ${target_name} SOURCE_FILES)
get_target_property(filters ${target_name} COVERAGE_FILTER_OUT)
foreach(src IN LISTS sources)
set(include TRUE)
foreach(f IN LISTS filters)
if("${f}" MATCHES "${src}")
set(include FALSE)
endif()
endforeach()
if(include)
list(APPEND in_files ${src})
endif()
endforeach()
MATH(EXPR ACTOR_TARGET_COUNTER "${ACTOR_TARGET_COUNTER}+1")
add_custom_target(${target}_actors_${ACTOR_TARGET_COUNTER} DEPENDS ${_tmp_out})
add_dependencies(${target} ${target}_actors_${ACTOR_TARGET_COUNTER})
target_include_directories(${target} PUBLIC ${CMAKE_CURRENT_SOURCE_DIR})
target_include_directories(${target} PUBLIC ${CMAKE_CURRENT_BINARY_DIR})
endmacro()
set(target_file ${CMAKE_CURRENT_SOURCE_DIR}/coverage_target_${target_name})
# we can't get the targets output dir through a generator expression as this would
# create a cyclic dependency.
# Instead we follow the following rules:
# - For executable we place the coverage file into the directory EXECUTABLE_OUTPUT_PATH
# - For static libraries we place it into the directory LIBRARY_OUTPUT_PATH
# - For dynamic libraries we place it into LIBRARY_OUTPUT_PATH on Linux and MACOS
# and to EXECUTABLE_OUTPUT_PATH on Windows
get_target_property(type ${target_name} TYPE)
# STATIC_LIBRARY, MODULE_LIBRARY, SHARED_LIBRARY, OBJECT_LIBRARY, INTERFACE_LIBRARY, EXECUTABLE
if(type STREQUAL "STATIC_LIBRARY")
set(target_file ${LIBRARY_OUTPUT_PATH}/coverage.${target_name}.xml)
elseif(type STREQUAL "SHARED_LIBRARY")
if(WIN32)
set(target_file ${EXECUTABLE_OUTPUT_PATH}/coverage.${target_name}.xml)
else()
set(target_file ${LIBRARY_OUTPUT_PATH}/coverage.${target_name}.xml)
endif()
elseif(type STREQUAL "EXECUTABLE")
set(target_file ${EXECUTABLE_OUTPUT_PATH}/coverage.${target_name}.xml)
endif()
if(WIN32)
add_custom_command(
OUTPUT ${target_file}
COMMAND $<TARGET_FILE:coveragetool> ${target_file} ${in_files}
DEPENDS ${in_files}
WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
COMMENT "Generate coverage xml")
else()
add_custom_command(
OUTPUT ${target_file}
COMMAND ${MONO_EXECUTABLE} ${coveragetool_exe} ${target_file} ${in_files}
DEPENDS ${in_files}
WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
COMMENT "Generate coverage xml")
endif()
add_custom_target(coverage_${target_name} DEPENDS ${target_file})
add_dependencies(coverage_${target_name} coveragetool)
add_dependencies(${target_name} coverage_${target_name})
endfunction()
# add_flow_target(NAME <name> <EXECUTABLE|STATIC_LIBRARY|DYNAMIC_LIBRARY>
#                 SRCS <files...>
#                 [COVERAGE_FILTER_OUT <regexes...>]
#                 [DISABLE_ACTOR_WITHOUT_WAIT_WARNING <files...>])
#
# Declares a flow target. Sources matching *.actor.cpp / *.actor.h are first
# run through the actor compiler and the generated *.actor.g.* files are
# compiled instead. In OPEN_FOR_IDE mode the raw sources are handed to the
# build so an IDE can index them (nothing is expected to link).
function(add_flow_target)
  set(options EXECUTABLE STATIC_LIBRARY
    DYNAMIC_LIBRARY)
  set(oneValueArgs NAME)
  set(multiValueArgs SRCS COVERAGE_FILTER_OUT DISABLE_ACTOR_WITHOUT_WAIT_WARNING)
  cmake_parse_arguments(AFT "${options}" "${oneValueArgs}" "${multiValueArgs}" "${ARGN}")
  if(NOT AFT_NAME)
    message(FATAL_ERROR "add_flow_target requires option NAME")
  endif()
  if(NOT AFT_SRCS)
    message(FATAL_ERROR "No sources provided")
  endif()
  if(OPEN_FOR_IDE)
    # BUG FIX: was ${AFT_DISABLE_ACTOR_WRITHOUT_WAIT_WARNING} (typo "WRITHOUT"),
    # which silently expanded to nothing.
    set(sources ${AFT_SRCS} ${AFT_DISABLE_ACTOR_WITHOUT_WAIT_WARNING})
    if(AFT_EXECUTABLE)
      set(target_type exec)
      add_executable(${AFT_NAME} ${sources})
    endif()
    if(AFT_STATIC_LIBRARY)
      if(target_type)
        message(FATAL_ERROR "add_flow_target can only be of one type")
      endif()
      add_library(${AFT_NAME} STATIC ${sources})
    endif()
    if(AFT_DYNAMIC_LIBRARY)
      if(target_type)
        message(FATAL_ERROR "add_flow_target can only be of one type")
      endif()
      # BUG FIX: add_library has no DYNAMIC keyword -- CMake would treat
      # "DYNAMIC" as a source file. SHARED is the correct keyword.
      add_library(${AFT_NAME} SHARED ${sources})
    endif()
  else()
    foreach(src IN LISTS AFT_SRCS AFT_DISABLE_ACTOR_WITHOUT_WAIT_WARNING)
      if(${src} MATCHES ".*\\.actor\\.(h|cpp)")
        list(APPEND actors ${src})
        if(${src} MATCHES ".*\\.h")
          string(REPLACE ".actor.h" ".actor.g.h" generated ${src})
        else()
          string(REPLACE ".actor.cpp" ".actor.g.cpp" generated ${src})
        endif()
        # per-file actor compiler flags
        set(actor_compiler_flags "")
        foreach(s IN LISTS AFT_DISABLE_ACTOR_WITHOUT_WAIT_WARNING)
          if("${s}" STREQUAL "${src}")
            set(actor_compiler_flags "--disable-actor-without-wait-warning")
            break()
          endif()
        endforeach()
        list(APPEND sources ${generated})
        list(APPEND generated_files ${CMAKE_CURRENT_BINARY_DIR}/${generated})
        # BUG FIX: ${actor_compiler_flags} was passed twice on both command
        # lines below; it is now passed once.
        if(WIN32)
          add_custom_command(OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/${generated}"
            COMMAND $<TARGET_FILE:actorcompiler> "${CMAKE_CURRENT_SOURCE_DIR}/${src}" "${CMAKE_CURRENT_BINARY_DIR}/${generated}" ${actor_compiler_flags}
            DEPENDS "${CMAKE_CURRENT_SOURCE_DIR}/${src}" actorcompiler
            COMMENT "Compile actor: ${src}")
        else()
          add_custom_command(OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/${generated}"
            COMMAND ${MONO_EXECUTABLE} ${actor_exe} "${CMAKE_CURRENT_SOURCE_DIR}/${src}" "${CMAKE_CURRENT_BINARY_DIR}/${generated}" ${actor_compiler_flags} > /dev/null
            DEPENDS "${CMAKE_CURRENT_SOURCE_DIR}/${src}" actorcompiler
            COMMENT "Compile actor: ${src}")
        endif()
      else()
        list(APPEND sources ${src})
      endif()
    endforeach()
    if(AFT_EXECUTABLE)
      set(target_type exec)
      add_executable(${AFT_NAME} ${sources})
    endif()
    if(AFT_STATIC_LIBRARY)
      if(target_type)
        message(FATAL_ERROR "add_flow_target can only be of one type")
      endif()
      add_library(${AFT_NAME} STATIC ${sources})
    endif()
    if(AFT_DYNAMIC_LIBRARY)
      if(target_type)
        message(FATAL_ERROR "add_flow_target can only be of one type")
      endif()
      # BUG FIX: SHARED, not the invalid DYNAMIC keyword (see above).
      add_library(${AFT_NAME} SHARED ${sources})
    endif()

    set_property(TARGET ${AFT_NAME} PROPERTY SOURCE_FILES ${AFT_SRCS})
    # BUG FIX: the coverage filter property was set to ${AFT_SRCS}, which
    # would exclude *every* source from the coverage xml, and used the name
    # COVERAGE_FILTERS while the coverage generator reads COVERAGE_FILTER_OUT.
    # Store the user-supplied filters under the name the reader expects.
    set_property(TARGET ${AFT_NAME} PROPERTY COVERAGE_FILTER_OUT ${AFT_COVERAGE_FILTER_OUT})

    add_custom_target(${AFT_NAME}_actors DEPENDS ${generated_files})
    add_dependencies(${AFT_NAME} ${AFT_NAME}_actors)
    generate_coverage_xml(${AFT_NAME})
  endif()
  target_include_directories(${AFT_NAME} PUBLIC ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR})
endfunction()

View File

@ -1,5 +1,27 @@
################################################################################
# Helper Functions
################################################################################
# install_symlink(TO <target> DESTINATION <path> COMPONENT <component>)
#
# Installs a symbolic link. The link is created at configure time in a
# staging directory inside the build tree and then installed like a regular
# file: TO is what the link points at, DESTINATION is the full install path
# of the link itself, COMPONENT is the install component it belongs to.
function(install_symlink)
  cmake_parse_arguments(SYM "" "COMPONENT;TO;DESTINATION" "" "${ARGN}")
  set(staging_dir ${CMAKE_CURRENT_BINARY_DIR}/symlinks)
  file(MAKE_DIRECTORY ${staging_dir})
  get_filename_component(link_name ${SYM_DESTINATION} NAME)
  get_filename_component(install_dir ${SYM_DESTINATION} DIRECTORY)
  set(staged_link ${staging_dir}/${link_name})
  execute_process(COMMAND ${CMAKE_COMMAND} -E create_symlink ${SYM_TO} ${staged_link})
  install(FILES ${staged_link} DESTINATION ${install_dir} COMPONENT ${SYM_COMPONENT})
endfunction()
if(NOT INSTALL_LAYOUT)
set(DEFAULT_INSTALL_LAYOUT "STANDALONE")
if(WIN32)
set(DEFAULT_INSTALL_LAYOUT "WIN")
else()
set(DEFAULT_INSTALL_LAYOUT "STANDALONE")
endif()
endif()
set(INSTALL_LAYOUT "${DEFAULT_INSTALL_LAYOUT}"
CACHE STRING "Installation directory layout. Options are: TARGZ (as in tar.gz installer), WIN, STANDALONE, RPM, DEB, OSX")
@ -9,57 +31,77 @@ if(DIR_LAYOUT MATCHES "TARGZ")
set(DIR_LAYOUT "STANDALONE")
endif()
if(UNIX)
get_property(LIB64 GLOBAL PROPERTY FIND_LIBRARY_USE_LIB64_PATHS)
set(FDB_CONFIG_DIR "etc/foundationdb")
if("${LIB64}" STREQUAL "TRUE")
set(LIBSUFFIX 64)
get_property(LIB64 GLOBAL PROPERTY FIND_LIBRARY_USE_LIB64_PATHS)
set(CPACK_PACKAGE_CHECKSUM SHA256)
set(FDB_CONFIG_DIR "etc/foundationdb")
if("${LIB64}" STREQUAL "TRUE")
set(LIBSUFFIX 64)
else()
set(LIBSUFFIX "")
endif()
set(FDB_LIB_NOSUFFIX "lib")
if(DIR_LAYOUT MATCHES "STANDALONE")
set(FDB_LIB_DIR "lib${LIBSUFFIX}")
set(FDB_LIBEXEC_DIR "${FDB_LIB_DIR}")
set(FDB_BIN_DIR "bin")
set(FDB_SBIN_DIR "sbin")
set(FDB_INCLUDE_INSTALL_DIR "include")
set(FDB_PYTHON_INSTALL_DIR "${FDB_LIB_DIR}/python2.7/site-packages/fdb")
set(FDB_SHARE_DIR "share")
elseif(DIR_LAYOUT MATCHES "WIN")
set(CPACK_GENERATOR "ZIP")
set(FDB_CONFIG_DIR "etc")
set(FDB_LIB_DIR "lib")
set(FDB_LIB_NOSUFFIX "lib")
set(FDB_LIBEXEC_DIR "bin")
set(FDB_SHARE_DIR "share")
set(FDB_BIN_DIR "bin")
set(FDB_SBIN_DIR "bin")
set(FDB_INCLUDE_INSTALL_DIR "include")
set(FDB_PYTHON_INSTALL_DIR "${FDB_LIB_DIR}/python2.7/site-packages/fdb")
set(FDB_SHARE_DIR "share")
elseif(DIR_LAYOUT MATCHES "OSX")
set(CPACK_GENERATOR productbuild)
set(CPACK_PACKAGING_INSTALL_PREFIX "/")
set(FDB_CONFIG_DIR "usr/local/etc/foundationdb")
set(FDB_LIB_DIR "usr/local/lib")
set(FDB_LIB_NOSUFFIX "usr/local/lib")
set(FDB_LIBEXEC_DIR "usr/local/libexec")
set(FDB_BIN_DIR "usr/local/bin")
set(FDB_SBIN_DIR "usr/local/libexec")
set(FDB_INCLUDE_INSTALL_DIR "usr/local/include")
set(FDB_PYTHON_INSTALL_DIR "Library/Python/2.7/site-packages/fdb")
set(FDB_SHARE_DIR "usr/local/share")
else()
if(DIR_LAYOUT MATCHES "RPM")
set(CPACK_GENERATOR RPM)
else()
# DEB
set(CPACK_GENERATOR "DEB")
set(LIBSUFFIX "")
endif()
set(FDB_LIB_NOSUFFIX "lib")
if(DIR_LAYOUT MATCHES "STANDALONE")
set(FDB_LIB_DIR "lib${LIBSUFFIX}")
set(FDB_LIBEXEC_DIR "${FDB_LIB_DIR}")
set(FDB_BIN_DIR "bin")
set(FDB_SBIN_DIR "sbin")
set(FDB_INCLUDE_INSTALL_DIR "include")
set(FDB_PYTHON_INSTALL_DIR "${FDB_LIB_DIR}/python2.7/site-packages/fdb")
set(FDB_SHARE_DIR "share")
elseif(DIR_LAYOUT MATCHES "OSX")
set(CPACK_GENERATOR productbuild)
set(CPACK_PACKAGING_INSTALL_PREFIX "/")
set(FDB_LIB_DIR "usr/local/lib")
set(FDB_LIB_NOSUFFIX "usr/lib")
set(FDB_LIBEXEC_DIR "usr/local/libexec")
set(FDB_BIN_DIR "usr/local/bin")
set(FDB_SBIN_DIR "usr/local/sbin")
set(FDB_INCLUDE_INSTALL_DIR "usr/local/include")
set(FDB_PYTHON_INSTALL_DIR "Library/Python/2.7/site-packages/fdb")
set(FDB_SHARE_DIR "usr/local/share")
elseif(DIR_LAYOUT MATCHES "WIN")
# TODO
else()
# for deb and rpm
if(INSTALL_LAYOUT MATCHES "RPM")
set(CPACK_GENERATOR "RPM")
else()
# DEB
set(CPACK_GENERATOR "DEB")
endif()
set(CMAKE_INSTALL_PREFIX "/")
set(CPACK_PACKAGING_INSTALL_PREFIX "/")
set(FDB_LIB_DIR "usr/lib${LIBSUFFIX}")
set(FDB_LIB_NOSUFFIX "usr/lib")
set(FDB_LIBEXEC_DIR "${FDB_LIB_DIR}")
set(FDB_BIN_DIR "usr/bin")
set(FDB_SBIN_DIR "usr/sbin")
set(FDB_INCLUDE_INSTALL_DIR "usr/include")
set(FDB_PYTHON_INSTALL_DIR "${FDB_LIB_DIR}/python2.7/site-packages/fdb")
set(FDB_SHARE_DIR "usr/share")
endif()
set(CMAKE_INSTALL_PREFIX "/")
set(CPACK_PACKAGING_INSTALL_PREFIX "/")
set(FDB_CONFIG_DIR "etc/foundationdb")
set(FDB_LIB_DIR "usr/lib${LIBSUFFIX}")
set(FDB_LIB_NOSUFFIX "usr/lib")
set(FDB_LIBEXEC_DIR ${FDB_LIB_DIR})
set(FDB_BIN_DIR "usr/bin")
set(FDB_SBIN_DIR "usr/sbin")
set(FDB_INCLUDE_INSTALL_DIR "usr/include")
set(FDB_PYTHON_INSTALL_DIR "${FDB_LIB_DIR}/python2.7/site-packages/fdb")
set(FDB_SHARE_DIR "usr/share")
endif()
if(INSTALL_LAYOUT MATCHES "OSX")
set(FDBMONITOR_INSTALL_LOCATION "${FDB_LIBEXEC_DIR}")
else()
set(FDBMONITOR_INSTALL_LOCATION "${FDB_LIB_NOSUFFIX}/foundationdb")
endif()
################################################################################
# Version information
################################################################################
@ -86,16 +128,25 @@ set(CPACK_PACKAGE_ICON ${CMAKE_SOURCE_DIR}/packaging/foundationdb.ico)
set(CPACK_PACKAGE_CONTACT "The FoundationDB Community")
set(CPACK_COMPONENT_server_DEPENDS clients)
if (INSTALL_LAYOUT MATCHES "OSX")
# MacOS needs a file extension for the LICENSE file
set(CPACK_RESOURCE_FILE_README ${CMAKE_SOURCE_DIR}/packaging/osx/resources/conclusion.rtf)
set(CPACK_PRODUCTBUILD_RESOURCES_DIR ${CMAKE_SOURCE_DIR}/packaging/osx/resources)
configure_file(${CMAKE_SOURCE_DIR}/LICENSE ${CMAKE_BINARY_DIR}/License.txt COPYONLY)
set(CPACK_RESOURCE_FILE_LICENSE ${CMAKE_BINARY_DIR}/License.txt)
else()
set(CPACK_RESOURCE_FILE_LICENSE ${CMAKE_SOURCE_DIR}/LICENSE)
set(CPACK_RESOURCE_FILE_README ${CMAKE_SOURCE_DIR}/README.md)
set(CPACK_RESOURCE_FILE_LICENSE ${CMAKE_SOURCE_DIR}/LICENSE)
endif()
################################################################################
# Configuration for RPM
################################################################################
################################################################################
if(UNIX AND NOT APPLE)
install(DIRECTORY DESTINATION "var/log/foundationdb" COMPONENT server)
install(DIRECTORY DESTINATION "var/lib/foundationdb/data" COMPONENT server)
endif()
if(INSTALL_LAYOUT MATCHES "RPM")
set(CPACK_RPM_server_USER_FILELIST
@ -114,7 +165,8 @@ if(INSTALL_LAYOUT MATCHES "RPM")
"/lib/systemd"
"/lib/systemd/system"
"/etc/rc.d/init.d")
set(CPACK_RPM_DEBUGINFO_PACKAGE ON)
set(CPACK_RPM_server_DEBUGINFO_PACKAGE ON)
set(CPACK_RPM_clients_DEBUGINFO_PACKAGE ON)
set(CPACK_RPM_BUILD_SOURCE_DIRS_PREFIX /usr/src)
set(CPACK_RPM_COMPONENT_INSTALL ON)
set(CPACK_RPM_clients_PRE_INSTALL_SCRIPT_FILE
@ -128,6 +180,11 @@ if(INSTALL_LAYOUT MATCHES "RPM")
set(CPACK_RPM_server_PRE_UNINSTALL_SCRIPT_FILE
${CMAKE_SOURCE_DIR}/packaging/rpm/scripts/preunserver.sh)
set(CPACK_RPM_server_PACKAGE_REQUIRES
"foundationdb-clients = ${FDB_MAJOR}.${FDB_MINOR}.${FDB_PATCH}, initscripts >= 9.03")
set(CPACK_RPM_server_PACKAGE_RE)
#set(CPACK_RPM_java_PACKAGE_REQUIRES
# "foundationdb-clients = ${FDB_MAJOR}.${FDB_MINOR}.${FDB_PATCH}")
set(CPACK_RPM_python_PACKAGE_REQUIRES
"foundationdb-clients = ${FDB_MAJOR}.${FDB_MINOR}.${FDB_PATCH}")
endif()
@ -140,12 +197,12 @@ if(INSTALL_LAYOUT MATCHES "DEB")
set(CPACK_DEBIAN_PACKAGE_SECTION "database")
set(CPACK_DEBIAN_ENABLE_COMPONENT_DEPENDS ON)
set(CPACK_DEBIAN_server_PACKAGE_DEPENDS "adduser, libc6 (>= 2.11), python (>= 2.6)")
set(CPACK_DEBIAN_clients_PACKAGE_DEPENDS "adduser, libc6 (>= 2.11)")
set(CPACK_DEBIAN_SERVER_PACKAGE_DEPENDS "adduser, libc6 (>= 2.12), python (>= 2.6), foundationdb-clients (= ${FDB_VERSION})")
set(CPACK_DEBIAN_CLIENTS_PACKAGE_DEPENDS "adduser, libc6 (>= 2.12)")
set(CPACK_DEBIAN_PACKAGE_HOMEPAGE "https://www.foundationdb.org")
set(CPACK_DEBIAN_clients_PACKAGE_CONTROL_EXTRA
set(CPACK_DEBIAN_CLIENTS_PACKAGE_CONTROL_EXTRA
${CMAKE_SOURCE_DIR}/packaging/deb/DEBIAN-foundationdb-clients/postinst)
set(CPACK_DEBIAN_server_PACKAGE_CONTROL_EXTRA
set(CPACK_DEBIAN_SERVER_PACKAGE_CONTROL_EXTRA
${CMAKE_SOURCE_DIR}/packaging/deb/DEBIAN-foundationdb-server/conffiles
${CMAKE_SOURCE_DIR}/packaging/deb/DEBIAN-foundationdb-server/preinst
${CMAKE_SOURCE_DIR}/packaging/deb/DEBIAN-foundationdb-server/postinst
@ -153,6 +210,22 @@ if(INSTALL_LAYOUT MATCHES "DEB")
${CMAKE_SOURCE_DIR}/packaging/deb/DEBIAN-foundationdb-server/postrm)
endif()
################################################################################
# MacOS configuration
################################################################################
if(INSTALL_LAYOUT MATCHES "OSX")
set(CPACK_PREFLIGHT_SERVER_SCRIPT ${CMAKE_SOURCE_DIR}/packaging/osx/scripts-server/preinstall)
set(CPACK_POSTFLIGHT_SERVER_SCRIPT ${CMAKE_SOURCE_DIR}/packaging/osx/scripts-server/postinstall)
set(CPACK_POSTFLIGHT_CLIENTS_SCRIPT ${CMAKE_SOURCE_DIR}/packaging/osx/scripts-server/preinstall)
install(PROGRAMS ${CMAKE_SOURCE_DIR}/packaging/osx/uninstall-FoundationDB.sh
DESTINATION "usr/local/foundationdb"
COMPONENT clients)
install(FILES ${CMAKE_SOURCE_DIR}/packaging/osx/com.foundationdb.fdbmonitor.plist
DESTINATION "Library/LaunchDaemons"
COMPONENT server)
endif()
################################################################################
# Server configuration
################################################################################
@ -162,14 +235,27 @@ string(RANDOM LENGTH 8 description2)
set(CLUSTER_DESCRIPTION1 ${description1} CACHE STRING "Cluster description")
set(CLUSTER_DESCRIPTION2 ${description2} CACHE STRING "Cluster description")
install(FILES ${CMAKE_SOURCE_DIR}/packaging/foundationdb.conf
DESTINATION ${FDB_CONFIG_DIR}
COMPONENT server)
install(FILES ${CMAKE_SOURCE_DIR}/packaging/argparse.py
DESTINATION "usr/lib/foundationdb"
COMPONENT server)
install(FILES ${CMAKE_SOURCE_DIR}/packaging/make_public.py
DESTINATION "usr/lib/foundationdb")
if(NOT WIN32)
if(INSTALL_LAYOUT MATCHES "OSX")
install(FILES ${CMAKE_SOURCE_DIR}/packaging/osx/foundationdb.conf.new
DESTINATION ${FDB_CONFIG_DIR}
COMPONENT server)
else()
install(FILES ${CMAKE_SOURCE_DIR}/packaging/foundationdb.conf
DESTINATION ${FDB_CONFIG_DIR}
COMPONENT server)
endif()
install(FILES ${CMAKE_SOURCE_DIR}/packaging/argparse.py
DESTINATION "${FDB_LIB_NOSUFFIX}/foundationdb"
COMPONENT server)
install(FILES ${CMAKE_SOURCE_DIR}/packaging/make_public.py
DESTINATION "${FDB_LIB_NOSUFFIX}/foundationdb"
COMPONENT server)
else()
install(FILES ${CMAKE_BINARY_DIR}/fdb.cluster
DESTINATION "etc"
COMPONENT server)
endif()
if((INSTALL_LAYOUT MATCHES "RPM") OR (INSTALL_LAYOUT MATCHES "DEB"))
file(MAKE_DIRECTORY ${CMAKE_BINARY_DIR}/packaging/foundationdb
${CMAKE_BINARY_DIR}/packaging/rpm)
@ -186,36 +272,18 @@ if((INSTALL_LAYOUT MATCHES "RPM") OR (INSTALL_LAYOUT MATCHES "DEB"))
RESULT_VARIABLE IS_SYSTEMD
OUTPUT_QUIET
ERROR_QUIET)
if(IS_SYSTEMD EQUAL "0")
configure_file(${CMAKE_SOURCE_DIR}/packaging/rpm/foundationdb.service
${CMAKE_BINARY_DIR}/packaging/rpm/foundationdb.service)
install(FILES ${CMAKE_BINARY_DIR}/packaging/rpm/foundationdb.service
DESTINATION "lib/systemd/system"
install(FILES ${CMAKE_SOURCE_DIR}/packaging/rpm/foundationdb.service
DESTINATION "lib/systemd/system"
COMPONENT server)
if(INSTALL_LAYOUT MATCHES "RPM")
install(PROGRAMS ${CMAKE_SOURCE_DIR}/packaging/rpm/foundationdb-init
DESTINATION "etc/rc.d/init.d"
RENAME "foundationdb"
COMPONENT server)
else()
if(INSTALL_LAYOUT MATCHES "RPM")
install(FILES ${CMAKE_SOURCE_DIR}/packaging/rpm/foundationdb-init
DESTINATION "etc/rc.d/init.d"
RENAME "foundationdb"
COMPONENT server)
else()
install(FILES ${CMAKE_SOURCE_DIR}/packaging/deb/foundationdb-init
DESTINATION "etc/init.d"
RENAME "foundationdb"
COMPONENT server)
endif()
install(PROGRAMS ${CMAKE_SOURCE_DIR}/packaging/deb/foundationdb-init
DESTINATION "etc/init.d"
RENAME "foundationdb"
COMPONENT server)
endif()
endif()
################################################################################
# Helper Macros
################################################################################
macro(install_symlink filepath sympath compondent)
install(CODE "execute_process(COMMAND ${CMAKE_COMMAND} -E create_symlink ${filepath} ${sympath})" COMPONENT ${component})
install(CODE "message(\"-- Created symlink: ${sympath} -> ${filepath}\")")
endmacro()
macro(install_mkdir dirname component)
install(CODE "execute_process(COMMAND ${CMAKE_COMMAND} -E make_directory ${dirname})" COMPONENT ${component})
install(CODE "message(\"-- Created directory: ${dirname}\")")
endmacro()

View File

@ -0,0 +1,68 @@
# build a virtualenv
# Creates a python virtualenv inside the build tree and installs the sphinx
# requirements into it; the documentation targets below run from this venv.
set(sphinx_dir ${CMAKE_CURRENT_SOURCE_DIR}/sphinx)
set(venv_dir ${CMAKE_CURRENT_BINARY_DIR}/venv)
set(EXE_SUFFIX "")
if(WIN32)
set(EXE_SUFFIX ".exe")
endif()
# NOTE(review): the bin/ layout and the ". activate" step below are POSIX
# shell conventions; on Windows a virtualenv uses Scripts\ instead of bin/ --
# confirm whether the documentation build is expected to work there.
set(pip_command ${venv_dir}/bin/pip${EXE_SUFFIX})
set(python_command ${venv_dir}/bin/python${EXE_SUFFIX})
# The venv_setup stamp file makes this a one-shot setup step; it only reruns
# if the stamp is deleted.
add_custom_command(OUTPUT ${venv_dir}/venv_setup
COMMAND ${VIRTUALENV_EXE} venv &&
${CMAKE_COMMAND} -E copy ${sphinx_dir}/.pip.conf ${venv_dir}/pip.conf &&
. ${venv_dir}/bin/activate &&
${pip_command} install --upgrade pip &&
${pip_command} install --upgrade -r ${sphinx_dir}/requirements.txt &&
${pip_command} install sphinx-autobuild && # somehow this is missing in requirements.txt
${CMAKE_COMMAND} -E touch ${venv_dir}/venv_setup
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
COMMENT "Set up virtualenv")
add_custom_target(buildsphinx DEPENDS ${venv_dir}/venv_setup)
# all documentation sources; any changed .rst retriggers the sphinx builds
file(GLOB_RECURSE SRCS *.rst)
# add_documentation_target(GENERATOR <builder> [SPHINX_COMMAND <exe>]
#                          [DOCTREE <dir>] [ADDITIONAL_ARGUMENTS <args...>])
#
# Adds a custom target named after the sphinx builder (e.g. "html") that runs
# sphinx-build out of the virtualenv created above. Relies on the file-scope
# variables venv_dir, python_command, sphinx_dir and SRCS defined earlier in
# this file.
function(add_documentation_target)
  set(options)
  set(oneValueArgs GENERATOR SPHINX_COMMAND DOCTREE)
  set(multiValueArgs ADDITIONAL_ARGUMENTS)
  cmake_parse_arguments(ADT "${options}" "${oneValueArgs}" "${multiValueArgs}" "${ARGN}")
  if(NOT ADT_GENERATOR)
    # BUG FIX: message(ERROR ...) is not a valid mode -- it printed the word
    # "ERROR" as ordinary output and configuration carried on with an empty
    # target name. FATAL_ERROR aborts as intended.
    message(FATAL_ERROR "GENERATOR is a required argument to add_documentation_target")
  endif()
  set(target ${ADT_GENERATOR})
  set(SPHINX_COMMAND "${venv_dir}/bin/sphinx-build")
  if(ADT_SPHINX_COMMAND)
    set(SPHINX_COMMAND "${venv_dir}/bin/${ADT_SPHINX_COMMAND}")
  endif()
  set(doctree "doctree")
  if(ADT_DOCTREE)
    set(doctree "${ADT_DOCTREE}")
  endif()
  set(out_dir ${CMAKE_CURRENT_BINARY_DIR}/${target})
  # stamp file <target>_done records a successful build of this generator
  add_custom_command(
    OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/${target}_done
    COMMAND ${CMAKE_COMMAND} -E make_directory ${out_dir} &&
            ${python_command} ${SPHINX_COMMAND} -b ${target}
            -d ${doctree} -c ${sphinx_dir}
            ${sphinx_dir}/source
            ${CMAKE_CURRENT_BINARY_DIR}/${target} &&
            ${CMAKE_COMMAND} -E touch ${CMAKE_CURRENT_BINARY_DIR}/${target}_done
    DEPENDS ${SRCS}
    WORKING_DIRECTORY ${venv_dir})
  # (removed leftover debug message(STATUS "add_custom_target(...)") that
  # echoed this line at configure time)
  add_custom_target(${target} DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/${target}_done)
  add_dependencies(${target} buildsphinx)
endfunction()
message(STATUS "Add html target")
# register the html documentation builder defined above
add_documentation_target(GENERATOR html)
# bundle the generated html docs into a tarball under <build>/packages/
set(tar_file ${CMAKE_BINARY_DIR}/packages/${CMAKE_PROJECT_NAME}-docs-${FDB_VERSION}.tar.gz)
add_custom_command(
OUTPUT ${tar_file}
COMMAND ${CMAKE_COMMAND} -E tar czf ${tar_file} .
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/html)
# `make package_html` builds the docs tarball; it is also hooked into the
# global `packages` aggregation target defined in FDBComponents.cmake
add_custom_target(package_html DEPENDS ${tar_file})
add_dependencies(package_html html)
add_dependencies(packages package_html)

View File

@ -85,7 +85,7 @@ Specifying the cluster file
All FoundationDB components can be configured to use a specified cluster file:
* The ``fdbcli`` tool allows a cluster file to be passed on the command line using the ``-C`` option.
* The :doc:`client APIs <api-reference>` allow a cluster file to be passed when connecting to a cluster, usually via ``open()`` or ``create_cluster()``.
* The :doc:`client APIs <api-reference>` allow a cluster file to be passed when connecting to a cluster, usually via ``open()``.
* A FoundationDB server or ``backup-agent`` allow a cluster file to be specified in :ref:`foundationdb.conf <foundationdb-conf>`.
In addition, FoundationDB allows you to use the environment variable ``FDB_CLUSTER_FILE`` to specify a cluster file. This approach is helpful if you operate or access more than one cluster.

View File

@ -13,7 +13,6 @@
.. |reset-func-name| replace:: :func:`reset <fdb_transaction_reset()>`
.. |reset-func| replace:: :func:`fdb_transaction_reset()`
.. |cancel-func| replace:: :func:`fdb_transaction_cancel()`
.. |init-func| replace:: FIXME
.. |open-func| replace:: FIXME
.. |set-cluster-file-func| replace:: FIXME
.. |set-local-address-func| replace:: FIXME
@ -292,22 +291,6 @@ See :ref:`developer-guide-programming-with-futures` for further (language-indepe
|future-memory-mine|
.. function:: fdb_error_t fdb_future_get_cluster(FDBFuture* future, FDBCluster** out_cluster)
Extracts a value of type :type:`FDBCluster*` from an :type:`FDBFuture` into a caller-provided variable. |future-warning|
|future-get-return1| |future-get-return2|.
|future-memory-yours1| :type:`FDBCluster` |future-memory-yours2| :func:`fdb_cluster_destroy()` |future-memory-yours3|
.. function:: fdb_error_t fdb_future_get_database(FDBFuture* future, FDBDatabase** out_database)
Extracts a value of type :type:`FDBDatabase*` from an :type:`FDBFuture` into a caller-provided variable. |future-warning|
|future-get-return1| |future-get-return2|.
|future-memory-yours1| :type:`FDBDatabase` |future-memory-yours2| ``fdb_database_destroy(*out_database)`` |future-memory-yours3|
.. function:: fdb_error_t fdb_future_get_value(FDBFuture* future, fdb_bool_t* out_present, uint8_t const** out_value, int* out_value_length)
Extracts a database value from an :type:`FDBFuture` into caller-provided variables. |future-warning|
@ -379,42 +362,6 @@ See :ref:`developer-guide-programming-with-futures` for further (language-indepe
:data:`value_length`
The length of the value pointed to by :data:`value`.
Cluster
=======
.. type:: FDBCluster
An opaque type that represents a Cluster in the FoundationDB C API.
.. function:: FDBFuture* fdb_create_cluster(const char* cluster_file_path)
|future-return0| an :type:`FDBCluster` object. |future-return1| call :func:`fdb_future_get_cluster()` to extract the :type:`FDBCluster` object, |future-return2|
:data:`cluster_file_path`
A NULL-terminated string giving a local path of a :ref:`cluster file <foundationdb-cluster-file>` (often called 'fdb.cluster') which contains connection information for the FoundationDB cluster. If cluster_file_path is NULL or an empty string, then a :ref:`default cluster file <default-cluster-file>` will be used.
.. function:: void fdb_cluster_destroy(FDBCluster* cluster)
Destroys an :type:`FDBCluster` object. It must be called exactly once for each successful call to :func:`fdb_future_get_cluster()`. This function only destroys a handle to the cluster -- your cluster will be fine!
.. function:: fdb_error_t fdb_cluster_set_option(FDBCluster* cluster, FDBClusterOption option, uint8_t const* value, int value_length)
Called to set an option on an :type:`FDBCluster`. |option-parameter| :func:`fdb_cluster_set_option()` returns.
.. type:: FDBClusterOption
|option-doc|
.. function:: FDBFuture* fdb_cluster_create_database(FDBCluster *cluster, uint8_t const* db_name, int db_name_length)
|future-return0| an :type:`FDBDatabase` object. |future-return1| call :func:`fdb_future_get_database()` to extract the :type:`FDBDatabase` object, |future-return2|
:data:`db_name`
A pointer to the name of the database to be opened. |no-null| In the current FoundationDB API, the database name *must* be "DB".
:data:`db_name_length`
|length-of| :data:`db_name`.
Database
========
@ -424,9 +371,19 @@ An |database-blurb1| Modifications to a database are performed via transactions.
An opaque type that represents a database in the FoundationDB C API.
.. function:: fdb_error_t fdb_create_database(const char* cluster_file_path, FDBDatabase** out_database)
Creates a new database connected to the specified cluster. The caller assumes ownership of the :type:`FDBDatabase` object and must destroy it with :func:`fdb_database_destroy()`.
:data:`cluster_file_path`
A NULL-terminated string giving a local path of a :ref:`cluster file <foundationdb-cluster-file>` (often called 'fdb.cluster') which contains connection information for the FoundationDB cluster. If cluster_file_path is NULL or an empty string, then a :ref:`default cluster file <default-cluster-file>` will be used.
:data:`*out_database`
Set to point to the newly created :type:`FDBDatabase`.
.. function:: void fdb_database_destroy(FDBDatabase* database)
Destroys an :type:`FDBDatabase` object. It must be called exactly once for each successful call to :func:`fdb_future_get_database()`. This function only destroys a handle to the database -- your database will be fine!
Destroys an :type:`FDBDatabase` object. It must be called exactly once for each successful call to :func:`fdb_create_database()`. This function only destroys a handle to the database -- your database will be fine!
.. function:: fdb_error_t fdb_database_set_option(FDBDatabase* database, FDBDatabaseOption option, uint8_t const* value, int value_length)

View File

@ -127,13 +127,13 @@
Performs lexicographic comparison of byte strings. If the existing value in the database is not present, then ``param`` is stored. Otherwise the larger of the two values is then stored in the database.
.. |atomic-set-versionstamped-key-1| replace::
Transforms ``key`` using a versionstamp for the transaction. This key must be at least 12 bytes long. The final 2 bytes will be interpreted as a 16-bit little-endian integer denoting an index into the key at which to perform the transformation, and then trimmed off the key. The 10 bytes in the key beginning at the index will be overwritten with the versionstamp. If the index plus 10 bytes points past the end of the key, the result will be an error. Sets the transformed key in the database to ``param``.
Transforms ``key`` using a versionstamp for the transaction. This key must be at least 14 bytes long. The final 4 bytes will be interpreted as a 32-bit little-endian integer denoting an index into the key at which to perform the transformation, and then trimmed off the key. The 10 bytes in the key beginning at the index will be overwritten with the versionstamp. If the index plus 10 bytes points past the end of the key, the result will be an error. Sets the transformed key in the database to ``param``.
.. |atomic-set-versionstamped-key-2| replace::
This operation is not compatible with |read-your-writes-disable-option| and will generate an error if used with it.
.. |atomic-set-versionstamped-value| replace::
Transforms ``param`` using a versionstamp for the transaction. This parameter must be at least 10 bytes long, and the first 10 bytes will be overwritten with the versionstamp. Sets ``key`` in the database to the transformed parameter.
Transforms ``param`` using a versionstamp for the transaction. This parameter must be at least 14 bytes long. The final 4 bytes will be interpreted as a 32-bit little-endian integer denoting an index into the parameter at which to perform the transformation, and then trimmed off the parameter. The 10 bytes in the parameter beginning at the index will be overwritten with the versionstamp. If the index plus 10 bytes points past the end of the parameter, the result will be an error. Sets ``key`` in the database to the transformed parameter.
.. |atomic-versionstamps-1| replace::
A versionstamp is a 10 byte, unique, monotonically (but not sequentially) increasing value for each committed transaction. The first 8 bytes are the committed version of the database. The last 2 bytes are monotonic in the serialization order for transactions.
@ -145,7 +145,7 @@
At this time, versionstamped keys are not compatible with the Tuple layer except in Java and Python. Note that this implies versionstamped keys may not be used with the Subspace and Directory layers except in those languages.
.. |atomic-versionstamps-tuple-warning-value| replace::
At this time, versionstamped values are not compatible with the Tuple layer.
At this time, versionstamped values are not compatible with the Tuple layer except in Java and Python. Note that this implies versionstamped values may not be used with the Subspace and Directory layers except in those languages.
.. |api-version| replace:: 610
@ -232,9 +232,12 @@
.. |option-trace-roll-size-blurb| replace::
Sets the maximum size in bytes of a single trace output file for this FoundationDB client.
.. |option-trace-format-blurb| replace::
Select the format of the trace files for this FoundationDB client. xml (the default) and json are supported.
.. |network-options-warning| replace::
It is an error to set these options after the first call to |open-func| or |init-func| anywhere in your application.
It is an error to set these options after the first call to |open-func| anywhere in your application.
.. |tls-options-burb| replace::
@ -398,7 +401,7 @@
Cancels |future-type-string| and its associated asynchronous operation. If called before the future is ready, attempts to access its value will |error-raise-type| an :ref:`operation_cancelled <developer-guide-error-codes>` |error-type|. Cancelling a future which is already ready has no effect. Note that even if a future is not ready, its associated asynchronous operation may have successfully completed and be unable to be cancelled.
.. |fdb-open-blurb| replace::
Initializes the FoundationDB API, connects to the cluster specified by the :ref:`cluster file <foundationdb-cluster-file>`, and opens the database with the specified name. This function is often called without any parameters, using only the defaults. If no cluster file is passed, FoundationDB automatically :ref:`determines a cluster file <specifying-a-cluster-file>` with which to connect to a cluster.
Initializes the FoundationDB API and connects to the cluster specified by the :ref:`cluster file <foundationdb-cluster-file>`. This function is often called without any parameters, using only the defaults. If no cluster file is passed, FoundationDB automatically :ref:`determines a cluster file <specifying-a-cluster-file>` with which to connect to a cluster.
.. |fdb-transactional-unknown-result-note| replace::
In some failure scenarios, it is possible that your transaction will be executed twice. See :ref:`developer-guide-unknown-results` for more information.

View File

@ -14,7 +14,6 @@
.. |reset-func-name| replace:: :func:`reset <Transaction.reset>`
.. |reset-func| replace:: :func:`Transaction.reset`
.. |cancel-func| replace:: :func:`Transaction.cancel`
.. |init-func| replace:: :func:`fdb.init`
.. |open-func| replace:: :func:`fdb.open`
.. |on-error-func| replace:: :meth:`Transaction.on_error`
.. |null-type| replace:: ``None``
@ -86,33 +85,18 @@ For API changes between version 13 and |api-version| (for the purpose of porting
Opening a database
==================
After importing the ``fdb`` module and selecting an API version, you probably want to open a :class:`Database`. The simplest way of doing this is using :func:`open`::
After importing the ``fdb`` module and selecting an API version, you probably want to open a :class:`Database` using :func:`open`::
import fdb
fdb.api_version(610)
db = fdb.open()
.. function:: open( cluster_file=None, db_name="DB", event_model=None )
.. function:: open( cluster_file=None, event_model=None )
|fdb-open-blurb|
.. param event_model:: Can be used to select alternate :ref:`api-python-event-models`
.. note:: In this release, db_name must be "DB".
.. note:: ``fdb.open()`` combines the effect of :func:`init`, :func:`create_cluster`, and :meth:`Cluster.open_database`.
.. function:: init()
Initializes the FoundationDB API, creating a thread for the FoundationDB client and initializing the client's networking engine. :func:`init()` can only be called once. If called subsequently or after :func:`open`, it will raise a ``client_invalid_operation`` error.
.. function:: create_cluster( cluster_file=None )
Connects to the cluster specified by :ref:`cluster_file <foundationdb-cluster-file>`, or by a :ref:`default cluster file <default-cluster-file>` if
``cluster_file`` is None. :func:`init` must be called first.
Returns a |future-type| :class:`Cluster` object.
.. data:: options
|network-options-blurb|
@ -133,6 +117,10 @@ After importing the ``fdb`` module and selecting an API version, you probably wa
|option-trace-roll-size-blurb|
.. method :: fdb.options.set_trace_format(format)
|option-trace-format-blurb|
.. method :: fdb.options.set_disable_multi_version_client_api()
|option-disable-multi-version-client-api|
@ -175,19 +163,6 @@ After importing the ``fdb`` module and selecting an API version, you probably wa
|option-tls-key-bytes|
Cluster objects
===============
.. class:: Cluster
.. method:: Cluster.open_database(name="DB")
Opens a database with the given name.
Returns a |future-type| :class:`Database` object.
.. note:: In this release, name **must** be "DB".
.. _api-python-keys:
Keys and values
@ -966,7 +941,7 @@ The following streaming modes are available:
Event models
============
By default, the FoundationDB Python API assumes that the calling program uses threads (as provided by the ``threading`` module) for concurrency. This means that blocking operations will block the current Python thread. This behavior can be changed by specifying the optional ``event_model`` parameter to the :func:`open` or :func:`init` functions.
By default, the FoundationDB Python API assumes that the calling program uses threads (as provided by the ``threading`` module) for concurrency. This means that blocking operations will block the current Python thread. This behavior can be changed by specifying the optional ``event_model`` parameter to the :func:`open` function.
The following event models are available:

View File

@ -12,7 +12,6 @@
.. |reset-func-name| replace:: :meth:`reset <Transaction.reset>`
.. |reset-func| replace:: :meth:`Transaction.reset`
.. |cancel-func| replace:: :meth:`Transaction.cancel`
.. |init-func| replace:: :func:`FDB.init`
.. |open-func| replace:: :func:`FDB.open`
.. |on-error-func| replace:: :meth:`Transaction.on_error`
.. |null-type| replace:: ``nil``
@ -75,28 +74,16 @@ For API changes between version 14 and |api-version| (for the purpose of porting
Opening a database
==================
After requiring the ``FDB`` gem and selecting an API version, you probably want to open a :class:`Database`. The simplest way of doing this is using :func:`open`::
After requiring the ``FDB`` gem and selecting an API version, you probably want to open a :class:`Database` using :func:`open`::
require 'fdb'
FDB.api_version 610
db = FDB.open
.. function:: open( cluster_file=nil, db_name="DB" ) -> Database
.. function:: open( cluster_file=nil ) -> Database
|fdb-open-blurb|
.. note:: In this release, db_name must be "DB".
.. note:: ``fdb.open`` combines the effect of :func:`init`, :func:`create_cluster`, and :meth:`Cluster.open_database`.
.. function:: init() -> nil
Initializes the FoundationDB API, creating a thread for the FoundationDB client and initializing the client's networking engine. :func:`init` can only be called once. If called subsequently or after :func:`open`, it will raise a ``client_invalid_operation`` error.
.. function:: create_cluster(cluster_file=nil) -> Cluster
Connects to the cluster specified by :ref:`cluster_file <foundationdb-cluster-file>`, or by a :ref:`default cluster file <default-cluster-file>` if ``cluster_file`` is ``nil``.
.. global:: FDB.options
|network-options-blurb|
@ -105,17 +92,23 @@ After requiring the ``FDB`` gem and selecting an API version, you probably want
.. method:: FDB.options.set_trace_enable(output_directory) -> nil
|option-trace-enable-blurb|
|option-trace-enable-blurb|
.. warning:: |option-trace-enable-warning|
.. warning:: |option-trace-enable-warning|
.. method:: FDB.options.set_trace_max_logs_size(bytes) -> nil
|option-trace-max-logs-size-blurb|
|option-trace-max-logs-size-blurb|
.. method:: FDB.options.set_trace_roll_size(bytes) -> nil
|option-trace-roll-size-blurb|
|option-trace-roll-size-blurb|
.. method:: FDB.options.set_trace_format(format) -> nil
|option-trace-format-blurb|
.. method:: FDB.options.set_disable_multi_version_client_api() -> nil
|option-disable-multi-version-client-api|
@ -160,17 +153,6 @@ After requiring the ``FDB`` gem and selecting an API version, you probably want
.. method :: FDB.options.set_disable_multi_version_client_api() -> nil
Cluster objects
===============
.. class:: Cluster
.. method:: Cluster.open_database(name="DB") -> Database
Opens a database with the given name.
.. note:: In this release, name **must** be "DB".
.. _api-ruby-keys:
Keys and values

View File

@ -501,7 +501,7 @@ The ``start`` subcommand is used to start a DR backup. If there is already a DR
The ``switch`` subcommand is used to swap the source and destination database clusters of an active DR in differential mode. This means the destination will be unlocked and start streaming data into the source database, which will subsequently be locked.
This command requires both databases to be available. While the switch command is working, both databases will be locked for a few seconds.
This command requires both databases to be available. On the destination cluster, a ``dr_agent`` that points to the source cluster must be running. While the switch command is working, both databases will be locked for a few seconds.
.. program:: fdbdr abort

View File

@ -12,7 +12,6 @@
.. |get-key-func| replace:: get_key()
.. |get-range-func| replace:: get_range()
.. |commit-func| replace:: FIXME
.. |init-func| replace:: FIXME
.. |open-func| replace:: FIXME
.. |set-cluster-file-func| replace:: FIXME
.. |set-local-address-func| replace:: FIXME

View File

@ -12,7 +12,6 @@
.. |get-key-func| replace:: get_key()
.. |get-range-func| replace:: get_range()
.. |commit-func| replace:: ``commit()``
.. |init-func| replace:: FIXME
.. |open-func| replace:: FIXME
.. |set-cluster-file-func| replace:: FIXME
.. |set-local-address-func| replace:: FIXME

View File

@ -322,6 +322,7 @@ JSON Path Name Descriptio
client.messages inconsistent_cluster_file Cluster file is not up to date. It contains the connection string <value>. The current connection string is <value>. This must mean that file permissions or other platform issues have prevented the file from being updated. To change coordinators without manual intervention, the cluster file and its containing folder must be writable by all servers and clients. If a majority of the coordinators referenced by the old connection string are lost, the database will stop working until the correct cluster file is distributed to all processes.
client.messages no_cluster_controller Unable to locate a cluster controller within 2 seconds. Check that there are server processes running.
client.messages quorum_not_reachable Unable to reach a quorum of coordinators.
client.messages server_overloaded Server is under too much load and cannot respond.
client.messages status_incomplete_client Could not retrieve client status information.
client.messages status_incomplete_cluster Could not retrieve cluster status information.
client.messages status_incomplete_coordinators Could not fetch coordinator info.

View File

@ -8,6 +8,8 @@ Release Notes
Features
--------
* Get read version, read, and commit requests are counted and aggregated by server-side latency in configurable latency bands and output in JSON status. `(PR #1084) <https://github.com/apple/foundationdb/pull/1084>`_
Performance
-----------
@ -20,6 +22,18 @@ Status
Bindings
--------
* The API to create a database has been simplified across the bindings. All changes are backward compatible with previous API versions, with one exception in Java noted below. `(PR #942) <https://github.com/apple/foundationdb/pull/942>`_
* C: ``FDBCluster`` objects and related methods (``fdb_create_cluster``, ``fdb_cluster_create_database``, ``fdb_cluster_set_option``, ``fdb_cluster_destroy``, ``fdb_future_get_cluster``) have been removed. `(PR #942) <https://github.com/apple/foundationdb/pull/942>`_
* C: Added ``fdb_create_database`` that creates a new ``FDBDatabase`` object synchronously and removed ``fdb_future_get_database``. `(PR #942) <https://github.com/apple/foundationdb/pull/942>`_
* Python: Removed ``fdb.init``, ``fdb.create_cluster``, and ``fdb.Cluster``. ``fdb.open`` no longer accepts a ``database_name`` parameter. `(PR #942) <https://github.com/apple/foundationdb/pull/942>`_
* Java: Deprecated ``FDB.createCluster`` and ``Cluster``. The preferred way to get a ``Database`` is by using ``FDB.open``, which should work in both new and old API versions. `(PR #942) <https://github.com/apple/foundationdb/pull/942>`_
* Java: Removed ``Cluster(long cPtr, Executor executor)`` constructor. This is API breaking for any code that has subclassed the ``Cluster`` class and is not protected by API versioning. `(PR #942) <https://github.com/apple/foundationdb/pull/942>`_
* Ruby: Removed ``FDB.init``, ``FDB.create_cluster``, and ``FDB.Cluster``. ``FDB.open`` no longer accepts a ``database_name`` parameter. `(PR #942) <https://github.com/apple/foundationdb/pull/942>`_
* Golang: Deprecated ``fdb.StartNetwork``, ``fdb.Open``, ``fdb.MustOpen``, and ``fdb.CreateCluster`` and added ``fdb.OpenDatabase`` and ``fdb.MustOpenDatabase``. The preferred way to start the network and get a ``Database`` is by using ``FDB.OpenDatabase`` or ``FDB.OpenDefault``. `(PR #942) <https://github.com/apple/foundationdb/pull/942>`_
* Flow: Deprecated ``API::createCluster`` and ``Cluster`` and added ``API::createDatabase``. The preferred way to get a ``Database`` is by using ``API::createDatabase``. `(PR #942) <https://github.com/apple/foundationdb/pull/942>`_
* Golang: Added ``fdb.Printable`` to print a human-readable string for a given byte array. Add ``Key.String()``, which converts the ``Key`` to a ``string`` using the ``Printable`` function. `(PR #1010) <https://github.com/apple/foundationdb/pull/1010>`_
* Python: Python signal handling didn't work when waiting on a future. In particular, pressing Ctrl-C would not successfully interrupt the program. `(PR #1138) <https://github.com/apple/foundationdb/pull/1138>`_
Other Changes
-------------

View File

@ -1,25 +1,23 @@
set(FDBBACKUP_SRCS
backup.actor.cpp)
actor_set(FDBBACKUP_BUILD "${FDBBACKUP_SRCS}")
add_executable(fdbbackup "${FDBBACKUP_BUILD}")
actor_compile(fdbbackup "${FDBBACKUP_SRCS}")
add_flow_target(EXECUTABLE NAME fdbbackup SRCS ${FDBBACKUP_SRCS})
target_link_libraries(fdbbackup PRIVATE fdbclient)
install(TARGETS fdbbackup DESTINATION ${FDB_BIN_DIR} COMPONENT clients)
install(PROGRAMS $<TARGET_FILE:fdbbackup>
DESTINATION ${FDB_LIB_DIR}/foundationdb/backup_agent
RENAME backup_agent
install_symlink(
TO /${FDB_BIN_DIR}/fdbbackup
DESTINATION ${FDB_LIB_DIR}/foundationdb/backup_agent/backup_agent
COMPONENT clients)
install(PROGRAMS $<TARGET_FILE:fdbbackup>
DESTINATION ${FDB_BIN_DIR}
RENAME fdbrestore
install_symlink(
TO /${FDB_BIN_DIR}/fdbbackup
DESTINATION ${FDB_BIN_DIR}/fdbrestore
COMPONENT clients)
install(PROGRAMS $<TARGET_FILE:fdbbackup>
DESTINATION ${FDB_BIN_DIR}
RENAME dr_agent
install_symlink(
TO /${FDB_BIN_DIR}/fdbbackup
DESTINATION ${FDB_BIN_DIR}/dr_agent
COMPONENT clients)
install(PROGRAMS $<TARGET_FILE:fdbbackup>
DESTINATION ${FDB_BIN_DIR}
RENAME fdbdr
install_symlink(
TO /${FDB_BIN_DIR}/fdbbackup
DESTINATION ${FDB_BIN_DIR}/fdbdr
COMPONENT clients)

View File

@ -18,6 +18,9 @@
* limitations under the License.
*/
#define BOOST_DATE_TIME_NO_LIB
#include <boost/interprocess/managed_shared_memory.hpp>
#include "flow/flow.h"
#include "flow/FastAlloc.h"
#include "flow/serialize.h"
@ -35,6 +38,7 @@
#include "fdbclient/json_spirit/json_spirit_writer_template.h"
#include "fdbrpc/Platform.h"
#include "fdbrpc/TLSConnection.h"
#include <stdarg.h>
#include <stdio.h>
@ -47,15 +51,11 @@ using std::endl;
#ifdef _WIN32
#define WIN32_LEAN_AND_MEAN
#define NOMINMAX
#include <Windows.h>
#undef min
#undef max
#endif
#include <time.h>
#define BOOST_DATE_TIME_NO_LIB
#include <boost/interprocess/managed_shared_memory.hpp>
#ifdef __linux__
#include <execinfo.h>
#ifdef ALLOC_INSTRUMENTATION
@ -63,10 +63,9 @@ using std::endl;
#endif
#endif
#ifndef WIN32
#if defined(CMAKE_BUILD) || !defined(WIN32)
#include "versions.h"
#endif
#include "flow/SimpleOpt.h"
#include "flow/actorcompiler.h" // This must be the last #include.
@ -141,7 +140,9 @@ CSimpleOpt::SOption g_rgAgentOptions[] = {
{ OPT_HELP, "--help", SO_NONE },
{ OPT_DEVHELP, "--dev-help", SO_NONE },
{ OPT_BLOB_CREDENTIALS, "--blob_credentials", SO_REQ_SEP },
#ifndef TLS_DISABLED
TLS_OPTION_FLAGS
#endif
SO_END_OF_OPTIONS
};
@ -182,7 +183,9 @@ CSimpleOpt::SOption g_rgBackupStartOptions[] = {
{ OPT_DEVHELP, "--dev-help", SO_NONE },
{ OPT_KNOB, "--knob_", SO_REQ_SEP },
{ OPT_BLOB_CREDENTIALS, "--blob_credentials", SO_REQ_SEP },
#ifndef TLS_DISABLED
TLS_OPTION_FLAGS
#endif
SO_END_OF_OPTIONS
};
@ -211,7 +214,9 @@ CSimpleOpt::SOption g_rgBackupStatusOptions[] = {
{ OPT_HELP, "-h", SO_NONE },
{ OPT_HELP, "--help", SO_NONE },
{ OPT_DEVHELP, "--dev-help", SO_NONE },
#ifndef TLS_DISABLED
TLS_OPTION_FLAGS
#endif
SO_END_OF_OPTIONS
};
@ -238,7 +243,9 @@ CSimpleOpt::SOption g_rgBackupAbortOptions[] = {
{ OPT_HELP, "-h", SO_NONE },
{ OPT_HELP, "--help", SO_NONE },
{ OPT_DEVHELP, "--dev-help", SO_NONE },
#ifndef TLS_DISABLED
TLS_OPTION_FLAGS
#endif
SO_END_OF_OPTIONS
};
@ -267,7 +274,9 @@ CSimpleOpt::SOption g_rgBackupDiscontinueOptions[] = {
{ OPT_HELP, "-h", SO_NONE },
{ OPT_HELP, "--help", SO_NONE },
{ OPT_DEVHELP, "--dev-help", SO_NONE },
#ifndef TLS_DISABLED
TLS_OPTION_FLAGS
#endif
SO_END_OF_OPTIONS
};
@ -296,7 +305,9 @@ CSimpleOpt::SOption g_rgBackupWaitOptions[] = {
{ OPT_HELP, "-h", SO_NONE },
{ OPT_HELP, "--help", SO_NONE },
{ OPT_DEVHELP, "--dev-help", SO_NONE },
#ifndef TLS_DISABLED
TLS_OPTION_FLAGS
#endif
SO_END_OF_OPTIONS
};
@ -321,7 +332,9 @@ CSimpleOpt::SOption g_rgBackupPauseOptions[] = {
{ OPT_HELP, "-h", SO_NONE },
{ OPT_HELP, "--help", SO_NONE },
{ OPT_DEVHELP, "--dev-help", SO_NONE },
#ifndef TLS_DISABLED
TLS_OPTION_FLAGS
#endif
SO_END_OF_OPTIONS
};
@ -358,7 +371,9 @@ CSimpleOpt::SOption g_rgBackupExpireOptions[] = {
{ OPT_EXPIRE_BEFORE_DATETIME, "--expire_before_timestamp", SO_REQ_SEP },
{ OPT_EXPIRE_MIN_RESTORABLE_DAYS, "--min_restorable_days", SO_REQ_SEP },
{ OPT_EXPIRE_DELETE_BEFORE_DAYS, "--delete_before_days", SO_REQ_SEP },
#ifndef TLS_DISABLED
TLS_OPTION_FLAGS
#endif
SO_END_OF_OPTIONS
};
@ -385,7 +400,9 @@ CSimpleOpt::SOption g_rgBackupDeleteOptions[] = {
{ OPT_DEVHELP, "--dev-help", SO_NONE },
{ OPT_BLOB_CREDENTIALS, "--blob_credentials", SO_REQ_SEP },
{ OPT_KNOB, "--knob_", SO_REQ_SEP },
#ifndef TLS_DISABLED
TLS_OPTION_FLAGS
#endif
SO_END_OF_OPTIONS
};
@ -416,7 +433,9 @@ CSimpleOpt::SOption g_rgBackupDescribeOptions[] = {
{ OPT_KNOB, "--knob_", SO_REQ_SEP },
{ OPT_DESCRIBE_DEEP, "--deep", SO_NONE },
{ OPT_DESCRIBE_TIMESTAMPS, "--version_timestamps", SO_NONE },
#ifndef TLS_DISABLED
TLS_OPTION_FLAGS
#endif
SO_END_OF_OPTIONS
};
@ -446,7 +465,9 @@ CSimpleOpt::SOption g_rgBackupDumpOptions[] = {
{ OPT_KNOB, "--knob_", SO_REQ_SEP },
{ OPT_DUMP_BEGIN, "--begin", SO_REQ_SEP },
{ OPT_DUMP_END, "--end", SO_REQ_SEP },
#ifndef TLS_DISABLED
TLS_OPTION_FLAGS
#endif
SO_END_OF_OPTIONS
};
@ -473,7 +494,9 @@ CSimpleOpt::SOption g_rgBackupListOptions[] = {
{ OPT_DEVHELP, "--dev-help", SO_NONE },
{ OPT_BLOB_CREDENTIALS, "--blob_credentials", SO_REQ_SEP },
{ OPT_KNOB, "--knob_", SO_REQ_SEP },
#ifndef TLS_DISABLED
TLS_OPTION_FLAGS
#endif
SO_END_OF_OPTIONS
};
@ -512,7 +535,9 @@ CSimpleOpt::SOption g_rgRestoreOptions[] = {
{ OPT_HELP, "--help", SO_NONE },
{ OPT_DEVHELP, "--dev-help", SO_NONE },
{ OPT_BLOB_CREDENTIALS, "--blob_credentials", SO_REQ_SEP },
#ifndef TLS_DISABLED
TLS_OPTION_FLAGS
#endif
SO_END_OF_OPTIONS
};
@ -542,7 +567,9 @@ CSimpleOpt::SOption g_rgDBAgentOptions[] = {
{ OPT_HELP, "-h", SO_NONE },
{ OPT_HELP, "--help", SO_NONE },
{ OPT_DEVHELP, "--dev-help", SO_NONE },
#ifndef TLS_DISABLED
TLS_OPTION_FLAGS
#endif
SO_END_OF_OPTIONS
};
@ -573,7 +600,9 @@ CSimpleOpt::SOption g_rgDBStartOptions[] = {
{ OPT_HELP, "-h", SO_NONE },
{ OPT_HELP, "--help", SO_NONE },
{ OPT_DEVHELP, "--dev-help", SO_NONE },
#ifndef TLS_DISABLED
TLS_OPTION_FLAGS
#endif
SO_END_OF_OPTIONS
};
@ -604,7 +633,9 @@ CSimpleOpt::SOption g_rgDBStatusOptions[] = {
{ OPT_HELP, "-h", SO_NONE },
{ OPT_HELP, "--help", SO_NONE },
{ OPT_DEVHELP, "--dev-help", SO_NONE },
#ifndef TLS_DISABLED
TLS_OPTION_FLAGS
#endif
SO_END_OF_OPTIONS
};
@ -633,7 +664,9 @@ CSimpleOpt::SOption g_rgDBSwitchOptions[] = {
{ OPT_HELP, "-h", SO_NONE },
{ OPT_HELP, "--help", SO_NONE },
{ OPT_DEVHELP, "--dev-help", SO_NONE },
#ifndef TLS_DISABLED
TLS_OPTION_FLAGS
#endif
SO_END_OF_OPTIONS
};
@ -663,7 +696,9 @@ CSimpleOpt::SOption g_rgDBAbortOptions[] = {
{ OPT_HELP, "-h", SO_NONE },
{ OPT_HELP, "--help", SO_NONE },
{ OPT_DEVHELP, "--dev-help", SO_NONE },
#ifndef TLS_DISABLED
TLS_OPTION_FLAGS
#endif
SO_END_OF_OPTIONS
};
@ -690,7 +725,9 @@ CSimpleOpt::SOption g_rgDBPauseOptions[] = {
{ OPT_HELP, "-h", SO_NONE },
{ OPT_HELP, "--help", SO_NONE },
{ OPT_DEVHELP, "--dev-help", SO_NONE },
#ifndef TLS_DISABLED
TLS_OPTION_FLAGS
#endif
SO_END_OF_OPTIONS
};
@ -754,6 +791,9 @@ static void printAgentUsage(bool devhelp) {
printf(" -m SIZE, --memory SIZE\n"
" Memory limit. The default value is 8GiB. When specified\n"
" without a unit, MiB is assumed.\n");
#ifndef TLS_DISABLED
printf(TLS_HELP);
#endif
printf(" -v, --version Print version information and exit.\n");
printf(" -h, --help Display this help and exit.\n");
@ -818,6 +858,9 @@ static void printBackupUsage(bool devhelp) {
printf(" -k KEYS List of key ranges to backup.\n"
" If not specified, the entire database will be backed up.\n");
printf(" -n, --dryrun For start or restore operations, performs a trial run with no actual changes made.\n");
#ifndef TLS_DISABLED
printf(TLS_HELP);
#endif
printf(" -v, --version Print version information and exit.\n");
printf(" -w, --wait Wait for the backup to complete (allowed with `start' and `discontinue').\n");
printf(" -z, --no-stop-when-done\n"
@ -862,6 +905,9 @@ static void printRestoreUsage(bool devhelp ) {
printf(" --remove_prefix PREFIX prefix to remove from the restored keys\n");
printf(" --add_prefix PREFIX prefix to add to the restored keys\n");
printf(" -n, --dryrun Perform a trial run with no changes made.\n");
#ifndef TLS_DISABLED
printf(TLS_HELP);
#endif
printf(" -v DBVERSION The version at which the database will be restored.\n");
printf(" -h, --help Display this help and exit.\n");
@ -898,6 +944,9 @@ static void printDBAgentUsage(bool devhelp) {
printf(" -m SIZE, --memory SIZE\n"
" Memory limit. The default value is 8GiB. When specified\n"
" without a unit, MiB is assumed.\n");
#ifndef TLS_DISABLED
printf(TLS_HELP);
#endif
printf(" -v, --version Print version information and exit.\n");
printf(" -h, --help Display this help and exit.\n");
if (devhelp) {
@ -925,6 +974,9 @@ static void printDBBackupUsage(bool devhelp) {
printf(" -k KEYS List of key ranges to backup.\n"
" If not specified, the entire database will be backed up.\n");
printf(" --cleanup Abort will attempt to stop mutation logging on the source cluster.\n");
#ifndef TLS_DISABLED
printf(TLS_HELP);
#endif
printf(" -v, --version Print version information and exit.\n");
printf(" -h, --help Display this help and exit.\n");
printf("\n"
@ -1547,7 +1599,7 @@ ACTOR Future<Void> submitBackup(Database db, std::string url, int snapshotInterv
// Wait for the backup to complete, if requested
if (waitForCompletion) {
printf("Submitted and now waiting for the backup on tag `%s' to complete.\n", printable(StringRef(tagName)).c_str());
int _ = wait(backupAgent.waitBackup(db, tagName));
wait(success(backupAgent.waitBackup(db, tagName)));
}
else {
// Check if a backup agent is running
@ -1751,7 +1803,7 @@ ACTOR Future<Void> discontinueBackup(Database db, std::string tagName, bool wait
// Wait for the backup to complete, if requested
if (waitForCompletion) {
printf("Discontinued and now waiting for the backup on tag `%s' to complete.\n", printable(StringRef(tagName)).c_str());
int _ = wait(backupAgent.waitBackup(db, tagName));
wait(success(backupAgent.waitBackup(db, tagName)));
}
else {
printf("The backup on tag `%s' was successfully discontinued.\n", printable(StringRef(tagName)).c_str());
@ -2448,6 +2500,7 @@ int main(int argc, char* argv[]) {
uint64_t memLimit = 8LL << 30;
Optional<uint64_t> ti;
std::vector<std::string> blobCredentials;
std::string tlsCertPath, tlsKeyPath, tlsCAPath, tlsPassword, tlsVerifyPeers;
Version dumpBegin = 0;
Version dumpEnd = std::numeric_limits<Version>::max();
@ -2712,6 +2765,26 @@ int main(int argc, char* argv[]) {
case OPT_BLOB_CREDENTIALS:
blobCredentials.push_back(args->OptionArg());
break;
#ifndef TLS_DISABLED
case TLSOptions::OPT_TLS_PLUGIN:
args->OptionArg();
break;
case TLSOptions::OPT_TLS_CERTIFICATES:
tlsCertPath = args->OptionArg();
break;
case TLSOptions::OPT_TLS_PASSWORD:
tlsPassword = args->OptionArg();
break;
case TLSOptions::OPT_TLS_CA_FILE:
tlsCAPath = args->OptionArg();
break;
case TLSOptions::OPT_TLS_KEY:
tlsKeyPath = args->OptionArg();
break;
case TLSOptions::OPT_TLS_VERIFY_PEERS:
tlsVerifyPeers = args->OptionArg();
break;
#endif
case OPT_DUMP_BEGIN:
dumpBegin = parseVersion(args->OptionArg());
break;
@ -2839,6 +2912,49 @@ int main(int argc, char* argv[]) {
setNetworkOption(FDBNetworkOptions::ENABLE_SLOW_TASK_PROFILING);
}
setNetworkOption(FDBNetworkOptions::DISABLE_CLIENT_STATISTICS_LOGGING);
// deferred TLS options
if (tlsCertPath.size()) {
try {
setNetworkOption(FDBNetworkOptions::TLS_CERT_PATH, tlsCertPath);
}
catch (Error& e) {
fprintf(stderr, "ERROR: cannot set TLS certificate path to `%s' (%s)\n", tlsCertPath.c_str(), e.what());
return 1;
}
}
if (tlsCAPath.size()) {
try {
setNetworkOption(FDBNetworkOptions::TLS_CA_PATH, tlsCAPath);
}
catch (Error& e) {
fprintf(stderr, "ERROR: cannot set TLS CA path to `%s' (%s)\n", tlsCAPath.c_str(), e.what());
return 1;
}
}
if (tlsKeyPath.size()) {
try {
if (tlsPassword.size())
setNetworkOption(FDBNetworkOptions::TLS_PASSWORD, tlsPassword);
setNetworkOption(FDBNetworkOptions::TLS_KEY_PATH, tlsKeyPath);
}
catch (Error& e) {
fprintf(stderr, "ERROR: cannot set TLS key path to `%s' (%s)\n", tlsKeyPath.c_str(), e.what());
return 1;
}
}
if (tlsVerifyPeers.size()) {
try {
setNetworkOption(FDBNetworkOptions::TLS_VERIFY_PEERS, tlsVerifyPeers);
}
catch (Error& e) {
fprintf(stderr, "ERROR: cannot set TLS peer verification to `%s' (%s)\n", tlsVerifyPeers.c_str(), e.what());
return 1;
}
}
Error::init();
std::set_new_handler( &platform::outOfMemory );
setMemoryQuota( memLimit );

View File

@ -2,12 +2,13 @@ set(FDBCLI_SRCS
fdbcli.actor.cpp
FlowLineNoise.actor.cpp
FlowLineNoise.h
linenoise/linenoise.c
linenoise/linenoise.h)
actor_set(FDBCLI_BUILD "${FDBCLI_SRCS}")
add_executable(fdbcli "${FDBCLI_BUILD}")
actor_compile(fdbcli "${FDBCLI_SRCS}")
if(NOT WIN32)
list(APPEND FDBCLI_SRCS linenoise/linenoise.c)
endif()
add_flow_target(EXECUTABLE NAME fdbcli SRCS ${FDBCLI_SRCS})
target_link_libraries(fdbcli PRIVATE fdbclient)
install(TARGETS fdbcli DESTINATION ${FDB_BIN_DIR} COMPONENT clients)

View File

@ -47,7 +47,7 @@
#include "fdbcli/linenoise/linenoise.h"
#endif
#ifndef WIN32
#if defined(CMAKE_BUILD) || !defined(WIN32)
#include "versions.h"
#endif
@ -672,6 +672,38 @@ std::string logBackupDR(const char *context, std::map<std::string, std::string>
return outputString;
}
int getNumofNonExcludedMachines(StatusObjectReader statusObjCluster) {
StatusObjectReader machineMap;
int numOfNonExcludedMachines = 0;
if (statusObjCluster.get("machines", machineMap)) {
for (auto mach : machineMap.obj()) {
StatusObjectReader machine(mach.second);
if (machine.has("excluded") && !machine.last().get_bool())
numOfNonExcludedMachines++;
}
}
return numOfNonExcludedMachines;
}
std::pair<int, int> getNumOfNonExcludedProcessAndZones(StatusObjectReader statusObjCluster) {
StatusObjectReader processesMap;
std::set<std::string> zones;
int numOfNonExcludedProcesses = 0;
if (statusObjCluster.get("processes", processesMap)) {
for (auto proc : processesMap.obj()) {
StatusObjectReader process(proc.second);
if (process.has("excluded") && process.last().get_bool())
continue;
numOfNonExcludedProcesses++;
std::string zoneId;
if (process.get("locality.zoneid", zoneId)) {
zones.insert(zoneId);
}
}
}
return { numOfNonExcludedProcesses, zones.size() };
}
void printStatus(StatusObjectReader statusObj, StatusClient::StatusLevel level, bool displayDatabaseAvailable = true, bool hideErrorMessages = false) {
if (FlowTransport::transport().incompatibleOutgoingConnectionsPresent()) {
printf("WARNING: One or more of the processes in the cluster is incompatible with this version of fdbcli.\n\n");
@ -754,9 +786,11 @@ void printStatus(StatusObjectReader statusObj, StatusClient::StatusLevel level,
fatalRecoveryState = true;
if (name == "recruiting_transaction_servers") {
description += format("\nNeed at least %d log servers, %d proxies and %d resolvers.", recoveryState["required_logs"].get_int(), recoveryState["required_proxies"].get_int(), recoveryState["required_resolvers"].get_int());
if (statusObjCluster.has("machines") && statusObjCluster.has("processes"))
description += format("\nHave %d processes on %d machines.", statusObjCluster["processes"].get_obj().size(), statusObjCluster["machines"].get_obj().size());
description += format("\nNeed at least %d log servers across unique zones, %d proxies and %d resolvers.", recoveryState["required_logs"].get_int(), recoveryState["required_proxies"].get_int(), recoveryState["required_resolvers"].get_int());
if (statusObjCluster.has("machines") && statusObjCluster.has("processes")) {
auto numOfNonExcludedProcessesAndZones = getNumOfNonExcludedProcessAndZones(statusObjCluster);
description += format("\nHave %d non-excluded processes on %d machines across %d zones.", numOfNonExcludedProcessesAndZones.first, getNumofNonExcludedMachines(statusObjCluster), numOfNonExcludedProcessesAndZones.second);
}
} else if (name == "locking_old_transaction_servers" && recoveryState["missing_logs"].get_str().size()) {
description += format("\nNeed one or more of the following log servers: %s", recoveryState["missing_logs"].get_str().c_str());
}
@ -1651,7 +1685,7 @@ ACTOR Future<bool> fileConfigure(Database db, std::string filePath, bool isNewDa
StatusObject configJSON = config.get_obj();
json_spirit::mValue schema;
if(!json_spirit::read_string( JSONSchemas::configurationSchema.toString(), schema )) {
if(!json_spirit::read_string( JSONSchemas::clusterConfigurationSchema.toString(), schema )) {
ASSERT(false);
}
@ -2561,7 +2595,7 @@ ACTOR Future<int> cli(CLIOptions opt, LineNoise* plinenoise) {
}
if( tokencmp(tokens[0], "waitopen")) {
Version _ = wait( getTransaction(db,tr,options,intrans)->getReadVersion() );
wait(success( getTransaction(db,tr,options,intrans)->getReadVersion() ));
continue;
}
@ -3173,10 +3207,10 @@ ACTOR Future<int> cli(CLIOptions opt, LineNoise* plinenoise) {
is_error = true;
} else {
if(tokencmp(tokens[1], "on")) {
int _ = wait(setDDMode(db, 1));
wait(success(setDDMode(db, 1)));
printf("Data distribution is enabled\n");
} else if(tokencmp(tokens[1], "off")) {
int _ = wait(setDDMode(db, 0));
wait(success(setDDMode(db, 0)));
printf("Data distribution is disabled\n");
} else {
printf("Usage: datadistribution <on|off>\n");

View File

@ -220,6 +220,15 @@ static ValueRef doByteMin(const Optional<ValueRef>& existingValueOptional, const
return otherOperand;
}
static Optional<ValueRef> doCompareAndClear(const Optional<ValueRef>& existingValueOptional,
const ValueRef& otherOperand, Arena& ar) {
if (!existingValueOptional.present() || existingValueOptional.get() == otherOperand) {
// Clear the value.
return Optional<ValueRef>();
}
return existingValueOptional; // No change required.
}
/*
* Returns the range corresponding to the specified versionstamp key.
*/

View File

@ -345,7 +345,7 @@ public:
state Reference<IAsyncFile> f = wait(bc->readFile(snapshot.fileName));
int64_t size = wait(f->size());
state Standalone<StringRef> buf = makeString(size);
int _ = wait(f->read(mutateString(buf), buf.size(), 0));
wait(success(f->read(mutateString(buf), buf.size(), 0)));
json_spirit::mValue json;
json_spirit::read_string(buf.toString(), json);
JSONDoc doc(json);
@ -606,13 +606,13 @@ public:
state Optional<Version> metaUnreliableEnd;
std::vector<Future<Void>> metaReads;
metaReads.push_back(store(bc->expiredEndVersion().get(), metaExpiredEnd));
metaReads.push_back(store(bc->unreliableEndVersion().get(), metaUnreliableEnd));
metaReads.push_back(store(metaExpiredEnd, bc->expiredEndVersion().get()));
metaReads.push_back(store(metaUnreliableEnd, bc->unreliableEndVersion().get()));
// Only read log begin/end versions if not doing a deep scan, otherwise scan files and recalculate them.
if(!deepScan) {
metaReads.push_back(store(bc->logBeginVersion().get(), metaLogBegin));
metaReads.push_back(store(bc->logEndVersion().get(), metaLogEnd));
metaReads.push_back(store(metaLogBegin, bc->logBeginVersion().get()));
metaReads.push_back(store(metaLogEnd, bc->logEndVersion().get()));
}
wait(waitForAll(metaReads));
@ -682,7 +682,7 @@ public:
}
state std::vector<LogFile> logs;
wait(store(bc->listLogFiles(scanBegin, scanEnd), logs) && store(bc->listKeyspaceSnapshots(), desc.snapshots));
wait(store(logs, bc->listLogFiles(scanBegin, scanEnd)) && store(desc.snapshots, bc->listKeyspaceSnapshots()));
// List logs in version order so log continuity can be analyzed
std::sort(logs.begin(), logs.end());
@ -842,7 +842,7 @@ public:
progress->step = "Listing files";
}
// Get log files or range files that contain any data at or before expireEndVersion
wait(store(bc->listLogFiles(scanBegin, expireEndVersion - 1), logs) && store(bc->listRangeFiles(scanBegin, expireEndVersion - 1), ranges));
wait(store(logs, bc->listLogFiles(scanBegin, expireEndVersion - 1)) && store(ranges, bc->listRangeFiles(scanBegin, expireEndVersion - 1)));
// The new logBeginVersion will be taken from the last log file, if there is one
state Optional<Version> newLogBeginVersion;
@ -1575,7 +1575,7 @@ ACTOR Future<Version> timeKeeperVersionFromDatetime(std::string datetime, Databa
if (results.size() != 1) {
// No key less than time was found in the database
// Look for a key >= time.
wait( store( versionMap.getRange(tr, time, std::numeric_limits<int64_t>::max(), 1), results) );
wait( store( results, versionMap.getRange(tr, time, std::numeric_limits<int64_t>::max(), 1) ) );
if(results.size() != 1) {
fprintf(stderr, "ERROR: Unable to calculate a version for given date/time.\n");
@ -1615,7 +1615,7 @@ ACTOR Future<Optional<int64_t>> timeKeeperEpochsFromVersion(Version v, Reference
if(mid == min) {
// There aren't any records having a version < v, so just look for any record having a time < now
// and base a result on it
wait(store(versionMap.getRange(tr, 0, (int64_t)now(), 1), results));
wait(store(results, versionMap.getRange(tr, 0, (int64_t)now(), 1)));
if (results.size() != 1) {
// There aren't any timekeeper records to base a result on so return nothing

View File

@ -84,14 +84,9 @@ set(FDBCLIENT_SRCS
${CMAKE_CURRENT_BINARY_DIR}/FDBOptions.g.cpp)
add_custom_command(OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/FDBOptions.g.h ${CMAKE_CURRENT_BINARY_DIR}/FDBOptions.g.cpp
COMMAND ${MONO_EXECUTABLE} ${VEXILLOGRAPHER_EXE} ${CMAKE_CURRENT_SOURCE_DIR}/vexillographer/fdb.options cpp ${CMAKE_CURRENT_BINARY_DIR}/FDBOptions.g
DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/vexillographer/fdb.options vexillographer
COMMENT "Generate FDBOptions c++ files")
add_custom_target(fdboptions DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/FDBOptions.g.h ${CMAKE_CURRENT_BINARY_DIR}/FDBOptions.g.cpp)
vexillographer_compile(TARGET fdboptions LANG cpp OUT ${CMAKE_CURRENT_BINARY_DIR}/FDBOptions.g
OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/FDBOptions.g.h ${CMAKE_CURRENT_BINARY_DIR}/FDBOptions.g.cpp)
actor_set(FDBCLIENT_BUILD "${FDBCLIENT_SRCS}")
add_library(fdbclient STATIC ${FDBCLIENT_BUILD})
add_flow_target(STATIC_LIBRARY NAME fdbclient SRCS ${FDBCLIENT_SRCS})
add_dependencies(fdbclient fdboptions)
actor_compile(fdbclient "${FDBCLIENT_SRCS}")
target_link_libraries(fdbclient PUBLIC fdbrpc)

View File

@ -24,11 +24,54 @@
#include "fdbclient/FDBTypes.h"
static const char * typeString[] = { "SetValue", "ClearRange", "AddValue", "DebugKeyRange", "DebugKey", "NoOp", "And", "Or", "Xor", "AppendIfFits", "AvailableForReuse", "Reserved_For_LogProtocolMessage", "Max", "Min", "SetVersionstampedKey", "SetVersionstampedValue", "ByteMin", "ByteMax", "MinV2", "AndV2" };
static const char* typeString[] = { "SetValue",
"ClearRange",
"AddValue",
"DebugKeyRange",
"DebugKey",
"NoOp",
"And",
"Or",
"Xor",
"AppendIfFits",
"AvailableForReuse",
"Reserved_For_LogProtocolMessage",
"Max",
"Min",
"SetVersionstampedKey",
"SetVersionstampedValue",
"ByteMin",
"ByteMax",
"MinV2",
"AndV2",
"CompareAndClear" };
struct MutationRef {
static const int OVERHEAD_BYTES = 12; //12 is the size of Header in MutationList entries
enum Type : uint8_t { SetValue=0, ClearRange, AddValue, DebugKeyRange, DebugKey, NoOp, And, Or, Xor, AppendIfFits, AvailableForReuse, Reserved_For_LogProtocolMessage /* See fdbserver/LogProtocolMessage.h */, Max, Min, SetVersionstampedKey, SetVersionstampedValue, ByteMin, ByteMax, MinV2, AndV2, MAX_ATOMIC_OP };
enum Type : uint8_t {
SetValue = 0,
ClearRange,
AddValue,
DebugKeyRange,
DebugKey,
NoOp,
And,
Or,
Xor,
AppendIfFits,
AvailableForReuse,
Reserved_For_LogProtocolMessage /* See fdbserver/LogProtocolMessage.h */,
Max,
Min,
SetVersionstampedKey,
SetVersionstampedValue,
ByteMin,
ByteMax,
MinV2,
AndV2,
CompareAndClear,
MAX_ATOMIC_OP
};
// This is stored this way for serialization purposes.
uint8_t type;
StringRef param1, param2;
@ -54,10 +97,14 @@ struct MutationRef {
}
// These masks define which mutation types have particular properties (they are used to implement isSingleKeyMutation() etc)
enum {
ATOMIC_MASK = (1 << AddValue) | (1 << And) | (1 << Or) | (1 << Xor) | (1 << AppendIfFits) | (1 << Max) | (1 << Min) | (1 << SetVersionstampedKey) | (1 << SetVersionstampedValue) | (1 << ByteMin) | (1 << ByteMax) | (1 << MinV2) | (1 << AndV2),
SINGLE_KEY_MASK = ATOMIC_MASK | (1<<SetValue),
NON_ASSOCIATIVE_MASK = (1 << AddValue) | (1 << Or) | (1 << Xor) | (1 << Max) | (1 << Min) | (1 << SetVersionstampedKey) | (1 << SetVersionstampedValue) | (1 << MinV2)
enum {
ATOMIC_MASK = (1 << AddValue) | (1 << And) | (1 << Or) | (1 << Xor) | (1 << AppendIfFits) | (1 << Max) |
(1 << Min) | (1 << SetVersionstampedKey) | (1 << SetVersionstampedValue) | (1 << ByteMin) |
(1 << ByteMax) | (1 << MinV2) | (1 << AndV2) | (1 << CompareAndClear),
SINGLE_KEY_MASK = ATOMIC_MASK | (1 << SetValue),
NON_ASSOCIATIVE_MASK = (1 << AddValue) | (1 << Or) | (1 << Xor) | (1 << Max) | (1 << Min) |
(1 << SetVersionstampedKey) | (1 << SetVersionstampedValue) | (1 << MinV2) |
(1 << CompareAndClear)
};
};

View File

@ -712,7 +712,7 @@ namespace dbBackup {
if (endVersion <= beginVersion) {
wait(delay(FLOW_KNOBS->PREVENT_FAST_SPIN_DELAY));
Key _ = wait(CopyLogsTaskFunc::addTask(tr, taskBucket, task, prevBeginVersion, beginVersion, TaskCompletionKey::signal(onDone)));
wait(success(CopyLogsTaskFunc::addTask(tr, taskBucket, task, prevBeginVersion, beginVersion, TaskCompletionKey::signal(onDone))));
wait(taskBucket->finish(tr, task));
return Void();
}
@ -755,7 +755,7 @@ namespace dbBackup {
} else {
if(appliedVersion <= stopVersionData) {
wait(delay(FLOW_KNOBS->PREVENT_FAST_SPIN_DELAY));
Key _ = wait(CopyLogsTaskFunc::addTask(tr, taskBucket, task, prevBeginVersion, beginVersion, TaskCompletionKey::signal(onDone)));
wait(success(CopyLogsTaskFunc::addTask(tr, taskBucket, task, prevBeginVersion, beginVersion, TaskCompletionKey::signal(onDone))));
wait(taskBucket->finish(tr, task));
return Void();
}
@ -880,7 +880,7 @@ namespace dbBackup {
if (task->params.find(FinishedFullBackupTaskFunc::keyInsertTask) != task->params.end()) {
state Reference<TaskFuture> onDone = futureBucket->unpack(task->params[Task::reservedTaskParamKeyDone]);
Key _ = wait(FinishedFullBackupTaskFunc::addTask(tr, taskBucket, task, TaskCompletionKey::signal(onDone)));
wait(success(FinishedFullBackupTaskFunc::addTask(tr, taskBucket, task, TaskCompletionKey::signal(onDone))));
wait(taskBucket->finish(tr, task));
return Void();
}
@ -926,7 +926,7 @@ namespace dbBackup {
if (endVersion <= beginVersion) {
wait(delay(FLOW_KNOBS->PREVENT_FAST_SPIN_DELAY));
Key _ = wait(CopyDiffLogsTaskFunc::addTask(tr, taskBucket, task, prevBeginVersion, beginVersion, TaskCompletionKey::signal(onDone)));
wait(success(CopyDiffLogsTaskFunc::addTask(tr, taskBucket, task, prevBeginVersion, beginVersion, TaskCompletionKey::signal(onDone))));
wait(taskBucket->finish(tr, task));
return Void();
}
@ -1336,13 +1336,13 @@ namespace dbBackup {
if (task->params[BackupAgentBase::destUid].size() == 0) {
TraceEvent("DBA_CopyDiffLogsUpgradeTaskFuncAbortInUpgrade");
Key _ = wait(AbortOldBackupTaskFunc::addTask(tr, taskBucket, task, TaskCompletionKey::signal(onDone)));
wait(success(AbortOldBackupTaskFunc::addTask(tr, taskBucket, task, TaskCompletionKey::signal(onDone))));
} else {
Version beginVersion = BinaryReader::fromStringRef<Version>(task->params[DatabaseBackupAgent::keyBeginVersion], Unversioned());
Subspace config = Subspace(databaseBackupPrefixRange.begin).get(BackupAgentBase::keyConfig).get(task->params[DatabaseBackupAgent::keyConfigLogUid]);
tr->set(config.pack(BackupAgentBase::destUid), task->params[BackupAgentBase::destUid]);
tr->set(config.pack(BackupAgentBase::keyDrVersion), BinaryWriter::toValue(DatabaseBackupAgent::LATEST_DR_VERSION, Unversioned()));
Key _ = wait(CopyDiffLogsTaskFunc::addTask(tr, taskBucket, task, 0, beginVersion, TaskCompletionKey::signal(onDone)));
wait(success(CopyDiffLogsTaskFunc::addTask(tr, taskBucket, task, 0, beginVersion, TaskCompletionKey::signal(onDone))));
}
wait(taskBucket->finish(tr, task));
@ -1409,7 +1409,7 @@ namespace dbBackup {
// Start the complete task, if differential is not enabled
if (stopWhenDone.present()) {
// After the Backup completes, clear the backup subspace and update the status
Key _ = wait(FinishedFullBackupTaskFunc::addTask(tr, taskBucket, task, TaskCompletionKey::noSignal()));
wait(success(FinishedFullBackupTaskFunc::addTask(tr, taskBucket, task, TaskCompletionKey::noSignal())));
}
else { // Start the writing of logs, if differential
tr->set(states.pack(DatabaseBackupAgent::keyStateStatus), StringRef(BackupAgentBase::getStateText(BackupAgentBase::STATE_DIFFERENTIAL)));
@ -1417,10 +1417,10 @@ namespace dbBackup {
allPartsDone = futureBucket->future(tr);
Version prevBeginVersion = BinaryReader::fromStringRef<Version>(task->params[DatabaseBackupAgent::keyPrevBeginVersion], Unversioned());
Key _ = wait(CopyDiffLogsTaskFunc::addTask(tr, taskBucket, task, prevBeginVersion, restoreVersion, TaskCompletionKey::joinWith(allPartsDone)));
wait(success(CopyDiffLogsTaskFunc::addTask(tr, taskBucket, task, prevBeginVersion, restoreVersion, TaskCompletionKey::joinWith(allPartsDone))));
// After the Backup completes, clear the backup subspace and update the status
Key _ = wait(FinishedFullBackupTaskFunc::addTask(tr, taskBucket, task, TaskCompletionKey::noSignal(), allPartsDone));
wait(success(FinishedFullBackupTaskFunc::addTask(tr, taskBucket, task, TaskCompletionKey::noSignal(), allPartsDone)));
}
wait(taskBucket->finish(tr, task));
@ -1574,7 +1574,7 @@ namespace dbBackup {
if(task->params[DatabaseBackupAgent::keyDatabasesInSync] != std::string("t")) {
for (; rangeCount < backupRanges.size(); ++rangeCount) {
Key _ = wait(BackupRangeTaskFunc::addTask(tr, taskBucket, task, backupRanges[rangeCount].begin, backupRanges[rangeCount].end, TaskCompletionKey::joinWith(kvBackupRangeComplete)));
wait(success(BackupRangeTaskFunc::addTask(tr, taskBucket, task, backupRanges[rangeCount].begin, backupRanges[rangeCount].end, TaskCompletionKey::joinWith(kvBackupRangeComplete))));
}
}
else {
@ -1582,13 +1582,13 @@ namespace dbBackup {
}
// After the BackupRangeTask completes, set the stop key which will stop the BackupLogsTask
Key _ = wait(FinishFullBackupTaskFunc::addTask(tr, taskBucket, task, TaskCompletionKey::noSignal(), kvBackupRangeComplete));
wait(success(FinishFullBackupTaskFunc::addTask(tr, taskBucket, task, TaskCompletionKey::noSignal(), kvBackupRangeComplete)));
// Backup the logs which will create BackupLogRange tasks
Key _ = wait(CopyLogsTaskFunc::addTask(tr, taskBucket, task, 0, beginVersion, TaskCompletionKey::joinWith(kvBackupComplete)));
wait(success(CopyLogsTaskFunc::addTask(tr, taskBucket, task, 0, beginVersion, TaskCompletionKey::joinWith(kvBackupComplete))));
// After the Backup completes, clear the backup subspace and update the status
Key _ = wait(BackupRestorableTaskFunc::addTask(tr, taskBucket, task, TaskCompletionKey::noSignal(), kvBackupComplete));
wait(success(BackupRestorableTaskFunc::addTask(tr, taskBucket, task, TaskCompletionKey::noSignal(), kvBackupComplete)));
wait(taskBucket->finish(tr, task));
return Void();
@ -1906,7 +1906,7 @@ public:
throw;
}
int _ = wait( backupAgent->waitBackup(dest, tagName, true) );
wait(success( backupAgent->waitBackup(dest, tagName, true) ));
TraceEvent("DBA_SwitchoverStopped");
@ -1940,7 +1940,7 @@ public:
TraceEvent("DBA_SwitchoverSubmitted");
int _ = wait( drAgent.waitSubmitted(backupAgent->taskBucket->src, tagName) );
wait(success( drAgent.waitSubmitted(backupAgent->taskBucket->src, tagName) ));
TraceEvent("DBA_SwitchoverStarted");
@ -2150,7 +2150,7 @@ public:
loop{
try {
Version _ = wait(tr->getReadVersion()); //get the read version before getting a version from the source database to prevent the time differential from going negative
wait(success(tr->getReadVersion())); //get the read version before getting a version from the source database to prevent the time differential from going negative
state Transaction scrTr(backupAgent->taskBucket->src);
scrTr.setOption(FDBTransactionOptions::LOCK_AWARE);

View File

@ -63,7 +63,7 @@ void parse( std::vector<RegionInfo>* regions, ValueRef const& v ) {
RegionInfo info;
json_spirit::mArray datacenters;
dc.get("datacenters", datacenters);
bool nonSatelliteDatacenters = 0;
int nonSatelliteDatacenters = 0;
for (StatusObjectReader s : datacenters) {
std::string idStr;
if (s.has("satellite") && s.last().get_int() == 1) {

View File

@ -75,8 +75,12 @@ public:
Error deferredError;
bool lockAware;
bool isError() {
return deferredError.code() != invalid_error_code;
}
void checkDeferredError() {
if( deferredError.code() != invalid_error_code ) {
if(isError()) {
throw deferredError;
}
}
@ -91,6 +95,8 @@ public:
Future<Void> clientInfoMonitor, Standalone<StringRef> dbId, int taskID, LocalityData const& clientLocality,
bool enableLocalityLoadBalance, bool lockAware, int apiVersion = Database::API_VERSION_LATEST );
explicit DatabaseContext( const Error &err );
// Key DB-specific information
AsyncTrigger masterProxiesChangeTrigger;
Future<Void> monitorMasterProxiesInfoChange;

View File

@ -1144,8 +1144,8 @@ namespace fileBackup {
tr->setOption(FDBTransactionOptions::LOCK_AWARE);
wait(taskBucket->keepRunning(tr, task)
&& storeOrThrow(backup.snapshotBeginVersion().get(tr), snapshotBeginVersion)
&& store(backup.snapshotRangeFileCount().getD(tr), snapshotRangeFileCount)
&& storeOrThrow(snapshotBeginVersion, backup.snapshotBeginVersion().get(tr))
&& store(snapshotRangeFileCount, backup.snapshotRangeFileCount().getD(tr))
);
break;
@ -1201,7 +1201,7 @@ namespace fileBackup {
if (nextKey != endKey) {
// Add task to cover nextKey to the end, using the priority of the current task
Key _ = wait(addTask(tr, taskBucket, task, task->getPriority(), nextKey, endKey, TaskCompletionKey::joinWith(onDone), Reference<TaskFuture>(), task->getPriority()));
wait(success(addTask(tr, taskBucket, task, task->getPriority(), nextKey, endKey, TaskCompletionKey::joinWith(onDone), Reference<TaskFuture>(), task->getPriority())));
}
return Void();
@ -1324,15 +1324,15 @@ namespace fileBackup {
tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
tr->setOption(FDBTransactionOptions::LOCK_AWARE);
wait( store(config.snapshotBeginVersion().getOrThrow(tr), snapshotBeginVersion)
&& store(config.snapshotTargetEndVersion().getOrThrow(tr), snapshotTargetEndVersion)
&& store(config.backupRanges().getOrThrow(tr), backupRanges)
&& store(config.snapshotIntervalSeconds().getOrThrow(tr), snapshotIntervalSeconds)
wait( store(snapshotBeginVersion, config.snapshotBeginVersion().getOrThrow(tr))
&& store(snapshotTargetEndVersion, config.snapshotTargetEndVersion().getOrThrow(tr))
&& store(backupRanges, config.backupRanges().getOrThrow(tr))
&& store(snapshotIntervalSeconds, config.snapshotIntervalSeconds().getOrThrow(tr))
// The next two parameters are optional
&& store(config.snapshotBatchFuture().get(tr), snapshotBatchFutureKey)
&& store(config.snapshotBatchSize().get(tr), snapshotBatchSize)
&& store(config.latestSnapshotEndVersion().get(tr), latestSnapshotEndVersion)
&& store(tr->getReadVersion(), recentReadVersion)
&& store(snapshotBatchFutureKey, config.snapshotBatchFuture().get(tr))
&& store(snapshotBatchSize, config.snapshotBatchSize().get(tr))
&& store(latestSnapshotEndVersion, config.latestSnapshotEndVersion().get(tr))
&& store(recentReadVersion, tr->getReadVersion())
&& taskBucket->keepRunning(tr, task));
// If the snapshot batch future key does not exist, create it, set it, and commit
@ -1375,7 +1375,7 @@ namespace fileBackup {
tr->setOption(FDBTransactionOptions::LOCK_AWARE);
state Future<std::vector<std::pair<Key, bool>>> bounds = config.snapshotRangeDispatchMap().getRange(tr, beginKey, keyAfter(normalKeys.end), CLIENT_KNOBS->TOO_MANY);
wait(success(bounds) && taskBucket->keepRunning(tr, task) && store(tr->getReadVersion(), recentReadVersion));
wait(success(bounds) && taskBucket->keepRunning(tr, task) && store(recentReadVersion, tr->getReadVersion()));
if(bounds.get().empty())
break;
@ -1579,7 +1579,7 @@ namespace fileBackup {
endReads.push_back( config.snapshotRangeDispatchMap().get(tr, range.end));
}
wait(store(config.snapshotBatchSize().getOrThrow(tr), snapshotBatchSize.get())
wait(store(snapshotBatchSize.get(), config.snapshotBatchSize().getOrThrow(tr))
&& waitForAll(beginReads) && waitForAll(endReads) && taskBucket->keepRunning(tr, task));
// Snapshot batch size should be either oldBatchSize or newBatchSize. If new, this transaction is already done.
@ -1683,8 +1683,8 @@ namespace fileBackup {
state Key snapshotBatchFutureKey;
state Key snapshotBatchDispatchDoneKey;
wait( store(config.snapshotBatchFuture().getOrThrow(tr), snapshotBatchFutureKey)
&& store(config.snapshotBatchDispatchDoneKey().getOrThrow(tr), snapshotBatchDispatchDoneKey));
wait( store(snapshotBatchFutureKey, config.snapshotBatchFuture().getOrThrow(tr))
&& store(snapshotBatchDispatchDoneKey, config.snapshotBatchDispatchDoneKey().getOrThrow(tr)));
state Reference<TaskFuture> snapshotBatchFuture = futureBucket->unpack(snapshotBatchFutureKey);
state Reference<TaskFuture> snapshotBatchDispatchDoneFuture = futureBucket->unpack(snapshotBatchDispatchDoneKey);
@ -2010,11 +2010,11 @@ namespace fileBackup {
state Optional<std::string> tag;
state Optional<Version> latestSnapshotEndVersion;
wait(store(config.stopWhenDone().getOrThrow(tr), stopWhenDone)
&& store(config.getLatestRestorableVersion(tr), restorableVersion)
&& store(config.stateEnum().getOrThrow(tr), backupState)
&& store(config.tag().get(tr), tag)
&& store(config.latestSnapshotEndVersion().get(tr), latestSnapshotEndVersion));
wait(store(stopWhenDone, config.stopWhenDone().getOrThrow(tr))
&& store(restorableVersion, config.getLatestRestorableVersion(tr))
&& store(backupState, config.stateEnum().getOrThrow(tr))
&& store(tag, config.tag().get(tr))
&& store(latestSnapshotEndVersion, config.latestSnapshotEndVersion().get(tr)));
// If restorable, update the last restorable version for this tag
if(restorableVersion.present() && tag.present()) {
@ -2051,13 +2051,13 @@ namespace fileBackup {
state int priority = latestSnapshotEndVersion.present() ? 1 : 0;
// Add the initial log range task to read/copy the mutations and the next logs dispatch task which will run after this batch is done
Key _ = wait(BackupLogRangeTaskFunc::addTask(tr, taskBucket, task, priority, beginVersion, endVersion, TaskCompletionKey::joinWith(logDispatchBatchFuture)));
Key _ = wait(BackupLogsDispatchTask::addTask(tr, taskBucket, task, priority, beginVersion, endVersion, TaskCompletionKey::signal(onDone), logDispatchBatchFuture));
wait(success(BackupLogRangeTaskFunc::addTask(tr, taskBucket, task, priority, beginVersion, endVersion, TaskCompletionKey::joinWith(logDispatchBatchFuture))));
wait(success(BackupLogsDispatchTask::addTask(tr, taskBucket, task, priority, beginVersion, endVersion, TaskCompletionKey::signal(onDone), logDispatchBatchFuture)));
// Do not erase at the first time
if (prevBeginVersion > 0) {
state Key destUidValue = wait(config.destUidValue().getOrThrow(tr));
Key _ = wait(EraseLogRangeTaskFunc::addTask(tr, taskBucket, config.getUid(), TaskCompletionKey::joinWith(logDispatchBatchFuture), destUidValue, beginVersion));
wait(success(EraseLogRangeTaskFunc::addTask(tr, taskBucket, config.getUid(), TaskCompletionKey::joinWith(logDispatchBatchFuture), destUidValue, beginVersion)));
}
wait(taskBucket->finish(tr, task));
@ -2108,7 +2108,7 @@ namespace fileBackup {
tr->setOption(FDBTransactionOptions::COMMIT_ON_FIRST_PROXY);
state Key destUidValue = wait(backup.destUidValue().getOrThrow(tr));
Key _ = wait(EraseLogRangeTaskFunc::addTask(tr, taskBucket, backup.getUid(), TaskCompletionKey::noSignal(), destUidValue));
wait(success(EraseLogRangeTaskFunc::addTask(tr, taskBucket, backup.getUid(), TaskCompletionKey::noSignal(), destUidValue)));
backup.stateEnum().set(tr, EBackupState::STATE_COMPLETED);
@ -2161,7 +2161,7 @@ namespace fileBackup {
if(!bc) {
// Backup container must be present if we're still here
wait(store(config.backupContainer().getOrThrow(tr), bc));
wait(store(bc, config.backupContainer().getOrThrow(tr)));
}
BackupConfig::RangeFileMapT::PairsType rangeresults = wait(config.snapshotRangeFileMap().getRange(tr, startKey, {}, batchSize));
@ -2242,11 +2242,11 @@ namespace fileBackup {
state Optional<Version> firstSnapshotEndVersion;
state Optional<std::string> tag;
wait(store(config.stopWhenDone().getOrThrow(tr), stopWhenDone)
&& store(config.stateEnum().getOrThrow(tr), backupState)
&& store(config.getLatestRestorableVersion(tr), restorableVersion)
&& store(config.firstSnapshotEndVersion().get(tr), firstSnapshotEndVersion)
&& store(config.tag().get(tr), tag));
wait(store(stopWhenDone, config.stopWhenDone().getOrThrow(tr))
&& store(backupState, config.stateEnum().getOrThrow(tr))
&& store(restorableVersion, config.getLatestRestorableVersion(tr))
&& store(firstSnapshotEndVersion, config.firstSnapshotEndVersion().get(tr))
&& store(tag, config.tag().get(tr)));
// If restorable, update the last restorable version for this tag
if(restorableVersion.present() && tag.present()) {
@ -2348,12 +2348,12 @@ namespace fileBackup {
wait(config.initNewSnapshot(tr, 0));
// Using priority 1 for both of these to at least start both tasks soon
Key _ = wait(BackupSnapshotDispatchTask::addTask(tr, taskBucket, task, 1, TaskCompletionKey::joinWith(backupFinished)));
Key _ = wait(BackupLogsDispatchTask::addTask(tr, taskBucket, task, 1, 0, beginVersion, TaskCompletionKey::joinWith(backupFinished)));
wait(success(BackupSnapshotDispatchTask::addTask(tr, taskBucket, task, 1, TaskCompletionKey::joinWith(backupFinished))));
wait(success(BackupLogsDispatchTask::addTask(tr, taskBucket, task, 1, 0, beginVersion, TaskCompletionKey::joinWith(backupFinished))));
// If a clean stop is requested, the log and snapshot tasks will quit after the backup is restorable, then the following
// task will clean up and set the completed state.
Key _ = wait(FileBackupFinishedTask::addTask(tr, taskBucket, task, TaskCompletionKey::noSignal(), backupFinished));
wait(success(FileBackupFinishedTask::addTask(tr, taskBucket, task, TaskCompletionKey::noSignal(), backupFinished)));
wait(taskBucket->finish(tr, task));
return Void();
@ -2828,7 +2828,7 @@ namespace fileBackup {
state bool addingToExistingBatch = remainingInBatch > 0;
state Version restoreVersion;
wait(store(restore.restoreVersion().getOrThrow(tr), restoreVersion)
wait(store(restoreVersion, restore.restoreVersion().getOrThrow(tr))
&& checkTaskVersion(tr->getDatabase(), task, name, version));
// If not adding to an existing batch then update the apply mutations end version so the mutations from the
@ -2845,7 +2845,7 @@ namespace fileBackup {
if(!addingToExistingBatch && applyLag > (BUGGIFY ? 1 : CLIENT_KNOBS->CORE_VERSIONSPERSECOND * 300)) {
// Wait a small amount of time and then re-add this same task.
wait(delay(FLOW_KNOBS->PREVENT_FAST_SPIN_DELAY));
Key _ = wait(RestoreDispatchTaskFunc::addTask(tr, taskBucket, task, beginVersion, "", 0, batchSize, remainingInBatch));
wait(success(RestoreDispatchTaskFunc::addTask(tr, taskBucket, task, beginVersion, "", 0, batchSize, remainingInBatch)));
TraceEvent("FileRestoreDispatch")
.detail("RestoreUID", restore.getUid())
@ -2885,7 +2885,7 @@ namespace fileBackup {
// If adding to existing batch then blocks could be in progress so create a new Dispatch task that waits for them to finish
if(addingToExistingBatch) {
// Setting next begin to restoreVersion + 1 so that any files in the file map at the restore version won't be dispatched again.
Key _ = wait(RestoreDispatchTaskFunc::addTask(tr, taskBucket, task, restoreVersion + 1, "", 0, batchSize, 0, TaskCompletionKey::noSignal(), allPartsDone));
wait(success(RestoreDispatchTaskFunc::addTask(tr, taskBucket, task, restoreVersion + 1, "", 0, batchSize, 0, TaskCompletionKey::noSignal(), allPartsDone)));
TraceEvent("FileRestoreDispatch")
.detail("RestoreUID", restore.getUid())
@ -2899,7 +2899,7 @@ namespace fileBackup {
}
else if(beginVersion < restoreVersion) {
// If beginVersion is less than restoreVersion then do one more dispatch task to get there
Key _ = wait(RestoreDispatchTaskFunc::addTask(tr, taskBucket, task, restoreVersion, "", 0, batchSize));
wait(success(RestoreDispatchTaskFunc::addTask(tr, taskBucket, task, restoreVersion, "", 0, batchSize)));
TraceEvent("FileRestoreDispatch")
.detail("RestoreUID", restore.getUid())
@ -2913,7 +2913,7 @@ namespace fileBackup {
}
else if(applyLag == 0) {
// If apply lag is 0 then we are done so create the completion task
Key _ = wait(RestoreCompleteTaskFunc::addTask(tr, taskBucket, task, TaskCompletionKey::noSignal()));
wait(success(RestoreCompleteTaskFunc::addTask(tr, taskBucket, task, TaskCompletionKey::noSignal())));
TraceEvent("FileRestoreDispatch")
.detail("RestoreUID", restore.getUid())
@ -2926,7 +2926,7 @@ namespace fileBackup {
} else {
// Applying of mutations is not yet finished so wait a small amount of time and then re-add this same task.
wait(delay(FLOW_KNOBS->PREVENT_FAST_SPIN_DELAY));
Key _ = wait(RestoreDispatchTaskFunc::addTask(tr, taskBucket, task, beginVersion, "", 0, batchSize));
wait(success(RestoreDispatchTaskFunc::addTask(tr, taskBucket, task, beginVersion, "", 0, batchSize)));
TraceEvent("FileRestoreDispatch")
.detail("RestoreUID", restore.getUid())
@ -3320,7 +3320,7 @@ namespace fileBackup {
if(firstVersion == invalidVersion) {
wait(restore.logError(tr->getDatabase(), restore_missing_data(), "StartFullRestore: The backup had no data.", this));
std::string tag = wait(restore.tag().getD(tr));
ERestoreState _ = wait(abortRestore(tr, StringRef(tag)));
wait(success(abortRestore(tr, StringRef(tag))));
return Void();
}
@ -3331,7 +3331,7 @@ namespace fileBackup {
restore.setApplyEndVersion(tr, firstVersion);
// Apply range data and log data in order
Key _ = wait(RestoreDispatchTaskFunc::addTask(tr, taskBucket, task, 0, "", 0, CLIENT_KNOBS->RESTORE_DISPATCH_BATCH_SIZE));
wait(success(RestoreDispatchTaskFunc::addTask(tr, taskBucket, task, 0, "", 0, CLIENT_KNOBS->RESTORE_DISPATCH_BATCH_SIZE)));
wait(taskBucket->finish(tr, task));
return Void();
@ -3672,7 +3672,7 @@ public:
state Key destUidValue = wait(config.destUidValue().getOrThrow(tr));
state Version endVersion = wait(tr->getReadVersion());
Key _ = wait(fileBackup::EraseLogRangeTaskFunc::addTask(tr, backupAgent->taskBucket, config.getUid(), TaskCompletionKey::noSignal(), destUidValue));
wait(success(fileBackup::EraseLogRangeTaskFunc::addTask(tr, backupAgent->taskBucket, config.getUid(), TaskCompletionKey::noSignal(), destUidValue)));
config.stateEnum().set(tr, EBackupState::STATE_COMPLETED);
@ -3712,7 +3712,7 @@ public:
// Cancel backup task through tag
wait(tag.cancel(tr));
Key _ = wait(fileBackup::EraseLogRangeTaskFunc::addTask(tr, backupAgent->taskBucket, config.getUid(), TaskCompletionKey::noSignal(), destUidValue));
wait(success(fileBackup::EraseLogRangeTaskFunc::addTask(tr, backupAgent->taskBucket, config.getUid(), TaskCompletionKey::noSignal(), destUidValue)));
config.stateEnum().set(tr, EBackupState::STATE_ABORTED);
@ -3750,9 +3750,9 @@ public:
state Optional<Version> latestRestorableVersion;
state Version recentReadVersion;
wait( store(config.getLatestRestorableVersion(tr), latestRestorableVersion)
&& store(config.backupContainer().getOrThrow(tr), bc)
&& store(tr->getReadVersion(), recentReadVersion)
wait( store(latestRestorableVersion, config.getLatestRestorableVersion(tr))
&& store(bc, config.backupContainer().getOrThrow(tr))
&& store(recentReadVersion, tr->getReadVersion())
);
bool snapshotProgress = false;
@ -3791,20 +3791,20 @@ public:
state Optional<int64_t> snapshotTargetEndVersionTimestamp;
state bool stopWhenDone;
wait( store(config.snapshotBeginVersion().getOrThrow(tr), snapshotBeginVersion)
&& store(config.snapshotTargetEndVersion().getOrThrow(tr), snapshotTargetEndVersion)
&& store(config.snapshotIntervalSeconds().getOrThrow(tr), snapshotInterval)
&& store(config.logBytesWritten().get(tr), logBytesWritten)
&& store(config.rangeBytesWritten().get(tr), rangeBytesWritten)
&& store(config.latestLogEndVersion().get(tr), latestLogEndVersion)
&& store(config.latestSnapshotEndVersion().get(tr), latestSnapshotEndVersion)
&& store(config.stopWhenDone().getOrThrow(tr), stopWhenDone)
wait( store(snapshotBeginVersion, config.snapshotBeginVersion().getOrThrow(tr))
&& store(snapshotTargetEndVersion, config.snapshotTargetEndVersion().getOrThrow(tr))
&& store(snapshotInterval, config.snapshotIntervalSeconds().getOrThrow(tr))
&& store(logBytesWritten, config.logBytesWritten().get(tr))
&& store(rangeBytesWritten, config.rangeBytesWritten().get(tr))
&& store(latestLogEndVersion, config.latestLogEndVersion().get(tr))
&& store(latestSnapshotEndVersion, config.latestSnapshotEndVersion().get(tr))
&& store(stopWhenDone, config.stopWhenDone().getOrThrow(tr))
);
wait( store(getTimestampFromVersion(latestSnapshotEndVersion, tr), latestSnapshotEndVersionTimestamp)
&& store(getTimestampFromVersion(latestLogEndVersion, tr), latestLogEndVersionTimestamp)
&& store(timeKeeperEpochsFromVersion(snapshotBeginVersion, tr), snapshotBeginVersionTimestamp)
&& store(timeKeeperEpochsFromVersion(snapshotTargetEndVersion, tr), snapshotTargetEndVersionTimestamp)
wait( store(latestSnapshotEndVersionTimestamp, getTimestampFromVersion(latestSnapshotEndVersion, tr))
&& store(latestLogEndVersionTimestamp, getTimestampFromVersion(latestLogEndVersion, tr))
&& store(snapshotBeginVersionTimestamp, timeKeeperEpochsFromVersion(snapshotBeginVersion, tr))
&& store(snapshotTargetEndVersionTimestamp, timeKeeperEpochsFromVersion(snapshotTargetEndVersion, tr))
);
statusText += format("Snapshot interval is %lld seconds. ", snapshotInterval);
@ -4010,7 +4010,7 @@ public:
}
}
int _ = wait( waitBackup(backupAgent, cx, tagName.toString(), true) );
wait(success( waitBackup(backupAgent, cx, tagName.toString(), true) ));
TraceEvent("AS_BackupStopped");
ryw_tr->reset();

View File

@ -124,7 +124,7 @@ namespace HTTP {
// Next search will start at the current end of the buffer - delim size + 1
if(sPos >= lookBack)
sPos -= lookBack;
int _ = wait(read_into_string(conn, buf, CLIENT_KNOBS->HTTP_READ_SIZE));
wait(success(read_into_string(conn, buf, CLIENT_KNOBS->HTTP_READ_SIZE)));
}
}
@ -132,7 +132,7 @@ namespace HTTP {
ACTOR Future<Void> read_fixed_into_string(Reference<IConnection> conn, int len, std::string *buf, size_t pos) {
state int stop_size = pos + len;
while(buf->size() < stop_size)
int _ = wait(read_into_string(conn, buf, CLIENT_KNOBS->HTTP_READ_SIZE));
wait(success(read_into_string(conn, buf, CLIENT_KNOBS->HTTP_READ_SIZE)));
return Void();
}

View File

@ -94,7 +94,7 @@ public:
virtual void runNetwork() = 0;
virtual void stopNetwork() = 0;
virtual ThreadFuture<Reference<IDatabase>> createDatabase(const char *clusterFilePath) = 0;
virtual Reference<IDatabase> createDatabase(const char *clusterFilePath) = 0;
virtual void addNetworkThreadCompletionHook(void (*hook)(void*), void *hookParameter) = 0;
};

View File

@ -287,6 +287,7 @@ ACTOR Future<ConfigurationResult::Type> changeConfig( Database cx, std::map<std:
}
state Future<Void> tooLong = delay(4.5);
state std::string versionKey = g_random->randomUniqueID().toString();
loop {
try {
tr.setOption( FDBTransactionOptions::PRIORITY_SYSTEM_IMMEDIATE );
@ -432,6 +433,9 @@ ACTOR Future<ConfigurationResult::Type> changeConfig( Database cx, std::map<std:
for(auto i=m.begin(); i!=m.end(); ++i)
tr.set( StringRef(i->first), StringRef(i->second) );
tr.addReadConflictRange( singleKeyRange(configVersionKey) );
tr.set( configVersionKey, versionKey );
wait( tr.commit() );
break;
} catch (Error& e) {
@ -698,6 +702,7 @@ ConfigureAutoResult parseConfig( StatusObject const& status ) {
ACTOR Future<ConfigurationResult::Type> autoConfig( Database cx, ConfigureAutoResult conf ) {
state Transaction tr(cx);
state std::string versionKey = g_random->randomUniqueID().toString();
if(!conf.address_class.size())
return ConfigurationResult::INCOMPLETE_CONFIGURATION; //FIXME: correct return type
@ -747,6 +752,9 @@ ACTOR Future<ConfigurationResult::Type> autoConfig( Database cx, ConfigureAutoRe
tr.set(kv.first, kv.second);
}
tr.addReadConflictRange( singleKeyRange(configVersionKey) );
tr.set( configVersionKey, versionKey );
wait( tr.commit() );
return ConfigurationResult::SUCCESS;
} catch( Error &e ) {
@ -1125,6 +1133,7 @@ Reference<IQuorumChange> autoQuorumChange( int desired ) { return Reference<IQuo
ACTOR Future<Void> excludeServers( Database cx, vector<AddressExclusion> servers ) {
state Transaction tr(cx);
state std::string versionKey = g_random->randomUniqueID().toString();
state std::string excludeVersionKey = g_random->randomUniqueID().toString();
loop {
try {
tr.setOption( FDBTransactionOptions::ACCESS_SYSTEM_KEYS );
@ -1132,7 +1141,9 @@ ACTOR Future<Void> excludeServers( Database cx, vector<AddressExclusion> servers
tr.setOption( FDBTransactionOptions::LOCK_AWARE );
tr.addReadConflictRange( singleKeyRange(excludedServersVersionKey) ); //To conflict with parallel includeServers
tr.set( excludedServersVersionKey, versionKey );
tr.addReadConflictRange( singleKeyRange(configVersionKey) );
tr.set( configVersionKey, versionKey );
tr.set( excludedServersVersionKey, excludeVersionKey );
for(auto& s : servers)
tr.set( encodeExcludedServersKey(s), StringRef() );
@ -1150,6 +1161,7 @@ ACTOR Future<Void> includeServers( Database cx, vector<AddressExclusion> servers
state bool includeAll = false;
state Transaction tr(cx);
state std::string versionKey = g_random->randomUniqueID().toString();
state std::string excludeVersionKey = g_random->randomUniqueID().toString();
loop {
try {
tr.setOption( FDBTransactionOptions::ACCESS_SYSTEM_KEYS );
@ -1159,15 +1171,27 @@ ACTOR Future<Void> includeServers( Database cx, vector<AddressExclusion> servers
// includeServers might be used in an emergency transaction, so make sure it is retry-self-conflicting and CAUSAL_WRITE_RISKY
tr.setOption( FDBTransactionOptions::CAUSAL_WRITE_RISKY );
tr.addReadConflictRange( singleKeyRange(excludedServersVersionKey) );
tr.addReadConflictRange( singleKeyRange(configVersionKey) );
tr.set( configVersionKey, versionKey );
tr.set( excludedServersVersionKey, excludeVersionKey );
tr.set( excludedServersVersionKey, versionKey );
for(auto& s : servers ) {
if (!s.isValid()) {
tr.clear( excludedServersKeys );
includeAll = true;
} else if (s.isWholeMachine()) {
// Eliminate both any ip-level exclusion (1.2.3.4) and any port-level exclusions (1.2.3.4:5)
tr.clear( KeyRangeRef( encodeExcludedServersKey(s), encodeExcludedServersKey(s) + char(':'+1) ) );
// Eliminate both any ip-level exclusion (1.2.3.4) and any
// port-level exclusions (1.2.3.4:5)
// The range ['IP', 'IP;'] was originally deleted. ';' is
// char(':' + 1). This does not work, as for all
// x between 0 and 9, 'IPx' will also be in this range.
//
// This is why we now make two clears: first only of the ip
// address, the second will delete all ports.
auto addr = encodeExcludedServersKey(s);
tr.clear(singleKeyRange(addr));
tr.clear(KeyRangeRef(addr + ':', addr + char(':' + 1)));
} else {
tr.clear( encodeExcludedServersKey(s) );
}
@ -1526,6 +1550,12 @@ ACTOR Future<Void> forceRecovery( Reference<ClusterConnectionFile> clusterFile,
when ( wait( clusterInterface->onChange() )) {}
}
}
<<<<<<< HEAD
=======
wait(success(clusterInterface->get().get().forceRecovery.tryGetReply( ForceRecoveryRequest() )));
return Void();
>>>>>>> master
}
ACTOR Future<Void> waitForPrimaryDC( Database cx, StringRef dcId ) {
@ -1566,120 +1596,121 @@ void schemaCoverage( std::string const& spath, bool covered ) {
}
}
bool schemaMatch( StatusObject const schema, StatusObject const result, std::string& errorStr, Severity sev, bool checkCoverage, std::string path, std::string schema_path ) {
bool schemaMatch( json_spirit::mValue const& schemaValue, json_spirit::mValue const& resultValue, std::string& errorStr, Severity sev, bool checkCoverage, std::string path, std::string schemaPath ) {
// Returns true if everything in `result` is permitted by `schema`
// Really this should recurse on "values" rather than "objects"?
bool ok = true;
try {
for(auto& rkv : result) {
auto& key = rkv.first;
auto& rv = rkv.second;
std::string kpath = path + "." + key;
std::string spath = schema_path + "." + key;
if(normJSONType(schemaValue.type()) != normJSONType(resultValue.type())) {
errorStr += format("ERROR: Incorrect value type for key `%s'\n", path.c_str());
TraceEvent(sev, "SchemaMismatch").detail("Path", path).detail("SchemaType", schemaValue.type()).detail("ValueType", resultValue.type());
return false;
}
if(checkCoverage) schemaCoverage(spath);
if(resultValue.type() == json_spirit::obj_type) {
auto& result = resultValue.get_obj();
auto& schema = schemaValue.get_obj();
if (!schema.count(key)) {
errorStr += format("ERROR: Unknown key `%s'\n", kpath.c_str());
TraceEvent(sev, "SchemaMismatch").detail("Path", kpath).detail("SchemaPath", spath);
ok = false;
continue;
}
auto& sv = schema.at(key);
for(auto& rkv : result) {
auto& key = rkv.first;
auto& rv = rkv.second;
std::string kpath = path + "." + key;
std::string spath = schemaPath + "." + key;
if (sv.type() == json_spirit::obj_type && sv.get_obj().count("$enum")) {
auto& enum_values = sv.get_obj().at("$enum").get_array();
if(checkCoverage) {
schemaCoverage(spath);
}
bool any_match = false;
for(auto& enum_item : enum_values)
if (enum_item == rv) {
any_match = true;
if(checkCoverage) schemaCoverage(spath + ".$enum." + enum_item.get_str());
break;
if(!schema.count(key)) {
errorStr += format("ERROR: Unknown key `%s'\n", kpath.c_str());
TraceEvent(sev, "SchemaMismatch").detail("Path", kpath).detail("SchemaPath", spath);
ok = false;
continue;
}
auto& sv = schema.at(key);
if(sv.type() == json_spirit::obj_type && sv.get_obj().count("$enum")) {
auto& enum_values = sv.get_obj().at("$enum").get_array();
bool any_match = false;
for(auto& enum_item : enum_values)
if(enum_item == rv) {
any_match = true;
if(checkCoverage) {
schemaCoverage(spath + ".$enum." + enum_item.get_str());
}
break;
}
if(!any_match) {
errorStr += format("ERROR: Unknown value `%s' for key `%s'\n", json_spirit::write_string(rv).c_str(), kpath.c_str());
TraceEvent(sev, "SchemaMismatch").detail("Path", kpath).detail("SchemaEnumItems", enum_values.size()).detail("Value", json_spirit::write_string(rv));
if(checkCoverage) {
schemaCoverage(spath + ".$enum." + json_spirit::write_string(rv));
}
ok = false;
}
if (!any_match) {
errorStr += format("ERROR: Unknown value `%s' for key `%s'\n", json_spirit::write_string(rv).c_str(), kpath.c_str());
TraceEvent(sev, "SchemaMismatch").detail("Path", kpath).detail("SchemaEnumItems", enum_values.size()).detail("Value", json_spirit::write_string(rv));
if(checkCoverage) schemaCoverage(spath + ".$enum." + json_spirit::write_string(rv));
ok = false;
}
} else if (sv.type() == json_spirit::obj_type && sv.get_obj().count("$map")) {
if (rv.type() != json_spirit::obj_type) {
errorStr += format("ERROR: Expected an object as the value for key `%s'\n", kpath.c_str());
TraceEvent(sev, "SchemaMismatch").detail("Path", kpath).detail("SchemaType", sv.type()).detail("ValueType", rv.type());
ok = false;
continue;
}
if(sv.get_obj().at("$map").type() != json_spirit::obj_type) {
continue;
}
auto& schema_obj = sv.get_obj().at("$map").get_obj();
auto& value_obj = rv.get_obj();
if(checkCoverage) schemaCoverage(spath + ".$map");
for(auto& value_pair : value_obj) {
auto vpath = kpath + "[" + value_pair.first + "]";
auto upath = spath + ".$map";
if (value_pair.second.type() != json_spirit::obj_type) {
errorStr += format("ERROR: Expected an object for `%s'\n", vpath.c_str());
TraceEvent(sev, "SchemaMismatch").detail("Path", vpath).detail("ValueType", value_pair.second.type());
} else if(sv.type() == json_spirit::obj_type && sv.get_obj().count("$map")) {
if(rv.type() != json_spirit::obj_type) {
errorStr += format("ERROR: Expected an object as the value for key `%s'\n", kpath.c_str());
TraceEvent(sev, "SchemaMismatch").detail("Path", kpath).detail("SchemaType", sv.type()).detail("ValueType", rv.type());
ok = false;
continue;
}
if (!schemaMatch(schema_obj, value_pair.second.get_obj(), errorStr, sev, checkCoverage, vpath, upath))
ok = false;
}
} else {
// The schema entry isn't an operator, so it asserts a type and (depending on the type) recursive schema definition
if (normJSONType(sv.type()) != normJSONType(rv.type())) {
errorStr += format("ERROR: Incorrect value type for key `%s'\n", kpath.c_str());
TraceEvent(sev, "SchemaMismatch").detail("Path", kpath).detail("SchemaType", sv.type()).detail("ValueType", rv.type());
ok = false;
continue;
}
if (rv.type() == json_spirit::array_type) {
auto& value_array = rv.get_array();
auto& schema_array = sv.get_array();
if (!schema_array.size()) {
// An empty schema array means that the value array is required to be empty
if (value_array.size()) {
errorStr += format("ERROR: Expected an empty array for key `%s'\n", kpath.c_str());
TraceEvent(sev, "SchemaMismatch").detail("Path", kpath).detail("SchemaSize", schema_array.size()).detail("ValueSize", value_array.size());
if(sv.get_obj().at("$map").type() != json_spirit::obj_type) {
continue;
}
auto& schemaVal = sv.get_obj().at("$map");
auto& valueObj = rv.get_obj();
if(checkCoverage) {
schemaCoverage(spath + ".$map");
}
for(auto& valuePair : valueObj) {
auto vpath = kpath + "[" + valuePair.first + "]";
auto upath = spath + ".$map";
if (valuePair.second.type() != json_spirit::obj_type) {
errorStr += format("ERROR: Expected an object for `%s'\n", vpath.c_str());
TraceEvent(sev, "SchemaMismatch").detail("Path", vpath).detail("ValueType", valuePair.second.type());
ok = false;
continue;
}
} else if (schema_array.size() == 1 && schema_array[0].type() == json_spirit::obj_type) {
// A one item schema array means that all items in the value must match the first item in the schema
auto& schema_obj = schema_array[0].get_obj();
int index = 0;
for(auto &value_item : value_array) {
if (value_item.type() != json_spirit::obj_type) {
errorStr += format("ERROR: Expected all array elements to be objects for key `%s'\n", kpath.c_str());
TraceEvent(sev, "SchemaMismatch").detail("Path", kpath + format("[%d]",index)).detail("ValueType", value_item.type());
ok = false;
continue;
}
if (!schemaMatch(schema_obj, value_item.get_obj(), errorStr, sev, checkCoverage, kpath + format("[%d]", index), spath + "[0]"))
ok = false;
index++;
if(!schemaMatch(schemaVal, valuePair.second, errorStr, sev, checkCoverage, vpath, upath)) {
ok = false;
}
} else
ASSERT(false); // Schema doesn't make sense
} else if (rv.type() == json_spirit::obj_type) {
auto& schema_obj = sv.get_obj();
auto& value_obj = rv.get_obj();
if (!schemaMatch(schema_obj, value_obj, errorStr, sev, checkCoverage, kpath, spath))
}
} else {
if(!schemaMatch(sv, rv, errorStr, sev, checkCoverage, kpath, spath)) {
ok = false;
}
}
}
} else if(resultValue.type() == json_spirit::array_type) {
auto& valueArray = resultValue.get_array();
auto& schemaArray = schemaValue.get_array();
if(!schemaArray.size()) {
// An empty schema array means that the value array is required to be empty
if(valueArray.size()) {
errorStr += format("ERROR: Expected an empty array for key `%s'\n", path.c_str());
TraceEvent(sev, "SchemaMismatch").detail("Path", path).detail("SchemaSize", schemaArray.size()).detail("ValueSize", valueArray.size());
return false;
}
} else if(schemaArray.size() == 1) {
// A one item schema array means that all items in the value must match the first item in the schema
int index = 0;
for(auto &valueItem : valueArray) {
if(!schemaMatch(schemaArray[0], valueItem, errorStr, sev, checkCoverage, path + format("[%d]", index), schemaPath + "[0]")) {
ok = false;
}
index++;
}
} else {
ASSERT(false); // Schema doesn't make sense
}
}
return ok;
} catch (std::exception& e) {
TraceEvent(SevError, "SchemaMatchException").detail("What", e.what()).detail("Path", path).detail("SchemaPath", schema_path);
TraceEvent(SevError, "SchemaMatchException").detail("What", e.what()).detail("Path", path).detail("SchemaPath", schemaPath);
throw unknown_error();
}
}

View File

@ -177,6 +177,6 @@ Future<Void> waitForPrimaryDC( Database const& cx, StringRef const& dcId );
Future<std::vector<NetworkAddress>> getCoordinators( Database const& cx );
void schemaCoverage( std::string const& spath, bool covered=true );
bool schemaMatch( StatusObject const schema, StatusObject const result, std::string& errorStr, Severity sev=SevError, bool checkCoverage=false, std::string path = std::string(), std::string schema_path = std::string() );
bool schemaMatch( json_spirit::mValue const& schema, json_spirit::mValue const& result, std::string& errorStr, Severity sev=SevError, bool checkCoverage=false, std::string path = std::string(), std::string schema_path = std::string() );
#endif

View File

@ -1,3 +1,4 @@
/*
* MasterProxyInterface.h
*
@ -26,6 +27,8 @@
#include "fdbclient/StorageServerInterface.h"
#include "fdbclient/CommitTransaction.h"
#include "flow/Stats.h"
struct MasterProxyInterface {
enum { LocationAwareLoadBalance = 1 };
enum { AlwaysFresh = 1 };
@ -74,7 +77,7 @@ struct CommitID {
CommitID( Version version, uint16_t txnBatchId ) : version(version), txnBatchId(txnBatchId) {}
};
struct CommitTransactionRequest {
struct CommitTransactionRequest : TimedRequest {
enum {
FLAG_IS_LOCK_AWARE = 0x1,
FLAG_FIRST_IN_BATCH = 0x2
@ -120,7 +123,7 @@ struct GetReadVersionReply {
}
};
struct GetReadVersionRequest {
struct GetReadVersionRequest : TimedRequest {
enum {
PRIORITY_SYSTEM_IMMEDIATE = 15 << 24, // Highest possible priority, always executed even if writes are otherwise blocked
PRIORITY_DEFAULT = 8 << 24,

View File

@ -212,6 +212,21 @@ void DLTransaction::reset() {
}
// DLDatabase
DLDatabase::DLDatabase(Reference<FdbCApi> api, ThreadFuture<FdbCApi::FDBDatabase*> dbFuture) : api(api), db(nullptr) {
ready = mapThreadFuture<FdbCApi::FDBDatabase*, Void>(dbFuture, [this](ErrorOr<FdbCApi::FDBDatabase*> db){
if(db.isError()) {
return ErrorOr<Void>(db.getError());
}
this->db = db.get();
return ErrorOr<Void>(Void());
});
}
ThreadFuture<Void> DLDatabase::onReady() {
return ready;
}
Reference<ITransaction> DLDatabase::createTransaction() {
FdbCApi::FDBTransaction *tr;
api->databaseCreateTransaction(db, &tr);
@ -251,11 +266,7 @@ void DLApi::init() {
loadClientFunction(&api->setupNetwork, lib, fdbCPath, "fdb_setup_network");
loadClientFunction(&api->runNetwork, lib, fdbCPath, "fdb_run_network");
loadClientFunction(&api->stopNetwork, lib, fdbCPath, "fdb_stop_network");
loadClientFunction(&api->createCluster, lib, fdbCPath, "fdb_create_cluster");
loadClientFunction(&api->clusterCreateDatabase, lib, fdbCPath, "fdb_cluster_create_database");
loadClientFunction(&api->clusterSetOption, lib, fdbCPath, "fdb_cluster_set_option");
loadClientFunction(&api->clusterDestroy, lib, fdbCPath, "fdb_cluster_destroy");
loadClientFunction(&api->createDatabase, lib, fdbCPath, "fdb_create_database", headerVersion >= 610);
loadClientFunction(&api->databaseCreateTransaction, lib, fdbCPath, "fdb_database_create_transaction");
loadClientFunction(&api->databaseSetOption, lib, fdbCPath, "fdb_database_set_option");
@ -282,7 +293,6 @@ void DLApi::init() {
loadClientFunction(&api->transactionCancel, lib, fdbCPath, "fdb_transaction_cancel");
loadClientFunction(&api->transactionAddConflictRange, lib, fdbCPath, "fdb_transaction_add_conflict_range");
loadClientFunction(&api->futureGetCluster, lib, fdbCPath, "fdb_future_get_cluster");
loadClientFunction(&api->futureGetDatabase, lib, fdbCPath, "fdb_future_get_database");
loadClientFunction(&api->futureGetVersion, lib, fdbCPath, "fdb_future_get_version");
loadClientFunction(&api->futureGetError, lib, fdbCPath, "fdb_future_get_error");
@ -293,6 +303,11 @@ void DLApi::init() {
loadClientFunction(&api->futureSetCallback, lib, fdbCPath, "fdb_future_set_callback");
loadClientFunction(&api->futureCancel, lib, fdbCPath, "fdb_future_cancel");
loadClientFunction(&api->futureDestroy, lib, fdbCPath, "fdb_future_destroy");
loadClientFunction(&api->createCluster, lib, fdbCPath, "fdb_create_cluster", headerVersion < 610);
loadClientFunction(&api->clusterCreateDatabase, lib, fdbCPath, "fdb_cluster_create_database", headerVersion < 610);
loadClientFunction(&api->clusterDestroy, lib, fdbCPath, "fdb_cluster_destroy", headerVersion < 610);
loadClientFunction(&api->futureGetCluster, lib, fdbCPath, "fdb_future_get_cluster", headerVersion < 610);
}
void DLApi::selectApiVersion(int apiVersion) {
@ -346,7 +361,7 @@ void DLApi::stopNetwork() {
}
}
ThreadFuture<Reference<IDatabase>> DLApi::createDatabase(const char *clusterFilePath) {
Reference<IDatabase> DLApi::createDatabase609(const char *clusterFilePath) {
FdbCApi::FDBFuture *f = api->createCluster(clusterFilePath);
auto clusterFuture = toThreadFuture<FdbCApi::FDBCluster*>(api, f, [](FdbCApi::FDBFuture *f, FdbCApi *api) {
@ -356,22 +371,35 @@ ThreadFuture<Reference<IDatabase>> DLApi::createDatabase(const char *clusterFile
});
Reference<FdbCApi> innerApi = api;
return flatMapThreadFuture<FdbCApi::FDBCluster*, Reference<IDatabase>>(clusterFuture, [innerApi](ErrorOr<FdbCApi::FDBCluster*> cluster) {
auto dbFuture = flatMapThreadFuture<FdbCApi::FDBCluster*, FdbCApi::FDBDatabase*>(clusterFuture, [innerApi](ErrorOr<FdbCApi::FDBCluster*> cluster) {
if(cluster.isError()) {
return ErrorOr<ThreadFuture<Reference<IDatabase>>>(cluster.getError());
return ErrorOr<ThreadFuture<FdbCApi::FDBDatabase*>>(cluster.getError());
}
auto dbFuture = toThreadFuture<Reference<IDatabase>>(innerApi, innerApi->clusterCreateDatabase(cluster.get(), (uint8_t*)"DB", 2), [](FdbCApi::FDBFuture *f, FdbCApi *api) {
auto innerDbFuture = toThreadFuture<FdbCApi::FDBDatabase*>(innerApi, innerApi->clusterCreateDatabase(cluster.get(), (uint8_t*)"DB", 2), [](FdbCApi::FDBFuture *f, FdbCApi *api) {
FdbCApi::FDBDatabase *db;
api->futureGetDatabase(f, &db);
return Reference<IDatabase>(new DLDatabase(Reference<FdbCApi>::addRef(api), db));
return db;
});
return ErrorOr<ThreadFuture<Reference<IDatabase>>>(mapThreadFuture<Reference<IDatabase>, Reference<IDatabase>>(dbFuture, [cluster, innerApi](ErrorOr<Reference<IDatabase>> db) {
return ErrorOr<ThreadFuture<FdbCApi::FDBDatabase*>>(mapThreadFuture<FdbCApi::FDBDatabase*, FdbCApi::FDBDatabase*>(innerDbFuture, [cluster, innerApi](ErrorOr<FdbCApi::FDBDatabase*> db) {
innerApi->clusterDestroy(cluster.get());
return db;
}));
});
return Reference<DLDatabase>(new DLDatabase(api, dbFuture));
}
Reference<IDatabase> DLApi::createDatabase(const char *clusterFilePath) {
if(headerVersion >= 610) {
FdbCApi::FDBDatabase *db;
api->createDatabase(clusterFilePath, &db);
return Reference<IDatabase>(new DLDatabase(api, db));
}
else {
return DLApi::createDatabase609(clusterFilePath);
}
}
void DLApi::addNetworkThreadCompletionHook(void (*hook)(void*), void *hookParameter) {
@ -634,28 +662,32 @@ void MultiVersionDatabase::Connector::connect() {
connectionFuture.cancel();
}
ThreadFuture<Reference<IDatabase>> dbFuture = client->api->createDatabase(clusterFilePath.c_str());
connectionFuture = flatMapThreadFuture<Reference<IDatabase>, Void>(dbFuture, [this](ErrorOr<Reference<IDatabase>> db) {
if(db.isError()) {
return ErrorOr<ThreadFuture<Void>>(db.getError());
}
else {
candidateDatabase = db.get();
tr = db.get()->createTransaction();
auto versionFuture = mapThreadFuture<Version, Void>(tr->getReadVersion(), [this](ErrorOr<Version> v) {
// If the version attempt returns an error, we regard that as a connection (except operation_cancelled)
if(v.isError() && v.getError().code() == error_code_operation_cancelled) {
return ErrorOr<Void>(v.getError());
}
else {
return ErrorOr<Void>(Void());
}
});
candidateDatabase = client->api->createDatabase(clusterFilePath.c_str());
if(client->external) {
connectionFuture = candidateDatabase.castTo<DLDatabase>()->onReady();
}
else {
connectionFuture = ThreadFuture<Void>(Void());
}
return ErrorOr<ThreadFuture<Void>>(versionFuture);
connectionFuture = flatMapThreadFuture<Void, Void>(connectionFuture, [this](ErrorOr<Void> ready) {
if(ready.isError()) {
return ErrorOr<ThreadFuture<Void>>(ready.getError());
}
tr = candidateDatabase->createTransaction();
return ErrorOr<ThreadFuture<Void>>(mapThreadFuture<Version, Void>(tr->getReadVersion(), [this](ErrorOr<Version> v) {
// If the version attempt returns an error, we regard that as a connection (except operation_cancelled)
if(v.isError() && v.getError().code() == error_code_operation_cancelled) {
return ErrorOr<Void>(v.getError());
}
else {
return ErrorOr<Void>(Void());
}
}));
});
int userParam;
connectionFuture.callOrSetAsCallback(this, userParam, 0);
}
@ -1113,11 +1145,11 @@ void MultiVersionApi::addNetworkThreadCompletionHook(void (*hook)(void*), void *
}
}
ThreadFuture<Reference<IDatabase>> MultiVersionApi::createDatabase(const char *clusterFilePath) {
Reference<IDatabase> MultiVersionApi::createDatabase(const char *clusterFilePath) {
lock.enter();
if(!networkSetup) {
lock.leave();
return network_not_setup();
throw network_not_setup();
}
lock.leave();
@ -1126,21 +1158,15 @@ ThreadFuture<Reference<IDatabase>> MultiVersionApi::createDatabase(const char *c
return Reference<IDatabase>(new MultiVersionDatabase(this, clusterFile, Reference<IDatabase>()));
}
auto databaseFuture = localClient->api->createDatabase(clusterFilePath);
auto db = localClient->api->createDatabase(clusterFilePath);
if(bypassMultiClientApi) {
return databaseFuture;
return db;
}
else {
for(auto it : externalClients) {
TraceEvent("CreatingDatabaseOnExternalClient").detail("LibraryPath", it.second->libPath).detail("Failed", it.second->failed);
}
return mapThreadFuture<Reference<IDatabase>, Reference<IDatabase>>(databaseFuture, [this, clusterFile](ErrorOr<Reference<IDatabase>> database) {
if(database.isError()) {
return database;
}
return ErrorOr<Reference<IDatabase>>(Reference<IDatabase>(new MultiVersionDatabase(this, clusterFile, database.get())));
});
return Reference<IDatabase>(new MultiVersionDatabase(this, clusterFile, db));
}
}

View File

@ -55,12 +55,7 @@ struct FdbCApi : public ThreadSafeReferenceCounted<FdbCApi> {
fdb_error_t (*setupNetwork)();
fdb_error_t (*runNetwork)();
fdb_error_t (*stopNetwork)();
FDBFuture* (*createCluster)(const char *clusterFilePath);
//Cluster
FDBFuture* (*clusterCreateDatabase)(FDBCluster *cluster, uint8_t *dbName, int dbNameLength);
fdb_error_t (*clusterSetOption)(FDBCluster *cluster, FDBClusterOptions::Option option, uint8_t const *value, int valueLength);
void (*clusterDestroy)(FDBCluster *cluster);
fdb_error_t* (*createDatabase)(const char *clusterFilePath, FDBDatabase **db);
//Database
fdb_error_t (*databaseCreateTransaction)(FDBDatabase *database, FDBTransaction **tr);
@ -98,7 +93,6 @@ struct FdbCApi : public ThreadSafeReferenceCounted<FdbCApi> {
uint8_t const *endKeyName, int endKeyNameLength, FDBConflictRangeTypes::Option);
//Future
fdb_error_t (*futureGetCluster)(FDBFuture *f, FDBCluster **outCluster);
fdb_error_t (*futureGetDatabase)(FDBFuture *f, FDBDatabase **outDb);
fdb_error_t (*futureGetVersion)(FDBFuture *f, int64_t *outVersion);
fdb_error_t (*futureGetError)(FDBFuture *f);
@ -109,6 +103,12 @@ struct FdbCApi : public ThreadSafeReferenceCounted<FdbCApi> {
fdb_error_t (*futureSetCallback)(FDBFuture *f, FDBCallback callback, void *callback_parameter);
void (*futureCancel)(FDBFuture *f);
void (*futureDestroy)(FDBFuture *f);
//Legacy Support
FDBFuture* (*createCluster)(const char *clusterFilePath);
FDBFuture* (*clusterCreateDatabase)(FDBCluster *cluster, uint8_t *dbName, int dbNameLength);
void (*clusterDestroy)(FDBCluster *cluster);
fdb_error_t (*futureGetCluster)(FDBFuture *f, FDBCluster **outCluster);
};
class DLTransaction : public ITransaction, ThreadSafeReferenceCounted<DLTransaction> {
@ -159,9 +159,12 @@ private:
class DLDatabase : public IDatabase, ThreadSafeReferenceCounted<DLDatabase> {
public:
DLDatabase(Reference<FdbCApi> api, FdbCApi::FDBDatabase *db) : api(api), db(db) {}
DLDatabase(Reference<FdbCApi> api, FdbCApi::FDBDatabase *db) : api(api), db(db), ready(Void()) {}
DLDatabase(Reference<FdbCApi> api, ThreadFuture<FdbCApi::FDBDatabase*> dbFuture);
~DLDatabase() { api->databaseDestroy(db); }
ThreadFuture<Void> onReady();
Reference<ITransaction> createTransaction();
void setOption(FDBDatabaseOptions::Option option, Optional<StringRef> value = Optional<StringRef>());
@ -170,7 +173,8 @@ public:
private:
const Reference<FdbCApi> api;
FdbCApi::FDBDatabase* const db;
FdbCApi::FDBDatabase* db; // Always set if API version >= 610, otherwise guaranteed to be set when onReady future is set
ThreadFuture<Void> ready;
};
class DLApi : public IClientApi {
@ -185,7 +189,8 @@ public:
void runNetwork();
void stopNetwork();
ThreadFuture<Reference<IDatabase>> createDatabase(const char *clusterFilePath);
Reference<IDatabase> createDatabase(const char *clusterFilePath);
Reference<IDatabase> createDatabase609(const char *clusterFilePath); // legacy database creation
void addNetworkThreadCompletionHook(void (*hook)(void*), void *hookParameter);
@ -327,7 +332,6 @@ private:
Reference<IDatabase> db;
const Reference<ThreadSafeAsyncVar<Reference<IDatabase>>> dbVar;
ThreadFuture<Reference<IDatabase>> dbFuture;
ThreadFuture<Void> changed;
bool cancelled;
@ -355,7 +359,7 @@ public:
void stopNetwork();
void addNetworkThreadCompletionHook(void (*hook)(void*), void *hookParameter);
ThreadFuture<Reference<IDatabase>> createDatabase(const char *clusterFilePath);
Reference<IDatabase> createDatabase(const char *clusterFilePath);
static MultiVersionApi* api;
Reference<ClientInfo> getLocalClient();

View File

@ -35,6 +35,9 @@
#include "fdbclient/MutationList.h"
#include "fdbclient/CoordinationInterface.h"
#include "fdbclient/MonitorLeader.h"
#if defined(CMAKE_BUILD) || !defined(WIN32)
#include "versions.h"
#endif
#include "fdbrpc/TLSConnection.h"
#include "flow/Knobs.h"
#include "fdbclient/Knobs.h"
@ -50,16 +53,15 @@
#undef max
#else
#include <time.h>
#include "versions.h"
#endif
#include "flow/actorcompiler.h" // This must be the last #include.
#include "flow/actorcompiler.h" // This must be the last #include.
extern IRandom* trace_random;
extern const char* getHGVersion();
using std::min;
using std::max;
using std::make_pair;
using std::max;
using std::min;
NetworkOptions networkOptions;
Reference<TLSOptions> tlsOptions;
@ -254,7 +256,7 @@ ACTOR static Future<Standalone<StringRef> > getSampleVersionStamp(Transaction *t
try {
tr->reset();
tr->setOption(FDBTransactionOptions::PRIORITY_SYSTEM_IMMEDIATE);
Optional<Value> _ = wait(tr->get(LiteralStringRef("\xff/StatusJsonTestKey62793")));
wait(success(tr->get(LiteralStringRef("\xff/StatusJsonTestKey62793"))));
state Future<Standalone<StringRef> > vstamp = tr->getVersionstamp();
tr->makeSelfConflicting();
wait(tr->commit());
@ -486,6 +488,8 @@ DatabaseContext::DatabaseContext(
clientStatusUpdater.actor = clientStatusUpdateActor(this);
}
DatabaseContext::DatabaseContext( const Error &err ) : deferredError(err), latencies(1000), readLatencies(1000), commitLatencies(1000), GRVLatencies(1000), mutationsPerCommit(1000), bytesPerCommit(1000) {}
ACTOR static Future<Void> monitorClientInfo( Reference<AsyncVar<Optional<ClusterInterface>>> clusterInterface, Reference<ClusterConnectionFile> ccf, Reference<AsyncVar<ClientDBInfo>> outInfo ) {
try {
state Optional<std::string> incorrectConnectionString;
@ -741,6 +745,7 @@ void Cluster::init( Reference<ClusterConnectionFile> connFile, bool startClientI
initTraceEventMetrics();
auto publicIP = determinePublicIPAutomatically( connFile->getConnectionString() );
selectTraceFormatter(networkOptions.traceFormat);
openTraceFile(NetworkAddress(publicIP, ::getpid()), networkOptions.traceRollSize, networkOptions.traceMaxLogsSize, networkOptions.traceDirectory.get(), "trace", networkOptions.traceLogGroup);
TraceEvent("ClientStart")
@ -793,6 +798,14 @@ void setNetworkOption(FDBNetworkOptions::Option option, Optional<StringRef> valu
if(value.present())
networkOptions.traceLogGroup = value.get().toString();
break;
case FDBNetworkOptions::TRACE_FORMAT:
validateOptionValue(value, true);
networkOptions.traceFormat = value.get().toString();
if (!validateTraceFormat(networkOptions.traceFormat)) {
fprintf(stderr, "Unrecognized trace format: `%s'\n", networkOptions.traceFormat.c_str());
throw invalid_option_value();
}
break;
case FDBNetworkOptions::KNOB: {
validateOptionValue(value, true);
@ -1039,7 +1052,8 @@ Future<Standalone<RangeResultRef>> getRange(
bool const& reverse,
TransactionInfo const& info);
Future<Optional<Value>> getValue( Future<Version> const& version, Key const& key, Database const& cx, TransactionInfo const& info, Reference<TransactionLogInfo> const& trLogInfo ) ;
ACTOR Future<Optional<Value>> getValue(Future<Version> version, Key key, Database cx, TransactionInfo info,
Reference<TransactionLogInfo> trLogInfo);
ACTOR Future<Optional<StorageServerInterface>> fetchServerInterface( Database cx, TransactionInfo info, UID id, Future<Version> ver = latestVersion ) {
Optional<Value> val = wait( getValue(ver, serverListKeyFor(id), cx, info, Reference<TransactionLogInfo>()) );
@ -1191,7 +1205,7 @@ ACTOR Future<Void> warmRange_impl( Transaction *self, Database cx, KeyRange keys
try {
tr.setOption( FDBTransactionOptions::LOCK_AWARE );
tr.setOption( FDBTransactionOptions::CAUSAL_READ_RISKY );
Version _ = wait( tr.getReadVersion() );
wait(success( tr.getReadVersion() ));
break;
} catch( Error &e ) {
wait( tr.onError(e) );
@ -1340,7 +1354,9 @@ ACTOR Future<Version> waitForCommittedVersion( Database cx, Version version ) {
}
}
Future<Void> readVersionBatcher( DatabaseContext* const& cx, FutureStream< std::pair< Promise<GetReadVersionReply>, Optional<UID> > > const& versionStream, uint32_t const& flags );
ACTOR Future<Void> readVersionBatcher(
DatabaseContext* cx, FutureStream<std::pair<Promise<GetReadVersionReply>, Optional<UID>>> versionStream,
uint32_t flags);
ACTOR Future< Void > watchValue( Future<Version> version, Key key, Optional<Value> value, Database cx, int readVersionFlags, TransactionInfo info )
{
@ -1855,8 +1871,9 @@ Transaction::Transaction( Database const& cx )
: cx(cx), info(cx->taskID), backoff(CLIENT_KNOBS->DEFAULT_BACKOFF), committedVersion(invalidVersion), versionstampPromise(Promise<Standalone<StringRef>>()), numErrors(0), trLogInfo(createTrLogInfoProbabilistically(cx))
{
setPriority(GetReadVersionRequest::PRIORITY_DEFAULT);
if(cx->lockAware)
if(cx->lockAware) {
options.lockAware = true;
}
}
Transaction::~Transaction() {
@ -3049,11 +3066,14 @@ Future< Standalone<VectorRef<KeyRef>> > Transaction::splitStorageMetrics( KeyRan
void Transaction::checkDeferredError() { cx->checkDeferredError(); }
Reference<TransactionLogInfo> Transaction::createTrLogInfoProbabilistically(const Database &cx) {
double clientSamplingProbability = std::isinf(cx->clientInfo->get().clientTxnInfoSampleRate) ? CLIENT_KNOBS->CSI_SAMPLING_PROBABILITY : cx->clientInfo->get().clientTxnInfoSampleRate;
if (((networkOptions.logClientInfo.present() && networkOptions.logClientInfo.get()) || BUGGIFY) && g_random->random01() < clientSamplingProbability && (!g_network->isSimulated() || !g_simulator.speedUpSimulation))
return Reference<TransactionLogInfo>(new TransactionLogInfo());
else
return Reference<TransactionLogInfo>();
if(!cx->isError()) {
double clientSamplingProbability = std::isinf(cx->clientInfo->get().clientTxnInfoSampleRate) ? CLIENT_KNOBS->CSI_SAMPLING_PROBABILITY : cx->clientInfo->get().clientTxnInfoSampleRate;
if (((networkOptions.logClientInfo.present() && networkOptions.logClientInfo.get()) || BUGGIFY) && g_random->random01() < clientSamplingProbability && (!g_network->isSimulated() || !g_simulator.speedUpSimulation)) {
return Reference<TransactionLogInfo>(new TransactionLogInfo());
}
}
return Reference<TransactionLogInfo>();
}
void enableClientInfoLogging() {

Some files were not shown because too many files have changed in this diff Show More