Merge branch 'master' into remove-cluster-from-bindings
# Conflicts: # bindings/c/fdb_c.cpp
This commit is contained in:
commit
11cce3731b
|
@ -84,3 +84,4 @@ compile_commands.json
|
|||
.envrc
|
||||
.DS_Store
|
||||
temp/
|
||||
/compile_commands.json
|
||||
|
|
|
@ -0,0 +1,214 @@
|
|||
#
|
||||
# CMakeLists.txt
|
||||
#
|
||||
# This source file is part of the FoundationDB open source project
|
||||
#
|
||||
# Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
cmake_minimum_required(VERSION 3.12)
|
||||
project(fdb
|
||||
VERSION 6.1.0
|
||||
DESCRIPTION "FoundationDB is a scalable, fault-tolerant, ordered key-value store with full ACID transactions."
|
||||
HOMEPAGE_URL "http://www.foundationdb.org/"
|
||||
LANGUAGES C CXX ASM Java)
|
||||
|
||||
set(CMAKE_MODULE_PATH "${CMAKE_MODULE_PATH};${PROJECT_SOURCE_DIR}/cmake")
|
||||
message (STATUS "${PROJECT_SOURCE_DIR} ${PROJECT_BINARY_DIR}")
|
||||
if("${PROJECT_SOURCE_DIR}" STREQUAL "${PROJECT_BINARY_DIR}")
|
||||
message(FATAL_ERROR "In-source builds are forbidden, unsupported, and stupid!!")
|
||||
endif()
|
||||
|
||||
if(NOT CMAKE_BUILD_TYPE AND NOT CMAKE_CONFIGURATION_TYPES)
|
||||
message(STATUS "Setting build type to 'Release' as none was specified")
|
||||
set(CMAKE_BUILD_TYPE Release CACHE STRING "Choose the type of build" FORCE)
|
||||
set_property(CACHE CMAKE_BUILD_TYPE PROPERTY STRINGS "Debug" "Release"
|
||||
"MinSizeRel" "RelWithDebInfo")
|
||||
endif()
|
||||
|
||||
set(EXECUTABLE_OUTPUT_PATH ${PROJECT_BINARY_DIR}/bin)
|
||||
set(LIBRARY_OUTPUT_PATH ${PROJECT_BINARY_DIR}/lib)
|
||||
|
||||
################################################################################
|
||||
# Packages used for bindings
|
||||
################################################################################
|
||||
|
||||
set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_CURRENT_SOURCE_DIR}/cmake/")
|
||||
|
||||
find_package(PythonInterp 3.4 REQUIRED)
|
||||
set(Python_ADDITIONAL_VERSIONS 3.4 3.5 3.5)
|
||||
find_package(PythonLibs 3.4 REQUIRED)
|
||||
|
||||
|
||||
################################################################################
|
||||
# Compiler configuration
|
||||
################################################################################
|
||||
|
||||
include(ConfigureCompiler)
|
||||
|
||||
################################################################################
|
||||
# Get repository information
|
||||
################################################################################
|
||||
|
||||
add_custom_target(branch_file ALL DEPENDS ${CURR_BRANCH_FILE})
|
||||
execute_process(
|
||||
COMMAND git rev-parse HEAD
|
||||
WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}
|
||||
OUTPUT_VARIABLE CURRENT_GIT_VERSION_WNL)
|
||||
string(STRIP "${CURRENT_GIT_VERSION_WNL}" CURRENT_GIT_VERSION)
|
||||
message(STATUS "Current git version ${CURRENT_GIT_VERSION}")
|
||||
|
||||
################################################################################
|
||||
# Version information
|
||||
################################################################################
|
||||
|
||||
set(USE_VERSIONS_TARGET OFF CACHE BOOL "Use the deprecated versions.target file")
|
||||
if(USE_VERSIONS_TARGET)
|
||||
add_custom_target(version_file ALL DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/versions.target)
|
||||
execute_process(
|
||||
COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/build/get_version.sh ${CMAKE_CURRENT_SOURCE_DIR}/versions.target
|
||||
OUTPUT_VARIABLE FDB_VERSION_WNL)
|
||||
execute_process(
|
||||
COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/build/get_package_name.sh ${CMAKE_CURRENT_SOURCE_DIR}/versions.target
|
||||
OUTPUT_VARIABLE FDB_PACKAGE_NAME_WNL)
|
||||
string(STRIP "${FDB_VERSION_WNL}" FDB_VERSION)
|
||||
string(STRIP "${FDB_PACKAGE_NAME_WNL}" FDB_PACKAGE_NAME)
|
||||
set(FDB_VERSION_PLAIN ${FDB_VERSION})
|
||||
if(NOT FDB_RELEASE)
|
||||
set(FDB_VERSION "${FDB_VERSION}-PRERELEASE")
|
||||
endif()
|
||||
else()
|
||||
set(FDB_PACKAGE_NAME "${PROJECT_VERSION_MAJOR}.${PROJECT_VERSION_MINOR}")
|
||||
set(FDB_VERSION ${PROJECT_VERSION})
|
||||
set(FDB_VERSION_PLAIN ${FDB_VERSION})
|
||||
endif()
|
||||
|
||||
message(STATUS "FDB version is ${FDB_VERSION}")
|
||||
message(STATUS "FDB package name is ${FDB_PACKAGE_NAME}")
|
||||
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/fdbclient/versions.h.cmake ${CMAKE_CURRENT_BINARY_DIR}/fdbclient/versions.h)
|
||||
|
||||
|
||||
################################################################################
|
||||
# Flow
|
||||
################################################################################
|
||||
|
||||
# First thing we need is the actor compiler - and to compile and run the
|
||||
# actor compiler, we need mono
|
||||
include(CompileActorCompiler)
|
||||
|
||||
# with the actor compiler, we can now make the flow commands available
|
||||
include(FlowCommands)
|
||||
|
||||
################################################################################
|
||||
# Vexilographer
|
||||
################################################################################
|
||||
|
||||
include(CompileVexillographer)
|
||||
|
||||
# This macro can be used to install symlinks, which turns out to be
|
||||
# non-trivial due to CMake version differences and limitations on how
|
||||
# files can be installed when building binary packages.
|
||||
#
|
||||
# The rule for binary packaging is that files (including symlinks) must
|
||||
# be installed with the standard CMake install() macro.
|
||||
#
|
||||
# The rule for non-binary packaging is that CMake 2.6 cannot install()
|
||||
# symlinks, but can create the symlink at install-time via scripting.
|
||||
# Though, we assume that CMake 2.6 isn't going to be used to generate
|
||||
# packages because versions later than 2.8.3 are superior for that purpose.
|
||||
#
|
||||
# _filepath: the absolute path to the file to symlink
|
||||
# _sympath: absolute path of the installed symlink
|
||||
|
||||
macro(InstallSymlink _filepath _sympath)
|
||||
get_filename_component(_symname ${_sympath} NAME)
|
||||
get_filename_component(_installdir ${_sympath} PATH)
|
||||
|
||||
if (BINARY_PACKAGING_MODE)
|
||||
execute_process(COMMAND "${CMAKE_COMMAND}" -E create_symlink
|
||||
${_filepath}
|
||||
${CMAKE_CURRENT_BINARY_DIR}/${_symname})
|
||||
install(FILES ${CMAKE_CURRENT_BINARY_DIR}/${_symname}
|
||||
DESTINATION ${_installdir}
|
||||
COMPONENT clients)
|
||||
else ()
|
||||
# scripting the symlink installation at install time should work
|
||||
# for CMake 2.6.x and 2.8.x
|
||||
install(CODE "
|
||||
if (\"\$ENV{DESTDIR}\" STREQUAL \"\")
|
||||
execute_process(COMMAND \"${CMAKE_COMMAND}\" -E create_symlink
|
||||
${_filepath}
|
||||
${_installdir}/${_symname})
|
||||
else ()
|
||||
execute_process(COMMAND \"${CMAKE_COMMAND}\" -E create_symlink
|
||||
${_filepath}
|
||||
\$ENV{DESTDIR}/${_installdir}/${_symname})
|
||||
endif ()
|
||||
"
|
||||
COMPONENT clients)
|
||||
endif ()
|
||||
endmacro(InstallSymlink)
|
||||
|
||||
################################################################################
|
||||
# Generate config file
|
||||
################################################################################
|
||||
|
||||
string(RANDOM LENGTH 8 description1)
|
||||
string(RANDOM LENGTH 8 description2)
|
||||
set(CLUSTER_DESCRIPTION1 ${description1} CACHE STRING "Cluster description")
|
||||
set(CLUSTER_DESCRIPTION2 ${description2} CACHE STRING "Cluster description")
|
||||
|
||||
configure_file(fdb.cluster.cmake ${CMAKE_CURRENT_BINARY_DIR}/fdb.cluster)
|
||||
|
||||
|
||||
################################################################################
|
||||
# testing
|
||||
################################################################################
|
||||
enable_testing()
|
||||
|
||||
################################################################################
|
||||
# Directory structure
|
||||
################################################################################
|
||||
|
||||
include(cmake/InstallLayout.cmake)
|
||||
|
||||
################################################################################
|
||||
# components
|
||||
################################################################################
|
||||
|
||||
include(CompileBoost)
|
||||
add_subdirectory(flow)
|
||||
add_subdirectory(fdbrpc)
|
||||
add_subdirectory(fdbclient)
|
||||
add_subdirectory(fdbserver)
|
||||
add_subdirectory(fdbcli)
|
||||
add_subdirectory(fdbmonitor)
|
||||
add_subdirectory(bindings)
|
||||
add_subdirectory(fdbbackup)
|
||||
|
||||
include(CPack)
|
||||
|
||||
################################################################################
|
||||
# process compile commands for IDE
|
||||
################################################################################
|
||||
|
||||
if (CMAKE_EXPORT_COMPILE_COMMANDS)
|
||||
add_custom_command(
|
||||
OUTPUT ${CMAKE_CURRENT_SOURCE_DIR}/compile_commands.json
|
||||
COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/build/gen_compile_db.py
|
||||
ARGS -b ${CMAKE_CURRENT_BINARY_DIR} -s ${CMAKE_CURRENT_SOURCE_DIR} -o ${CMAKE_CURRENT_SOURCE_DIR}/compile_commands.json ${CMAKE_CURRENT_BINARY_DIR}/compile_commands.json
|
||||
DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/build/gen_compile_db.py ${CMAKE_CURRENT_BINARY_DIR}/compile_commands.json
|
||||
COMMENT "Build compile commands for IDE"
|
||||
)
|
||||
add_custom_target(procossed_compile_commands ALL DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/compile_commands.json ${CMAKE_CURRENT_BINARY_DIR}/compile_commands.json)
|
||||
endif()
|
7
Makefile
7
Makefile
|
@ -15,13 +15,12 @@ ifeq ($(MONO),)
|
|||
MONO := /usr/bin/mono
|
||||
endif
|
||||
|
||||
DMCS := $(shell which dmcs)
|
||||
MCS := $(shell which mcs)
|
||||
ifneq ($(DMCS),)
|
||||
MCS := $(DMCS)
|
||||
ifeq ($(MCS),)
|
||||
MCS := $(shell which dmcs)
|
||||
endif
|
||||
ifeq ($(MCS),)
|
||||
MCS := /usr/bin/dmcs
|
||||
MCS := /usr/bin/mcs
|
||||
endif
|
||||
|
||||
CFLAGS := -Werror -Wno-error=format -fPIC -DNO_INTELLISENSE -fvisibility=hidden -DNDEBUG=1 -Wreturn-type -fno-omit-frame-pointer
|
||||
|
|
19
README.md
19
README.md
|
@ -43,10 +43,23 @@ Developers on a OS for which there is no binary package, or who would like to st
|
|||
1. Install [Docker](https://www.docker.com/).
|
||||
1. Check out the foundationdb repo.
|
||||
1. Build Linux docker image using the file `Dockerfile` located in the `build` source directory.
|
||||
|
||||
```shell
|
||||
cd /dir/path/foundationdb
|
||||
docker build ./build -t <image-tag-name>
|
||||
```
|
||||
|
||||
1. Run the docker image interactively [Docker Run](https://docs.docker.com/engine/reference/run/#general-form) with the directory containing the foundationdb repo mounted [Docker Mounts](https://docs.docker.com/storage/volumes/).
|
||||
`docker run -it -v '/local/dir/path/foundationdb:/docker/dir/path/foundationdb' /bin/bash`
|
||||
1. Navigate to the mounted directory containing the foundationdb repo.
|
||||
`cd /docker/dir/path/foundationdb`
|
||||
|
||||
```shell
|
||||
docker run -it -v '/local/dir/path/foundationdb:/docker/dir/path/foundationdb' <image-tag-name> /bin/bash
|
||||
```
|
||||
|
||||
1. Navigate to the container's mounted directory which contains the foundationdb repo.
|
||||
|
||||
```shell
|
||||
cd /docker/dir/path/foundationdb
|
||||
```
|
||||
1. Run `make`.
|
||||
|
||||
This will build the fdbserver binary and the python bindings. If you want to build our other bindings, you will need to install a runtime for the language whose binding you want to build. Each binding has an `.mk` file which provides specific targets for that binding.
|
||||
|
|
|
@ -0,0 +1,3 @@
|
|||
add_subdirectory(c)
|
||||
add_subdirectory(python)
|
||||
add_subdirectory(java)
|
|
@ -18,6 +18,7 @@
|
|||
# limitations under the License.
|
||||
#
|
||||
|
||||
import math
|
||||
import sys
|
||||
import os
|
||||
|
||||
|
@ -61,11 +62,31 @@ class Result:
|
|||
def key(self, specification):
|
||||
return self.key_tuple[specification.key_start_index:]
|
||||
|
||||
@staticmethod
|
||||
def elements_equal(el1, el2):
|
||||
if type(el1) != type(el2):
|
||||
return False
|
||||
|
||||
if isinstance(el1, tuple):
|
||||
return Result.tuples_match(el1, el2)
|
||||
|
||||
if isinstance(el1, float) and math.isnan(el1):
|
||||
return math.isnan(el2)
|
||||
|
||||
return el1 == el2
|
||||
|
||||
@staticmethod
|
||||
def tuples_match(t1, t2):
|
||||
if len(t1) != len(t2):
|
||||
return False
|
||||
|
||||
return all([Result.elements_equal(x,y) for x,y in zip(t1, t2)])
|
||||
|
||||
def matches_key(self, rhs, specification):
|
||||
if not isinstance(rhs, Result):
|
||||
return False
|
||||
|
||||
return self.key(specification) == rhs.key(specification)
|
||||
return Result.tuples_match(self.key(specification), rhs.key(specification))
|
||||
|
||||
def matches(self, rhs, specification):
|
||||
if not self.matches_key(rhs, specification):
|
||||
|
|
|
@ -98,7 +98,7 @@ class ResultSet(object):
|
|||
# If these results aren't using sequence numbers, then we match two results based on whether they share the same key
|
||||
else:
|
||||
min_key = min([r.key(self.specification) for r in results.values()])
|
||||
results = {i: r for i, r in results.items() if r.key(self.specification) == min_key}
|
||||
results = {i: r for i, r in results.items() if Result.tuples_match(r.key(self.specification), min_key)}
|
||||
|
||||
# Increment the indices for those testers which produced a result in this iteration
|
||||
for i in results.keys():
|
||||
|
|
|
@ -0,0 +1,53 @@
|
|||
set(FDB_C_SRCS
|
||||
fdb_c.cpp
|
||||
foundationdb/fdb_c.h
|
||||
ThreadCleanup.cpp)
|
||||
|
||||
file(MAKE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/foundationdb)
|
||||
|
||||
set(platform)
|
||||
if(APPLE)
|
||||
set(platform "osx")
|
||||
else()
|
||||
set(platform "linux")
|
||||
endif()
|
||||
|
||||
add_custom_command(OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/fdb_c.g.S
|
||||
${CMAKE_CURRENT_BINARY_DIR}/fdb_c_function_pointers.g.h
|
||||
COMMAND ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/generate_asm.py ${platform}
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/fdb_c.cpp
|
||||
${CMAKE_CURRENT_BINARY_DIR}/fdb_c.g.S
|
||||
${CMAKE_CURRENT_BINARY_DIR}/fdb_c_function_pointers.g.h
|
||||
DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/generate_asm.py ${CMAKE_CURRENT_SOURCE_DIR}/fdb_c.cpp
|
||||
COMMENT "Generate C bindings")
|
||||
add_custom_target(fdb_c_generated DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/fdb_c.g.S
|
||||
${CMAKE_CURRENT_BINARY_DIR}/fdb_c_function_pointers.g.h)
|
||||
|
||||
add_custom_command(OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/foundationdb/fdb_c_options.g.h
|
||||
COMMAND ${MONO_EXECUTABLE} ${VEXILLOGRAPHER_EXE} ${CMAKE_SOURCE_DIR}/fdbclient/vexillographer/fdb.options c ${CMAKE_CURRENT_BINARY_DIR}/foundationdb/fdb_c_options.g.h
|
||||
DEPENDS ${CMAKE_SOURCE_DIR}/fdbclient/vexillographer/fdb.options vexillographer
|
||||
COMMENT "Generate C options")
|
||||
add_custom_target(fdb_c_options DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/foundationdb/fdb_c_options.g.h)
|
||||
|
||||
include(GenerateExportHeader)
|
||||
|
||||
add_library(fdb_c SHARED ${FDB_C_SRCS} ${CMAKE_CURRENT_BINARY_DIR}/fdb_c.g.S)
|
||||
add_dependencies(fdb_c fdb_c_generated fdb_c_options)
|
||||
target_link_libraries(fdb_c PUBLIC fdbclient)
|
||||
target_include_directories(fdb_c PUBLIC
|
||||
${CMAKE_CURRENT_BINARY_DIR}
|
||||
${CMAKE_CURRENT_SOURCE_DIR}
|
||||
${CMAKE_CURRENT_BINARY_DIR}/foundationdb)
|
||||
# TODO: re-enable once the old vcxproj-based build system is removed.
|
||||
#generate_export_header(fdb_c EXPORT_MACRO_NAME "DLLEXPORT"
|
||||
# EXPORT_FILE_NAME ${CMAKE_CURRENT_BINARY_DIR}/foundationdb/fdb_c_export.h)
|
||||
install(TARGETS fdb_c
|
||||
EXPORT fdbc
|
||||
DESTINATION ${FDB_LIB_DIR}
|
||||
COMPONENT clients)
|
||||
install(
|
||||
FILES foundationdb/fdb_c.h
|
||||
${CMAKE_CURRENT_BINARY_DIR}/foundationdb/fdb_c_options.g.h
|
||||
${CMAKE_SOURCE_DIR}/fdbclient/vexillographer/fdb.options
|
||||
DESTINATION ${FDB_INCLUDE_INSTALL_DIR}/foundationdb COMPONENT clients)
|
||||
#install(EXPORT fdbc DESTINATION ${FDB_LIB_DIR}/foundationdb COMPONENT clients)
|
|
@ -297,11 +297,15 @@ fdb_error_t fdb_future_get_string_array(
|
|||
|
||||
extern "C" DLLEXPORT
|
||||
FDBFuture* fdb_create_cluster_v609( const char* cluster_file_path ) {
|
||||
char *path = NULL;
|
||||
char *path;
|
||||
if(cluster_file_path) {
|
||||
path = new char[strlen(cluster_file_path) + 1];
|
||||
strcpy(path, cluster_file_path);
|
||||
}
|
||||
else {
|
||||
path = new char[1];
|
||||
path[0] = '\0';
|
||||
}
|
||||
return (FDBFuture*)ThreadFuture<char*>(path).extractPtr();
|
||||
}
|
||||
|
||||
|
@ -340,7 +344,7 @@ FDBFuture* fdb_cluster_create_database_v609( FDBCluster* c, uint8_t const* db_na
|
|||
extern "C" DLLEXPORT
|
||||
fdb_error_t fdb_create_database( const char* cluster_file_path, FDBDatabase** out_database ) {
|
||||
CATCH_AND_RETURN(
|
||||
*out_database = (FDBDatabase*)API->createDatabase( cluster_file_path ? cluster_file_path : "" ).extractPtr();
|
||||
*out_database = (FDBDatabase*)API->createDatabase( cluster_file_path ).extractPtr();
|
||||
);
|
||||
}
|
||||
|
||||
|
|
|
@ -24,7 +24,7 @@ fdb_c_CFLAGS := $(fdbclient_CFLAGS)
|
|||
fdb_c_LDFLAGS := $(fdbrpc_LDFLAGS)
|
||||
fdb_c_LIBS := lib/libfdbclient.a lib/libfdbrpc.a lib/libflow.a $(FDB_TLS_LIB)
|
||||
fdb_c_STATIC_LIBS := $(TLS_LIBS)
|
||||
fdb_c_tests_LIBS := -shared -Llib -lfdb_c
|
||||
fdb_c_tests_LIBS := -Llib -lfdb_c
|
||||
fdb_c_tests_HEADERS := -Ibindings/c
|
||||
|
||||
CLEAN_TARGETS += fdb_c_tests_clean
|
||||
|
@ -84,11 +84,11 @@ bindings/c/foundationdb/fdb_c_options.g.h: bin/vexillographer.exe fdbclient/vexi
|
|||
|
||||
bin/fdb_c_performance_test: bindings/c/test/performance_test.c bindings/c/test/test.h fdb_c
|
||||
@echo "Compiling fdb_c_performance_test"
|
||||
@$(CC) $(CFLAGS) $(fdb_c_tests_LIBS) $(fdb_c_tests_HEADERS) -o $@ bindings/c/test/performance_test.c
|
||||
@$(CC) $(CFLAGS) $(fdb_c_tests_HEADERS) -o $@ bindings/c/test/performance_test.c $(fdb_c_tests_LIBS)
|
||||
|
||||
bin/fdb_c_ryw_benchmark: bindings/c/test/ryw_benchmark.c bindings/c/test/test.h fdb_c
|
||||
@echo "Compiling fdb_c_ryw_benchmark"
|
||||
@$(CC) $(CFLAGS) $(fdb_c_tests_LIBS) $(fdb_c_tests_HEADERS) -o $@ bindings/c/test/ryw_benchmark.c
|
||||
@$(CC) $(CFLAGS) $(fdb_c_tests_HEADERS) -o $@ bindings/c/test/ryw_benchmark.c $(fdb_c_tests_LIBS)
|
||||
|
||||
packages/fdb-c-tests-$(VERSION)-$(PLATFORM).tar.gz: bin/fdb_c_performance_test bin/fdb_c_ryw_benchmark
|
||||
@echo "Packaging $@"
|
||||
|
|
|
@ -87,7 +87,7 @@ namespace FDB {
|
|||
|
||||
template <class Ar>
|
||||
void serialize( Ar& ar ) {
|
||||
ar & key & orEqual & offset;
|
||||
serializer(ar, key, orEqual, offset);
|
||||
}
|
||||
};
|
||||
inline bool operator == (const KeySelectorRef& lhs, const KeySelectorRef& rhs) { return lhs.key == rhs.key && lhs.orEqual==rhs.orEqual && lhs.offset==rhs.offset; }
|
||||
|
@ -123,7 +123,7 @@ namespace FDB {
|
|||
int expectedSize() const { return key.expectedSize() + value.expectedSize(); }
|
||||
|
||||
template <class Ar>
|
||||
force_inline void serialize(Ar& ar) { ar & key & value; }
|
||||
force_inline void serialize(Ar& ar) { serializer(ar, key, value); }
|
||||
|
||||
struct OrderByKey {
|
||||
bool operator()(KeyValueRef const& a, KeyValueRef const& b) const {
|
||||
|
@ -171,7 +171,7 @@ namespace FDB {
|
|||
|
||||
template <class Ar>
|
||||
void serialize( Ar& ar ) {
|
||||
ar & ((VectorRef<KeyValueRef>&)*this) & more & readThrough & readToBegin & readThroughEnd;
|
||||
serializer(ar, ((VectorRef<KeyValueRef>&)*this), more, readThrough, readToBegin, readThroughEnd);
|
||||
}
|
||||
};
|
||||
|
||||
|
@ -234,7 +234,7 @@ namespace FDB {
|
|||
|
||||
template <class Ar>
|
||||
force_inline void serialize(Ar& ar) {
|
||||
ar & const_cast<KeyRef&>(begin) & const_cast<KeyRef&>(end);
|
||||
serializer(ar, const_cast<KeyRef&>(begin), const_cast<KeyRef&>(end));
|
||||
if( begin > end ) {
|
||||
throw inverted_range();
|
||||
};
|
||||
|
|
|
@ -63,7 +63,7 @@ function printUsage() {
|
|||
echo
|
||||
echo "cmd: One of the commands to run. The options are:"
|
||||
echo " install Download the FDB go bindings and install them"
|
||||
echo " localinstall Install a into the go path a local copy of the repo"
|
||||
echo " localinstall Install into the go path a local copy of the repo"
|
||||
echo " download Download but do not prepare the FoundationDB bindings"
|
||||
echo " help Print this help message and then quit"
|
||||
echo
|
||||
|
|
|
@ -86,7 +86,7 @@ func retryable(wrapped func() (interface{}, error), onError func(Error) FutureNi
|
|||
for {
|
||||
ret, e = wrapped()
|
||||
|
||||
/* No error means success! */
|
||||
// No error means success!
|
||||
if e == nil {
|
||||
return
|
||||
}
|
||||
|
@ -96,8 +96,8 @@ func retryable(wrapped func() (interface{}, error), onError func(Error) FutureNi
|
|||
e = onError(ep).Get()
|
||||
}
|
||||
|
||||
/* If OnError returns an error, then it's not
|
||||
/* retryable; otherwise take another pass at things */
|
||||
// If OnError returns an error, then it's not
|
||||
// retryable; otherwise take another pass at things
|
||||
if e != nil {
|
||||
return
|
||||
}
|
||||
|
@ -125,7 +125,7 @@ func retryable(wrapped func() (interface{}, error), onError func(Error) FutureNi
|
|||
// Transaction and Database objects.
|
||||
func (d Database) Transact(f func(Transaction) (interface{}, error)) (interface{}, error) {
|
||||
tr, e := d.CreateTransaction()
|
||||
/* Any error here is non-retryable */
|
||||
// Any error here is non-retryable
|
||||
if e != nil {
|
||||
return nil, e
|
||||
}
|
||||
|
@ -165,7 +165,7 @@ func (d Database) Transact(f func(Transaction) (interface{}, error)) (interface{
|
|||
// Transaction, Snapshot and Database objects.
|
||||
func (d Database) ReadTransact(f func(ReadTransaction) (interface{}, error)) (interface{}, error) {
|
||||
tr, e := d.CreateTransaction()
|
||||
/* Any error here is non-retryable */
|
||||
// Any error here is non-retryable
|
||||
if e != nil {
|
||||
return nil, e
|
||||
}
|
||||
|
|
|
@ -38,9 +38,9 @@ import (
|
|||
"unsafe"
|
||||
)
|
||||
|
||||
/* Would put this in futures.go but for the documented issue with
|
||||
/* exports and functions in preamble
|
||||
/* (https://code.google.com/p/go-wiki/wiki/cgo#Global_functions) */
|
||||
// Would put this in futures.go but for the documented issue with
|
||||
// exports and functions in preamble
|
||||
// (https://code.google.com/p/go-wiki/wiki/cgo#Global_functions)
|
||||
//export unlockMutex
|
||||
func unlockMutex(p unsafe.Pointer) {
|
||||
m := (*sync.Mutex)(p)
|
||||
|
|
|
@ -90,7 +90,11 @@ type ExactRange interface {
|
|||
// that the default zero-value of KeyRange specifies an empty range before all
|
||||
// keys in the database.
|
||||
type KeyRange struct {
|
||||
Begin, End KeyConvertible
|
||||
// The (inclusive) beginning of the range
|
||||
Begin KeyConvertible
|
||||
|
||||
// The (exclusive) end of the range
|
||||
End KeyConvertible
|
||||
}
|
||||
|
||||
// FDBRangeKeys allows KeyRange to satisfy the ExactRange interface.
|
||||
|
|
|
@ -0,0 +1,144 @@
|
|||
include(UseJava)
|
||||
find_package(JNI 1.8 REQUIRED)
|
||||
find_package(Java 1.8 COMPONENTS Development REQUIRED)
|
||||
|
||||
set(JAVA_BINDING_SRCS
|
||||
src/main/com/apple/foundationdb/async/AsyncIterable.java
|
||||
src/main/com/apple/foundationdb/async/AsyncIterator.java
|
||||
src/main/com/apple/foundationdb/async/AsyncUtil.java
|
||||
src/main/com/apple/foundationdb/async/Cancellable.java
|
||||
src/main/com/apple/foundationdb/async/CloneableException.java
|
||||
src/main/com/apple/foundationdb/async/CloseableAsyncIterator.java
|
||||
src/main/com/apple/foundationdb/async/package-info.java
|
||||
src/main/com/apple/foundationdb/Cluster.java
|
||||
src/main/com/apple/foundationdb/Database.java
|
||||
src/main/com/apple/foundationdb/directory/Directory.java
|
||||
src/main/com/apple/foundationdb/directory/DirectoryAlreadyExistsException.java
|
||||
src/main/com/apple/foundationdb/directory/DirectoryException.java
|
||||
src/main/com/apple/foundationdb/directory/DirectoryLayer.java
|
||||
src/main/com/apple/foundationdb/directory/DirectoryMoveException.java
|
||||
src/main/com/apple/foundationdb/directory/DirectoryPartition.java
|
||||
src/main/com/apple/foundationdb/directory/DirectorySubspace.java
|
||||
src/main/com/apple/foundationdb/directory/DirectoryUtil.java
|
||||
src/main/com/apple/foundationdb/directory/DirectoryVersionException.java
|
||||
src/main/com/apple/foundationdb/directory/MismatchedLayerException.java
|
||||
src/main/com/apple/foundationdb/directory/NoSuchDirectoryException.java
|
||||
src/main/com/apple/foundationdb/directory/package-info.java
|
||||
src/main/com/apple/foundationdb/directory/PathUtil.java
|
||||
src/main/com/apple/foundationdb/FDB.java
|
||||
src/main/com/apple/foundationdb/FDBDatabase.java
|
||||
src/main/com/apple/foundationdb/FDBTransaction.java
|
||||
src/main/com/apple/foundationdb/FutureCluster.java
|
||||
src/main/com/apple/foundationdb/FutureDatabase.java
|
||||
src/main/com/apple/foundationdb/FutureKey.java
|
||||
src/main/com/apple/foundationdb/FutureResult.java
|
||||
src/main/com/apple/foundationdb/FutureResults.java
|
||||
src/main/com/apple/foundationdb/FutureStrings.java
|
||||
src/main/com/apple/foundationdb/FutureVersion.java
|
||||
src/main/com/apple/foundationdb/FutureVoid.java
|
||||
src/main/com/apple/foundationdb/JNIUtil.java
|
||||
src/main/com/apple/foundationdb/KeySelector.java
|
||||
src/main/com/apple/foundationdb/KeyValue.java
|
||||
src/main/com/apple/foundationdb/LocalityUtil.java
|
||||
src/main/com/apple/foundationdb/NativeFuture.java
|
||||
src/main/com/apple/foundationdb/NativeObjectWrapper.java
|
||||
src/main/com/apple/foundationdb/OptionConsumer.java
|
||||
src/main/com/apple/foundationdb/OptionsSet.java
|
||||
src/main/com/apple/foundationdb/package-info.java
|
||||
src/main/com/apple/foundationdb/Range.java
|
||||
src/main/com/apple/foundationdb/RangeQuery.java
|
||||
src/main/com/apple/foundationdb/RangeResult.java
|
||||
src/main/com/apple/foundationdb/RangeResultInfo.java
|
||||
src/main/com/apple/foundationdb/RangeResultSummary.java
|
||||
src/main/com/apple/foundationdb/ReadTransaction.java
|
||||
src/main/com/apple/foundationdb/ReadTransactionContext.java
|
||||
src/main/com/apple/foundationdb/subspace/package-info.java
|
||||
src/main/com/apple/foundationdb/subspace/Subspace.java
|
||||
src/main/com/apple/foundationdb/Transaction.java
|
||||
src/main/com/apple/foundationdb/TransactionContext.java
|
||||
src/main/com/apple/foundationdb/tuple/ByteArrayUtil.java
|
||||
src/main/com/apple/foundationdb/tuple/IterableComparator.java
|
||||
src/main/com/apple/foundationdb/tuple/package-info.java
|
||||
src/main/com/apple/foundationdb/tuple/Tuple.java
|
||||
src/main/com/apple/foundationdb/tuple/TupleUtil.java
|
||||
src/main/com/apple/foundationdb/tuple/Versionstamp.java)
|
||||
|
||||
set(JAVA_TESTS_SRCS
|
||||
src/test/com/apple/foundationdb/test/AbstractTester.java
|
||||
src/test/com/apple/foundationdb/test/AsyncDirectoryExtension.java
|
||||
src/test/com/apple/foundationdb/test/AsyncStackTester.java
|
||||
src/test/com/apple/foundationdb/test/BlockingBenchmark.java
|
||||
src/test/com/apple/foundationdb/test/ConcurrentGetSetGet.java
|
||||
src/test/com/apple/foundationdb/test/Context.java
|
||||
src/test/com/apple/foundationdb/test/ContinuousSample.java
|
||||
src/test/com/apple/foundationdb/test/DirectoryExtension.java
|
||||
src/test/com/apple/foundationdb/test/DirectoryOperation.java
|
||||
src/test/com/apple/foundationdb/test/DirectoryTest.java
|
||||
src/test/com/apple/foundationdb/test/DirectoryUtil.java
|
||||
src/test/com/apple/foundationdb/test/Example.java
|
||||
src/test/com/apple/foundationdb/test/Instruction.java
|
||||
src/test/com/apple/foundationdb/test/IterableTest.java
|
||||
src/test/com/apple/foundationdb/test/LocalityTests.java
|
||||
src/test/com/apple/foundationdb/test/ParallelRandomScan.java
|
||||
src/test/com/apple/foundationdb/test/PerformanceTester.java
|
||||
src/test/com/apple/foundationdb/test/RangeTest.java
|
||||
src/test/com/apple/foundationdb/test/RYWBenchmark.java
|
||||
src/test/com/apple/foundationdb/test/SerialInsertion.java
|
||||
src/test/com/apple/foundationdb/test/SerialIteration.java
|
||||
src/test/com/apple/foundationdb/test/SerialTest.java
|
||||
src/test/com/apple/foundationdb/test/Stack.java
|
||||
src/test/com/apple/foundationdb/test/StackEntry.java
|
||||
src/test/com/apple/foundationdb/test/StackOperation.java
|
||||
src/test/com/apple/foundationdb/test/StackTester.java
|
||||
src/test/com/apple/foundationdb/test/StackUtils.java
|
||||
src/test/com/apple/foundationdb/test/TesterArgs.java
|
||||
src/test/com/apple/foundationdb/test/TestResult.java
|
||||
src/test/com/apple/foundationdb/test/TupleTest.java
|
||||
src/test/com/apple/foundationdb/test/VersionstampSmokeTest.java
|
||||
src/test/com/apple/foundationdb/test/WatchTest.java
|
||||
src/test/com/apple/foundationdb/test/WhileTrueTest.java)
|
||||
|
||||
set(GENERATED_JAVA_DIR ${CMAKE_CURRENT_BINARY_DIR}/src/main/com/foundationdb)
|
||||
file(MAKE_DIRECTORY ${GENERATED_JAVA_DIR})
|
||||
|
||||
set(GENERATED_JAVA_FILES
|
||||
${GENERATED_JAVA_DIR}/ClusterOptions.java
|
||||
${GENERATED_JAVA_DIR}/ConflictRangeType.java
|
||||
${GENERATED_JAVA_DIR}/DatabaseOptions.java
|
||||
${GENERATED_JAVA_DIR}/MutationType.java
|
||||
${GENERATED_JAVA_DIR}/NetworkOptions.java
|
||||
${GENERATED_JAVA_DIR}/StreamingMode.java
|
||||
${GENERATED_JAVA_DIR}/TransactionOptions.java
|
||||
${GENERATED_JAVA_DIR}/FDBException.java)
|
||||
|
||||
add_custom_command(OUTPUT ${GENERATED_JAVA_FILES}
|
||||
COMMAND ${MONO_EXECUTABLE} ${VEXILLOGRAPHER_EXE} ${CMAKE_SOURCE_DIR}/fdbclient/vexillographer/fdb.options java ${GENERATED_JAVA_DIR}
|
||||
DEPENDS ${CMAKE_SOURCE_DIR}/fdbclient/vexillographer/fdb.options vexillographer
|
||||
COMMENT "Generate Java options")
|
||||
add_custom_target(fdb_java_options DEPENDS ${GENERATED_JAVA_DIR}/StreamingMode.java)
|
||||
|
||||
set(SYSTEM_NAME "linux")
|
||||
if (APPLE)
|
||||
set(SYSTEM_NAME "osx")
|
||||
endif()
|
||||
|
||||
add_library(fdb_java SHARED fdbJNI.cpp)
|
||||
message(DEBUG ${JNI_INCLUDE_DIRS})
|
||||
message(DEBUG ${JNI_LIBRARIES})
|
||||
target_include_directories(fdb_java PRIVATE ${JNI_INCLUDE_DIRS})
|
||||
# libfdb_java.so is loaded by fdb-java.jar and doesn't need to depened on jvm shared libraries.
|
||||
target_link_libraries(fdb_java PRIVATE fdb_c)
|
||||
set_target_properties(fdb_java PROPERTIES
|
||||
LIBRARY_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/lib/${SYSTEM_NAME}/amd64/)
|
||||
|
||||
set(CMAKE_JAVA_COMPILE_FLAGS "-source" "1.8" "-target" "1.8")
|
||||
set(CMAKE_JNI_TARGET TRUE)
|
||||
set(JAR_VERSION "${FDB_MAJOR}.${FDB_MINOR}.${FDB_REVISION}")
|
||||
add_jar(fdb-java ${JAVA_BINDING_SRCS} ${GENERATED_JAVA_FILES}
|
||||
OUTPUT_DIR ${PROJECT_BINARY_DIR}/lib)
|
||||
add_dependencies(fdb-java fdb_java_options fdb_java)
|
||||
add_jar(foundationdb-tests SOURCES ${JAVA_TESTS_SRCS} INCLUDE_JARS fdb-java)
|
||||
add_dependencies(foundationdb-tests fdb_java_options)
|
||||
|
||||
install_jar(fdb-java DESTINATION ${FDB_SHARE_DIR}/java COMPONENT clients)
|
||||
install(TARGETS fdb_java DESTINATION ${FDB_LIB_DIR} COMPONENT clients)
|
|
@ -0,0 +1,44 @@
|
|||
set(SRCS
|
||||
fdb/__init__.py
|
||||
fdb/directory_impl.py
|
||||
fdb/impl.py
|
||||
fdb/locality.py
|
||||
fdb/six.py
|
||||
fdb/subspace_impl.py
|
||||
fdb/tuple.py)
|
||||
|
||||
if(APPLE)
|
||||
list(APPEND SRCS fdb/libfdb_c.dylib.pth)
|
||||
else()
|
||||
list(APPEND SRCS fdb/libfdb_c.so.pth)
|
||||
endif()
|
||||
|
||||
set(out_files "")
|
||||
foreach(src ${SRCS})
|
||||
get_filename_component(dirname ${src} DIRECTORY)
|
||||
get_filename_component(extname ${src} EXT)
|
||||
add_custom_command(OUTPUT ${PROJECT_BINARY_DIR}/bindings/python/${src}
|
||||
COMMAND mkdir -p ${PROJECT_BINARY_DIR}/bindings/python/${dirname}
|
||||
COMMAND cp ${src} ${PROJECT_BINARY_DIR}/bindings/python/${dirname}/
|
||||
DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/${src}
|
||||
WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
|
||||
COMMENT "copy ${src}")
|
||||
set(out_files "${out_files};${PROJECT_BINARY_DIR}/bindings/python/${src}")
|
||||
endforeach()
|
||||
add_custom_target(python_binding ALL DEPENDS ${out_files})
|
||||
|
||||
file(MAKE_DIRECTORY ${PROJECT_BINARY_DIR}/bindings/python/fdb)
|
||||
set(options_file ${PROJECT_BINARY_DIR}/bindings/python/fdb/fdboptions.py)
|
||||
add_custom_command(OUTPUT ${options_file}
|
||||
COMMAND ${MONO_EXECUTABLE} ${VEXILLOGRAPHER_EXE} ${CMAKE_SOURCE_DIR}/fdbclient/vexillographer/fdb.options python ${options_file}
|
||||
DEPENDS ${PROJECT_SOURCE_DIR}/fdbclient/vexillographer/fdb.options vexillographer
|
||||
COMMENT "Generate Python options")
|
||||
add_custom_target(fdb_python_options DEPENDS
|
||||
${options_file}
|
||||
${PROJECT_SOURCE_DIR}/fdbclient/vexillographer/fdb.options
|
||||
vexillographer)
|
||||
|
||||
add_dependencies(python_binding fdb_python_options)
|
||||
|
||||
set(out_files "${out_files};${options_file}")
|
||||
install(FILES ${out_files} DESTINATION ${FDB_PYTHON_INSTALL_DIR} COMPONENT clients)
|
|
@ -157,10 +157,10 @@ module DirectoryExtension
|
|||
exists = directory.exists?(inst.tr)
|
||||
children = exists ? directory.list(inst.tr) : []
|
||||
log_subspace = FDB::Subspace.new([@dir_index], inst.wait_and_pop)
|
||||
inst.tr[log_subspace['path']] = FDB::Tuple.pack(directory.path)
|
||||
inst.tr[log_subspace['layer']] = FDB::Tuple.pack([directory.layer])
|
||||
inst.tr[log_subspace['exists']] = FDB::Tuple.pack([exists ? 1 : 0])
|
||||
inst.tr[log_subspace['children']] = FDB::Tuple.pack(children)
|
||||
inst.tr[log_subspace['path'.encode('utf-8')]] = FDB::Tuple.pack(directory.path)
|
||||
inst.tr[log_subspace['layer'.encode('utf-8')]] = FDB::Tuple.pack([directory.layer])
|
||||
inst.tr[log_subspace['exists'.encode('utf-8')]] = FDB::Tuple.pack([exists ? 1 : 0])
|
||||
inst.tr[log_subspace['children'.encode('utf-8')]] = FDB::Tuple.pack(children)
|
||||
elsif inst.op == 'DIRECTORY_STRIP_PREFIX'
|
||||
str = inst.wait_and_pop
|
||||
throw "String #{str} does not start with raw prefix #{directory.key}" if !str.start_with?(directory.key)
|
||||
|
|
|
@ -9,7 +9,13 @@ RUN adduser --disabled-password --gecos '' fdb && chown -R fdb /opt && chmod -R
|
|||
|
||||
USER fdb
|
||||
|
||||
RUN cd /opt/ && wget http://downloads.sourceforge.net/project/boost/boost/1.52.0/boost_1_52_0.tar.bz2 -qO - | tar -xj
|
||||
# wget of bintray without forcing UTF-8 encoding results in 403 Forbidden
|
||||
RUN cd /opt/ && wget http://downloads.sourceforge.net/project/boost/boost/1.52.0/boost_1_52_0.tar.bz2 &&\
|
||||
wget --local-encoding=UTF-8 https://dl.bintray.com/boostorg/release/1.67.0/source/boost_1_67_0.tar.bz2 &&\
|
||||
echo '2684c972994ee57fc5632e03bf044746f6eb45d4920c343937a465fd67a5adba boost_1_67_0.tar.bz2' | sha256sum -c - &&\
|
||||
tar -xjf boost_1_52_0.tar.bz2 &&\
|
||||
tar -xjf boost_1_67_0.tar.bz2 &&\
|
||||
rm boost_1_52_0.tar.bz2 boost_1_67_0.tar.bz2
|
||||
|
||||
USER root
|
||||
|
||||
|
|
|
@ -0,0 +1,51 @@
|
|||
#!/usr/bin/env python3
|
||||
from argparse import ArgumentParser
|
||||
import os
|
||||
import json
|
||||
import re
|
||||
|
||||
def actorFile(actor: str, build: str, src: str):
|
||||
res = actor.replace(build, src, 1)
|
||||
res = res.replace('actor.g.cpp', 'actor.cpp')
|
||||
return res.replace('actor.g.h', 'actor.h')
|
||||
|
||||
def rreplace(s, old, new, occurrence = 1):
|
||||
li = s.rsplit(old, occurrence)
|
||||
return new.join(li)
|
||||
|
||||
|
||||
def actorCommand(cmd: str, build:str, src: str):
|
||||
r1 = re.compile('-c (.+)(actor\.g\.cpp)')
|
||||
m1 = r1.search(cmd)
|
||||
if m1 is None:
|
||||
return cmd
|
||||
cmd1 = r1.sub('\\1actor.cpp', cmd)
|
||||
return rreplace(cmd1, build, src)
|
||||
|
||||
|
||||
parser = ArgumentParser(description="Generates a new compile_commands.json for rtags+flow")
|
||||
parser.add_argument("-b", help="Build directory", dest="builddir", default=os.getcwd())
|
||||
parser.add_argument("-s", help="Build directory", dest="srcdir", default=os.getcwd())
|
||||
parser.add_argument("-o", help="Output file", dest="out", default="processed_compile_commands.json")
|
||||
parser.add_argument("input", help="compile_commands.json", default="compile_commands.json", nargs="?")
|
||||
args = parser.parse_args()
|
||||
|
||||
print("transform {} with build directory {}".format(args.input, args.builddir))
|
||||
|
||||
with open(args.input) as f:
|
||||
cmds = json.load(f)
|
||||
|
||||
result = []
|
||||
|
||||
for cmd in cmds:
|
||||
cmd['command'] = cmd['command'].replace(' -DNO_INTELLISENSE ', ' ')
|
||||
if cmd['file'].endswith('actor.g.cpp'):
|
||||
# here we need to rewrite the rule
|
||||
cmd['command'] = actorCommand(cmd['command'], args.builddir, args.srcdir)
|
||||
cmd['file'] = actorFile(cmd['file'], args.builddir, args.srcdir)
|
||||
result.append(cmd)
|
||||
else:
|
||||
result.append(cmd)
|
||||
|
||||
with open(args.out, 'w') as f:
|
||||
json.dump(result, f, indent=4)
|
|
@ -0,0 +1,3 @@
|
|||
#!/usr/bin/env bash
|
||||
|
||||
cat $1 | grep '<PackageName>' | sed -e 's,^[^>]*>,,' -e 's,<.*,,'
|
|
@ -0,0 +1,4 @@
|
|||
#!/usr/bin/env bash
|
||||
|
||||
cat $1 | grep '<Version>' | sed -e 's,^[^>]*>,,' -e 's,<.*,,'
|
||||
|
|
@ -0,0 +1,36 @@
|
|||
find_program(MONO_EXECUTABLE mono)
|
||||
find_program(MCS_EXECUTABLE dmcs)
|
||||
|
||||
if (NOT MCS_EXECUTABLE)
|
||||
find_program(MCS_EXECUTABLE mcs)
|
||||
endif()
|
||||
|
||||
set(MONO_FOUND FALSE CACHE INTERNAL "")
|
||||
|
||||
if (NOT MCS_EXECUTABLE)
|
||||
find_program(MCS_EXECUTABLE mcs)
|
||||
endif()
|
||||
|
||||
if (MONO_EXECUTABLE AND MCS_EXECUTABLE)
|
||||
set(MONO_FOUND True CACHE INTERNAL "")
|
||||
endif()
|
||||
|
||||
if (NOT MONO_FOUND)
|
||||
message(FATAL_ERROR "Could not find mono")
|
||||
endif()
|
||||
|
||||
set(ACTORCOMPILER_SRCS
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/flow/actorcompiler/ActorCompiler.cs
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/flow/actorcompiler/ActorParser.cs
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/flow/actorcompiler/ParseTree.cs
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/flow/actorcompiler/Program.cs
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/flow/actorcompiler/Properties/AssemblyInfo.cs)
|
||||
set(ACTOR_COMPILER_REFERENCES
|
||||
"-r:System,System.Core,System.Xml.Linq,System.Data.DataSetExtensions,Microsoft.CSharp,System.Data,System.Xml")
|
||||
|
||||
add_custom_command(OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/actorcompiler.exe
|
||||
COMMAND ${MCS_EXECUTABLE} ARGS ${ACTOR_COMPILER_REFERENCES} ${ACTORCOMPILER_SRCS} "-target:exe" "-out:actorcompiler.exe"
|
||||
DEPENDS ${ACTORCOMPILER_SRCS}
|
||||
COMMENT "Compile actor compiler" VERBATIM)
|
||||
add_custom_target(actorcompiler DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/actorcompiler.exe)
|
||||
set(actor_exe "${CMAKE_CURRENT_BINARY_DIR}/actorcompiler.exe")
|
|
@ -0,0 +1,26 @@
|
|||
find_package(Boost 1.67)
|
||||
|
||||
if(Boost_FOUND)
|
||||
add_library(boost_target INTERFACE)
|
||||
target_link_libraries(boost_target INTERFACE Boost::boost)
|
||||
else()
|
||||
include(ExternalProject)
|
||||
ExternalProject_add(boostProject
|
||||
URL "https://dl.bintray.com/boostorg/release/1.67.0/source/boost_1_67_0.tar.bz2"
|
||||
URL_HASH SHA256=2684c972994ee57fc5632e03bf044746f6eb45d4920c343937a465fd67a5adba
|
||||
CONFIGURE_COMMAND ""
|
||||
BUILD_COMMAND ""
|
||||
BUILD_IN_SOURCE ON
|
||||
INSTALL_COMMAND ""
|
||||
UPDATE_COMMAND ""
|
||||
BUILD_BYPRODUCTS <SOURCE_DIR>/boost/config.hpp)
|
||||
|
||||
ExternalProject_Get_property(boostProject SOURCE_DIR)
|
||||
|
||||
set(BOOST_INCLUDE_DIR ${SOURCE_DIR})
|
||||
message(STATUS "Boost include dir ${BOOST_INCLUDE_DIR}")
|
||||
|
||||
add_library(boost_target INTERFACE)
|
||||
add_dependencies(boost_target boostProject)
|
||||
target_include_directories(boost_target INTERFACE ${BOOST_INCLUDE_DIR})
|
||||
endif()
|
|
@ -0,0 +1,25 @@
|
|||
set(VEXILLOGRAPHER_SRCS
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/fdbclient/vexillographer/c.cs
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/fdbclient/vexillographer/cpp.cs
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/fdbclient/vexillographer/java.cs
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/fdbclient/vexillographer/python.cs
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/fdbclient/vexillographer/ruby.cs
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/fdbclient/vexillographer/vexillographer.cs)
|
||||
|
||||
set(VEXILLOGRAPHER_REFERENCES "-r:System,System.Core,System.Data,System.Xml,System.Xml.Linq")
|
||||
set(VEXILLOGRAPHER_EXE "${CMAKE_CURRENT_BINARY_DIR}/vexillographer.exe")
|
||||
add_custom_command(OUTPUT ${VEXILLOGRAPHER_EXE}
|
||||
COMMAND ${MCS_EXECUTABLE} ARGS ${VEXILLOGRAPHER_REFERENCES} ${VEXILLOGRAPHER_SRCS} -target:exe -out:${VEXILLOGRAPHER_EXE}
|
||||
DEPENDS ${VEXILLOGRAPHER_SRCS}
|
||||
COMMENT "Compile Vexillographer")
|
||||
add_custom_target(vexillographer DEPENDS ${VEXILLOGRAPHER_EXE})
|
||||
|
||||
set(ERROR_GEN_SRCS
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/flow/error_gen.cs)
|
||||
set(ERROR_GEN_REFERENCES "-r:System,System.Core,System.Data,System.Xml,System.Xml.Linq")
|
||||
set(ERROR_GEN_EXE "${CMAKE_CURRENT_BINARY_DIR}/error_gen.exe")
|
||||
add_custom_command (OUTPUT ${ERROR_GEN_EXE}
|
||||
COMMAND ${MCS_EXECUTABLE} ARGS ${ERROR_GEN_REFERENCES} ${ERROR_GEN_SRCS} -target:exe -out:${ERROR_GEN_EXE}
|
||||
DEPENDS ${ERROR_GEN_SRCS}
|
||||
COMMENT "Compile error_gen")
|
||||
add_custom_target(error_gen DEPENDS ${ERROR_GEN_EXE})
|
|
@ -0,0 +1,128 @@
|
|||
set(USE_GPERFTOOLS OFF CACHE BOOL "Use gperfools for profiling")
|
||||
set(PORTABLE_BINARY OFF CACHE BOOL "Create a binary that runs on older OS versions")
|
||||
set(USE_VALGRIND OFF CACHE BOOL "Compile for valgrind usage")
|
||||
set(USE_GOLD_LINKER OFF CACHE BOOL "Use gold linker")
|
||||
set(ALLOC_INSTRUMENTATION OFF CACHE BOOL "Instrument alloc")
|
||||
set(WITH_UNDODB OFF CACHE BOOL "Use rr or undodb")
|
||||
set(OPEN_FOR_IDE OFF CACHE BOOL "Open this in an IDE (won't compile/link)")
|
||||
set(FDB_RELEASE OFF CACHE BOOL "This is a building of a final release")
|
||||
|
||||
find_package(Threads REQUIRED)
|
||||
if(ALLOC_INSTRUMENTATION)
|
||||
add_compile_options(-DALLOC_INSTRUMENTATION)
|
||||
endif()
|
||||
if(WITH_UNDODB)
|
||||
add_compile_options(-DWITH_UNDODB)
|
||||
endif()
|
||||
if(DEBUG_TASKS)
|
||||
add_compile_options(-DDEBUG_TASKS)
|
||||
endif()
|
||||
|
||||
if(NDEBUG)
|
||||
add_compile_options(-DNDEBUG)
|
||||
endif()
|
||||
|
||||
if(FDB_RELEASE)
|
||||
add_compile_options(-DFDB_RELEASE)
|
||||
endif()
|
||||
|
||||
include_directories(${CMAKE_SOURCE_DIR})
|
||||
include_directories(${CMAKE_CURRENT_BINARY_DIR})
|
||||
if (NOT OPEN_FOR_IDE)
|
||||
add_definitions(-DNO_INTELLISENSE)
|
||||
endif()
|
||||
add_definitions(-DUSE_UCONTEXT)
|
||||
enable_language(ASM)
|
||||
|
||||
include(CheckFunctionExists)
|
||||
set(CMAKE_REQUIRED_INCLUDES stdlib.h malloc.h)
|
||||
set(CMAKE_REQUIRED_LIBRARIES c)
|
||||
|
||||
|
||||
if(WIN32)
|
||||
add_compile_options(/W3 /EHsc)
|
||||
else()
|
||||
if(USE_GOLD_LINKER)
|
||||
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fuse-ld=gold -Wl,--disable-new-dtags")
|
||||
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fuse-ld=gold -Wl,--disable-new-dtags")
|
||||
endif()
|
||||
|
||||
set(GCC NO)
|
||||
set(CLANG NO)
|
||||
if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang" OR "${CMAKE_CXX_COMPILER_ID}" STREQUAL "AppleClang")
|
||||
set(CLANG YES)
|
||||
else()
|
||||
# This is not a very good test. However, as we do not really support many architectures
|
||||
# this is good enough for now
|
||||
set(GCC YES)
|
||||
endif()
|
||||
|
||||
# we always compile with debug symbols. CPack will strip them out
|
||||
# and create a debuginfo rpm
|
||||
add_compile_options(-ggdb)
|
||||
set(USE_ASAN OFF CACHE BOOL "Compile with address sanitizer")
|
||||
if(USE_ASAN)
|
||||
add_compile_options(
|
||||
-fno-omit-frame-pointer -fsanitize=address
|
||||
-DUSE_ASAN)
|
||||
set(CMAKE_MODULE_LINKER_FLAGS "${CMAKE_MODULE_LINKER_FLAGS} -fno-omit-frame-pointer -fsanitize=address")
|
||||
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -fno-omit-frame-pointer -fsanitize=address")
|
||||
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fno-omit-frame-pointer -fsanitize=address ${CMAKE_THREAD_LIBS_INIT}")
|
||||
endif()
|
||||
|
||||
if(PORTABLE_BINARY)
|
||||
message(STATUS "Create a more portable binary")
|
||||
set(CMAKE_MODULE_LINKER_FLAGS "-static-libstdc++ -static-libgcc ${CMAKE_MODULE_LINKER_FLAGS}")
|
||||
set(CMAKE_SHARED_LINKER_FLAGS "-static-libstdc++ -static-libgcc ${CMAKE_SHARED_LINKER_FLAGS}")
|
||||
set(CMAKE_EXE_LINKER_FLAGS "-static-libstdc++ -static-libgcc ${CMAKE_EXE_LINKER_FLAGS}")
|
||||
endif()
|
||||
# Instruction sets we require to be supported by the CPU
|
||||
add_compile_options(
|
||||
-maes
|
||||
-mmmx
|
||||
-mavx
|
||||
-msse4.2)
|
||||
add_compile_options($<$<COMPILE_LANGUAGE:CXX>:-std=c++11>)
|
||||
if (USE_VALGRIND)
|
||||
add_compile_options(-DVALGRIND -DUSE_VALGRIND)
|
||||
endif()
|
||||
if (CLANG)
|
||||
if (APPLE)
|
||||
add_compile_options(-stdlib=libc++)
|
||||
endif()
|
||||
add_compile_options(
|
||||
-Wno-unknown-warning-option
|
||||
-Wno-dangling-else
|
||||
-Wno-sign-compare
|
||||
-Wno-comment
|
||||
-Wno-unknown-pragmas
|
||||
-Wno-delete-non-virtual-dtor
|
||||
-Wno-undefined-var-template
|
||||
-Wno-unused-value
|
||||
-Wno-tautological-pointer-compare
|
||||
-Wno-format)
|
||||
endif()
|
||||
if (CMAKE_GENERATOR STREQUAL Xcode)
|
||||
else()
|
||||
add_compile_options(-Werror)
|
||||
endif()
|
||||
add_compile_options($<$<BOOL:${GCC}>:-Wno-pragmas>)
|
||||
add_compile_options(-Wno-error=format
|
||||
-Wno-deprecated
|
||||
-fvisibility=hidden
|
||||
-Wreturn-type
|
||||
-fdiagnostics-color=always
|
||||
-fPIC)
|
||||
|
||||
if(CMAKE_COMPILER_IS_GNUCXX)
|
||||
set(USE_LTO OFF CACHE BOOL "Do link time optimization")
|
||||
if (USE_LTO)
|
||||
add_compile_options($<$<CONFIG:Release>:-flto>)
|
||||
set(CMAKE_AR "gcc-ar")
|
||||
set(CMAKE_C_ARCHIVE_CREATE "<CMAKE_AR> qcs <TARGET> <LINK_FLAGS> <OBJECTS>")
|
||||
set(CMAKE_C_ARCHIVE_FINISH true)
|
||||
set(CMAKE_CXX_ARCHIVE_CREATE "<CMAKE_AR> qcs <TARGET> <LINK_FLAGS> <OBJECTS>")
|
||||
set(CMAKE_CXX_ARCHIVE_FINISH true)
|
||||
endif()
|
||||
endif()
|
||||
endif()
|
|
@ -0,0 +1,16 @@
|
|||
find_package(Curses)
|
||||
include(FindPackageHandleStandardArgs)
|
||||
|
||||
if(CURSES_FOUND)
|
||||
find_path(Editline_INCLUDE_DIR editline/readline.h)
|
||||
find_library(Editline_LIBRARY edit)
|
||||
find_package_handle_standard_args(
|
||||
Editline DEFAULT_MSG Editline_LIBRARY Editline_INCLUDE_DIR)
|
||||
if(Editline_FOUND)
|
||||
set(Editline_LIBRARIES ${Editline_LIBRARY} ${CURSES_LIBRARIES})
|
||||
set(Editline_INCLUDE_DIRS ${Editline_INCLUDE_DIR} ${CURSES_INCLUDE_DIRS})
|
||||
mark_as_advanced(Editline_INCLUDE_DIR Editline_LIBRARY)
|
||||
endif()
|
||||
else()
|
||||
set(Editline_FOUND False)
|
||||
endif()
|
|
@ -0,0 +1,51 @@
|
|||
# Tries to find Gperftools.
|
||||
#
|
||||
# Usage of this module as follows:
|
||||
#
|
||||
# find_package(Gperftools)
|
||||
#
|
||||
# Variables used by this module, they can change the default behaviour and need
|
||||
# to be set before calling find_package:
|
||||
#
|
||||
# Gperftools_ROOT_DIR Set this variable to the root installation of
|
||||
# Gperftools if the module has problems finding
|
||||
# the proper installation path.
|
||||
#
|
||||
# Variables defined by this module:
|
||||
#
|
||||
# GPERFTOOLS_FOUND System has Gperftools libs/headers
|
||||
# GPERFTOOLS_LIBRARIES The Gperftools libraries (tcmalloc & profiler)
|
||||
# GPERFTOOLS_INCLUDE_DIR The location of Gperftools headers
|
||||
|
||||
find_library(GPERFTOOLS_TCMALLOC
|
||||
NAMES tcmalloc
|
||||
HINTS ${Gperftools_ROOT_DIR}/lib)
|
||||
|
||||
find_library(GPERFTOOLS_PROFILER
|
||||
NAMES profiler
|
||||
HINTS ${Gperftools_ROOT_DIR}/lib)
|
||||
|
||||
find_library(GPERFTOOLS_TCMALLOC_AND_PROFILER
|
||||
NAMES tcmalloc_and_profiler
|
||||
HINTS ${Gperftools_ROOT_DIR}/lib)
|
||||
|
||||
find_path(GPERFTOOLS_INCLUDE_DIR
|
||||
NAMES gperftools/heap-profiler.h
|
||||
HINTS ${Gperftools_ROOT_DIR}/include)
|
||||
|
||||
set(GPERFTOOLS_LIBRARIES ${GPERFTOOLS_TCMALLOC_AND_PROFILER})
|
||||
|
||||
include(FindPackageHandleStandardArgs)
|
||||
find_package_handle_standard_args(
|
||||
Gperftools
|
||||
DEFAULT_MSG
|
||||
GPERFTOOLS_LIBRARIES
|
||||
GPERFTOOLS_INCLUDE_DIR)
|
||||
|
||||
mark_as_advanced(
|
||||
Gperftools_ROOT_DIR
|
||||
GPERFTOOLS_TCMALLOC
|
||||
GPERFTOOLS_PROFILER
|
||||
GPERFTOOLS_TCMALLOC_AND_PROFILER
|
||||
GPERFTOOLS_LIBRARIES
|
||||
GPERFTOOLS_INCLUDE_DIR)
|
|
@ -0,0 +1,46 @@
|
|||
macro(actor_set varname srcs)
|
||||
set(${varname})
|
||||
foreach(src ${srcs})
|
||||
set(tmp "${src}")
|
||||
if(${src} MATCHES ".*\\.h")
|
||||
continue()
|
||||
elseif(${src} MATCHES ".*\\.actor\\.cpp")
|
||||
string(REPLACE ".actor.cpp" ".actor.g.cpp" tmp ${src})
|
||||
set(tmp "${CMAKE_CURRENT_BINARY_DIR}/${tmp}")
|
||||
endif()
|
||||
set(${varname} "${${varname}};${tmp}")
|
||||
endforeach()
|
||||
endmacro()
|
||||
|
||||
set(ACTOR_TARGET_COUNTER "0")
|
||||
macro(actor_compile target srcs)
|
||||
set(options DISABLE_ACTOR_WITHOUT_WAIT)
|
||||
set(oneValueArg)
|
||||
set(multiValueArgs)
|
||||
cmake_parse_arguments(ACTOR_COMPILE "${options}" "${oneValueArgs}" "${multiValueArgs}" "${ARGN}")
|
||||
set(_tmp_out "")
|
||||
foreach(src ${srcs})
|
||||
set(tmp "")
|
||||
if(${src} MATCHES ".*\\.actor\\.h")
|
||||
string(REPLACE ".actor.h" ".actor.g.h" tmp ${src})
|
||||
elseif(${src} MATCHES ".*\\.actor\\.cpp")
|
||||
string(REPLACE ".actor.cpp" ".actor.g.cpp" tmp ${src})
|
||||
endif()
|
||||
set(actor_compiler_flags "")
|
||||
if(ACTOR_COMPILE_DISABLE_ACTOR_WITHOUT_WAIT)
|
||||
set(actor_compiler_flags "--disable-actor-without-wait-error")
|
||||
endif()
|
||||
if(tmp)
|
||||
add_custom_command(OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/${tmp}"
|
||||
COMMAND ${MONO_EXECUTABLE} ${actor_exe} "${CMAKE_CURRENT_SOURCE_DIR}/${src}" "${CMAKE_CURRENT_BINARY_DIR}/${tmp}" ${actor_compiler_flags} > /dev/null
|
||||
DEPENDS "${CMAKE_CURRENT_SOURCE_DIR}/${src}" actorcompiler ${actor_exe}
|
||||
COMMENT "Compile actor: ${src}")
|
||||
set(_tmp_out "${_tmp_out};${CMAKE_CURRENT_BINARY_DIR}/${tmp}")
|
||||
endif()
|
||||
endforeach()
|
||||
MATH(EXPR ACTOR_TARGET_COUNTER "${ACTOR_TARGET_COUNTER}+1")
|
||||
add_custom_target(${target}_actors_${ACTOR_TARGET_COUNTER} DEPENDS ${_tmp_out})
|
||||
add_dependencies(${target} ${target}_actors_${ACTOR_TARGET_COUNTER})
|
||||
target_include_directories(${target} PUBLIC ${CMAKE_CURRENT_SOURCE_DIR})
|
||||
target_include_directories(${target} PUBLIC ${CMAKE_CURRENT_BINARY_DIR})
|
||||
endmacro()
|
|
@ -0,0 +1,221 @@
|
|||
if(NOT INSTALL_LAYOUT)
|
||||
set(DEFAULT_INSTALL_LAYOUT "STANDALONE")
|
||||
endif()
|
||||
set(INSTALL_LAYOUT "${DEFAULT_INSTALL_LAYOUT}"
|
||||
CACHE STRING "Installation directory layout. Options are: TARGZ (as in tar.gz installer), WIN, STANDALONE, RPM, DEB, OSX")
|
||||
|
||||
set(DIR_LAYOUT ${INSTALL_LAYOUT})
|
||||
if(DIR_LAYOUT MATCHES "TARGZ")
|
||||
set(DIR_LAYOUT "STANDALONE")
|
||||
endif()
|
||||
|
||||
if(UNIX)
|
||||
get_property(LIB64 GLOBAL PROPERTY FIND_LIBRARY_USE_LIB64_PATHS)
|
||||
set(FDB_CONFIG_DIR "etc/foundationdb")
|
||||
if("${LIB64}" STREQUAL "TRUE")
|
||||
set(LIBSUFFIX 64)
|
||||
else()
|
||||
set(LIBSUFFIX "")
|
||||
endif()
|
||||
set(FDB_LIB_NOSUFFIX "lib")
|
||||
if(DIR_LAYOUT MATCHES "STANDALONE")
|
||||
set(FDB_LIB_DIR "lib${LIBSUFFIX}")
|
||||
set(FDB_LIBEXEC_DIR "${FDB_LIB_DIR}")
|
||||
set(FDB_BIN_DIR "bin")
|
||||
set(FDB_SBIN_DIR "sbin")
|
||||
set(FDB_INCLUDE_INSTALL_DIR "include")
|
||||
set(FDB_PYTHON_INSTALL_DIR "${FDB_LIB_DIR}/python2.7/site-packages/fdb")
|
||||
set(FDB_SHARE_DIR "share")
|
||||
elseif(DIR_LAYOUT MATCHES "OSX")
|
||||
set(CPACK_GENERATOR productbuild)
|
||||
set(CPACK_PACKAGING_INSTALL_PREFIX "/")
|
||||
set(FDB_LIB_DIR "usr/local/lib")
|
||||
set(FDB_LIB_NOSUFFIX "usr/lib")
|
||||
set(FDB_LIBEXEC_DIR "usr/local/libexec")
|
||||
set(FDB_BIN_DIR "usr/local/bin")
|
||||
set(FDB_SBIN_DIR "usr/local/sbin")
|
||||
set(FDB_INCLUDE_INSTALL_DIR "usr/local/include")
|
||||
set(FDB_PYTHON_INSTALL_DIR "Library/Python/2.7/site-packages/fdb")
|
||||
set(FDB_SHARE_DIR "usr/local/share")
|
||||
elseif(DIR_LAYOUT MATCHES "WIN")
|
||||
# TODO
|
||||
else()
|
||||
# for deb and rpm
|
||||
if(INSTALL_LAYOUT MATCHES "RPM")
|
||||
set(CPACK_GENERATOR "RPM")
|
||||
else()
|
||||
# DEB
|
||||
set(CPACK_GENERATOR "DEB")
|
||||
endif()
|
||||
set(CMAKE_INSTALL_PREFIX "/")
|
||||
set(CPACK_PACKAGING_INSTALL_PREFIX "/")
|
||||
set(FDB_LIB_DIR "usr/lib${LIBSUFFIX}")
set(FDB_LIB_NOSUFFIX "usr/lib")
set(FDB_LIBEXEC_DIR "${FDB_LIB_DIR}")
set(FDB_BIN_DIR "usr/bin")
set(FDB_SBIN_DIR "usr/sbin")
set(FDB_INCLUDE_INSTALL_DIR "usr/include")
set(FDB_PYTHON_INSTALL_DIR "${FDB_LIB_DIR}/python2.7/site-packages/fdb")
set(FDB_SHARE_DIR "usr/share")
endif()
endif()

################################################################################
# Version information
################################################################################

string(REPLACE "." ";" FDB_VERSION_LIST ${FDB_VERSION_PLAIN})
list(GET FDB_VERSION_LIST 0 FDB_MAJOR)
list(GET FDB_VERSION_LIST 1 FDB_MINOR)
list(GET FDB_VERSION_LIST 2 FDB_PATCH)

################################################################################
# General CPack configuration
################################################################################

include(InstallRequiredSystemLibraries)
set(CPACK_PACKAGE_NAME "foundationdb")
set(CPACK_PACKAGE_VENDOR "FoundationDB <fdb-dist@apple.com>")
set(CPACK_PACKAGE_VERSION_MAJOR ${FDB_MAJOR})
set(CPACK_PACKAGE_VERSION_MINOR ${FDB_MINOR})
set(CPACK_PACKAGE_VERSION_PATCH ${FDB_PATCH})
set(CPACK_PACKAGE_DESCRIPTION_FILE ${CMAKE_SOURCE_DIR}/packaging/description)
set(CPACK_PACKAGE_DESCRIPTION_SUMMARY
  "FoundationDB is a scalable, fault-tolerant, ordered key-value store with full ACID transactions.")
set(CPACK_PACKAGE_ICON ${CMAKE_SOURCE_DIR}/packaging/foundationdb.ico)
set(CPACK_PACKAGE_CONTACT "The FoundationDB Community")
set(CPACK_COMPONENT_server_DEPENDS clients)
if (INSTALL_LAYOUT MATCHES "OSX")
  set(CPACK_RESOURCE_FILE_README ${CMAKE_SOURCE_DIR}/packaging/osx/resources/conclusion.rtf)
  set(CPACK_PRODUCTBUILD_RESOURCES_DIR ${CMAKE_SOURCE_DIR}/packaging/osx/resources)
else()
  set(CPACK_RESOURCE_FILE_LICENSE ${CMAKE_SOURCE_DIR}/LICENSE)
  set(CPACK_RESOURCE_FILE_README ${CMAKE_SOURCE_DIR}/README.md)
endif()

################################################################################
# Configuration for RPM
################################################################################

if(INSTALL_LAYOUT MATCHES "RPM")
  set(CPACK_RPM_server_USER_FILELIST
    "%config(noreplace) /etc/foundationdb/foundationdb.conf"
    "%attr(0700,foundationdb,foundationdb) /var/log/foundationdb"
    "%attr(0700, foundationdb, foundationdb) /var/lib/foundationdb")
  set(CPACK_RPM_EXCLUDE_FROM_AUTO_FILELIST_ADDITION
    "/usr/sbin"
    "/usr/share/java"
    "/usr/lib64/python2.7"
    "/usr/lib64/python2.7/site-packages"
    "/var"
    "/var/log"
    "/var/lib"
    "/lib"
    "/lib/systemd"
    "/lib/systemd/system"
    "/etc/rc.d/init.d")
  set(CPACK_RPM_DEBUGINFO_PACKAGE ON)
  set(CPACK_RPM_BUILD_SOURCE_DIRS_PREFIX /usr/src)
  set(CPACK_RPM_COMPONENT_INSTALL ON)
  set(CPACK_RPM_clients_PRE_INSTALL_SCRIPT_FILE
    ${CMAKE_SOURCE_DIR}/packaging/rpm/scripts/preclients.sh)
  set(CPACK_RPM_clients_POST_INSTALL_SCRIPT_FILE
    ${CMAKE_SOURCE_DIR}/packaging/rpm/scripts/postclients.sh)
  set(CPACK_RPM_server_PRE_INSTALL_SCRIPT_FILE
    ${CMAKE_SOURCE_DIR}/packaging/rpm/scripts/preserver.sh)
  set(CPACK_RPM_server_POST_INSTALL_SCRIPT_FILE
    ${CMAKE_SOURCE_DIR}/packaging/rpm/scripts/postserver.sh)
  set(CPACK_RPM_server_PRE_UNINSTALL_SCRIPT_FILE
    ${CMAKE_SOURCE_DIR}/packaging/rpm/scripts/preunserver.sh)
  set(CPACK_RPM_server_PACKAGE_REQUIRES
    "foundationdb-clients = ${FDB_MAJOR}.${FDB_MINOR}.${FDB_PATCH}")
endif()

################################################################################
# Configuration for DEB
################################################################################

if(INSTALL_LAYOUT MATCHES "DEB")
  set(CPACK_DEB_COMPONENT_INSTALL ON)
  set(CPACK_DEBIAN_PACKAGE_SECTION "database")
  set(CPACK_DEBIAN_ENABLE_COMPONENT_DEPENDS ON)

  set(CPACK_DEBIAN_server_PACKAGE_DEPENDS "adduser, libc6 (>= 2.11), python (>= 2.6)")
  set(CPACK_DEBIAN_clients_PACKAGE_DEPENDS "adduser, libc6 (>= 2.11)")
  set(CPACK_DEBIAN_PACKAGE_HOMEPAGE "https://www.foundationdb.org")
  set(CPACK_DEBIAN_clients_PACKAGE_CONTROL_EXTRA
    ${CMAKE_SOURCE_DIR}/packaging/deb/DEBIAN-foundationdb-clients/postinst)
  set(CPACK_DEBIAN_server_PACKAGE_CONTROL_EXTRA
    ${CMAKE_SOURCE_DIR}/packaging/deb/DEBIAN-foundationdb-server/conffiles
    ${CMAKE_SOURCE_DIR}/packaging/deb/DEBIAN-foundationdb-server/preinst
    ${CMAKE_SOURCE_DIR}/packaging/deb/DEBIAN-foundationdb-server/postinst
    ${CMAKE_SOURCE_DIR}/packaging/deb/DEBIAN-foundationdb-server/prerm
    ${CMAKE_SOURCE_DIR}/packaging/deb/DEBIAN-foundationdb-server/postrm)
endif()

################################################################################
# Server configuration
################################################################################

string(RANDOM LENGTH 8 description1)
string(RANDOM LENGTH 8 description2)
set(CLUSTER_DESCRIPTION1 ${description1} CACHE STRING "Cluster description")
set(CLUSTER_DESCRIPTION2 ${description2} CACHE STRING "Cluster description")

install(FILES ${CMAKE_SOURCE_DIR}/packaging/foundationdb.conf
  DESTINATION ${FDB_CONFIG_DIR}
  COMPONENT server)
install(FILES ${CMAKE_SOURCE_DIR}/packaging/argparse.py
  DESTINATION "usr/lib/foundationdb"
  COMPONENT server)
install(FILES ${CMAKE_SOURCE_DIR}/packaging/make_public.py
  DESTINATION "usr/lib/foundationdb")
if((INSTALL_LAYOUT MATCHES "RPM") OR (INSTALL_LAYOUT MATCHES "DEB"))
  file(MAKE_DIRECTORY ${CMAKE_BINARY_DIR}/packaging/foundationdb
    ${CMAKE_BINARY_DIR}/packaging/rpm)
  install(
    DIRECTORY ${CMAKE_BINARY_DIR}/packaging/foundationdb
    DESTINATION "var/log"
    COMPONENT server)
  install(
    DIRECTORY ${CMAKE_BINARY_DIR}/packaging/foundationdb
    DESTINATION "var/lib"
    COMPONENT server)
  execute_process(
    COMMAND pidof systemd
    RESULT_VARIABLE IS_SYSTEMD
    OUTPUT_QUIET
    ERROR_QUIET)
  if(IS_SYSTEMD EQUAL "0")
    configure_file(${CMAKE_SOURCE_DIR}/packaging/rpm/foundationdb.service
      ${CMAKE_BINARY_DIR}/packaging/rpm/foundationdb.service)
    install(FILES ${CMAKE_BINARY_DIR}/packaging/rpm/foundationdb.service
      DESTINATION "lib/systemd/system"
      COMPONENT server)
  else()
    if(INSTALL_LAYOUT MATCHES "RPM")
      install(FILES ${CMAKE_SOURCE_DIR}/packaging/rpm/foundationdb-init
        DESTINATION "etc/rc.d/init.d"
        RENAME "foundationdb"
        COMPONENT server)
    else()
      install(FILES ${CMAKE_SOURCE_DIR}/packaging/deb/foundationdb-init
        DESTINATION "etc/init.d"
        RENAME "foundationdb"
        COMPONENT server)
    endif()
  endif()
endif()

################################################################################
# Helper Macros
################################################################################

macro(install_symlink filepath sympath component)
  install(CODE "execute_process(COMMAND ${CMAKE_COMMAND} -E create_symlink ${filepath} ${sympath})" COMPONENT ${component})
  install(CODE "message(\"-- Created symlink: ${sympath} -> ${filepath}\")")
endmacro()
macro(install_mkdir dirname component)
  install(CODE "execute_process(COMMAND ${CMAKE_COMMAND} -E make_directory ${dirname})" COMPONENT ${component})
  install(CODE "message(\"-- Created directory: ${dirname}\")")
endmacro()

@ -0,0 +1,2 @@
using @BOOST_TOOLSET@ : : @CMAKE_CXX_COMPILER@ : @BOOST_ADDITIONAL_COMPILE_OPTIOINS@ ;
using python : @PYTHON_VERSION_MAJOR@.@PYTHON_VERSION_MINOR@ : @PYTHON_EXECUTABLE@ : @PYTHON_INCLUDE_DIRS@ ;

@ -9,6 +9,7 @@ Administration
   :hidden:
   :titlesonly:

   configuration
   moving-a-cluster
   tls

@ -18,6 +19,8 @@ This document covers the administration of an existing FoundationDB cluster. We

To administer an externally accessible cluster, you need to understand basic system tasks. You should begin with how to :ref:`start and stop the database <administration-running-foundationdb>`. Next, you should review management of a cluster, including :ref:`adding <adding-machines-to-a-cluster>` and :ref:`removing <removing-machines-from-a-cluster>` machines, and monitoring :ref:`cluster status <administration-monitoring-cluster-status>` and the basic :ref:`server processes <administration_fdbmonitor>`. You should be familiar with :ref:`managing trace files <administration-managing-trace-files>` and :ref:`other administrative concerns <administration-other-administrative-concerns>`. Finally, you should know how to :ref:`uninstall <administration-removing>` or :ref:`upgrade <upgrading-foundationdb>` the database.

FoundationDB also provides a number of different :doc:`configuration <configuration>` options which you should know about when setting up a FoundationDB database.

.. _administration-running-foundationdb:

Starting and stopping

@ -100,7 +100,7 @@ FoundationDB may return the following error codes from API functions. If you nee
+-----------------------------------------------+-----+--------------------------------------------------------------------------------+
| transaction_invalid_version                   | 2020| Transaction does not have a valid commit version                               |
+-----------------------------------------------+-----+--------------------------------------------------------------------------------+
| transaction_read_only                         | 2021| Transaction is read-only and therefore does not have a commit version          |
| no_commit_version                             | 2021| Transaction is read-only and therefore does not have a commit version          |
+-----------------------------------------------+-----+--------------------------------------------------------------------------------+
| environment_variable_network_option_failed    | 2022| Environment variable network option could not be set                           |
+-----------------------------------------------+-----+--------------------------------------------------------------------------------+

@ -101,6 +101,15 @@ Set the process using ``configure [proxies|resolvers|logs]=<N>``, where ``<N>``

For recommendations on appropriate values for process types in large clusters, see :ref:`guidelines-process-class-config`.

fileconfigure
-------------

The ``fileconfigure`` command is an alternative to the ``configure`` command which changes the configuration of the database based on a JSON document. The command loads a JSON document from the provided file, and changes the database configuration to match the contents of the JSON document.

The format should be the same as the value of the ``configuration`` entry in status JSON without ``excluded_servers`` or ``coordinators_count``. Its syntax is ``fileconfigure [new] <FILENAME>``.

The ``new`` option, if present, initializes a new database with the given configuration rather than changing the configuration of an existing one.

coordinators
------------

@ -355,22 +355,21 @@ FoundationDB will never use processes on the same machine for the replication of
FoundationDB replicates data to three machines, and at least three available machines are required to make progress. This is the recommended mode for a cluster of five or more machines in a single datacenter.

``three_data_hall`` mode
FoundationDB replicates data to three machines, and at least three available machines are required to make progress. Every piece of data that has been committed to storage servers
will be replicated onto three different data halls, and the cluster will
remain available after losing a single data hall and one machine in another
data hall.
FoundationDB stores data in triplicate, with one copy on a storage server in each of three data halls. The transaction logs are replicated four times, with two data halls containing two replicas apiece. Four available machines (two in each of two data halls) are therefore required to make progress. This configuration enables the cluster to remain available after losing a single data hall and one machine in another data hall.

Datacenter-aware mode
---------------------

In addition to the more commonly used modes listed above, this version of FoundationDB has support for redundancy across multiple datacenters. Although data will always be triple replicated in this mode, it may not be replicated across all datacenters.
In addition to the more commonly used modes listed above, this version of FoundationDB has support for redundancy across multiple datacenters.

.. note:: When using the datacenter-aware mode, all ``fdbserver`` processes should be passed a valid datacenter identifier on the command line.

``three_datacenter`` mode
*(for 5+ machines in 3 datacenters)*

FoundationDB attempts to replicate data across three datacenters and will stay up with only two available. Data is replicated 6 times. For maximum availability, you should use five coordination servers: two in two of the datacenters and one in the third datacenter.
FoundationDB attempts to replicate data across three datacenters and will stay up with only two available. Data is replicated 6 times. Transaction logs are stored in the same configuration as the ``three_data_hall`` mode, so commit latencies are tied to the latency between datacenters. For maximum availability, you should use five coordination servers: two in two of the datacenters and one in the third datacenter.

.. warning:: ``three_datacenter`` mode is not compatible with region configuration.

Changing redundancy mode
------------------------

@ -478,7 +477,7 @@ FoundationDB recommends the ext4 filesystem. (However, see :ref:`Platform Issues
* Copy-on-write type filesystems (such as Btrfs) will likely have poor performance with FoundationDB.

Ext4 filesystems should be mounted with mount options ``default,noatime,discard``.
Ext4 filesystems should be mounted with mount options ``defaults,noatime,discard``.

.. note ::
The ``noatime`` option disables updating of access times when reading files, an unneeded feature for FoundationDB that increases write activity on the disk. The discard option enables `TRIM <http://en.wikipedia.org/wiki/TRIM>`_ support, allowing the operating system to efficiently inform the SSD of erased blocks, maintaining high write speed and increasing drive lifetime.

@ -516,6 +515,236 @@ When creating a partition for use with FoundationDB using the standard Linux fdi

For an SSD with a single partition, the partition should typically begin at sector 2048 (512 byte sectors yields 1024 KiB alignment).

.. _configuration-configuring-regions:

Configuring regions
===================

.. note:: In the following text, the term ``datacenter`` is used to denote unique locations that are failure independent from one another. Cloud providers generally expose this property of failure independence with Availability Zones.

Regions configuration enables automatic failover between two datacenters, without adding WAN latency for commits, while still maintaining all the consistency properties FoundationDB provides.

This is made possible by combining two features. The first is asynchronous replication between two regions. Because commits are reported successful before they are durable in the remote region, the remote region will slightly lag behind the primary. This is similar to ``fdbdr``, except that the asynchronous replication is done within a single cluster instead of between different FoundationDB clusters.

The second feature is the ability to add one or more synchronous replicas of the mutation log in a different datacenter. Because this datacenter is only holding a transient copy of the mutations being committed to the database, only a few FoundationDB processes are required to fulfill this role. If the primary datacenter fails, the external mutation log replicas will still allow access to the most recent commits. This allows the lagging remote replica to catch up to the primary. Once the remote replica has applied all the mutations, it can start accepting new commits without suffering any data loss.

An example configuration would be four total datacenters, two on the east coast, two on the west coast, with a preference for fast write latencies from the west coast. One datacenter on each coast would be sized to store a full copy of the data. The second datacenter on each coast would only have a few FoundationDB processes.

While everything is healthy, writes need to be made durable in both west coast datacenters before a commit can succeed. The geographic proximity of the two datacenters minimizes the additional commit latency. Reads can be served from either region, and clients can get data from whichever region is closer. Getting a read version from the east coast region will still require communicating with a west coast datacenter. Clients can cache read versions if they can tolerate reading stale data to avoid waiting on read versions, as in the sketch below.
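
The following is a minimal sketch of such read-version caching using the Python bindings (the helper name, the ``max_staleness`` threshold, and the cadence are illustrative, and retry handling for errors such as ``transaction_too_old`` is omitted)::

    import time
    import fdb

    fdb.api_version(600)
    db = fdb.open()  # uses the default cluster file

    _cache = {"version": None, "at": 0.0}

    def stale_read(key, max_staleness=2.0):
        tr = db.create_transaction()
        now = time.time()
        if _cache["version"] is not None and now - _cache["at"] < max_staleness:
            # Reuse a recent read version and skip the round trip to obtain one.
            # Reading at an old version may fail and require a retry with a fresh version.
            tr.set_read_version(_cache["version"])
        else:
            # Pay for a fresh read version and remember it for later calls.
            _cache["version"] = tr.get_read_version().wait()
            _cache["at"] = now
        return tr[key]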

If either west coast datacenter fails, the last few mutations will be propagated from the remaining west coast datacenter to the east coast. At this point, FoundationDB will start accepting commits on the east coast. Once the west coast comes back online, the system will automatically start copying all the data that was committed to the east coast back to the west coast replica. Once the west coast has caught up, the system will automatically switch back to accepting writes from the west coast again.

The west coast mutation logs will maintain their copies of all committed mutations until they have been applied by the east coast datacenter. In the event that the east coast has failed for long enough that the west coast mutation logs no longer have enough disk space to continue storing the mutations, FoundationDB can be requested to drop the east coast replica completely. This decision is not automatic, and requires a manual change to the configuration. The west coast database will then act as a single datacenter database until the east coast comes back online. Because the east coast datacenter was completely dropped from the configuration, to bring the east coast back online FoundationDB will have to copy all the data between the regions.

If a region failover occurs, clients will generally only see a latency spike of a few seconds.

Specifying datacenters
----------------------

To use region configurations, all processes in the cluster need to specify in which datacenter they are located. This can be done on the command line with either ``--locality_dcid`` or ``--datacenter_id``. This datacenter identifier is case sensitive.

Clients should also specify their datacenter with the database option ``datacenter_id``, as in the sketch below. If a client does not specify its datacenter, it will use latency estimates to balance traffic between the two regions. This will result in about 5% of requests being served by the remote region, so reads will suffer from high tail latencies.
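
A minimal sketch of setting this option from the Python bindings (the datacenter identifier ``WC1`` is illustrative and must match the identifiers passed to the ``fdbserver`` processes)::

    import fdb

    fdb.api_version(600)
    db = fdb.open()  # uses the default cluster file

    # Route this client's traffic to its local region.
    db.options.set_datacenter_id(b"WC1")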

Changing the region configuration
---------------------------------

To change the region configuration, use the ``fileconfigure`` command in ``fdbcli``. For example::

    user@host$ fdbcli
    Using cluster file `/etc/foundationdb/fdb.cluster'.

    The database is available.

    Welcome to the fdbcli. For help, type `help'.
    fdb> fileconfigure regions.json
    Configuration changed.

Regions are configured in FoundationDB as a json document. For example::

    "regions":[{
        "datacenters":[{
            "id":"WC1",
            "priority":1,
            "satellite":1
        }],
        "satellite_redundancy_mode":"one_satellite_double",
        "satellite_logs":2
    }]

The ``regions`` object in the json document should be an array. Each element of the array describes the configuration of an individual region.

Each region is described using an object that contains an array of ``datacenters``. Each region may also optionally provide a ``satellite_redundancy_mode`` and ``satellite_logs``.

Each datacenter is described with an object that contains the ``id`` and ``priority`` of that datacenter. An ``id`` may be any unique alphanumeric string. Datacenters which hold a full replica of the data are referred to as primary datacenters. Datacenters that only store transaction logs are referred to as satellite datacenters. To specify a datacenter is a satellite, it needs to include ``"satellite" : 1``. The priorities of satellite datacenters are only compared to other satellite datacenters in the same region. The priorities of primary datacenters are only compared to other primary datacenters.

.. warning:: In release 6.0, FoundationDB supports at most two regions.

Each region can only have one primary datacenter. A negative priority for a datacenter denotes that the system should not recover the transaction subsystem in that datacenter. The region with the transaction subsystem is referred to as the active region.

One primary datacenter must have a priority >= 0. The cluster will make the region with the highest priority the active region. If two datacenters have equal priority the cluster will make one of them the active region arbitrarily.

The ``satellite_redundancy_mode`` is configured per region, and specifies how many copies of each mutation should be replicated to the satellite datacenters.

``one_satellite_single`` mode

Keep one copy of the mutation log in the satellite datacenter with the highest priority. If the highest priority satellite is unavailable it will put the transaction log in the satellite datacenter with the next highest priority.

``one_satellite_double`` mode

Keep two copies of the mutation log in the satellite datacenter with the highest priority.

``one_satellite_triple`` mode

Keep three copies of the mutation log in the satellite datacenter with the highest priority.

``two_satellite_safe`` mode

Keep two copies of the mutation log in each of the two satellite datacenters with the highest priorities, for a total of four copies of each mutation. This mode will protect against the simultaneous loss of both the primary and one of the satellite datacenters. If only one satellite is available, it will fall back to only storing two copies of the mutation log in the remaining datacenter.

``two_satellite_fast`` mode

Keep two copies of the mutation log in each of the two satellite datacenters with the highest priorities, for a total of four copies of each mutation. FoundationDB will only synchronously wait for one of the two satellite datacenters to make the mutations durable before considering a commit successful. This will reduce tail latencies caused by network issues between datacenters. If only one satellite is available, it will fall back to only storing two copies of the mutation log in the remaining datacenter.

.. warning:: In release 6.0 this is implemented by waiting for all but 2 of the transaction logs. If ``satellite_logs`` is set to more than 4, FoundationDB will still need to wait for replies from both datacenters.

The number of ``satellite_logs`` is also configured per region. It represents the desired number of transaction logs that should be recruited in the satellite datacenters. The satellite transaction logs do slightly less work than the primary datacenter transaction logs. So while the ratio of logs to replicas should be kept roughly equal in the primary datacenter and the satellites, slightly fewer satellite transaction logs may be the optimal balance for performance.

The number of replicas in each region is controlled by the redundancy level. For example, ``double`` mode will put 2 replicas in each region, for a total of 4 replicas.

Asymmetric configurations
-------------------------

The fact that satellite policies are configured per region allows for asymmetric configurations. For example, FoundationDB can have a three datacenter setup where there are two datacenters on the west coast (WC1, WC2) and one datacenter on the east coast (EC1). The west coast region can be set as the preferred active region by setting the priority of its primary datacenter higher than the east coast datacenter. The west coast region should have a satellite policy configured, so that when it is active, FoundationDB is making mutations durable in both west coast datacenters. In the rare event that one of the west coast datacenters has failed, FoundationDB will fail over to the east coast datacenter. Because this region does not have a satellite datacenter, the mutations will only be made durable in one datacenter while the transaction subsystem is located there. However, this is justifiable because the region will only be active if a datacenter has already been lost.

This is the region configuration that implements the example::

    "regions":[{
        "datacenters":[{
            "id":"WC1",
            "priority":1,
        },{
            "id":"WC2",
            "priority":0,
            "satellite":1
        }],
        "satellite_redundancy_mode":"one_satellite_double"
    },{
        "datacenters":[{
            "id":"EC1",
            "priority":0,
        }]
    }]

Changing the usable_regions configuration
-----------------------------------------

The ``usable_regions`` configuration option determines the number of regions which have a replica of the database.

.. warning:: In release 6.0, ``usable_regions`` can only be configured to the values of ``1`` or ``2``, and a maximum of 2 regions can be defined in the ``regions`` json object.

Increasing the ``usable_regions`` will start copying data from the active region to the remote region. Reducing the ``usable_regions`` will immediately drop the replicas in the remote region. During these changes, only one primary datacenter can have priority >= 0. This enforces exactly which region will lose its replica.

Changing the log routers configuration
--------------------------------------

FoundationDB is architected to copy every mutation between regions exactly once. This copying is done by a new role called the log router. When a mutation is committed, it will be randomly assigned to one log router, which will be responsible for copying it across the WAN.

This log router will pull the mutation from exactly one of the transaction logs. This means a single socket will be used to copy mutations across the WAN per log router. Because of this, if the latency between regions is large, the bandwidth-delay product means that the number of log routers could limit the throughput at which mutations can be copied across the WAN. This can be mitigated by either configuring more log routers, or increasing the TCP window scale option.

To keep the work evenly distributed on the transaction logs, the number of log routers should be a multiple of the number of transaction logs, as in the sizing sketch below.

The ``log_routers`` configuration option determines the number of log routers recruited in the remote region.
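
The following is a rough sizing sketch for this guidance (the helper name and the throughput figures are purely illustrative assumptions, not measured values)::

    import math

    def suggest_log_routers(transaction_logs, wan_mbps_needed, per_router_mbps):
        """Smallest multiple of the transaction log count that covers the
        desired cross-WAN throughput, given an assumed per-router throughput."""
        minimum = math.ceil(wan_mbps_needed / per_router_mbps)
        return math.ceil(minimum / transaction_logs) * transaction_logs

    # e.g. 8 transaction logs, 2000 Mbps of WAN traffic, ~300 Mbps per router -> 8
    print(suggest_log_routers(8, 2000, 300))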

Migrating a database to use a region configuration
--------------------------------------------------

To configure an existing database to use regions, follow these steps:

1. Ensure all processes have their dcid locality set on the command line. All processes should exist in the same datacenter. If converting from a ``three_datacenter`` configuration, first configure down to using a single datacenter by changing the replication mode. Then exclude the machines in all datacenters but the one that will become the initial active region.

2. Configure the region configuration. The datacenter with all the existing processes should have a non-negative priority. The region which will eventually store the remote replica should be added with a negative priority.

3. Add processes to the cluster in the remote region. These processes will not take data yet, but need to be added to the cluster. If they are added before the region configuration is set they will be assigned data like any other FoundationDB process, which will lead to high latencies.

4. Configure ``usable_regions=2``. This will cause the cluster to start copying data between the regions.

5. Watch ``status`` and wait until data movement is complete. This signals that the remote datacenter has a full replica of all of the data in the database.

6. Change the region configuration to have a non-negative priority for the primary datacenters in both regions. This will enable automatic failover between regions.

Handling datacenter failures
----------------------------

When a primary datacenter fails, the cluster will go into a degraded state. It will recover to the other region and continue accepting commits; however, the mutations bound for the other side will build up on the transaction logs. Eventually, the disks on the primary's transaction logs will fill up, so the database cannot be left in this condition indefinitely.

.. warning:: While a datacenter has failed, the maximum write throughput of the cluster will be roughly 1/3 of normal performance. This is because the transaction logs need to store all of the mutations being committed, so that once the other datacenter comes back online, it can replay history to catch back up.

To drop the dead datacenter, follow these steps:

1. Configure the region configuration so that the dead datacenter has a negative priority.

2. Configure ``usable_regions=1``.

If you are running in a configuration without a satellite datacenter, or you have lost all machines in a region simultaneously, the ``force_recovery_with_data_loss`` command from ``fdbcli`` allows you to force a recovery to the other region. This will discard the portion of the mutation log which did not make it across the WAN. Once the database has recovered, immediately follow the previous steps to drop the dead region the normal way.

.. warning:: In 6.0 the ``force_recovery_with_data_loss`` command from ``fdbcli`` can cause data inconsistencies if it is used when processes from both non-satellite datacenters are still in the cluster. In general this command has not been tested to the same degree as the rest of the codebase, and should only be used in extreme emergencies.

Region change safety
--------------------

The steps described above for both adding and removing replicas are enforced by ``fdbcli``. The following are the specific conditions checked by ``fdbcli``:

* You cannot change the ``regions`` configuration while also changing ``usable_regions``.

* You can only change ``usable_regions`` when exactly one region has priority >= 0.

* When ``usable_regions`` > 1, all regions with priority >= 0 must have a full replica of the data.

* All storage servers must be in one of the regions specified by the region configuration.

Monitoring
----------

It is important to ensure the remote replica does not fall too far behind the active replica. To fail over between regions, all of the mutations need to be flushed from the active replica to the remote replica. If the remote replica is too far behind, this can take a very long time. The version difference between the datacenters is available in ``status json`` as ``datacenter_version_difference``. This number should be less than 5 million. A large datacenter version difference could indicate that more log routers are needed. It could also be caused by network issues between the regions. If the difference becomes too large the remote replica should be dropped, similar to a datacenter outage that goes on too long.

Because of asymmetric write latencies in the two regions, it is important to route client traffic to the currently active region. The current active region is written in the system key space as the key ``\xff/primaryDatacenter``. Clients can read and watch this key after setting the ``read_system_keys`` transaction option, as in the sketch below.
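
A minimal sketch of reading and watching this key from the Python bindings (the key and option names come from the text above; everything else is illustrative)::

    import fdb

    fdb.api_version(600)
    db = fdb.open()  # uses the default cluster file

    @fdb.transactional
    def get_primary_dc(tr):
        tr.options.set_read_system_keys()
        return tr[b"\xff/primaryDatacenter"]

    @fdb.transactional
    def watch_primary_dc(tr):
        tr.options.set_read_system_keys()
        # The watch stays valid after the transaction commits.
        return tr.watch(b"\xff/primaryDatacenter")

    active = get_primary_dc(db)   # behaves like a byte string, e.g. b"WC1"
    watch_primary_dc(db).wait()   # block until the active region changes
    active = get_primary_dc(db)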

Choosing coordinators
---------------------

Choosing coordinators for a multi-region configuration provides its own set of challenges. A majority of coordinators need to be alive for the cluster to be available. There are two common coordinator setups that allow a cluster to survive the simultaneous loss of a datacenter and one additional machine.

The first is five coordinators in five different datacenters. The second is nine total coordinators spread across three datacenters. There is some additional benefit to spreading the coordinators across regions rather than datacenters. This is because if an entire region fails, it is still possible to recover to the other region if you are willing to accept a small amount of data loss. However, if you have lost a majority of coordinators, this becomes much more difficult.

Additionally, if a datacenter fails and then the second datacenter in the region fails 30 seconds later, we can generally survive this scenario. The second datacenter only needs to be alive long enough to copy the tail of the mutation log across the WAN. However, if your coordinators are in this second datacenter, you will still experience an outage.

These considerations mean that best practice is to put three coordinators in the main datacenters of each of the two regions, and then put three additional coordinators in a third region.

Comparison to other multiple datacenter configurations
------------------------------------------------------

Region configuration provides very similar functionality to ``fdbdr``.

If you are not using satellite datacenters, the main benefit of a region configuration compared to ``fdbdr`` is that each datacenter is able to restore replication even after losing all copies of a key range. If we simultaneously lose two storage servers in a double replicated cluster, with ``fdbdr`` we would be forced to fail over to the remote region. With region configuration, the cluster will automatically copy the missing key range from the remote replica back to the primary datacenter.

The main disadvantage of using a region configuration is that the total number of processes we can support in a single region is around half when compared against ``fdbdr``. This is because we have processes for both regions in the same cluster, and some singleton components like the failure monitor will have to do twice as much work. In ``fdbdr``, there are two separate clusters, one for each region, so the total number of processes can scale to about twice as large as using a region configuration.

Region configuration is better in almost all ways than the ``three_datacenter`` replication mode. Region configuration gives the same ability to survive the loss of one datacenter; however, we only need to store two full replicas of the database instead of three. Region configuration is more efficient with how it sends mutations across the WAN. The only reason to use ``three_datacenter`` replication is if low latency reads from all three locations are required.

Known limitations
-----------------

The 6.0 release still has a number of rough edges related to region configuration. This is a collection of all the issues that have been pointed out in the sections above. These issues should be significantly improved in future releases of FoundationDB:

* FoundationDB supports replicating data to at most two regions.

* ``two_satellite_fast`` does not hide latency properly when configured with more than 4 satellite transaction logs.

* While a datacenter has failed, the maximum write throughput of the cluster will be roughly 1/3 of normal performance.

* ``force_recovery_with_data_loss`` can cause data inconsistencies if it is used when processes from both non-satellite datacenters are still in the cluster.

.. _guidelines-process-class-config:

Guidelines for setting process class

@ -685,7 +685,7 @@ For example, suppose you have a polling loop that checks keys for changes once a
    value = read_keys(db)
    for k in keys:
        if cache[k] != value[k]:
            yield value[k]
            yield (k, value[k])
            cache[k] = value[k]
    time.sleep(1)

@ -706,7 +706,7 @@ With watches, you can eliminate the sleep and perform new reads only after a cha
    value, watches = watch_keys(db)
    for k in keys:
        if cache[k] != value[k]:
            yield value[k]
            yield (k, value[k])
            cache[k] = value[k]
    fdb.Future.wait_for_any(*watches)

@ -10,38 +10,38 @@ macOS

The macOS installation package is supported on macOS 10.7+. It includes the client and (optionally) the server.

* `FoundationDB-6.0.15.pkg <https://www.foundationdb.org/downloads/6.0.15/macOS/installers/FoundationDB-6.0.15.pkg>`_
* `FoundationDB-6.0.18.pkg <https://www.foundationdb.org/downloads/6.0.18/macOS/installers/FoundationDB-6.0.18.pkg>`_

Ubuntu
------

The Ubuntu packages are supported on 64-bit Ubuntu 12.04+, but beware of the Linux kernel bug in Ubuntu 12.x.

* `foundationdb-clients-6.0.15-1_amd64.deb <https://www.foundationdb.org/downloads/6.0.15/ubuntu/installers/foundationdb-clients_6.0.15-1_amd64.deb>`_
* `foundationdb-server-6.0.15-1_amd64.deb <https://www.foundationdb.org/downloads/6.0.15/ubuntu/installers/foundationdb-server_6.0.15-1_amd64.deb>`_ (depends on the clients package)
* `foundationdb-clients-6.0.18-1_amd64.deb <https://www.foundationdb.org/downloads/6.0.18/ubuntu/installers/foundationdb-clients_6.0.18-1_amd64.deb>`_
* `foundationdb-server-6.0.18-1_amd64.deb <https://www.foundationdb.org/downloads/6.0.18/ubuntu/installers/foundationdb-server_6.0.18-1_amd64.deb>`_ (depends on the clients package)

RHEL/CentOS EL6
---------------

The RHEL/CentOS EL6 packages are supported on 64-bit RHEL/CentOS 6.x.

* `foundationdb-clients-6.0.15-1.el6.x86_64.rpm <https://www.foundationdb.org/downloads/6.0.15/rhel6/installers/foundationdb-clients-6.0.15-1.el6.x86_64.rpm>`_
* `foundationdb-server-6.0.15-1.el6.x86_64.rpm <https://www.foundationdb.org/downloads/6.0.15/rhel6/installers/foundationdb-server-6.0.15-1.el6.x86_64.rpm>`_ (depends on the clients package)
* `foundationdb-clients-6.0.18-1.el6.x86_64.rpm <https://www.foundationdb.org/downloads/6.0.18/rhel6/installers/foundationdb-clients-6.0.18-1.el6.x86_64.rpm>`_
* `foundationdb-server-6.0.18-1.el6.x86_64.rpm <https://www.foundationdb.org/downloads/6.0.18/rhel6/installers/foundationdb-server-6.0.18-1.el6.x86_64.rpm>`_ (depends on the clients package)

RHEL/CentOS EL7
---------------

The RHEL/CentOS EL7 packages are supported on 64-bit RHEL/CentOS 7.x.

* `foundationdb-clients-6.0.15-1.el7.x86_64.rpm <https://www.foundationdb.org/downloads/6.0.15/rhel7/installers/foundationdb-clients-6.0.15-1.el7.x86_64.rpm>`_
* `foundationdb-server-6.0.15-1.el7.x86_64.rpm <https://www.foundationdb.org/downloads/6.0.15/rhel7/installers/foundationdb-server-6.0.15-1.el7.x86_64.rpm>`_ (depends on the clients package)
* `foundationdb-clients-6.0.18-1.el7.x86_64.rpm <https://www.foundationdb.org/downloads/6.0.18/rhel7/installers/foundationdb-clients-6.0.18-1.el7.x86_64.rpm>`_
* `foundationdb-server-6.0.18-1.el7.x86_64.rpm <https://www.foundationdb.org/downloads/6.0.18/rhel7/installers/foundationdb-server-6.0.18-1.el7.x86_64.rpm>`_ (depends on the clients package)

Windows
-------

The Windows installer is supported on 64-bit Windows XP and later. It includes the client and (optionally) the server.

* `foundationdb-6.0.15-x64.msi <https://www.foundationdb.org/downloads/6.0.15/windows/installers/foundationdb-6.0.15-x64.msi>`_
* `foundationdb-6.0.18-x64.msi <https://www.foundationdb.org/downloads/6.0.18/windows/installers/foundationdb-6.0.18-x64.msi>`_

API Language Bindings
=====================

@ -58,18 +58,18 @@ On macOS and Windows, the FoundationDB Python API bindings are installed as part

If you need to use the FoundationDB Python API from other Python installations or paths, download the Python package:

* `foundationdb-6.0.15.tar.gz <https://www.foundationdb.org/downloads/6.0.15/bindings/python/foundationdb-6.0.15.tar.gz>`_
* `foundationdb-6.0.18.tar.gz <https://www.foundationdb.org/downloads/6.0.18/bindings/python/foundationdb-6.0.18.tar.gz>`_

Ruby 1.9.3/2.0.0+
-----------------

* `fdb-6.0.15.gem <https://www.foundationdb.org/downloads/6.0.15/bindings/ruby/fdb-6.0.15.gem>`_
* `fdb-6.0.18.gem <https://www.foundationdb.org/downloads/6.0.18/bindings/ruby/fdb-6.0.18.gem>`_

Java 8+
-------

* `fdb-java-6.0.15.jar <https://www.foundationdb.org/downloads/6.0.15/bindings/java/fdb-java-6.0.15.jar>`_
* `fdb-java-6.0.15-javadoc.jar <https://www.foundationdb.org/downloads/6.0.15/bindings/java/fdb-java-6.0.15-javadoc.jar>`_
* `fdb-java-6.0.18.jar <https://www.foundationdb.org/downloads/6.0.18/bindings/java/fdb-java-6.0.18.jar>`_
* `fdb-java-6.0.18-javadoc.jar <https://www.foundationdb.org/downloads/6.0.18/bindings/java/fdb-java-6.0.18-javadoc.jar>`_

Go 1.1+
-------

@ -2,6 +2,46 @@
Release Notes
#############

6.0.18
======

Fixes
-----

* Backup metadata could falsely indicate that a backup is not usable. `(PR #1007) <https://github.com/apple/foundationdb/pull/1007>`_
* Blobstore request failures could cause backup expire and delete operations to skip some files. `(PR #1007) <https://github.com/apple/foundationdb/pull/1007>`_
* Blobstore request failures could cause restore to fail to apply some files. `(PR #1007) <https://github.com/apple/foundationdb/pull/1007>`_
* Storage servers with large amounts of data would pause for a short period of time after rebooting. `(PR #1001) <https://github.com/apple/foundationdb/pull/1001>`_
* The client library could leak memory when a thread died. `(PR #1011) <https://github.com/apple/foundationdb/pull/1011>`_

Features
--------

* Added the ability to specify versions as version-days ago from latest log in backup. `(PR #1007) <https://github.com/apple/foundationdb/pull/1007>`_

6.0.17
======

Fixes
-----

* Existing backups did not make progress when upgraded to 6.0.16. `(PR #962) <https://github.com/apple/foundationdb/pull/962>`_

6.0.16
======

Performance
-----------

* Added a new backup folder scheme which results in far fewer kv range folders. `(PR #939) <https://github.com/apple/foundationdb/pull/939>`_

Fixes
-----

* Blobstore REST client attempted to create buckets that already existed. `(PR #923) <https://github.com/apple/foundationdb/pull/923>`_
* DNS would fail if IPv6 responses were received. `(PR #945) <https://github.com/apple/foundationdb/pull/945>`_
* Backup expiration would occasionally fail due to an incorrect assert. `(PR #926) <https://github.com/apple/foundationdb/pull/926>`_

6.0.15
======

@ -26,6 +26,8 @@ These documents explain the engineering design of FoundationDB, with detailed in

* :doc:`testing`: FoundationDB uses a combined regime of robust simulation, live performance testing, and hardware-based failure testing to meet exacting standards of correctness and performance.

* :doc:`kv-architecture` provides a description of every major role a process in FoundationDB can fulfill.

.. toctree::
   :maxdepth: 1
   :titlesonly:

@ -42,3 +44,4 @@ These documents explain the engineering design of FoundationDB, with detailed in
   fault-tolerance
   flow
   testing
   kv-architecture

@ -70,6 +70,8 @@ The value for each setting can be specified in more than one way. The actual va
2. The value of the environment variable, if one has been set;
3. The default value

For the password, rather than using the command-line option, it is recommended to use the environment variable ``FDB_TLS_PASSWORD``, as command-line options are more visible to other processes running on the same host.

As with all other command-line options to ``fdbserver``, the TLS settings can be specified in the :ref:`[fdbserver] section of the configuration file <foundationdb-conf-fdbserver>`.

The settings for certificate file, key file, peer verification, password and CA file are interpreted by the software.

@ -99,6 +101,17 @@ There is no default password. If no password is specified, it is assumed that th
Parameters and client bindings
------------------------------

Automatic TLS certificate refresh
---------------------------------

The TLS certificate will be automatically refreshed on a configurable cadence. The server will inspect the CA, certificate, and key files in the specified locations periodically, and will begin using the new versions if the following criteria are met:

* They are changed, judging by the last modified time.
* They are valid certificates.
* The key file matches the certificate file.

The refresh rate is controlled by ``--knob_tls_cert_refresh_delay_seconds``. Setting it to 0 will disable the refresh.

The default LibreSSL-based implementation
=========================================

@ -0,0 +1 @@
${CLUSTER_DESCRIPTION1}:${CLUSTER_DESCRIPTION1}@127.0.0.1:4500

@ -0,0 +1,25 @@
set(FDBBACKUP_SRCS
backup.actor.cpp)

actor_set(FDBBACKUP_BUILD "${FDBBACKUP_SRCS}")
add_executable(fdbbackup "${FDBBACKUP_BUILD}")
actor_compile(fdbbackup "${FDBBACKUP_SRCS}")
target_link_libraries(fdbbackup PRIVATE fdbclient)

install(TARGETS fdbbackup DESTINATION ${FDB_BIN_DIR} COMPONENT clients)
install(PROGRAMS $<TARGET_FILE:fdbbackup>
DESTINATION ${FDB_LIB_DIR}/foundationdb/backup_agent
RENAME backup_agent
COMPONENT clients)
install(PROGRAMS $<TARGET_FILE:fdbbackup>
DESTINATION ${FDB_BIN_DIR}
RENAME fdbrestore
COMPONENT clients)
install(PROGRAMS $<TARGET_FILE:fdbbackup>
DESTINATION ${FDB_BIN_DIR}
RENAME dr_agent
COMPONENT clients)
install(PROGRAMS $<TARGET_FILE:fdbbackup>
DESTINATION ${FDB_BIN_DIR}
RENAME fdbdr
COMPONENT clients)

@ -77,7 +77,7 @@ enum enumProgramExe {
};

enum enumBackupType {
BACKUP_UNDEFINED=0, BACKUP_START, BACKUP_STATUS, BACKUP_ABORT, BACKUP_WAIT, BACKUP_DISCONTINUE, BACKUP_PAUSE, BACKUP_RESUME, BACKUP_EXPIRE, BACKUP_DELETE, BACKUP_DESCRIBE, BACKUP_LIST
BACKUP_UNDEFINED=0, BACKUP_START, BACKUP_STATUS, BACKUP_ABORT, BACKUP_WAIT, BACKUP_DISCONTINUE, BACKUP_PAUSE, BACKUP_RESUME, BACKUP_EXPIRE, BACKUP_DELETE, BACKUP_DESCRIBE, BACKUP_LIST, BACKUP_DUMP
};

enum enumDBType {

@ -92,8 +92,10 @@ enum enumRestoreType {
enum {
// Backup constants
OPT_DESTCONTAINER, OPT_SNAPSHOTINTERVAL, OPT_ERRORLIMIT, OPT_NOSTOPWHENDONE,
OPT_EXPIRE_BEFORE_VERSION, OPT_EXPIRE_BEFORE_DATETIME, OPT_EXPIRE_RESTORABLE_AFTER_VERSION, OPT_EXPIRE_RESTORABLE_AFTER_DATETIME,
OPT_EXPIRE_BEFORE_VERSION, OPT_EXPIRE_BEFORE_DATETIME, OPT_EXPIRE_DELETE_BEFORE_DAYS,
OPT_EXPIRE_RESTORABLE_AFTER_VERSION, OPT_EXPIRE_RESTORABLE_AFTER_DATETIME, OPT_EXPIRE_MIN_RESTORABLE_DAYS,
OPT_BASEURL, OPT_BLOB_CREDENTIALS, OPT_DESCRIBE_DEEP, OPT_DESCRIBE_TIMESTAMPS,
OPT_DUMP_BEGIN, OPT_DUMP_END,

// Backup and Restore constants
OPT_TAGNAME, OPT_BACKUPKEYS, OPT_WAITFORDONE,

@ -110,7 +112,9 @@ enum {
//DB constants
OPT_SOURCE_CLUSTER,
OPT_DEST_CLUSTER,
OPT_CLEANUP
OPT_CLEANUP,

OPT_TRACE_FORMAT
};

CSimpleOpt::SOption g_rgAgentOptions[] = {

@ -119,7 +123,6 @@ CSimpleOpt::SOption g_rgAgentOptions[] = {
#endif
{ OPT_CLUSTERFILE, "-C", SO_REQ_SEP },
{ OPT_CLUSTERFILE, "--cluster_file", SO_REQ_SEP },
{ OPT_TRACE_LOG_GROUP, "--loggroup", SO_REQ_SEP },
{ OPT_KNOB, "--knob_", SO_REQ_SEP },
{ OPT_VERSION, "--version", SO_NONE },
{ OPT_VERSION, "-v", SO_NONE },

@ -127,6 +130,8 @@ CSimpleOpt::SOption g_rgAgentOptions[] = {
{ OPT_QUIET, "--quiet", SO_NONE },
{ OPT_TRACE, "--log", SO_NONE },
{ OPT_TRACE_DIR, "--logdir", SO_REQ_SEP },
{ OPT_TRACE_FORMAT, "--trace_format", SO_REQ_SEP },
{ OPT_TRACE_LOG_GROUP, "--loggroup", SO_REQ_SEP },
{ OPT_CRASHONERROR, "--crash", SO_NONE },
{ OPT_LOCALITY, "--locality_", SO_REQ_SEP },
{ OPT_MEMLIMIT, "-m", SO_REQ_SEP },

@ -162,6 +167,8 @@ CSimpleOpt::SOption g_rgBackupStartOptions[] = {
{ OPT_DRYRUN, "--dryrun", SO_NONE },
{ OPT_TRACE, "--log", SO_NONE },
{ OPT_TRACE_DIR, "--logdir", SO_REQ_SEP },
{ OPT_TRACE_FORMAT, "--trace_format", SO_REQ_SEP },
{ OPT_TRACE_LOG_GROUP, "--loggroup", SO_REQ_SEP },
{ OPT_QUIET, "-q", SO_NONE },
{ OPT_QUIET, "--quiet", SO_NONE },
{ OPT_VERSION, "--version", SO_NONE },

@ -191,6 +198,8 @@ CSimpleOpt::SOption g_rgBackupStatusOptions[] = {
{ OPT_TAGNAME, "--tagname", SO_REQ_SEP },
{ OPT_TRACE, "--log", SO_NONE },
{ OPT_TRACE_DIR, "--logdir", SO_REQ_SEP },
{ OPT_TRACE_FORMAT, "--trace_format", SO_REQ_SEP },
{ OPT_TRACE_LOG_GROUP, "--loggroup", SO_REQ_SEP },
{ OPT_VERSION, "--version", SO_NONE },
{ OPT_VERSION, "-v", SO_NONE },
{ OPT_QUIET, "-q", SO_NONE },

@ -216,6 +225,8 @@ CSimpleOpt::SOption g_rgBackupAbortOptions[] = {
{ OPT_TAGNAME, "--tagname", SO_REQ_SEP },
{ OPT_TRACE, "--log", SO_NONE },
{ OPT_TRACE_DIR, "--logdir", SO_REQ_SEP },
{ OPT_TRACE_FORMAT, "--trace_format", SO_REQ_SEP },
{ OPT_TRACE_LOG_GROUP, "--loggroup", SO_REQ_SEP },
{ OPT_QUIET, "-q", SO_NONE },
{ OPT_QUIET, "--quiet", SO_NONE },
{ OPT_VERSION, "--version", SO_NONE },

@ -243,6 +254,8 @@ CSimpleOpt::SOption g_rgBackupDiscontinueOptions[] = {
{ OPT_WAITFORDONE, "--waitfordone", SO_NONE },
{ OPT_TRACE, "--log", SO_NONE },
{ OPT_TRACE_DIR, "--logdir", SO_REQ_SEP },
{ OPT_TRACE_FORMAT, "--trace_format", SO_REQ_SEP },
{ OPT_TRACE_LOG_GROUP, "--loggroup", SO_REQ_SEP },
{ OPT_QUIET, "-q", SO_NONE },
{ OPT_QUIET, "--quiet", SO_NONE },
{ OPT_VERSION, "--version", SO_NONE },

@ -270,6 +283,8 @@ CSimpleOpt::SOption g_rgBackupWaitOptions[] = {
{ OPT_NOSTOPWHENDONE, "--no-stop-when-done",SO_NONE },
{ OPT_TRACE, "--log", SO_NONE },
{ OPT_TRACE_DIR, "--logdir", SO_REQ_SEP },
{ OPT_TRACE_FORMAT, "--trace_format", SO_REQ_SEP },
{ OPT_TRACE_LOG_GROUP, "--loggroup", SO_REQ_SEP },
{ OPT_QUIET, "-q", SO_NONE },
{ OPT_QUIET, "--quiet", SO_NONE },
{ OPT_VERSION, "--version", SO_NONE },

@ -293,6 +308,8 @@ CSimpleOpt::SOption g_rgBackupPauseOptions[] = {
{ OPT_CLUSTERFILE, "--cluster_file", SO_REQ_SEP },
{ OPT_TRACE, "--log", SO_NONE },
{ OPT_TRACE_DIR, "--logdir", SO_REQ_SEP },
{ OPT_TRACE_FORMAT, "--trace_format", SO_REQ_SEP },
{ OPT_TRACE_LOG_GROUP, "--loggroup", SO_REQ_SEP },
{ OPT_QUIET, "-q", SO_NONE },
{ OPT_QUIET, "--quiet", SO_NONE },
{ OPT_VERSION, "--version", SO_NONE },

@ -318,6 +335,8 @@ CSimpleOpt::SOption g_rgBackupExpireOptions[] = {
{ OPT_DESTCONTAINER, "--destcontainer", SO_REQ_SEP },
{ OPT_TRACE, "--log", SO_NONE },
{ OPT_TRACE_DIR, "--logdir", SO_REQ_SEP },
{ OPT_TRACE_FORMAT, "--trace_format", SO_REQ_SEP },
{ OPT_TRACE_LOG_GROUP, "--loggroup", SO_REQ_SEP },
{ OPT_QUIET, "-q", SO_NONE },
{ OPT_QUIET, "--quiet", SO_NONE },
{ OPT_VERSION, "-v", SO_NONE },

@ -337,6 +356,8 @@ CSimpleOpt::SOption g_rgBackupExpireOptions[] = {
{ OPT_EXPIRE_RESTORABLE_AFTER_DATETIME, "--restorable_after_timestamp", SO_REQ_SEP },
{ OPT_EXPIRE_BEFORE_VERSION, "--expire_before_version", SO_REQ_SEP },
{ OPT_EXPIRE_BEFORE_DATETIME, "--expire_before_timestamp", SO_REQ_SEP },
{ OPT_EXPIRE_MIN_RESTORABLE_DAYS, "--min_restorable_days", SO_REQ_SEP },
{ OPT_EXPIRE_DELETE_BEFORE_DAYS, "--delete_before_days", SO_REQ_SEP },

SO_END_OF_OPTIONS
};

@ -349,6 +370,8 @@ CSimpleOpt::SOption g_rgBackupDeleteOptions[] = {
{ OPT_DESTCONTAINER, "--destcontainer", SO_REQ_SEP },
{ OPT_TRACE, "--log", SO_NONE },
{ OPT_TRACE_DIR, "--logdir", SO_REQ_SEP },
{ OPT_TRACE_FORMAT, "--trace_format", SO_REQ_SEP },
{ OPT_TRACE_LOG_GROUP, "--loggroup", SO_REQ_SEP },
{ OPT_QUIET, "-q", SO_NONE },
{ OPT_QUIET, "--quiet", SO_NONE },
{ OPT_VERSION, "-v", SO_NONE },

@ -376,6 +399,8 @@ CSimpleOpt::SOption g_rgBackupDescribeOptions[] = {
{ OPT_DESTCONTAINER, "--destcontainer", SO_REQ_SEP },
{ OPT_TRACE, "--log", SO_NONE },
{ OPT_TRACE_DIR, "--logdir", SO_REQ_SEP },
{ OPT_TRACE_FORMAT, "--trace_format", SO_REQ_SEP },
{ OPT_TRACE_LOG_GROUP, "--loggroup", SO_REQ_SEP },
{ OPT_QUIET, "-q", SO_NONE },
{ OPT_QUIET, "--quiet", SO_NONE },
{ OPT_VERSION, "-v", SO_NONE },

@ -395,6 +420,36 @@ CSimpleOpt::SOption g_rgBackupDescribeOptions[] = {
SO_END_OF_OPTIONS
};

CSimpleOpt::SOption g_rgBackupDumpOptions[] = {
#ifdef _WIN32
{ OPT_PARENTPID, "--parentpid", SO_REQ_SEP },
#endif
{ OPT_CLUSTERFILE, "-C", SO_REQ_SEP },
{ OPT_CLUSTERFILE, "--cluster_file", SO_REQ_SEP },
{ OPT_DESTCONTAINER, "-d", SO_REQ_SEP },
{ OPT_DESTCONTAINER, "--destcontainer", SO_REQ_SEP },
{ OPT_TRACE, "--log", SO_NONE },
{ OPT_TRACE_DIR, "--logdir", SO_REQ_SEP },
{ OPT_TRACE_LOG_GROUP, "--loggroup", SO_REQ_SEP },
{ OPT_QUIET, "-q", SO_NONE },
{ OPT_QUIET, "--quiet", SO_NONE },
{ OPT_VERSION, "-v", SO_NONE },
{ OPT_VERSION, "--version", SO_NONE },
{ OPT_CRASHONERROR, "--crash", SO_NONE },
{ OPT_MEMLIMIT, "-m", SO_REQ_SEP },
{ OPT_MEMLIMIT, "--memory", SO_REQ_SEP },
{ OPT_HELP, "-?", SO_NONE },
{ OPT_HELP, "-h", SO_NONE },
{ OPT_HELP, "--help", SO_NONE },
{ OPT_DEVHELP, "--dev-help", SO_NONE },
{ OPT_BLOB_CREDENTIALS, "--blob_credentials", SO_REQ_SEP },
{ OPT_KNOB, "--knob_", SO_REQ_SEP },
{ OPT_DUMP_BEGIN, "--begin", SO_REQ_SEP },
{ OPT_DUMP_END, "--end", SO_REQ_SEP },

SO_END_OF_OPTIONS
};

CSimpleOpt::SOption g_rgBackupListOptions[] = {
#ifdef _WIN32
{ OPT_PARENTPID, "--parentpid", SO_REQ_SEP },

@ -403,6 +458,8 @@ CSimpleOpt::SOption g_rgBackupListOptions[] = {
{ OPT_BASEURL, "--base_url", SO_REQ_SEP },
{ OPT_TRACE, "--log", SO_NONE },
{ OPT_TRACE_DIR, "--logdir", SO_REQ_SEP },
{ OPT_TRACE_FORMAT, "--trace_format", SO_REQ_SEP },
{ OPT_TRACE_LOG_GROUP, "--loggroup", SO_REQ_SEP },
{ OPT_QUIET, "-q", SO_NONE },
{ OPT_QUIET, "--quiet", SO_NONE },
{ OPT_VERSION, "-v", SO_NONE },

@ -440,6 +497,8 @@ CSimpleOpt::SOption g_rgRestoreOptions[] = {
{ OPT_DBVERSION, "-v", SO_REQ_SEP },
{ OPT_TRACE, "--log", SO_NONE },
{ OPT_TRACE_DIR, "--logdir", SO_REQ_SEP },
{ OPT_TRACE_FORMAT, "--trace_format", SO_REQ_SEP },
{ OPT_TRACE_LOG_GROUP, "--loggroup", SO_REQ_SEP },
{ OPT_QUIET, "-q", SO_NONE },
{ OPT_QUIET, "--quiet", SO_NONE },
{ OPT_DRYRUN, "-n", SO_NONE },

@ -473,6 +532,8 @@ CSimpleOpt::SOption g_rgDBAgentOptions[] = {
{ OPT_QUIET, "--quiet", SO_NONE },
{ OPT_TRACE, "--log", SO_NONE },
{ OPT_TRACE_DIR, "--logdir", SO_REQ_SEP },
{ OPT_TRACE_FORMAT, "--trace_format", SO_REQ_SEP },
{ OPT_TRACE_LOG_GROUP, "--loggroup", SO_REQ_SEP },
{ OPT_CRASHONERROR, "--crash", SO_NONE },
{ OPT_LOCALITY, "--locality_", SO_REQ_SEP },
{ OPT_MEMLIMIT, "-m", SO_REQ_SEP },

@ -499,6 +560,8 @@ CSimpleOpt::SOption g_rgDBStartOptions[] = {
{ OPT_BACKUPKEYS, "--keys", SO_REQ_SEP },
{ OPT_TRACE, "--log", SO_NONE },
{ OPT_TRACE_DIR, "--logdir", SO_REQ_SEP },
{ OPT_TRACE_FORMAT, "--trace_format", SO_REQ_SEP },
{ OPT_TRACE_LOG_GROUP, "--loggroup", SO_REQ_SEP },
{ OPT_QUIET, "-q", SO_NONE },
{ OPT_QUIET, "--quiet", SO_NONE },
{ OPT_VERSION, "--version", SO_NONE },

@ -528,6 +591,8 @@ CSimpleOpt::SOption g_rgDBStatusOptions[] = {
{ OPT_TAGNAME, "--tagname", SO_REQ_SEP },
{ OPT_TRACE, "--log", SO_NONE },
{ OPT_TRACE_DIR, "--logdir", SO_REQ_SEP },
{ OPT_TRACE_FORMAT, "--trace_format", SO_REQ_SEP },
{ OPT_TRACE_LOG_GROUP, "--loggroup", SO_REQ_SEP },
{ OPT_VERSION, "--version", SO_NONE },
{ OPT_VERSION, "-v", SO_NONE },
{ OPT_QUIET, "-q", SO_NONE },

@ -555,6 +620,8 @@ CSimpleOpt::SOption g_rgDBSwitchOptions[] = {
{ OPT_TAGNAME, "--tagname", SO_REQ_SEP },
{ OPT_TRACE, "--log", SO_NONE },
{ OPT_TRACE_DIR, "--logdir", SO_REQ_SEP },
{ OPT_TRACE_FORMAT, "--trace_format", SO_REQ_SEP },
{ OPT_TRACE_LOG_GROUP, "--loggroup", SO_REQ_SEP },
{ OPT_QUIET, "-q", SO_NONE },
{ OPT_QUIET, "--quiet", SO_NONE },
{ OPT_VERSION, "--version", SO_NONE },

@ -583,6 +650,8 @@ CSimpleOpt::SOption g_rgDBAbortOptions[] = {
{ OPT_TAGNAME, "--tagname", SO_REQ_SEP },
{ OPT_TRACE, "--log", SO_NONE },
{ OPT_TRACE_DIR, "--logdir", SO_REQ_SEP },
{ OPT_TRACE_FORMAT, "--trace_format", SO_REQ_SEP },
{ OPT_TRACE_LOG_GROUP, "--loggroup", SO_REQ_SEP },
{ OPT_QUIET, "-q", SO_NONE },
{ OPT_QUIET, "--quiet", SO_NONE },
{ OPT_VERSION, "--version", SO_NONE },

@ -608,6 +677,8 @@ CSimpleOpt::SOption g_rgDBPauseOptions[] = {
{ OPT_DEST_CLUSTER, "--destination", SO_REQ_SEP },
{ OPT_TRACE, "--log", SO_NONE },
{ OPT_TRACE_DIR, "--logdir", SO_REQ_SEP },
{ OPT_TRACE_FORMAT, "--trace_format", SO_REQ_SEP },
{ OPT_TRACE_LOG_GROUP, "--loggroup", SO_REQ_SEP },
{ OPT_QUIET, "-q", SO_NONE },
{ OPT_QUIET, "--quiet", SO_NONE },
{ OPT_VERSION, "--version", SO_NONE },

@ -677,6 +748,9 @@ static void printAgentUsage(bool devhelp) {
" --logdir PATH Specifes the output directory for trace files. If\n"
" unspecified, defaults to the current directory. Has\n"
" no effect unless --log is specified.\n");
printf(" --trace_format FORMAT\n"
" Select the format of the trace files. xml (the default) and json are supported.\n"
" Has no effect unless --log is specified.\n");
printf(" -m SIZE, --memory SIZE\n"
" Memory limit. The default value is 8GiB. When specified\n"
" without a unit, MiB is assumed.\n");

@ -725,10 +799,16 @@ static void printBackupUsage(bool devhelp) {
" in the database to obtain a cutoff version very close to the timestamp given in YYYY-MM-DD.HH:MI:SS format (UTC).\n");
printf(" --expire_before_version VERSION\n"
" Version cutoff for expire operations. Deletes data files containing no data at or after VERSION.\n");
printf(" --delete_before_days NUM_DAYS\n"
" Another way to specify version cutoff for expire operations. Deletes data files containing no data at or after a\n"
" version approximately NUM_DAYS days worth of versions prior to the latest log version in the backup.\n");
printf(" --restorable_after_timestamp DATETIME\n"
" For expire operations, set minimum acceptable restorability to the version equivalent of DATETIME and later.\n");
printf(" --restorable_after_version VERSION\n"
" For expire operations, set minimum acceptable restorability to the VERSION and later.\n");
printf(" --min_restorable_days NUM_DAYS\n"
" For expire operations, set minimum acceptable restorability to approximately NUM_DAYS days worth of versions\n"
" prior to the latest log version in the backup.\n");
printf(" --version_timestamps\n");
printf(" For describe operations, lookup versions in the database to obtain timestamps. A cluster file is required.\n");
printf(" -f, --force For expire operations, force expiration even if minimum restorability would be violated.\n");

@ -737,7 +817,7 @@ static void printBackupUsage(bool devhelp) {
printf(" -e ERRORLIMIT The maximum number of errors printed by status (default is 10).\n");
printf(" -k KEYS List of key ranges to backup.\n"
" If not specified, the entire database will be backed up.\n");
printf(" -n, --dry-run For start or restore operations, performs a trial run with no actual changes made.\n");
printf(" -n, --dryrun For start or restore operations, performs a trial run with no actual changes made.\n");
printf(" -v, --version Print version information and exit.\n");
printf(" -w, --wait Wait for the backup to complete (allowed with `start' and `discontinue').\n");
printf(" -z, --no-stop-when-done\n"

@ -781,7 +861,7 @@ static void printRestoreUsage(bool devhelp ) {
printf(" -k KEYS List of key ranges from the backup to restore\n");
|
||||
printf(" --remove_prefix PREFIX prefix to remove from the restored keys\n");
|
||||
printf(" --add_prefix PREFIX prefix to add to the restored keys\n");
|
||||
printf(" -n, --dry-run Perform a trial run with no changes made.\n");
|
||||
printf(" -n, --dryrun Perform a trial run with no changes made.\n");
|
||||
printf(" -v DBVERSION The version at which the database will be restored.\n");
|
||||
printf(" -h, --help Display this help and exit.\n");
|
||||
|
||||
|
@ -812,6 +892,9 @@ static void printDBAgentUsage(bool devhelp) {
|
|||
" --logdir PATH Specifes the output directory for trace files. If\n"
|
||||
" unspecified, defaults to the current directory. Has\n"
|
||||
" no effect unless --log is specified.\n");
|
||||
printf(" --trace_format FORMAT\n"
|
||||
" Select the format of the trace files. xml (the default) and json are supported.\n"
|
||||
" Has no effect unless --log is specified.\n");
|
||||
printf(" -m SIZE, --memory SIZE\n"
|
||||
" Memory limit. The default value is 8GiB. When specified\n"
|
||||
" without a unit, MiB is assumed.\n");
|
||||
|
@ -970,6 +1053,7 @@ enumBackupType getBackupType(std::string backupType)
|
|||
values["delete"] = BACKUP_DELETE;
|
||||
values["describe"] = BACKUP_DESCRIBE;
|
||||
values["list"] = BACKUP_LIST;
|
||||
values["dump"] = BACKUP_DUMP;
|
||||
}
|
||||
|
||||
auto i = values.find(backupType);
|
||||
|
@ -1730,11 +1814,10 @@ ACTOR Future<Void> changeDBBackupResumed(Database src, Database dest, bool pause
|
|||
return Void();
|
||||
}
|
||||
|
||||
ACTOR Future<Void> runRestore(Database db, std::string tagName, std::string container, Standalone<VectorRef<KeyRangeRef>> ranges, Version dbVersion, bool performRestore, bool verbose, bool waitForDone, std::string addPrefix, std::string removePrefix) {
|
||||
ACTOR Future<Void> runRestore(Database db, std::string tagName, std::string container, Standalone<VectorRef<KeyRangeRef>> ranges, Version targetVersion, bool performRestore, bool verbose, bool waitForDone, std::string addPrefix, std::string removePrefix) {
|
||||
try
|
||||
{
|
||||
state FileBackupAgent backupAgent;
|
||||
state int64_t restoreVersion = -1;
|
||||
|
||||
if(ranges.size() > 1) {
|
||||
fprintf(stderr, "Currently only a single restore range is supported!\n");
|
||||
|
@ -1743,53 +1826,46 @@ ACTOR Future<Void> runRestore(Database db, std::string tagName, std::string cont
|
|||
|
||||
state KeyRange range = (ranges.size() == 0) ? normalKeys : ranges.front();
|
||||
|
||||
if (performRestore) {
|
||||
if(dbVersion == invalidVersion) {
|
||||
BackupDescription desc = wait(IBackupContainer::openContainer(container)->describeBackup());
|
||||
state Reference<IBackupContainer> bc = IBackupContainer::openContainer(container);
|
||||
|
||||
// If targetVersion is unset then use the maximum restorable version from the backup description
|
||||
if(targetVersion == invalidVersion) {
|
||||
if(verbose)
|
||||
printf("No restore target version given, will use maximum restorable version from backup description.\n");
|
||||
|
||||
BackupDescription desc = wait(bc->describeBackup());
|
||||
|
||||
if(!desc.maxRestorableVersion.present()) {
|
||||
fprintf(stderr, "The specified backup is not restorable to any version.\n");
|
||||
throw restore_error();
|
||||
}
|
||||
|
||||
dbVersion = desc.maxRestorableVersion.get();
|
||||
}
|
||||
Version _restoreVersion = wait(backupAgent.restore(db, KeyRef(tagName), KeyRef(container), waitForDone, dbVersion, verbose, range, KeyRef(addPrefix), KeyRef(removePrefix)));
|
||||
restoreVersion = _restoreVersion;
|
||||
}
|
||||
else {
|
||||
state Reference<IBackupContainer> bc = IBackupContainer::openContainer(container);
|
||||
state BackupDescription description = wait(bc->describeBackup());
|
||||
targetVersion = desc.maxRestorableVersion.get();
|
||||
|
||||
if(dbVersion <= 0) {
|
||||
wait(description.resolveVersionTimes(db));
|
||||
if(description.maxRestorableVersion.present())
|
||||
restoreVersion = description.maxRestorableVersion.get();
|
||||
else {
|
||||
fprintf(stderr, "Backup is not restorable\n");
|
||||
throw restore_invalid_version();
|
||||
}
|
||||
}
|
||||
else
|
||||
restoreVersion = dbVersion;
|
||||
|
||||
state Optional<RestorableFileSet> rset = wait(bc->getRestoreSet(restoreVersion));
|
||||
if(!rset.present()) {
|
||||
fprintf(stderr, "Insufficient data to restore to version %lld\n", restoreVersion);
|
||||
throw restore_invalid_version();
|
||||
if(verbose)
|
||||
printf("Using target restore version %lld\n", targetVersion);
|
||||
}
|
||||
|
||||
// Display the restore information, if requested
|
||||
if (verbose) {
|
||||
printf("[DRY RUN] Restoring backup to version: %lld\n", (long long) restoreVersion);
|
||||
printf("%s\n", description.toString().c_str());
|
||||
}
|
||||
}
|
||||
if (performRestore) {
|
||||
Version restoredVersion = wait(backupAgent.restore(db, KeyRef(tagName), KeyRef(container), waitForDone, targetVersion, verbose, range, KeyRef(addPrefix), KeyRef(removePrefix)));
|
||||
|
||||
if(waitForDone && verbose) {
|
||||
// If restore completed then report version restored
|
||||
printf("Restored to version %lld%s\n", (long long) restoreVersion, (performRestore) ? "" : " (DRY RUN)");
|
||||
// If restore is now complete then report version restored
|
||||
printf("Restored to version %lld\n", restoredVersion);
|
||||
}
|
||||
}
|
||||
else {
|
||||
state Optional<RestorableFileSet> rset = wait(bc->getRestoreSet(targetVersion));
|
||||
|
||||
if(!rset.present()) {
|
||||
fprintf(stderr, "Insufficient data to restore to version %lld. Describe backup for more information.\n", targetVersion);
|
||||
throw restore_invalid_version();
|
||||
}
|
||||
|
||||
printf("Backup can be used to restore to version %lld\n", targetVersion);
|
||||
}
|
||||
|
||||
}
|
||||
catch (Error& e) {
|
||||
if(e.code() == error_code_actor_cancelled)
|
||||
throw;
|
||||
|
@ -1824,6 +1900,33 @@ Reference<IBackupContainer> openBackupContainer(const char *name, std::string de
|
|||
return c;
|
||||
}
|
||||
|
||||
ACTOR Future<Void> dumpBackupData(const char *name, std::string destinationContainer, Version beginVersion, Version endVersion) {
|
||||
state Reference<IBackupContainer> c = openBackupContainer(name, destinationContainer);
|
||||
|
||||
if(beginVersion < 0 || endVersion < 0) {
|
||||
BackupDescription desc = wait(c->describeBackup());
|
||||
|
||||
if(!desc.maxLogEnd.present()) {
|
||||
fprintf(stderr, "ERROR: Backup must have log data in order to use relative begin/end versions.\n");
|
||||
throw backup_invalid_info();
|
||||
}
|
||||
|
||||
if(beginVersion < 0) {
|
||||
beginVersion += desc.maxLogEnd.get();
|
||||
}
|
||||
|
||||
if(endVersion < 0) {
|
||||
endVersion += desc.maxLogEnd.get();
|
||||
}
|
||||
}
|
||||
|
||||
printf("Scanning version range %lld to %lld\n", beginVersion, endVersion);
|
||||
BackupFileList files = wait(c->dumpFileList(beginVersion, endVersion));
|
||||
files.toStream(stdout);
|
||||
|
||||
return Void();
|
||||
}
|
||||
|
||||
ACTOR Future<Void> expireBackupData(const char *name, std::string destinationContainer, Version endVersion, std::string endDatetime, Database db, bool force, Version restorableAfterVersion, std::string restorableAfterDatetime) {
|
||||
if (!endDatetime.empty()) {
|
||||
Version v = wait( timeKeeperVersionFromDatetime(endDatetime, db) );
|
||||
|
@ -1843,8 +1946,35 @@ ACTOR Future<Void> expireBackupData(const char *name, std::string destinationCon
|
|||
|
||||
try {
|
||||
Reference<IBackupContainer> c = openBackupContainer(name, destinationContainer);
|
||||
wait(c->expireData(endVersion, force, restorableAfterVersion));
|
||||
printf("All data before version %lld is deleted.\n", endVersion);
|
||||
|
||||
state IBackupContainer::ExpireProgress progress;
|
||||
state std::string lastProgress;
|
||||
state Future<Void> expire = c->expireData(endVersion, force, &progress, restorableAfterVersion);
|
||||
|
||||
loop {
|
||||
choose {
|
||||
when(wait(delay(5))) {
|
||||
std::string p = progress.toString();
|
||||
if(p != lastProgress) {
|
||||
int spaces = lastProgress.size() - p.size();
|
||||
printf("\r%s%s", p.c_str(), (spaces > 0 ? std::string(spaces, ' ').c_str() : "") );
|
||||
lastProgress = p;
|
||||
}
|
||||
}
|
||||
when(wait(expire)) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
std::string p = progress.toString();
|
||||
int spaces = lastProgress.size() - p.size();
|
||||
printf("\r%s%s\n", p.c_str(), (spaces > 0 ? std::string(spaces, ' ').c_str() : "") );
|
||||
|
||||
if(endVersion < 0)
|
||||
printf("All data before %lld versions (%lld days) prior to latest backup log has been deleted.\n", -endVersion, -endVersion / ((int64_t)24 * 3600 * CLIENT_KNOBS->CORE_VERSIONSPERSECOND));
|
||||
else
|
||||
printf("All data before version %lld has been deleted.\n", endVersion);
|
||||
}
|
||||
catch (Error& e) {
|
||||
if(e.code() == error_code_actor_cancelled)
|
||||
|
@ -1865,18 +1995,25 @@ ACTOR Future<Void> deleteBackupContainer(const char *name, std::string destinati
|
|||
state int numDeleted = 0;
|
||||
state Future<Void> done = c->deleteContainer(&numDeleted);
|
||||
|
||||
state int lastUpdate = -1;
|
||||
printf("Deleting %s...\n", destinationContainer.c_str());
|
||||
|
||||
loop {
|
||||
choose {
|
||||
when ( wait(done) ) {
|
||||
printf("The entire container has been deleted.\n");
|
||||
break;
|
||||
}
|
||||
when ( wait(delay(3)) ) {
|
||||
printf("%d files have been deleted so far...\n", numDeleted);
|
||||
when ( wait(delay(5)) ) {
|
||||
if(numDeleted != lastUpdate) {
|
||||
printf("\r%d...", numDeleted);
|
||||
lastUpdate = numDeleted;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
printf("\r%d objects deleted\n", numDeleted);
|
||||
printf("The entire container has been deleted.\n");
|
||||
}
|
||||
catch (Error& e) {
|
||||
if(e.code() == error_code_actor_cancelled)
|
||||
throw;
|
||||
|
@ -2073,6 +2210,26 @@ static void addKeyRange(std::string optionValue, Standalone<VectorRef<KeyRangeRe
|
|||
return;
|
||||
}
|
||||
|
||||
Version parseVersion(const char *str) {
|
||||
StringRef s((const uint8_t *)str, strlen(str));
|
||||
|
||||
if(s.endsWith(LiteralStringRef("days")) || s.endsWith(LiteralStringRef("d"))) {
|
||||
float days;
|
||||
if(sscanf(str, "%f", &days) != 1) {
|
||||
fprintf(stderr, "Could not parse version: %s\n", str);
|
||||
flushAndExit(FDB_EXIT_ERROR);
|
||||
}
|
||||
return (double)CLIENT_KNOBS->CORE_VERSIONSPERSECOND * 24 * 3600 * -days;
|
||||
}
|
||||
|
||||
Version ver;
|
||||
if(sscanf(str, "%lld", &ver) != 1) {
|
||||
fprintf(stderr, "Could not parse version: %s\n", str);
|
||||
flushAndExit(FDB_EXIT_ERROR);
|
||||
}
|
||||
return ver;
|
||||
}
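
A rough illustration (not part of the patch) of the relative-version arithmetic parseVersion() uses: versions advance at CLIENT_KNOBS->CORE_VERSIONSPERSECOND, so a "days" suffix becomes a negative offset of days * 24 * 3600 * versions-per-second, resolved later against the backup's latest log version. The constant below assumes the stock default of 1,000,000 versions per second.

#include <cstdint>
#include <cstdio>

int main() {
	// Assumed default; the real value comes from CLIENT_KNOBS->CORE_VERSIONSPERSECOND.
	const int64_t versionsPerSecond = 1000000;
	const double days = 7.5; // e.g. a "--begin 7.5days" argument
	// Negative means "this many versions before the latest log version in the backup".
	int64_t relativeVersion = -(int64_t)(days * 24 * 3600 * versionsPerSecond);
	printf("7.5days -> %lld\n", (long long)relativeVersion);
	return 0;
}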

#ifdef ALLOC_INSTRUMENTATION
extern uint8_t *g_extra_memory;
#endif

@@ -2151,6 +2308,9 @@ int main(int argc, char* argv[]) {
case BACKUP_DESCRIBE:
args = new CSimpleOpt(argc - 1, &argv[1], g_rgBackupDescribeOptions, SO_O_EXACT);
break;
case BACKUP_DUMP:
args = new CSimpleOpt(argc - 1, &argv[1], g_rgBackupDumpOptions, SO_O_EXACT);
break;
case BACKUP_LIST:
args = new CSimpleOpt(argc - 1, &argv[1], g_rgBackupListOptions, SO_O_EXACT);
break;

@@ -2288,6 +2448,8 @@ int main(int argc, char* argv[]) {
uint64_t memLimit = 8LL << 30;
Optional<uint64_t> ti;
std::vector<std::string> blobCredentials;
Version dumpBegin = 0;
Version dumpEnd = std::numeric_limits<Version>::max();

if( argc == 1 ) {
printUsage(programExe, false);

@@ -2375,6 +2537,11 @@ int main(int argc, char* argv[]) {
trace = true;
traceDir = args->OptionArg();
break;
case OPT_TRACE_FORMAT:
if (!selectTraceFormatter(args->OptionArg())) {
fprintf(stderr, "WARNING: Unrecognized trace format `%s'\n", args->OptionArg());
}
break;
case OPT_TRACE_LOG_GROUP:
traceLogGroup = args->OptionArg();
break;

@@ -2397,6 +2564,8 @@ int main(int argc, char* argv[]) {
break;
case OPT_EXPIRE_BEFORE_VERSION:
case OPT_EXPIRE_RESTORABLE_AFTER_VERSION:
case OPT_EXPIRE_MIN_RESTORABLE_DAYS:
case OPT_EXPIRE_DELETE_BEFORE_DAYS:
{
const char* a = args->OptionArg();
long long ver = 0;

@@ -2405,7 +2574,13 @@ int main(int argc, char* argv[]) {
printHelpTeaser(argv[0]);
return FDB_EXIT_ERROR;
}
if(optId == OPT_EXPIRE_BEFORE_VERSION)

// Interpret the value as days worth of versions relative to now (negative)
if(optId == OPT_EXPIRE_MIN_RESTORABLE_DAYS || optId == OPT_EXPIRE_DELETE_BEFORE_DAYS) {
ver = -ver * 24 * 60 * 60 * CLIENT_KNOBS->CORE_VERSIONSPERSECOND;
}

if(optId == OPT_EXPIRE_BEFORE_VERSION || optId == OPT_EXPIRE_DELETE_BEFORE_DAYS)
expireVersion = ver;
else
expireRestorableAfterVersion = ver;

@@ -2537,6 +2712,12 @@ int main(int argc, char* argv[]) {
case OPT_BLOB_CREDENTIALS:
blobCredentials.push_back(args->OptionArg());
break;
case OPT_DUMP_BEGIN:
dumpBegin = parseVersion(args->OptionArg());
break;
case OPT_DUMP_END:
dumpEnd = parseVersion(args->OptionArg());
break;
}
}

@@ -2852,11 +3033,17 @@ int main(int argc, char* argv[]) {
// Only pass database optionDatabase Describe will lookup version timestamps if a cluster file was given, but quietly skip them if not.
f = stopAfter( describeBackup(argv[0], destinationContainer, describeDeep, describeTimestamps ? Optional<Database>(db) : Optional<Database>()) );
break;

case BACKUP_LIST:
initTraceFile();
f = stopAfter( listBackup(baseUrl) );
break;

case BACKUP_DUMP:
initTraceFile();
f = stopAfter( dumpBackupData(argv[0], destinationContainer, dumpBegin, dumpEnd) );
break;

case BACKUP_UNDEFINED:
default:
fprintf(stderr, "ERROR: Unsupported backup action %s\n", argv[1]);

@@ -2867,8 +3054,13 @@ int main(int argc, char* argv[]) {

break;
case EXE_RESTORE:
if(!initCluster())
if(dryRun) {
initTraceFile();
}
else if(!initCluster()) {
return FDB_EXIT_ERROR;
}

switch(restoreType) {
case RESTORE_START:
f = stopAfter( runRestore(db, tagName, restoreContainer, backupKeys, dbVersion, !dryRun, !quietDisplay, waitForDone, addPrefix, removePrefix) );

@@ -3004,5 +3196,5 @@ int main(int argc, char* argv[]) {
status = FDB_EXIT_MAIN_EXCEPTION;
}

return status;
flushAndExit(status);
}
@@ -0,0 +1,13 @@
set(FDBCLI_SRCS
fdbcli.actor.cpp
FlowLineNoise.actor.cpp
FlowLineNoise.h
linenoise/linenoise.c
linenoise/linenoise.h)

actor_set(FDBCLI_BUILD "${FDBCLI_SRCS}")
add_executable(fdbcli "${FDBCLI_BUILD}")
actor_compile(fdbcli "${FDBCLI_SRCS}")
target_link_libraries(fdbcli PRIVATE fdbclient)

install(TARGETS fdbcli DESTINATION ${FDB_BIN_DIR} COMPONENT clients)

@@ -57,7 +57,7 @@ extern const char* getHGVersion();

std::vector<std::string> validOptions;

enum { OPT_CONNFILE, OPT_DATABASE, OPT_HELP, OPT_TRACE, OPT_TRACE_DIR, OPT_TIMEOUT, OPT_EXEC, OPT_NO_STATUS, OPT_STATUS_FROM_JSON, OPT_VERSION };
enum { OPT_CONNFILE, OPT_DATABASE, OPT_HELP, OPT_TRACE, OPT_TRACE_DIR, OPT_TIMEOUT, OPT_EXEC, OPT_NO_STATUS, OPT_STATUS_FROM_JSON, OPT_VERSION, OPT_TRACE_FORMAT };

CSimpleOpt::SOption g_rgOptions[] = {
{ OPT_CONNFILE, "-C", SO_REQ_SEP },

@@ -74,6 +74,7 @@ CSimpleOpt::SOption g_rgOptions[] = {
{ OPT_STATUS_FROM_JSON, "--status-from-json", SO_REQ_SEP },
{ OPT_VERSION, "--version", SO_NONE },
{ OPT_VERSION, "-v", SO_NONE },
{ OPT_TRACE_FORMAT, "--trace_format", SO_REQ_SEP },

#ifndef TLS_DISABLED
TLS_OPTION_FLAGS

@@ -401,6 +402,9 @@ static void printProgramUsage(const char* name) {
" --log-dir PATH Specifes the output directory for trace files. If\n"
" unspecified, defaults to the current directory. Has\n"
" no effect unless --log is specified.\n"
" --trace_format FORMAT\n"
" Select the format of the log files. xml (the default) and json\n"
" are supported. Has no effect unless --log is specified.\n"
" --exec CMDS Immediately executes the semicolon separated CLI commands\n"
" and then exits.\n"
" --no-status Disables the initial status check done when starting\n"

@@ -1618,6 +1622,11 @@ ACTOR Future<bool> configure( Database db, std::vector<StringRef> tokens, Refere
printf("Type `configure FORCE <TOKEN>*' to configure without this check\n");
ret=false;
break;
case ConfigurationResult::NOT_ENOUGH_WORKERS:
printf("ERROR: Not enough processes exist to support the specified configuration\n");
printf("Type `configure FORCE <TOKEN>*' to configure without this check\n");
ret=false;
break;
case ConfigurationResult::SUCCESS:
printf("Configuration changed\n");
ret=false;

@@ -1724,7 +1733,12 @@ ACTOR Future<bool> fileConfigure(Database db, std::string filePath, bool isNewDa
break;
case ConfigurationResult::REGIONS_CHANGED:
printf("ERROR: The region configuration cannot be changed while simultaneously changing usable_regions\n");
printf("Type `fileconfigure FORCE <TOKEN>*' to configure without this check\n");
printf("Type `fileconfigure FORCE <FILENAME>' to configure without this check\n");
ret=false;
break;
case ConfigurationResult::NOT_ENOUGH_WORKERS:
printf("ERROR: Not enough processes exist to support the specified configuration\n");
printf("Type `fileconfigure FORCE <FILENAME>' to configure without this check\n");
ret=false;
break;
case ConfigurationResult::SUCCESS:

@@ -2321,6 +2335,11 @@ struct CLIOptions {
return 0;
case OPT_STATUS_FROM_JSON:
return printStatusFromJSON(args.OptionArg());
case OPT_TRACE_FORMAT:
if (!selectTraceFormatter(args.OptionArg())) {
fprintf(stderr, "WARNING: Unrecognized trace format `%s'\n", args.OptionArg());
}
break;
case OPT_VERSION:
printVersion();
return FDB_EXIT_SUCCESS;

@@ -276,7 +276,7 @@ public:

// stopWhenDone will return when the backup is stopped, if enabled. Otherwise, it
// will return when the backup directory is restorable.
Future<int> waitBackup(Database cx, std::string tagName, bool stopWhenDone = true);
Future<int> waitBackup(Database cx, std::string tagName, bool stopWhenDone = true, Reference<IBackupContainer> *pContainer = nullptr, UID *pUID = nullptr);

static const Key keyLastRestorable;

@@ -415,7 +415,7 @@ struct RCGroup {

template <class Ar>
void serialize(Ar& ar) {
ar & items & version & groupKey;
serializer(ar, items, version, groupKey);
}
};

@@ -615,6 +615,15 @@ public:
return configSpace.pack(LiteralStringRef(__FUNCTION__));
}

// Number of kv range files that were both committed to persistent storage AND inserted into
// the snapshotRangeFileMap. Note that since insertions could replace 1 or more existing
// map entries this is not necessarily the number of entries currently in the map.
// This value exists to help with sizing of kv range folders for BackupContainers that
// require it.
KeyBackedBinaryValue<int64_t> snapshotRangeFileCount() {
return configSpace.pack(LiteralStringRef(__FUNCTION__));
}

// Coalesced set of ranges already dispatched for writing.
typedef KeyBackedMap<Key, bool> RangeDispatchMapT;
RangeDispatchMapT snapshotRangeDispatchMap() {

@@ -671,6 +680,7 @@ public:

copy.snapshotBeginVersion().set(tr, beginVersion.get());
copy.snapshotTargetEndVersion().set(tr, endVersion);
copy.snapshotRangeFileCount().set(tr, 0);

return Void();
});

File diff suppressed because it is too large

@@ -96,10 +96,12 @@ struct KeyspaceSnapshotFile {
}
};

struct FullBackupListing {
struct BackupFileList {
std::vector<RangeFile> ranges;
std::vector<LogFile> logs;
std::vector<KeyspaceSnapshotFile> snapshots;

void toStream(FILE *fout) const;
};

// The byte counts here only include usable log files and byte counts from kvrange manifests

@@ -108,10 +110,19 @@ struct BackupDescription {
std::string url;
std::vector<KeyspaceSnapshotFile> snapshots;
int64_t snapshotBytes;
// The version before which everything has been deleted by an expire
Optional<Version> expiredEndVersion;
// The latest version before which at least some data has been deleted by an expire
Optional<Version> unreliableEndVersion;
// The minimum log version in the backup
Optional<Version> minLogBegin;
// The maximum log version in the backup
Optional<Version> maxLogEnd;
// The maximum log version for which there is contiguous log version coverage extending back to minLogBegin
Optional<Version> contiguousLogEnd;
// The maximum version which this backup can be used to restore to
Optional<Version> maxRestorableVersion;
// The minimum version which this backup can be used to restore to
Optional<Version> minRestorableVersion;
std::string extendedDetail; // Freeform container-specific info.

@@ -153,10 +164,11 @@ public:

// Create the container
virtual Future<Void> create() = 0;
virtual Future<bool> exists() = 0;

// Open a log file or range file for writing
virtual Future<Reference<IBackupFile>> writeLogFile(Version beginVersion, Version endVersion, int blockSize) = 0;
virtual Future<Reference<IBackupFile>> writeRangeFile(Version version, int blockSize) = 0;
virtual Future<Reference<IBackupFile>> writeRangeFile(Version snapshotBeginVersion, int snapshotFileCount, Version fileVersion, int blockSize) = 0;

// Write a KeyspaceSnapshotFile of range file names representing a full non overlapping
// snapshot of the key ranges this backup is targeting.

@@ -165,23 +177,32 @@ public:
// Open a file for read by name
virtual Future<Reference<IAsyncFile>> readFile(std::string name) = 0;

struct ExpireProgress {
std::string step;
int total;
int done;
std::string toString() const;
};
// Delete backup files which do not contain any data at or after (more recent than) expireEndVersion.
// If force is false, then nothing will be deleted unless there is a restorable snapshot which
//   - begins at or after expireEndVersion
//   - ends at or before restorableBeginVersion
// If force is true, data is deleted unconditionally which could leave the backup in an unusable state. This is not recommended.
// Returns true if expiration was done.
virtual Future<Void> expireData(Version expireEndVersion, bool force = false, Version restorableBeginVersion = std::numeric_limits<Version>::max()) = 0;
virtual Future<Void> expireData(Version expireEndVersion, bool force = false, ExpireProgress *progress = nullptr, Version restorableBeginVersion = std::numeric_limits<Version>::max()) = 0;
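
A minimal sketch of how a caller can drive the new expireData() signature with progress reporting; it mirrors the fdbbackup expire loop earlier in this commit. The container URL, cutoff, and restorable-after versions below are placeholders, not values from the patch.

ACTOR Future<Void> expireWithProgress(std::string url, Version cutoff, Version restorableAfter) {
	state Reference<IBackupContainer> c = IBackupContainer::openContainer(url); // url is a placeholder
	state IBackupContainer::ExpireProgress progress;
	state Future<Void> expire = c->expireData(cutoff, false, &progress, restorableAfter);
	loop {
		choose {
			when(wait(delay(5))) { printf("%s\n", progress.toString().c_str()); } // periodic progress
			when(wait(expire)) { break; }                                          // expiration finished
		}
	}
	return Void();
}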

// Delete entire container. During the process, if pNumDeleted is not null it will be
// updated with the count of deleted files so that progress can be seen.
virtual Future<Void> deleteContainer(int *pNumDeleted = nullptr) = 0;

// Return key details about a backup's contents, possibly using cached or stored metadata
// unless deepScan is true.
virtual Future<BackupDescription> describeBackup(bool deepScan = false) = 0;
// Return key details about a backup's contents.
// Unless deepScan is true, use cached metadata, if present, as initial contiguous available log range.
// If logStartVersionOverride is given, log data prior to that version will be ignored for the purposes
// of this describe operation. This can be used to calculate what the restorability of a backup would
// be after deleting all data prior to logStartVersionOverride.
virtual Future<BackupDescription> describeBackup(bool deepScan = false, Version logStartVersionOverride = invalidVersion) = 0;

virtual Future<FullBackupListing> dumpFileList() = 0;
virtual Future<BackupFileList> dumpFileList(Version begin = 0, Version end = std::numeric_limits<Version>::max()) = 0;

// Get exactly the files necessary to restore to targetVersion. Returns non-present if
// restore to given version is not possible.

@@ -225,6 +225,20 @@ std::string BlobStoreEndpoint::getResourceURL(std::string resource) {
return r;
}

ACTOR Future<bool> bucketExists_impl(Reference<BlobStoreEndpoint> b, std::string bucket) {
wait(b->requestRateRead->getAllowance(1));

std::string resource = std::string("/") + bucket;
HTTP::Headers headers;

Reference<HTTP::Response> r = wait(b->doRequest("HEAD", resource, headers, NULL, 0, {200, 404}));
return r->code == 200;
}

Future<bool> BlobStoreEndpoint::bucketExists(std::string const &bucket) {
return bucketExists_impl(Reference<BlobStoreEndpoint>::addRef(this), bucket);
}

ACTOR Future<bool> objectExists_impl(Reference<BlobStoreEndpoint> b, std::string bucket, std::string object) {
wait(b->requestRateRead->getAllowance(1));

@@ -244,8 +258,17 @@ ACTOR Future<Void> deleteObject_impl(Reference<BlobStoreEndpoint> b, std::string

std::string resource = std::string("/") + bucket + "/" + object;
HTTP::Headers headers;
// 200 or 204 means object successfully deleted, 404 means it already doesn't exist, so any of those are considered successful
Reference<HTTP::Response> r = wait(b->doRequest("DELETE", resource, headers, NULL, 0, {200, 204, 404}));
// 200 means object deleted, 404 means it doesn't exist already, so either success code passed above is fine.

// But if the object already did not exist then the 'delete' is assumed to be successful but a warning is logged.
if(r->code == 404) {
TraceEvent(SevWarnAlways, "BlobStoreEndpointDeleteObjectMissing")
.detail("Host", b->host)
.detail("Bucket", bucket)
.detail("Object", object);
}

return Void();
}

@@ -310,9 +333,12 @@ Future<Void> BlobStoreEndpoint::deleteRecursively(std::string const &bucket, std
ACTOR Future<Void> createBucket_impl(Reference<BlobStoreEndpoint> b, std::string bucket) {
wait(b->requestRateWrite->getAllowance(1));

bool exists = wait(b->bucketExists(bucket));
if(!exists) {
std::string resource = std::string("/") + bucket;
HTTP::Headers headers;
Reference<HTTP::Response> r = wait(b->doRequest("PUT", resource, headers, NULL, 0, {200, 409}));
}
return Void();
}

@@ -485,8 +511,8 @@ ACTOR Future<Reference<HTTP::Response>> doRequest_impl(Reference<BlobStoreEndpoi
Future<BlobStoreEndpoint::ReusableConnection> frconn = bstore->connect();

// Make a shallow copy of the queue by calling addref() on each buffer in the chain and then prepending that chain to contentCopy
if(pContent != nullptr) {
contentCopy.discardAll();
if(pContent != nullptr) {
PacketBuffer *pFirst = pContent->getUnsent();
PacketBuffer *pLast = nullptr;
for(PacketBuffer *p = pFirst; p != nullptr; p = p->nextPacketBuffer()) {

@@ -36,7 +36,7 @@ public:

struct Stats {
Stats() : requests_successful(0), requests_failed(0), bytes_sent(0) {}
Stats operator-(const Stats &rhs);
void clear() { memset(this, sizeof(*this), 0); }
void clear() { memset(this, 0, sizeof(*this)); }
json_spirit::mObject getJSON();

int64_t requests_successful;

@@ -197,6 +197,9 @@ public:
// Get a list of the files in a bucket, see listBucketStream for more argument detail.
Future<ListResult> listBucket(std::string const &bucket, Optional<std::string> prefix = {}, Optional<char> delimiter = {}, int maxDepth = 0, std::function<bool(std::string const &)> recurseFilter = nullptr);

// Check if a bucket exists
Future<bool> bucketExists(std::string const &bucket);

// Check if an object exists in a bucket
Future<bool> objectExists(std::string const &bucket, std::string const &object);

@@ -0,0 +1,97 @@
set(FDBCLIENT_SRCS
AsyncFileBlobStore.actor.cpp
AsyncFileBlobStore.actor.h
Atomic.h
AutoPublicAddress.cpp
BackupAgent.h
BackupAgentBase.actor.cpp
BackupContainer.actor.cpp
BackupContainer.h
BlobStore.actor.cpp
ClientDBInfo.h
ClientLogEvents.h
ClientWorkerInterface.h
ClusterInterface.h
CommitTransaction.h
CoordinationInterface.h
DatabaseBackupAgent.actor.cpp
DatabaseConfiguration.cpp
DatabaseConfiguration.h
DatabaseContext.h
EventTypes.actor.h
FailureMonitorClient.actor.cpp
FailureMonitorClient.h
FDBOptions.h
FDBTypes.h
FileBackupAgent.actor.cpp
HTTP.actor.cpp
IClientApi.h
JsonBuilder.cpp
JsonBuilder.h
KeyBackedTypes.h
KeyRangeMap.actor.cpp
KeyRangeMap.h
Knobs.cpp
Knobs.h
ManagementAPI.actor.cpp
ManagementAPI.h
MasterProxyInterface.h
MetricLogger.actor.cpp
MetricLogger.h
MonitorLeader.actor.cpp
MonitorLeader.h
MultiVersionAssignmentVars.h
MultiVersionTransaction.actor.cpp
MultiVersionTransaction.h
MutationList.h
NativeAPI.actor.cpp
NativeAPI.h
Notified.h
ReadYourWrites.actor.cpp
ReadYourWrites.h
RunTransaction.actor.h
RYWIterator.cpp
RYWIterator.h
Schemas.cpp
Schemas.h
SnapshotCache.h
Status.h
StatusClient.actor.cpp
StatusClient.h
StorageServerInterface.h
Subspace.cpp
Subspace.h
SystemData.cpp
SystemData.h
TaskBucket.actor.cpp
TaskBucket.h
ThreadSafeTransaction.actor.cpp
ThreadSafeTransaction.h
Tuple.cpp
Tuple.h
VersionedMap.actor.h
VersionedMap.h
WriteMap.h
json_spirit/json_spirit_error_position.h
json_spirit/json_spirit_reader_template.h
json_spirit/json_spirit_value.h
json_spirit/json_spirit_writer_options.h
json_spirit/json_spirit_writer_template.h
libb64/cdecode.c
libb64/cencode.c
md5/md5.c
sha1/SHA1.cpp
${CMAKE_CURRENT_BINARY_DIR}/FDBOptions.g.cpp)

add_custom_command(OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/FDBOptions.g.h ${CMAKE_CURRENT_BINARY_DIR}/FDBOptions.g.cpp
COMMAND ${MONO_EXECUTABLE} ${VEXILLOGRAPHER_EXE} ${CMAKE_CURRENT_SOURCE_DIR}/vexillographer/fdb.options cpp ${CMAKE_CURRENT_BINARY_DIR}/FDBOptions.g
DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/vexillographer/fdb.options vexillographer
COMMENT "Generate FDBOptions c++ files")
add_custom_target(fdboptions DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/FDBOptions.g.h ${CMAKE_CURRENT_BINARY_DIR}/FDBOptions.g.cpp)

actor_set(FDBCLIENT_BUILD "${FDBCLIENT_SRCS}")
add_library(fdbclient STATIC ${FDBCLIENT_BUILD})
add_dependencies(fdbclient fdboptions)
actor_compile(fdbclient "${FDBCLIENT_SRCS}")
target_link_libraries(fdbclient PUBLIC fdbrpc)

@@ -39,7 +39,7 @@ struct ClientDBInfo {
template <class Archive>
void serialize(Archive& ar) {
ASSERT( ar.protocolVersion() >= 0x0FDB00A200040001LL );
ar & proxies & id & clientTxnInfoSampleRate & clientTxnInfoSizeLimit;
serializer(ar, proxies, id, clientTxnInfoSampleRate, clientTxnInfoSizeLimit);
}
};
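
The serialization changes in this and the following files are all the same mechanical rewrite: chained "ar & a & b & c;" expressions become calls to the variadic serializer(ar, a, b, c) helper. A hypothetical struct (not from the codebase) showing the before/after shape:

struct ExampleMessage { // hypothetical type for illustration only
	int32_t count;
	Key name;

	template <class Ar>
	void serialize(Ar& ar) {
		// old style:  ar & count & name;
		serializer(ar, count, name);
	}
};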

@@ -39,7 +39,7 @@ namespace FdbClientLogEvents {
Event(EventType t, double ts) : type(t), startTs(ts) { }
Event() { }

template <typename Ar> Ar& serialize(Ar &ar) { return ar & type & startTs; }
template <typename Ar> Ar& serialize(Ar &ar) { return serializer(ar, type, startTs); }

EventType type{ EVENTTYPEEND };
double startTs{ 0 };

@@ -53,9 +53,9 @@ namespace FdbClientLogEvents {

template <typename Ar> Ar& serialize(Ar &ar) {
if (!ar.isDeserializing)
return Event::serialize(ar) & latency;
return serializer(Event::serialize(ar), latency);
else
return ar & latency;
return serializer(ar, latency);
}

double latency;

@@ -71,9 +71,9 @@ namespace FdbClientLogEvents {

template <typename Ar> Ar& serialize(Ar &ar) {
if (!ar.isDeserializing)
return Event::serialize(ar) & latency & valueSize & key;
return serializer(Event::serialize(ar), latency, valueSize, key);
else
return ar & latency & valueSize & key;
return serializer(ar, latency, valueSize, key);
}

double latency;

@@ -91,9 +91,9 @@ namespace FdbClientLogEvents {

template <typename Ar> Ar& serialize(Ar &ar) {
if (!ar.isDeserializing)
return Event::serialize(ar) & latency & rangeSize & startKey & endKey;
return serializer(Event::serialize(ar), latency, rangeSize, startKey, endKey);
else
return ar & latency & rangeSize & startKey & endKey;
return serializer(ar, latency, rangeSize, startKey, endKey);
}

double latency;

@@ -112,9 +112,9 @@ namespace FdbClientLogEvents {

template <typename Ar> Ar& serialize(Ar &ar) {
if (!ar.isDeserializing)
return Event::serialize(ar) & latency & numMutations & commitBytes & req.transaction & req.arena;
return serializer(Event::serialize(ar), latency, numMutations, commitBytes, req.transaction, req.arena);
else
return ar & latency & numMutations & commitBytes & req.transaction & req.arena;
return serializer(ar, latency, numMutations, commitBytes, req.transaction, req.arena);
}

double latency;

@@ -145,9 +145,9 @@ namespace FdbClientLogEvents {

template <typename Ar> Ar& serialize(Ar &ar) {
if (!ar.isDeserializing)
return Event::serialize(ar) & errCode & key;
return serializer(Event::serialize(ar), errCode, key);
else
return ar & errCode & key;
return serializer(ar, errCode, key);
}

int errCode;

@@ -164,9 +164,9 @@ namespace FdbClientLogEvents {

template <typename Ar> Ar& serialize(Ar &ar) {
if (!ar.isDeserializing)
return Event::serialize(ar) & errCode & startKey & endKey;
return serializer(Event::serialize(ar), errCode, startKey, endKey);
else
return ar & errCode & startKey & endKey;
return serializer(ar, errCode, startKey, endKey);
}

int errCode;

@@ -184,9 +184,9 @@ namespace FdbClientLogEvents {

template <typename Ar> Ar& serialize(Ar &ar) {
if (!ar.isDeserializing)
return Event::serialize(ar) & errCode & req.transaction & req.arena;
return serializer(Event::serialize(ar), errCode, req.transaction, req.arena);
else
return ar & errCode & req.transaction & req.arena;
return serializer(ar, errCode, req.transaction, req.arena);
}

int errCode;

@@ -40,7 +40,7 @@ struct ClientWorkerInterface {

template <class Ar>
void serialize( Ar& ar ) {
ar & reboot & profiler;
serializer(ar, reboot, profiler);
}
};

@@ -52,7 +52,7 @@ struct RebootRequest {

template <class Ar>
void serialize(Ar& ar) {
ar & deleteData & checkData;
serializer(ar, deleteData, checkData);
}
};

@@ -77,7 +77,7 @@ struct ProfilerRequest {

template<class Ar>
void serialize( Ar& ar ) {
ar & reply & type & action & duration & outputFile;
serializer(ar, reply, type, action, duration, outputFile);
}
};
BINARY_SERIALIZABLE( ProfilerRequest::Type );

@@ -52,7 +52,7 @@ struct ClusterInterface {

template <class Ar>
void serialize( Ar& ar ) {
ar & openDatabase & failureMonitoring & databaseStatus & ping & getClientWorkers & forceRecovery;
serializer(ar, openDatabase, failureMonitoring, databaseStatus, ping, getClientWorkers, forceRecovery);
}
};

@@ -93,7 +93,7 @@ struct ClientVersionRef {

template <class Ar>
void serialize(Ar& ar) {
ar & clientVersion & sourceVersion & protocolVersion;
serializer(ar, clientVersion, sourceVersion, protocolVersion);
}

size_t expectedSize() const { return clientVersion.size() + sourceVersion.size() + protocolVersion.size(); }

@@ -125,7 +125,7 @@ struct OpenDatabaseRequest {
template <class Ar>
void serialize(Ar& ar) {
ASSERT( ar.protocolVersion() >= 0x0FDB00A400040001LL );
ar & issues & supportedVersions & traceLogGroup & knownClientInfoID & reply & arena;
serializer(ar, issues, supportedVersions, traceLogGroup, knownClientInfoID, reply, arena);
}
};

@@ -138,7 +138,7 @@ struct SystemFailureStatus {

template <class Ar>
void serialize(Ar& ar) {
ar & address & status;
serializer(ar, address, status);
}
};

@@ -159,7 +159,7 @@ struct FailureMonitoringRequest {

template <class Ar>
void serialize(Ar& ar) {
ar & senderStatus & failureInformationVersion & reply;
serializer(ar, senderStatus, failureInformationVersion, reply);
}
};

@@ -173,7 +173,7 @@ struct FailureMonitoringReply {

template <class Ar>
void serialize(Ar& ar) {
ar & changes & failureInformationVersion & allOthersFailed & clientRequestIntervalMS & considerServerFailedTimeoutMS & arena;
serializer(ar, changes, failureInformationVersion, allOthersFailed, clientRequestIntervalMS, considerServerFailedTimeoutMS, arena);
}
};

@@ -182,7 +182,7 @@ struct StatusRequest {

template <class Ar>
void serialize(Ar& ar) {
ar & reply;
serializer(ar, reply);
}
};

@@ -196,7 +196,7 @@ struct StatusReply {

template <class Ar>
void serialize(Ar& ar) {
ar & statusStr;
serializer(ar, statusStr);
if( ar.isDeserializing ) {
json_spirit::mValue mv;
if(g_network->isSimulated()) {

@@ -218,7 +218,7 @@ struct GetClientWorkersRequest {

template <class Ar>
void serialize(Ar& ar) {
ar & reply;
serializer(ar, reply);
}
};

@@ -229,7 +229,7 @@ struct ForceRecoveryRequest {

template <class Ar>
void serialize(Ar& ar) {
ar & reply;
serializer(ar, reply);
}
};

@@ -50,7 +50,7 @@ struct MutationRef {

template <class Ar>
void serialize( Ar& ar ) {
ar & type & param1 & param2;
serializer(ar, type, param1, param2);
}

// These masks define which mutation types have particular properties (they are used to implement isSingleKeyMutation() etc)

@@ -101,7 +101,7 @@ struct CommitTransactionRef {

template <class Ar>
force_inline void serialize( Ar& ar ) {
ar & read_conflict_ranges & write_conflict_ranges & mutations & read_snapshot;
serializer(ar, read_conflict_ranges, write_conflict_ranges, mutations, read_snapshot);
}

// Convenience for internal code required to manipulate these without the Native API

@@ -122,7 +122,7 @@ struct LeaderInfo {

template <class Ar>
void serialize(Ar& ar) {
ar & changeID & serializedInfo & forward;
serializer(ar, changeID, serializedInfo, forward);
}
};

@@ -136,7 +136,7 @@ struct GetLeaderRequest {

template <class Ar>
void serialize(Ar& ar) {
ar & key & knownLeader & reply;
serializer(ar, key, knownLeader, reply);
}
};

@@ -41,7 +41,7 @@ struct SatelliteInfo {

template <class Ar>
void serialize(Ar& ar) {
ar & dcId & priority;
serializer(ar, dcId, priority);
}
};

@@ -71,8 +71,8 @@ struct RegionInfo {

template <class Ar>
void serialize(Ar& ar) {
ar & dcId & priority & satelliteTLogPolicy & satelliteDesiredTLogCount & satelliteTLogReplicationFactor & satelliteTLogWriteAntiQuorum & satelliteTLogUsableDcs &
satelliteTLogPolicyFallback & satelliteTLogReplicationFactorFallback & satelliteTLogWriteAntiQuorumFallback & satelliteTLogUsableDcsFallback & satellites;
serializer(ar, dcId, priority, satelliteTLogPolicy, satelliteDesiredTLogCount, satelliteTLogReplicationFactor, satelliteTLogWriteAntiQuorum, satelliteTLogUsableDcs,
satelliteTLogPolicyFallback, satelliteTLogReplicationFactorFallback, satelliteTLogWriteAntiQuorumFallback, satelliteTLogUsableDcsFallback, satellites);
}
};

@@ -203,7 +203,7 @@ struct DatabaseConfiguration {
template <class Ar>
void serialize(Ar& ar) {
if (!ar.isDeserializing) makeConfigurationImmutable();
ar & rawConfiguration;
serializer(ar, rawConfiguration);
if (ar.isDeserializing) {
for(auto c=rawConfiguration.begin(); c!=rawConfiguration.end(); ++c)
setInternal(c->key, c->value);

@@ -53,7 +53,7 @@ struct Tag {

template <class Ar>
force_inline void serialize_unversioned(Ar& ar) {
ar & locality & id;
serializer(ar, locality, id);
}
};
#pragma pack(pop)

@@ -193,7 +193,7 @@ struct KeyRangeRef {

template <class Ar>
force_inline void serialize(Ar& ar) {
ar & const_cast<KeyRef&>(begin) & const_cast<KeyRef&>(end);
serializer(ar, const_cast<KeyRef&>(begin), const_cast<KeyRef&>(end));
if( begin > end ) {
throw inverted_range();
};

@@ -227,7 +227,7 @@ struct KeyValueRef {
int expectedSize() const { return key.expectedSize() + value.expectedSize(); }

template <class Ar>
force_inline void serialize(Ar& ar) { ar & key & value; }
force_inline void serialize(Ar& ar) { serializer(ar, key, value); }

struct OrderByKey {
bool operator()(KeyValueRef const& a, KeyValueRef const& b) const {

@@ -385,7 +385,7 @@ public:

template <class Ar>
void serialize( Ar& ar ) {
ar & key & orEqual & offset;
serializer(ar, key, orEqual, offset);
}
};

@@ -418,7 +418,7 @@ struct KeyRangeWith : KeyRange {

template <class Ar>
void serialize( Ar& ar ) {
ar & ((KeyRange&)*this) & value;
serializer(ar, ((KeyRange&)*this), value);
}
};
template <class Val>

@@ -470,7 +470,7 @@ struct RangeResultRef : VectorRef<KeyValueRef> {

template <class Ar>
void serialize( Ar& ar ) {
ar & ((VectorRef<KeyValueRef>&)*this) & more & readThrough & readToBegin & readThroughEnd;
serializer(ar, ((VectorRef<KeyValueRef>&)*this), more, readThrough, readToBegin, readThroughEnd);
}
};

@@ -492,7 +492,7 @@ struct KeyValueStoreType {
operator StoreType() const { return StoreType(type); }

template <class Ar>
void serialize(Ar& ar) { ar & type; }
void serialize(Ar& ar) { serializer(ar, type); }

std::string toString() const {
switch( type ) {

@@ -520,7 +520,7 @@ struct StorageBytes {

template <class Ar>
void serialize(Ar& ar) {
ar & free & total & used & available;
serializer(ar, free, total, used, available);
}
};

@@ -639,7 +639,7 @@ struct ClusterControllerPriorityInfo {

template <class Ar>
void serialize(Ar& ar) {
ar & processClassFitness & isExcluded & dcFitness;
serializer(ar, processClassFitness, isExcluded, dcFitness);
}
};

@@ -1004,6 +1004,7 @@ namespace fileBackup {

// Update the range bytes written in the backup config
backup.rangeBytesWritten().atomicOp(tr, file->size(), MutationRef::AddValue);
backup.snapshotRangeFileCount().atomicOp(tr, 1, MutationRef::AddValue);

// See if there is already a file for this key which has an earlier begin, update the map if not.
Optional<BackupConfig::RangeSlice> s = wait(backup.snapshotRangeFileMap().get(tr, range.end));

@@ -1129,11 +1130,31 @@ namespace fileBackup {
if(done)
return Void();

// Start writing a new file
// Start writing a new file after verifying this task should keep running as of a new read version (which must be >= outVersion)
outVersion = values.second;
// block size must be at least large enough for 3 max size keys and 2 max size values + overhead so 250k conservatively.
state int blockSize = BUGGIFY ? g_random->randomInt(250e3, 4e6) : CLIENT_KNOBS->BACKUP_RANGEFILE_BLOCK_SIZE;
Reference<IBackupFile> f = wait(bc->writeRangeFile(outVersion, blockSize));
state Version snapshotBeginVersion;
state int64_t snapshotRangeFileCount;

state Reference<ReadYourWritesTransaction> tr(new ReadYourWritesTransaction(cx));
loop {
try {
tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
tr->setOption(FDBTransactionOptions::LOCK_AWARE);

wait(taskBucket->keepRunning(tr, task)
&& storeOrThrow(backup.snapshotBeginVersion().get(tr), snapshotBeginVersion)
&& store(backup.snapshotRangeFileCount().getD(tr), snapshotRangeFileCount)
);

break;
} catch(Error &e) {
wait(tr->onError(e));
}
}

Reference<IBackupFile> f = wait(bc->writeRangeFile(snapshotBeginVersion, snapshotRangeFileCount, outVersion, blockSize));
outFile = f;

// Initialize range file writer and write begin key

@@ -3360,8 +3381,9 @@ class FileBackupAgentImpl {
public:
static const int MAX_RESTORABLE_FILE_METASECTION_BYTES = 1024 * 8;

// This method will return the final status of the backup
ACTOR static Future<int> waitBackup(FileBackupAgent* backupAgent, Database cx, std::string tagName, bool stopWhenDone) {
// This method will return the final status of the backup at tag, and return the URL that was used on the tag
// when that status value was read.
ACTOR static Future<int> waitBackup(FileBackupAgent* backupAgent, Database cx, std::string tagName, bool stopWhenDone, Reference<IBackupContainer> *pContainer = nullptr, UID *pUID = nullptr) {
state std::string backTrace;
state KeyBackedTag tag = makeBackupTag(tagName);

@@ -3379,13 +3401,20 @@ public:
state BackupConfig config(oldUidAndAborted.get().first);
state EBackupState status = wait(config.stateEnum().getD(tr, false, EBackupState::STATE_NEVERRAN));

// Break, if no longer runnable
if (!FileBackupAgent::isRunnable(status)) {
return status;
// Break, if one of the following is true
//  - no longer runnable
//  - in differential mode (restorable) and stopWhenDone is not enabled
if( !FileBackupAgent::isRunnable(status) || (!stopWhenDone) && (BackupAgentBase::STATE_DIFFERENTIAL == status) ) {

if(pContainer != nullptr) {
Reference<IBackupContainer> c = wait(config.backupContainer().getOrThrow(tr, false, backup_invalid_info()));
*pContainer = c;
}

if(pUID != nullptr) {
*pUID = oldUidAndAborted.get().first;
}

// Break, if in differential mode (restorable) and stopWhenDone is not enabled
if ((!stopWhenDone) && (BackupAgentBase::STATE_DIFFERENTIAL == status)) {
return status;
}

@@ -4061,7 +4090,7 @@ void FileBackupAgent::setLastRestorable(Reference<ReadYourWritesTransaction> tr,
tr->set(lastRestorable.pack(tagName), BinaryWriter::toValue<Version>(version, Unversioned()));
}

Future<int> FileBackupAgent::waitBackup(Database cx, std::string tagName, bool stopWhenDone) {
return FileBackupAgentImpl::waitBackup(this, cx, tagName, stopWhenDone);
Future<int> FileBackupAgent::waitBackup(Database cx, std::string tagName, bool stopWhenDone, Reference<IBackupContainer> *pContainer, UID *pUID) {
return FileBackupAgentImpl::waitBackup(this, cx, tagName, stopWhenDone, pContainer, pUID);
}
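
A usage sketch for the extended waitBackup() signature, which can now hand back the backup container and UID that were current when the status was read. The tag name is a placeholder and this snippet is not part of the patch.

ACTOR Future<Void> reportWhenRestorable(Database cx) {
	state FileBackupAgent agent;
	state Reference<IBackupContainer> container;
	state UID uid;
	// stopWhenDone = false: returns once the tag is no longer runnable or reaches differential (restorable) mode.
	int status = wait(agent.waitBackup(cx, "default", false, &container, &uid)); // "default" is a placeholder tag
	printf("Backup status %d, UID %s\n", status, uid.toString().c_str());
	return Void();
}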
|
||||
|
||||
|
|
|
@ -30,7 +30,7 @@ namespace HTTP {
|
|||
o.reserve(s.size() * 3);
|
||||
char buf[4];
|
||||
for(auto c : s)
|
||||
if(std::isalnum(c))
|
||||
if(std::isalnum(c) || c == '?' || c == '/' || c == '-' || c == '_' || c == '.')
|
||||
o.append(&c, 1);
|
||||
else {
|
||||
sprintf(buf, "%%%.02X", c);
|
||||
|
@ -292,15 +292,41 @@ namespace HTTP {
|
|||
// Request content is provided as UnsentPacketQueue *pContent which will be depleted as bytes are sent but the queue itself must live for the life of this actor
|
||||
// and be destroyed by the caller
|
||||
// TODO: pSent is very hackish, do something better.
|
||||
ACTOR Future<Reference<HTTP::Response>> doRequest(Reference<IConnection> conn, std::string verb, std::string resource, HTTP::Headers headers, UnsentPacketQueue *pContent, int contentLen, Reference<IRateControl> sendRate, int64_t *pSent, Reference<IRateControl> recvRate) {
|
||||
ACTOR Future<Reference<HTTP::Response>> doRequest(Reference<IConnection> conn, std::string verb, std::string resource, HTTP::Headers headers, UnsentPacketQueue *pContent, int contentLen, Reference<IRateControl> sendRate, int64_t *pSent, Reference<IRateControl> recvRate, std::string requestIDHeader) {
|
||||
state TraceEvent event(SevDebug, "HTTPRequest");
|
||||
|
||||
state UnsentPacketQueue empty;
|
||||
if(pContent == NULL)
|
||||
pContent = ∅
|
||||
|
||||
// There is no standard http request id header field, so either a global default can be set via a knob
|
||||
// or it can be set per-request with the requestIDHeader argument (which overrides the default)
|
||||
if(requestIDHeader.empty()) {
|
||||
requestIDHeader = CLIENT_KNOBS->HTTP_REQUEST_ID_HEADER;
|
||||
}
|
||||
|
||||
state bool earlyResponse = false;
|
||||
state int total_sent = 0;
|
||||
|
||||
event.detail("DebugID", conn->getDebugID());
|
||||
event.detail("RemoteAddress", conn->getPeerAddress());
|
||||
event.detail("Verb", verb);
|
||||
event.detail("Resource", resource);
|
||||
event.detail("RequestContentLen", contentLen);
|
||||
|
||||
try {
|
||||
state std::string requestID;
|
||||
if(!requestIDHeader.empty()) {
|
||||
requestID = g_random->randomUniqueID().toString();
|
||||
requestID = requestID.insert(20, "-");
|
||||
requestID = requestID.insert(16, "-");
|
||||
requestID = requestID.insert(12, "-");
|
||||
requestID = requestID.insert(8, "-");
|
||||
|
||||
headers[requestIDHeader] = requestID;
|
||||
event.detail("RequestIDSent", requestID);
|
||||
}
|
||||
|
||||
// Write headers to a packet buffer chain
|
||||
PacketBuffer *pFirst = new PacketBuffer();
|
||||
PacketBuffer *pLast = writeRequestHeader(verb, resource, headers, pFirst);
|
||||
|
@ -346,19 +372,59 @@ namespace HTTP {
			}

			wait(responseReading);

			double elapsed = timer() - send_start;
			if(CLIENT_KNOBS->HTTP_VERBOSE_LEVEL > 0)
				printf("[%s] HTTP code=%d early=%d, time=%fs %s %s contentLen=%d [%d out, response content len %d]\n",
					conn->getDebugID().toString().c_str(), r->code, earlyResponse, elapsed, verb.c_str(), resource.c_str(), contentLen, total_sent, (int)r->contentLen);
			if(CLIENT_KNOBS->HTTP_VERBOSE_LEVEL > 2)

			event.detail("ResponseCode", r->code);
			event.detail("ResponseContentLen", r->contentLen);
			event.detail("Elapsed", elapsed);

			Optional<Error> err;
			if(!requestIDHeader.empty()) {
				std::string responseID;
				auto iid = r->headers.find(requestIDHeader);
				if(iid != r->headers.end()) {
					responseID = iid->second;
				}
				event.detail("RequestIDReceived", responseID);
				if(requestID != responseID) {
					err = http_bad_request_id();
					// Log a non-debug error
					TraceEvent(SevError, "HTTPRequestFailedIDMismatch")
						.detail("DebugID", conn->getDebugID())
						.detail("RemoteAddress", conn->getPeerAddress())
						.detail("Verb", verb)
						.detail("Resource", resource)
						.detail("RequestContentLen", contentLen)
						.detail("ResponseCode", r->code)
						.detail("ResponseContentLen", r->contentLen)
						.detail("RequestIDSent", requestID)
						.detail("RequestIDReceived", responseID)
						.error(err.get());
				}
			}

			if(CLIENT_KNOBS->HTTP_VERBOSE_LEVEL > 0) {
				printf("[%s] HTTP %scode=%d early=%d, time=%fs %s %s contentLen=%d [%d out, response content len %d]\n",
					conn->getDebugID().toString().c_str(),
					(err.present() ? format("*ERROR*=%s ", err.get().name()).c_str() : ""),
					r->code, earlyResponse, elapsed, verb.c_str(), resource.c_str(), contentLen, total_sent, (int)r->contentLen);
			}
			if(CLIENT_KNOBS->HTTP_VERBOSE_LEVEL > 2) {
				printf("[%s] HTTP RESPONSE: %s %s\n%s\n", conn->getDebugID().toString().c_str(), verb.c_str(), resource.c_str(), r->toString().c_str());
			}

			if(err.present()) {
				throw err.get();
			}

			return r;
		} catch(Error &e) {
			double elapsed = timer() - send_start;
			if(CLIENT_KNOBS->HTTP_VERBOSE_LEVEL > 0)
			if(CLIENT_KNOBS->HTTP_VERBOSE_LEVEL > 0 && e.code() != error_code_http_bad_request_id) {
				printf("[%s] HTTP *ERROR*=%s early=%d, time=%fs %s %s contentLen=%d [%d out]\n",
					conn->getDebugID().toString().c_str(), e.name(), earlyResponse, elapsed, verb.c_str(), resource.c_str(), contentLen, total_sent);
			}
			event.error(e);
			throw;
		}
	}

@ -51,5 +51,5 @@ namespace HTTP {
	PacketBuffer * writeRequestHeader(std::string const &verb, std::string const &resource, HTTP::Headers const &headers, PacketBuffer *dest);

	// Do an HTTP request to the blob store, parse the response.
	Future<Reference<Response>> doRequest(Reference<IConnection> const &conn, std::string const &verb, std::string const &resource, HTTP::Headers const &headers, UnsentPacketQueue * const &pContent, int const &contentLen, Reference<IRateControl> const &sendRate, int64_t * const &pSent, Reference<IRateControl> const &recvRate);
	Future<Reference<Response>> doRequest(Reference<IConnection> const &conn, std::string const &verb, std::string const &resource, HTTP::Headers const &headers, UnsentPacketQueue * const &pContent, int const &contentLen, Reference<IRateControl> const &sendRate, int64_t * const &pSent, Reference<IRateControl> const &recvRate, const std::string &requestHeader = std::string());
}

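The new optional requestHeader argument defaults to an empty string, in which case the HTTP_REQUEST_ID_HEADER knob decides whether a request ID is attached at all. The ID itself is a random UID reformatted into the usual 8-4-4-4-12 UUID layout by the chain of insert() calls shown earlier; a minimal standalone illustration of that formatting (hypothetical helper name):

#include <cassert>
#include <string>

// Inserting dashes at offsets 20, 16, 12 and 8, working from the largest
// offset down so each remaining offset still refers to un-shifted positions,
// turns a 32-character hex string into the familiar 8-4-4-4-12 UUID layout.
std::string formatRequestID(std::string id32) {
	assert(id32.size() == 32);
	id32.insert(20, "-");
	id32.insert(16, "-");
	id32.insert(12, "-");
	id32.insert(8, "-");
	return id32;
}

// formatRequestID("00112233445566778899aabbccddeeff")
//   == "00112233-4455-6677-8899-aabbccddeeff"
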
@ -148,6 +148,7 @@ ClientKnobs::ClientKnobs(bool randomize) {
	init( HTTP_READ_SIZE, 128*1024 );
	init( HTTP_SEND_SIZE, 32*1024 );
	init( HTTP_VERBOSE_LEVEL, 0 );
	init( HTTP_REQUEST_ID_HEADER, "" );
	init( BLOBSTORE_CONNECT_TRIES, 10 );
	init( BLOBSTORE_CONNECT_TIMEOUT, 10 );
	init( BLOBSTORE_MAX_CONNECTION_LIFE, 120 );

@ -152,6 +152,7 @@ public:
	int HTTP_SEND_SIZE;
	int HTTP_READ_SIZE;
	int HTTP_VERBOSE_LEVEL;
	std::string HTTP_REQUEST_ID_HEADER;
	int BLOBSTORE_CONNECT_TRIES;
	int BLOBSTORE_CONNECT_TIMEOUT;
	int BLOBSTORE_MAX_CONNECTION_LIFE;

@ -294,6 +294,7 @@ ACTOR Future<ConfigurationResult::Type> changeConfig( Database cx, std::map<std:

	if(!creating && !force) {
		state Future<Standalone<RangeResultRef>> fConfig = tr.getRange(configKeys, CLIENT_KNOBS->TOO_MANY);
		state Future<vector<ProcessData>> fWorkers = getWorkers(&tr);
		wait( success(fConfig) || tooLong );

		if(!fConfig.isReady()) {

@ -378,6 +379,44 @@ ACTOR Future<ConfigurationResult::Type> changeConfig( Database cx, std::map<std:
				}
			}
		}

		wait( success(fWorkers) || tooLong );
		if(!fWorkers.isReady()) {
			return ConfigurationResult::DATABASE_UNAVAILABLE;
		}

		if(newConfig.regions.size()) {
			std::map<Optional<Key>, std::set<Optional<Key>>> dcId_zoneIds;
			for(auto& it : fWorkers.get()) {
				if( it.processClass.machineClassFitness(ProcessClass::Storage) <= ProcessClass::WorstFit ) {
					dcId_zoneIds[it.locality.dcId()].insert(it.locality.zoneId());
				}
			}
			for(auto& region : newConfig.regions) {
				if(dcId_zoneIds[region.dcId].size() < std::max(newConfig.storageTeamSize, newConfig.tLogReplicationFactor)) {
					return ConfigurationResult::NOT_ENOUGH_WORKERS;
				}
				if(region.satelliteTLogReplicationFactor > 0 && region.priority >= 0) {
					int totalSatelliteProcesses = 0;
					for(auto& sat : region.satellites) {
						totalSatelliteProcesses += dcId_zoneIds[sat.dcId].size();
					}
					if(totalSatelliteProcesses < region.satelliteTLogReplicationFactor) {
						return ConfigurationResult::NOT_ENOUGH_WORKERS;
					}
				}
			}
		} else {
			std::set<Optional<Key>> zoneIds;
			for(auto& it : fWorkers.get()) {
				if( it.processClass.machineClassFitness(ProcessClass::Storage) <= ProcessClass::WorstFit ) {
					zoneIds.insert(it.locality.zoneId());
				}
			}
			if(zoneIds.size() < std::max(newConfig.storageTeamSize, newConfig.tLogReplicationFactor)) {
				return ConfigurationResult::NOT_ENOUGH_WORKERS;
			}
		}
	}
}

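The added check rejects a configuration change when the cluster cannot actually host it: for each requested region, the distinct zones of storage-capable workers in that region's DC must be at least max(storageTeamSize, tLogReplicationFactor). A simplified standalone illustration of that counting logic (WorkerSketch and regionHasEnoughWorkers are hypothetical names, the types are deliberately reduced to plain strings):

#include <algorithm>
#include <iostream>
#include <map>
#include <set>
#include <string>
#include <vector>

// Count distinct zones of storage-capable workers per DC and compare against
// the larger of the storage and tLog replication requirements.
struct WorkerSketch { std::string dcId, zoneId; bool canStoreData; };

bool regionHasEnoughWorkers(const std::vector<WorkerSketch>& workers,
                            const std::string& regionDcId,
                            int storageTeamSize, int tLogReplicationFactor) {
	std::map<std::string, std::set<std::string>> dcToZones;
	for (const auto& w : workers)
		if (w.canStoreData)
			dcToZones[w.dcId].insert(w.zoneId);
	return (int)dcToZones[regionDcId].size() >=
	       std::max(storageTeamSize, tLogReplicationFactor);
}

int main() {
	std::vector<WorkerSketch> workers = {
		{"dc1", "z1", true}, {"dc1", "z2", true}, {"dc1", "z2", true}, {"dc2", "z9", true}
	};
	// dc1 has only two distinct zones, so triple replication cannot be satisfied there.
	std::cout << regionHasEnoughWorkers(workers, "dc1", 3, 3) << "\n"; // prints 0
	std::cout << regionHasEnoughWorkers(workers, "dc1", 2, 2) << "\n"; // prints 1
}
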
@ -54,6 +54,7 @@ public:
		REGION_NOT_FULLY_REPLICATED,
		MULTIPLE_ACTIVE_REGIONS,
		REGIONS_CHANGED,
		NOT_ENOUGH_WORKERS,
		SUCCESS
	};
};

@ -50,7 +50,7 @@ struct MasterProxyInterface {

	template <class Archive>
	void serialize(Archive& ar) {
		ar & locality & commit & getConsistentReadVersion & getKeyServersLocations & waitFailure & getStorageServerRejoinInfo & getRawCommittedVersion & txnState;
		serializer(ar, locality, commit, getConsistentReadVersion, getKeyServersLocations, waitFailure, getStorageServerRejoinInfo, getRawCommittedVersion, txnState);
	}

	void initEndpoints() {

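This is the first of many hunks in this merge that replace chained "ar & a & b & c;" expressions with a single variadic "serializer(ar, a, b, c);" call; the same mechanical substitution repeats across the interface headers below. A minimal self-contained sketch of what such a variadic helper can look like (this is an illustration, not the FoundationDB implementation):

#include <iostream>

// Base case: nothing left to serialize.
template <class Archive>
void serializer(Archive&) {}

// Recursive case: serialize the first item, then the rest.
template <class Archive, class Item, class... Items>
void serializer(Archive& ar, Item& item, Items&... items) {
	ar & item;
	serializer(ar, items...);
}

// A toy "archive" that just prints whatever it is asked to serialize.
struct PrintArchive {
	template <class T>
	PrintArchive& operator&(const T& v) { std::cout << v << " "; return *this; }
};

int main() {
	PrintArchive ar;
	int version = 7;
	double penalty = 0.5;
	serializer(ar, version, penalty); // prints: 7 0.5
	std::cout << "\n";
}

One practical advantage of the call-style form is that it leaves room for the serialization framework to evolve (for example, to inspect all fields at once) without touching every operator& chain again.
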
@ -67,7 +67,7 @@ struct CommitID {
|
|||
|
||||
template <class Ar>
|
||||
void serialize(Ar& ar) {
|
||||
ar & version & txnBatchId;
|
||||
serializer(ar, version, txnBatchId);
|
||||
}
|
||||
|
||||
CommitID() : version(invalidVersion), txnBatchId(0) {}
|
||||
|
@ -93,7 +93,7 @@ struct CommitTransactionRequest {
|
|||
|
||||
template <class Ar>
|
||||
void serialize(Ar& ar) {
|
||||
ar & transaction & reply & arena & flags & debugID;
|
||||
serializer(ar, transaction, reply, arena, flags, debugID);
|
||||
}
|
||||
};
|
||||
|
||||
|
@ -116,7 +116,7 @@ struct GetReadVersionReply {
|
|||
|
||||
template <class Ar>
|
||||
void serialize(Ar& ar) {
|
||||
ar & version & locked;
|
||||
serializer(ar, version, locked);
|
||||
}
|
||||
};
|
||||
|
||||
|
@ -144,7 +144,7 @@ struct GetReadVersionRequest {
|
|||
|
||||
template <class Ar>
|
||||
void serialize(Ar& ar) {
|
||||
ar & transactionCount & flags & debugID & reply;
|
||||
serializer(ar, transactionCount, flags, debugID, reply);
|
||||
}
|
||||
};
|
||||
|
||||
|
@ -154,7 +154,7 @@ struct GetKeyServerLocationsReply {
|
|||
|
||||
template <class Ar>
|
||||
void serialize(Ar& ar) {
|
||||
ar & results & arena;
|
||||
serializer(ar, results, arena);
|
||||
}
|
||||
};
|
||||
|
||||
|
@ -171,7 +171,7 @@ struct GetKeyServerLocationsRequest {
|
|||
|
||||
template <class Ar>
|
||||
void serialize(Ar& ar) {
|
||||
ar & begin & end & limit & reverse & reply & arena;
|
||||
serializer(ar, begin, end, limit, reverse, reply, arena);
|
||||
}
|
||||
};
|
||||
|
||||
|
@ -183,7 +183,7 @@ struct GetRawCommittedVersionRequest {
|
|||
|
||||
template <class Ar>
|
||||
void serialize( Ar& ar ) {
|
||||
ar & debugID & reply;
|
||||
serializer(ar, debugID, reply);
|
||||
}
|
||||
};
|
||||
|
||||
|
@ -196,7 +196,7 @@ struct GetStorageServerRejoinInfoReply {
|
|||
|
||||
template <class Ar>
|
||||
void serialize(Ar& ar) {
|
||||
ar & version & tag & newTag & newLocality & history;
|
||||
serializer(ar, version, tag, newTag, newLocality, history);
|
||||
}
|
||||
};
|
||||
|
||||
|
@ -210,7 +210,7 @@ struct GetStorageServerRejoinInfoRequest {
|
|||
|
||||
template <class Ar>
|
||||
void serialize( Ar& ar ) {
|
||||
ar & id & dcId & reply;
|
||||
serializer(ar, id, dcId, reply);
|
||||
}
|
||||
};
|
||||
|
||||
|
@ -223,7 +223,7 @@ struct TxnStateRequest {
|
|||
|
||||
template <class Ar>
|
||||
void serialize(Ar& ar) {
|
||||
ar & data & sequence & last & reply & arena;
|
||||
serializer(ar, data, sequence, last, reply, arena);
|
||||
}
|
||||
};
|
||||
|
||||
|
|
|
@ -132,7 +132,7 @@ public:
|
|||
|
||||
template <class Ar>
|
||||
void serialize_load( Ar& ar ) {
|
||||
ar & totalBytes;
|
||||
serializer(ar, totalBytes);
|
||||
|
||||
if(totalBytes > 0) {
|
||||
blob_begin = blob_end = new (ar.arena()) Blob;
|
||||
|
@ -142,7 +142,7 @@ public:
|
|||
}
|
||||
template <class Ar>
|
||||
void serialize_save( Ar& ar ) const {
|
||||
ar & totalBytes;
|
||||
serializer(ar, totalBytes);
|
||||
for(auto b = blob_begin; b; b=b->next)
|
||||
ar.serializeBytes(b->data);
|
||||
}
|
||||
|
|
|
@ -68,11 +68,11 @@ struct StorageServerInterface {
	void serialize( Ar& ar ) {
		// StorageServerInterface is persisted in the database and in the tLog's data structures, so changes here have to be
		// versioned carefully!
		ar & uniqueID & locality & getVersion & getValue & getKey & getKeyValues & getShardState & waitMetrics
			& splitMetrics & getPhysicalMetrics & waitFailure & getQueuingMetrics & getKeyValueStoreType;
		serializer(ar, uniqueID, locality, getVersion, getValue, getKey, getKeyValues, getShardState, waitMetrics,
			splitMetrics, getPhysicalMetrics, waitFailure, getQueuingMetrics, getKeyValueStoreType);

		if( ar.protocolVersion() >= 0x0FDB00A200090001LL )
			ar & watchValue;
			serializer(ar, watchValue);
	}
	bool operator == (StorageServerInterface const& s) const { return uniqueID == s.uniqueID; }
	bool operator < (StorageServerInterface const& s) const { return uniqueID < s.uniqueID; }

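The "versioned carefully" warning is about the gating idiom visible just above: a field added later (watchValue) is only serialized when the protocol version is new enough, so older persisted data and older peers stay readable. A toy standalone sketch of that pattern, with made-up types and an invented "older" version constant purely for illustration:

#include <cstdint>
#include <vector>

struct ToyArchive {
	uint64_t protocolVersion;            // stand-in for ar.protocolVersion()
	std::vector<uint8_t> bytes;
	ToyArchive& operator&(uint32_t v) {
		for (int i = 0; i < 4; i++) bytes.push_back(uint8_t(v >> (8 * i)));
		return *this;
	}
};

struct ToyInterface {
	uint32_t getValue = 1;
	uint32_t watchValue = 2;             // added later; gated by protocol version

	void serialize(ToyArchive& ar) {
		ar & getValue;
		if (ar.protocolVersion >= 0x0FDB00A200090001ULL)
			ar & watchValue;
	}
};

int main() {
	ToyArchive oldAr{0x0FDB00A200080000ULL, {}};  // hypothetical older version
	ToyArchive newAr{0x0FDB00A200090001ULL, {}};
	ToyInterface i;
	i.serialize(oldAr); // 4 bytes: old peers never see the new field
	i.serialize(newAr); // 8 bytes
	return (oldAr.bytes.size() == 4 && newAr.bytes.size() == 8) ? 0 : 1;
}
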
@ -103,7 +103,7 @@ struct GetValueReply : public LoadBalancedReply {
|
|||
|
||||
template <class Ar>
|
||||
void serialize( Ar& ar ) {
|
||||
ar & *(LoadBalancedReply*)this & value;
|
||||
serializer(ar, *(LoadBalancedReply*)this, value);
|
||||
}
|
||||
};
|
||||
|
||||
|
@ -118,7 +118,7 @@ struct GetValueRequest {
|
|||
|
||||
template <class Ar>
|
||||
void serialize( Ar& ar ) {
|
||||
ar & key & version & debugID & reply;
|
||||
serializer(ar, key, version, debugID, reply);
|
||||
}
|
||||
};
|
||||
|
||||
|
@ -134,7 +134,7 @@ struct WatchValueRequest {
|
|||
|
||||
template <class Ar>
|
||||
void serialize( Ar& ar ) {
|
||||
ar & key & value & version & debugID & reply;
|
||||
serializer(ar, key, value, version, debugID, reply);
|
||||
}
|
||||
};
|
||||
|
||||
|
@ -146,7 +146,7 @@ struct GetKeyValuesReply : public LoadBalancedReply {
|
|||
|
||||
template <class Ar>
|
||||
void serialize( Ar& ar ) {
|
||||
ar & *(LoadBalancedReply*)this & data & version & more & arena;
|
||||
serializer(ar, *(LoadBalancedReply*)this, data, version, more, arena);
|
||||
}
|
||||
};
|
||||
|
||||
|
@ -162,7 +162,7 @@ struct GetKeyValuesRequest {
|
|||
// GetKeyValuesRequest(const KeySelectorRef& begin, const KeySelectorRef& end, Version version, int limit, int limitBytes, Optional<UID> debugID) : begin(begin), end(end), version(version), limit(limit), limitBytes(limitBytes) {}
|
||||
template <class Ar>
|
||||
void serialize( Ar& ar ) {
|
||||
ar & begin & end & version & limit & limitBytes & debugID & reply & arena;
|
||||
serializer(ar, begin, end, version, limit, limitBytes, debugID, reply, arena);
|
||||
}
|
||||
};
|
||||
|
||||
|
@ -174,7 +174,7 @@ struct GetKeyReply : public LoadBalancedReply {
|
|||
|
||||
template <class Ar>
|
||||
void serialize( Ar& ar ) {
|
||||
ar & *(LoadBalancedReply*)this & sel;
|
||||
serializer(ar, *(LoadBalancedReply*)this, sel);
|
||||
}
|
||||
};
|
||||
|
||||
|
@ -189,7 +189,7 @@ struct GetKeyRequest {
|
|||
|
||||
template <class Ar>
|
||||
void serialize( Ar& ar ) {
|
||||
ar & sel & version & reply & arena;
|
||||
serializer(ar, sel, version, reply, arena);
|
||||
}
|
||||
};
|
||||
|
||||
|
@ -208,7 +208,7 @@ struct GetShardStateRequest {
|
|||
|
||||
template <class Ar>
|
||||
void serialize( Ar& ar ) {
|
||||
ar & keys & mode & reply;
|
||||
serializer(ar, keys, mode, reply);
|
||||
}
|
||||
};
|
||||
|
||||
|
@ -244,7 +244,7 @@ struct StorageMetrics {
|
|||
|
||||
template <class Ar>
|
||||
void serialize( Ar& ar ) {
|
||||
ar & bytes & bytesPerKSecond & iosPerKSecond;
|
||||
serializer(ar, bytes, bytesPerKSecond, iosPerKSecond);
|
||||
}
|
||||
|
||||
void negate() { operator*=(-1.0); }
|
||||
|
@ -278,7 +278,7 @@ struct WaitMetricsRequest {
|
|||
|
||||
template <class Ar>
|
||||
void serialize( Ar& ar ) {
|
||||
ar & keys & min & max & reply & arena;
|
||||
serializer(ar, keys, min, max, reply, arena);
|
||||
}
|
||||
};
|
||||
|
||||
|
@ -288,7 +288,7 @@ struct SplitMetricsReply {
|
|||
|
||||
template <class Ar>
|
||||
void serialize( Ar& ar ) {
|
||||
ar & splits & used;
|
||||
serializer(ar, splits, used);
|
||||
}
|
||||
};
|
||||
|
||||
|
@ -306,7 +306,7 @@ struct SplitMetricsRequest {
|
|||
|
||||
template <class Ar>
|
||||
void serialize(Ar& ar) {
|
||||
ar & keys & limits & used & estimated & isLastShard & reply & arena;
|
||||
serializer(ar, keys, limits, used, estimated, isLastShard, reply, arena);
|
||||
}
|
||||
};
|
||||
|
||||
|
@ -317,7 +317,7 @@ struct GetPhysicalMetricsReply {
|
|||
|
||||
template <class Ar>
|
||||
void serialize(Ar& ar) {
|
||||
ar & load & free & capacity;
|
||||
serializer(ar, load, free, capacity);
|
||||
}
|
||||
};
|
||||
|
||||
|
@ -326,7 +326,7 @@ struct GetPhysicalMetricsRequest {
|
|||
|
||||
template <class Ar>
|
||||
void serialize(Ar& ar) {
|
||||
ar & reply;
|
||||
serializer(ar, reply);
|
||||
}
|
||||
};
|
||||
|
||||
|
@ -336,7 +336,7 @@ struct StorageQueuingMetricsRequest {
|
|||
|
||||
template <class Ar>
|
||||
void serialize(Ar& ar) {
|
||||
ar & reply;
|
||||
serializer(ar, reply);
|
||||
}
|
||||
};
|
||||
|
||||
|
@ -349,7 +349,7 @@ struct StorageQueuingMetricsReply {
|
|||
|
||||
template <class Ar>
|
||||
void serialize(Ar& ar) {
|
||||
ar & localTime & instanceID & bytesDurable & bytesInput & v & storageBytes;
|
||||
serializer(ar, localTime, instanceID, bytesDurable, bytesInput, v, storageBytes);
|
||||
}
|
||||
};
|
||||
|
||||
|
|
|
@ -0,0 +1,3 @@
|
|||
#pragma once
|
||||
#define FDB_VT_VERSION "${FDB_VERSION}"
|
||||
#define FDB_VT_PACKAGE_NAME "${FDB_PACKAGE_NAME}"
|
|
@ -0,0 +1,6 @@
|
|||
set(FDBMONITOR_SRCS ConvertUTF.h SimpleIni.h fdbmonitor.cpp)
|
||||
|
||||
add_executable(fdbmonitor ${FDBMONITOR_SRCS})
|
||||
target_link_libraries(fdbmonitor flow)
|
||||
|
||||
install(TARGETS fdbmonitor DESTINATION "${FDB_LIB_DIR}/foundationdb" COMPONENT server)
|
|
@ -257,9 +257,9 @@ private:
	try {
		TraceEvent("AFCUnderlyingOpenBegin").detail("Filename", filename);
		if(flags & IAsyncFile::OPEN_CACHED_READ_ONLY)
			flags = flags & ~IAsyncFile::OPEN_READWRITE | IAsyncFile::OPEN_READONLY;
			flags = (flags & ~IAsyncFile::OPEN_READWRITE) | IAsyncFile::OPEN_READONLY;
		else
			flags = flags & ~IAsyncFile::OPEN_READONLY | IAsyncFile::OPEN_READWRITE;
			flags = (flags & ~IAsyncFile::OPEN_READONLY) | IAsyncFile::OPEN_READWRITE;
		state Reference<IAsyncFile> f = wait( IAsyncFileSystem::filesystem()->open(filename, flags | IAsyncFile::OPEN_UNCACHED | IAsyncFile::OPEN_UNBUFFERED, mode) );
		TraceEvent("AFCUnderlyingOpenEnd").detail("Filename", filename);
		int64_t l = wait( f->size() );

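The parentheses added in that hunk are a readability fix rather than a behaviour change: in C++, '&' already binds tighter than '|', so both spellings compute the same mask; the explicit grouping just states the intent and avoids compiler warnings about mixing the two operators. A tiny self-contained check (the flag values below are made up for illustration):

#include <cassert>

int main() {
	// Hypothetical flag values, for illustration only.
	const int OPEN_READONLY = 0x1, OPEN_READWRITE = 0x2, OPEN_CACHED = 0x10;
	int flags = OPEN_READWRITE | OPEN_CACHED;
	// '&' has higher precedence than '|', so both forms yield the same value.
	assert(((flags & ~OPEN_READWRITE) | OPEN_READONLY) ==
	       (flags & ~OPEN_READWRITE | OPEN_READONLY));
	return 0;
}
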
@ -0,0 +1,61 @@
|
|||
set(FDBRPC_SRCS
|
||||
ActorFuzz.actor.cpp
|
||||
AsyncFileCached.actor.h
|
||||
AsyncFileEIO.actor.h
|
||||
AsyncFileKAIO.actor.h
|
||||
AsyncFileNonDurable.actor.h
|
||||
AsyncFileReadAhead.actor.h
|
||||
AsyncFileWinASIO.actor.h
|
||||
AsyncFileCached.actor.cpp
|
||||
AsyncFileNonDurable.actor.cpp
|
||||
AsyncFileWriteChecker.cpp
|
||||
batcher.actor.h
|
||||
crc32c.cpp
|
||||
dsltest.actor.cpp
|
||||
FailureMonitor.actor.cpp
|
||||
FlowTests.actor.cpp
|
||||
FlowTransport.actor.cpp
|
||||
genericactors.actor.h
|
||||
genericactors.actor.cpp
|
||||
IAsyncFile.actor.cpp
|
||||
LoadBalance.actor.h
|
||||
Locality.cpp
|
||||
Net2FileSystem.cpp
|
||||
networksender.actor.h
|
||||
Platform.cpp
|
||||
QueueModel.cpp
|
||||
ReplicationPolicy.cpp
|
||||
ReplicationTypes.cpp
|
||||
ReplicationUtils.cpp
|
||||
sim2.actor.cpp
|
||||
sim_validation.cpp
|
||||
TLSConnection.actor.cpp
|
||||
TraceFileIO.cpp
|
||||
# C files
|
||||
libcoroutine/Common.c
|
||||
libcoroutine/context.c
|
||||
libcoroutine/Coro.c
|
||||
libeio/eio.c
|
||||
zlib/adler32.c
|
||||
zlib/crc32.c
|
||||
zlib/deflate.c
|
||||
zlib/gzclose.c
|
||||
zlib/gzlib.c
|
||||
zlib/gzread.c
|
||||
zlib/gzwrite.c
|
||||
zlib/infback.c
|
||||
zlib/inffast.c
|
||||
zlib/inflate.c
|
||||
zlib/inftrees.c
|
||||
zlib/trees.c
|
||||
zlib/zutil.c)
|
||||
|
||||
if(APPLE)
|
||||
list(APPEND FDBRPC_SRCS libcoroutine/asm.S libcoroutine/context.c)
|
||||
endif()
|
||||
|
||||
actor_set(FDBRPC_BUILD "${FDBRPC_SRCS}")
|
||||
add_library(fdbrpc STATIC ${FDBRPC_BUILD})
|
||||
actor_compile(fdbrpc "${FDBRPC_SRCS}")
|
||||
target_include_directories(fdbrpc PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/libeio)
|
||||
target_link_libraries(fdbrpc PUBLIC flow)
|
|
@ -26,6 +26,7 @@
|
|||
#include "flow/IRandom.h"
|
||||
#include <vector>
|
||||
#include <algorithm>
|
||||
#include <cmath>
|
||||
|
||||
template <class T>
|
||||
class ContinuousSample {
|
||||
|
|
|
@ -1,74 +0,0 @@
|
|||
/*
|
||||
* EndpointGroup.h
|
||||
*
|
||||
* This source file is part of the FoundationDB open source project
|
||||
*
|
||||
* Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#ifndef FLOW_ENDPOINT_GROUP_H
|
||||
#define FLOW_ENDPOINT_GROUP_H
|
||||
#pragma once
|
||||
|
||||
#include "fdbrpc/flow.h"
|
||||
|
||||
// EndpointGroup makes it easier to implement backward compatibility for interface serialization
|
||||
// It also provides a central place to implement more compact serialization for a group of related endpoints in the future.
|
||||
|
||||
/* Typical usage:
|
||||
|
||||
template <class Ar>
|
||||
void serialize(Ar& ar) {
|
||||
auto endpoints = endpointGroup(ar);
|
||||
endpoints.require( ar.protocolVersion() <= currentProtocolVersion );
|
||||
endpoints & apple & banana;
|
||||
endpoints.require( ar.protocolVersion() >= 0xabc ); // Following endpoints added in this version
|
||||
endpoints & cherry;
|
||||
endpoints.require( ar.protocolVersion() >= 0xdef ); // .. and then some more were added
|
||||
endpoints & date;
|
||||
}
|
||||
|
||||
*/
|
||||
|
||||
|
||||
template <class Ar>
|
||||
struct EndpointGroup : NonCopyable {
|
||||
Ar& ar;
|
||||
bool enabled;
|
||||
|
||||
explicit EndpointGroup( Ar& ar ) : ar(ar), enabled(true) {
|
||||
ASSERT( ar.protocolVersion() != 0 );
|
||||
}
|
||||
EndpointGroup( EndpointGroup&& g ) : ar(g.ar), enabled(g.enabled) {}
|
||||
|
||||
EndpointGroup& require( bool condition ) {
|
||||
enabled = enabled && condition;
|
||||
return *this;
|
||||
}
|
||||
|
||||
template <class T>
|
||||
EndpointGroup& operator & (PromiseStream<T>& stream) {
|
||||
if (enabled)
|
||||
ar & stream;
|
||||
else if (Ar::isDeserializing)
|
||||
stream.sendError( incompatible_protocol_version() );
|
||||
return *this;
|
||||
}
|
||||
};
|
||||
|
||||
template <class Ar>
|
||||
EndpointGroup<Ar> endpointGroup( Ar& ar ) { return EndpointGroup<Ar>(ar); }
|
||||
|
||||
#endif
|
|
@ -74,7 +74,7 @@ struct FailureStatus {
|
|||
bool operator != (FailureStatus const& r) const { return failed != r.failed; }
|
||||
template <class Ar>
|
||||
void serialize(Ar& ar) {
|
||||
ar & failed;
|
||||
serializer(ar, failed);
|
||||
}
|
||||
};
|
||||
|
||||
|
|
|
@ -71,7 +71,7 @@ struct LoadBalancedReply {
|
|||
|
||||
template <class Ar>
|
||||
void serialize(Ar &ar) {
|
||||
ar & penalty;
|
||||
serializer(ar, penalty);
|
||||
}
|
||||
};
|
||||
|
||||
|
|
|
@ -110,7 +110,7 @@ public:
|
|||
|
||||
template <class Ar>
|
||||
void serialize(Ar& ar) {
|
||||
ar & _class & _source;
|
||||
serializer(ar, _class, _source);
|
||||
}
|
||||
};
|
||||
|
||||
|
@ -160,10 +160,13 @@ public:
|
|||
|
||||
std::string describeZone() const { return describeValue(keyZoneId); }
|
||||
std::string describeDataHall() const { return describeValue(keyDataHallId); }
|
||||
std::string describeDcId() const { return describeValue(keyDcId); }
|
||||
std::string describeMachineId() const { return describeValue(keyMachineId); }
|
||||
std::string describeProcessId() const { return describeValue(keyProcessId); }
|
||||
|
||||
Optional<Standalone<StringRef>> processId() const { return get(keyProcessId); }
|
||||
Optional<Standalone<StringRef>> zoneId() const { return get(keyZoneId); }
|
||||
Optional<Standalone<StringRef>> machineId() const { return get(keyMachineId); }
|
||||
Optional<Standalone<StringRef>> machineId() const { return get(keyMachineId); } // default is ""
|
||||
Optional<Standalone<StringRef>> dcId() const { return get(keyDcId); }
|
||||
Optional<Standalone<StringRef>> dataHallId() const { return get(keyDataHallId); }
|
||||
|
||||
|
@ -185,10 +188,10 @@ public:
		Standalone<StringRef> key;
		Optional<Standalone<StringRef>> value;
		uint64_t mapSize = (uint64_t)_data.size();
		ar & mapSize;
		serializer(ar, mapSize);
		if (ar.isDeserializing) {
			for (size_t i = 0; i < mapSize; i++) {
				ar & key & value;
				serializer(ar, key, value);
				_data[key] = value;
			}
		}

@ -196,24 +199,24 @@ public:
			for (auto it = _data.begin(); it != _data.end(); it++) {
				key = it->first;
				value = it->second;
				ar & key & value;
				serializer(ar, key, value);
			}
		}
	}
	else {
		ASSERT(ar.isDeserializing);
		UID zoneId, dcId, processId;
		ar & zoneId & dcId;
		serializer(ar, zoneId, dcId);
		set(keyZoneId, Standalone<StringRef>(zoneId.toString()));
		set(keyDcId, Standalone<StringRef>(dcId.toString()));

		if (ar.protocolVersion() >= 0x0FDB00A340000001LL) {
			ar & processId;
			serializer(ar, processId);
			set(keyProcessId, Standalone<StringRef>(processId.toString()));
		}
		else {
			int _machineClass = ProcessClass::UnsetClass;
			ar & _machineClass;
			serializer(ar, _machineClass);
		}
	}

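The LocalityData code above uses the classic length-prefix pattern for a map: write the element count first, then each key/value pair; on the read side, read the count and loop that many times. A minimal self-contained sketch of the same idea over a toy byte-vector "archive" (not FoundationDB's serializer, and limited to short strings for brevity):

#include <cstdint>
#include <map>
#include <string>
#include <vector>

static void putString(std::vector<uint8_t>& out, const std::string& s) {
	out.push_back((uint8_t)s.size());               // 1-byte length prefix
	out.insert(out.end(), s.begin(), s.end());
}

static std::string getString(const std::vector<uint8_t>& in, size_t& pos) {
	size_t n = in[pos++];
	std::string s(in.begin() + pos, in.begin() + pos + n);
	pos += n;
	return s;
}

std::vector<uint8_t> saveMap(const std::map<std::string, std::string>& m) {
	std::vector<uint8_t> out;
	out.push_back((uint8_t)m.size());               // element count first
	for (const auto& kv : m) {                      // then each key/value pair
		putString(out, kv.first);
		putString(out, kv.second);
	}
	return out;
}

std::map<std::string, std::string> loadMap(const std::vector<uint8_t>& in) {
	std::map<std::string, std::string> m;
	size_t pos = 0;
	size_t count = in[pos++];
	for (size_t i = 0; i < count; i++) {
		std::string k = getString(in, pos);
		m[k] = getString(in, pos);
	}
	return m;
}

int main() {
	auto bytes = saveMap({{"zoneid", "z1"}, {"dcid", "dc1"}});
	return loadMap(bytes).at("zoneid") == "z1" ? 0 : 1;
}
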
@ -255,7 +258,7 @@ struct ProcessData {
|
|||
|
||||
template <class Ar>
|
||||
void serialize(Ar& ar) {
|
||||
ar & locality & processClass & address;
|
||||
serializer(ar, locality, processClass, address);
|
||||
}
|
||||
|
||||
struct sort_by_address {
|
||||
|
|
|
@ -43,7 +43,7 @@ struct PerfMetric {
|
|||
|
||||
template <class Ar>
|
||||
void serialize( Ar& ar ) {
|
||||
ar & m_name & m_format_code & m_value & m_averaged;
|
||||
serializer(ar, m_name, m_format_code, m_value, m_averaged);
|
||||
}
|
||||
|
||||
private:
|
||||
|
|
|
@ -101,8 +101,9 @@ int eraseDirectoryRecursive(std::string const& dir) {
	   the directory we're deleting doesn't exist in the first
	   place */
	if (error && errno != ENOENT) {
		TraceEvent(SevError, "EraseDirectoryRecursiveError").detail("Directory", dir).GetLastError();
		throw platform_error();
		Error e = systemErrorCodeToError();
		TraceEvent(SevError, "EraseDirectoryRecursiveError").detail("Directory", dir).GetLastError().error(e);
		throw e;
	}
#else
#error Port me!

@ -102,6 +102,11 @@ public:
|
|||
return _localitygroup->getRecord(getEntry(localIndex)._id);
|
||||
}
|
||||
|
||||
// Return record array to help debug the locality information for servers
|
||||
virtual std::vector<Reference<LocalityRecord>> const& getRecordArray() const {
|
||||
return _localitygroup->getRecordArray();
|
||||
}
|
||||
|
||||
Reference<LocalityRecord> const& getRecordViaEntry(LocalityEntry localEntry) const {
|
||||
return _localitygroup->getRecord(localEntry._id);
|
||||
}
|
||||
|
@ -167,6 +172,8 @@ public:
|
|||
|
||||
// This function is used to create an subset containing all of the entries within
|
||||
// the specified value for the given key
|
||||
// The returned LocalitySet contains the LocalityRecords that have the same value as
|
||||
// the indexValue under the same indexKey (e.g., zoneid)
|
||||
LocalitySetRef restrict(AttribKey indexKey, AttribValue indexValue ) {
|
||||
LocalitySetRef localitySet;
|
||||
LocalityCacheRecord searchRecord(AttribRecord(indexKey, indexValue), localitySet);
|
||||
|
@ -497,6 +504,7 @@ struct LocalityGroup : public LocalitySet {
|
|||
virtual ~LocalityGroup() { }
|
||||
|
||||
LocalityEntry const& add(LocalityData const& data) {
|
||||
// _recordArray.size() is the new entry index for the new data
|
||||
Reference<LocalityRecord> record(new LocalityRecord(convertToAttribMap(data), _recordArray.size()));
|
||||
_recordArray.push_back(record);
|
||||
return LocalitySet::add(record, *this);
|
||||
|
@ -527,6 +535,9 @@ struct LocalityGroup : public LocalitySet {
|
|||
return _recordArray[recordIndex];
|
||||
}
|
||||
|
||||
// Get the locality info for debug purpose
|
||||
virtual std::vector<Reference<LocalityRecord>> const& getRecordArray() const { return _recordArray; }
|
||||
|
||||
virtual int getMemoryUsed() const {
|
||||
int memorySize = sizeof(_recordArray) + _keymap->getMemoryUsed();
|
||||
for (auto& record : _recordArray) {
|
||||
|
|
|
@ -147,15 +147,53 @@ PolicyAcross::~PolicyAcross()
|
|||
return;
|
||||
}
|
||||
|
||||
// Debug purpose only
|
||||
// Trace all record entries to help debug
|
||||
// fromServers is the servers locality to be printed out.
|
||||
void IReplicationPolicy::traceLocalityRecords(LocalitySetRef const& fromServers) {
|
||||
std::vector<Reference<LocalityRecord>> const& recordArray = fromServers->getRecordArray();
|
||||
TraceEvent("LocalityRecordArray").detail("Size", recordArray.size());
|
||||
for (auto& record : recordArray) {
|
||||
traceOneLocalityRecord(record, fromServers);
|
||||
}
|
||||
}
|
||||
|
||||
void IReplicationPolicy::traceOneLocalityRecord(Reference<LocalityRecord> record, LocalitySetRef const& fromServers) {
|
||||
int localityEntryIndex = record->_entryIndex._id;
|
||||
Reference<KeyValueMap> const& dataMap = record->_dataMap;
|
||||
std::vector<AttribRecord> const& keyValueArray = dataMap->_keyvaluearray;
|
||||
|
||||
TraceEvent("LocalityRecordInfo")
|
||||
.detail("EntryIndex", localityEntryIndex)
|
||||
.detail("KeyValueArraySize", keyValueArray.size());
|
||||
for (int i = 0; i < keyValueArray.size(); ++i) {
|
||||
AttribRecord attribRecord = keyValueArray[i]; // first is key, second is value
|
||||
TraceEvent("LocalityRecordInfo")
|
||||
.detail("EntryIndex", localityEntryIndex)
|
||||
.detail("ArrayIndex", i)
|
||||
.detail("Key", attribRecord.first._id)
|
||||
.detail("Value", attribRecord.second._id)
|
||||
.detail("KeyName", fromServers->keyText(attribRecord.first))
|
||||
.detail("ValueName", fromServers->valueText(attribRecord.second));
|
||||
}
|
||||
}
|
||||
|
||||
// Validate if the team satisfies the replication policy
|
||||
// LocalitySet is the base class about the locality information
|
||||
// solutionSet is the team to be validated
|
||||
// fromServers is the location information of all servers
|
||||
// return true if the team satisfies the policy; false otherwise
|
||||
bool PolicyAcross::validate(
|
||||
std::vector<LocalityEntry> const& solutionSet,
|
||||
LocalitySetRef const& fromServers ) const
|
||||
{
|
||||
bool valid = true;
|
||||
int count = 0;
|
||||
// Get the indexKey from the policy name (e.g., zoneid) in _attribKey
|
||||
AttribKey indexKey = fromServers->keyIndex(_attribKey);
|
||||
auto groupIndexKey = fromServers->getGroupKeyIndex(indexKey);
|
||||
std::map<AttribValue, std::vector<LocalityEntry>> validMap;
|
||||
|
||||
for (auto& item : solutionSet) {
|
||||
auto value = fromServers->getValueViaGroupKey(item, groupIndexKey);
|
||||
if (value.present()) {
|
||||
|
@ -182,9 +220,14 @@ bool PolicyAcross::validate(
|
|||
}
|
||||
}
|
||||
for (auto& itValid : validMap) {
|
||||
// itValid.second is the vector of LocalityEntries that belong to the same locality
|
||||
if (_policy->validate(itValid.second, fromServers)) {
|
||||
if (g_replicationdebug > 4) {
|
||||
printf("Across valid solution: %6lu key: %-7s count:%3d of%3d value: (%3d) %-10s policy: %-10s => %s\n", itValid.second.size(), _attribKey.c_str(), count+1, _count, itValid.first._id, fromServers->valueText(itValid.first).c_str(), _policy->name().c_str(), _policy->info().c_str());
|
||||
printf("Across valid solution: %6lu key: %-7s count:%3d of%3d value: (%3d) %-10s policy: %-10s => "
|
||||
"%s\n",
|
||||
itValid.second.size(), _attribKey.c_str(), count + 1, _count, itValid.first._id,
|
||||
fromServers->valueText(itValid.first).c_str(), _policy->name().c_str(),
|
||||
_policy->info().c_str());
|
||||
if (g_replicationdebug > 5) {
|
||||
for (auto& entry : itValid.second) {
|
||||
printf(" entry: %s\n", fromServers->getEntryInfo(entry).c_str());
|
||||
|
@ -192,8 +235,7 @@ bool PolicyAcross::validate(
|
|||
}
|
||||
}
|
||||
count ++;
|
||||
}
|
||||
else if (g_replicationdebug > 4) {
|
||||
} else if (g_replicationdebug > 4) {
|
||||
printf("Across invalid solution:%5lu key: %-7s value: (%3d) %-10s policy: %-10s => %s\n", itValid.second.size(), _attribKey.c_str(), itValid.first._id, fromServers->valueText(itValid.first).c_str(), _policy->name().c_str(), _policy->info().c_str());
|
||||
if (g_replicationdebug > 5) {
|
||||
for (auto& entry : itValid.second) {
|
||||
|
@ -215,6 +257,10 @@ bool PolicyAcross::validate(
	return valid;
}

// Choose new servers from "least utilized" alsoServers and append the new servers to results
// fromServers are the servers that have already been chosen and
// that should be excluded from being selected as replicas.
// FIXME: Simplify this function, such as removing unnecessary printf
bool PolicyAcross::selectReplicas(
	LocalitySetRef & fromServers,
	std::vector<LocalityEntry> const& alsoServers,

@ -239,11 +285,15 @@ bool PolicyAcross::selectReplicas(
|
|||
if (value.present()) {
|
||||
auto lowerBound = std::lower_bound(_usedValues.begin(), _usedValues.end(), value.get());
|
||||
if ((lowerBound == _usedValues.end()) || (*lowerBound != value.get())) {
|
||||
//_selected is a set of processes that have the same indexKey and indexValue (value)
|
||||
_selected = fromServers->restrict(indexKey, value.get());
|
||||
if (_selected->size()) {
|
||||
// Pass only the also array item which are valid for the value
|
||||
if (g_replicationdebug > 5) {
|
||||
printf("Across !select key: %-7s value: (%3d) %-10s entry: %s\n", _attribKey.c_str(), value.get()._id, fromServers->valueText(value.get()).c_str(), fromServers->getEntryInfo(alsoServer).c_str());
|
||||
// entry is the locality entry info (entryValue) from the to-be-selected team member alsoServer
|
||||
printf("Across !select key: %-7s value: (%3d) %-10s entry: %s\n", _attribKey.c_str(),
|
||||
value.get()._id, fromServers->valueText(value.get()).c_str(),
|
||||
fromServers->getEntryInfo(alsoServer).c_str());
|
||||
}
|
||||
resultsSize = _newResults.size();
|
||||
if (_policy->selectReplicas(_selected, alsoServers, _newResults))
|
||||
|
@ -256,7 +306,10 @@ bool PolicyAcross::selectReplicas(
|
|||
_addedResults.push_back(_arena, std::pair<int, int>(resultsAdded, resultsSize));
|
||||
}
|
||||
if (g_replicationdebug > 5) {
|
||||
printf("Across !added:%3d key: %-7s count:%3d of%3d value: (%3d) %-10s entry: %s\n", resultsAdded, _attribKey.c_str(), count, _count, value.get()._id, fromServers->valueText(value.get()).c_str(), fromServers->getEntryInfo(alsoServer).c_str());
|
||||
printf("Across !added:%3d key: %-7s count:%3d of%3d value: (%3d) %-10s entry: %s\n",
|
||||
resultsAdded, _attribKey.c_str(), count, _count, value.get()._id,
|
||||
fromServers->valueText(value.get()).c_str(),
|
||||
fromServers->getEntryInfo(alsoServer).c_str());
|
||||
}
|
||||
if (count >= _count) break;
|
||||
_usedValues.insert(lowerBound, value.get());
|
||||
|
@ -308,6 +361,7 @@ bool PolicyAcross::selectReplicas(
|
|||
}
|
||||
}
|
||||
|
||||
// Cannot find replica from the least used alsoServers, now try to find replicas from all servers
|
||||
// Process the remaining values
|
||||
if (count < _count) {
|
||||
if (g_replicationdebug > 3) {
|
||||
|
@ -329,12 +383,18 @@ bool PolicyAcross::selectReplicas(
|
|||
_selected = fromServers->restrict(indexKey, value.get());
|
||||
if (_selected->size()) {
|
||||
if (g_replicationdebug > 5) {
|
||||
printf("Across select:%3d key: %-7s value: (%3d) %-10s entry: %s index:%4d\n", fromServers->size()-checksLeft+1, _attribKey.c_str(), value.get()._id, fromServers->valueText(value.get()).c_str(), fromServers->getEntryInfo(entry).c_str(), recordIndex);
|
||||
printf("Across select:%3d key: %-7s value: (%3d) %-10s entry: %s index:%4d\n",
|
||||
fromServers->size() - checksLeft + 1, _attribKey.c_str(), value.get()._id,
|
||||
fromServers->valueText(value.get()).c_str(),
|
||||
fromServers->getEntryInfo(entry).c_str(), recordIndex);
|
||||
}
|
||||
if (_policy->selectReplicas(_selected, emptyEntryArray, results))
|
||||
{
|
||||
if (g_replicationdebug > 5) {
|
||||
printf("Across added:%4d key: %-7s value: (%3d) %-10s policy: %-10s => %s needed:%3d\n", count+1, _attribKey.c_str(), value.get()._id, fromServers->valueText(value.get()).c_str(), _policy->name().c_str(), _policy->info().c_str(), _count);
|
||||
printf("Across added:%4d key: %-7s value: (%3d) %-10s policy: %-10s => %s needed:%3d\n",
|
||||
count + 1, _attribKey.c_str(), value.get()._id,
|
||||
fromServers->valueText(value.get()).c_str(), _policy->name().c_str(),
|
||||
_policy->info().c_str(), _count);
|
||||
}
|
||||
count ++;
|
||||
if (count >= _count) break;
|
||||
|
|
|
@ -43,6 +43,8 @@ struct IReplicationPolicy : public ReferenceCounted<IReplicationPolicy> {
|
|||
LocalitySetRef & fromServers,
|
||||
std::vector<LocalityEntry> const& alsoServers,
|
||||
std::vector<LocalityEntry> & results ) = 0;
|
||||
virtual void traceLocalityRecords(LocalitySetRef const& fromServers);
|
||||
virtual void traceOneLocalityRecord(Reference<LocalityRecord> record, LocalitySetRef const& fromServers);
|
||||
virtual bool validate(
|
||||
std::vector<LocalityEntry> const& solutionSet,
|
||||
LocalitySetRef const& fromServers ) const = 0;
|
||||
|
@ -134,7 +136,7 @@ struct PolicyAcross : IReplicationPolicy, public ReferenceCounted<PolicyAcross>
|
|||
|
||||
template <class Ar>
|
||||
void serialize(Ar& ar) {
|
||||
ar & _attribKey & _count;
|
||||
serializer(ar, _attribKey, _count);
|
||||
serializeReplicationPolicy(ar, _policy);
|
||||
}
|
||||
|
||||
|
@ -205,7 +207,7 @@ struct PolicyAnd : IReplicationPolicy, public ReferenceCounted<PolicyAnd> {
|
|||
template <class Ar>
|
||||
void serialize(Ar& ar) {
|
||||
int count = _policies.size();
|
||||
ar & count;
|
||||
serializer(ar, count);
|
||||
_policies.resize(count);
|
||||
for(int i = 0; i < count; i++) {
|
||||
serializeReplicationPolicy(ar, _policies[i]);
|
||||
|
@ -231,7 +233,7 @@ template <class Ar>
|
|||
void serializeReplicationPolicy(Ar& ar, IRepPolicyRef& policy) {
|
||||
if(Ar::isDeserializing) {
|
||||
StringRef name;
|
||||
ar & name;
|
||||
serializer(ar, name);
|
||||
|
||||
if(name == LiteralStringRef("One")) {
|
||||
PolicyOne* pointer = new PolicyOne();
|
||||
|
@ -259,7 +261,7 @@ void serializeReplicationPolicy(Ar& ar, IRepPolicyRef& policy) {
|
|||
else {
|
||||
std::string name = policy ? policy->name() : "None";
|
||||
Standalone<StringRef> nameRef = StringRef(name);
|
||||
ar & nameRef;
|
||||
serializer(ar, nameRef);
|
||||
if(name == "One") {
|
||||
((PolicyOne*)policy.getPtr())->serialize(ar);
|
||||
}
|
||||
|
|
|
@ -22,6 +22,7 @@
|
|||
#define FLOW_REPLICATION_TYPES_H
|
||||
#pragma once
|
||||
|
||||
#include <sstream>
|
||||
#include "flow/flow.h"
|
||||
#include "fdbrpc/Locality.h"
|
||||
|
||||
|
@ -140,6 +141,18 @@ struct LocalityRecord : public ReferenceCounted<LocalityRecord> {
|
|||
int getMemoryUsed() const {
|
||||
return sizeof(_entryIndex) + sizeof(_dataMap) + _dataMap->getMemoryUsed();
|
||||
}
|
||||
|
||||
std::string toString() {
|
||||
std::stringstream ss;
|
||||
ss << "KeyValueArraySize:" << _dataMap->_keyvaluearray.size();
|
||||
for (int i = 0; i < _dataMap->size(); ++i) {
|
||||
AttribRecord attribRecord = _dataMap->_keyvaluearray[i]; // first is key, second is value
|
||||
ss << " KeyValueArrayIndex:" << i << " Key:" << attribRecord.first._id <<
|
||||
" Value:" << attribRecord.second._id;
|
||||
}
|
||||
|
||||
return ss.str();
|
||||
}
|
||||
};
|
||||
|
||||
// This class stores the information for string to integer map for keys and values
|
||||
|
|
|
@ -23,6 +23,7 @@
|
|||
#pragma once
|
||||
|
||||
#include "flow/flow.h"
|
||||
#include <cmath>
|
||||
|
||||
struct Smoother {
|
||||
// Times (t) are expected to be nondecreasing
|
||||
|
|
|
@ -46,10 +46,10 @@ static int send_func(void* ctx, const uint8_t* buf, int len) {
|
|||
int w = conn->conn->write( &sb );
|
||||
return w;
|
||||
} catch ( Error& e ) {
|
||||
TraceEvent("TLSConnectionSendError", conn->getDebugID()).error(e);
|
||||
TraceEvent("TLSConnectionSendError", conn->getDebugID()).error(e).suppressFor(1.0);
|
||||
return -1;
|
||||
} catch ( ... ) {
|
||||
TraceEvent("TLSConnectionSendError", conn->getDebugID()).error( unknown_error() );
|
||||
TraceEvent("TLSConnectionSendError", conn->getDebugID()).error( unknown_error() ).suppressFor(1.0);
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
@ -62,10 +62,10 @@ static int recv_func(void* ctx, uint8_t* buf, int len) {
|
|||
int r = conn->conn->read( buf, buf + len );
|
||||
return r;
|
||||
} catch ( Error& e ) {
|
||||
TraceEvent("TLSConnectionRecvError", conn->getDebugID()).error(e);
|
||||
TraceEvent("TLSConnectionRecvError", conn->getDebugID()).error(e).suppressFor(1.0);
|
||||
return -1;
|
||||
} catch ( ... ) {
|
||||
TraceEvent("TLSConnectionRecvError", conn->getDebugID()).error( unknown_error() );
|
||||
TraceEvent("TLSConnectionRecvError", conn->getDebugID()).error( unknown_error() ).suppressFor(1.0);
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -925,7 +925,7 @@ struct AddReply {
|
|||
|
||||
template <class Ar>
|
||||
void serialize(Ar& ar) {
|
||||
ar & sum;
|
||||
serializer(ar, sum);
|
||||
}
|
||||
};
|
||||
|
||||
|
@ -938,7 +938,7 @@ struct AddRequest {
|
|||
|
||||
template <class Ar>
|
||||
void serialize(Ar& ar) {
|
||||
ar & a & b & reply;
|
||||
serializer(ar, a, b, reply);
|
||||
}
|
||||
};
|
||||
|
||||
|
|
|
@ -74,7 +74,6 @@
|
|||
<ClInclude Include="AsyncFileWriteChecker.h" />
|
||||
<ClInclude Include="ContinuousSample.h" />
|
||||
<ClInclude Include="crc32c.h" />
|
||||
<ClInclude Include="EndpointGroup.h" />
|
||||
<ClInclude Include="FailureMonitor.h" />
|
||||
<ActorCompiler Include="LoadBalance.actor.h">
|
||||
<EnableCompile>false</EnableCompile>
|
||||
|
|
|
@ -137,7 +137,6 @@
|
|||
<ClInclude Include="Platform.h" />
|
||||
<ClInclude Include="ActorFuzz.h" />
|
||||
<ClInclude Include="ContinuousSample.h" />
|
||||
<ClInclude Include="EndpointGroup.h" />
|
||||
<ClInclude Include="fdbrpc.h" />
|
||||
<ClInclude Include="MultiInterface.h" />
|
||||
<ClInclude Include="PerfMetric.h" />
|
||||
|
|
|
@ -0,0 +1,185 @@
|
|||
set(FDBSERVER_SRCS
|
||||
ApplyMetadataMutation.h
|
||||
ClusterController.actor.cpp
|
||||
ClusterRecruitmentInterface.h
|
||||
ConflictSet.h
|
||||
CoordinatedState.actor.cpp
|
||||
CoordinatedState.h
|
||||
Coordination.actor.cpp
|
||||
CoordinationInterface.h
|
||||
CoroFlow.actor.cpp
|
||||
CoroFlow.h
|
||||
DataDistribution.actor.cpp
|
||||
DataDistribution.h
|
||||
DataDistributionQueue.actor.cpp
|
||||
DataDistributionTracker.actor.cpp
|
||||
DBCoreState.h
|
||||
DiskQueue.actor.cpp
|
||||
fdbserver.actor.cpp
|
||||
IDiskQueue.h
|
||||
IKeyValueStore.h
|
||||
IPager.h
|
||||
IVersionedStore.h
|
||||
IndirectShadowPager.actor.cpp
|
||||
IndirectShadowPager.h
|
||||
KeyValueStoreCompressTestData.actor.cpp
|
||||
KeyValueStoreMemory.actor.cpp
|
||||
KeyValueStoreSQLite.actor.cpp
|
||||
Knobs.cpp
|
||||
Knobs.h
|
||||
LeaderElection.actor.cpp
|
||||
LeaderElection.h
|
||||
LogProtocolMessage.h
|
||||
LogRouter.actor.cpp
|
||||
LogSystem.h
|
||||
LogSystemConfig.h
|
||||
LogSystemDiskQueueAdapter.actor.cpp
|
||||
LogSystemDiskQueueAdapter.h
|
||||
LogSystemPeekCursor.actor.cpp
|
||||
MasterInterface.h
|
||||
MasterProxyServer.actor.cpp
|
||||
masterserver.actor.cpp
|
||||
MemoryPager.actor.cpp
|
||||
MemoryPager.h
|
||||
MoveKeys.actor.cpp
|
||||
MoveKeys.h
|
||||
networktest.actor.cpp
|
||||
NetworkTest.h
|
||||
OldTLogServer.actor.cpp
|
||||
Orderer.actor.h
|
||||
pubsub.actor.cpp
|
||||
pubsub.h
|
||||
QuietDatabase.actor.cpp
|
||||
QuietDatabase.h
|
||||
Ratekeeper.actor.cpp
|
||||
Ratekeeper.h
|
||||
RecoveryState.h
|
||||
Restore.actor.cpp
|
||||
RestoreInterface.h
|
||||
Resolver.actor.cpp
|
||||
ResolverInterface.h
|
||||
ServerDBInfo.h
|
||||
SimulatedCluster.actor.cpp
|
||||
SimulatedCluster.h
|
||||
SkipList.cpp
|
||||
sqlite/btree.h
|
||||
sqlite/hash.h
|
||||
sqlite/sqlite3.h
|
||||
sqlite/sqlite3ext.h
|
||||
sqlite/sqliteInt.h
|
||||
sqlite/sqliteLimit.h
|
||||
sqlite/sqlite3.amalgamation.c
|
||||
Status.actor.cpp
|
||||
Status.h
|
||||
StorageMetrics.actor.h
|
||||
StorageMetrics.h
|
||||
storageserver.actor.cpp
|
||||
TagPartitionedLogSystem.actor.cpp
|
||||
template_fdb.h
|
||||
tester.actor.cpp
|
||||
TesterInterface.h
|
||||
TLogInterface.h
|
||||
TLogServer.actor.cpp
|
||||
VersionedBTree.actor.cpp
|
||||
VFSAsync.cpp
|
||||
WaitFailure.actor.cpp
|
||||
WaitFailure.h
|
||||
worker.actor.cpp
|
||||
WorkerInterface.h
|
||||
workloads/ApiCorrectness.actor.cpp
|
||||
workloads/ApiWorkload.actor.cpp
|
||||
workloads/ApiWorkload.h
|
||||
workloads/AsyncFile.actor.h
|
||||
workloads/AsyncFile.cpp
|
||||
workloads/AsyncFileCorrectness.actor.cpp
|
||||
workloads/AsyncFileRead.actor.cpp
|
||||
workloads/AsyncFileWrite.actor.cpp
|
||||
workloads/AtomicOps.actor.cpp
|
||||
workloads/AtomicOpsApiCorrectness.actor.cpp
|
||||
workloads/AtomicRestore.actor.cpp
|
||||
workloads/AtomicSwitchover.actor.cpp
|
||||
workloads/BackgroundSelectors.actor.cpp
|
||||
workloads/BackupCorrectness.actor.cpp
|
||||
workloads/BackupToDBAbort.actor.cpp
|
||||
workloads/BackupToDBCorrectness.actor.cpp
|
||||
workloads/BackupToDBUpgrade.actor.cpp
|
||||
workloads/BulkLoad.actor.cpp
|
||||
workloads/BulkSetup.actor.h
|
||||
workloads/ChangeConfig.actor.cpp
|
||||
workloads/ClientTransactionProfileCorrectness.actor.cpp
|
||||
workloads/CommitBugCheck.actor.cpp
|
||||
workloads/ConfigureDatabase.actor.cpp
|
||||
workloads/ConflictRange.actor.cpp
|
||||
workloads/ConsistencyCheck.actor.cpp
|
||||
workloads/CpuProfiler.actor.cpp
|
||||
workloads/Cycle.actor.cpp
|
||||
workloads/DDBalance.actor.cpp
|
||||
workloads/DDMetrics.actor.cpp
|
||||
workloads/DiskDurability.actor.cpp
|
||||
workloads/DiskDurabilityTest.actor.cpp
|
||||
workloads/DummyWorkload.actor.cpp
|
||||
workloads/FastTriggeredWatches.actor.cpp
|
||||
workloads/FileSystem.actor.cpp
|
||||
workloads/Fuzz.cpp
|
||||
workloads/FuzzApiCorrectness.actor.cpp
|
||||
workloads/Increment.actor.cpp
|
||||
workloads/IndexScan.actor.cpp
|
||||
workloads/Inventory.actor.cpp
|
||||
workloads/KVStoreTest.actor.cpp
|
||||
workloads/LockDatabase.actor.cpp
|
||||
workloads/LogMetrics.actor.cpp
|
||||
workloads/LowLatency.actor.cpp
|
||||
workloads/MachineAttrition.actor.cpp
|
||||
workloads/MemoryKeyValueStore.cpp
|
||||
workloads/MemoryKeyValueStore.h
|
||||
workloads/MemoryLifetime.actor.cpp
|
||||
workloads/MetricLogging.actor.cpp
|
||||
workloads/Performance.actor.cpp
|
||||
workloads/Ping.actor.cpp
|
||||
workloads/PubSubMultiples.actor.cpp
|
||||
workloads/QueuePush.actor.cpp
|
||||
workloads/RandomClogging.actor.cpp
|
||||
workloads/RandomMoveKeys.actor.cpp
|
||||
workloads/RandomSelector.actor.cpp
|
||||
workloads/ReadWrite.actor.cpp
|
||||
workloads/RemoveServersSafely.actor.cpp
|
||||
workloads/Rollback.actor.cpp
|
||||
workloads/RyowCorrectness.actor.cpp
|
||||
workloads/RYWDisable.actor.cpp
|
||||
workloads/RYWPerformance.actor.cpp
|
||||
workloads/SaveAndKill.actor.cpp
|
||||
workloads/SelectorCorrectness.actor.cpp
|
||||
workloads/Serializability.actor.cpp
|
||||
workloads/Sideband.actor.cpp
|
||||
workloads/SlowTaskWorkload.actor.cpp
|
||||
workloads/StatusWorkload.actor.cpp
|
||||
workloads/Storefront.actor.cpp
|
||||
workloads/StreamingRead.actor.cpp
|
||||
workloads/TargetedKill.actor.cpp
|
||||
workloads/TaskBucketCorrectness.actor.cpp
|
||||
workloads/ThreadSafety.actor.cpp
|
||||
workloads/Throughput.actor.cpp
|
||||
workloads/TimeKeeperCorrectness.actor.cpp
|
||||
workloads/UnitPerf.actor.cpp
|
||||
workloads/UnitTests.actor.cpp
|
||||
workloads/Unreadable.actor.cpp
|
||||
workloads/VersionStamp.actor.cpp
|
||||
workloads/WatchAndWait.actor.cpp
|
||||
workloads/Watches.actor.cpp
|
||||
workloads/WorkerErrors.actor.cpp
|
||||
workloads/workloads.h
|
||||
workloads/WriteBandwidth.actor.cpp
|
||||
workloads/WriteDuringRead.actor.cpp)
|
||||
|
||||
file(MAKE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/workloads)
|
||||
|
||||
actor_set(FDBSERVER_BUILD "${FDBSERVER_SRCS}")
|
||||
add_executable(fdbserver ${FDBSERVER_BUILD})
|
||||
actor_compile(fdbserver "${FDBSERVER_SRCS}")
|
||||
target_include_directories(fdbserver PRIVATE
|
||||
${CMAKE_CURRENT_BINARY_DIR}/workloads
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/workloads)
|
||||
target_link_libraries(fdbserver PRIVATE fdbclient)
|
||||
|
||||
install(TARGETS fdbserver DESTINATION ${FDB_SBIN_DIR} COMPONENT server)
|
||||
|
|
@ -61,7 +61,7 @@ struct ClusterControllerFullInterface {
|
|||
template <class Ar>
|
||||
void serialize( Ar& ar ) {
|
||||
ASSERT( ar.protocolVersion() >= 0x0FDB00A200040001LL );
|
||||
ar & clientInterface & recruitFromConfiguration & recruitRemoteFromConfiguration & recruitStorage & registerWorker & getWorkers & registerMaster & getServerDBInfo;
|
||||
serializer(ar, clientInterface, recruitFromConfiguration, recruitRemoteFromConfiguration, recruitStorage, registerWorker, getWorkers, registerMaster, getServerDBInfo);
|
||||
}
|
||||
};
|
||||
|
||||
|
@ -77,7 +77,7 @@ struct RecruitFromConfigurationRequest {
|
|||
|
||||
template <class Ar>
|
||||
void serialize( Ar& ar ) {
|
||||
ar & configuration & recruitSeedServers & maxOldLogRouters & reply;
|
||||
serializer(ar, configuration, recruitSeedServers, maxOldLogRouters, reply);
|
||||
}
|
||||
};
|
||||
|
||||
|
@ -95,7 +95,7 @@ struct RecruitFromConfigurationReply {
|
|||
|
||||
template <class Ar>
|
||||
void serialize( Ar& ar ) {
|
||||
ar & tLogs & satelliteTLogs & proxies & resolvers & storageServers & oldLogRouters & dcId & satelliteFallback;
|
||||
serializer(ar, tLogs, satelliteTLogs, proxies, resolvers, storageServers, oldLogRouters, dcId, satelliteFallback);
|
||||
}
|
||||
};
|
||||
|
||||
|
@ -110,7 +110,7 @@ struct RecruitRemoteFromConfigurationRequest {
|
|||
|
||||
template <class Ar>
|
||||
void serialize( Ar& ar ) {
|
||||
ar & configuration & dcId & logRouterCount & reply;
|
||||
serializer(ar, configuration, dcId, logRouterCount, reply);
|
||||
}
|
||||
};
|
||||
|
||||
|
@ -120,7 +120,7 @@ struct RecruitRemoteFromConfigurationReply {
|
|||
|
||||
template <class Ar>
|
||||
void serialize( Ar& ar ) {
|
||||
ar & remoteTLogs & logRouters;
|
||||
serializer(ar, remoteTLogs, logRouters);
|
||||
}
|
||||
};
|
||||
|
||||
|
@ -130,7 +130,7 @@ struct RecruitStorageReply {
|
|||
|
||||
template <class Ar>
|
||||
void serialize( Ar& ar ) {
|
||||
ar & worker & processClass;
|
||||
serializer(ar, worker, processClass);
|
||||
}
|
||||
};
|
||||
|
||||
|
@ -143,7 +143,7 @@ struct RecruitStorageRequest {
|
|||
|
||||
template <class Ar>
|
||||
void serialize( Ar& ar ) {
|
||||
ar & excludeMachines & excludeAddresses & includeDCs & criticalRecruitment & reply;
|
||||
serializer(ar, excludeMachines, excludeAddresses, includeDCs, criticalRecruitment, reply);
|
||||
}
|
||||
};
|
||||
|
||||
|
@ -156,7 +156,7 @@ struct RegisterWorkerReply {
|
|||
|
||||
template <class Ar>
|
||||
void serialize( Ar& ar ) {
|
||||
ar & processClass & priorityInfo;
|
||||
serializer(ar, processClass, priorityInfo);
|
||||
}
|
||||
};
|
||||
|
||||
|
@ -174,7 +174,7 @@ struct RegisterWorkerRequest {
|
|||
|
||||
template <class Ar>
|
||||
void serialize( Ar& ar ) {
|
||||
ar & wi & initialClass & processClass & priorityInfo & generation & reply;
|
||||
serializer(ar, wi, initialClass, processClass, priorityInfo, generation, reply);
|
||||
}
|
||||
};
|
||||
|
||||
|
@ -189,7 +189,7 @@ struct GetWorkersRequest {
|
|||
|
||||
template <class Ar>
|
||||
void serialize(Ar& ar) {
|
||||
ar & flags & reply;
|
||||
serializer(ar, flags, reply);
|
||||
}
|
||||
};
|
||||
|
||||
|
@ -213,7 +213,7 @@ struct RegisterMasterRequest {
|
|||
template <class Ar>
|
||||
void serialize( Ar& ar ) {
|
||||
ASSERT( ar.protocolVersion() >= 0x0FDB00A200040001LL );
|
||||
ar & id & mi & logSystemConfig & proxies & resolvers & recoveryCount & registrationCount & configuration & priorCommittedLogServers & recoveryState & recoveryStalled & reply;
|
||||
serializer(ar, id, mi, logSystemConfig, proxies, resolvers, recoveryCount, registrationCount, configuration, priorCommittedLogServers, recoveryState, recoveryStalled, reply);
|
||||
}
|
||||
};
|
||||
|
||||
|
@ -225,7 +225,7 @@ struct GetServerDBInfoRequest {
|
|||
|
||||
template <class Ar>
|
||||
void serialize(Ar& ar) {
|
||||
ar & knownServerInfoID & issues & incompatiblePeers & reply;
|
||||
serializer(ar, knownServerInfoID, issues, incompatiblePeers, reply);
|
||||
}
|
||||
};
|
||||
|
||||
|
|
|
@ -208,7 +208,7 @@ struct MovableValue {
|
|||
template <class Ar>
|
||||
void serialize(Ar& ar) {
|
||||
ASSERT( ar.protocolVersion() >= 0x0FDB00A2000D0001LL );
|
||||
ar & value & mode & other;
|
||||
serializer(ar, value, mode, other);
|
||||
}
|
||||
};
|
||||
|
||||
|
|
|
@ -33,7 +33,7 @@ struct GenerationRegVal {
|
|||
Optional<Value> val;
|
||||
template <class Ar>
|
||||
void serialize(Ar& ar) {
|
||||
ar & readGen & writeGen & val;
|
||||
serializer(ar, readGen, writeGen, val);
|
||||
}
|
||||
};
|
||||
|
||||
|
|
|
@ -66,7 +66,7 @@ struct UniqueGeneration {
|
|||
}
|
||||
template <class Ar>
|
||||
void serialize(Ar& ar) {
|
||||
ar & generation & uid;
|
||||
serializer(ar, generation, uid);
|
||||
}
|
||||
};
|
||||
|
||||
|
@ -78,7 +78,7 @@ struct GenerationRegReadRequest {
|
|||
GenerationRegReadRequest( Key key, UniqueGeneration gen ) : key(key), gen(gen) {}
|
||||
template <class Ar>
|
||||
void serialize(Ar& ar) {
|
||||
ar & key & gen & reply;
|
||||
serializer(ar, key, gen, reply);
|
||||
}
|
||||
};
|
||||
|
||||
|
@ -89,7 +89,7 @@ struct GenerationRegReadReply {
|
|||
GenerationRegReadReply( Optional<Value> value, UniqueGeneration gen, UniqueGeneration rgen ) : value(value), gen(gen), rgen(rgen) {}
|
||||
template <class Ar>
|
||||
void serialize(Ar& ar) {
|
||||
ar & value & gen & rgen;
|
||||
serializer(ar, value, gen, rgen);
|
||||
}
|
||||
};
|
||||
|
||||
|
@ -101,7 +101,7 @@ struct GenerationRegWriteRequest {
|
|||
GenerationRegWriteRequest(KeyValue kv, UniqueGeneration gen) : kv(kv), gen(gen) {}
|
||||
template <class Ar>
|
||||
void serialize(Ar& ar) {
|
||||
ar & kv & gen & reply;
|
||||
serializer(ar, kv, gen, reply);
|
||||
}
|
||||
};
|
||||
|
||||
|
@ -126,7 +126,7 @@ struct CandidacyRequest {
|
|||
|
||||
template <class Ar>
|
||||
void serialize(Ar& ar) {
|
||||
ar & key & myInfo & knownLeader & prevChangeID & reply;
|
||||
serializer(ar, key, myInfo, knownLeader, prevChangeID, reply);
|
||||
}
|
||||
};
|
||||
|
||||
|
@ -141,7 +141,7 @@ struct LeaderHeartbeatRequest {
|
|||
|
||||
template <class Ar>
|
||||
void serialize(Ar& ar) {
|
||||
ar & key & myInfo & prevChangeID & reply;
|
||||
serializer(ar, key, myInfo, prevChangeID, reply);
|
||||
}
|
||||
};
|
||||
|
||||
|
@ -155,7 +155,7 @@ struct ForwardRequest {
|
|||
|
||||
template <class Ar>
|
||||
void serialize(Ar& ar) {
|
||||
ar & key & conn & reply;
|
||||
serializer(ar, key, conn, reply);
|
||||
}
|
||||
};
|
||||
|
||||
|
|
|
@ -54,7 +54,7 @@ struct CoreTLogSet {
|
|||
|
||||
template <class Archive>
|
||||
void serialize(Archive& ar) {
|
||||
ar & tLogs & tLogWriteAntiQuorum & tLogReplicationFactor & tLogPolicy & tLogLocalities & isLocal & locality & startVersion & satelliteTagLocations;
|
||||
serializer(ar, tLogs, tLogWriteAntiQuorum, tLogReplicationFactor, tLogPolicy, tLogLocalities, isLocal, locality, startVersion, satelliteTagLocations);
|
||||
}
|
||||
};
|
||||
|
||||
|
@ -72,11 +72,11 @@ struct OldTLogCoreData {
|
|||
template <class Archive>
|
||||
void serialize(Archive& ar) {
|
||||
if( ar.protocolVersion() >= 0x0FDB00A560010001LL) {
|
||||
ar & tLogs & logRouterTags & epochEnd;
|
||||
serializer(ar, tLogs, logRouterTags, epochEnd);
|
||||
}
|
||||
else if(ar.isDeserializing) {
|
||||
tLogs.push_back(CoreTLogSet());
|
||||
ar & tLogs[0].tLogs & tLogs[0].tLogWriteAntiQuorum & tLogs[0].tLogReplicationFactor & tLogs[0].tLogPolicy & epochEnd & tLogs[0].tLogLocalities;
|
||||
serializer(ar, tLogs[0].tLogs, tLogs[0].tLogWriteAntiQuorum, tLogs[0].tLogReplicationFactor, tLogs[0].tLogPolicy, epochEnd, tLogs[0].tLogLocalities);
|
||||
}
|
||||
}
|
||||
};
|
||||
|
@ -122,18 +122,18 @@ struct DBCoreState {
|
|||
|
||||
ASSERT(ar.protocolVersion() >= 0x0FDB00A460010001LL);
|
||||
if(ar.protocolVersion() >= 0x0FDB00A560010001LL) {
|
||||
ar & tLogs & logRouterTags & oldTLogData & recoveryCount & logSystemType;
|
||||
serializer(ar, tLogs, logRouterTags, oldTLogData, recoveryCount, logSystemType);
|
||||
} else if(ar.isDeserializing) {
|
||||
tLogs.push_back(CoreTLogSet());
|
||||
ar & tLogs[0].tLogs & tLogs[0].tLogWriteAntiQuorum & recoveryCount & tLogs[0].tLogReplicationFactor & logSystemType;
|
||||
serializer(ar, tLogs[0].tLogs, tLogs[0].tLogWriteAntiQuorum, recoveryCount, tLogs[0].tLogReplicationFactor, logSystemType);
|
||||
|
||||
uint64_t tLocalitySize = (uint64_t)tLogs[0].tLogLocalities.size();
|
||||
ar & oldTLogData & tLogs[0].tLogPolicy & tLocalitySize;
|
||||
serializer(ar, oldTLogData, tLogs[0].tLogPolicy, tLocalitySize);
|
||||
if (ar.isDeserializing) {
|
||||
tLogs[0].tLogLocalities.reserve(tLocalitySize);
|
||||
for (size_t i = 0; i < tLocalitySize; i++) {
|
||||
LocalityData locality;
|
||||
ar & locality;
|
||||
serializer(ar, locality);
|
||||
tLogs[0].tLogLocalities.push_back(locality);
|
||||
}
|
||||
|
||||
|
|
File diff suppressed because it is too large

@ -881,6 +881,7 @@ ACTOR Future<Void> dataDistributionRelocator( DDQueueData *self, RelocateData rd
|
|||
ASSERT( rd.src.size() );
|
||||
loop {
|
||||
state int stuckCount = 0;
|
||||
// state int bestTeamStuckThreshold = 50;
|
||||
loop {
|
||||
state int tciIndex = 0;
|
||||
state bool foundTeams = true;
|
||||
|
@@ -897,6 +898,8 @@ ACTOR Future<Void> dataDistributionRelocator( DDQueueData *self, RelocateData rd
				req.sources = rd.src;
				req.completeSources = rd.completeSources;
				Optional<Reference<IDataDistributionTeam>> bestTeam = wait(brokenPromiseToNever(self->teamCollections[tciIndex].getTeam.getReply(req)));
+				// If a DC has no healthy team, we stop checking the other DCs until
+				// the unhealthy DC is healthy again or is excluded.
				if(!bestTeam.present()) {
					foundTeams = false;
					break;
@@ -922,9 +925,14 @@ ACTOR Future<Void> dataDistributionRelocator( DDQueueData *self, RelocateData rd
				if (foundTeams && anyHealthy) {
					break;
				}

				TEST(true); //did not find a healthy destination team on the first attempt
				stuckCount++;
-				TraceEvent(stuckCount > 50 ? SevWarnAlways : SevWarn, "BestTeamStuck", masterId).suppressFor(1.0).detail("Count", stuckCount);
+				TraceEvent(stuckCount > 50 ? SevWarnAlways : SevWarn, "BestTeamStuck", masterId)
+				    .suppressFor(1.0)
+				    .detail("Count", stuckCount)
+				    .detail("TeamCollectionId", tciIndex)
+				    .detail("NumOfTeamCollections", self->teamCollections.size());
				wait( delay( SERVER_KNOBS->BEST_TEAM_STUCK_DELAY, TaskDataDistributionLaunch ) );
			}
@@ -936,7 +944,8 @@ ACTOR Future<Void> dataDistributionRelocator( DDQueueData *self, RelocateData rd
			for(int i = 0; i < bestTeams.size(); i++) {
				auto& serverIds = bestTeams[i].first->getServerIDs();
				destinationTeams.push_back(ShardsAffectedByTeamFailure::Team(serverIds, i == 0));
-				if(allHealthy && anyWithSource && !bestTeams[i].second) {
+				if (allHealthy && anyWithSource && !bestTeams[i].second) { // bestTeams[i] is not the source of the
+				                                                           // shard
					int idx = g_random->randomInt(0, serverIds.size());
					destIds.push_back(serverIds[idx]);
					healthyIds.push_back(serverIds[idx]);
@@ -955,6 +964,18 @@ ACTOR Future<Void> dataDistributionRelocator( DDQueueData *self, RelocateData rd
				}
			}
+
+			// Sanity check
+			state int totalIds = 0;
+			for (auto& destTeam : destinationTeams) {
+				totalIds += destTeam.servers.size();
+			}
+			if (totalIds != self->teamSize) {
+				TraceEvent(SevWarn, "IncorrectDestTeamSize")
+				    .suppressFor(1.0)
+				    .detail("ExpectedTeamSize", self->teamSize)
+				    .detail("DestTeamSize", totalIds);
+			}

			self->shardsAffectedByTeamFailure->moveShard(rd.keys, destinationTeams);

			//FIXME: do not add data in flight to servers that were already in the src.
@@ -977,6 +998,7 @@ ACTOR Future<Void> dataDistributionRelocator( DDQueueData *self, RelocateData rd
				destIds.insert(destIds.end(), extraIds.begin(), extraIds.end());
				healthyIds.insert(healthyIds.end(), extraIds.begin(), extraIds.end());
				extraIds.clear();
+				ASSERT(totalIds == destIds.size()); // Sanity check the destIDs before we move keys
				doMoveKeys = moveKeys(self->cx, rd.keys, destIds, healthyIds, self->lock, Promise<Void>(), &self->startMoveKeysParallelismLock, &self->finishMoveKeysParallelismLock, self->recoveryVersion, self->teamCollections.size() > 1, relocateShardInterval.pairID );
			} else {
				self->fetchKeysComplete.insert( rd );
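The two relocator hunks above add a defensive size check: the servers chosen across all destination teams should add up to the configured team size, and destIds must match that count before moveKeys runs. A standalone sketch of the same invariant, with hypothetical container names and types:

#include <cassert>
#include <vector>

// Illustrative only: count the servers selected across all destination teams.
int countDestinationServers(const std::vector<std::vector<int>>& destinationTeams) {
	int totalIds = 0;
	for (const auto& team : destinationTeams) {
		totalIds += (int)team.size();
	}
	return totalIds;
}

// Illustrative only: check the invariant before starting a data move.
void checkBeforeMove(const std::vector<std::vector<int>>& destinationTeams,
                     const std::vector<int>& destIds, int expectedTeamSize) {
	int totalIds = countDestinationServers(destinationTeams);
	if (totalIds != expectedTeamSize) {
		// The real code emits a SevWarn trace event here rather than failing hard.
	}
	assert(totalIds == (int)destIds.size()); // sanity check the destination IDs
}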
@@ -770,6 +770,8 @@ private:
		uint64_t popped;
		int payloadSize;
	};
+	// The on disk format depends on the size of PageHeader.
+	static_assert( sizeof(PageHeader) == 36, "PageHeader must be packed" );

	struct Page : PageHeader {
		static const int maxPayload = _PAGE_SIZE - sizeof(PageHeader);
@@ -901,7 +903,7 @@ private:

		// Writes go at the end of our reads (but on the next page)
		self->nextPageSeq = self->nextReadLocation/sizeof(Page)*sizeof(Page);
-		if (self->nextReadLocation % sizeof(Page) > 36) self->nextPageSeq += sizeof(Page);
+		if (self->nextReadLocation % sizeof(Page) > sizeof(PageHeader)) self->nextPageSeq += sizeof(Page);

		TraceEvent("DQRecovered", self->dbgid).detail("LastPoppedSeq", self->lastPoppedSeq).detail("PoppedSeq", self->poppedSeq).detail("NextPageSeq", self->nextPageSeq).detail("File0Name", self->rawQueue->files[0].dbgFilename);
		self->recovered = true;
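These two DiskQueue hunks replace the hard-coded 36 with sizeof(PageHeader) and pin that size with a static_assert, so a change to the on-disk header layout fails at compile time instead of silently skewing offset arithmetic. A small self-contained sketch of the same guard, using a hypothetical header whose fields and 20-byte size are illustrative only:

#include <cstdint>

// Hypothetical on-disk header; field layout and size are illustrative only.
#pragma pack(push, 1)
struct ExampleHeader {
	uint64_t seq;          // 8 bytes
	uint64_t popped;       // 8 bytes
	uint32_t payloadSize;  // 4 bytes
};
#pragma pack(pop)

// Pin the layout: if a field is added or padding creeps in, compilation fails
// here rather than corrupting arithmetic that assumed the old size.
static_assert(sizeof(ExampleHeader) == 20, "ExampleHeader must be packed");

// Offset math can now refer to the header symbolically instead of to a magic number.
constexpr uint32_t examplePageSize = 4096;
constexpr uint32_t exampleMaxPayload = examplePageSize - sizeof(ExampleHeader);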
@@ -20,6 +20,7 @@

#include "fdbserver/Knobs.h"
#include "fdbrpc/Locality.h"
#include <cmath>

ServerKnobs const* SERVER_KNOBS = new ServerKnobs();
@@ -377,6 +378,8 @@ ServerKnobs::ServerKnobs(bool randomize, ClientKnobs* clientKnobs) {
	init( MAX_STORAGE_SERVER_WATCH_BYTES, 100e6 ); if( randomize && BUGGIFY ) MAX_STORAGE_SERVER_WATCH_BYTES = 10e3;
	init( MAX_BYTE_SAMPLE_CLEAR_MAP_SIZE, 1e9 ); if( randomize && BUGGIFY ) MAX_BYTE_SAMPLE_CLEAR_MAP_SIZE = 1e3;
	init( LONG_BYTE_SAMPLE_RECOVERY_DELAY, 60.0 );
+	init( BYTE_SAMPLE_LOAD_PARALLELISM, 32 ); if( randomize && BUGGIFY ) BYTE_SAMPLE_LOAD_PARALLELISM = 1;
+	init( BYTE_SAMPLE_LOAD_DELAY, 0.0 ); if( randomize && BUGGIFY ) BYTE_SAMPLE_LOAD_DELAY = 0.1;

	//Wait Failure
	init( BUGGIFY_OUTSTANDING_WAIT_FAILURE_REQUESTS, 2 );
@@ -315,6 +315,8 @@ public:
	int MAX_STORAGE_SERVER_WATCH_BYTES;
	int MAX_BYTE_SAMPLE_CLEAR_MAP_SIZE;
	double LONG_BYTE_SAMPLE_RECOVERY_DELAY;
+	int BYTE_SAMPLE_LOAD_PARALLELISM;
+	double BYTE_SAMPLE_LOAD_DELAY;

	//Wait Failure
	int BUGGIFY_OUTSTANDING_WAIT_FAILURE_REQUESTS;
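The two knob hunks follow the usual declare-then-init pattern: a new knob is declared in the ServerKnobs header and given its production default in the constructor, with BUGGIFY optionally substituting a stress value during randomized simulation. A minimal stand-alone sketch of that pattern; the struct, defaults, and the randomization stand-in below are illustrative and not the actual Knobs machinery:

#include <random>

// Illustrative only: the declare/init/BUGGIFY shape used for server knobs.
struct ExampleKnobs {
	int    byteSampleLoadParallelism = 0;
	double byteSampleLoadDelay = 0.0;

	void initialize(bool randomize, std::mt19937& rng) {
		// Production defaults.
		byteSampleLoadParallelism = 32;
		byteSampleLoadDelay = 0.0;
		// Stand-in for "if( randomize && BUGGIFY )": occasionally pick stress values
		// so simulation exercises slow-load and low-parallelism paths.
		if (randomize && std::bernoulli_distribution(0.25)(rng)) {
			byteSampleLoadParallelism = 1;
			byteSampleLoadDelay = 0.1;
		}
	}
};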
Some files were not shown because too many files have changed in this diff.