commit 1d7fec3074
Merge commit '048bfc5c368063d9e009513078dab88be0cbd5b0' into task/tls-upgrade-2

# Conflicts:
#	.gitignore
.gitignore
@@ -91,4 +91,4 @@ GRTAGS
 GTAGS
 GPATH
 gtags.files
-ctags.files
+ctags.files
CMakeLists.txt
@@ -0,0 +1,221 @@
#
# CMakeLists.txt
#
# This source file is part of the FoundationDB open source project
#
# Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
cmake_minimum_required(VERSION 3.12)
project(fdb
  VERSION 6.1.0
  DESCRIPTION "FoundationDB is a scalable, fault-tolerant, ordered key-value store with full ACID transactions."
  HOMEPAGE_URL "http://www.foundationdb.org/"
  LANGUAGES C CXX ASM Java)

set(CMAKE_MODULE_PATH "${CMAKE_MODULE_PATH};${PROJECT_SOURCE_DIR}/cmake")
message(STATUS "${PROJECT_SOURCE_DIR} ${PROJECT_BINARY_DIR}")
if("${PROJECT_SOURCE_DIR}" STREQUAL "${PROJECT_BINARY_DIR}")
  message(FATAL_ERROR "In-source builds are forbidden, unsupported, and stupid!!")
endif()

if(NOT CMAKE_BUILD_TYPE AND NOT CMAKE_CONFIGURATION_TYPES)
  message(STATUS "Setting build type to 'Release' as none was specified")
  set(CMAKE_BUILD_TYPE Release CACHE STRING "Choose the type of build" FORCE)
  set_property(CACHE CMAKE_BUILD_TYPE PROPERTY STRINGS "Debug" "Release"
    "MinSizeRel" "RelWithDebInfo")
endif()

set(EXECUTABLE_OUTPUT_PATH ${PROJECT_BINARY_DIR}/bin)
set(LIBRARY_OUTPUT_PATH ${PROJECT_BINARY_DIR}/lib)

################################################################################
# Packages used for bindings
################################################################################

set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_CURRENT_SOURCE_DIR}/cmake/")

find_package(PythonInterp 3.4 REQUIRED)
set(Python_ADDITIONAL_VERSIONS 3.4 3.5 3.6)
find_package(PythonLibs 3.4 REQUIRED)


################################################################################
# Compiler configuration
################################################################################

include(ConfigureCompiler)

################################################################################
# Get repository information
################################################################################

add_custom_target(branch_file ALL DEPENDS ${CURR_BRANCH_FILE})
execute_process(
  COMMAND git rev-parse HEAD
  WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}
  OUTPUT_VARIABLE CURRENT_GIT_VERSION_WNL)
string(STRIP "${CURRENT_GIT_VERSION_WNL}" CURRENT_GIT_VERSION)
message(STATUS "Current git version ${CURRENT_GIT_VERSION}")

################################################################################
# Version information
################################################################################

set(USE_VERSIONS_TARGET OFF CACHE BOOL "Use the deprecated versions.target file")
if(USE_VERSIONS_TARGET)
  add_custom_target(version_file ALL DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/versions.target)
  execute_process(
    COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/build/get_version.sh ${CMAKE_CURRENT_SOURCE_DIR}/versions.target
    OUTPUT_VARIABLE FDB_VERSION_WNL)
  execute_process(
    COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/build/get_package_name.sh ${CMAKE_CURRENT_SOURCE_DIR}/versions.target
    OUTPUT_VARIABLE FDB_PACKAGE_NAME_WNL)
  string(STRIP "${FDB_VERSION_WNL}" FDB_VERSION)
  string(STRIP "${FDB_PACKAGE_NAME_WNL}" FDB_PACKAGE_NAME)
  set(FDB_VERSION_PLAIN ${FDB_VERSION})
  if(NOT FDB_RELEASE)
    set(FDB_VERSION "${FDB_VERSION}-PRERELEASE")
  endif()
else()
  set(FDB_PACKAGE_NAME "${PROJECT_VERSION_MAJOR}.${PROJECT_VERSION_MINOR}")
  set(FDB_VERSION ${PROJECT_VERSION})
  set(FDB_VERSION_PLAIN ${FDB_VERSION})
endif()

message(STATUS "FDB version is ${FDB_VERSION}")
message(STATUS "FDB package name is ${FDB_PACKAGE_NAME}")
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/fdbclient/versions.h.cmake ${CMAKE_CURRENT_BINARY_DIR}/fdbclient/versions.h)


################################################################################
# Flow
################################################################################

# First thing we need is the actor compiler - and to compile and run the
# actor compiler, we need mono
include(CompileActorCompiler)

# with the actor compiler, we can now make the flow commands available
include(FlowCommands)

################################################################################
# Vexillographer
################################################################################

include(CompileVexillographer)

# This macro can be used to install symlinks, which turns out to be
# non-trivial due to CMake version differences and limitations on how
# files can be installed when building binary packages.
#
# The rule for binary packaging is that files (including symlinks) must
# be installed with the standard CMake install() macro.
#
# The rule for non-binary packaging is that CMake 2.6 cannot install()
# symlinks, but can create the symlink at install-time via scripting.
# Though, we assume that CMake 2.6 isn't going to be used to generate
# packages because versions later than 2.8.3 are superior for that purpose.
#
# _filepath: the absolute path to the file to symlink
# _sympath: absolute path of the installed symlink

macro(InstallSymlink _filepath _sympath)
  get_filename_component(_symname ${_sympath} NAME)
  get_filename_component(_installdir ${_sympath} PATH)

  if (BINARY_PACKAGING_MODE)
    execute_process(COMMAND "${CMAKE_COMMAND}" -E create_symlink
                    ${_filepath}
                    ${CMAKE_CURRENT_BINARY_DIR}/${_symname})
    install(FILES ${CMAKE_CURRENT_BINARY_DIR}/${_symname}
            DESTINATION ${_installdir}
            COMPONENT clients)
  else ()
    # scripting the symlink installation at install time should work
    # for CMake 2.6.x and 2.8.x
    install(CODE "
            if (\"\$ENV{DESTDIR}\" STREQUAL \"\")
              execute_process(COMMAND \"${CMAKE_COMMAND}\" -E create_symlink
                              ${_filepath}
                              ${_installdir}/${_symname})
            else ()
              execute_process(COMMAND \"${CMAKE_COMMAND}\" -E create_symlink
                              ${_filepath}
                              \$ENV{DESTDIR}/${_installdir}/${_symname})
            endif ()
            "
            COMPONENT clients)
  endif ()
endmacro(InstallSymlink)

################################################################################
# Generate config file
################################################################################

string(RANDOM LENGTH 8 description1)
string(RANDOM LENGTH 8 description2)
set(CLUSTER_DESCRIPTION1 ${description1} CACHE STRING "Cluster description")
set(CLUSTER_DESCRIPTION2 ${description2} CACHE STRING "Cluster description")

configure_file(fdb.cluster.cmake ${CMAKE_CURRENT_BINARY_DIR}/fdb.cluster)


################################################################################
# testing
################################################################################
enable_testing()

################################################################################
# Directory structure
################################################################################

include(cmake/InstallLayout.cmake)

################################################################################
# Random seed
################################################################################

string(RANDOM LENGTH 8 ALPHABET "0123456789abcdef" SEED_)
set(SEED "0x${SEED_}" CACHE STRING "Random seed for testing")

################################################################################
# components
################################################################################

include(CompileBoost)
add_subdirectory(flow)
add_subdirectory(fdbrpc)
add_subdirectory(fdbclient)
add_subdirectory(fdbserver)
add_subdirectory(fdbcli)
add_subdirectory(fdbmonitor)
add_subdirectory(bindings)
add_subdirectory(fdbbackup)

include(CPack)

################################################################################
# process compile commands for IDE
################################################################################

if (CMAKE_EXPORT_COMPILE_COMMANDS)
  add_custom_command(
    OUTPUT ${CMAKE_CURRENT_SOURCE_DIR}/compile_commands.json
    COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/build/gen_compile_db.py
      ARGS -b ${CMAKE_CURRENT_BINARY_DIR} -s ${CMAKE_CURRENT_SOURCE_DIR} -o ${CMAKE_CURRENT_SOURCE_DIR}/compile_commands.json ${CMAKE_CURRENT_BINARY_DIR}/compile_commands.json
    DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/build/gen_compile_db.py ${CMAKE_CURRENT_BINARY_DIR}/compile_commands.json
    COMMENT "Build compile commands for IDE"
    )
  add_custom_target(processed_compile_commands ALL DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/compile_commands.json ${CMAKE_CURRENT_BINARY_DIR}/compile_commands.json)
endif()
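The InstallSymlink macro is easiest to read from a call site. A minimal sketch of a caller, with purely illustrative paths (this invocation is not part of the commit):

    # Hypothetical call: expose fdbbackup under a second name. In
    # BINARY_PACKAGING_MODE the symlink is created in the build tree and
    # installed as a regular file; otherwise install-time script code
    # creates it, honoring $ENV{DESTDIR}.
    InstallSymlink("/usr/bin/fdbbackup" "/usr/bin/fdbrestore")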
Makefile
@@ -15,13 +15,12 @@ ifeq ($(MONO),)
   MONO := /usr/bin/mono
 endif

-DMCS := $(shell which dmcs)
-ifneq ($(DMCS),)
-  MCS := $(DMCS)
+MCS := $(shell which mcs)
+ifeq ($(MCS),)
+  MCS := $(shell which dmcs)
 endif
 ifeq ($(MCS),)
-  MCS := /usr/bin/dmcs
+  MCS := /usr/bin/mcs
 endif

 CFLAGS := -Werror -Wno-error=format -fPIC -DNO_INTELLISENSE -fvisibility=hidden -DNDEBUG=1 -Wreturn-type -fno-omit-frame-pointer
bindings/CMakeLists.txt
@@ -0,0 +1,3 @@
add_subdirectory(c)
add_subdirectory(python)
add_subdirectory(java)
bindings/c/CMakeLists.txt
@@ -0,0 +1,53 @@
set(FDB_C_SRCS
  fdb_c.cpp
  foundationdb/fdb_c.h
  ThreadCleanup.cpp)

file(MAKE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/foundationdb)

set(platform)
if(APPLE)
  set(platform "osx")
else()
  set(platform "linux")
endif()

add_custom_command(OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/fdb_c.g.S
                          ${CMAKE_CURRENT_BINARY_DIR}/fdb_c_function_pointers.g.h
  COMMAND ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/generate_asm.py ${platform}
    ${CMAKE_CURRENT_SOURCE_DIR}/fdb_c.cpp
    ${CMAKE_CURRENT_BINARY_DIR}/fdb_c.g.S
    ${CMAKE_CURRENT_BINARY_DIR}/fdb_c_function_pointers.g.h
  DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/generate_asm.py ${CMAKE_CURRENT_SOURCE_DIR}/fdb_c.cpp
  COMMENT "Generate C bindings")
add_custom_target(fdb_c_generated DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/fdb_c.g.S
  ${CMAKE_CURRENT_BINARY_DIR}/fdb_c_function_pointers.g.h)

add_custom_command(OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/foundationdb/fdb_c_options.g.h
  COMMAND ${MONO_EXECUTABLE} ${VEXILLOGRAPHER_EXE} ${CMAKE_SOURCE_DIR}/fdbclient/vexillographer/fdb.options c ${CMAKE_CURRENT_BINARY_DIR}/foundationdb/fdb_c_options.g.h
  DEPENDS ${CMAKE_SOURCE_DIR}/fdbclient/vexillographer/fdb.options vexillographer
  COMMENT "Generate C options")
add_custom_target(fdb_c_options DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/foundationdb/fdb_c_options.g.h)

include(GenerateExportHeader)

add_library(fdb_c SHARED ${FDB_C_SRCS} ${CMAKE_CURRENT_BINARY_DIR}/fdb_c.g.S)
add_dependencies(fdb_c fdb_c_generated fdb_c_options)
target_link_libraries(fdb_c PUBLIC fdbclient)
target_include_directories(fdb_c PUBLIC
  ${CMAKE_CURRENT_BINARY_DIR}
  ${CMAKE_CURRENT_SOURCE_DIR}
  ${CMAKE_CURRENT_BINARY_DIR}/foundationdb)
# TODO: re-enable once the old vcxproj-based build system is removed.
#generate_export_header(fdb_c EXPORT_MACRO_NAME "DLLEXPORT"
#  EXPORT_FILE_NAME ${CMAKE_CURRENT_BINARY_DIR}/foundationdb/fdb_c_export.h)
install(TARGETS fdb_c
  EXPORT fdbc
  DESTINATION ${FDB_LIB_DIR}
  COMPONENT clients)
install(
  FILES foundationdb/fdb_c.h
    ${CMAKE_CURRENT_BINARY_DIR}/foundationdb/fdb_c_options.g.h
    ${CMAKE_SOURCE_DIR}/fdbclient/vexillographer/fdb.options
  DESTINATION ${FDB_INCLUDE_INSTALL_DIR}/foundationdb COMPONENT clients)
#install(EXPORT fdbc DESTINATION ${FDB_LIB_DIR}/foundationdb COMPONENT clients)
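Because fdb_c is a regular CMake target with PUBLIC usage requirements, downstream targets pick up its include directories and the fdbclient dependency transitively. A minimal sketch of a consumer (the test source file is hypothetical, not part of this commit):

    # Hypothetical smoke test linking the C bindings built above.
    add_executable(c_api_smoke_test c_api_smoke_test.c)
    target_link_libraries(c_api_smoke_test PRIVATE fdb_c)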
@@ -87,7 +87,7 @@ namespace FDB {

 	template <class Ar>
 	void serialize( Ar& ar ) {
-		ar & key & orEqual & offset;
+		serializer(ar, key, orEqual, offset);
 	}
 };
 inline bool operator == (const KeySelectorRef& lhs, const KeySelectorRef& rhs) { return lhs.key == rhs.key && lhs.orEqual==rhs.orEqual && lhs.offset==rhs.offset; }

@@ -123,7 +123,7 @@ namespace FDB {
 	int expectedSize() const { return key.expectedSize() + value.expectedSize(); }

 	template <class Ar>
-	force_inline void serialize(Ar& ar) { ar & key & value; }
+	force_inline void serialize(Ar& ar) { serializer(ar, key, value); }

 	struct OrderByKey {
 		bool operator()(KeyValueRef const& a, KeyValueRef const& b) const {

@@ -171,7 +171,7 @@ namespace FDB {

 	template <class Ar>
 	void serialize( Ar& ar ) {
-		ar & ((VectorRef<KeyValueRef>&)*this) & more & readThrough & readToBegin & readThroughEnd;
+		serializer(ar, ((VectorRef<KeyValueRef>&)*this), more, readThrough, readToBegin, readThroughEnd);
 	}
 };

@@ -234,7 +234,7 @@ namespace FDB {

 	template <class Ar>
 	force_inline void serialize(Ar& ar) {
-		ar & const_cast<KeyRef&>(begin) & const_cast<KeyRef&>(end);
+		serializer(ar, const_cast<KeyRef&>(begin), const_cast<KeyRef&>(end));
 		if( begin > end ) {
 			throw inverted_range();
 		};
@@ -86,7 +86,7 @@ func retryable(wrapped func() (interface{}, error), onError func(Error) FutureNil
 	for {
 		ret, e = wrapped()

-		/* No error means success! */
+		// No error means success!
 		if e == nil {
 			return
 		}

@@ -96,8 +96,8 @@ func retryable(wrapped func() (interface{}, error), onError func(Error) FutureNil
 			e = onError(ep).Get()
 		}

-		/* If OnError returns an error, then it's not
-		/* retryable; otherwise take another pass at things */
+		// If OnError returns an error, then it's not
+		// retryable; otherwise take another pass at things
 		if e != nil {
 			return
 		}

@@ -125,7 +125,7 @@ func retryable(wrapped func() (interface{}, error), onError func(Error) FutureNil
 // Transaction and Database objects.
 func (d Database) Transact(f func(Transaction) (interface{}, error)) (interface{}, error) {
 	tr, e := d.CreateTransaction()
-	/* Any error here is non-retryable */
+	// Any error here is non-retryable
 	if e != nil {
 		return nil, e
 	}

@@ -165,7 +165,7 @@ func (d Database) Transact(f func(Transaction) (interface{}, error)) (interface{}, error)
 // Transaction, Snapshot and Database objects.
 func (d Database) ReadTransact(f func(ReadTransaction) (interface{}, error)) (interface{}, error) {
 	tr, e := d.CreateTransaction()
-	/* Any error here is non-retryable */
+	// Any error here is non-retryable
 	if e != nil {
 		return nil, e
 	}

@@ -37,9 +37,9 @@ import (
 	"unsafe"
 )

-/* Would put this in futures.go but for the documented issue with
-/* exports and functions in preamble
-/* (https://code.google.com/p/go-wiki/wiki/cgo#Global_functions) */
+// Would put this in futures.go but for the documented issue with
+// exports and functions in preamble
+// (https://code.google.com/p/go-wiki/wiki/cgo#Global_functions)
 //export unlockMutex
 func unlockMutex(p unsafe.Pointer) {
 	m := (*sync.Mutex)(p)
bindings/java/CMakeLists.txt
@@ -0,0 +1,144 @@
include(UseJava)
find_package(JNI 1.8 REQUIRED)
find_package(Java 1.8 COMPONENTS Development REQUIRED)

set(JAVA_BINDING_SRCS
  src/main/com/apple/foundationdb/async/AsyncIterable.java
  src/main/com/apple/foundationdb/async/AsyncIterator.java
  src/main/com/apple/foundationdb/async/AsyncUtil.java
  src/main/com/apple/foundationdb/async/Cancellable.java
  src/main/com/apple/foundationdb/async/CloneableException.java
  src/main/com/apple/foundationdb/async/CloseableAsyncIterator.java
  src/main/com/apple/foundationdb/async/package-info.java
  src/main/com/apple/foundationdb/Cluster.java
  src/main/com/apple/foundationdb/Database.java
  src/main/com/apple/foundationdb/directory/Directory.java
  src/main/com/apple/foundationdb/directory/DirectoryAlreadyExistsException.java
  src/main/com/apple/foundationdb/directory/DirectoryException.java
  src/main/com/apple/foundationdb/directory/DirectoryLayer.java
  src/main/com/apple/foundationdb/directory/DirectoryMoveException.java
  src/main/com/apple/foundationdb/directory/DirectoryPartition.java
  src/main/com/apple/foundationdb/directory/DirectorySubspace.java
  src/main/com/apple/foundationdb/directory/DirectoryUtil.java
  src/main/com/apple/foundationdb/directory/DirectoryVersionException.java
  src/main/com/apple/foundationdb/directory/MismatchedLayerException.java
  src/main/com/apple/foundationdb/directory/NoSuchDirectoryException.java
  src/main/com/apple/foundationdb/directory/package-info.java
  src/main/com/apple/foundationdb/directory/PathUtil.java
  src/main/com/apple/foundationdb/FDB.java
  src/main/com/apple/foundationdb/FDBDatabase.java
  src/main/com/apple/foundationdb/FDBTransaction.java
  src/main/com/apple/foundationdb/FutureCluster.java
  src/main/com/apple/foundationdb/FutureDatabase.java
  src/main/com/apple/foundationdb/FutureKey.java
  src/main/com/apple/foundationdb/FutureResult.java
  src/main/com/apple/foundationdb/FutureResults.java
  src/main/com/apple/foundationdb/FutureStrings.java
  src/main/com/apple/foundationdb/FutureVersion.java
  src/main/com/apple/foundationdb/FutureVoid.java
  src/main/com/apple/foundationdb/JNIUtil.java
  src/main/com/apple/foundationdb/KeySelector.java
  src/main/com/apple/foundationdb/KeyValue.java
  src/main/com/apple/foundationdb/LocalityUtil.java
  src/main/com/apple/foundationdb/NativeFuture.java
  src/main/com/apple/foundationdb/NativeObjectWrapper.java
  src/main/com/apple/foundationdb/OptionConsumer.java
  src/main/com/apple/foundationdb/OptionsSet.java
  src/main/com/apple/foundationdb/package-info.java
  src/main/com/apple/foundationdb/Range.java
  src/main/com/apple/foundationdb/RangeQuery.java
  src/main/com/apple/foundationdb/RangeResult.java
  src/main/com/apple/foundationdb/RangeResultInfo.java
  src/main/com/apple/foundationdb/RangeResultSummary.java
  src/main/com/apple/foundationdb/ReadTransaction.java
  src/main/com/apple/foundationdb/ReadTransactionContext.java
  src/main/com/apple/foundationdb/subspace/package-info.java
  src/main/com/apple/foundationdb/subspace/Subspace.java
  src/main/com/apple/foundationdb/Transaction.java
  src/main/com/apple/foundationdb/TransactionContext.java
  src/main/com/apple/foundationdb/tuple/ByteArrayUtil.java
  src/main/com/apple/foundationdb/tuple/IterableComparator.java
  src/main/com/apple/foundationdb/tuple/package-info.java
  src/main/com/apple/foundationdb/tuple/Tuple.java
  src/main/com/apple/foundationdb/tuple/TupleUtil.java
  src/main/com/apple/foundationdb/tuple/Versionstamp.java)

set(JAVA_TESTS_SRCS
  src/test/com/apple/foundationdb/test/AbstractTester.java
  src/test/com/apple/foundationdb/test/AsyncDirectoryExtension.java
  src/test/com/apple/foundationdb/test/AsyncStackTester.java
  src/test/com/apple/foundationdb/test/BlockingBenchmark.java
  src/test/com/apple/foundationdb/test/ConcurrentGetSetGet.java
  src/test/com/apple/foundationdb/test/Context.java
  src/test/com/apple/foundationdb/test/ContinuousSample.java
  src/test/com/apple/foundationdb/test/DirectoryExtension.java
  src/test/com/apple/foundationdb/test/DirectoryOperation.java
  src/test/com/apple/foundationdb/test/DirectoryTest.java
  src/test/com/apple/foundationdb/test/DirectoryUtil.java
  src/test/com/apple/foundationdb/test/Example.java
  src/test/com/apple/foundationdb/test/Instruction.java
  src/test/com/apple/foundationdb/test/IterableTest.java
  src/test/com/apple/foundationdb/test/LocalityTests.java
  src/test/com/apple/foundationdb/test/ParallelRandomScan.java
  src/test/com/apple/foundationdb/test/PerformanceTester.java
  src/test/com/apple/foundationdb/test/RangeTest.java
  src/test/com/apple/foundationdb/test/RYWBenchmark.java
  src/test/com/apple/foundationdb/test/SerialInsertion.java
  src/test/com/apple/foundationdb/test/SerialIteration.java
  src/test/com/apple/foundationdb/test/SerialTest.java
  src/test/com/apple/foundationdb/test/Stack.java
  src/test/com/apple/foundationdb/test/StackEntry.java
  src/test/com/apple/foundationdb/test/StackOperation.java
  src/test/com/apple/foundationdb/test/StackTester.java
  src/test/com/apple/foundationdb/test/StackUtils.java
  src/test/com/apple/foundationdb/test/TesterArgs.java
  src/test/com/apple/foundationdb/test/TestResult.java
  src/test/com/apple/foundationdb/test/TupleTest.java
  src/test/com/apple/foundationdb/test/VersionstampSmokeTest.java
  src/test/com/apple/foundationdb/test/WatchTest.java
  src/test/com/apple/foundationdb/test/WhileTrueTest.java)

set(GENERATED_JAVA_DIR ${CMAKE_CURRENT_BINARY_DIR}/src/main/com/foundationdb)
file(MAKE_DIRECTORY ${GENERATED_JAVA_DIR})

set(GENERATED_JAVA_FILES
  ${GENERATED_JAVA_DIR}/ClusterOptions.java
  ${GENERATED_JAVA_DIR}/ConflictRangeType.java
  ${GENERATED_JAVA_DIR}/DatabaseOptions.java
  ${GENERATED_JAVA_DIR}/MutationType.java
  ${GENERATED_JAVA_DIR}/NetworkOptions.java
  ${GENERATED_JAVA_DIR}/StreamingMode.java
  ${GENERATED_JAVA_DIR}/TransactionOptions.java
  ${GENERATED_JAVA_DIR}/FDBException.java)

add_custom_command(OUTPUT ${GENERATED_JAVA_FILES}
  COMMAND ${MONO_EXECUTABLE} ${VEXILLOGRAPHER_EXE} ${CMAKE_SOURCE_DIR}/fdbclient/vexillographer/fdb.options java ${GENERATED_JAVA_DIR}
  DEPENDS ${CMAKE_SOURCE_DIR}/fdbclient/vexillographer/fdb.options vexillographer
  COMMENT "Generate Java options")
add_custom_target(fdb_java_options DEPENDS ${GENERATED_JAVA_DIR}/StreamingMode.java)

set(SYSTEM_NAME "linux")
if (APPLE)
  set(SYSTEM_NAME "osx")
endif()

add_library(fdb_java SHARED fdbJNI.cpp)
message(DEBUG ${JNI_INCLUDE_DIRS})
message(DEBUG ${JNI_LIBRARIES})
target_include_directories(fdb_java PRIVATE ${JNI_INCLUDE_DIRS})
# libfdb_java.so is loaded by fdb-java.jar and doesn't need to depend on jvm shared libraries.
target_link_libraries(fdb_java PRIVATE fdb_c)
set_target_properties(fdb_java PROPERTIES
  LIBRARY_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/lib/${SYSTEM_NAME}/amd64/)

set(CMAKE_JAVA_COMPILE_FLAGS "-source" "1.8" "-target" "1.8")
set(CMAKE_JNI_TARGET TRUE)
set(JAR_VERSION "${FDB_MAJOR}.${FDB_MINOR}.${FDB_REVISION}")
add_jar(fdb-java ${JAVA_BINDING_SRCS} ${GENERATED_JAVA_FILES}
  OUTPUT_DIR ${PROJECT_BINARY_DIR}/lib)
add_dependencies(fdb-java fdb_java_options fdb_java)
add_jar(foundationdb-tests SOURCES ${JAVA_TESTS_SRCS} INCLUDE_JARS fdb-java)
add_dependencies(foundationdb-tests fdb_java_options)

install_jar(fdb-java DESTINATION ${FDB_SHARE_DIR}/java COMPONENT clients)
install(TARGETS fdb_java DESTINATION ${FDB_LIB_DIR} COMPONENT clients)
bindings/python/CMakeLists.txt
@@ -0,0 +1,44 @@
set(SRCS
  fdb/__init__.py
  fdb/directory_impl.py
  fdb/impl.py
  fdb/locality.py
  fdb/six.py
  fdb/subspace_impl.py
  fdb/tuple.py)

if(APPLE)
  list(APPEND SRCS fdb/libfdb_c.dylib.pth)
else()
  list(APPEND SRCS fdb/libfdb_c.so.pth)
endif()

set(out_files "")
foreach(src ${SRCS})
  get_filename_component(dirname ${src} DIRECTORY)
  get_filename_component(extname ${src} EXT)
  add_custom_command(OUTPUT ${PROJECT_BINARY_DIR}/bindings/python/${src}
    COMMAND mkdir -p ${PROJECT_BINARY_DIR}/bindings/python/${dirname}
    COMMAND cp ${src} ${PROJECT_BINARY_DIR}/bindings/python/${dirname}/
    DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/${src}
    WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
    COMMENT "copy ${src}")
  set(out_files "${out_files};${PROJECT_BINARY_DIR}/bindings/python/${src}")
endforeach()
add_custom_target(python_binding ALL DEPENDS ${out_files})

file(MAKE_DIRECTORY ${PROJECT_BINARY_DIR}/bindings/python/fdb)
set(options_file ${PROJECT_BINARY_DIR}/bindings/python/fdb/fdboptions.py)
add_custom_command(OUTPUT ${options_file}
  COMMAND ${MONO_EXECUTABLE} ${VEXILLOGRAPHER_EXE} ${CMAKE_SOURCE_DIR}/fdbclient/vexillographer/fdb.options python ${options_file}
  DEPENDS ${PROJECT_SOURCE_DIR}/fdbclient/vexillographer/fdb.options vexillographer
  COMMENT "Generate Python options")
add_custom_target(fdb_python_options DEPENDS
  ${options_file}
  ${PROJECT_SOURCE_DIR}/fdbclient/vexillographer/fdb.options
  vexillographer)

add_dependencies(python_binding fdb_python_options)

set(out_files "${out_files};${options_file}")
install(FILES ${out_files} DESTINATION ${FDB_PYTHON_INSTALL_DIR} COMPONENT clients)
Dockerfile
@@ -9,7 +9,13 @@ RUN adduser --disabled-password --gecos '' fdb && chown -R fdb /opt && chmod -R

 USER fdb

-RUN cd /opt/ && wget http://downloads.sourceforge.net/project/boost/boost/1.52.0/boost_1_52_0.tar.bz2 -qO - | tar -xj
+# wget of bintray without forcing UTF-8 encoding results in 403 Forbidden
+RUN cd /opt/ && wget http://downloads.sourceforge.net/project/boost/boost/1.52.0/boost_1_52_0.tar.bz2 &&\
+    wget --local-encoding=UTF-8 https://dl.bintray.com/boostorg/release/1.67.0/source/boost_1_67_0.tar.bz2 &&\
+    echo '2684c972994ee57fc5632e03bf044746f6eb45d4920c343937a465fd67a5adba  boost_1_67_0.tar.bz2' | sha256sum -c - &&\
+    tar -xjf boost_1_52_0.tar.bz2 &&\
+    tar -xjf boost_1_67_0.tar.bz2 &&\
+    rm boost_1_52_0.tar.bz2 boost_1_67_0.tar.bz2

 USER root
build/gen_compile_db.py
@@ -0,0 +1,51 @@
#!/usr/bin/env python3
from argparse import ArgumentParser
import os
import json
import re

def actorFile(actor: str, build: str, src: str):
    res = actor.replace(build, src, 1)
    res = res.replace('actor.g.cpp', 'actor.cpp')
    return res.replace('actor.g.h', 'actor.h')

def rreplace(s, old, new, occurrence=1):
    li = s.rsplit(old, occurrence)
    return new.join(li)


def actorCommand(cmd: str, build: str, src: str):
    r1 = re.compile(r'-c (.+)(actor\.g\.cpp)')
    m1 = r1.search(cmd)
    if m1 is None:
        return cmd
    cmd1 = r1.sub(r'\1actor.cpp', cmd)
    return rreplace(cmd1, build, src)


parser = ArgumentParser(description="Generates a new compile_commands.json for rtags+flow")
parser.add_argument("-b", help="Build directory", dest="builddir", default=os.getcwd())
parser.add_argument("-s", help="Source directory", dest="srcdir", default=os.getcwd())
parser.add_argument("-o", help="Output file", dest="out", default="processed_compile_commands.json")
parser.add_argument("input", help="compile_commands.json", default="compile_commands.json", nargs="?")
args = parser.parse_args()

print("transform {} with build directory {}".format(args.input, args.builddir))

with open(args.input) as f:
    cmds = json.load(f)

result = []

for cmd in cmds:
    cmd['command'] = cmd['command'].replace(' -DNO_INTELLISENSE ', ' ')
    if cmd['file'].endswith('actor.g.cpp'):
        # here we need to rewrite the rule
        cmd['command'] = actorCommand(cmd['command'], args.builddir, args.srcdir)
        cmd['file'] = actorFile(cmd['file'], args.builddir, args.srcdir)
        result.append(cmd)
    else:
        result.append(cmd)

with open(args.out, 'w') as f:
    json.dump(result, f, indent=4)
build/get_package_name.sh
@@ -0,0 +1,3 @@
#!/usr/bin/env bash

cat $1 | grep '<PackageName>' | sed -e 's,^[^>]*>,,' -e 's,<.*,,'
build/get_version.sh
@@ -0,0 +1,4 @@
#!/usr/bin/env bash

cat $1 | grep '<Version>' | sed -e 's,^[^>]*>,,' -e 's,<.*,,'
cmake/CompileActorCompiler.cmake
@@ -0,0 +1,36 @@
find_program(MONO_EXECUTABLE mono)
find_program(MCS_EXECUTABLE dmcs)

if (NOT MCS_EXECUTABLE)
  find_program(MCS_EXECUTABLE mcs)
endif()

set(MONO_FOUND FALSE CACHE INTERNAL "")

if (MONO_EXECUTABLE AND MCS_EXECUTABLE)
  set(MONO_FOUND True CACHE INTERNAL "")
endif()

if (NOT MONO_FOUND)
  message(FATAL_ERROR "Could not find mono")
endif()

set(ACTORCOMPILER_SRCS
  ${CMAKE_CURRENT_SOURCE_DIR}/flow/actorcompiler/ActorCompiler.cs
  ${CMAKE_CURRENT_SOURCE_DIR}/flow/actorcompiler/ActorParser.cs
  ${CMAKE_CURRENT_SOURCE_DIR}/flow/actorcompiler/ParseTree.cs
  ${CMAKE_CURRENT_SOURCE_DIR}/flow/actorcompiler/Program.cs
  ${CMAKE_CURRENT_SOURCE_DIR}/flow/actorcompiler/Properties/AssemblyInfo.cs)
set(ACTOR_COMPILER_REFERENCES
  "-r:System,System.Core,System.Xml.Linq,System.Data.DataSetExtensions,Microsoft.CSharp,System.Data,System.Xml")

add_custom_command(OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/actorcompiler.exe
  COMMAND ${MCS_EXECUTABLE} ARGS ${ACTOR_COMPILER_REFERENCES} ${ACTORCOMPILER_SRCS} "-target:exe" "-out:actorcompiler.exe"
  DEPENDS ${ACTORCOMPILER_SRCS}
  COMMENT "Compile actor compiler" VERBATIM)
add_custom_target(actorcompiler DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/actorcompiler.exe)
set(actor_exe "${CMAKE_CURRENT_BINARY_DIR}/actorcompiler.exe")
cmake/CompileBoost.cmake
@@ -0,0 +1,26 @@
find_package(Boost 1.67)

if(Boost_FOUND)
  add_library(boost_target INTERFACE)
  target_link_libraries(boost_target INTERFACE Boost::boost)
else()
  include(ExternalProject)
  ExternalProject_add(boostProject
    URL "https://dl.bintray.com/boostorg/release/1.67.0/source/boost_1_67_0.tar.bz2"
    URL_HASH SHA256=2684c972994ee57fc5632e03bf044746f6eb45d4920c343937a465fd67a5adba
    CONFIGURE_COMMAND ""
    BUILD_COMMAND ""
    BUILD_IN_SOURCE ON
    INSTALL_COMMAND ""
    UPDATE_COMMAND ""
    BUILD_BYPRODUCTS <SOURCE_DIR>/boost/config.hpp)

  ExternalProject_Get_property(boostProject SOURCE_DIR)

  set(BOOST_INCLUDE_DIR ${SOURCE_DIR})
  message(STATUS "Boost include dir ${BOOST_INCLUDE_DIR}")

  add_library(boost_target INTERFACE)
  add_dependencies(boost_target boostProject)
  target_include_directories(boost_target INTERFACE ${BOOST_INCLUDE_DIR})
endif()
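Both branches end by defining the same boost_target interface library, so consumers never need to know whether Boost came from the system or from the ExternalProject download. A sketch of a consumer (the target and source names are illustrative, not part of this commit):

    # Hypothetical consumer: header-only Boost via the interface target above;
    # the same line works for both the system and downloaded cases.
    add_library(example_lib STATIC example.cpp)
    target_link_libraries(example_lib PUBLIC boost_target)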
cmake/CompileVexillographer.cmake
@@ -0,0 +1,25 @@
set(VEXILLOGRAPHER_SRCS
  ${CMAKE_CURRENT_SOURCE_DIR}/fdbclient/vexillographer/c.cs
  ${CMAKE_CURRENT_SOURCE_DIR}/fdbclient/vexillographer/cpp.cs
  ${CMAKE_CURRENT_SOURCE_DIR}/fdbclient/vexillographer/java.cs
  ${CMAKE_CURRENT_SOURCE_DIR}/fdbclient/vexillographer/python.cs
  ${CMAKE_CURRENT_SOURCE_DIR}/fdbclient/vexillographer/ruby.cs
  ${CMAKE_CURRENT_SOURCE_DIR}/fdbclient/vexillographer/vexillographer.cs)

set(VEXILLOGRAPHER_REFERENCES "-r:System,System.Core,System.Data,System.Xml,System.Xml.Linq")
set(VEXILLOGRAPHER_EXE "${CMAKE_CURRENT_BINARY_DIR}/vexillographer.exe")
add_custom_command(OUTPUT ${VEXILLOGRAPHER_EXE}
  COMMAND ${MCS_EXECUTABLE} ARGS ${VEXILLOGRAPHER_REFERENCES} ${VEXILLOGRAPHER_SRCS} -target:exe -out:${VEXILLOGRAPHER_EXE}
  DEPENDS ${VEXILLOGRAPHER_SRCS}
  COMMENT "Compile Vexillographer")
add_custom_target(vexillographer DEPENDS ${VEXILLOGRAPHER_EXE})

set(ERROR_GEN_SRCS
  ${CMAKE_CURRENT_SOURCE_DIR}/flow/error_gen.cs)
set(ERROR_GEN_REFERENCES "-r:System,System.Core,System.Data,System.Xml,System.Xml.Linq")
set(ERROR_GEN_EXE "${CMAKE_CURRENT_BINARY_DIR}/error_gen.exe")
add_custom_command(OUTPUT ${ERROR_GEN_EXE}
  COMMAND ${MCS_EXECUTABLE} ARGS ${ERROR_GEN_REFERENCES} ${ERROR_GEN_SRCS} -target:exe -out:${ERROR_GEN_EXE}
  DEPENDS ${ERROR_GEN_SRCS}
  COMMENT "Compile error_gen")
add_custom_target(error_gen DEPENDS ${ERROR_GEN_EXE})
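The vexillographer executable is consumed through add_custom_command elsewhere in this commit; the C, Java, and Python option generators all follow the same shape. For one more language, a sketch might look like the following. The ruby output mode is only assumed here from ruby.cs in the source list and is not wired up by this commit:

    # Hypothetical: generate Ruby options the same way the C/Java/Python
    # custom commands in this commit do, swapping only the language argument.
    add_custom_command(OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/fdboptions.rb
      COMMAND ${MONO_EXECUTABLE} ${VEXILLOGRAPHER_EXE}
              ${CMAKE_SOURCE_DIR}/fdbclient/vexillographer/fdb.options ruby
              ${CMAKE_CURRENT_BINARY_DIR}/fdboptions.rb
      DEPENDS ${CMAKE_SOURCE_DIR}/fdbclient/vexillographer/fdb.options vexillographer
      COMMENT "Generate Ruby options")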
cmake/ConfigureCompiler.cmake
@@ -0,0 +1,128 @@
set(USE_GPERFTOOLS OFF CACHE BOOL "Use gperftools for profiling")
set(PORTABLE_BINARY OFF CACHE BOOL "Create a binary that runs on older OS versions")
set(USE_VALGRIND OFF CACHE BOOL "Compile for valgrind usage")
set(USE_GOLD_LINKER OFF CACHE BOOL "Use gold linker")
set(ALLOC_INSTRUMENTATION OFF CACHE BOOL "Instrument alloc")
set(WITH_UNDODB OFF CACHE BOOL "Use rr or undodb")
set(OPEN_FOR_IDE OFF CACHE BOOL "Open this in an IDE (won't compile/link)")
set(FDB_RELEASE OFF CACHE BOOL "This is a build of a final release")

find_package(Threads REQUIRED)
if(ALLOC_INSTRUMENTATION)
  add_compile_options(-DALLOC_INSTRUMENTATION)
endif()
if(WITH_UNDODB)
  add_compile_options(-DWITH_UNDODB)
endif()
if(DEBUG_TASKS)
  add_compile_options(-DDEBUG_TASKS)
endif()

if(NDEBUG)
  add_compile_options(-DNDEBUG)
endif()

if(FDB_RELEASE)
  add_compile_options(-DFDB_RELEASE)
endif()

include_directories(${CMAKE_SOURCE_DIR})
include_directories(${CMAKE_CURRENT_BINARY_DIR})
if (NOT OPEN_FOR_IDE)
  add_definitions(-DNO_INTELLISENSE)
endif()
add_definitions(-DUSE_UCONTEXT)
enable_language(ASM)

include(CheckFunctionExists)
set(CMAKE_REQUIRED_INCLUDES stdlib.h malloc.h)
set(CMAKE_REQUIRED_LIBRARIES c)


if(WIN32)
  add_compile_options(/W3 /EHsc)
else()
  if(USE_GOLD_LINKER)
    set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fuse-ld=gold -Wl,--disable-new-dtags")
    set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -fuse-ld=gold -Wl,--disable-new-dtags")
  endif()

  set(GCC NO)
  set(CLANG NO)
  if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang" OR "${CMAKE_CXX_COMPILER_ID}" STREQUAL "AppleClang")
    set(CLANG YES)
  else()
    # This is not a very good test. However, as we do not really support many architectures
    # this is good enough for now
    set(GCC YES)
  endif()

  # we always compile with debug symbols. CPack will strip them out
  # and create a debuginfo rpm
  add_compile_options(-ggdb)
  set(USE_ASAN OFF CACHE BOOL "Compile with address sanitizer")
  if(USE_ASAN)
    add_compile_options(
      -fno-omit-frame-pointer -fsanitize=address
      -DUSE_ASAN)
    set(CMAKE_MODULE_LINKER_FLAGS "${CMAKE_MODULE_LINKER_FLAGS} -fno-omit-frame-pointer -fsanitize=address")
    set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -fno-omit-frame-pointer -fsanitize=address")
    set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fno-omit-frame-pointer -fsanitize=address ${CMAKE_THREAD_LIBS_INIT}")
  endif()

  if(PORTABLE_BINARY)
    message(STATUS "Create a more portable binary")
    set(CMAKE_MODULE_LINKER_FLAGS "-static-libstdc++ -static-libgcc ${CMAKE_MODULE_LINKER_FLAGS}")
    set(CMAKE_SHARED_LINKER_FLAGS "-static-libstdc++ -static-libgcc ${CMAKE_SHARED_LINKER_FLAGS}")
    set(CMAKE_EXE_LINKER_FLAGS "-static-libstdc++ -static-libgcc ${CMAKE_EXE_LINKER_FLAGS}")
  endif()
  # Instruction sets we require to be supported by the CPU
  add_compile_options(
    -maes
    -mmmx
    -mavx
    -msse4.2)
  add_compile_options($<$<COMPILE_LANGUAGE:CXX>:-std=c++11>)
  if (USE_VALGRIND)
    add_compile_options(-DVALGRIND -DUSE_VALGRIND)
  endif()
  if (CLANG)
    if (APPLE)
      add_compile_options(-stdlib=libc++)
    endif()
    add_compile_options(
      -Wno-unknown-warning-option
      -Wno-dangling-else
      -Wno-sign-compare
      -Wno-comment
      -Wno-unknown-pragmas
      -Wno-delete-non-virtual-dtor
      -Wno-undefined-var-template
      -Wno-unused-value
      -Wno-tautological-pointer-compare
      -Wno-format)
  endif()
  if (NOT CMAKE_GENERATOR STREQUAL Xcode)
    add_compile_options(-Werror)
  endif()
  add_compile_options($<$<BOOL:${GCC}>:-Wno-pragmas>)
  add_compile_options(-Wno-error=format
    -Wno-deprecated
    -fvisibility=hidden
    -Wreturn-type
    -fdiagnostics-color=always
    -fPIC)

  if(CMAKE_COMPILER_IS_GNUCXX)
    set(USE_LTO OFF CACHE BOOL "Do link time optimization")
    if (USE_LTO)
      add_compile_options($<$<CONFIG:Release>:-flto>)
      set(CMAKE_AR "gcc-ar")
      set(CMAKE_C_ARCHIVE_CREATE "<CMAKE_AR> qcs <TARGET> <LINK_FLAGS> <OBJECTS>")
      set(CMAKE_C_ARCHIVE_FINISH true)
      set(CMAKE_CXX_ARCHIVE_CREATE "<CMAKE_AR> qcs <TARGET> <LINK_FLAGS> <OBJECTS>")
      set(CMAKE_CXX_ARCHIVE_FINISH true)
    endif()
  endif()
endif()
cmake/FindEditline.cmake
@@ -0,0 +1,16 @@
find_package(Curses)
include(FindPackageHandleStandardArgs)

if(CURSES_FOUND)
  find_path(Editline_INCLUDE_DIR editline/readline.h)
  find_library(Editline_LIBRARY edit)
  find_package_handle_standard_args(
    Editline DEFAULT_MSG Editline_LIBRARY Editline_INCLUDE_DIR)
  if(Editline_FOUND)
    set(Editline_LIBRARIES ${Editline_LIBRARY} ${CURSES_LIBRARIES})
    set(Editline_INCLUDE_DIRS ${Editline_INCLUDE_DIR} ${CURSES_INCLUDE_DIRS})
    mark_as_advanced(Editline_INCLUDE_DIR Editline_LIBRARY)
  endif()
else()
  set(Editline_FOUND False)
endif()
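A sketch of how a target might consume this find module (the fdbcli target name is assumed here purely for illustration):

    # Hypothetical consumer of FindEditline.cmake.
    find_package(Editline)
    if(Editline_FOUND)
      target_include_directories(fdbcli PRIVATE ${Editline_INCLUDE_DIRS})
      target_link_libraries(fdbcli PRIVATE ${Editline_LIBRARIES})
    endif()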
cmake/FindGperftools.cmake
@@ -0,0 +1,51 @@
# Tries to find Gperftools.
#
# Usage of this module as follows:
#
#     find_package(Gperftools)
#
# Variables used by this module, they can change the default behaviour and need
# to be set before calling find_package:
#
#  Gperftools_ROOT_DIR  Set this variable to the root installation of
#                       Gperftools if the module has problems finding
#                       the proper installation path.
#
# Variables defined by this module:
#
#  GPERFTOOLS_FOUND        System has Gperftools libs/headers
#  GPERFTOOLS_LIBRARIES    The Gperftools libraries (tcmalloc & profiler)
#  GPERFTOOLS_INCLUDE_DIR  The location of Gperftools headers

find_library(GPERFTOOLS_TCMALLOC
  NAMES tcmalloc
  HINTS ${Gperftools_ROOT_DIR}/lib)

find_library(GPERFTOOLS_PROFILER
  NAMES profiler
  HINTS ${Gperftools_ROOT_DIR}/lib)

find_library(GPERFTOOLS_TCMALLOC_AND_PROFILER
  NAMES tcmalloc_and_profiler
  HINTS ${Gperftools_ROOT_DIR}/lib)

find_path(GPERFTOOLS_INCLUDE_DIR
  NAMES gperftools/heap-profiler.h
  HINTS ${Gperftools_ROOT_DIR}/include)

set(GPERFTOOLS_LIBRARIES ${GPERFTOOLS_TCMALLOC_AND_PROFILER})

include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(
  Gperftools
  DEFAULT_MSG
  GPERFTOOLS_LIBRARIES
  GPERFTOOLS_INCLUDE_DIR)

mark_as_advanced(
  Gperftools_ROOT_DIR
  GPERFTOOLS_TCMALLOC
  GPERFTOOLS_PROFILER
  GPERFTOOLS_TCMALLOC_AND_PROFILER
  GPERFTOOLS_LIBRARIES
  GPERFTOOLS_INCLUDE_DIR)
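Tying this module back to the USE_GPERFTOOLS option declared in ConfigureCompiler.cmake, a consumer might look like the following sketch (the fdbserver target is assumed for illustration):

    # Hypothetical consumer of FindGperftools.cmake, gated on USE_GPERFTOOLS.
    if(USE_GPERFTOOLS)
      find_package(Gperftools REQUIRED)
      target_include_directories(fdbserver PRIVATE ${GPERFTOOLS_INCLUDE_DIR})
      # GPERFTOOLS_LIBRARIES resolves to the combined tcmalloc_and_profiler library.
      target_link_libraries(fdbserver PRIVATE ${GPERFTOOLS_LIBRARIES})
    endif()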
cmake/FlowCommands.cmake
@@ -0,0 +1,46 @@
macro(actor_set varname srcs)
  set(${varname})
  foreach(src ${srcs})
    set(tmp "${src}")
    if(${src} MATCHES ".*\\.h")
      continue()
    elseif(${src} MATCHES ".*\\.actor\\.cpp")
      string(REPLACE ".actor.cpp" ".actor.g.cpp" tmp ${src})
      set(tmp "${CMAKE_CURRENT_BINARY_DIR}/${tmp}")
    endif()
    set(${varname} "${${varname}};${tmp}")
  endforeach()
endmacro()

set(ACTOR_TARGET_COUNTER "0")
macro(actor_compile target srcs)
  set(options DISABLE_ACTOR_WITHOUT_WAIT)
  set(oneValueArgs)
  set(multiValueArgs)
  cmake_parse_arguments(ACTOR_COMPILE "${options}" "${oneValueArgs}" "${multiValueArgs}" "${ARGN}")
  set(_tmp_out "")
  foreach(src ${srcs})
    set(tmp "")
    if(${src} MATCHES ".*\\.actor\\.h")
      string(REPLACE ".actor.h" ".actor.g.h" tmp ${src})
    elseif(${src} MATCHES ".*\\.actor\\.cpp")
      string(REPLACE ".actor.cpp" ".actor.g.cpp" tmp ${src})
    endif()
    set(actor_compiler_flags "")
    if(ACTOR_COMPILE_DISABLE_ACTOR_WITHOUT_WAIT)
      set(actor_compiler_flags "--disable-actor-without-wait-error")
    endif()
    if(tmp)
      add_custom_command(OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/${tmp}"
        COMMAND ${MONO_EXECUTABLE} ${actor_exe} "${CMAKE_CURRENT_SOURCE_DIR}/${src}" "${CMAKE_CURRENT_BINARY_DIR}/${tmp}" ${actor_compiler_flags} > /dev/null
        DEPENDS "${CMAKE_CURRENT_SOURCE_DIR}/${src}" actorcompiler ${actor_exe}
        COMMENT "Compile actor: ${src}")
      set(_tmp_out "${_tmp_out};${CMAKE_CURRENT_BINARY_DIR}/${tmp}")
    endif()
  endforeach()
  math(EXPR ACTOR_TARGET_COUNTER "${ACTOR_TARGET_COUNTER}+1")
  add_custom_target(${target}_actors_${ACTOR_TARGET_COUNTER} DEPENDS ${_tmp_out})
  add_dependencies(${target} ${target}_actors_${ACTOR_TARGET_COUNTER})
  target_include_directories(${target} PUBLIC ${CMAKE_CURRENT_SOURCE_DIR})
  target_include_directories(${target} PUBLIC ${CMAKE_CURRENT_BINARY_DIR})
endmacro()
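The two macros are designed to be used together: actor_set maps each *.actor.cpp source to the *.actor.g.cpp path the actor compiler will emit into the build tree, and actor_compile registers the custom commands that run the compiler plus a per-target dependency on them. A minimal sketch (file and target names are illustrative):

    # Hypothetical flow-based component using the macros defined above.
    set(MY_SRCS MyComponent.actor.cpp MyComponent.actor.h Helpers.cpp)
    actor_set(MY_BUILD_SRCS "${MY_SRCS}")    # .actor.cpp -> build-tree .actor.g.cpp; headers skipped
    add_library(my_component STATIC ${MY_BUILD_SRCS})
    actor_compile(my_component "${MY_SRCS}") # adds actorcompiler custom commands and dependencies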
cmake/InstallLayout.cmake
@@ -0,0 +1,221 @@
if(NOT INSTALL_LAYOUT)
  set(DEFAULT_INSTALL_LAYOUT "STANDALONE")
endif()
set(INSTALL_LAYOUT "${DEFAULT_INSTALL_LAYOUT}"
  CACHE STRING "Installation directory layout. Options are: TARGZ (as in tar.gz installer), WIN, STANDALONE, RPM, DEB, OSX")

set(DIR_LAYOUT ${INSTALL_LAYOUT})
if(DIR_LAYOUT MATCHES "TARGZ")
  set(DIR_LAYOUT "STANDALONE")
endif()

if(UNIX)
  get_property(LIB64 GLOBAL PROPERTY FIND_LIBRARY_USE_LIB64_PATHS)
  set(FDB_CONFIG_DIR "etc/foundationdb")
  if("${LIB64}" STREQUAL "TRUE")
    set(LIBSUFFIX 64)
  else()
    set(LIBSUFFIX "")
  endif()
  set(FDB_LIB_NOSUFFIX "lib")
  if(DIR_LAYOUT MATCHES "STANDALONE")
    set(FDB_LIB_DIR "lib${LIBSUFFIX}")
    set(FDB_LIBEXEC_DIR "${FDB_LIB_DIR}")
    set(FDB_BIN_DIR "bin")
    set(FDB_SBIN_DIR "sbin")
    set(FDB_INCLUDE_INSTALL_DIR "include")
    set(FDB_PYTHON_INSTALL_DIR "${FDB_LIB_DIR}/python2.7/site-packages/fdb")
    set(FDB_SHARE_DIR "share")
  elseif(DIR_LAYOUT MATCHES "OSX")
    set(CPACK_GENERATOR productbuild)
    set(CPACK_PACKAGING_INSTALL_PREFIX "/")
    set(FDB_LIB_DIR "usr/local/lib")
    set(FDB_LIB_NOSUFFIX "usr/lib")
    set(FDB_LIBEXEC_DIR "usr/local/libexec")
    set(FDB_BIN_DIR "usr/local/bin")
    set(FDB_SBIN_DIR "usr/local/sbin")
    set(FDB_INCLUDE_INSTALL_DIR "usr/local/include")
    set(FDB_PYTHON_INSTALL_DIR "Library/Python/2.7/site-packages/fdb")
    set(FDB_SHARE_DIR "usr/local/share")
  elseif(DIR_LAYOUT MATCHES "WIN")
    # TODO
  else()
    # for deb and rpm
    if(INSTALL_LAYOUT MATCHES "RPM")
      set(CPACK_GENERATOR "RPM")
    else()
      # DEB
      set(CPACK_GENERATOR "DEB")
    endif()
    set(CMAKE_INSTALL_PREFIX "/")
    set(CPACK_PACKAGING_INSTALL_PREFIX "/")
    set(FDB_LIB_DIR "usr/lib${LIBSUFFIX}")
    set(FDB_LIB_NOSUFFIX "usr/lib")
    set(FDB_LIBEXEC_DIR "${FDB_LIB_DIR}")
    set(FDB_BIN_DIR "usr/bin")
    set(FDB_SBIN_DIR "usr/sbin")
    set(FDB_INCLUDE_INSTALL_DIR "usr/include")
    set(FDB_PYTHON_INSTALL_DIR "${FDB_LIB_DIR}/python2.7/site-packages/fdb")
    set(FDB_SHARE_DIR "usr/share")
  endif()
endif()

################################################################################
# Version information
################################################################################

string(REPLACE "." ";" FDB_VERSION_LIST ${FDB_VERSION_PLAIN})
list(GET FDB_VERSION_LIST 0 FDB_MAJOR)
list(GET FDB_VERSION_LIST 1 FDB_MINOR)
list(GET FDB_VERSION_LIST 2 FDB_PATCH)

################################################################################
# General CPack configuration
################################################################################

include(InstallRequiredSystemLibraries)
set(CPACK_PACKAGE_NAME "foundationdb")
set(CPACK_PACKAGE_VENDOR "FoundationDB <fdb-dist@apple.com>")
set(CPACK_PACKAGE_VERSION_MAJOR ${FDB_MAJOR})
set(CPACK_PACKAGE_VERSION_MINOR ${FDB_MINOR})
set(CPACK_PACKAGE_VERSION_PATCH ${FDB_PATCH})
set(CPACK_PACKAGE_DESCRIPTION_FILE ${CMAKE_SOURCE_DIR}/packaging/description)
set(CPACK_PACKAGE_DESCRIPTION_SUMMARY
  "FoundationDB is a scalable, fault-tolerant, ordered key-value store with full ACID transactions.")
set(CPACK_PACKAGE_ICON ${CMAKE_SOURCE_DIR}/packaging/foundationdb.ico)
set(CPACK_PACKAGE_CONTACT "The FoundationDB Community")
set(CPACK_COMPONENT_server_DEPENDS clients)
if (INSTALL_LAYOUT MATCHES "OSX")
  set(CPACK_RESOURCE_FILE_README ${CMAKE_SOURCE_DIR}/packaging/osx/resources/conclusion.rtf)
  set(CPACK_PRODUCTBUILD_RESOURCES_DIR ${CMAKE_SOURCE_DIR}/packaging/osx/resources)
else()
  set(CPACK_RESOURCE_FILE_LICENSE ${CMAKE_SOURCE_DIR}/LICENSE)
  set(CPACK_RESOURCE_FILE_README ${CMAKE_SOURCE_DIR}/README.md)
endif()

################################################################################
# Configuration for RPM
################################################################################

if(INSTALL_LAYOUT MATCHES "RPM")
  set(CPACK_RPM_server_USER_FILELIST
    "%config(noreplace) /etc/foundationdb/foundationdb.conf"
    "%attr(0700,foundationdb,foundationdb) /var/log/foundationdb"
    "%attr(0700, foundationdb, foundationdb) /var/lib/foundationdb")
  set(CPACK_RPM_EXCLUDE_FROM_AUTO_FILELIST_ADDITION
    "/usr/sbin"
    "/usr/share/java"
    "/usr/lib64/python2.7"
    "/usr/lib64/python2.7/site-packages"
    "/var"
    "/var/log"
    "/var/lib"
    "/lib"
    "/lib/systemd"
    "/lib/systemd/system"
    "/etc/rc.d/init.d")
  set(CPACK_RPM_DEBUGINFO_PACKAGE ON)
  set(CPACK_RPM_BUILD_SOURCE_DIRS_PREFIX /usr/src)
  set(CPACK_RPM_COMPONENT_INSTALL ON)
  set(CPACK_RPM_clients_PRE_INSTALL_SCRIPT_FILE
    ${CMAKE_SOURCE_DIR}/packaging/rpm/scripts/preclients.sh)
  set(CPACK_RPM_clients_POST_INSTALL_SCRIPT_FILE
    ${CMAKE_SOURCE_DIR}/packaging/rpm/scripts/postclients.sh)
  set(CPACK_RPM_server_PRE_INSTALL_SCRIPT_FILE
    ${CMAKE_SOURCE_DIR}/packaging/rpm/scripts/preserver.sh)
  set(CPACK_RPM_server_POST_INSTALL_SCRIPT_FILE
    ${CMAKE_SOURCE_DIR}/packaging/rpm/scripts/postserver.sh)
  set(CPACK_RPM_server_PRE_UNINSTALL_SCRIPT_FILE
    ${CMAKE_SOURCE_DIR}/packaging/rpm/scripts/preunserver.sh)
  set(CPACK_RPM_server_PACKAGE_REQUIRES
    "foundationdb-clients = ${FDB_MAJOR}.${FDB_MINOR}.${FDB_PATCH}")
endif()

################################################################################
# Configuration for DEB
################################################################################

if(INSTALL_LAYOUT MATCHES "DEB")
  set(CPACK_DEB_COMPONENT_INSTALL ON)
  set(CPACK_DEBIAN_PACKAGE_SECTION "database")
  set(CPACK_DEBIAN_ENABLE_COMPONENT_DEPENDS ON)

  set(CPACK_DEBIAN_server_PACKAGE_DEPENDS "adduser, libc6 (>= 2.11), python (>= 2.6)")
  set(CPACK_DEBIAN_clients_PACKAGE_DEPENDS "adduser, libc6 (>= 2.11)")
  set(CPACK_DEBIAN_PACKAGE_HOMEPAGE "https://www.foundationdb.org")
  set(CPACK_DEBIAN_clients_PACKAGE_CONTROL_EXTRA
    ${CMAKE_SOURCE_DIR}/packaging/deb/DEBIAN-foundationdb-clients/postinst)
  set(CPACK_DEBIAN_server_PACKAGE_CONTROL_EXTRA
    ${CMAKE_SOURCE_DIR}/packaging/deb/DEBIAN-foundationdb-server/conffiles
    ${CMAKE_SOURCE_DIR}/packaging/deb/DEBIAN-foundationdb-server/preinst
    ${CMAKE_SOURCE_DIR}/packaging/deb/DEBIAN-foundationdb-server/postinst
    ${CMAKE_SOURCE_DIR}/packaging/deb/DEBIAN-foundationdb-server/prerm
    ${CMAKE_SOURCE_DIR}/packaging/deb/DEBIAN-foundationdb-server/postrm)
endif()

################################################################################
# Server configuration
################################################################################

string(RANDOM LENGTH 8 description1)
string(RANDOM LENGTH 8 description2)
set(CLUSTER_DESCRIPTION1 ${description1} CACHE STRING "Cluster description")
set(CLUSTER_DESCRIPTION2 ${description2} CACHE STRING "Cluster description")

install(FILES ${CMAKE_SOURCE_DIR}/packaging/foundationdb.conf
  DESTINATION ${FDB_CONFIG_DIR}
  COMPONENT server)
install(FILES ${CMAKE_SOURCE_DIR}/packaging/argparse.py
  DESTINATION "usr/lib/foundationdb"
  COMPONENT server)
install(FILES ${CMAKE_SOURCE_DIR}/packaging/make_public.py
  DESTINATION "usr/lib/foundationdb")
if((INSTALL_LAYOUT MATCHES "RPM") OR (INSTALL_LAYOUT MATCHES "DEB"))
  file(MAKE_DIRECTORY ${CMAKE_BINARY_DIR}/packaging/foundationdb
    ${CMAKE_BINARY_DIR}/packaging/rpm)
  install(
    DIRECTORY ${CMAKE_BINARY_DIR}/packaging/foundationdb
    DESTINATION "var/log"
    COMPONENT server)
  install(
    DIRECTORY ${CMAKE_BINARY_DIR}/packaging/foundationdb
    DESTINATION "var/lib"
    COMPONENT server)
  execute_process(
    COMMAND pidof systemd
    RESULT_VARIABLE IS_SYSTEMD
    OUTPUT_QUIET
    ERROR_QUIET)
  if(IS_SYSTEMD EQUAL "0")
    configure_file(${CMAKE_SOURCE_DIR}/packaging/rpm/foundationdb.service
      ${CMAKE_BINARY_DIR}/packaging/rpm/foundationdb.service)
    install(FILES ${CMAKE_BINARY_DIR}/packaging/rpm/foundationdb.service
      DESTINATION "lib/systemd/system"
      COMPONENT server)
  else()
    if(INSTALL_LAYOUT MATCHES "RPM")
      install(FILES ${CMAKE_SOURCE_DIR}/packaging/rpm/foundationdb-init
        DESTINATION "etc/rc.d/init.d"
        RENAME "foundationdb"
        COMPONENT server)
    else()
      install(FILES ${CMAKE_SOURCE_DIR}/packaging/deb/foundationdb-init
        DESTINATION "etc/init.d"
        RENAME "foundationdb"
        COMPONENT server)
    endif()
  endif()
endif()

################################################################################
# Helper Macros
################################################################################

macro(install_symlink filepath sympath component)
  install(CODE "execute_process(COMMAND ${CMAKE_COMMAND} -E create_symlink ${filepath} ${sympath})" COMPONENT ${component})
  install(CODE "message(\"-- Created symlink: ${sympath} -> ${filepath}\")")
endmacro()
macro(install_mkdir dirname component)
  install(CODE "execute_process(COMMAND ${CMAKE_COMMAND} -E make_directory ${dirname})" COMPONENT ${component})
  install(CODE "message(\"-- Created directory: ${dirname}\")")
endmacro()
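A sketch of how the helper macros at the end of InstallLayout.cmake might be invoked by a component's install rules (the paths are illustrative, not from this commit):

    # Hypothetical use of the helpers defined above: create the data directory
    # and a compatibility symlink at install time, attributed to the server component.
    install_mkdir("/var/lib/foundationdb/data" server)
    install_symlink("/usr/bin/fdbbackup" "/usr/bin/fdbrestore" server)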
@@ -0,0 +1,2 @@
using @BOOST_TOOLSET@ : : @CMAKE_CXX_COMPILER@ : @BOOST_ADDITIONAL_COMPILE_OPTIOINS@ ;
using python : @PYTHON_VERSION_MAJOR@.@PYTHON_VERSION_MINOR@ : @PYTHON_EXECUTABLE@ : @PYTHON_INCLUDE_DIRS@ ;
@@ -355,22 +355,19 @@ FoundationDB will never use processes on the same machine for the replication of
   FoundationDB replicates data to three machines, and at least three available machines are required to make progress. This is the recommended mode for a cluster of five or more machines in a single datacenter.

 ``three_data_hall`` mode
-  FoundationDB replicates data to three machines, and at least three available machines are required to make progress. Every piece of data that has been committed to storage servers
-  will be replicated onto three different data halls, and the cluster will
-  remain available after losing a single data hall and one machine in another
-  data hall.
+  FoundationDB stores data in triplicate, with one copy on a storage server in each of three data halls. The transaction logs are replicated four times, with two data halls containing two replicas apiece. Four available machines (two in each of two data halls) are therefore required to make progress. This configuration enables the cluster to remain available after losing a single data hall and one machine in another data hall.

 Datacenter-aware mode
 ---------------------

-In addition to the more commonly used modes listed above, this version of FoundationDB has support for redundancy across multiple datacenters. Although data will always be triple replicated in this mode, it may not be replicated across all datacenters.
+In addition to the more commonly used modes listed above, this version of FoundationDB has support for redundancy across multiple datacenters.

 .. note:: When using the datacenter-aware mode, all ``fdbserver`` processes should be passed a valid datacenter identifier on the command line.

 ``three_datacenter`` mode
   *(for 5+ machines in 3 datacenters)*

-  FoundationDB attempts to replicate data across three datacenters and will stay up with only two available. Data is replicated 6 times. For maximum availability, you should use five coordination servers: two in two of the datacenters and one in the third datacenter.
+  FoundationDB attempts to replicate data across three datacenters and will stay up with only two available. Data is replicated 6 times. Transaction logs are stored in the same configuration as the ``three_data_hall`` mode, so commit latencies are tied to the latency between datacenters. For maximum availability, you should use five coordination servers: two in two of the datacenters and one in the third datacenter.

 .. warning:: ``three_datacenter`` mode is not compatible with region configuration.
@@ -10,38 +10,38 @@ macOS

 The macOS installation package is supported on macOS 10.7+. It includes the client and (optionally) the server.

-* `FoundationDB-6.0.17.pkg <https://www.foundationdb.org/downloads/6.0.17/macOS/installers/FoundationDB-6.0.17.pkg>`_
+* `FoundationDB-6.0.18.pkg <https://www.foundationdb.org/downloads/6.0.18/macOS/installers/FoundationDB-6.0.18.pkg>`_

 Ubuntu
 ------

 The Ubuntu packages are supported on 64-bit Ubuntu 12.04+, but beware of the Linux kernel bug in Ubuntu 12.x.

-* `foundationdb-clients-6.0.17-1_amd64.deb <https://www.foundationdb.org/downloads/6.0.17/ubuntu/installers/foundationdb-clients_6.0.17-1_amd64.deb>`_
-* `foundationdb-server-6.0.17-1_amd64.deb <https://www.foundationdb.org/downloads/6.0.17/ubuntu/installers/foundationdb-server_6.0.17-1_amd64.deb>`_ (depends on the clients package)
+* `foundationdb-clients-6.0.18-1_amd64.deb <https://www.foundationdb.org/downloads/6.0.18/ubuntu/installers/foundationdb-clients_6.0.18-1_amd64.deb>`_
+* `foundationdb-server-6.0.18-1_amd64.deb <https://www.foundationdb.org/downloads/6.0.18/ubuntu/installers/foundationdb-server_6.0.18-1_amd64.deb>`_ (depends on the clients package)

 RHEL/CentOS EL6
 ---------------

 The RHEL/CentOS EL6 packages are supported on 64-bit RHEL/CentOS 6.x.

-* `foundationdb-clients-6.0.17-1.el6.x86_64.rpm <https://www.foundationdb.org/downloads/6.0.17/rhel6/installers/foundationdb-clients-6.0.17-1.el6.x86_64.rpm>`_
-* `foundationdb-server-6.0.17-1.el6.x86_64.rpm <https://www.foundationdb.org/downloads/6.0.17/rhel6/installers/foundationdb-server-6.0.17-1.el6.x86_64.rpm>`_ (depends on the clients package)
+* `foundationdb-clients-6.0.18-1.el6.x86_64.rpm <https://www.foundationdb.org/downloads/6.0.18/rhel6/installers/foundationdb-clients-6.0.18-1.el6.x86_64.rpm>`_
+* `foundationdb-server-6.0.18-1.el6.x86_64.rpm <https://www.foundationdb.org/downloads/6.0.18/rhel6/installers/foundationdb-server-6.0.18-1.el6.x86_64.rpm>`_ (depends on the clients package)

 RHEL/CentOS EL7
 ---------------

 The RHEL/CentOS EL7 packages are supported on 64-bit RHEL/CentOS 7.x.

-* `foundationdb-clients-6.0.17-1.el7.x86_64.rpm <https://www.foundationdb.org/downloads/6.0.17/rhel7/installers/foundationdb-clients-6.0.17-1.el7.x86_64.rpm>`_
-* `foundationdb-server-6.0.17-1.el7.x86_64.rpm <https://www.foundationdb.org/downloads/6.0.17/rhel7/installers/foundationdb-server-6.0.17-1.el7.x86_64.rpm>`_ (depends on the clients package)
+* `foundationdb-clients-6.0.18-1.el7.x86_64.rpm <https://www.foundationdb.org/downloads/6.0.18/rhel7/installers/foundationdb-clients-6.0.18-1.el7.x86_64.rpm>`_
+* `foundationdb-server-6.0.18-1.el7.x86_64.rpm <https://www.foundationdb.org/downloads/6.0.18/rhel7/installers/foundationdb-server-6.0.18-1.el7.x86_64.rpm>`_ (depends on the clients package)

 Windows
 -------

 The Windows installer is supported on 64-bit Windows XP and later. It includes the client and (optionally) the server.

-* `foundationdb-6.0.17-x64.msi <https://www.foundationdb.org/downloads/6.0.17/windows/installers/foundationdb-6.0.17-x64.msi>`_
+* `foundationdb-6.0.18-x64.msi <https://www.foundationdb.org/downloads/6.0.18/windows/installers/foundationdb-6.0.18-x64.msi>`_

 API Language Bindings
 =====================
@ -58,18 +58,18 @@ On macOS and Windows, the FoundationDB Python API bindings are installed as part
|
|||
|
||||
If you need to use the FoundationDB Python API from other Python installations or paths, download the Python package:
|
||||
|
||||
* `foundationdb-6.0.17.tar.gz <https://www.foundationdb.org/downloads/6.0.17/bindings/python/foundationdb-6.0.17.tar.gz>`_
|
||||
* `foundationdb-6.0.18.tar.gz <https://www.foundationdb.org/downloads/6.0.18/bindings/python/foundationdb-6.0.18.tar.gz>`_
|
||||
|
||||
Ruby 1.9.3/2.0.0+
|
||||
-----------------
|
||||
|
||||
* `fdb-6.0.17.gem <https://www.foundationdb.org/downloads/6.0.17/bindings/ruby/fdb-6.0.17.gem>`_
|
||||
* `fdb-6.0.18.gem <https://www.foundationdb.org/downloads/6.0.18/bindings/ruby/fdb-6.0.18.gem>`_
|
||||
|
||||
Java 8+
|
||||
-------
|
||||
|
||||
* `fdb-java-6.0.17.jar <https://www.foundationdb.org/downloads/6.0.17/bindings/java/fdb-java-6.0.17.jar>`_
|
||||
* `fdb-java-6.0.17-javadoc.jar <https://www.foundationdb.org/downloads/6.0.17/bindings/java/fdb-java-6.0.17-javadoc.jar>`_
|
||||
* `fdb-java-6.0.18.jar <https://www.foundationdb.org/downloads/6.0.18/bindings/java/fdb-java-6.0.18.jar>`_
|
||||
* `fdb-java-6.0.18-javadoc.jar <https://www.foundationdb.org/downloads/6.0.18/bindings/java/fdb-java-6.0.18-javadoc.jar>`_
|
||||
|
||||
Go 1.1+
|
||||
-------
|
||||
|
|
|
@ -2,6 +2,23 @@
|
|||
Release Notes
|
||||
#############
|
||||
|
||||
6.0.18
|
||||
======
|
||||
|
||||
Fixes
|
||||
-----
|
||||
|
||||
* Backup metadata could falsely indicate that a backup is not usable. `(PR #1007) <https://github.com/apple/foundationdb/pull/1007>`_
|
||||
* Blobstore request failures could cause backup expire and delete operations to skip some files. `(PR #1007) <https://github.com/apple/foundationdb/pull/1007>`_
|
||||
* Blobstore request failures could cause restore to fail to apply some files. `(PR #1007) <https://github.com/apple/foundationdb/pull/1007>`_
|
||||
* Storage servers with large amounts of data would pause for a short period of time after rebooting. `(PR #1001) <https://github.com/apple/foundationdb/pull/1001>`_
|
||||
* The client library could leak memory when a thread died. `(PR #1011) <https://github.com/apple/foundationdb/pull/1011>`_
|
||||
|
||||
Features
|
||||
--------
|
||||
|
||||
* Added the ability to specify versions as version-days ago from latest log in backup. `(PR #1007) <https://github.com/apple/foundationdb/pull/1007>`_
|
||||
|
||||
6.0.17
|
||||
======
|
||||
|
||||
|
|
|
@ -70,6 +70,8 @@ The value for each setting can be specified in more than one way. The actual va
|
|||
2. The value of the environment variable, if one has been set;
|
||||
3. The default value
|
||||
|
||||
For the password, rather than using the command-line option, it is recommended to use the environment variable ``FDB_TLS_PASSWORD``, as command-line options are more visible to other processes running on the same host.
|
||||
|
||||
As with all other command-line options to ``fdbserver``, the TLS settings can be specified in the :ref:`[fdbserver] section of the configuration file <foundationdb-conf-fdbserver>`.
|
||||
|
||||
The settings for certificate file, key file, peer verification, password and CA file are interpreted by the software.
|
||||
|
@ -99,6 +101,17 @@ There is no default password. If no password is specified, it is assumed that th
|
|||
Parameters and client bindings
|
||||
------------------------------
|
||||
|
||||
Automatic TLS certificate refresh
|
||||
------------------------------
|
||||
|
||||
The TLS certificate will be automatically refreshed on a configurable cadence. The server will inspect the CA, certificate, and key files in the specified locations periodically, and will begin using the new versions if following criterion were met:
|
||||
|
||||
* They are changed, judging by the last modified time.
|
||||
* They are valid certificates.
|
||||
* The key file matches the certificate file.
|
||||
|
||||
The refresh rate is controlled by ``--knob_tls_cert_refresh_delay_seconds``. Setting it to 0 will disable the refresh.
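For example, to have the files re-checked roughly once an hour (an illustrative value, not a recommendation), the knob can be passed to ``fdbserver`` like any other knob::

    fdbserver -C fdb.cluster ... --knob_tls_cert_refresh_delay_seconds 3600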

The default LibreSSL-based implementation
=========================================

@@ -197,7 +210,7 @@ Requirements can be placed on the fields of the Issuer and Subject DNs in the pe
Field   Well known name
======= ===================
``CN``  Common Name
``C``   County
``C``   Country
``L``   Locality
``ST``  State
``O``   Organization
@@ -0,0 +1 @@
${CLUSTER_DESCRIPTION1}:${CLUSTER_DESCRIPTION1}@127.0.0.1:4500
@@ -0,0 +1,25 @@
set(FDBBACKUP_SRCS
  backup.actor.cpp)

actor_set(FDBBACKUP_BUILD "${FDBBACKUP_SRCS}")
add_executable(fdbbackup "${FDBBACKUP_BUILD}")
actor_compile(fdbbackup "${FDBBACKUP_SRCS}")
target_link_libraries(fdbbackup PRIVATE fdbclient)

install(TARGETS fdbbackup DESTINATION ${FDB_BIN_DIR} COMPONENT clients)
install(PROGRAMS $<TARGET_FILE:fdbbackup>
  DESTINATION ${FDB_LIB_DIR}/foundationdb/backup_agent
  RENAME backup_agent
  COMPONENT clients)
install(PROGRAMS $<TARGET_FILE:fdbbackup>
  DESTINATION ${FDB_BIN_DIR}
  RENAME fdbrestore
  COMPONENT clients)
install(PROGRAMS $<TARGET_FILE:fdbbackup>
  DESTINATION ${FDB_BIN_DIR}
  RENAME dr_agent
  COMPONENT clients)
install(PROGRAMS $<TARGET_FILE:fdbbackup>
  DESTINATION ${FDB_BIN_DIR}
  RENAME fdbdr
  COMPONENT clients)
@@ -77,7 +77,7 @@ enum enumProgramExe {
};

enum enumBackupType {
BACKUP_UNDEFINED=0, BACKUP_START, BACKUP_STATUS, BACKUP_ABORT, BACKUP_WAIT, BACKUP_DISCONTINUE, BACKUP_PAUSE, BACKUP_RESUME, BACKUP_EXPIRE, BACKUP_DELETE, BACKUP_DESCRIBE, BACKUP_LIST
BACKUP_UNDEFINED=0, BACKUP_START, BACKUP_STATUS, BACKUP_ABORT, BACKUP_WAIT, BACKUP_DISCONTINUE, BACKUP_PAUSE, BACKUP_RESUME, BACKUP_EXPIRE, BACKUP_DELETE, BACKUP_DESCRIBE, BACKUP_LIST, BACKUP_DUMP
};

enum enumDBType {

@@ -92,8 +92,10 @@ enum enumRestoreType {
enum {
// Backup constants
OPT_DESTCONTAINER, OPT_SNAPSHOTINTERVAL, OPT_ERRORLIMIT, OPT_NOSTOPWHENDONE,
OPT_EXPIRE_BEFORE_VERSION, OPT_EXPIRE_BEFORE_DATETIME, OPT_EXPIRE_RESTORABLE_AFTER_VERSION, OPT_EXPIRE_RESTORABLE_AFTER_DATETIME,
OPT_EXPIRE_BEFORE_VERSION, OPT_EXPIRE_BEFORE_DATETIME, OPT_EXPIRE_DELETE_BEFORE_DAYS,
OPT_EXPIRE_RESTORABLE_AFTER_VERSION, OPT_EXPIRE_RESTORABLE_AFTER_DATETIME, OPT_EXPIRE_MIN_RESTORABLE_DAYS,
OPT_BASEURL, OPT_BLOB_CREDENTIALS, OPT_DESCRIBE_DEEP, OPT_DESCRIBE_TIMESTAMPS,
OPT_DUMP_BEGIN, OPT_DUMP_END,

// Backup and Restore constants
OPT_TAGNAME, OPT_BACKUPKEYS, OPT_WAITFORDONE,

@@ -110,7 +112,9 @@ enum {
//DB constants
OPT_SOURCE_CLUSTER,
OPT_DEST_CLUSTER,
OPT_CLEANUP
OPT_CLEANUP,

OPT_TRACE_FORMAT
};

CSimpleOpt::SOption g_rgAgentOptions[] = {

@@ -119,7 +123,6 @@ CSimpleOpt::SOption g_rgAgentOptions[] = {
#endif
{ OPT_CLUSTERFILE, "-C", SO_REQ_SEP },
{ OPT_CLUSTERFILE, "--cluster_file", SO_REQ_SEP },
{ OPT_TRACE_LOG_GROUP, "--loggroup", SO_REQ_SEP },
{ OPT_KNOB, "--knob_", SO_REQ_SEP },
{ OPT_VERSION, "--version", SO_NONE },
{ OPT_VERSION, "-v", SO_NONE },

@@ -127,6 +130,8 @@ CSimpleOpt::SOption g_rgAgentOptions[] = {
{ OPT_QUIET, "--quiet", SO_NONE },
{ OPT_TRACE, "--log", SO_NONE },
{ OPT_TRACE_DIR, "--logdir", SO_REQ_SEP },
{ OPT_TRACE_FORMAT, "--trace_format", SO_REQ_SEP },
{ OPT_TRACE_LOG_GROUP, "--loggroup", SO_REQ_SEP },
{ OPT_CRASHONERROR, "--crash", SO_NONE },
{ OPT_LOCALITY, "--locality_", SO_REQ_SEP },
{ OPT_MEMLIMIT, "-m", SO_REQ_SEP },

@@ -162,6 +167,8 @@ CSimpleOpt::SOption g_rgBackupStartOptions[] = {
{ OPT_DRYRUN, "--dryrun", SO_NONE },
{ OPT_TRACE, "--log", SO_NONE },
{ OPT_TRACE_DIR, "--logdir", SO_REQ_SEP },
{ OPT_TRACE_FORMAT, "--trace_format", SO_REQ_SEP },
{ OPT_TRACE_LOG_GROUP, "--loggroup", SO_REQ_SEP },
{ OPT_QUIET, "-q", SO_NONE },
{ OPT_QUIET, "--quiet", SO_NONE },
{ OPT_VERSION, "--version", SO_NONE },

@@ -191,6 +198,8 @@ CSimpleOpt::SOption g_rgBackupStatusOptions[] = {
{ OPT_TAGNAME, "--tagname", SO_REQ_SEP },
{ OPT_TRACE, "--log", SO_NONE },
{ OPT_TRACE_DIR, "--logdir", SO_REQ_SEP },
{ OPT_TRACE_FORMAT, "--trace_format", SO_REQ_SEP },
{ OPT_TRACE_LOG_GROUP, "--loggroup", SO_REQ_SEP },
{ OPT_VERSION, "--version", SO_NONE },
{ OPT_VERSION, "-v", SO_NONE },
{ OPT_QUIET, "-q", SO_NONE },

@@ -216,6 +225,8 @@ CSimpleOpt::SOption g_rgBackupAbortOptions[] = {
{ OPT_TAGNAME, "--tagname", SO_REQ_SEP },
{ OPT_TRACE, "--log", SO_NONE },
{ OPT_TRACE_DIR, "--logdir", SO_REQ_SEP },
{ OPT_TRACE_FORMAT, "--trace_format", SO_REQ_SEP },
{ OPT_TRACE_LOG_GROUP, "--loggroup", SO_REQ_SEP },
{ OPT_QUIET, "-q", SO_NONE },
{ OPT_QUIET, "--quiet", SO_NONE },
{ OPT_VERSION, "--version", SO_NONE },

@@ -243,6 +254,8 @@ CSimpleOpt::SOption g_rgBackupDiscontinueOptions[] = {
{ OPT_WAITFORDONE, "--waitfordone", SO_NONE },
{ OPT_TRACE, "--log", SO_NONE },
{ OPT_TRACE_DIR, "--logdir", SO_REQ_SEP },
{ OPT_TRACE_FORMAT, "--trace_format", SO_REQ_SEP },
{ OPT_TRACE_LOG_GROUP, "--loggroup", SO_REQ_SEP },
{ OPT_QUIET, "-q", SO_NONE },
{ OPT_QUIET, "--quiet", SO_NONE },
{ OPT_VERSION, "--version", SO_NONE },

@@ -270,6 +283,8 @@ CSimpleOpt::SOption g_rgBackupWaitOptions[] = {
{ OPT_NOSTOPWHENDONE, "--no-stop-when-done",SO_NONE },
{ OPT_TRACE, "--log", SO_NONE },
{ OPT_TRACE_DIR, "--logdir", SO_REQ_SEP },
{ OPT_TRACE_FORMAT, "--trace_format", SO_REQ_SEP },
{ OPT_TRACE_LOG_GROUP, "--loggroup", SO_REQ_SEP },
{ OPT_QUIET, "-q", SO_NONE },
{ OPT_QUIET, "--quiet", SO_NONE },
{ OPT_VERSION, "--version", SO_NONE },

@@ -293,6 +308,8 @@ CSimpleOpt::SOption g_rgBackupPauseOptions[] = {
{ OPT_CLUSTERFILE, "--cluster_file", SO_REQ_SEP },
{ OPT_TRACE, "--log", SO_NONE },
{ OPT_TRACE_DIR, "--logdir", SO_REQ_SEP },
{ OPT_TRACE_FORMAT, "--trace_format", SO_REQ_SEP },
{ OPT_TRACE_LOG_GROUP, "--loggroup", SO_REQ_SEP },
{ OPT_QUIET, "-q", SO_NONE },
{ OPT_QUIET, "--quiet", SO_NONE },
{ OPT_VERSION, "--version", SO_NONE },

@@ -318,6 +335,8 @@ CSimpleOpt::SOption g_rgBackupExpireOptions[] = {
{ OPT_DESTCONTAINER, "--destcontainer", SO_REQ_SEP },
{ OPT_TRACE, "--log", SO_NONE },
{ OPT_TRACE_DIR, "--logdir", SO_REQ_SEP },
{ OPT_TRACE_FORMAT, "--trace_format", SO_REQ_SEP },
{ OPT_TRACE_LOG_GROUP, "--loggroup", SO_REQ_SEP },
{ OPT_QUIET, "-q", SO_NONE },
{ OPT_QUIET, "--quiet", SO_NONE },
{ OPT_VERSION, "-v", SO_NONE },

@@ -337,6 +356,8 @@ CSimpleOpt::SOption g_rgBackupExpireOptions[] = {
{ OPT_EXPIRE_RESTORABLE_AFTER_DATETIME, "--restorable_after_timestamp", SO_REQ_SEP },
{ OPT_EXPIRE_BEFORE_VERSION, "--expire_before_version", SO_REQ_SEP },
{ OPT_EXPIRE_BEFORE_DATETIME, "--expire_before_timestamp", SO_REQ_SEP },
{ OPT_EXPIRE_MIN_RESTORABLE_DAYS, "--min_restorable_days", SO_REQ_SEP },
{ OPT_EXPIRE_DELETE_BEFORE_DAYS, "--delete_before_days", SO_REQ_SEP },

SO_END_OF_OPTIONS
};

@@ -349,6 +370,8 @@ CSimpleOpt::SOption g_rgBackupDeleteOptions[] = {
{ OPT_DESTCONTAINER, "--destcontainer", SO_REQ_SEP },
{ OPT_TRACE, "--log", SO_NONE },
{ OPT_TRACE_DIR, "--logdir", SO_REQ_SEP },
{ OPT_TRACE_FORMAT, "--trace_format", SO_REQ_SEP },
{ OPT_TRACE_LOG_GROUP, "--loggroup", SO_REQ_SEP },
{ OPT_QUIET, "-q", SO_NONE },
{ OPT_QUIET, "--quiet", SO_NONE },
{ OPT_VERSION, "-v", SO_NONE },

@@ -376,6 +399,8 @@ CSimpleOpt::SOption g_rgBackupDescribeOptions[] = {
{ OPT_DESTCONTAINER, "--destcontainer", SO_REQ_SEP },
{ OPT_TRACE, "--log", SO_NONE },
{ OPT_TRACE_DIR, "--logdir", SO_REQ_SEP },
{ OPT_TRACE_FORMAT, "--trace_format", SO_REQ_SEP },
{ OPT_TRACE_LOG_GROUP, "--loggroup", SO_REQ_SEP },
{ OPT_QUIET, "-q", SO_NONE },
{ OPT_QUIET, "--quiet", SO_NONE },
{ OPT_VERSION, "-v", SO_NONE },

@@ -395,6 +420,36 @@ CSimpleOpt::SOption g_rgBackupDescribeOptions[] = {
SO_END_OF_OPTIONS
};

CSimpleOpt::SOption g_rgBackupDumpOptions[] = {
#ifdef _WIN32
{ OPT_PARENTPID, "--parentpid", SO_REQ_SEP },
#endif
{ OPT_CLUSTERFILE, "-C", SO_REQ_SEP },
{ OPT_CLUSTERFILE, "--cluster_file", SO_REQ_SEP },
{ OPT_DESTCONTAINER, "-d", SO_REQ_SEP },
{ OPT_DESTCONTAINER, "--destcontainer", SO_REQ_SEP },
{ OPT_TRACE, "--log", SO_NONE },
{ OPT_TRACE_DIR, "--logdir", SO_REQ_SEP },
{ OPT_TRACE_LOG_GROUP, "--loggroup", SO_REQ_SEP },
{ OPT_QUIET, "-q", SO_NONE },
{ OPT_QUIET, "--quiet", SO_NONE },
{ OPT_VERSION, "-v", SO_NONE },
{ OPT_VERSION, "--version", SO_NONE },
{ OPT_CRASHONERROR, "--crash", SO_NONE },
{ OPT_MEMLIMIT, "-m", SO_REQ_SEP },
{ OPT_MEMLIMIT, "--memory", SO_REQ_SEP },
{ OPT_HELP, "-?", SO_NONE },
{ OPT_HELP, "-h", SO_NONE },
{ OPT_HELP, "--help", SO_NONE },
{ OPT_DEVHELP, "--dev-help", SO_NONE },
{ OPT_BLOB_CREDENTIALS, "--blob_credentials", SO_REQ_SEP },
{ OPT_KNOB, "--knob_", SO_REQ_SEP },
{ OPT_DUMP_BEGIN, "--begin", SO_REQ_SEP },
{ OPT_DUMP_END, "--end", SO_REQ_SEP },

SO_END_OF_OPTIONS
};

CSimpleOpt::SOption g_rgBackupListOptions[] = {
#ifdef _WIN32
{ OPT_PARENTPID, "--parentpid", SO_REQ_SEP },

@@ -403,6 +458,8 @@ CSimpleOpt::SOption g_rgBackupListOptions[] = {
{ OPT_BASEURL, "--base_url", SO_REQ_SEP },
{ OPT_TRACE, "--log", SO_NONE },
{ OPT_TRACE_DIR, "--logdir", SO_REQ_SEP },
{ OPT_TRACE_FORMAT, "--trace_format", SO_REQ_SEP },
{ OPT_TRACE_LOG_GROUP, "--loggroup", SO_REQ_SEP },
{ OPT_QUIET, "-q", SO_NONE },
{ OPT_QUIET, "--quiet", SO_NONE },
{ OPT_VERSION, "-v", SO_NONE },

@@ -440,6 +497,8 @@ CSimpleOpt::SOption g_rgRestoreOptions[] = {
{ OPT_DBVERSION, "-v", SO_REQ_SEP },
{ OPT_TRACE, "--log", SO_NONE },
{ OPT_TRACE_DIR, "--logdir", SO_REQ_SEP },
{ OPT_TRACE_FORMAT, "--trace_format", SO_REQ_SEP },
{ OPT_TRACE_LOG_GROUP, "--loggroup", SO_REQ_SEP },
{ OPT_QUIET, "-q", SO_NONE },
{ OPT_QUIET, "--quiet", SO_NONE },
{ OPT_DRYRUN, "-n", SO_NONE },

@@ -473,6 +532,8 @@ CSimpleOpt::SOption g_rgDBAgentOptions[] = {
{ OPT_QUIET, "--quiet", SO_NONE },
{ OPT_TRACE, "--log", SO_NONE },
{ OPT_TRACE_DIR, "--logdir", SO_REQ_SEP },
{ OPT_TRACE_FORMAT, "--trace_format", SO_REQ_SEP },
{ OPT_TRACE_LOG_GROUP, "--loggroup", SO_REQ_SEP },
{ OPT_CRASHONERROR, "--crash", SO_NONE },
{ OPT_LOCALITY, "--locality_", SO_REQ_SEP },
{ OPT_MEMLIMIT, "-m", SO_REQ_SEP },

@@ -499,6 +560,8 @@ CSimpleOpt::SOption g_rgDBStartOptions[] = {
{ OPT_BACKUPKEYS, "--keys", SO_REQ_SEP },
{ OPT_TRACE, "--log", SO_NONE },
{ OPT_TRACE_DIR, "--logdir", SO_REQ_SEP },
{ OPT_TRACE_FORMAT, "--trace_format", SO_REQ_SEP },
{ OPT_TRACE_LOG_GROUP, "--loggroup", SO_REQ_SEP },
{ OPT_QUIET, "-q", SO_NONE },
{ OPT_QUIET, "--quiet", SO_NONE },
{ OPT_VERSION, "--version", SO_NONE },

@@ -528,6 +591,8 @@ CSimpleOpt::SOption g_rgDBStatusOptions[] = {
{ OPT_TAGNAME, "--tagname", SO_REQ_SEP },
{ OPT_TRACE, "--log", SO_NONE },
{ OPT_TRACE_DIR, "--logdir", SO_REQ_SEP },
{ OPT_TRACE_FORMAT, "--trace_format", SO_REQ_SEP },
{ OPT_TRACE_LOG_GROUP, "--loggroup", SO_REQ_SEP },
{ OPT_VERSION, "--version", SO_NONE },
{ OPT_VERSION, "-v", SO_NONE },
{ OPT_QUIET, "-q", SO_NONE },

@@ -555,6 +620,8 @@ CSimpleOpt::SOption g_rgDBSwitchOptions[] = {
{ OPT_TAGNAME, "--tagname", SO_REQ_SEP },
{ OPT_TRACE, "--log", SO_NONE },
{ OPT_TRACE_DIR, "--logdir", SO_REQ_SEP },
{ OPT_TRACE_FORMAT, "--trace_format", SO_REQ_SEP },
{ OPT_TRACE_LOG_GROUP, "--loggroup", SO_REQ_SEP },
{ OPT_QUIET, "-q", SO_NONE },
{ OPT_QUIET, "--quiet", SO_NONE },
{ OPT_VERSION, "--version", SO_NONE },

@@ -583,6 +650,8 @@ CSimpleOpt::SOption g_rgDBAbortOptions[] = {
{ OPT_TAGNAME, "--tagname", SO_REQ_SEP },
{ OPT_TRACE, "--log", SO_NONE },
{ OPT_TRACE_DIR, "--logdir", SO_REQ_SEP },
{ OPT_TRACE_FORMAT, "--trace_format", SO_REQ_SEP },
{ OPT_TRACE_LOG_GROUP, "--loggroup", SO_REQ_SEP },
{ OPT_QUIET, "-q", SO_NONE },
{ OPT_QUIET, "--quiet", SO_NONE },
{ OPT_VERSION, "--version", SO_NONE },

@@ -608,6 +677,8 @@ CSimpleOpt::SOption g_rgDBPauseOptions[] = {
{ OPT_DEST_CLUSTER, "--destination", SO_REQ_SEP },
{ OPT_TRACE, "--log", SO_NONE },
{ OPT_TRACE_DIR, "--logdir", SO_REQ_SEP },
{ OPT_TRACE_FORMAT, "--trace_format", SO_REQ_SEP },
{ OPT_TRACE_LOG_GROUP, "--loggroup", SO_REQ_SEP },
{ OPT_QUIET, "-q", SO_NONE },
{ OPT_QUIET, "--quiet", SO_NONE },
{ OPT_VERSION, "--version", SO_NONE },
@@ -650,7 +721,7 @@ static void printVersion() {
printf("protocol %llx\n", (long long) currentProtocolVersion);
}

const char *BlobCredentialInfo =
" BLOB CREDENTIALS\n"
" Blob account secret keys can optionally be omitted from blobstore:// URLs, in which case they will be\n"
" loaded, if possible, from 1 or more blob credentials definition files.\n\n"

@@ -677,6 +748,9 @@ static void printAgentUsage(bool devhelp) {
" --logdir PATH Specifies the output directory for trace files. If\n"
" unspecified, defaults to the current directory. Has\n"
" no effect unless --log is specified.\n");
printf(" --trace_format FORMAT\n"
" Select the format of the trace files. xml (the default) and json are supported.\n"
" Has no effect unless --log is specified.\n");
printf(" -m SIZE, --memory SIZE\n"
" Memory limit. The default value is 8GiB. When specified\n"
" without a unit, MiB is assumed.\n");

@@ -725,10 +799,16 @@ static void printBackupUsage(bool devhelp) {
" in the database to obtain a cutoff version very close to the timestamp given in YYYY-MM-DD.HH:MI:SS format (UTC).\n");
printf(" --expire_before_version VERSION\n"
" Version cutoff for expire operations. Deletes data files containing no data at or after VERSION.\n");
printf(" --delete_before_days NUM_DAYS\n"
" Another way to specify the version cutoff for expire operations. Deletes data files containing no data at or after a\n"
" version approximately NUM_DAYS days' worth of versions prior to the latest log version in the backup.\n");
printf(" --restorable_after_timestamp DATETIME\n"
" For expire operations, set minimum acceptable restorability to the version equivalent of DATETIME and later.\n");
printf(" --restorable_after_version VERSION\n"
" For expire operations, set minimum acceptable restorability to the VERSION and later.\n");
printf(" --min_restorable_days NUM_DAYS\n"
" For expire operations, set minimum acceptable restorability to approximately NUM_DAYS days' worth of versions\n"
" prior to the latest log version in the backup.\n");
printf(" --version_timestamps\n");
printf(" For describe operations, lookup versions in the database to obtain timestamps. A cluster file is required.\n");
printf(" -f, --force For expire operations, force expiration even if minimum restorability would be violated.\n");

@@ -737,7 +817,7 @@ static void printBackupUsage(bool devhelp) {
printf(" -e ERRORLIMIT The maximum number of errors printed by status (default is 10).\n");
printf(" -k KEYS List of key ranges to backup.\n"
" If not specified, the entire database will be backed up.\n");
printf(" -n, --dry-run For start or restore operations, performs a trial run with no actual changes made.\n");
printf(" -n, --dryrun For start or restore operations, performs a trial run with no actual changes made.\n");
printf(" -v, --version Print version information and exit.\n");
printf(" -w, --wait Wait for the backup to complete (allowed with `start' and `discontinue').\n");
printf(" -z, --no-stop-when-done\n"

@@ -752,7 +832,7 @@ static void printBackupUsage(bool devhelp) {
printf(" Specify a process after whose termination to exit.\n");
#endif
printf(" --deep For describe operations, do not use cached metadata. Warning: Very slow\n");

}
printf("\n"
" KEYS FORMAT: \"<BEGINKEY> <ENDKEY>\" [...]\n");

@@ -781,7 +861,7 @@ static void printRestoreUsage(bool devhelp ) {
printf(" -k KEYS List of key ranges from the backup to restore\n");
printf(" --remove_prefix PREFIX prefix to remove from the restored keys\n");
printf(" --add_prefix PREFIX prefix to add to the restored keys\n");
printf(" -n, --dry-run Perform a trial run with no changes made.\n");
printf(" -n, --dryrun Perform a trial run with no changes made.\n");
printf(" -v DBVERSION The version at which the database will be restored.\n");
printf(" -h, --help Display this help and exit.\n");

@@ -812,6 +892,9 @@ static void printDBAgentUsage(bool devhelp) {
" --logdir PATH Specifies the output directory for trace files. If\n"
" unspecified, defaults to the current directory. Has\n"
" no effect unless --log is specified.\n");
printf(" --trace_format FORMAT\n"
" Select the format of the trace files. xml (the default) and json are supported.\n"
" Has no effect unless --log is specified.\n");
printf(" -m SIZE, --memory SIZE\n"
" Memory limit. The default value is 8GiB. When specified\n"
" without a unit, MiB is assumed.\n");

@@ -970,6 +1053,7 @@ enumBackupType getBackupType(std::string backupType)
values["delete"] = BACKUP_DELETE;
values["describe"] = BACKUP_DESCRIBE;
values["list"] = BACKUP_LIST;
values["dump"] = BACKUP_DUMP;
}

auto i = values.find(backupType);

@@ -1730,11 +1814,10 @@ ACTOR Future<Void> changeDBBackupResumed(Database src, Database dest, bool pause
return Void();
}

ACTOR Future<Void> runRestore(Database db, std::string tagName, std::string container, Standalone<VectorRef<KeyRangeRef>> ranges, Version dbVersion, bool performRestore, bool verbose, bool waitForDone, std::string addPrefix, std::string removePrefix) {
ACTOR Future<Void> runRestore(Database db, std::string tagName, std::string container, Standalone<VectorRef<KeyRangeRef>> ranges, Version targetVersion, bool performRestore, bool verbose, bool waitForDone, std::string addPrefix, std::string removePrefix) {
try
{
state FileBackupAgent backupAgent;
state int64_t restoreVersion = -1;

if(ranges.size() > 1) {
fprintf(stderr, "Currently only a single restore range is supported!\n");

@@ -1743,52 +1826,45 @@ ACTOR Future<Void> runRestore(Database db, std::string tagName, std::string cont

state KeyRange range = (ranges.size() == 0) ? normalKeys : ranges.front();

if (performRestore) {
if(dbVersion == invalidVersion) {
BackupDescription desc = wait(IBackupContainer::openContainer(container)->describeBackup());
if(!desc.maxRestorableVersion.present()) {
fprintf(stderr, "The specified backup is not restorable to any version.\n");
throw restore_error();
}
state Reference<IBackupContainer> bc = IBackupContainer::openContainer(container);

dbVersion = desc.maxRestorableVersion.get();
// If targetVersion is unset then use the maximum restorable version from the backup description
if(targetVersion == invalidVersion) {
if(verbose)
printf("No restore target version given, will use maximum restorable version from backup description.\n");

BackupDescription desc = wait(bc->describeBackup());

if(!desc.maxRestorableVersion.present()) {
fprintf(stderr, "The specified backup is not restorable to any version.\n");
throw restore_error();
}

targetVersion = desc.maxRestorableVersion.get();

if(verbose)
printf("Using target restore version %lld\n", targetVersion);
}

if (performRestore) {
Version restoredVersion = wait(backupAgent.restore(db, KeyRef(tagName), KeyRef(container), waitForDone, targetVersion, verbose, range, KeyRef(addPrefix), KeyRef(removePrefix)));

if(waitForDone && verbose) {
// If restore is now complete then report version restored
printf("Restored to version %lld\n", restoredVersion);
}
Version _restoreVersion = wait(backupAgent.restore(db, KeyRef(tagName), KeyRef(container), waitForDone, dbVersion, verbose, range, KeyRef(addPrefix), KeyRef(removePrefix)));
restoreVersion = _restoreVersion;
}
else {
state Reference<IBackupContainer> bc = IBackupContainer::openContainer(container);
state BackupDescription description = wait(bc->describeBackup());
state Optional<RestorableFileSet> rset = wait(bc->getRestoreSet(targetVersion));

if(dbVersion <= 0) {
wait(description.resolveVersionTimes(db));
if(description.maxRestorableVersion.present())
restoreVersion = description.maxRestorableVersion.get();
else {
fprintf(stderr, "Backup is not restorable\n");
throw restore_invalid_version();
}
}
else
restoreVersion = dbVersion;

state Optional<RestorableFileSet> rset = wait(bc->getRestoreSet(restoreVersion));
if(!rset.present()) {
fprintf(stderr, "Insufficient data to restore to version %lld\n", restoreVersion);
fprintf(stderr, "Insufficient data to restore to version %lld. Describe backup for more information.\n", targetVersion);
throw restore_invalid_version();
}

// Display the restore information, if requested
if (verbose) {
printf("[DRY RUN] Restoring backup to version: %lld\n", (long long) restoreVersion);
printf("%s\n", description.toString().c_str());
}
printf("Backup can be used to restore to version %lld\n", targetVersion);
}

if(waitForDone && verbose) {
// If restore completed then report version restored
printf("Restored to version %lld%s\n", (long long) restoreVersion, (performRestore) ? "" : " (DRY RUN)");
}
}
catch (Error& e) {
if(e.code() == error_code_actor_cancelled)

@@ -1824,6 +1900,33 @@ Reference<IBackupContainer> openBackupContainer(const char *name, std::string de
return c;
}

ACTOR Future<Void> dumpBackupData(const char *name, std::string destinationContainer, Version beginVersion, Version endVersion) {
state Reference<IBackupContainer> c = openBackupContainer(name, destinationContainer);

if(beginVersion < 0 || endVersion < 0) {
BackupDescription desc = wait(c->describeBackup());

if(!desc.maxLogEnd.present()) {
fprintf(stderr, "ERROR: Backup must have log data in order to use relative begin/end versions.\n");
throw backup_invalid_info();
}

if(beginVersion < 0) {
beginVersion += desc.maxLogEnd.get();
}

if(endVersion < 0) {
endVersion += desc.maxLogEnd.get();
}
}

printf("Scanning version range %lld to %lld\n", beginVersion, endVersion);
BackupFileList files = wait(c->dumpFileList(beginVersion, endVersion));
files.toStream(stdout);

return Void();
}
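For reference, a hypothetical invocation of the new dump action wired up above (the flag names come from g_rgBackupDumpOptions; the container URL is invented):

    fdbbackup dump -d blobstore://host/container --begin 7days --end 1d

The day-suffixed values are turned into negative versions by parseVersion (below) and then resolved against the backup's maximum log version by dumpBackupData.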

ACTOR Future<Void> expireBackupData(const char *name, std::string destinationContainer, Version endVersion, std::string endDatetime, Database db, bool force, Version restorableAfterVersion, std::string restorableAfterDatetime) {
if (!endDatetime.empty()) {
Version v = wait( timeKeeperVersionFromDatetime(endDatetime, db) );

@@ -1843,8 +1946,35 @@ ACTOR Future<Void> expireBackupData(const char *name, std::string destinationCon

try {
Reference<IBackupContainer> c = openBackupContainer(name, destinationContainer);
wait(c->expireData(endVersion, force, restorableAfterVersion));
printf("All data before version %lld is deleted.\n", endVersion);

state IBackupContainer::ExpireProgress progress;
state std::string lastProgress;
state Future<Void> expire = c->expireData(endVersion, force, &progress, restorableAfterVersion);

loop {
choose {
when(wait(delay(5))) {
std::string p = progress.toString();
if(p != lastProgress) {
int spaces = lastProgress.size() - p.size();
printf("\r%s%s", p.c_str(), (spaces > 0 ? std::string(spaces, ' ').c_str() : "") );
lastProgress = p;
}
}
when(wait(expire)) {
break;
}
}
}

std::string p = progress.toString();
int spaces = lastProgress.size() - p.size();
printf("\r%s%s\n", p.c_str(), (spaces > 0 ? std::string(spaces, ' ').c_str() : "") );

if(endVersion < 0)
printf("All data before %lld versions (%lld days) prior to latest backup log has been deleted.\n", -endVersion, -endVersion / ((int64_t)24 * 3600 * CLIENT_KNOBS->CORE_VERSIONSPERSECOND));
else
printf("All data before version %lld has been deleted.\n", endVersion);
}
catch (Error& e) {
if(e.code() == error_code_actor_cancelled)

@@ -1865,17 +1995,24 @@ ACTOR Future<Void> deleteBackupContainer(const char *name, std::string destinati
state int numDeleted = 0;
state Future<Void> done = c->deleteContainer(&numDeleted);

state int lastUpdate = -1;
printf("Deleting %s...\n", destinationContainer.c_str());

loop {
choose {
when ( wait(done) ) {
printf("The entire container has been deleted.\n");
break;
}
when ( wait(delay(3)) ) {
printf("%d files have been deleted so far...\n", numDeleted);
when ( wait(delay(5)) ) {
if(numDeleted != lastUpdate) {
printf("\r%d...", numDeleted);
lastUpdate = numDeleted;
}
}
}
}
printf("\r%d objects deleted\n", numDeleted);
printf("The entire container has been deleted.\n");
}
catch (Error& e) {
if(e.code() == error_code_actor_cancelled)

@@ -2073,6 +2210,26 @@ static void addKeyRange(std::string optionValue, Standalone<VectorRef<KeyRangeRe
return;
}

Version parseVersion(const char *str) {
StringRef s((const uint8_t *)str, strlen(str));

if(s.endsWith(LiteralStringRef("days")) || s.endsWith(LiteralStringRef("d"))) {
float days;
if(sscanf(str, "%f", &days) != 1) {
fprintf(stderr, "Could not parse version: %s\n", str);
flushAndExit(FDB_EXIT_ERROR);
}
return (double)CLIENT_KNOBS->CORE_VERSIONSPERSECOND * 24 * 3600 * -days;
}

Version ver;
if(sscanf(str, "%lld", &ver) != 1) {
fprintf(stderr, "Could not parse version: %s\n", str);
flushAndExit(FDB_EXIT_ERROR);
}
return ver;
}
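A compilable sketch of the same day-suffix convention, for illustration only; it hard-codes 1,000,000 versions per second, which is an assumption (the real function uses CLIENT_KNOBS->CORE_VERSIONSPERSECOND):

    #include <cstdint>
    #include <cstdio>

    // Convert a day count into the negative, relative version that parseVersion
    // produces for inputs like "7days" or "0.5d". Negative values are later
    // resolved against the backup's maximum log version.
    static int64_t relativeVersionFromDays(double days) {
        const int64_t versionsPerSecond = 1000000; // assumed CORE_VERSIONSPERSECOND
        return static_cast<int64_t>(-days * 24 * 3600 * versionsPerSecond);
    }

    int main() {
        printf("%lld\n", (long long)relativeVersionFromDays(7));   // -604800000000
        printf("%lld\n", (long long)relativeVersionFromDays(0.5)); // -43200000000
        return 0;
    }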

#ifdef ALLOC_INSTRUMENTATION
extern uint8_t *g_extra_memory;
#endif

@@ -2151,6 +2308,9 @@ int main(int argc, char* argv[]) {
case BACKUP_DESCRIBE:
args = new CSimpleOpt(argc - 1, &argv[1], g_rgBackupDescribeOptions, SO_O_EXACT);
break;
case BACKUP_DUMP:
args = new CSimpleOpt(argc - 1, &argv[1], g_rgBackupDumpOptions, SO_O_EXACT);
break;
case BACKUP_LIST:
args = new CSimpleOpt(argc - 1, &argv[1], g_rgBackupListOptions, SO_O_EXACT);
break;

@@ -2288,10 +2448,12 @@ int main(int argc, char* argv[]) {
uint64_t memLimit = 8LL << 30;
Optional<uint64_t> ti;
std::vector<std::string> blobCredentials;
Version dumpBegin = 0;
Version dumpEnd = std::numeric_limits<Version>::max();

if( argc == 1 ) {
printUsage(programExe, false);
return FDB_EXIT_ERROR;
}

#ifdef _WIN32

@@ -2375,6 +2537,11 @@ int main(int argc, char* argv[]) {
trace = true;
traceDir = args->OptionArg();
break;
case OPT_TRACE_FORMAT:
if (!selectTraceFormatter(args->OptionArg())) {
fprintf(stderr, "WARNING: Unrecognized trace format `%s'\n", args->OptionArg());
}
break;
case OPT_TRACE_LOG_GROUP:
traceLogGroup = args->OptionArg();
break;

@@ -2397,6 +2564,8 @@ int main(int argc, char* argv[]) {
break;
case OPT_EXPIRE_BEFORE_VERSION:
case OPT_EXPIRE_RESTORABLE_AFTER_VERSION:
case OPT_EXPIRE_MIN_RESTORABLE_DAYS:
case OPT_EXPIRE_DELETE_BEFORE_DAYS:
{
const char* a = args->OptionArg();
long long ver = 0;

@@ -2405,7 +2574,13 @@ int main(int argc, char* argv[]) {
printHelpTeaser(argv[0]);
return FDB_EXIT_ERROR;
}
if(optId == OPT_EXPIRE_BEFORE_VERSION)

// Interpret the value as days worth of versions relative to now (negative)
if(optId == OPT_EXPIRE_MIN_RESTORABLE_DAYS || optId == OPT_EXPIRE_DELETE_BEFORE_DAYS) {
ver = -ver * 24 * 60 * 60 * CLIENT_KNOBS->CORE_VERSIONSPERSECOND;
}

if(optId == OPT_EXPIRE_BEFORE_VERSION || optId == OPT_EXPIRE_DELETE_BEFORE_DAYS)
expireVersion = ver;
else
expireRestorableAfterVersion = ver;

@@ -2537,6 +2712,12 @@ int main(int argc, char* argv[]) {
case OPT_BLOB_CREDENTIALS:
blobCredentials.push_back(args->OptionArg());
break;
case OPT_DUMP_BEGIN:
dumpBegin = parseVersion(args->OptionArg());
break;
case OPT_DUMP_END:
dumpEnd = parseVersion(args->OptionArg());
break;
}
}

@@ -2632,7 +2813,7 @@ int main(int argc, char* argv[]) {
for(auto k=knobs.begin(); k!=knobs.end(); ++k) {
try {
if (!flowKnobs->setKnob( k->first, k->second ) &&
!clientKnobs->setKnob( k->first, k->second ))
{
fprintf(stderr, "Unrecognized knob option '%s'\n", k->first.c_str());
return FDB_EXIT_ERROR;

@@ -2700,7 +2881,7 @@ int main(int argc, char* argv[]) {
.trackLatest("ProgramStart");

// Ordinarily, this is done when the network is run. However, network thread should be set before TraceEvents are logged. This thread will eventually run the network, so call it now.
TraceEvent::setNetworkThread();

// Add blob credentials files from the environment to the list collected from the command line.
const char *blobCredsFromENV = getenv("FDB_BLOB_CREDENTIALS");

@@ -2852,11 +3033,17 @@ int main(int argc, char* argv[]) {
// Only pass the database option. Describe will look up version timestamps if a cluster file was given, but quietly skip them if not.
f = stopAfter( describeBackup(argv[0], destinationContainer, describeDeep, describeTimestamps ? Optional<Database>(db) : Optional<Database>()) );
break;

case BACKUP_LIST:
initTraceFile();
f = stopAfter( listBackup(baseUrl) );
break;

case BACKUP_DUMP:
initTraceFile();
f = stopAfter( dumpBackupData(argv[0], destinationContainer, dumpBegin, dumpEnd) );
break;

case BACKUP_UNDEFINED:
default:
fprintf(stderr, "ERROR: Unsupported backup action %s\n", argv[1]);

@@ -2867,8 +3054,13 @@ int main(int argc, char* argv[]) {

break;
case EXE_RESTORE:
if(!initCluster())
if(dryRun) {
initTraceFile();
}
else if(!initCluster()) {
return FDB_EXIT_ERROR;
}

switch(restoreType) {
case RESTORE_START:
f = stopAfter( runRestore(db, tagName, restoreContainer, backupKeys, dbVersion, !dryRun, !quietDisplay, waitForDone, addPrefix, removePrefix) );

@@ -2883,7 +3075,7 @@ int main(int argc, char* argv[]) {
}) );
break;
case RESTORE_STATUS:

// If no tag is specifically provided then print all tag status, don't just use "default"
if(tagProvided)
tag = tagName;

@@ -3004,5 +3196,5 @@ int main(int argc, char* argv[]) {
status = FDB_EXIT_MAIN_EXCEPTION;
}

return status;
flushAndExit(status);
}

@@ -0,0 +1,13 @@
set(FDBCLI_SRCS
  fdbcli.actor.cpp
  FlowLineNoise.actor.cpp
  FlowLineNoise.h
  linenoise/linenoise.c
  linenoise/linenoise.h)

actor_set(FDBCLI_BUILD "${FDBCLI_SRCS}")
add_executable(fdbcli "${FDBCLI_BUILD}")
actor_compile(fdbcli "${FDBCLI_SRCS}")
target_link_libraries(fdbcli PRIVATE fdbclient)

install(TARGETS fdbcli DESTINATION ${FDB_BIN_DIR} COMPONENT clients)

@@ -57,7 +57,7 @@ extern const char* getHGVersion();

std::vector<std::string> validOptions;

enum { OPT_CONNFILE, OPT_DATABASE, OPT_HELP, OPT_TRACE, OPT_TRACE_DIR, OPT_TIMEOUT, OPT_EXEC, OPT_NO_STATUS, OPT_STATUS_FROM_JSON, OPT_VERSION };
enum { OPT_CONNFILE, OPT_DATABASE, OPT_HELP, OPT_TRACE, OPT_TRACE_DIR, OPT_TIMEOUT, OPT_EXEC, OPT_NO_STATUS, OPT_STATUS_FROM_JSON, OPT_VERSION, OPT_TRACE_FORMAT };

CSimpleOpt::SOption g_rgOptions[] = {
{ OPT_CONNFILE, "-C", SO_REQ_SEP },

@@ -74,6 +74,7 @@ CSimpleOpt::SOption g_rgOptions[] = {
{ OPT_STATUS_FROM_JSON, "--status-from-json", SO_REQ_SEP },
{ OPT_VERSION, "--version", SO_NONE },
{ OPT_VERSION, "-v", SO_NONE },
{ OPT_TRACE_FORMAT, "--trace_format", SO_REQ_SEP },

#ifndef TLS_DISABLED
TLS_OPTION_FLAGS

@@ -135,7 +136,7 @@ public:
//Applies all enabled transaction options to the given transaction
void apply(Reference<ReadYourWritesTransaction> tr) {
for(auto itr = transactionOptions.options.begin(); itr != transactionOptions.options.end(); ++itr)
tr->setOption(itr->first, itr->second.cast_to<StringRef>());
tr->setOption(itr->first, itr->second.castTo<StringRef>());
}

//Returns true if any options have been set

@@ -173,7 +174,7 @@ private:
if(intrans)
tr->setOption(option, arg);

transactionOptions.setOption(option, enabled, arg.cast_to<StringRef>());
transactionOptions.setOption(option, enabled, arg.castTo<StringRef>());
}

//A group of enabled options (of type T::Option) as well as a legal options map from string to T::Option

@@ -188,8 +189,8 @@ private:
//Enable or disable an option. Returns true if option value changed
bool setOption(typename T::Option option, bool enabled, Optional<StringRef> arg) {
auto optionItr = options.find(option);
if(enabled && (optionItr == options.end() || Optional<Standalone<StringRef>>(optionItr->second).cast_to< StringRef >() != arg)) {
options[option] = arg.cast_to<Standalone<StringRef>>();
if(enabled && (optionItr == options.end() || Optional<Standalone<StringRef>>(optionItr->second).castTo< StringRef >() != arg)) {
options[option] = arg.castTo<Standalone<StringRef>>();
return true;
}
else if(!enabled && optionItr != options.end()) {

@@ -401,6 +402,9 @@ static void printProgramUsage(const char* name) {
" --log-dir PATH Specifies the output directory for trace files. If\n"
" unspecified, defaults to the current directory. Has\n"
" no effect unless --log is specified.\n"
" --trace_format FORMAT\n"
" Select the format of the log files. xml (the default) and json\n"
" are supported. Has no effect unless --log is specified.\n"
" --exec CMDS Immediately executes the semicolon separated CLI commands\n"
" and then exits.\n"
" --no-status Disables the initial status check done when starting\n"

@@ -2331,6 +2335,11 @@ struct CLIOptions {
return 0;
case OPT_STATUS_FROM_JSON:
return printStatusFromJSON(args.OptionArg());
case OPT_TRACE_FORMAT:
if (!selectTraceFormatter(args.OptionArg())) {
fprintf(stderr, "WARNING: Unrecognized trace format `%s'\n", args.OptionArg());
}
break;
case OPT_VERSION:
printVersion();
return FDB_EXIT_SUCCESS;

@@ -276,7 +276,7 @@ public:

// stopWhenDone will return when the backup is stopped, if enabled. Otherwise, it
// will return when the backup directory is restorable.
Future<int> waitBackup(Database cx, std::string tagName, bool stopWhenDone = true);
Future<int> waitBackup(Database cx, std::string tagName, bool stopWhenDone = true, Reference<IBackupContainer> *pContainer = nullptr, UID *pUID = nullptr);

static const Key keyLastRestorable;

@@ -415,7 +415,7 @@ struct RCGroup {

template <class Ar>
void serialize(Ar& ar) {
ar & items & version & groupKey;
serializer(ar, items, version, groupKey);
}
};

@@ -47,6 +47,26 @@ Future<Void> IBackupFile::appendStringRefWithLen(Standalone<StringRef> s) {
return IBackupFile_impl::appendStringRefWithLen(Reference<IBackupFile>::addRef(this), s);
}

std::string IBackupContainer::ExpireProgress::toString() const {
std::string s = step + "...";
if(total > 0) {
s += format("%d/%d (%.2f%%)", done, total, double(done) / total * 100);
}
return s;
}

void BackupFileList::toStream(FILE *fout) const {
for(const RangeFile &f : ranges) {
fprintf(fout, "range %lld %s\n", f.fileSize, f.fileName.c_str());
}
for(const LogFile &f : logs) {
fprintf(fout, "log %lld %s\n", f.fileSize, f.fileName.c_str());
}
for(const KeyspaceSnapshotFile &f : snapshots) {
fprintf(fout, "snapshotManifest %lld %s\n", f.totalSize, f.fileName.c_str());
}
}
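Illustrative toStream output (the sizes and file names here are invented; only the range/log/snapshotManifest prefixes come from the code above):

    range 1048576 snapshots/snapshot.000001/range,2000,aaaa,1048576
    log 524288 logs/0000/0000/log,1000,2000,bbbb,524288
    snapshotManifest 1572864 snapshots/snapshot,1000,2000,1572864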

std::string formatTime(int64_t t) {
time_t curTime = (time_t)t;
char buffer[128];

@@ -110,6 +130,10 @@ std::string BackupDescription::toString() const {
else
s = format("%lld (unknown)", v);
}
else if(maxLogEnd.present()) {
double days = double(maxLogEnd.get() - v) / (CLIENT_KNOBS->CORE_VERSIONSPERSECOND * 24 * 60 * 60);
s = format("%lld (maxLogEnd %s%.2f days)", v, days < 0 ? "+" : "-", days);
}
else {
s = format("%lld", v);
}

@@ -123,6 +147,10 @@ std::string BackupDescription::toString() const {

info.append(format("SnapshotBytes: %lld\n", snapshotBytes));

if(expiredEndVersion.present())
info.append(format("ExpiredEndVersion: %s\n", formatVersion(expiredEndVersion.get()).c_str()));
if(unreliableEndVersion.present())
info.append(format("UnreliableEndVersion: %s\n", formatVersion(unreliableEndVersion.get()).c_str()));
if(minLogBegin.present())
info.append(format("MinLogBeginVersion: %s\n", formatVersion(minLogBegin.get()).c_str()));
if(contiguousLogEnd.present())

@@ -184,6 +212,7 @@ public:

// Create the container
virtual Future<Void> create() = 0;
virtual Future<bool> exists() = 0;

// Get a list of fileNames and their sizes in the container under the given path
// Although not required, an implementation can avoid traversing unwanted subfolders

@@ -332,14 +361,34 @@ public:
throw restore_corrupted_data();

std::vector<RangeFile> results;
int missing = 0;

for(auto const &fileValue : filesArray.get_array()) {
if(fileValue.type() != json_spirit::str_type)
throw restore_corrupted_data();
auto i = rangeIndex.find(fileValue.get_str());
if(i == rangeIndex.end())
throw restore_corrupted_data();

results.push_back(i->second);
// If the file is not in the index then log the error but don't throw yet, keep checking the whole list.
auto i = rangeIndex.find(fileValue.get_str());
if(i == rangeIndex.end()) {
TraceEvent(SevError, "FileRestoreMissingRangeFile")
.detail("URL", bc->getURL())
.detail("File", fileValue.get_str());

++missing;
}

// No point in using more memory once data is missing since an error will be thrown instead.
if(missing == 0) {
results.push_back(i->second);
}
}

if(missing > 0) {
TraceEvent(SevError, "FileRestoreMissingRangeFileSummary")
.detail("URL", bc->getURL())
.detail("Count", missing);

throw restore_missing_data();
}

return results;

@@ -396,8 +445,7 @@ public:
return writeKeyspaceSnapshotFile_impl(Reference<BackupContainerFileSystem>::addRef(this), fileNames, totalBytes);
};

// List log files which contain data at any version >= beginVersion and <= targetVersion
// Lists files in sorted order by begin version. Does not check that results are non overlapping or contiguous.
// List log files, unsorted, which contain data at any version >= beginVersion and <= targetVersion
Future<std::vector<LogFile>> listLogFiles(Version beginVersion = 0, Version targetVersion = std::numeric_limits<Version>::max()) {
// The first relevant log file could have a begin version less than beginVersion based on the knobs which determine log file range size,
// so start at an earlier version adjusted by how many versions a file could contain.

@@ -423,12 +471,11 @@ public:
if(pathToLogFile(lf, f.first, f.second) && lf.endVersion > beginVersion && lf.beginVersion <= targetVersion)
results.push_back(lf);
}
std::sort(results.begin(), results.end());
return results;
});
}

// List range files which contain data at or between beginVersion and endVersion
// List range files, unsorted, which contain data at or between beginVersion and endVersion
// NOTE: This reads the range file folder schema from FDB 6.0.15 and earlier and is provided for backward compatibility
Future<std::vector<RangeFile>> old_listRangeFiles(Version beginVersion, Version endVersion) {
// Get the cleaned (without slashes) first and last folders that could contain relevant results.

@@ -454,7 +501,7 @@ public:
});
}

// List range files, sorted in version order, which contain data at or between beginVersion and endVersion
// List range files, unsorted, which contain data at or between beginVersion and endVersion
// Note: The contents of each top level snapshot.N folder do not necessarily constitute a valid snapshot
// and therefore listing files is not how RestoreSets are obtained.
// Note: Snapshots partially written using FDB versions prior to 6.0.16 will have some range files stored

@@ -483,18 +530,17 @@ public:
std::vector<RangeFile> results = std::move(newFiles.get());
std::vector<RangeFile> oldResults = std::move(oldFiles.get());
results.insert(results.end(), std::make_move_iterator(oldResults.begin()), std::make_move_iterator(oldResults.end()));
std::sort(results.begin(), results.end());
return results;
});
}

// List snapshots which have been fully written, in sorted beginVersion order.
Future<std::vector<KeyspaceSnapshotFile>> listKeyspaceSnapshots() {
// List snapshots which have been fully written, in sorted beginVersion order, which start before end and finish on or after begin
Future<std::vector<KeyspaceSnapshotFile>> listKeyspaceSnapshots(Version begin = 0, Version end = std::numeric_limits<Version>::max()) {
return map(listFiles("snapshots/"), [=](const FilesAndSizesT &files) {
std::vector<KeyspaceSnapshotFile> results;
KeyspaceSnapshotFile sf;
for(auto &f : files) {
if(pathToKeyspaceSnapshotFile(sf, f.first))
if(pathToKeyspaceSnapshotFile(sf, f.first) && sf.beginVersion < end && sf.endVersion >= begin)
results.push_back(sf);
}
std::sort(results.begin(), results.end());

@@ -502,50 +548,144 @@ public:
});
}

ACTOR static Future<FullBackupListing> dumpFileList_impl(Reference<BackupContainerFileSystem> bc) {
state Future<std::vector<RangeFile>> fRanges = bc->listRangeFiles(0, std::numeric_limits<Version>::max());
state Future<std::vector<KeyspaceSnapshotFile>> fSnapshots = bc->listKeyspaceSnapshots();
state Future<std::vector<LogFile>> fLogs = bc->listLogFiles(0, std::numeric_limits<Version>::max());
ACTOR static Future<BackupFileList> dumpFileList_impl(Reference<BackupContainerFileSystem> bc, Version begin, Version end) {
state Future<std::vector<RangeFile>> fRanges = bc->listRangeFiles(begin, end);
state Future<std::vector<KeyspaceSnapshotFile>> fSnapshots = bc->listKeyspaceSnapshots(begin, end);
state Future<std::vector<LogFile>> fLogs = bc->listLogFiles(begin, end);

wait(success(fRanges) && success(fSnapshots) && success(fLogs));
return FullBackupListing({fRanges.get(), fLogs.get(), fSnapshots.get()});

return BackupFileList({fRanges.get(), fLogs.get(), fSnapshots.get()});
}

Future<FullBackupListing> dumpFileList() {
return dumpFileList_impl(Reference<BackupContainerFileSystem>::addRef(this));
Future<BackupFileList> dumpFileList(Version begin, Version end) {
return dumpFileList_impl(Reference<BackupContainerFileSystem>::addRef(this), begin, end);
}

ACTOR static Future<BackupDescription> describeBackup_impl(Reference<BackupContainerFileSystem> bc, bool deepScan) {
static Version resolveRelativeVersion(Optional<Version> max, Version v, const char *name, Error e) {
if(v == invalidVersion) {
TraceEvent(SevError, "BackupExpireInvalidVersion").detail(name, v);
throw e;
}
if(v < 0) {
if(!max.present()) {
TraceEvent(SevError, "BackupExpireCannotResolveRelativeVersion").detail(name, v);
throw e;
}
v += max.get();
}
return v;
}
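Concretely, a relative version produced by parseVersion is resolved by adding it to the maximum known log version; made-up numbers for illustration:

    // maxLogEnd = 900000000000, v = -604800000000 ("7days" at an assumed 1,000,000 versions/sec)
    // resolveRelativeVersion(maxLogEnd, v, ...) == 900000000000 - 604800000000 == 295200000000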
|
||||
|
||||
ACTOR static Future<BackupDescription> describeBackup_impl(Reference<BackupContainerFileSystem> bc, bool deepScan, Version logStartVersionOverride) {
state BackupDescription desc;
desc.url = bc->getURL();

// This is the range of logs we'll have to list to determine log continuity
state Version scanBegin = 0;
state Version scanEnd = std::numeric_limits<Version>::max();
TraceEvent("BackupContainerDescribe1")
.detail("URL", bc->getURL())
.detail("LogStartVersionOverride", logStartVersionOverride);

// Get range for which we know there are logs, if available
state Optional<Version> begin;
state Optional<Version> end;

if(!deepScan) {
wait(store(bc->logBeginVersion().get(), begin) && store(bc->logEndVersion().get(), end));
bool e = wait(bc->exists());
if(!e) {
TraceEvent(SevWarnAlways, "BackupContainerDoesNotExist").detail("URL", bc->getURL());
throw backup_does_not_exist();
}

// If logStartVersion is relative, then first do a recursive call without it to find the max log version
// from which to resolve the relative version.
// This could be handled more efficiently without recursion but it's tricky, this will do for now.
if(logStartVersionOverride != invalidVersion && logStartVersionOverride < 0) {
BackupDescription tmp = wait(bc->describeBackup(false, invalidVersion));
logStartVersionOverride = resolveRelativeVersion(tmp.maxLogEnd, logStartVersionOverride, "LogStartVersionOverride", invalid_option_value());
}

// Get metadata versions
state Optional<Version> metaLogBegin;
state Optional<Version> metaLogEnd;
state Optional<Version> metaExpiredEnd;
state Optional<Version> metaUnreliableEnd;

std::vector<Future<Void>> metaReads;
metaReads.push_back(store(bc->expiredEndVersion().get(), metaExpiredEnd));
metaReads.push_back(store(bc->unreliableEndVersion().get(), metaUnreliableEnd));

// Only read log begin/end versions if not doing a deep scan, otherwise scan files and recalculate them.
if(!deepScan) {
metaReads.push_back(store(bc->logBeginVersion().get(), metaLogBegin));
metaReads.push_back(store(bc->logEndVersion().get(), metaLogEnd));
}

wait(waitForAll(metaReads));

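The store()/waitForAll() idiom above issues all metadata reads concurrently and blocks once. A rough analogy in standard C++ (not the flow framework used here; std::async stands in for the property reads):

#include <future>
#include <vector>

int main() {
    // Kick off independent reads up front...
    std::vector<std::future<long>> metaReads;
    metaReads.push_back(std::async(std::launch::async, [] { return 100L; })); // e.g. expired end version
    metaReads.push_back(std::async(std::launch::async, [] { return 150L; })); // e.g. unreliable end version

    // ...then wait for all of them once, instead of serially awaiting each.
    long expiredEnd = metaReads[0].get();
    long unreliableEnd = metaReads[1].get();
    return (expiredEnd <= unreliableEnd) ? 0 : 1;
}
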
TraceEvent("BackupContainerDescribe2")
|
||||
.detail("URL", bc->getURL())
|
||||
.detail("LogStartVersionOverride", logStartVersionOverride)
|
||||
.detail("ExpiredEndVersion", metaExpiredEnd.orDefault(invalidVersion))
|
||||
.detail("UnreliableEndVersion", metaUnreliableEnd.orDefault(invalidVersion))
|
||||
.detail("LogBeginVersion", metaLogBegin.orDefault(invalidVersion))
|
||||
.detail("LogEndVersion", metaLogEnd.orDefault(invalidVersion));
|
||||
|
||||
// If the logStartVersionOverride is positive (not relative) then ensure that unreliableEndVersion is equal or greater
|
||||
if(logStartVersionOverride != invalidVersion && metaUnreliableEnd.orDefault(invalidVersion) < logStartVersionOverride) {
|
||||
metaUnreliableEnd = logStartVersionOverride;
|
||||
}
|
||||
|
||||
// Don't use metaLogBegin or metaLogEnd if any of the following are true, the safest
|
||||
// thing to do is rescan to verify log continuity and get exact begin/end versions
|
||||
// - either are missing
|
||||
// - metaLogEnd <= metaLogBegin (invalid range)
|
||||
// - metaLogEnd < metaExpiredEnd (log continuity exists in missing data range)
|
||||
// - metaLogEnd < metaUnreliableEnd (log continuity exists in incomplete data range)
|
||||
if(!metaLogBegin.present() || !metaLogEnd.present()
|
||||
|| metaLogEnd.get() <= metaLogBegin.get()
|
||||
|| metaLogEnd.get() < metaExpiredEnd.orDefault(invalidVersion)
|
||||
|| metaLogEnd.get() < metaUnreliableEnd.orDefault(invalidVersion)
|
||||
) {
|
||||
TraceEvent(SevWarnAlways, "BackupContainerMetadataInvalid")
|
||||
.detail("URL", bc->getURL())
|
||||
.detail("ExpiredEndVersion", metaExpiredEnd.orDefault(invalidVersion))
|
||||
.detail("UnreliableEndVersion", metaUnreliableEnd.orDefault(invalidVersion))
|
||||
.detail("LogBeginVersion", metaLogBegin.orDefault(invalidVersion))
|
||||
.detail("LogEndVersion", metaLogEnd.orDefault(invalidVersion));
|
||||
|
||||
metaLogBegin = Optional<Version>();
|
||||
metaLogEnd = Optional<Version>();
|
||||
}
|
||||
|
||||
// If the unreliable end version is not set or is < expiredEndVersion then increase it to expiredEndVersion.
|
||||
// Describe does not update unreliableEnd in the backup metadata for safety reasons as there is no
|
||||
// compare-and-set operation to atomically change it and an expire process could be advancing it simultaneously.
|
||||
if(!metaUnreliableEnd.present() || metaUnreliableEnd.get() < metaExpiredEnd.orDefault(0))
|
||||
metaUnreliableEnd = metaExpiredEnd;
|
||||
|
||||
desc.unreliableEndVersion = metaUnreliableEnd;
|
||||
desc.expiredEndVersion = metaExpiredEnd;
|
||||
|
||||
// Start scanning at the end of the unreliable version range, which is the version before which data is likely
|
||||
// missing because an expire process has operated on that range.
|
||||
state Version scanBegin = desc.unreliableEndVersion.orDefault(0);
|
||||
state Version scanEnd = std::numeric_limits<Version>::max();
|
||||
|
||||
// Use the known log range if present
if(begin.present() && end.present()) {
// Logs are assumed to be contiguous between begin and max(begin, end), so initialize desc accordingly
// The use of max() is to allow for a stale end version that has been exceeded by the begin version
desc.minLogBegin = begin.get();
desc.maxLogEnd = std::max(begin.get(), end.get());
// Logs are assumed to be contiguous between metaLogBegin and metaLogEnd, so initialize desc accordingly
if(metaLogBegin.present() && metaLogEnd.present()) {
// minLogBegin is the greater of the log begin metadata OR the unreliable end version since we can't count
// on log file presence before that version.
desc.minLogBegin = std::max(metaLogBegin.get(), desc.unreliableEndVersion.orDefault(0));

// Set the maximum known end version of a log file, so far, which is also the assumed contiguous log file end version
desc.maxLogEnd = metaLogEnd.get();
desc.contiguousLogEnd = desc.maxLogEnd;

// Begin file scan at the contiguous log end version
// Advance scanBegin to the contiguous log end version
scanBegin = desc.contiguousLogEnd.get();
}

std::vector<KeyspaceSnapshotFile> snapshots = wait(bc->listKeyspaceSnapshots());
desc.snapshots = snapshots;
state std::vector<LogFile> logs;
wait(store(bc->listLogFiles(scanBegin, scanEnd), logs) && store(bc->listKeyspaceSnapshots(), desc.snapshots));

std::vector<LogFile> logs = wait(bc->listLogFiles(scanBegin, scanEnd));
// List logs in version order so log continuity can be analyzed
std::sort(logs.begin(), logs.end());

if(!logs.empty()) {
desc.maxLogEnd = logs.rbegin()->endVersion;

@@ -570,20 +710,32 @@ public:
}
}

// Try to update the saved log versions if they are not set and we have values for them,
// but ignore errors in the update attempt in case the container is not writeable
// Also update logEndVersion if it has a value but it is less than contiguousLogEnd
try {
state Future<Void> updates = Void();
if(desc.minLogBegin.present() && !begin.present())
updates = updates && bc->logBeginVersion().set(desc.minLogBegin.get());
if(desc.contiguousLogEnd.present() && (!end.present() || end.get() < desc.contiguousLogEnd.get()) )
updates = updates && bc->logEndVersion().set(desc.contiguousLogEnd.get());
wait(updates);
} catch(Error &e) {
if(e.code() == error_code_actor_cancelled)
throw;
TraceEvent(SevWarn, "BackupContainerSafeVersionUpdateFailure").detail("URL", bc->getURL());
// Only update stored contiguous log begin and end versions if we did NOT use a log start override.
// Otherwise, a series of describe operations can result in a version range which is actually missing data.
if(logStartVersionOverride == invalidVersion) {
// If the log metadata begin/end versions are missing (or treated as missing due to invalidity) or
// differ from the newly calculated values for minLogBegin and contiguousLogEnd, respectively,
// then attempt to update the metadata in the backup container but ignore errors in case the
// container is not writeable.
try {
state Future<Void> updates = Void();

if(desc.minLogBegin.present() && metaLogBegin != desc.minLogBegin) {
updates = updates && bc->logBeginVersion().set(desc.minLogBegin.get());
}

if(desc.contiguousLogEnd.present() && metaLogEnd != desc.contiguousLogEnd) {
updates = updates && bc->logEndVersion().set(desc.contiguousLogEnd.get());
}

wait(updates);
} catch(Error &e) {
if(e.code() == error_code_actor_cancelled)
throw;
TraceEvent(SevWarn, "BackupContainerMetadataUpdateFailure")
.error(e)
.detail("URL", bc->getURL());
}
}

for(auto &s : desc.snapshots) {

@@ -623,18 +775,37 @@ public:
}

// Uses the virtual methods to describe the backup contents
Future<BackupDescription> describeBackup(bool deepScan = false) {
return describeBackup_impl(Reference<BackupContainerFileSystem>::addRef(this), deepScan);
Future<BackupDescription> describeBackup(bool deepScan, Version logStartVersionOverride) {
return describeBackup_impl(Reference<BackupContainerFileSystem>::addRef(this), deepScan, logStartVersionOverride);
}

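A usage sketch for the new two-argument describeBackup() (not part of this diff; assumes an ACTOR context and a valid container URL): passing a logStartVersionOverride previews what the backup's restorability would be if everything before that version were expired.

// Hypothetical caller, sketched under the assumptions above.
ACTOR Future<Void> previewExpire(std::string url, Version cutoff) {
    state Reference<IBackupContainer> c = IBackupContainer::openContainer(url);
    // Logs before 'cutoff' are ignored, so desc reflects post-expiration restorability.
    BackupDescription desc = wait(c->describeBackup(false, cutoff));
    printf("%s\n", desc.toString().c_str());
    return Void();
}
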
ACTOR static Future<Void> expireData_impl(Reference<BackupContainerFileSystem> bc, Version expireEndVersion, bool force, Version restorableBeginVersion) {
ACTOR static Future<Void> expireData_impl(Reference<BackupContainerFileSystem> bc, Version expireEndVersion, bool force, ExpireProgress *progress, Version restorableBeginVersion) {
if(progress != nullptr) {
progress->step = "Describing backup";
progress->total = 0;
}

TraceEvent("BackupContainerFileSystemExpire1")
.detail("URL", bc->getURL())
.detail("ExpireEndVersion", expireEndVersion)
.detail("RestorableBeginVersion", restorableBeginVersion);

// Get the backup description.
state BackupDescription desc = wait(bc->describeBackup(false, expireEndVersion));

// Resolve relative versions using max log version
expireEndVersion = resolveRelativeVersion(desc.maxLogEnd, expireEndVersion, "ExpireEndVersion", invalid_option_value());
restorableBeginVersion = resolveRelativeVersion(desc.maxLogEnd, restorableBeginVersion, "RestorableBeginVersion", invalid_option_value());

// It would be impossible to have restorability to any version < expireEndVersion after expiring to that version
if(restorableBeginVersion < expireEndVersion)
throw backup_cannot_expire();

state Version scanBegin = 0;

// Get the backup description.
state BackupDescription desc = wait(bc->describeBackup());
// If the expire request is to a version at or before the previous version to which data was already deleted
// then do nothing and just return
if(expireEndVersion <= desc.expiredEndVersion.orDefault(invalidVersion)) {
return Void();
}

// Assume force is needed, then try to prove otherwise.
// Force is required if there is not a restorable snapshot which both
@@ -648,47 +819,50 @@ public:
}
}

// Get metadata
state Optional<Version> expiredEnd;
state Optional<Version> logBegin;
state Optional<Version> logEnd;
wait(store(bc->expiredEndVersion().get(), expiredEnd) && store(bc->logBeginVersion().get(), logBegin) && store(bc->logEndVersion().get(), logEnd));
// If force is needed but not passed then refuse to expire anything.
// Note that it is possible for there to be no actual files in the backup prior to expireEndVersion,
// if they were externally deleted or an expire operation deleted them but was terminated before
// updating expireEndVersion
if(forceNeeded && !force)
throw backup_cannot_expire();

// Update scan range if expiredEnd is present
if(expiredEnd.present()) {
if(expireEndVersion <= expiredEnd.get()) {
// If the expire request is to the version already expired to then there is no work to do so return true
return Void();
}
scanBegin = expiredEnd.get();
}
// Start scan for files to delete at the last completed expire operation's end or 0.
state Version scanBegin = desc.expiredEndVersion.orDefault(0);

TraceEvent("BackupContainerFileSystem")
TraceEvent("BackupContainerFileSystemExpire2")
.detail("URL", bc->getURL())
.detail("ExpireEndVersion", expireEndVersion)
.detail("ScanBeginVersion", scanBegin)
.detail("CachedLogBegin", logBegin.orDefault(-1))
.detail("CachedLogEnd", logEnd.orDefault(-1))
.detail("CachedExpiredEnd", expiredEnd.orDefault(-1));
.detail("RestorableBeginVersion", restorableBeginVersion)
.detail("ScanBeginVersion", scanBegin);

// Get log files that contain any data at or before expireEndVersion
state std::vector<LogFile> logs = wait(bc->listLogFiles(scanBegin, expireEndVersion - 1));
// Get range files up to and including expireEndVersion
state std::vector<RangeFile> ranges = wait(bc->listRangeFiles(scanBegin, expireEndVersion - 1));
state std::vector<LogFile> logs;
state std::vector<RangeFile> ranges;

if(progress != nullptr) {
progress->step = "Listing files";
}
// Get log files or range files that contain any data at or before expireEndVersion
wait(store(bc->listLogFiles(scanBegin, expireEndVersion - 1), logs) && store(bc->listRangeFiles(scanBegin, expireEndVersion - 1), ranges));

// The new logBeginVersion will be taken from the last log file, if there is one
state Optional<Version> newLogBeginVersion;
if(!logs.empty()) {
LogFile &last = logs.back();
// Linear scan the unsorted logs to find the latest one in sorted order
LogFile &last = *std::max_element(logs.begin(), logs.end());

// If the last log ends at expireEndVersion then that will be the next log begin
if(last.endVersion == expireEndVersion) {
newLogBeginVersion = expireEndVersion;
}
else {
// If the last log overlaps the expiredEnd then use the log's begin version and move the expiredEnd
// back to match it.
// back to match it and keep the last log file
if(last.endVersion > expireEndVersion) {
newLogBeginVersion = last.beginVersion;
logs.pop_back();

// Instead of modifying this potentially very large vector, just clear LogFile
last = LogFile();

expireEndVersion = newLogBeginVersion.get();
}
}

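Worked example of the last-log adjustment just above: if the newest listed log spans [950, 1050) and expireEndVersion is 1000, the log straddles the expire boundary, so it is kept and the effective expire end moves back to its begin version. A standalone sketch:

#include <cassert>
#include <cstdint>

int main() {
    int64_t lastBegin = 950, lastEnd = 1050, expireEnd = 1000;
    if (lastEnd > expireEnd) {
        // Keep the straddling log; don't delete into versions it still covers.
        int64_t newLogBegin = lastBegin;
        expireEnd = newLogBegin;
    }
    assert(expireEnd == 950);
    return 0;
}
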
@@ -699,7 +873,10 @@ public:

// Move filenames out of vector then destroy it to save memory
for(auto const &f : logs) {
toDelete.push_back(std::move(f.fileName));
// We may have cleared the last log file earlier so skip any empty filenames
if(!f.fileName.empty()) {
toDelete.push_back(std::move(f.fileName));
}
}
logs.clear();

@@ -720,37 +897,21 @@ public:
}
desc = BackupDescription();

// If some files to delete were found AND force is needed AND the force option is NOT set, then fail
if(!toDelete.empty() && forceNeeded && !force)
throw backup_cannot_expire();

// We are about to start deleting files, at which point no data prior to the expire end version can be
// safely assumed to exist. The [logBegin, logEnd) range from the container's metadata describes
// a range of log versions which can be assumed to exist, so if the range of data being deleted overlaps
// that range then the metadata range must be updated.

// If we're expiring the entire log range described by the metadata then clear both metadata values
if(logEnd.present() && logEnd.get() < expireEndVersion) {
if(logBegin.present())
wait(bc->logBeginVersion().clear());
if(logEnd.present())
wait(bc->logEndVersion().clear());
// We are about to start deleting files, at which point all data prior to expireEndVersion is considered
// 'unreliable' as some or all of it will be missing. So before deleting anything, read unreliableEndVersion
// (don't use cached value in desc) and update its value if it is missing or < expireEndVersion
if(progress != nullptr) {
progress->step = "Initial metadata update";
}
else {
// If we are expiring to a point within the metadata range then update the begin if we have a new
// log begin version (which we should!) or clear the metadata range if we do not (which would be
// repairing the metadata from an incorrect state)
if(logBegin.present() && logBegin.get() < expireEndVersion) {
if(newLogBeginVersion.present()) {
wait(bc->logBeginVersion().set(newLogBeginVersion.get()));
}
else {
if(logBegin.present())
wait(bc->logBeginVersion().clear());
if(logEnd.present())
wait(bc->logEndVersion().clear());
}
}
Optional<Version> metaUnreliableEnd = wait(bc->unreliableEndVersion().get());
if(metaUnreliableEnd.orDefault(0) < expireEndVersion) {
wait(bc->unreliableEndVersion().set(expireEndVersion));
}

if(progress != nullptr) {
progress->step = "Deleting files";
progress->total = toDelete.size();
progress->done = 0;
}

// Delete files, but limit parallelism because the file list could use a lot of memory and the corresponding
@@ -772,19 +933,30 @@ public:

while(deleteFutures.size() > targetFuturesSize) {
wait(deleteFutures.front());
if(progress != nullptr) {
++progress->done;
}
deleteFutures.pop_front();
}
}

// Update the expiredEndVersion property.
wait(bc->expiredEndVersion().set(expireEndVersion));
if(progress != nullptr) {
progress->step = "Final metadata update";
progress->total = 0;
}
// Update the expiredEndVersion metadata to indicate that everything prior to that version has been
// successfully deleted if the current version is lower or missing
Optional<Version> metaExpiredEnd = wait(bc->expiredEndVersion().get());
if(metaExpiredEnd.orDefault(0) < expireEndVersion) {
wait(bc->expiredEndVersion().set(expireEndVersion));
}

return Void();
}

// Delete all data up to (but not including endVersion)
Future<Void> expireData(Version expireEndVersion, bool force, Version restorableBeginVersion) {
return expireData_impl(Reference<BackupContainerFileSystem>::addRef(this), expireEndVersion, force, restorableBeginVersion);
Future<Void> expireData(Version expireEndVersion, bool force, ExpireProgress *progress, Version restorableBeginVersion) {
return expireData_impl(Reference<BackupContainerFileSystem>::addRef(this), expireEndVersion, force, progress, restorableBeginVersion);
}

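Since expireData() now takes a caller-owned ExpireProgress, a tool can poll it while the operation runs. A sketch (assumes an ACTOR context; ExpireProgress is taken to be the nested struct declared in the interface header later in this diff):

ACTOR Future<Void> expireWithProgress(Reference<IBackupContainer> c, Version cutoff) {
    state IBackupContainer::ExpireProgress progress;
    state Future<Void> expire = c->expireData(cutoff, false, &progress);
    while(!expire.isReady()) {
        // step/total/done are updated by expireData as it moves through phases
        printf("%s\n", progress.toString().c_str());
        wait(delay(5.0));
    }
    wait(expire); // rethrows any expiration error
    return Void();
}
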
ACTOR static Future<Optional<RestorableFileSet>> getRestoreSet_impl(Reference<BackupContainerFileSystem> bc, Version targetVersion) {

@@ -808,7 +980,10 @@ public:
if(snapshot.get().beginVersion == snapshot.get().endVersion && snapshot.get().endVersion == targetVersion)
return Optional<RestorableFileSet>(restorable);

std::vector<LogFile> logs = wait(bc->listLogFiles(snapshot.get().beginVersion, targetVersion));
state std::vector<LogFile> logs = wait(bc->listLogFiles(snapshot.get().beginVersion, targetVersion));

// List logs in version order so log continuity can be analyzed
std::sort(logs.begin(), logs.end());

// If there are logs and the first one starts at or before the snapshot begin version then proceed
if(!logs.empty() && logs.front().beginVersion <= snapshot.get().beginVersion) {

@@ -858,18 +1033,19 @@ private:

public:
// To avoid the need to scan the underlying filesystem in many cases, some important version boundaries are stored in named files.
// These files can be deleted from the filesystem if they appear to be wrong or corrupt, and full scans will be done
// when needed.
// These versions also indicate what version ranges are known to be deleted or partially deleted.
//
// The three versions below, when present, describe 4 version ranges which collectively cover the entire version timeline.
// 0 - expiredEndVersion: All files in this range have been deleted
// expiredEndVersion - presentBeginVersion: Files in this range *may* have been deleted so their presence must not be assumed.
// presentBeginVersion - presentEndVersion: Files in this range have NOT been deleted by any FDB backup operations.
// presentEndVersion - infinity: Files in this range may or may not exist yet. Scan to find what is there.
// The values below describe version ranges as follows:
// 0 - expiredEndVersion All files in this range have been deleted
// expiredEndVersion - unreliableEndVersion Some files in this range may have been deleted.
//
// logBeginVersion - logEnd Log files are contiguous in this range and have NOT been deleted by fdbbackup
// logEnd - infinity Files in this range may or may not exist yet
//
VersionProperty logBeginVersion() { return {Reference<BackupContainerFileSystem>::addRef(this), "log_begin_version"}; }
VersionProperty logEndVersion() { return {Reference<BackupContainerFileSystem>::addRef(this), "log_end_version"}; }
VersionProperty expiredEndVersion() { return {Reference<BackupContainerFileSystem>::addRef(this), "expired_end_version"}; }
VersionProperty unreliableEndVersion() { return {Reference<BackupContainerFileSystem>::addRef(this), "unreliable_end_version"}; }

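To make the range bookkeeping above concrete, here is a standalone sketch of the relationships these properties are meant to satisfy (values made up; this is an illustration, not the real consistency check):

#include <cassert>
#include <cstdint>

int main() {
    int64_t expiredEnd = 100;             // [0, 100): files deleted
    int64_t unreliableEnd = 150;          // [100, 150): files may be missing
    int64_t logBegin = 150, logEnd = 900; // [150, 900): contiguous logs, not deleted

    assert(expiredEnd <= unreliableEnd); // expire only ever advances unreliableEnd
    assert(logBegin >= unreliableEnd);   // contiguity cannot begin inside a hole
    assert(logBegin < logEnd);           // non-empty contiguous range
    return 0;
}
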
ACTOR static Future<Void> writeVersionProperty(Reference<BackupContainerFileSystem> bc, std::string path, Version v) {
try {

@@ -879,7 +1055,10 @@ public:
wait(f->finish());
return Void();
} catch(Error &e) {
TraceEvent(SevWarn, "BackupContainerWritePropertyFailed").error(e).detail("Path", path);
TraceEvent(SevWarn, "BackupContainerWritePropertyFailed")
.error(e)
.detail("URL", bc->getURL())
.detail("Path", path);
throw;
}
}

@@ -896,12 +1075,20 @@ public:
if(rs == size && sscanf(s.c_str(), "%lld%n", &v, &len) == 1 && len == size)
return v;

TraceEvent(SevWarn, "BackupContainerInvalidProperty");
TraceEvent(SevWarn, "BackupContainerInvalidProperty")
.detail("URL", bc->getURL())
.detail("Path", path);

throw backup_invalid_info();
} catch(Error &e) {
if(e.code() == error_code_file_not_found)
return Optional<Version>();
TraceEvent(SevWarn, "BackupContainerReadPropertyFailed").error(e).detail("Path", path);

TraceEvent(SevWarn, "BackupContainerReadPropertyFailed")
.error(e)
.detail("URL", bc->getURL())
.detail("Path", path);

throw;
}
}

@@ -968,6 +1155,11 @@ public:
return Void();
}

// The container exists if the folder it resides in exists
Future<bool> exists() {
return directoryExists(m_path);
}

Future<Reference<IAsyncFile>> readFile(std::string path) {
int flags = IAsyncFile::OPEN_NO_AIO | IAsyncFile::OPEN_READONLY | IAsyncFile::OPEN_UNCACHED;
// Simulation does not properly handle opening the same file from multiple machines using a shared filesystem,

@@ -1060,7 +1252,7 @@ public:
Future<Void> deleteContainer(int *pNumDeleted) {
// In order to avoid deleting some random directory due to user error, first describe the backup
// and make sure it has something in it.
return map(describeBackup(), [=](BackupDescription const &desc) {
return map(describeBackup(false, invalidVersion), [=](BackupDescription const &desc) {
// If the backup has no snapshots and no logs then it's probably not a valid backup
if(desc.snapshots.size() == 0 && !desc.minLogBegin.present())
throw backup_invalid_url();

@@ -1214,7 +1406,18 @@ public:
return create_impl(Reference<BackupContainerBlobStore>::addRef(this));
}

// The container exists if the index entry in the blob bucket exists
Future<bool> exists() {
return m_bstore->objectExists(m_bucket, indexEntry());
}

ACTOR static Future<Void> deleteContainer_impl(Reference<BackupContainerBlobStore> bc, int *pNumDeleted) {
bool e = wait(bc->exists());
if(!e) {
TraceEvent(SevWarnAlways, "BackupContainerDoesNotExist").detail("URL", bc->getURL());
throw backup_does_not_exist();
}

// First delete everything under the data prefix in the bucket
wait(bc->m_bstore->deleteRecursively(bc->m_bucket, bc->dataPath(""), pNumDeleted));

@@ -1284,10 +1487,12 @@ Reference<IBackupContainer> IBackupContainer::openContainer(std::string url)
throw;

TraceEvent m(SevWarn, "BackupContainer");
m.detail("Description", "Invalid container specification. See help.").detail("URL", url);

m.detail("Description", "Invalid container specification. See help.");
m.detail("URL", url);
m.error(e);
if(e.code() == error_code_backup_invalid_url)
m.detail("LastOpenError", lastOpenError);

throw;
}
}

@@ -1328,10 +1533,13 @@ ACTOR Future<std::vector<std::string>> listContainers_impl(std::string baseURL)
throw;

TraceEvent m(SevWarn, "BackupContainer");
m.detail("Description", "Invalid backup container URL prefix. See help.").detail("URL", baseURL);

m.detail("Description", "Invalid backup container URL prefix. See help.");
m.detail("URL", baseURL);
m.error(e);
if(e.code() == error_code_backup_invalid_url)
m.detail("LastOpenError", IBackupContainer::lastOpenError);

throw;
}
}

@@ -1484,7 +1692,7 @@ ACTOR Future<Void> testBackupContainer(std::string url) {
try {
wait(c->deleteContainer());
} catch(Error &e) {
if(e.code() != error_code_backup_invalid_url)
if(e.code() != error_code_backup_invalid_url && e.code() != error_code_backup_does_not_exist)
throw;
}

@@ -1548,7 +1756,7 @@ ACTOR Future<Void> testBackupContainer(std::string url) {

wait(waitForAll(writes));

state FullBackupListing listing = wait(c->dumpFileList());
state BackupFileList listing = wait(c->dumpFileList());
ASSERT(listing.ranges.size() == nRangeFiles);
ASSERT(listing.logs.size() == logs.size());
ASSERT(listing.snapshots.size() == snapshots.size());

@@ -1589,12 +1797,11 @@ ACTOR Future<Void> testBackupContainer(std::string url) {
printf("DELETING\n");
wait(c->deleteContainer());

BackupDescription d = wait(c->describeBackup());
printf("\n%s\n", d.toString().c_str());
ASSERT(d.snapshots.size() == 0);
ASSERT(!d.minLogBegin.present());
state Future<BackupDescription> d = c->describeBackup();
wait(ready(d));
ASSERT(d.isError() && d.getError().code() == error_code_backup_does_not_exist);

FullBackupListing empty = wait(c->dumpFileList());
BackupFileList empty = wait(c->dumpFileList());
ASSERT(empty.ranges.size() == 0);
ASSERT(empty.logs.size() == 0);
ASSERT(empty.snapshots.size() == 0);

@@ -96,10 +96,12 @@ struct KeyspaceSnapshotFile {
}
};

struct FullBackupListing {
struct BackupFileList {
std::vector<RangeFile> ranges;
std::vector<LogFile> logs;
std::vector<KeyspaceSnapshotFile> snapshots;

void toStream(FILE *fout) const;
};

// The byte counts here only include usable log files and byte counts from kvrange manifests

@@ -108,10 +110,19 @@ struct BackupDescription {
std::string url;
std::vector<KeyspaceSnapshotFile> snapshots;
int64_t snapshotBytes;
// The version before which everything has been deleted by an expire
Optional<Version> expiredEndVersion;
// The latest version before which at least some data has been deleted by an expire
Optional<Version> unreliableEndVersion;
// The minimum log version in the backup
Optional<Version> minLogBegin;
// The maximum log version in the backup
Optional<Version> maxLogEnd;
// The maximum log version for which there is contiguous log version coverage extending back to minLogBegin
Optional<Version> contiguousLogEnd;
// The maximum version which this backup can be used to restore to
Optional<Version> maxRestorableVersion;
// The minimum version which this backup can be used to restore to
Optional<Version> minRestorableVersion;
std::string extendedDetail; // Freeform container-specific info.

@@ -153,6 +164,7 @@ public:

// Create the container
virtual Future<Void> create() = 0;
virtual Future<bool> exists() = 0;

// Open a log file or range file for writing
virtual Future<Reference<IBackupFile>> writeLogFile(Version beginVersion, Version endVersion, int blockSize) = 0;

@@ -165,23 +177,32 @@ public:
// Open a file for read by name
virtual Future<Reference<IAsyncFile>> readFile(std::string name) = 0;

struct ExpireProgress {
std::string step;
int total;
int done;
std::string toString() const;
};
// Delete backup files which do not contain any data at or after (more recent than) expireEndVersion.
// If force is false, then nothing will be deleted unless there is a restorable snapshot which
// - begins at or after expireEndVersion
// - ends at or before restorableBeginVersion
// If force is true, data is deleted unconditionally, which could leave the backup in an unusable state. This is not recommended.
// Returns true if expiration was done.
virtual Future<Void> expireData(Version expireEndVersion, bool force = false, Version restorableBeginVersion = std::numeric_limits<Version>::max()) = 0;
virtual Future<Void> expireData(Version expireEndVersion, bool force = false, ExpireProgress *progress = nullptr, Version restorableBeginVersion = std::numeric_limits<Version>::max()) = 0;

// Delete entire container. During the process, if pNumDeleted is not null it will be
// updated with the count of deleted files so that progress can be seen.
virtual Future<Void> deleteContainer(int *pNumDeleted = nullptr) = 0;

// Return key details about a backup's contents, possibly using cached or stored metadata
// unless deepScan is true.
virtual Future<BackupDescription> describeBackup(bool deepScan = false) = 0;
// Return key details about a backup's contents.
// Unless deepScan is true, use cached metadata, if present, as the initial contiguous available log range.
// If logStartVersionOverride is given, log data prior to that version will be ignored for the purposes
// of this describe operation. This can be used to calculate what the restorability of a backup would
// be after deleting all data prior to logStartVersionOverride.
virtual Future<BackupDescription> describeBackup(bool deepScan = false, Version logStartVersionOverride = invalidVersion) = 0;

virtual Future<FullBackupListing> dumpFileList() = 0;
virtual Future<BackupFileList> dumpFileList(Version begin = 0, Version end = std::numeric_limits<Version>::max()) = 0;

// Get exactly the files necessary to restore to targetVersion. Returns non-present if
// restore to given version is not possible.
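A consumption sketch for the renamed BackupFileList (assumes an ACTOR context and an opened container; not part of this diff):

ACTOR Future<Void> printFiles(Reference<IBackupContainer> c, Version begin, Version end) {
    BackupFileList list = wait(c->dumpFileList(begin, end));
    printf("%d range files, %d log files, %d snapshots\n",
        (int)list.ranges.size(), (int)list.logs.size(), (int)list.snapshots.size());
    list.toStream(stdout); // declared above; writes the inventory to a FILE*
    return Void();
}
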
@@ -258,8 +258,17 @@ ACTOR Future<Void> deleteObject_impl(Reference<BlobStoreEndpoint> b, std::string

std::string resource = std::string("/") + bucket + "/" + object;
HTTP::Headers headers;
// 200 or 204 means object successfully deleted, 404 means it already doesn't exist, so any of those are considered successful
Reference<HTTP::Response> r = wait(b->doRequest("DELETE", resource, headers, NULL, 0, {200, 204, 404}));
// 200 means object deleted, 404 means it doesn't exist already, so either success code passed above is fine.

// But if the object already did not exist then the 'delete' is assumed to be successful but a warning is logged.
if(r->code == 404) {
TraceEvent(SevWarnAlways, "BlobStoreEndpointDeleteObjectMissing")
.detail("Host", b->host)
.detail("Bucket", bucket)
.detail("Object", object);
}

return Void();
}

@@ -502,8 +511,8 @@ ACTOR Future<Reference<HTTP::Response>> doRequest_impl(Reference<BlobStoreEndpoi
Future<BlobStoreEndpoint::ReusableConnection> frconn = bstore->connect();

// Make a shallow copy of the queue by calling addref() on each buffer in the chain and then prepending that chain to contentCopy
contentCopy.discardAll();
if(pContent != nullptr) {
contentCopy.discardAll();
PacketBuffer *pFirst = pContent->getUnsent();
PacketBuffer *pLast = nullptr;
for(PacketBuffer *p = pFirst; p != nullptr; p = p->nextPacketBuffer()) {

@@ -36,7 +36,7 @@ public:
struct Stats {
Stats() : requests_successful(0), requests_failed(0), bytes_sent(0) {}
Stats operator-(const Stats &rhs);
void clear() { memset(this, sizeof(*this), 0); }
void clear() { memset(this, 0, sizeof(*this)); }
json_spirit::mObject getJSON();

int64_t requests_successful;
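The Stats::clear() fix above is worth spelling out: memset's signature is memset(ptr, value, count), so the old call used the struct size as the fill byte and zero as the count, making it a no-op. A standalone demonstration:

#include <cassert>
#include <cstring>

int main() {
    long stats[3] = {1, 2, 3};
    memset(stats, sizeof(stats), 0); // old form: count is 0, nothing is written
    assert(stats[0] == 1);
    memset(stats, 0, sizeof(stats)); // fixed form: zeroes the whole array
    assert(stats[0] == 0 && stats[2] == 0);
    return 0;
}
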
@@ -0,0 +1,97 @@
set(FDBCLIENT_SRCS
AsyncFileBlobStore.actor.cpp
AsyncFileBlobStore.actor.h
Atomic.h
AutoPublicAddress.cpp
BackupAgent.h
BackupAgentBase.actor.cpp
BackupContainer.actor.cpp
BackupContainer.h
BlobStore.actor.cpp
ClientDBInfo.h
ClientLogEvents.h
ClientWorkerInterface.h
ClusterInterface.h
CommitTransaction.h
CoordinationInterface.h
DatabaseBackupAgent.actor.cpp
DatabaseConfiguration.cpp
DatabaseConfiguration.h
DatabaseContext.h
EventTypes.actor.h
FailureMonitorClient.actor.cpp
FailureMonitorClient.h
FDBOptions.h
FDBTypes.h
FileBackupAgent.actor.cpp
HTTP.actor.cpp
IClientApi.h
JsonBuilder.cpp
JsonBuilder.h
KeyBackedTypes.h
KeyRangeMap.actor.cpp
KeyRangeMap.h
Knobs.cpp
Knobs.h
ManagementAPI.actor.cpp
ManagementAPI.h
MasterProxyInterface.h
MetricLogger.actor.cpp
MetricLogger.h
MonitorLeader.actor.cpp
MonitorLeader.h
MultiVersionAssignmentVars.h
MultiVersionTransaction.actor.cpp
MultiVersionTransaction.h
MutationList.h
NativeAPI.actor.cpp
NativeAPI.h
Notified.h
ReadYourWrites.actor.cpp
ReadYourWrites.h
RunTransaction.actor.h
RYWIterator.cpp
RYWIterator.h
Schemas.cpp
Schemas.h
SnapshotCache.h
Status.h
StatusClient.actor.cpp
StatusClient.h
StorageServerInterface.h
Subspace.cpp
Subspace.h
SystemData.cpp
SystemData.h
TaskBucket.actor.cpp
TaskBucket.h
ThreadSafeTransaction.actor.cpp
ThreadSafeTransaction.h
Tuple.cpp
Tuple.h
VersionedMap.actor.h
VersionedMap.h
WriteMap.h
json_spirit/json_spirit_error_position.h
json_spirit/json_spirit_reader_template.h
json_spirit/json_spirit_value.h
json_spirit/json_spirit_writer_options.h
json_spirit/json_spirit_writer_template.h
libb64/cdecode.c
libb64/cencode.c
md5/md5.c
sha1/SHA1.cpp
${CMAKE_CURRENT_BINARY_DIR}/FDBOptions.g.cpp)

add_custom_command(OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/FDBOptions.g.h ${CMAKE_CURRENT_BINARY_DIR}/FDBOptions.g.cpp
COMMAND ${MONO_EXECUTABLE} ${VEXILLOGRAPHER_EXE} ${CMAKE_CURRENT_SOURCE_DIR}/vexillographer/fdb.options cpp ${CMAKE_CURRENT_BINARY_DIR}/FDBOptions.g
DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/vexillographer/fdb.options vexillographer
COMMENT "Generate FDBOptions c++ files")
add_custom_target(fdboptions DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/FDBOptions.g.h ${CMAKE_CURRENT_BINARY_DIR}/FDBOptions.g.cpp)

actor_set(FDBCLIENT_BUILD "${FDBCLIENT_SRCS}")
add_library(fdbclient STATIC ${FDBCLIENT_BUILD})
add_dependencies(fdbclient fdboptions)
actor_compile(fdbclient "${FDBCLIENT_SRCS}")
target_link_libraries(fdbclient PUBLIC fdbrpc)
@@ -39,8 +39,8 @@ struct ClientDBInfo {
template <class Archive>
void serialize(Archive& ar) {
ASSERT( ar.protocolVersion() >= 0x0FDB00A200040001LL );
ar & proxies & id & clientTxnInfoSampleRate & clientTxnInfoSizeLimit;
serializer(ar, proxies, id, clientTxnInfoSampleRate, clientTxnInfoSizeLimit);
}
};

#endif
#endif

@@ -39,7 +39,7 @@ namespace FdbClientLogEvents {
Event(EventType t, double ts) : type(t), startTs(ts) { }
Event() { }

template <typename Ar> Ar& serialize(Ar &ar) { return ar & type & startTs; }
template <typename Ar> Ar& serialize(Ar &ar) { return serializer(ar, type, startTs); }

EventType type{ EVENTTYPEEND };
double startTs{ 0 };

@@ -53,9 +53,9 @@ namespace FdbClientLogEvents {

template <typename Ar> Ar& serialize(Ar &ar) {
if (!ar.isDeserializing)
return Event::serialize(ar) & latency;
return serializer(Event::serialize(ar), latency);
else
return ar & latency;
return serializer(ar, latency);
}

double latency;

@@ -71,9 +71,9 @@ namespace FdbClientLogEvents {

template <typename Ar> Ar& serialize(Ar &ar) {
if (!ar.isDeserializing)
return Event::serialize(ar) & latency & valueSize & key;
return serializer(Event::serialize(ar), latency, valueSize, key);
else
return ar & latency & valueSize & key;
return serializer(ar, latency, valueSize, key);
}

double latency;

@@ -91,9 +91,9 @@ namespace FdbClientLogEvents {

template <typename Ar> Ar& serialize(Ar &ar) {
if (!ar.isDeserializing)
return Event::serialize(ar) & latency & rangeSize & startKey & endKey;
return serializer(Event::serialize(ar), latency, rangeSize, startKey, endKey);
else
return ar & latency & rangeSize & startKey & endKey;
return serializer(ar, latency, rangeSize, startKey, endKey);
}

double latency;

@@ -112,9 +112,9 @@ namespace FdbClientLogEvents {

template <typename Ar> Ar& serialize(Ar &ar) {
if (!ar.isDeserializing)
return Event::serialize(ar) & latency & numMutations & commitBytes & req.transaction & req.arena;
return serializer(Event::serialize(ar), latency, numMutations, commitBytes, req.transaction, req.arena);
else
return ar & latency & numMutations & commitBytes & req.transaction & req.arena;
return serializer(ar, latency, numMutations, commitBytes, req.transaction, req.arena);
}

double latency;

@@ -145,9 +145,9 @@ namespace FdbClientLogEvents {

template <typename Ar> Ar& serialize(Ar &ar) {
if (!ar.isDeserializing)
return Event::serialize(ar) & errCode & key;
return serializer(Event::serialize(ar), errCode, key);
else
return ar & errCode & key;
return serializer(ar, errCode, key);
}

int errCode;

@@ -164,9 +164,9 @@ namespace FdbClientLogEvents {

template <typename Ar> Ar& serialize(Ar &ar) {
if (!ar.isDeserializing)
return Event::serialize(ar) & errCode & startKey & endKey;
return serializer(Event::serialize(ar), errCode, startKey, endKey);
else
return ar & errCode & startKey & endKey;
return serializer(ar, errCode, startKey, endKey);
}

int errCode;

@@ -184,9 +184,9 @@ namespace FdbClientLogEvents {

template <typename Ar> Ar& serialize(Ar &ar) {
if (!ar.isDeserializing)
return Event::serialize(ar) & errCode & req.transaction & req.arena;
return serializer(Event::serialize(ar), errCode, req.transaction, req.arena);
else
return ar & errCode & req.transaction & req.arena;
return serializer(ar, errCode, req.transaction, req.arena);
}

int errCode;
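The recurring change in these hunks replaces 'ar & a & b & c' chains with serializer(ar, a, b, c). A minimal sketch of how such a variadic helper can be written (the real flow implementation may differ; this only shows the shape):

// Base case: no more fields.
template <class Ar>
Ar& serializer(Ar& ar) {
    return ar;
}

// Apply Ar's operator& to one field, then recurse on the rest.
template <class Ar, class T, class... Ts>
Ar& serializer(Ar& ar, T& t, Ts&... ts) {
    ar & t;
    return serializer(ar, ts...);
}
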
@@ -40,7 +40,7 @@ struct ClientWorkerInterface {

template <class Ar>
void serialize( Ar& ar ) {
ar & reboot & profiler;
serializer(ar, reboot, profiler);
}
};

@@ -52,7 +52,7 @@ struct RebootRequest {

template <class Ar>
void serialize(Ar& ar) {
ar & deleteData & checkData;
serializer(ar, deleteData, checkData);
}
};

@@ -77,7 +77,7 @@ struct ProfilerRequest {

template<class Ar>
void serialize( Ar& ar ) {
ar & reply & type & action & duration & outputFile;
serializer(ar, reply, type, action, duration, outputFile);
}
};
BINARY_SERIALIZABLE( ProfilerRequest::Type );

@@ -52,7 +52,7 @@ struct ClusterInterface {

template <class Ar>
void serialize( Ar& ar ) {
ar & openDatabase & failureMonitoring & databaseStatus & ping & getClientWorkers & forceRecovery;
serializer(ar, openDatabase, failureMonitoring, databaseStatus, ping, getClientWorkers, forceRecovery);
}
};

@@ -93,7 +93,7 @@ struct ClientVersionRef {

template <class Ar>
void serialize(Ar& ar) {
ar & clientVersion & sourceVersion & protocolVersion;
serializer(ar, clientVersion, sourceVersion, protocolVersion);
}

size_t expectedSize() const { return clientVersion.size() + sourceVersion.size() + protocolVersion.size(); }

@@ -125,7 +125,7 @@ struct OpenDatabaseRequest {
template <class Ar>
void serialize(Ar& ar) {
ASSERT( ar.protocolVersion() >= 0x0FDB00A400040001LL );
ar & issues & supportedVersions & traceLogGroup & knownClientInfoID & reply & arena;
serializer(ar, issues, supportedVersions, traceLogGroup, knownClientInfoID, reply, arena);
}
};

@@ -138,7 +138,7 @@ struct SystemFailureStatus {

template <class Ar>
void serialize(Ar& ar) {
ar & address & status;
serializer(ar, address, status);
}
};

@@ -159,7 +159,7 @@ struct FailureMonitoringRequest {

template <class Ar>
void serialize(Ar& ar) {
ar & senderStatus & failureInformationVersion & reply;
serializer(ar, senderStatus, failureInformationVersion, reply);
}
};

@@ -173,7 +173,7 @@ struct FailureMonitoringReply {

template <class Ar>
void serialize(Ar& ar) {
ar & changes & failureInformationVersion & allOthersFailed & clientRequestIntervalMS & considerServerFailedTimeoutMS & arena;
serializer(ar, changes, failureInformationVersion, allOthersFailed, clientRequestIntervalMS, considerServerFailedTimeoutMS, arena);
}
};

@@ -182,7 +182,7 @@ struct StatusRequest {

template <class Ar>
void serialize(Ar& ar) {
ar & reply;
serializer(ar, reply);
}
};

@@ -196,7 +196,7 @@ struct StatusReply {

template <class Ar>
void serialize(Ar& ar) {
ar & statusStr;
serializer(ar, statusStr);
if( ar.isDeserializing ) {
json_spirit::mValue mv;
if(g_network->isSimulated()) {

@@ -218,7 +218,7 @@ struct GetClientWorkersRequest {

template <class Ar>
void serialize(Ar& ar) {
ar & reply;
serializer(ar, reply);
}
};

@@ -229,7 +229,7 @@ struct ForceRecoveryRequest {

template <class Ar>
void serialize(Ar& ar) {
ar & reply;
serializer(ar, reply);
}
};

@@ -50,7 +50,7 @@ struct MutationRef {

template <class Ar>
void serialize( Ar& ar ) {
ar & type & param1 & param2;
serializer(ar, type, param1, param2);
}

// These masks define which mutation types have particular properties (they are used to implement isSingleKeyMutation() etc)

@@ -101,7 +101,7 @@ struct CommitTransactionRef {

template <class Ar>
force_inline void serialize( Ar& ar ) {
ar & read_conflict_ranges & write_conflict_ranges & mutations & read_snapshot;
serializer(ar, read_conflict_ranges, write_conflict_ranges, mutations, read_snapshot);
}

// Convenience for internal code required to manipulate these without the Native API

@@ -122,7 +122,7 @@ struct LeaderInfo {

template <class Ar>
void serialize(Ar& ar) {
ar & changeID & serializedInfo & forward;
serializer(ar, changeID, serializedInfo, forward);
}
};

@@ -136,7 +136,7 @@ struct GetLeaderRequest {

template <class Ar>
void serialize(Ar& ar) {
ar & key & knownLeader & reply;
serializer(ar, key, knownLeader, reply);
}
};

@@ -1197,7 +1197,7 @@ namespace dbBackup {
loop {
try {
tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
tr->setOption(FDBTransactionOptions::LOCK_AWARE);
tr->setOption(FDBTransactionOptions::LOCK_AWARE);
Key tagPath = srcDrAgent.states.get(task->params[DatabaseBackupAgent::keyConfigLogUid]).pack(BackupAgentBase::keyConfigBackupTag);
Optional<Key> tagName = wait(tr->get(tagPath));
if (!tagName.present()) {

@@ -41,7 +41,7 @@ struct SatelliteInfo {

template <class Ar>
void serialize(Ar& ar) {
ar & dcId & priority;
serializer(ar, dcId, priority);
}
};

@@ -71,8 +71,8 @@ struct RegionInfo {

template <class Ar>
void serialize(Ar& ar) {
ar & dcId & priority & satelliteTLogPolicy & satelliteDesiredTLogCount & satelliteTLogReplicationFactor & satelliteTLogWriteAntiQuorum & satelliteTLogUsableDcs &
satelliteTLogPolicyFallback & satelliteTLogReplicationFactorFallback & satelliteTLogWriteAntiQuorumFallback & satelliteTLogUsableDcsFallback & satellites;
serializer(ar, dcId, priority, satelliteTLogPolicy, satelliteDesiredTLogCount, satelliteTLogReplicationFactor, satelliteTLogWriteAntiQuorum, satelliteTLogUsableDcs,
satelliteTLogPolicyFallback, satelliteTLogReplicationFactorFallback, satelliteTLogWriteAntiQuorumFallback, satelliteTLogUsableDcsFallback, satellites);
}
};

@@ -203,7 +203,7 @@ struct DatabaseConfiguration {
template <class Ar>
void serialize(Ar& ar) {
if (!ar.isDeserializing) makeConfigurationImmutable();
ar & rawConfiguration;
serializer(ar, rawConfiguration);
if (ar.isDeserializing) {
for(auto c=rawConfiguration.begin(); c!=rawConfiguration.end(); ++c)
setInternal(c->key, c->value);

@@ -53,7 +53,7 @@ struct Tag {

template <class Ar>
force_inline void serialize_unversioned(Ar& ar) {
ar & locality & id;
serializer(ar, locality, id);
}
};
#pragma pack(pop)

@@ -145,13 +145,18 @@ static std::string describe( std::set<T> const& items, int max_items = -1 ) {

std::string printable( const StringRef& val );
std::string printable( const std::string& val );
std::string printable( const Optional<StringRef>& val );
std::string printable( const Optional<Standalone<StringRef>>& val );
std::string printable( const KeyRangeRef& range );
std::string printable( const VectorRef<StringRef>& val );
std::string printable( const VectorRef<KeyValueRef>& val );
std::string printable( const KeyValueRef& val );

template <class T>
std::string printable( const Optional<T>& val ) {
if( val.present() )
return printable( val.get() );
return "[not set]";
}

inline bool equalsKeyAfter( const KeyRef& key, const KeyRef& compareKey ) {
if( key.size()+1 != compareKey.size() || compareKey[compareKey.size()-1] != 0 )
return false;

@@ -193,7 +198,7 @@ struct KeyRangeRef {

template <class Ar>
force_inline void serialize(Ar& ar) {
ar & const_cast<KeyRef&>(begin) & const_cast<KeyRef&>(end);
serializer(ar, const_cast<KeyRef&>(begin), const_cast<KeyRef&>(end));
if( begin > end ) {
throw inverted_range();
};

@@ -227,7 +232,7 @@ struct KeyValueRef {
int expectedSize() const { return key.expectedSize() + value.expectedSize(); }

template <class Ar>
force_inline void serialize(Ar& ar) { ar & key & value; }
force_inline void serialize(Ar& ar) { serializer(ar, key, value); }

struct OrderByKey {
bool operator()(KeyValueRef const& a, KeyValueRef const& b) const {

@@ -385,7 +390,7 @@ public:

template <class Ar>
void serialize( Ar& ar ) {
ar & key & orEqual & offset;
serializer(ar, key, orEqual, offset);
}
};

@@ -418,7 +423,7 @@ struct KeyRangeWith : KeyRange {

template <class Ar>
void serialize( Ar& ar ) {
ar & ((KeyRange&)*this) & value;
serializer(ar, ((KeyRange&)*this), value);
}
};
template <class Val>

@@ -470,7 +475,7 @@ struct RangeResultRef : VectorRef<KeyValueRef> {

template <class Ar>
void serialize( Ar& ar ) {
ar & ((VectorRef<KeyValueRef>&)*this) & more & readThrough & readToBegin & readThroughEnd;
serializer(ar, ((VectorRef<KeyValueRef>&)*this), more, readThrough, readToBegin, readThroughEnd);
}
};

@@ -492,7 +497,7 @@ struct KeyValueStoreType {
operator StoreType() const { return StoreType(type); }

template <class Ar>
void serialize(Ar& ar) { ar & type; }
void serialize(Ar& ar) { serializer(ar, type); }

std::string toString() const {
switch( type ) {

@@ -520,7 +525,7 @@ struct StorageBytes {

template <class Ar>
void serialize(Ar& ar) {
ar & free & total & used & available;
serializer(ar, free, total, used, available);
}
};

@@ -639,7 +644,7 @@ struct ClusterControllerPriorityInfo {

template <class Ar>
void serialize(Ar& ar) {
ar & processClassFitness & isExcluded & dcFitness;
serializer(ar, processClassFitness, isExcluded, dcFitness);
}
};

@@ -3381,8 +3381,9 @@ class FileBackupAgentImpl {
public:
static const int MAX_RESTORABLE_FILE_METASECTION_BYTES = 1024 * 8;

// This method will return the final status of the backup
ACTOR static Future<int> waitBackup(FileBackupAgent* backupAgent, Database cx, std::string tagName, bool stopWhenDone) {
// This method will return the final status of the backup at tag, and return the URL that was used on the tag
// when that status value was read.
ACTOR static Future<int> waitBackup(FileBackupAgent* backupAgent, Database cx, std::string tagName, bool stopWhenDone, Reference<IBackupContainer> *pContainer = nullptr, UID *pUID = nullptr) {
state std::string backTrace;
state KeyBackedTag tag = makeBackupTag(tagName);

@@ -3400,13 +3401,20 @@ public:
state BackupConfig config(oldUidAndAborted.get().first);
state EBackupState status = wait(config.stateEnum().getD(tr, false, EBackupState::STATE_NEVERRAN));

// Break, if no longer runnable
if (!FileBackupAgent::isRunnable(status)) {
return status;
}
// Break, if one of the following is true
// - no longer runnable
// - in differential mode (restorable) and stopWhenDone is not enabled
if( !FileBackupAgent::isRunnable(status) || (!stopWhenDone) && (BackupAgentBase::STATE_DIFFERENTIAL == status) ) {

if(pContainer != nullptr) {
Reference<IBackupContainer> c = wait(config.backupContainer().getOrThrow(tr, false, backup_invalid_info()));
*pContainer = c;
}

if(pUID != nullptr) {
*pUID = oldUidAndAborted.get().first;
}

// Break, if in differential mode (restorable) and stopWhenDone is not enabled
if ((!stopWhenDone) && (BackupAgentBase::STATE_DIFFERENTIAL == status)) {
return status;
}

@@ -4082,7 +4090,7 @@ void FileBackupAgent::setLastRestorable(Reference<ReadYourWritesTransaction> tr,
tr->set(lastRestorable.pack(tagName), BinaryWriter::toValue<Version>(version, Unversioned()));
}

Future<int> FileBackupAgent::waitBackup(Database cx, std::string tagName, bool stopWhenDone) {
return FileBackupAgentImpl::waitBackup(this, cx, tagName, stopWhenDone);
Future<int> FileBackupAgent::waitBackup(Database cx, std::string tagName, bool stopWhenDone, Reference<IBackupContainer> *pContainer, UID *pUID) {
return FileBackupAgentImpl::waitBackup(this, cx, tagName, stopWhenDone, pContainer, pUID);
}

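A usage sketch for the extended waitBackup() (assumes an ACTOR context; not part of this diff): the new out-parameters let a caller learn which container and UID were current when the final status was read.

ACTOR Future<Void> waitAndReport(FileBackupAgent* agent, Database cx, std::string tag) {
    state Reference<IBackupContainer> container;
    state UID uid;
    int status = wait(agent->waitBackup(cx, tag, false, &container, &uid));
    printf("tag %s: status %d uid %s url %s\n", tag.c_str(), status,
        uid.toString().c_str(), container.getPtr() ? container->getURL().c_str() : "<none>");
    return Void();
}
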
@@ -30,7 +30,7 @@ namespace HTTP {
o.reserve(s.size() * 3);
char buf[4];
for(auto c : s)
if(std::isalnum(c))
if(std::isalnum(c) || c == '?' || c == '/' || c == '-' || c == '_' || c == '.')
o.append(&c, 1);
else {
sprintf(buf, "%%%.02X", c);

@@ -292,15 +292,41 @@ namespace HTTP {
// Request content is provided as UnsentPacketQueue *pContent which will be depleted as bytes are sent but the queue itself must live for the life of this actor
// and be destroyed by the caller
// TODO: pSent is very hackish, do something better.
ACTOR Future<Reference<HTTP::Response>> doRequest(Reference<IConnection> conn, std::string verb, std::string resource, HTTP::Headers headers, UnsentPacketQueue *pContent, int contentLen, Reference<IRateControl> sendRate, int64_t *pSent, Reference<IRateControl> recvRate) {
ACTOR Future<Reference<HTTP::Response>> doRequest(Reference<IConnection> conn, std::string verb, std::string resource, HTTP::Headers headers, UnsentPacketQueue *pContent, int contentLen, Reference<IRateControl> sendRate, int64_t *pSent, Reference<IRateControl> recvRate, std::string requestIDHeader) {
state TraceEvent event(SevDebug, "HTTPRequest");

state UnsentPacketQueue empty;
if(pContent == NULL)
pContent = &empty;

// There is no standard http request id header field, so either a global default can be set via a knob
// or it can be set per-request with the requestIDHeader argument (which overrides the default)
if(requestIDHeader.empty()) {
requestIDHeader = CLIENT_KNOBS->HTTP_REQUEST_ID_HEADER;
}

state bool earlyResponse = false;
state int total_sent = 0;

event.detail("DebugID", conn->getDebugID());
event.detail("RemoteAddress", conn->getPeerAddress());
event.detail("Verb", verb);
event.detail("Resource", resource);
event.detail("RequestContentLen", contentLen);

try {
state std::string requestID;
if(!requestIDHeader.empty()) {
requestID = g_random->randomUniqueID().toString();
requestID = requestID.insert(20, "-");
requestID = requestID.insert(16, "-");
requestID = requestID.insert(12, "-");
requestID = requestID.insert(8, "-");

headers[requestIDHeader] = requestID;
event.detail("RequestIDSent", requestID);
}

// Write headers to a packet buffer chain
PacketBuffer *pFirst = new PacketBuffer();
PacketBuffer *pLast = writeRequestHeader(verb, resource, headers, pFirst);

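Worked example of the dash insertion above: a 32-hex-character unique ID gains dashes at offsets 20, 16, 12, and 8 (largest offset first, so each later insert lands before the dashes already placed) to form the familiar 8-4-4-4-12 UUID layout:

#include <cassert>
#include <string>

int main() {
    std::string id = "0123456789abcdef0123456789abcdef";
    id = id.insert(20, "-");
    id = id.insert(16, "-");
    id = id.insert(12, "-");
    id = id.insert(8, "-");
    assert(id == "01234567-89ab-cdef-0123-456789abcdef");
    return 0;
}
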
@ -346,19 +372,59 @@ namespace HTTP {
|
|||
}
|
||||
|
||||
wait(responseReading);
|
||||
|
||||
double elapsed = timer() - send_start;
|
||||
if(CLIENT_KNOBS->HTTP_VERBOSE_LEVEL > 0)
|
||||
printf("[%s] HTTP code=%d early=%d, time=%fs %s %s contentLen=%d [%d out, response content len %d]\n",
|
||||
conn->getDebugID().toString().c_str(), r->code, earlyResponse, elapsed, verb.c_str(), resource.c_str(), contentLen, total_sent, (int)r->contentLen);
|
||||
if(CLIENT_KNOBS->HTTP_VERBOSE_LEVEL > 2)
|
||||
|
||||
event.detail("ResponseCode", r->code);
|
||||
event.detail("ResponseContentLen", r->contentLen);
|
||||
event.detail("Elapsed", elapsed);
|
||||
|
||||
Optional<Error> err;
|
||||
if(!requestIDHeader.empty()) {
|
||||
std::string responseID;
|
||||
auto iid = r->headers.find(requestIDHeader);
|
||||
if(iid != r->headers.end()) {
|
||||
responseID = iid->second;
|
||||
}
|
||||
event.detail("RequestIDReceived", responseID);
|
||||
if(requestID != responseID) {
|
||||
err = http_bad_request_id();
|
||||
// Log a non-debug a error
|
||||
TraceEvent(SevError, "HTTPRequestFailedIDMismatch")
|
||||
.detail("DebugID", conn->getDebugID())
|
||||
.detail("RemoteAddress", conn->getPeerAddress())
|
||||
.detail("Verb", verb)
|
||||
.detail("Resource", resource)
|
||||
.detail("RequestContentLen", contentLen)
|
||||
.detail("ResponseCode", r->code)
|
||||
.detail("ResponseContentLen", r->contentLen)
|
||||
.detail("RequestIDSent", requestID)
|
||||
.detail("RequestIDReceived", responseID)
|
||||
.error(err.get());
|
||||
}
|
||||
}
|
||||
|
||||
if(CLIENT_KNOBS->HTTP_VERBOSE_LEVEL > 0) {
|
||||
printf("[%s] HTTP %scode=%d early=%d, time=%fs %s %s contentLen=%d [%d out, response content len %d]\n",
|
||||
conn->getDebugID().toString().c_str(),
|
||||
(err.present() ? format("*ERROR*=%s ", err.get().name()).c_str() : ""),
|
||||
r->code, earlyResponse, elapsed, verb.c_str(), resource.c_str(), contentLen, total_sent, (int)r->contentLen);
|
||||
}
|
||||
if(CLIENT_KNOBS->HTTP_VERBOSE_LEVEL > 2) {
|
||||
printf("[%s] HTTP RESPONSE: %s %s\n%s\n", conn->getDebugID().toString().c_str(), verb.c_str(), resource.c_str(), r->toString().c_str());
|
||||
}
|
||||
|
||||
if(err.present()) {
|
||||
throw err.get();
|
||||
}
|
||||
|
||||
return r;
|
||||
} catch(Error &e) {
|
||||
double elapsed = timer() - send_start;
|
||||
if(CLIENT_KNOBS->HTTP_VERBOSE_LEVEL > 0)
|
||||
if(CLIENT_KNOBS->HTTP_VERBOSE_LEVEL > 0 && e.code() != error_code_http_bad_request_id) {
|
||||
printf("[%s] HTTP *ERROR*=%s early=%d, time=%fs %s %s contentLen=%d [%d out]\n",
|
||||
conn->getDebugID().toString().c_str(), e.name(), earlyResponse, elapsed, verb.c_str(), resource.c_str(), contentLen, total_sent);
|
||||
}
|
||||
event.error(e);
|
||||
throw;
|
||||
}
|
||||
}
|
||||
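
The request-ID block above generates a 32-hex-digit ID and reshapes it into UUID form; inserting the dashes at offsets 20, 16, 12, 8 in descending order keeps each remaining offset valid as the string grows. A standalone sketch of that scheme and the mismatch check, with std::mt19937_64 standing in for FDB's g_random and the server echo simulated:

    #include <cassert>
    #include <random>
    #include <string>

    // Builds an ID like 0549f1b236f147e9a1b490cbe61cf2d8 reshaped to
    // 0549f1b2-36f1-47e9-a1b4-90cbe61cf2d8. Dashes go in right-to-left
    // (20, 16, 12, 8) so earlier inserts do not shift later offsets.
    std::string makeRequestID() {
        static std::mt19937_64 rng{std::random_device{}()};
        static const char* hex = "0123456789abcdef";
        std::string id;
        for (int i = 0; i < 32; ++i) id += hex[rng() % 16];
        for (int pos : {20, 16, 12, 8}) id.insert(pos, "-");
        return id;
    }

    int main() {
        std::string sent = makeRequestID();
        std::string received = sent;  // a well-behaved server echoes the header back
        // In doRequest, a mismatch sets err = http_bad_request_id() and logs
        // HTTPRequestFailedIDMismatch at SevError.
        assert(sent == received);
    }
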

@@ -51,5 +51,5 @@ namespace HTTP {
	PacketBuffer * writeRequestHeader(std::string const &verb, std::string const &resource, HTTP::Headers const &headers, PacketBuffer *dest);

	// Do an HTTP request to the blob store, parse the response.
	Future<Reference<Response>> doRequest(Reference<IConnection> const &conn, std::string const &verb, std::string const &resource, HTTP::Headers const &headers, UnsentPacketQueue * const &pContent, int const &contentLen, Reference<IRateControl> const &sendRate, int64_t * const &pSent, Reference<IRateControl> const &recvRate);
	Future<Reference<Response>> doRequest(Reference<IConnection> const &conn, std::string const &verb, std::string const &resource, HTTP::Headers const &headers, UnsentPacketQueue * const &pContent, int const &contentLen, Reference<IRateControl> const &sendRate, int64_t * const &pSent, Reference<IRateControl> const &recvRate, const std::string &requestHeader = std::string());
}

@@ -148,6 +148,7 @@ ClientKnobs::ClientKnobs(bool randomize) {
	init( HTTP_READ_SIZE, 128*1024 );
	init( HTTP_SEND_SIZE, 32*1024 );
	init( HTTP_VERBOSE_LEVEL, 0 );
	init( HTTP_REQUEST_ID_HEADER, "" );
	init( BLOBSTORE_CONNECT_TRIES, 10 );
	init( BLOBSTORE_CONNECT_TIMEOUT, 10 );
	init( BLOBSTORE_MAX_CONNECTION_LIFE, 120 );

@@ -152,6 +152,7 @@ public:
	int HTTP_SEND_SIZE;
	int HTTP_READ_SIZE;
	int HTTP_VERBOSE_LEVEL;
	std::string HTTP_REQUEST_ID_HEADER;
	int BLOBSTORE_CONNECT_TRIES;
	int BLOBSTORE_CONNECT_TIMEOUT;
	int BLOBSTORE_MAX_CONNECTION_LIFE;

@@ -50,7 +50,7 @@ struct MasterProxyInterface {

	template <class Archive>
	void serialize(Archive& ar) {
		ar & locality & commit & getConsistentReadVersion & getKeyServersLocations & waitFailure & getStorageServerRejoinInfo & getRawCommittedVersion & txnState;
		serializer(ar, locality, commit, getConsistentReadVersion, getKeyServersLocations, waitFailure, getStorageServerRejoinInfo, getRawCommittedVersion, txnState);
	}

	void initEndpoints() {
@@ -67,7 +67,7 @@ struct CommitID {

	template <class Ar>
	void serialize(Ar& ar) {
		ar & version & txnBatchId;
		serializer(ar, version, txnBatchId);
	}

	CommitID() : version(invalidVersion), txnBatchId(0) {}
@@ -93,7 +93,7 @@ struct CommitTransactionRequest {

	template <class Ar>
	void serialize(Ar& ar) {
		ar & transaction & reply & arena & flags & debugID;
		serializer(ar, transaction, reply, arena, flags, debugID);
	}
};

@@ -116,7 +116,7 @@ struct GetReadVersionReply {

	template <class Ar>
	void serialize(Ar& ar) {
		ar & version & locked;
		serializer(ar, version, locked);
	}
};

@@ -144,7 +144,7 @@ struct GetReadVersionRequest {

	template <class Ar>
	void serialize(Ar& ar) {
		ar & transactionCount & flags & debugID & reply;
		serializer(ar, transactionCount, flags, debugID, reply);
	}
};

@@ -154,7 +154,7 @@ struct GetKeyServerLocationsReply {

	template <class Ar>
	void serialize(Ar& ar) {
		ar & results & arena;
		serializer(ar, results, arena);
	}
};

@@ -171,7 +171,7 @@ struct GetKeyServerLocationsRequest {

	template <class Ar>
	void serialize(Ar& ar) {
		ar & begin & end & limit & reverse & reply & arena;
		serializer(ar, begin, end, limit, reverse, reply, arena);
	}
};

@@ -183,7 +183,7 @@ struct GetRawCommittedVersionRequest {

	template <class Ar>
	void serialize( Ar& ar ) {
		ar & debugID & reply;
		serializer(ar, debugID, reply);
	}
};

@@ -196,7 +196,7 @@ struct GetStorageServerRejoinInfoReply {

	template <class Ar>
	void serialize(Ar& ar) {
		ar & version & tag & newTag & newLocality & history;
		serializer(ar, version, tag, newTag, newLocality, history);
	}
};

@@ -210,7 +210,7 @@ struct GetStorageServerRejoinInfoRequest {

	template <class Ar>
	void serialize( Ar& ar ) {
		ar & id & dcId & reply;
		serializer(ar, id, dcId, reply);
	}
};

@@ -223,7 +223,7 @@ struct TxnStateRequest {

	template <class Ar>
	void serialize(Ar& ar) {
		ar & data & sequence & last & reply & arena;
		serializer(ar, data, sequence, last, reply, arena);
	}
};

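All of the serialize() hunks in this commit apply the same mechanical rewrite: the chained `ar & a & b & c;` form becomes `serializer(ar, a, b, c);`. A minimal sketch of the shape such a variadic helper can take; the toy archive and everything besides the serializer name are invented for illustration, this is not FDB's actual implementation:

    #include <iostream>

    template <class Ar>
    void serializer(Ar&) {}  // base case: no fields left

    // Forwards each field to the archive's operator& in declaration order,
    // so the wire format matches the old chained form exactly.
    template <class Ar, class T, class... Ts>
    void serializer(Ar& ar, T& first, Ts&... rest) {
        ar & first;
        serializer(ar, rest...);
    }

    // Toy archive that just prints what it would serialize.
    struct PrintArchive {
        template <class T>
        PrintArchive& operator&(const T& v) { std::cout << v << '\n'; return *this; }
    };

    int main() {
        PrintArchive ar;
        int version = 7; bool locked = true;
        serializer(ar, version, locked);  // equivalent to: ar & version & locked;
    }

A single call site per struct also gives the project one place to change later if the serialization backend is swapped out.
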
@@ -622,7 +622,7 @@ void MultiVersionDatabase::setOption(FDBDatabaseOptions::Option option, Optional
		dbState->db->setOption(option, value);
	}

	dbState->options.push_back(std::make_pair(option, value.cast_to<Standalone<StringRef>>()));
	dbState->options.push_back(std::make_pair(option, value.castTo<Standalone<StringRef>>()));
}

void MultiVersionDatabase::Connector::connect() {
@@ -727,7 +727,7 @@ void MultiVersionDatabase::DatabaseState::stateChanged() {
	optionLock.enter();
	for(auto option : options) {
		try {
			newDb->setOption(option.first, option.second.cast_to<StringRef>()); // In practice, this will set a deferred error instead of throwing. If that happens, the database will be unusable (attempts to use it will throw errors).
			newDb->setOption(option.first, option.second.castTo<StringRef>()); // In practice, this will set a deferred error instead of throwing. If that happens, the database will be unusable (attempts to use it will throw errors).
		}
		catch(Error &e) {
			optionLock.leave();
@@ -982,7 +982,7 @@ void MultiVersionApi::setNetworkOptionInternal(FDBNetworkOptions::Option option,
		});
	}
	else {
		options.push_back(std::make_pair(option, value.cast_to<Standalone<StringRef>>()));
		options.push_back(std::make_pair(option, value.castTo<Standalone<StringRef>>()));
	}
}
@@ -1026,7 +1026,7 @@ void MultiVersionApi::setupNetwork() {
	MutexHolder holder(lock);
	runOnExternalClients([this, transportId](Reference<ClientInfo> client) {
		for(auto option : options) {
			client->api->setNetworkOption(option.first, option.second.cast_to<StringRef>());
			client->api->setNetworkOption(option.first, option.second.castTo<StringRef>());
		}
		client->api->setNetworkOption(FDBNetworkOptions::EXTERNAL_CLIENT_TRANSPORT_ID, std::to_string(transportId));

@@ -132,7 +132,7 @@ public:

	template <class Ar>
	void serialize_load( Ar& ar ) {
		ar & totalBytes;
		serializer(ar, totalBytes);

		if(totalBytes > 0) {
			blob_begin = blob_end = new (ar.arena()) Blob;
@@ -142,7 +142,7 @@ public:
	}
	template <class Ar>
	void serialize_save( Ar& ar ) const {
		ar & totalBytes;
		serializer(ar, totalBytes);
		for(auto b = blob_begin; b; b=b->next)
			ar.serializeBytes(b->data);
	}

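The serialize_save/serialize_load pair above writes totalBytes first and then streams the blob chain, so the loader knows exactly how many bytes to expect. A simplified sketch of that length-prefix idea, using one contiguous buffer instead of a Blob chain; the Writer/Reader types are invented for the example:

    #include <cassert>
    #include <cstdint>
    #include <cstring>
    #include <string>

    struct Writer {
        std::string out;
        void putInt(uint32_t v) { out.append(reinterpret_cast<const char*>(&v), sizeof v); }
        void putBytes(const std::string& b) { out += b; }
    };

    struct Reader {
        std::string in;
        size_t pos = 0;
        uint32_t getInt() { uint32_t v; std::memcpy(&v, in.data() + pos, sizeof v); pos += sizeof v; return v; }
        std::string getBytes(uint32_t n) { std::string b = in.substr(pos, n); pos += n; return b; }
    };

    int main() {
        std::string payload = "hello";
        Writer w;
        w.putInt((uint32_t)payload.size());  // like serialize_save: totalBytes first...
        w.putBytes(payload);                 // ...then the raw blob bytes

        Reader r{w.out};
        uint32_t totalBytes = r.getInt();    // like serialize_load: read the prefix...
        assert(r.getBytes(totalBytes) == payload);  // ...then exactly that many bytes
    }
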
@@ -137,18 +137,6 @@ std::string printable( const std::string& str ) {
	return StringRef(str).printable();
}

std::string printable( const Optional<StringRef>& val ) {
	if( val.present() )
		return printable( val.get() );
	return "[not set]";
}

std::string printable( const Optional<Standalone<StringRef>>& val ) {
	if( val.present() )
		return printable( val.get() );
	return "[not set]";
}

std::string printable( const KeyRangeRef& range ) {
	return printable(range.begin) + " - " + printable(range.end);
}

@@ -945,7 +945,7 @@ public:
				swapAndPop(&itCopy->value, i--);
			} else {
				itCopy->value[i]->setPresent = true;
				itCopy->value[i]->setValue = val.cast_to<Value>();
				itCopy->value[i]->setValue = val.castTo<Value>();
			}
		}

@@ -68,11 +68,11 @@ struct StorageServerInterface {
	void serialize( Ar& ar ) {
		// StorageServerInterface is persisted in the database and in the tLog's data structures, so changes here have to be
		// versioned carefully!
		ar & uniqueID & locality & getVersion & getValue & getKey & getKeyValues & getShardState & waitMetrics
			& splitMetrics & getPhysicalMetrics & waitFailure & getQueuingMetrics & getKeyValueStoreType;
		serializer(ar, uniqueID, locality, getVersion, getValue, getKey, getKeyValues, getShardState, waitMetrics,
			splitMetrics, getPhysicalMetrics, waitFailure, getQueuingMetrics, getKeyValueStoreType);

		if( ar.protocolVersion() >= 0x0FDB00A200090001LL )
			ar & watchValue;
			serializer(ar, watchValue);
	}
	bool operator == (StorageServerInterface const& s) const { return uniqueID == s.uniqueID; }
	bool operator < (StorageServerInterface const& s) const { return uniqueID < s.uniqueID; }
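
The watchValue hunk above shows the protocol-version gate FDB uses to evolve persisted interfaces: a field is only exchanged when the archive's protocol version is new enough to know about it. A compilable sketch of the pattern; the archive and interface types here are toys, not FDB's:

    #include <cassert>
    #include <cstdint>

    struct Ar {
        uint64_t version;
        int fields = 0;  // counts fields touched, standing in for real I/O
        uint64_t protocolVersion() const { return version; }
        template <class T> Ar& operator&(T&) { ++fields; return *this; }
    };

    struct Iface {
        int uniqueID = 0;
        int watchValue = 0;
        void serialize(Ar& ar) {
            ar & uniqueID;
            // Only peers at or above the introducing version exchange this field.
            if (ar.protocolVersion() >= 0x0FDB00A200090001ULL)
                ar & watchValue;
        }
    };

    int main() {
        Ar oldAr{0x0FDB00A200080000ULL}, newAr{0x0FDB00A200090001ULL};
        Iface i;
        i.serialize(oldAr);
        i.serialize(newAr);
        assert(oldAr.fields == 1 && newAr.fields == 2);
    }
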
@@ -103,7 +103,7 @@ struct GetValueReply : public LoadBalancedReply {

	template <class Ar>
	void serialize( Ar& ar ) {
		ar & *(LoadBalancedReply*)this & value;
		serializer(ar, *(LoadBalancedReply*)this, value);
	}
};

@@ -118,7 +118,7 @@ struct GetValueRequest {

	template <class Ar>
	void serialize( Ar& ar ) {
		ar & key & version & debugID & reply;
		serializer(ar, key, version, debugID, reply);
	}
};

@@ -134,7 +134,7 @@ struct WatchValueRequest {

	template <class Ar>
	void serialize( Ar& ar ) {
		ar & key & value & version & debugID & reply;
		serializer(ar, key, value, version, debugID, reply);
	}
};

@@ -146,7 +146,7 @@ struct GetKeyValuesReply : public LoadBalancedReply {

	template <class Ar>
	void serialize( Ar& ar ) {
		ar & *(LoadBalancedReply*)this & data & version & more & arena;
		serializer(ar, *(LoadBalancedReply*)this, data, version, more, arena);
	}
};

@@ -162,7 +162,7 @@ struct GetKeyValuesRequest {
	// GetKeyValuesRequest(const KeySelectorRef& begin, const KeySelectorRef& end, Version version, int limit, int limitBytes, Optional<UID> debugID) : begin(begin), end(end), version(version), limit(limit), limitBytes(limitBytes) {}
	template <class Ar>
	void serialize( Ar& ar ) {
		ar & begin & end & version & limit & limitBytes & debugID & reply & arena;
		serializer(ar, begin, end, version, limit, limitBytes, debugID, reply, arena);
	}
};

@@ -174,7 +174,7 @@ struct GetKeyReply : public LoadBalancedReply {

	template <class Ar>
	void serialize( Ar& ar ) {
		ar & *(LoadBalancedReply*)this & sel;
		serializer(ar, *(LoadBalancedReply*)this, sel);
	}
};

@@ -189,7 +189,7 @@ struct GetKeyRequest {

	template <class Ar>
	void serialize( Ar& ar ) {
		ar & sel & version & reply & arena;
		serializer(ar, sel, version, reply, arena);
	}
};

@@ -208,7 +208,7 @@ struct GetShardStateRequest {

	template <class Ar>
	void serialize( Ar& ar ) {
		ar & keys & mode & reply;
		serializer(ar, keys, mode, reply);
	}
};

@@ -244,7 +244,7 @@ struct StorageMetrics {

	template <class Ar>
	void serialize( Ar& ar ) {
		ar & bytes & bytesPerKSecond & iosPerKSecond;
		serializer(ar, bytes, bytesPerKSecond, iosPerKSecond);
	}

	void negate() { operator*=(-1.0); }
@@ -278,7 +278,7 @@ struct WaitMetricsRequest {

	template <class Ar>
	void serialize( Ar& ar ) {
		ar & keys & min & max & reply & arena;
		serializer(ar, keys, min, max, reply, arena);
	}
};

@@ -288,7 +288,7 @@ struct SplitMetricsReply {

	template <class Ar>
	void serialize( Ar& ar ) {
		ar & splits & used;
		serializer(ar, splits, used);
	}
};

@@ -306,7 +306,7 @@ struct SplitMetricsRequest {

	template <class Ar>
	void serialize(Ar& ar) {
		ar & keys & limits & used & estimated & isLastShard & reply & arena;
		serializer(ar, keys, limits, used, estimated, isLastShard, reply, arena);
	}
};

@@ -317,7 +317,7 @@ struct GetPhysicalMetricsReply {

	template <class Ar>
	void serialize(Ar& ar) {
		ar & load & free & capacity;
		serializer(ar, load, free, capacity);
	}
};

@@ -326,7 +326,7 @@ struct GetPhysicalMetricsRequest {

	template <class Ar>
	void serialize(Ar& ar) {
		ar & reply;
		serializer(ar, reply);
	}
};

@@ -336,7 +336,7 @@ struct StorageQueuingMetricsRequest {

	template <class Ar>
	void serialize(Ar& ar) {
		ar & reply;
		serializer(ar, reply);
	}
};

@@ -349,7 +349,7 @@ struct StorageQueuingMetricsReply {

	template <class Ar>
	void serialize(Ar& ar) {
		ar & localTime & instanceID & bytesDurable & bytesInput & v & storageBytes;
		serializer(ar, localTime, instanceID, bytesDurable, bytesInput, v, storageBytes);
	}
};

@@ -0,0 +1,3 @@
#pragma once
#define FDB_VT_VERSION "${FDB_VERSION}"
#define FDB_VT_PACKAGE_NAME "${FDB_PACKAGE_NAME}"

@@ -0,0 +1,9 @@
set(FDBMONITOR_SRCS ConvertUTF.h SimpleIni.h fdbmonitor.cpp)

add_executable(fdbmonitor ${FDBMONITOR_SRCS})
# FIXME: This include directory is an ugly hack. We probably want to fix this
# as soon as we get rid of the old build system
target_include_directories(fdbmonitor PRIVATE ${CMAKE_BINARY_DIR}/fdbclient)
target_link_libraries(fdbmonitor flow)

install(TARGETS fdbmonitor DESTINATION "${FDB_LIB_DIR}/foundationdb" COMPONENT server)

@@ -257,9 +257,9 @@ private:
		try {
			TraceEvent("AFCUnderlyingOpenBegin").detail("Filename", filename);
			if(flags & IAsyncFile::OPEN_CACHED_READ_ONLY)
				flags = flags & ~IAsyncFile::OPEN_READWRITE | IAsyncFile::OPEN_READONLY;
				flags = (flags & ~IAsyncFile::OPEN_READWRITE) | IAsyncFile::OPEN_READONLY;
			else
				flags = flags & ~IAsyncFile::OPEN_READONLY | IAsyncFile::OPEN_READWRITE;
				flags = (flags & ~IAsyncFile::OPEN_READONLY) | IAsyncFile::OPEN_READWRITE;
			state Reference<IAsyncFile> f = wait( IAsyncFileSystem::filesystem()->open(filename, flags | IAsyncFile::OPEN_UNCACHED | IAsyncFile::OPEN_UNBUFFERED, mode) );
			TraceEvent("AFCUnderlyingOpenEnd").detail("Filename", filename);
			int64_t l = wait( f->size() );
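
A note on the parenthesization above: bitwise & already binds tighter than | in C++, so `(flags & ~A) | B` computes the same value as `flags & ~A | B`. The change documents the clear-then-set intent (and avoids compiler warnings about mixing & and | without parentheses). A small demonstration with flag values invented for the example:

    #include <cassert>

    enum : int { OPEN_READONLY = 0x1, OPEN_READWRITE = 0x2, OPEN_UNBUFFERED = 0x100 };

    int main() {
        int flags = OPEN_READWRITE | OPEN_UNBUFFERED;
        // Clear READWRITE, set READONLY; unrelated bits survive.
        flags = (flags & ~OPEN_READWRITE) | OPEN_READONLY;
        assert((flags & OPEN_READONLY) && !(flags & OPEN_READWRITE) && (flags & OPEN_UNBUFFERED));
    }
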
@@ -0,0 +1,61 @@
set(FDBRPC_SRCS
  ActorFuzz.actor.cpp
  AsyncFileCached.actor.h
  AsyncFileEIO.actor.h
  AsyncFileKAIO.actor.h
  AsyncFileNonDurable.actor.h
  AsyncFileReadAhead.actor.h
  AsyncFileWinASIO.actor.h
  AsyncFileCached.actor.cpp
  AsyncFileNonDurable.actor.cpp
  AsyncFileWriteChecker.cpp
  batcher.actor.h
  crc32c.cpp
  dsltest.actor.cpp
  FailureMonitor.actor.cpp
  FlowTests.actor.cpp
  FlowTransport.actor.cpp
  genericactors.actor.h
  genericactors.actor.cpp
  IAsyncFile.actor.cpp
  LoadBalance.actor.h
  Locality.cpp
  Net2FileSystem.cpp
  networksender.actor.h
  Platform.cpp
  QueueModel.cpp
  ReplicationPolicy.cpp
  ReplicationTypes.cpp
  ReplicationUtils.cpp
  sim2.actor.cpp
  sim_validation.cpp
  TLSConnection.actor.cpp
  TraceFileIO.cpp
  # C files
  libcoroutine/Common.c
  libcoroutine/context.c
  libcoroutine/Coro.c
  libeio/eio.c
  zlib/adler32.c
  zlib/crc32.c
  zlib/deflate.c
  zlib/gzclose.c
  zlib/gzlib.c
  zlib/gzread.c
  zlib/gzwrite.c
  zlib/infback.c
  zlib/inffast.c
  zlib/inflate.c
  zlib/inftrees.c
  zlib/trees.c
  zlib/zutil.c)

if(APPLE)
  list(APPEND FDBRPC_SRCS libcoroutine/asm.S libcoroutine/context.c)
endif()

actor_set(FDBRPC_BUILD "${FDBRPC_SRCS}")
add_library(fdbrpc STATIC ${FDBRPC_BUILD})
actor_compile(fdbrpc "${FDBRPC_SRCS}")
target_include_directories(fdbrpc PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/libeio)
target_link_libraries(fdbrpc PUBLIC flow)

@@ -26,6 +26,7 @@
#include "flow/IRandom.h"
#include <vector>
#include <algorithm>
#include <cmath>

template <class T>
class ContinuousSample {

@@ -1,74 +0,0 @@
/*
 * EndpointGroup.h
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef FLOW_ENDPOINT_GROUP_H
#define FLOW_ENDPOINT_GROUP_H
#pragma once

#include "fdbrpc/flow.h"

// EndpointGroup makes it easier to implement backward compatibility for interface serialization
// It also provides a central place to implement more compact serialization for a group of related endpoints in the future.

/* Typical usage:

	template <class Ar>
	void serialize(Ar& ar) {
		auto endpoints = endpointGroup(ar);
		endpoints.require( ar.protocolVersion() <= currentProtocolVersion );
		endpoints & apple & banana;
		endpoints.require( ar.protocolVersion() >= 0xabc );  // Following endpoints added in this version
		endpoints & cherry;
		endpoints.require( ar.protocolVersion() >= 0xdef );  // .. and then some more were added
		endpoints & date;
	}

*/

template <class Ar>
struct EndpointGroup : NonCopyable {
	Ar& ar;
	bool enabled;

	explicit EndpointGroup( Ar& ar ) : ar(ar), enabled(true) {
		ASSERT( ar.protocolVersion() != 0 );
	}
	EndpointGroup( EndpointGroup&& g ) : ar(g.ar), enabled(g.enabled) {}

	EndpointGroup& require( bool condition ) {
		enabled = enabled && condition;
		return *this;
	}

	template <class T>
	EndpointGroup& operator & (PromiseStream<T>& stream) {
		if (enabled)
			ar & stream;
		else if (Ar::isDeserializing)
			stream.sendError( incompatible_protocol_version() );
		return *this;
	}
};

template <class Ar>
EndpointGroup<Ar> endpointGroup( Ar& ar ) { return EndpointGroup<Ar>(ar); }

#endif

@@ -74,7 +74,7 @@ struct FailureStatus {
	bool operator != (FailureStatus const& r) const { return failed != r.failed; }
	template <class Ar>
	void serialize(Ar& ar) {
		ar & failed;
		serializer(ar, failed);
	}
};

@@ -71,7 +71,7 @@ struct LoadBalancedReply {

	template <class Ar>
	void serialize(Ar &ar) {
		ar & penalty;
		serializer(ar, penalty);
	}
};

@@ -28,7 +28,7 @@ struct ProcessClass {
	// This enum is stored in restartInfo.ini for upgrade tests, so be very careful about changing the existing items!
	enum ClassType { UnsetClass, StorageClass, TransactionClass, ResolutionClass, TesterClass, ProxyClass, MasterClass, StatelessClass, LogClass, ClusterControllerClass, LogRouterClass, InvalidClass = -1 };
	enum Fitness { BestFit, GoodFit, UnsetFit, OkayFit, WorstFit, ExcludeFit, NeverAssign }; //cannot be larger than 7 because of leader election mask
	enum ClusterRole { Storage, TLog, Proxy, Master, Resolver, LogRouter, ClusterController };
	enum ClusterRole { Storage, TLog, Proxy, Master, Resolver, LogRouter, ClusterController, NoRole };
	enum ClassSource { CommandLineSource, AutoSource, DBSource, InvalidSource = -1 };
	int16_t _class;
	int16_t _source;
@@ -110,7 +110,7 @@ public:

	template <class Ar>
	void serialize(Ar& ar) {
		ar & _class & _source;
		serializer(ar, _class, _source);
	}
};

@@ -188,10 +188,10 @@ public:
		Standalone<StringRef> key;
		Optional<Standalone<StringRef>> value;
		uint64_t mapSize = (uint64_t)_data.size();
		ar & mapSize;
		serializer(ar, mapSize);
		if (ar.isDeserializing) {
			for (size_t i = 0; i < mapSize; i++) {
				ar & key & value;
				serializer(ar, key, value);
				_data[key] = value;
			}
		}
@@ -199,24 +199,24 @@ public:
			for (auto it = _data.begin(); it != _data.end(); it++) {
				key = it->first;
				value = it->second;
				ar & key & value;
				serializer(ar, key, value);
			}
		}
	}
	else {
		ASSERT(ar.isDeserializing);
		UID zoneId, dcId, processId;
		ar & zoneId & dcId;
		serializer(ar, zoneId, dcId);
		set(keyZoneId, Standalone<StringRef>(zoneId.toString()));
		set(keyDcId, Standalone<StringRef>(dcId.toString()));

		if (ar.protocolVersion() >= 0x0FDB00A340000001LL) {
			ar & processId;
			serializer(ar, processId);
			set(keyProcessId, Standalone<StringRef>(processId.toString()));
		}
		else {
			int _machineClass = ProcessClass::UnsetClass;
			ar & _machineClass;
			serializer(ar, _machineClass);
		}
	}
}
@@ -258,7 +258,7 @@ struct ProcessData {

	template <class Ar>
	void serialize(Ar& ar) {
		ar & locality & processClass & address;
		serializer(ar, locality, processClass, address);
	}

	struct sort_by_address {

@@ -43,7 +43,7 @@ struct PerfMetric {

	template <class Ar>
	void serialize( Ar& ar ) {
		ar & m_name & m_format_code & m_value & m_averaged;
		serializer(ar, m_name, m_format_code, m_value, m_averaged);
	}

private:
@@ -106,4 +106,4 @@ struct GlobalCounters {

extern GlobalCounters g_counters;

#endif
#endif

@@ -136,7 +136,7 @@ struct PolicyAcross : IReplicationPolicy, public ReferenceCounted<PolicyAcross>

	template <class Ar>
	void serialize(Ar& ar) {
		ar & _attribKey & _count;
		serializer(ar, _attribKey, _count);
		serializeReplicationPolicy(ar, _policy);
	}

@@ -207,7 +207,7 @@ struct PolicyAnd : IReplicationPolicy, public ReferenceCounted<PolicyAnd> {
	template <class Ar>
	void serialize(Ar& ar) {
		int count = _policies.size();
		ar & count;
		serializer(ar, count);
		_policies.resize(count);
		for(int i = 0; i < count; i++) {
			serializeReplicationPolicy(ar, _policies[i]);
@@ -233,7 +233,7 @@ template <class Ar>
void serializeReplicationPolicy(Ar& ar, IRepPolicyRef& policy) {
	if(Ar::isDeserializing) {
		StringRef name;
		ar & name;
		serializer(ar, name);

		if(name == LiteralStringRef("One")) {
			PolicyOne* pointer = new PolicyOne();
@@ -261,7 +261,7 @@ void serializeReplicationPolicy(Ar& ar, IRepPolicyRef& policy) {
	else {
		std::string name = policy ? policy->name() : "None";
		Standalone<StringRef> nameRef = StringRef(name);
		ar & nameRef;
		serializer(ar, nameRef);
		if(name == "One") {
			((PolicyOne*)policy.getPtr())->serialize(ar);
		}

@@ -22,6 +22,7 @@
#define FLOW_REPLICATION_TYPES_H
#pragma once

#include <sstream>
#include "flow/flow.h"
#include "fdbrpc/Locality.h"

@@ -142,14 +143,15 @@ struct LocalityRecord : public ReferenceCounted<LocalityRecord> {
	}

	std::string toString() {
		std::string str = "KeyValueArraySize:" + _dataMap->_keyvaluearray.size();
		std::stringstream ss;
		ss << "KeyValueArraySize:" << _dataMap->_keyvaluearray.size();
		for (int i = 0; i < _dataMap->size(); ++i) {
			AttribRecord attribRecord = _dataMap->_keyvaluearray[i]; // first is key, second is value
			str += " KeyValueArrayIndex:" + std::to_string(i) + " Key:" + std::to_string(attribRecord.first._id) +
				" Value:" + std::to_string(attribRecord.second._id);
			ss << " KeyValueArrayIndex:" << i << " Key:" << attribRecord.first._id <<
				" Value:" << attribRecord.second._id;
		}

		return str;
		return ss.str();
	}
};
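
The toString() rewrite above fixes a real bug, not just style: for an integer n, the expression `"KeyValueArraySize:" + n` is pointer arithmetic on the string literal, silently dropping the first n characters instead of appending the number. A minimal reproduction of the bug and the stringstream fix:

    #include <cstddef>
    #include <iostream>
    #include <sstream>
    #include <string>

    int main() {
        std::size_t n = 5;
        // Pointer arithmetic, not concatenation: prints "lueArraySize:".
        std::string bad = "KeyValueArraySize:" + n;
        std::cout << bad << '\n';

        // The replacement streams the number instead: "KeyValueArraySize:5".
        std::stringstream ss;
        ss << "KeyValueArraySize:" << n;
        std::cout << ss.str() << '\n';
    }
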
@@ -23,6 +23,7 @@
#pragma once

#include "flow/flow.h"
#include <cmath>

struct Smoother {
	// Times (t) are expected to be nondecreasing
@@ -90,4 +91,4 @@ struct TimerSmoother {
	double time, total, estimate;
};

#endif
#endif

@@ -46,10 +46,10 @@ static int send_func(void* ctx, const uint8_t* buf, int len) {
		int w = conn->conn->write( &sb );
		return w;
	} catch ( Error& e ) {
		TraceEvent("TLSConnectionSendError", conn->getDebugID()).error(e);
		TraceEvent("TLSConnectionSendError", conn->getDebugID()).error(e).suppressFor(1.0);
		return -1;
	} catch ( ... ) {
		TraceEvent("TLSConnectionSendError", conn->getDebugID()).error( unknown_error() );
		TraceEvent("TLSConnectionSendError", conn->getDebugID()).error( unknown_error() ).suppressFor(1.0);
		return -1;
	}
}
@@ -62,10 +62,10 @@ static int recv_func(void* ctx, uint8_t* buf, int len) {
		int r = conn->conn->read( buf, buf + len );
		return r;
	} catch ( Error& e ) {
		TraceEvent("TLSConnectionRecvError", conn->getDebugID()).error(e);
		TraceEvent("TLSConnectionRecvError", conn->getDebugID()).error(e).suppressFor(1.0);
		return -1;
	} catch ( ... ) {
		TraceEvent("TLSConnectionRecvError", conn->getDebugID()).error( unknown_error() );
		TraceEvent("TLSConnectionRecvError", conn->getDebugID()).error( unknown_error() ).suppressFor(1.0);
		return -1;
	}
}
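
The suppressFor(1.0) calls added above rate-limit the TLS error trace events, which can otherwise fire on every failed read or write of a broken connection. A sketch of the idea behind such suppression; this is an illustration only, not FDB's TraceEvent implementation:

    #include <chrono>
    #include <iostream>

    struct SuppressedLogger {
        std::chrono::steady_clock::time_point last{};  // epoch, so the first call always logs
        double intervalSec;
        explicit SuppressedLogger(double s) : intervalSec(s) {}
        void log(const char* msg) {
            auto now = std::chrono::steady_clock::now();
            if (std::chrono::duration<double>(now - last).count() >= intervalSec) {
                std::cerr << msg << '\n';
                last = now;
            }
        }
    };

    int main() {
        SuppressedLogger logger(1.0);
        for (int i = 0; i < 1000000; ++i)
            logger.log("TLSConnectionSendError");  // emitted at most once per second
    }
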
@@ -925,7 +925,7 @@ struct AddReply {

	template <class Ar>
	void serialize(Ar& ar) {
		ar & sum;
		serializer(ar, sum);
	}
};

@@ -938,7 +938,7 @@ struct AddRequest {

	template <class Ar>
	void serialize(Ar& ar) {
		ar & a & b & reply;
		serializer(ar, a, b, reply);
	}
};

@@ -74,7 +74,6 @@
    <ClInclude Include="AsyncFileWriteChecker.h" />
    <ClInclude Include="ContinuousSample.h" />
    <ClInclude Include="crc32c.h" />
    <ClInclude Include="EndpointGroup.h" />
    <ClInclude Include="FailureMonitor.h" />
    <ActorCompiler Include="LoadBalance.actor.h">
      <EnableCompile>false</EnableCompile>

@@ -137,7 +137,6 @@
    <ClInclude Include="Platform.h" />
    <ClInclude Include="ActorFuzz.h" />
    <ClInclude Include="ContinuousSample.h" />
    <ClInclude Include="EndpointGroup.h" />
    <ClInclude Include="fdbrpc.h" />
    <ClInclude Include="MultiInterface.h" />
    <ClInclude Include="PerfMetric.h" />

@@ -148,6 +148,7 @@ static void applyMetadataMutations(UID const& dbgid, Arena &arena, VectorRef<Mut
		}
	}
	else if (m.param1.startsWith(configKeysPrefix) || m.param1 == coordinatorsKey) {
		if(Optional<StringRef>(m.param2) != txnStateStore->readValue(m.param1).get().cast_to<StringRef>()) { // FIXME: Make this check more specific, here or by reading configuration whenever there is a change
		if(Optional<StringRef>(m.param2) != txnStateStore->readValue(m.param1).get().castTo<StringRef>()) { // FIXME: Make this check more specific, here or by reading configuration whenever there is a change
			if(!m.param1.startsWith( excludedServersPrefix ) && m.param1 != excludedServersVersionKey) {
				auto t = txnStateStore->readValue(m.param1).get();
				TraceEvent("MutationRequiresRestart", dbgid).detail("M", m.toString()).detail("PrevValue", t.present() ? printable(t.get()) : "(none)").detail("ToCommit", toCommit!=NULL);

@@ -0,0 +1,405 @@
set(FDBSERVER_SRCS
  ApplyMetadataMutation.h
  ClusterController.actor.cpp
  ClusterRecruitmentInterface.h
  ConflictSet.h
  CoordinatedState.actor.cpp
  CoordinatedState.h
  Coordination.actor.cpp
  CoordinationInterface.h
  CoroFlow.actor.cpp
  CoroFlow.h
  DataDistribution.actor.cpp
  DataDistribution.h
  DataDistributionQueue.actor.cpp
  DataDistributionTracker.actor.cpp
  DBCoreState.h
  DiskQueue.actor.cpp
  fdbserver.actor.cpp
  IDiskQueue.h
  IKeyValueStore.h
  IPager.h
  IVersionedStore.h
  IndirectShadowPager.actor.cpp
  IndirectShadowPager.h
  KeyValueStoreCompressTestData.actor.cpp
  KeyValueStoreMemory.actor.cpp
  KeyValueStoreSQLite.actor.cpp
  Knobs.cpp
  Knobs.h
  LeaderElection.actor.cpp
  LeaderElection.h
  LogProtocolMessage.h
  LogRouter.actor.cpp
  LogSystem.h
  LogSystemConfig.h
  LogSystemDiskQueueAdapter.actor.cpp
  LogSystemDiskQueueAdapter.h
  LogSystemPeekCursor.actor.cpp
  MasterInterface.h
  MasterProxyServer.actor.cpp
  masterserver.actor.cpp
  MemoryPager.actor.cpp
  MemoryPager.h
  MoveKeys.actor.cpp
  MoveKeys.h
  networktest.actor.cpp
  NetworkTest.h
  OldTLogServer.actor.cpp
  Orderer.actor.h
  pubsub.actor.cpp
  pubsub.h
  QuietDatabase.actor.cpp
  QuietDatabase.h
  Ratekeeper.actor.cpp
  Ratekeeper.h
  RecoveryState.h
  Restore.actor.cpp
  RestoreInterface.h
  Resolver.actor.cpp
  ResolverInterface.h
  ServerDBInfo.h
  SimulatedCluster.actor.cpp
  SimulatedCluster.h
  SkipList.cpp
  sqlite/btree.h
  sqlite/hash.h
  sqlite/sqlite3.h
  sqlite/sqlite3ext.h
  sqlite/sqliteInt.h
  sqlite/sqliteLimit.h
  sqlite/sqlite3.amalgamation.c
  Status.actor.cpp
  Status.h
  StorageMetrics.actor.h
  StorageMetrics.h
  storageserver.actor.cpp
  TagPartitionedLogSystem.actor.cpp
  template_fdb.h
  tester.actor.cpp
  TesterInterface.h
  TLogInterface.h
  TLogServer.actor.cpp
  VersionedBTree.actor.cpp
  VFSAsync.cpp
  WaitFailure.actor.cpp
  WaitFailure.h
  worker.actor.cpp
  WorkerInterface.h
  workloads/ApiCorrectness.actor.cpp
  workloads/ApiWorkload.actor.cpp
  workloads/ApiWorkload.h
  workloads/AsyncFile.actor.h
  workloads/AsyncFile.cpp
  workloads/AsyncFileCorrectness.actor.cpp
  workloads/AsyncFileRead.actor.cpp
  workloads/AsyncFileWrite.actor.cpp
  workloads/AtomicOps.actor.cpp
  workloads/AtomicOpsApiCorrectness.actor.cpp
  workloads/AtomicRestore.actor.cpp
  workloads/AtomicSwitchover.actor.cpp
  workloads/BackgroundSelectors.actor.cpp
  workloads/BackupCorrectness.actor.cpp
  workloads/BackupToDBAbort.actor.cpp
  workloads/BackupToDBCorrectness.actor.cpp
  workloads/BackupToDBUpgrade.actor.cpp
  workloads/BulkLoad.actor.cpp
  workloads/BulkSetup.actor.h
  workloads/ChangeConfig.actor.cpp
  workloads/ClientTransactionProfileCorrectness.actor.cpp
  workloads/CommitBugCheck.actor.cpp
  workloads/ConfigureDatabase.actor.cpp
  workloads/ConflictRange.actor.cpp
  workloads/ConsistencyCheck.actor.cpp
  workloads/CpuProfiler.actor.cpp
  workloads/Cycle.actor.cpp
  workloads/DDBalance.actor.cpp
  workloads/DDMetrics.actor.cpp
  workloads/DiskDurability.actor.cpp
  workloads/DiskDurabilityTest.actor.cpp
  workloads/DummyWorkload.actor.cpp
  workloads/FastTriggeredWatches.actor.cpp
  workloads/FileSystem.actor.cpp
  workloads/Fuzz.cpp
  workloads/FuzzApiCorrectness.actor.cpp
  workloads/Increment.actor.cpp
  workloads/IndexScan.actor.cpp
  workloads/Inventory.actor.cpp
  workloads/KVStoreTest.actor.cpp
  workloads/LockDatabase.actor.cpp
  workloads/LogMetrics.actor.cpp
  workloads/LowLatency.actor.cpp
  workloads/MachineAttrition.actor.cpp
  workloads/MemoryKeyValueStore.cpp
  workloads/MemoryKeyValueStore.h
  workloads/MemoryLifetime.actor.cpp
  workloads/MetricLogging.actor.cpp
  workloads/Performance.actor.cpp
  workloads/Ping.actor.cpp
  workloads/PubSubMultiples.actor.cpp
  workloads/QueuePush.actor.cpp
  workloads/RandomClogging.actor.cpp
  workloads/RandomMoveKeys.actor.cpp
  workloads/RandomSelector.actor.cpp
  workloads/ReadWrite.actor.cpp
  workloads/RemoveServersSafely.actor.cpp
  workloads/Rollback.actor.cpp
  workloads/RyowCorrectness.actor.cpp
  workloads/RYWDisable.actor.cpp
  workloads/RYWPerformance.actor.cpp
  workloads/SaveAndKill.actor.cpp
  workloads/SelectorCorrectness.actor.cpp
  workloads/Serializability.actor.cpp
  workloads/Sideband.actor.cpp
  workloads/SlowTaskWorkload.actor.cpp
  workloads/StatusWorkload.actor.cpp
  workloads/Storefront.actor.cpp
  workloads/StreamingRead.actor.cpp
  workloads/TargetedKill.actor.cpp
  workloads/TaskBucketCorrectness.actor.cpp
  workloads/ThreadSafety.actor.cpp
  workloads/Throughput.actor.cpp
  workloads/TimeKeeperCorrectness.actor.cpp
  workloads/UnitPerf.actor.cpp
  workloads/UnitTests.actor.cpp
  workloads/Unreadable.actor.cpp
  workloads/VersionStamp.actor.cpp
  workloads/WatchAndWait.actor.cpp
  workloads/Watches.actor.cpp
  workloads/WorkerErrors.actor.cpp
  workloads/workloads.h
  workloads/WriteBandwidth.actor.cpp
  workloads/WriteDuringRead.actor.cpp)

file(MAKE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/workloads)

actor_set(FDBSERVER_BUILD "${FDBSERVER_SRCS}")
add_executable(fdbserver ${FDBSERVER_BUILD})
actor_compile(fdbserver "${FDBSERVER_SRCS}")
target_include_directories(fdbserver PRIVATE
  ${CMAKE_CURRENT_BINARY_DIR}/workloads
  ${CMAKE_CURRENT_SOURCE_DIR}/workloads)
target_link_libraries(fdbserver PRIVATE fdbclient)

install(TARGETS fdbserver DESTINATION ${FDB_SBIN_DIR} COMPONENT server)

################################################################################
# Testing
################################################################################

set(ENABLE_BUGGIFY OFF CACHE BOOL "Enable buggify for tests")

set(TestRunner "${PROJECT_SOURCE_DIR}/tests/TestRunner/TestRunner.py")

configure_file(${PROJECT_SOURCE_DIR}/tests/CTestCustom.ctest ${PROJECT_BINARY_DIR}/CTestCustom.ctest @ONLY)

# This will add a test that can be run by ctest. This macro can be called
# with the following arguments:
#
# - UNIT will run the test as a unit test (it won't bring up a whole simulated system)
# - TEST_NAME followed by the name of the test
# - TIMEOUT followed by a timeout - reaching the timeout makes the test fail (default is
#   3600 seconds). The timeout will be reached whenever it ran either too long in simulated
#   time or in real time - whatever is smaller.
# - TEST_FILES followed by typically one test file. The test runner will run
#   all these tests in serialized order and within the same directory. This is
#   useful for restart tests
function(add_fdb_test)
  set(options UNIT)
  set(oneValueArgs TEST_NAME TIMEOUT)
  set(multiValueArgs TEST_FILES)
  cmake_parse_arguments(ADD_FDB_TEST "${options}" "${oneValueArgs}" "${multiValueArgs}" "${ARGN}")
  set(this_test_timeout ${ADD_FDB_TEST_TIMEOUT})
  if(NOT this_test_timeout)
    set(this_test_timeout 3600)
  endif()
  set(test_type "simulation")
  list(LENGTH ADD_FDB_TEST_TEST_FILES NUM_TEST_FILES)
  if(ADD_FDB_TEST_UNIT)
    set(test_type "test")
  endif()
  list(GET ADD_FDB_TEST_TEST_FILES 0 first_file)
  get_filename_component(test_name ${first_file} NAME_WE)
  if (NOT "${ADD_FDB_TEST_TEST_NAME}" STREQUAL "")
    set(test_name ${ADD_FDB_TEST_TEST_NAME})
  endif()
  if(ADD_FDB_TEST_UNIT)
    message(STATUS
      "ADDING UNIT TEST ${test_name}")
  else()
    message(STATUS
      "ADDING SIMULATOR TEST ${test_name}")
  endif()
  set(test_files "")
  foreach(curr_test_file ${ADD_FDB_TEST_TEST_FILES})
    set(test_files "${test_files} ${curr_test_file}")
  endforeach()
  set(BUGGIFY_OPTION "")
  if (ENABLE_BUGGIFY)
    set(BUGGIFY_OPTION "-B")
  endif()
  add_test(NAME ${test_name}
    COMMAND ${PYTHON_EXECUTABLE} ${TestRunner}
    -n ${test_name}
    -b ${PROJECT_BINARY_DIR}
    -t ${test_type}
    --seed ${SEED}
    ${BUGGIFY_OPTION}
    ${ADD_FDB_TEST_TEST_FILES}
    WORKING_DIRECTORY ${PROJECT_BINARY_DIR})
  get_filename_component(test_dir_full ${first_file} DIRECTORY)
  get_filename_component(test_dir ${test_dir_full} NAME)
  set_tests_properties(${test_name} PROPERTIES TIMEOUT ${this_test_timeout} LABELS "${test_dir}")
endfunction()

#add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/AsyncFileCorrectness.txt UNIT)
#add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/AsyncFileMix.txt UNIT)
#add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/AsyncFileRead.txt UNIT)
#add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/AsyncFileReadRandom.txt UNIT)
#add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/AsyncFileWrite.txt UNIT)
#add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/BackupContainers.txt)
#add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/BandwidthThrottle.txt)
#add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/BigInsert.txt)
#add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/BlobStore.txt)
#add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/ConsistencyCheck.txt)
#add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/DiskDurability.txt)
#add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/FileSystem.txt)
#add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/Happy.txt)
#add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/IncrementalDelete.txt)
#add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/KVStoreMemTest.txt UNIT)
#add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/KVStoreReadMostly.txt UNIT)
#add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/KVStoreTest.txt UNIT)
#add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/KVStoreTestRead.txt UNIT)
#add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/KVStoreTestWrite.txt UNIT)
#add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/KVStoreValueSize.txt UNIT)
#add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/LayerStatusMerge.txt)
#add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/PureNetwork.txt)
#add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/RRW2500.txt)
#add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/RandomRead.txt)
#add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/RandomReadWrite.txt)
#add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/ReadAbsent.txt)
#add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/ReadHalfAbsent.txt)
#add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/SlowTask.txt)
#add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/SpecificUnitTest.txt)
#add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/StreamingWrite.txt)
#add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/ThreadSafety.txt)
#add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/TraceEventMetrics.txt)
#add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/default.txt)
#add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/errors.txt)
#add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/fail.txt)
#add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/killall.txt)
#add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/latency.txt)
#add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/performance-fs.txt)
#add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/performance.txt)
#add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/ping.TXT)
#add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/pingServers.TXT)
#add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/pt.TXT)
#add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/randomSelector.txt)
#add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/selectorCorrectness.txt)
add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/fast/AtomicBackupCorrectness.txt)
add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/fast/AtomicBackupToDBCorrectness.txt)
add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/fast/AtomicOps.txt)
add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/fast/AtomicOpsApiCorrectness.txt)
add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/fast/BackupCorrectness.txt)
add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/fast/BackupCorrectnessClean.txt)
add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/fast/BackupToDBCorrectness.txt)
add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/fast/BackupToDBCorrectnessClean.txt)
add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/fast/CloggedSideband.txt)
add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/fast/ConstrainedRandomSelector.txt)
add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/fast/CycleAndLock.txt)
add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/fast/CycleTest.txt)
add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/fast/FuzzApiCorrectness.txt)
add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/fast/FuzzApiCorrectnessClean.txt)
add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/fast/IncrementTest.txt)
add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/fast/InventoryTestAlmostReadOnly.txt)
add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/fast/InventoryTestSomeWrites.txt)
add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/fast/LongStackWriteDuringRead.txt)
add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/fast/LowLatency.txt)
add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/fast/MemoryLifetime.txt)
add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/fast/MoveKeysCycle.txt)
add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/fast/RandomSelector.txt)
add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/fast/RandomUnitTests.txt)
add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/fast/SelectorCorrectness.txt)
add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/fast/Sideband.txt)
add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/fast/SidebandWithStatus.txt)
add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/fast/SwizzledRollbackSideband.txt)
add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/fast/SystemRebootTestCycle.txt)
add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/fast/TaskBucketCorrectness.txt)
add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/fast/TimeKeeperCorrectness.txt)
add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/fast/Unreadable.txt)
add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/fast/VersionStamp.txt)
add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/fast/Watches.txt)
add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/fast/WriteDuringRead.txt)
add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/fast/WriteDuringReadClean.txt)
add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/rare/CheckRelocation.txt)
add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/rare/ClogUnclog.txt)
add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/rare/CloggedCycleWithKills.txt)
add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/rare/ConflictRangeCheck.txt)
add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/rare/ConflictRangeRYOWCheck.txt)
add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/rare/CycleRollbackClogged.txt)
add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/rare/CycleWithKills.txt)
add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/rare/FuzzTest.txt)
add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/rare/InventoryTestHeavyWrites.txt)
add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/rare/LargeApiCorrectness.txt)
add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/rare/LargeApiCorrectnessStatus.txt)
add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/rare/RYWDisable.txt)
add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/rare/RandomReadWriteTest.txt)
add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/rare/SwizzledLargeApiCorrectness.txt)
add_fdb_test(
  TEST_FILES ${PROJECT_SOURCE_DIR}/tests/restarting/CycleTestRestart-1.txt
  ${PROJECT_SOURCE_DIR}/tests/restarting/CycleTestRestart-2.txt
  TEST_NAME CycleTestRestart)
add_fdb_test(
  TEST_FILES ${PROJECT_SOURCE_DIR}/tests/restarting/StorefrontTestRestart-1.txt
  ${PROJECT_SOURCE_DIR}/tests/restarting/StorefrontTestRestart-2.txt
  TEST_NAME StorefrontTestRestart)
add_fdb_test(
  TEST_FILES ${PROJECT_SOURCE_DIR}/tests/restarting/from_5.1.7/DrUpgradeRestart-1.txt
  ${PROJECT_SOURCE_DIR}/tests/restarting/from_5.1.7/DrUpgradeRestart-2.txt
  TEST_NAME DrUpgradeRestart)
add_fdb_test(
  TEST_FILES ${PROJECT_SOURCE_DIR}/tests/restarting/from_5.2.0/ClientTransactionProfilingCorrectness-1.txt
  ${PROJECT_SOURCE_DIR}/tests/restarting/from_5.2.0/ClientTransactionProfilingCorrectness-2.txt
  TEST_NAME ClientTransactionProfilingCorrectness)
add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/slow/ApiCorrectness.txt)
add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/slow/ApiCorrectnessAtomicRestore.txt)
add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/slow/ApiCorrectnessSwitchover.txt)
add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/slow/ClogWithRollbacks.txt)
add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/slow/CloggedCycleTest.txt)
add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/slow/CloggedStorefront.txt)
add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/slow/CommitBug.txt)
add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/slow/ConfigureTest.txt)
add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/slow/CycleRollbackPlain.txt)
add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/slow/DDBalanceAndRemove.txt)
add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/slow/DDBalanceAndRemoveStatus.txt)
add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/slow/FastTriggeredWatches.txt)
add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/slow/LowLatencyWithFailures.txt)
add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/slow/MoveKeysClean.txt)
add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/slow/MoveKeysSideband.txt)
add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/slow/RyowCorrectness.txt)
add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/slow/Serializability.txt)
add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/slow/SharedBackupCorrectness.txt)
add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/slow/SharedBackupToDBCorrectness.txt)
add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/slow/StorefrontTest.txt)
add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/slow/SwizzledApiCorrectness.txt)
add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/slow/SwizzledCycleTest.txt)
add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/slow/SwizzledDdBalance.txt)
add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/slow/SwizzledRollbackTimeLapse.txt)
add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/slow/SwizzledRollbackTimeLapseIncrement.txt)
add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/slow/VersionStampBackupToDB.txt)
add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/slow/VersionStampSwitchover.txt)
add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/slow/WriteDuringReadAtomicRestore.txt)
add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/slow/WriteDuringReadSwitchover.txt)
add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/slow/ddbalance.txt)
add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/status/invalid_proc_addresses.txt)
add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/status/local_6_machine_no_replicas_remain.txt)
add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/status/separate_1_of_3_coordinators_remain.txt)
add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/status/separate_2_of_3_coordinators_remain.txt)
add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/status/separate_cannot_write_cluster_file.txt)
add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/status/separate_idle.txt)
add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/status/separate_initializing.txt)
add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/status/separate_no_coordinators.txt)
add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/status/separate_no_database.txt)
add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/status/separate_no_servers.txt)
add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/status/separate_not_enough_servers.txt)
add_fdb_test(TEST_FILES ${PROJECT_SOURCE_DIR}/tests/status/single_process_too_many_config_params.txt)

@@ -438,17 +438,18 @@ public:
	struct RoleFitness {
		ProcessClass::Fitness bestFit;
		ProcessClass::Fitness worstFit;
		ProcessClass::ClusterRole role;
		int count;

		RoleFitness(int bestFit, int worstFit, int count) : bestFit((ProcessClass::Fitness)bestFit), worstFit((ProcessClass::Fitness)worstFit), count(count) {}
		RoleFitness(int bestFit, int worstFit, int count, ProcessClass::ClusterRole role) : bestFit((ProcessClass::Fitness)bestFit), worstFit((ProcessClass::Fitness)worstFit), count(count), role(role) {}

		RoleFitness(int fitness, int count) : bestFit((ProcessClass::Fitness)fitness), worstFit((ProcessClass::Fitness)fitness), count(count) {}
		RoleFitness(int fitness, int count, ProcessClass::ClusterRole role) : bestFit((ProcessClass::Fitness)fitness), worstFit((ProcessClass::Fitness)fitness), count(count), role(role) {}

		RoleFitness() : bestFit(ProcessClass::NeverAssign), worstFit(ProcessClass::NeverAssign), count(0) {}
		RoleFitness() : bestFit(ProcessClass::NeverAssign), worstFit(ProcessClass::NeverAssign), role(ProcessClass::NoRole), count(0) {}

		RoleFitness(RoleFitness first, RoleFitness second) : bestFit(std::min(first.worstFit, second.worstFit)), worstFit(std::max(first.worstFit, second.worstFit)), count(first.count + second.count) {}
		RoleFitness(RoleFitness first, RoleFitness second, ProcessClass::ClusterRole role) : bestFit(std::min(first.worstFit, second.worstFit)), worstFit(std::max(first.worstFit, second.worstFit)), count(first.count + second.count), role(role) { }

		RoleFitness( vector<std::pair<WorkerInterface, ProcessClass>> workers, ProcessClass::ClusterRole role ) {
		RoleFitness( vector<std::pair<WorkerInterface, ProcessClass>> workers, ProcessClass::ClusterRole role ) : role(role) {
			worstFit = ProcessClass::BestFit;
			bestFit = ProcessClass::NeverAssign;
			for(auto it : workers) {
@@ -459,7 +460,7 @@ public:
			count = workers.size();
		}

		RoleFitness( std::vector<ProcessClass> classes, ProcessClass::ClusterRole role ) {
		RoleFitness( std::vector<ProcessClass> classes, ProcessClass::ClusterRole role ) : role(role) {
			worstFit = ProcessClass::BestFit;
			bestFit = ProcessClass::NeverAssign;
			for(auto it : classes) {
@@ -472,7 +473,8 @@ public:

		bool operator < (RoleFitness const& r) const {
			if (worstFit != r.worstFit) return worstFit < r.worstFit;
			if (bestFit != r.bestFit) return bestFit < r.bestFit;
			// FIXME: TLog recruitment process does not guarantee the best fit is not worsened.
			if (role != ProcessClass::TLog && role != ProcessClass::LogRouter && bestFit != r.bestFit) return bestFit < r.bestFit;
			return count > r.count;
		}
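
The comparator change above skips the bestFit tiebreak for TLog and LogRouter, since recruitment for those roles can legitimately worsen bestFit (see the FIXME in the diff). A compilable sketch with simplified enums; the field names follow the diff, everything else is illustrative:

    #include <cassert>

    enum Fitness { BestFit, GoodFit, UnsetFit, OkayFit, WorstFit, ExcludeFit, NeverAssign };
    enum Role { Storage, TLog, Proxy, LogRouter, NoRole };

    struct RoleFitness {
        Fitness bestFit, worstFit;
        Role role;
        int count;
        bool operator<(const RoleFitness& r) const {
            if (worstFit != r.worstFit) return worstFit < r.worstFit;
            // bestFit is ignored for TLog/LogRouter roles.
            if (role != TLog && role != LogRouter && bestFit != r.bestFit) return bestFit < r.bestFit;
            return count > r.count;  // more recruits of equal fitness rank better
        }
    };

    int main() {
        RoleFitness a{BestFit, GoodFit, TLog, 3}, b{GoodFit, GoodFit, TLog, 3};
        assert(!(a < b) && !(b < a));  // equivalent: bestFit tiebreak skipped for TLog
    }
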
@ -489,7 +491,7 @@ public:
|
|||
|
||||
bool operator == (RoleFitness const& r) const { return worstFit == r.worstFit && bestFit == r.bestFit && count == r.count; }
|
||||
|
||||
std::string toString() const { return format("%d %d &d", bestFit, worstFit, count); }
|
||||
std::string toString() const { return format("%d %d %d", bestFit, worstFit, count); }
|
||||
};
|
||||
|
||||
std::set<Optional<Standalone<StringRef>>> getDatacenters( DatabaseConfiguration const& conf, bool checkStable = false ) {
|
||||
|
@@ -532,8 +534,8 @@ public:
		}

		if( now() - remoteStartTime.get() < SERVER_KNOBS->WAIT_FOR_GOOD_REMOTE_RECRUITMENT_DELAY &&
-			( ( RoleFitness(SERVER_KNOBS->EXPECTED_TLOG_FITNESS, req.configuration.getDesiredRemoteLogs()).betterCount(RoleFitness(remoteLogs, ProcessClass::TLog)) ) ||
-			  ( RoleFitness(SERVER_KNOBS->EXPECTED_LOG_ROUTER_FITNESS, req.logRouterCount).betterCount(RoleFitness(logRouters, ProcessClass::LogRouter)) ) ) ) {
+			( ( RoleFitness(SERVER_KNOBS->EXPECTED_TLOG_FITNESS, req.configuration.getDesiredRemoteLogs(), ProcessClass::TLog).betterCount(RoleFitness(remoteLogs, ProcessClass::TLog)) ) ||
+			  ( RoleFitness(SERVER_KNOBS->EXPECTED_LOG_ROUTER_FITNESS, req.logRouterCount, ProcessClass::LogRouter).betterCount(RoleFitness(logRouters, ProcessClass::LogRouter)) ) ) ) {
			throw operation_failed();
		}

@@ -600,10 +602,10 @@ public:
		}

		if( now() - startTime < SERVER_KNOBS->WAIT_FOR_GOOD_RECRUITMENT_DELAY &&
-			( RoleFitness(SERVER_KNOBS->EXPECTED_TLOG_FITNESS, req.configuration.getDesiredLogs()).betterCount(RoleFitness(tlogs, ProcessClass::TLog)) ||
-			  ( region.satelliteTLogReplicationFactor > 0 && RoleFitness(SERVER_KNOBS->EXPECTED_TLOG_FITNESS, req.configuration.getDesiredSatelliteLogs(dcId)).betterCount(RoleFitness(satelliteLogs, ProcessClass::TLog)) ) ||
-			  RoleFitness(SERVER_KNOBS->EXPECTED_PROXY_FITNESS, req.configuration.getDesiredProxies()).betterCount(RoleFitness(proxies, ProcessClass::Proxy)) ||
-			  RoleFitness(SERVER_KNOBS->EXPECTED_RESOLVER_FITNESS, req.configuration.getDesiredResolvers()).betterCount(RoleFitness(resolvers, ProcessClass::Resolver)) ) ) {
+			( RoleFitness(SERVER_KNOBS->EXPECTED_TLOG_FITNESS, req.configuration.getDesiredLogs(), ProcessClass::TLog).betterCount(RoleFitness(tlogs, ProcessClass::TLog)) ||
+			  ( region.satelliteTLogReplicationFactor > 0 && RoleFitness(SERVER_KNOBS->EXPECTED_TLOG_FITNESS, req.configuration.getDesiredSatelliteLogs(dcId), ProcessClass::TLog).betterCount(RoleFitness(satelliteLogs, ProcessClass::TLog)) ) ||
+			  RoleFitness(SERVER_KNOBS->EXPECTED_PROXY_FITNESS, req.configuration.getDesiredProxies(), ProcessClass::Proxy).betterCount(RoleFitness(proxies, ProcessClass::Proxy)) ||
+			  RoleFitness(SERVER_KNOBS->EXPECTED_RESOLVER_FITNESS, req.configuration.getDesiredResolvers(), ProcessClass::Resolver).betterCount(RoleFitness(resolvers, ProcessClass::Resolver)) ) ) {
			return operation_failed();
		}

@@ -705,7 +707,7 @@ public:
			proxies.push_back(first_proxy.worker);
			resolvers.push_back(first_resolver.worker);

-			auto fitness = RoleFitness( RoleFitness(proxies, ProcessClass::Proxy), RoleFitness(resolvers, ProcessClass::Resolver) );
+			auto fitness = RoleFitness( RoleFitness(proxies, ProcessClass::Proxy), RoleFitness(resolvers, ProcessClass::Resolver), ProcessClass::NoRole );

			if(dcId == clusterControllerDcId) {
				bestFitness = fitness;
@@ -750,8 +752,8 @@ public:
			.detail("DesiredResolvers", req.configuration.getDesiredResolvers()).detail("ActualResolvers", result.resolvers.size());

		if( now() - startTime < SERVER_KNOBS->WAIT_FOR_GOOD_RECRUITMENT_DELAY &&
-			( RoleFitness(SERVER_KNOBS->EXPECTED_TLOG_FITNESS, req.configuration.getDesiredLogs()).betterCount(RoleFitness(tlogs, ProcessClass::TLog)) ||
-			  RoleFitness(std::min(SERVER_KNOBS->EXPECTED_PROXY_FITNESS, SERVER_KNOBS->EXPECTED_RESOLVER_FITNESS), std::max(SERVER_KNOBS->EXPECTED_PROXY_FITNESS, SERVER_KNOBS->EXPECTED_RESOLVER_FITNESS), req.configuration.getDesiredProxies()+req.configuration.getDesiredResolvers()).betterCount(bestFitness) ) ) {
+			( RoleFitness(SERVER_KNOBS->EXPECTED_TLOG_FITNESS, req.configuration.getDesiredLogs(), ProcessClass::TLog).betterCount(RoleFitness(tlogs, ProcessClass::TLog)) ||
+			  RoleFitness(std::min(SERVER_KNOBS->EXPECTED_PROXY_FITNESS, SERVER_KNOBS->EXPECTED_RESOLVER_FITNESS), std::max(SERVER_KNOBS->EXPECTED_PROXY_FITNESS, SERVER_KNOBS->EXPECTED_RESOLVER_FITNESS), req.configuration.getDesiredProxies()+req.configuration.getDesiredResolvers(), ProcessClass::NoRole).betterCount(bestFitness) ) ) {
			throw operation_failed();
		}

@@ -945,10 +947,11 @@ public:
			return false;

		RoleFitness oldRemoteTLogFit(remote_tlogs, ProcessClass::TLog);
-		RoleFitness newRemoteTLogFit((db.config.usableRegions > 1 && dbi.recoveryState == RecoveryState::FULLY_RECOVERED) ? getWorkersForTlogs(db.config, db.config.getRemoteTLogReplicationFactor(), db.config.getDesiredRemoteLogs(), db.config.getRemoteTLogPolicy(), id_used, true, remoteDC) : remote_tlogs, ProcessClass::TLog);
-
+		RoleFitness newRemoteTLogFit(
+			(db.config.usableRegions > 1 && dbi.recoveryState == RecoveryState::FULLY_RECOVERED) ?
+			getWorkersForTlogs(db.config, db.config.getRemoteTLogReplicationFactor(), db.config.getDesiredRemoteLogs(), db.config.getRemoteTLogPolicy(), id_used, true, remoteDC)
+			: remote_tlogs, ProcessClass::TLog);
		if(oldRemoteTLogFit < newRemoteTLogFit) return false;

		int oldRouterCount = oldTLogFit.count * std::max<int>(1, db.config.desiredLogRouterCount / std::max(1,oldTLogFit.count));
		int newRouterCount = newTLogFit.count * std::max<int>(1, db.config.desiredLogRouterCount / std::max(1,newTLogFit.count));
		RoleFitness oldLogRoutersFit(log_routers, ProcessClass::LogRouter);
@@ -960,11 +963,9 @@ public:
		if(newLogRoutersFit.count < newRouterCount) {
			newLogRoutersFit.worstFit = ProcessClass::NeverAssign;
		}

		if(oldLogRoutersFit < newLogRoutersFit) return false;

		// Check proxy/resolver fitness
-		RoleFitness oldInFit(RoleFitness(proxyClasses, ProcessClass::Proxy), RoleFitness(resolverClasses, ProcessClass::Resolver));
-
+		RoleFitness oldInFit(RoleFitness(proxyClasses, ProcessClass::Proxy), RoleFitness(resolverClasses, ProcessClass::Resolver), ProcessClass::NoRole);
		auto first_resolver = getWorkerForRoleInDatacenter( clusterControllerDcId, ProcessClass::Resolver, ProcessClass::ExcludeFit, db.config, id_used, true );
		auto first_proxy = getWorkerForRoleInDatacenter( clusterControllerDcId, ProcessClass::Proxy, ProcessClass::ExcludeFit, db.config, id_used, true );
@@ -974,10 +975,8 @@ public:
		proxies.push_back(first_proxy.worker);
		resolvers.push_back(first_resolver.worker);

-		RoleFitness newInFit(RoleFitness(proxies, ProcessClass::Proxy), RoleFitness(resolvers, ProcessClass::Resolver));
-
+		RoleFitness newInFit(RoleFitness(proxies, ProcessClass::Proxy), RoleFitness(resolvers, ProcessClass::Resolver), ProcessClass::NoRole);
		if(oldInFit.betterFitness(newInFit)) return false;

		if(oldTLogFit > newTLogFit || oldInFit > newInFit || (oldSatelliteFallback && !newSatelliteFallback) || oldSatelliteTLogFit > newSatelliteTLogFit || oldRemoteTLogFit > newRemoteTLogFit || oldLogRoutersFit > newLogRoutersFit) {
			TraceEvent("BetterMasterExists", id).detail("OldMasterFit", oldMasterFit).detail("NewMasterFit", mworker.fitness)
				.detail("OldTLogFit", oldTLogFit.toString()).detail("NewTLogFit", newTLogFit.toString())
@@ -1321,6 +1320,9 @@ ACTOR Future<Void> rebootAndCheck( ClusterControllerData* cluster, Optional<Stan
 ACTOR Future<Void> workerAvailabilityWatch( WorkerInterface worker, ProcessClass startingClass, ClusterControllerData* cluster ) {
	state Future<Void> failed = worker.address() == g_network->getLocalAddress() ? Never() : waitFailureClient( worker.waitFailure, SERVER_KNOBS->WORKER_FAILURE_TIME );
	cluster->updateWorkerList.set( worker.locality.processId(), ProcessData(worker.locality, startingClass, worker.address()) );
+	// This switching avoids a race where the worker can be added to id_worker map after the workerAvailabilityWatch fails for the worker.
+	wait(delay(0));
+
	loop {
		choose {
			when( wait( IFailureMonitor::failureMonitor().onStateEqual( worker.storage.getEndpoint(), FailureStatus(IFailureMonitor::failureMonitor().getState( worker.storage.getEndpoint() ).isAvailable()) ) ) ) {

@@ -61,7 +61,7 @@ struct ClusterControllerFullInterface {
	template <class Ar>
	void serialize( Ar& ar ) {
		ASSERT( ar.protocolVersion() >= 0x0FDB00A200040001LL );
-		ar & clientInterface & recruitFromConfiguration & recruitRemoteFromConfiguration & recruitStorage & registerWorker & getWorkers & registerMaster & getServerDBInfo;
+		serializer(ar, clientInterface, recruitFromConfiguration, recruitRemoteFromConfiguration, recruitStorage, registerWorker, getWorkers, registerMaster, getServerDBInfo);
	}
 };

@@ -77,7 +77,7 @@ struct RecruitFromConfigurationRequest {

	template <class Ar>
	void serialize( Ar& ar ) {
-		ar & configuration & recruitSeedServers & maxOldLogRouters & reply;
+		serializer(ar, configuration, recruitSeedServers, maxOldLogRouters, reply);
	}
 };

@@ -95,7 +95,7 @@ struct RecruitFromConfigurationReply {

	template <class Ar>
	void serialize( Ar& ar ) {
-		ar & tLogs & satelliteTLogs & proxies & resolvers & storageServers & oldLogRouters & dcId & satelliteFallback;
+		serializer(ar, tLogs, satelliteTLogs, proxies, resolvers, storageServers, oldLogRouters, dcId, satelliteFallback);
	}
 };

@@ -110,7 +110,7 @@ struct RecruitRemoteFromConfigurationRequest {

	template <class Ar>
	void serialize( Ar& ar ) {
-		ar & configuration & dcId & logRouterCount & reply;
+		serializer(ar, configuration, dcId, logRouterCount, reply);
	}
 };

@@ -120,7 +120,7 @@ struct RecruitRemoteFromConfigurationReply {

	template <class Ar>
	void serialize( Ar& ar ) {
-		ar & remoteTLogs & logRouters;
+		serializer(ar, remoteTLogs, logRouters);
	}
 };

@@ -130,7 +130,7 @@ struct RecruitStorageReply {

	template <class Ar>
	void serialize( Ar& ar ) {
-		ar & worker & processClass;
+		serializer(ar, worker, processClass);
	}
 };

@@ -143,7 +143,7 @@ struct RecruitStorageRequest {

	template <class Ar>
	void serialize( Ar& ar ) {
-		ar & excludeMachines & excludeAddresses & includeDCs & criticalRecruitment & reply;
+		serializer(ar, excludeMachines, excludeAddresses, includeDCs, criticalRecruitment, reply);
	}
 };

@@ -156,7 +156,7 @@ struct RegisterWorkerReply {

	template <class Ar>
	void serialize( Ar& ar ) {
-		ar & processClass & priorityInfo;
+		serializer(ar, processClass, priorityInfo);
	}
 };

@@ -174,7 +174,7 @@ struct RegisterWorkerRequest {

	template <class Ar>
	void serialize( Ar& ar ) {
-		ar & wi & initialClass & processClass & priorityInfo & generation & reply;
+		serializer(ar, wi, initialClass, processClass, priorityInfo, generation, reply);
	}
 };

@@ -189,7 +189,7 @@ struct GetWorkersRequest {

	template <class Ar>
	void serialize(Ar& ar) {
-		ar & flags & reply;
+		serializer(ar, flags, reply);
	}
 };

@@ -213,7 +213,7 @@ struct RegisterMasterRequest {
	template <class Ar>
	void serialize( Ar& ar ) {
		ASSERT( ar.protocolVersion() >= 0x0FDB00A200040001LL );
-		ar & id & mi & logSystemConfig & proxies & resolvers & recoveryCount & registrationCount & configuration & priorCommittedLogServers & recoveryState & recoveryStalled & reply;
+		serializer(ar, id, mi, logSystemConfig, proxies, resolvers, recoveryCount, registrationCount, configuration, priorCommittedLogServers, recoveryState, recoveryStalled, reply);
	}
 };

@@ -225,7 +225,7 @@ struct GetServerDBInfoRequest {

	template <class Ar>
	void serialize(Ar& ar) {
-		ar & knownServerInfoID & issues & incompatiblePeers & reply;
+		serializer(ar, knownServerInfoID, issues, incompatiblePeers, reply);
	}
 };

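
Every hunk in this header applies the same mechanical rewrite: the chained `ar & a & b;` spelling becomes a single `serializer(ar, a, b);` statement. One plausible way such a variadic helper can be written, assuming the archive keeps exposing per-field operator& (this is a sketch, not flow's actual implementation):

	// Hypothetical recursive variadic forwarder; flow's real serializer()
	// may be implemented differently.
	template <class Ar, class T>
	void serializer(Ar& ar, T& t) {
		ar & t;
	}

	template <class Ar, class T, class... Ts>
	void serializer(Ar& ar, T& t, Ts&... rest) {
		ar & t;
		serializer(ar, rest...);
	}

One practical benefit of the function-call form is that the field list becomes a single expression a macro or tool can find and rewrite, rather than a chain of overloaded operators.
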
@@ -208,7 +208,7 @@ struct MovableValue {
	template <class Ar>
	void serialize(Ar& ar) {
		ASSERT( ar.protocolVersion() >= 0x0FDB00A2000D0001LL );
-		ar & value & mode & other;
+		serializer(ar, value, mode, other);
	}
 };

@@ -316,4 +316,4 @@ MovableCoordinatedState::~MovableCoordinatedState() {
 Future<Value> MovableCoordinatedState::read() { return MovableCoordinatedStateImpl::read(impl); }
 Future<Void> MovableCoordinatedState::onConflict() { return impl->onConflict(); }
 Future<Void> MovableCoordinatedState::setExclusive(Value v) { return impl->setExclusive(v); }
-Future<Void> MovableCoordinatedState::move( ClusterConnectionString const& nc ) { return MovableCoordinatedStateImpl::move(impl, nc); }
+Future<Void> MovableCoordinatedState::move( ClusterConnectionString const& nc ) { return MovableCoordinatedStateImpl::move(impl, nc); }

@@ -33,7 +33,7 @@ struct GenerationRegVal {
	Optional<Value> val;
	template <class Ar>
	void serialize(Ar& ar) {
-		ar & readGen & writeGen & val;
+		serializer(ar, readGen, writeGen, val);
	}
 };

@@ -66,7 +66,7 @@ struct UniqueGeneration {
	}
	template <class Ar>
	void serialize(Ar& ar) {
-		ar & generation & uid;
+		serializer(ar, generation, uid);
	}
 };

@@ -78,7 +78,7 @@ struct GenerationRegReadRequest {
	GenerationRegReadRequest( Key key, UniqueGeneration gen ) : key(key), gen(gen) {}
	template <class Ar>
	void serialize(Ar& ar) {
-		ar & key & gen & reply;
+		serializer(ar, key, gen, reply);
	}
 };

@@ -89,7 +89,7 @@ struct GenerationRegReadReply {
	GenerationRegReadReply( Optional<Value> value, UniqueGeneration gen, UniqueGeneration rgen ) : value(value), gen(gen), rgen(rgen) {}
	template <class Ar>
	void serialize(Ar& ar) {
-		ar & value & gen & rgen;
+		serializer(ar, value, gen, rgen);
	}
 };

@@ -101,7 +101,7 @@ struct GenerationRegWriteRequest {
	GenerationRegWriteRequest(KeyValue kv, UniqueGeneration gen) : kv(kv), gen(gen) {}
	template <class Ar>
	void serialize(Ar& ar) {
-		ar & kv & gen & reply;
+		serializer(ar, kv, gen, reply);
	}
 };

@@ -126,7 +126,7 @@ struct CandidacyRequest {

	template <class Ar>
	void serialize(Ar& ar) {
-		ar & key & myInfo & knownLeader & prevChangeID & reply;
+		serializer(ar, key, myInfo, knownLeader, prevChangeID, reply);
	}
 };

@@ -141,7 +141,7 @@ struct LeaderHeartbeatRequest {

	template <class Ar>
	void serialize(Ar& ar) {
-		ar & key & myInfo & prevChangeID & reply;
+		serializer(ar, key, myInfo, prevChangeID, reply);
	}
 };

@@ -155,7 +155,7 @@ struct ForwardRequest {

	template <class Ar>
	void serialize(Ar& ar) {
-		ar & key & conn & reply;
+		serializer(ar, key, conn, reply);
	}
 };

@@ -169,4 +169,4 @@ public:

 Future<Void> coordinationServer( std::string const& dataFolder );

-#endif
+#endif

@@ -54,7 +54,7 @@ struct CoreTLogSet {

	template <class Archive>
	void serialize(Archive& ar) {
-		ar & tLogs & tLogWriteAntiQuorum & tLogReplicationFactor & tLogPolicy & tLogLocalities & isLocal & locality & startVersion & satelliteTagLocations;
+		serializer(ar, tLogs, tLogWriteAntiQuorum, tLogReplicationFactor, tLogPolicy, tLogLocalities, isLocal, locality, startVersion, satelliteTagLocations);
	}
 };

@@ -72,11 +72,11 @@ struct OldTLogCoreData {
	template <class Archive>
	void serialize(Archive& ar) {
		if( ar.protocolVersion() >= 0x0FDB00A560010001LL) {
-			ar & tLogs & logRouterTags & epochEnd;
+			serializer(ar, tLogs, logRouterTags, epochEnd);
		}
		else if(ar.isDeserializing) {
			tLogs.push_back(CoreTLogSet());
-			ar & tLogs[0].tLogs & tLogs[0].tLogWriteAntiQuorum & tLogs[0].tLogReplicationFactor & tLogs[0].tLogPolicy & epochEnd & tLogs[0].tLogLocalities;
+			serializer(ar, tLogs[0].tLogs, tLogs[0].tLogWriteAntiQuorum, tLogs[0].tLogReplicationFactor, tLogs[0].tLogPolicy, epochEnd, tLogs[0].tLogLocalities);
		}
	}
 };
@@ -122,18 +122,18 @@ struct DBCoreState {

		ASSERT(ar.protocolVersion() >= 0x0FDB00A460010001LL);
		if(ar.protocolVersion() >= 0x0FDB00A560010001LL) {
-			ar & tLogs & logRouterTags & oldTLogData & recoveryCount & logSystemType;
+			serializer(ar, tLogs, logRouterTags, oldTLogData, recoveryCount, logSystemType);
		} else if(ar.isDeserializing) {
			tLogs.push_back(CoreTLogSet());
-			ar & tLogs[0].tLogs & tLogs[0].tLogWriteAntiQuorum & recoveryCount & tLogs[0].tLogReplicationFactor & logSystemType;
+			serializer(ar, tLogs[0].tLogs, tLogs[0].tLogWriteAntiQuorum, recoveryCount, tLogs[0].tLogReplicationFactor, logSystemType);

			uint64_t tLocalitySize = (uint64_t)tLogs[0].tLogLocalities.size();
-			ar & oldTLogData & tLogs[0].tLogPolicy & tLocalitySize;
+			serializer(ar, oldTLogData, tLogs[0].tLogPolicy, tLocalitySize);
			if (ar.isDeserializing) {
				tLogs[0].tLogLocalities.reserve(tLocalitySize);
				for (size_t i = 0; i < tLocalitySize; i++) {
					LocalityData locality;
-					ar & locality;
+					serializer(ar, locality);
					tLogs[0].tLogLocalities.push_back(locality);
				}

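
The DBCoreState hunk preserves the older wire layout behind a protocol-version gate: new-format peers get one flat serializer() call, while legacy input is read field by field and anything the old format never carried is defaulted. A compressed sketch of that shape (the archive interface and the version constant are assumptions based on the surrounding code, not a definitive API):

	// Sketch only: Ar is assumed to expose protocolVersion()/isDeserializing
	// the way the hunks above use them.
	template <class Ar, class State>
	void serializeCompat(Ar& ar, State& s) {
		if (ar.protocolVersion() >= 0x0FDB00A560010001LL) {
			// Current layout: one flat field list.
			serializer(ar, s.tLogs, s.logRouterTags, s.epochEnd);
		} else if (ar.isDeserializing) {
			// Legacy layout is read-only: rebuild the modern in-memory
			// form and default fields the old wire format did not carry.
			s.tLogs.resize(1);
			serializer(ar, s.tLogs[0], s.epochEnd);
			s.logRouterTags = 0;
		}
	}
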
@@ -25,6 +25,7 @@
 #include "fdbserver/MoveKeys.h"
 #include "fdbserver/Knobs.h"
 #include <set>
+#include <sstream>
 #include "fdbserver/WaitFailure.h"
 #include "fdbserver/ServerDBInfo.h"
 #include "fdbserver/IKeyValueStore.h"
@@ -74,15 +75,14 @@ struct TCMachineInfo : public ReferenceCounted<TCMachineInfo> {
	}

	std::string getServersIDStr() {
-		std::string str;
+		std::stringstream ss;
		if (serversOnMachine.empty()) return "[unset]";

		for (auto& server : serversOnMachine) {
-			str += server->id.toString() + " ";
+			ss << server->id.toString() << " ";
		}
-		str.pop_back();

-		return str;
+		return ss.str();
	}
 };

@@ -152,16 +152,15 @@ public:
	}

	std::string getMachineIDsStr() {
-		std::string str;
+		std::stringstream ss;

-		if (this == NULL || machineIDs.empty()) return "[unset]";
+		if (machineIDs.empty()) return "[unset]";

		for (auto& id : machineIDs) {
-			str += id.contents().toString() + " ";
+			ss << id.contents().toString() << " ";
		}
-		str.pop_back();

-		return str;
+		return ss.str();
	}

	int getTotalMachineTeamNumber() {
@@ -210,15 +209,15 @@ public:
	virtual vector<UID> const& getServerIDs() { return serverIDs; }

	virtual std::string getServerIDsStr() {
-		std::string str;
-		if (this == NULL || serverIDs.empty()) return "[unset]";
+		std::stringstream ss;

+		if (serverIDs.empty()) return "[unset]";

		for (auto& id : serverIDs) {
-			str += id.toString() + " ";
+			ss << id.toString() << " ";
		}
-		str.pop_back();

-		return str;
+		return ss.str();
	}

	virtual void addDataInFlightToTeam( int64_t delta ) {
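
Two things happen in these ID-string hunks: the `this == NULL` guards are dropped (calling a member function through a null pointer is already undefined behavior in standard C++, so the check was never a reliable defense), and string assembly moves from repeated std::string concatenation plus a trailing pop_back() to a std::stringstream. A standalone sketch of the new shape, with plain strings standing in for the real UID type:

	#include <sstream>
	#include <string>
	#include <vector>

	// Plain strings stand in for UID; the real code calls id.toString().
	std::string idsToString(const std::vector<std::string>& ids) {
		if (ids.empty()) return "[unset]";
		std::stringstream ss;
		for (const auto& id : ids) {
			ss << id << " ";
		}
		return ss.str(); // unlike the old pop_back() code, the trailing space is kept
	}
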
@@ -2724,7 +2723,7 @@ ACTOR Future<Void> storageServerTracker(
			}
		}
	} catch( Error &e ) {
-		if (e.code() != error_code_actor_cancelled)
+		if (e.code() != error_code_actor_cancelled && errorOut.canBeSet())
			errorOut.sendError(e);
		throw;
	}

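
The added errorOut.canBeSet() test guards a one-shot error channel that several trackers can share: only the first failure may set it, and setting it again is an error. An analogy using std::promise, which has no canBeSet(), so the sketch tracks settability by hand (ErrorOut and its members here are hypothetical, not flow's Promise):

	#include <exception>
	#include <future>

	// One-shot shared error slot; set_exception() on an already-satisfied
	// std::promise throws, so settability is tracked explicitly.
	struct ErrorOut {
		std::promise<void> p;
		bool settable = true;

		void sendError(std::exception_ptr e) {
			if (!settable) return; // plays the role of errorOut.canBeSet()
			settable = false;
			p.set_exception(e);
		}
	};
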
@@ -770,6 +770,8 @@ private:
		uint64_t popped;
		int payloadSize;
	};
+	// The on disk format depends on the size of PageHeader.
+	static_assert( sizeof(PageHeader) == 36, "PageHeader must be packed" );

	struct Page : PageHeader {
		static const int maxPayload = _PAGE_SIZE - sizeof(PageHeader);
@@ -901,7 +903,7 @@ private:

		// Writes go at the end of our reads (but on the next page)
		self->nextPageSeq = self->nextReadLocation/sizeof(Page)*sizeof(Page);
-		if (self->nextReadLocation % sizeof(Page) > 36) self->nextPageSeq += sizeof(Page);
+		if (self->nextReadLocation % sizeof(Page) > sizeof(PageHeader)) self->nextPageSeq += sizeof(Page);

		TraceEvent("DQRecovered", self->dbgid).detail("LastPoppedSeq", self->lastPoppedSeq).detail("PoppedSeq", self->poppedSeq).detail("NextPageSeq", self->nextPageSeq).detail("File0Name", self->rawQueue->files[0].dbgFilename);
		self->recovered = true;

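
Replacing the literal 36 with sizeof(PageHeader), together with the new static_assert, means a future header change breaks the build instead of silently shifting the on-disk layout. A toy illustration of the idea (the field list is invented; only the 36-byte packed footprint matches the real header):

	#include <cstdint>

	// Invented stand-in; the real PageHeader has different fields but the
	// same 36-byte packed size that the literal used to hard-code.
	struct PageHeader {
		uint8_t  checksum[16];
		uint64_t seq;
		uint64_t popped;
		int32_t  payloadSize;
	} __attribute__((packed)); // GCC/Clang syntax; without packing, padding would make this 40 bytes

	// The on-disk format depends on this size; fail the build if it drifts.
	static_assert(sizeof(PageHeader) == 36, "PageHeader must be packed");
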
@@ -20,6 +20,7 @@

 #include "fdbserver/Knobs.h"
+#include "fdbrpc/Locality.h"
 #include <cmath>

 ServerKnobs const* SERVER_KNOBS = new ServerKnobs();

@@ -377,6 +378,8 @@ ServerKnobs::ServerKnobs(bool randomize, ClientKnobs* clientKnobs) {
	init( MAX_STORAGE_SERVER_WATCH_BYTES, 100e6 ); if( randomize && BUGGIFY ) MAX_STORAGE_SERVER_WATCH_BYTES = 10e3;
	init( MAX_BYTE_SAMPLE_CLEAR_MAP_SIZE, 1e9 ); if( randomize && BUGGIFY ) MAX_BYTE_SAMPLE_CLEAR_MAP_SIZE = 1e3;
	init( LONG_BYTE_SAMPLE_RECOVERY_DELAY, 60.0 );
+	init( BYTE_SAMPLE_LOAD_PARALLELISM, 32 ); if( randomize && BUGGIFY ) BYTE_SAMPLE_LOAD_PARALLELISM = 1;
+	init( BYTE_SAMPLE_LOAD_DELAY, 0.0 ); if( randomize && BUGGIFY ) BYTE_SAMPLE_LOAD_DELAY = 0.1;

	//Wait Failure
	init( BUGGIFY_OUTSTANDING_WAIT_FAILURE_REQUESTS, 2 );

@@ -315,6 +315,8 @@ public:
	int MAX_STORAGE_SERVER_WATCH_BYTES;
	int MAX_BYTE_SAMPLE_CLEAR_MAP_SIZE;
	double LONG_BYTE_SAMPLE_RECOVERY_DELAY;
+	int BYTE_SAMPLE_LOAD_PARALLELISM;
+	double BYTE_SAMPLE_LOAD_DELAY;

	//Wait Failure
	int BUGGIFY_OUTSTANDING_WAIT_FAILURE_REQUESTS;

@@ -60,7 +60,7 @@ struct LogProtocolMessage {
	template <class Ar>
	void serialize(Ar& ar) {
		uint8_t poly = MutationRef::Reserved_For_LogProtocolMessage;
-		ar & poly;
+		serializer(ar, poly);
		applyVersionStartingHere(ar, IncludeVersion());
	}

@@ -70,4 +70,4 @@ struct LogProtocolMessage {
	template <class Ar> static bool isNextIn(Ar& ar) { return startsLogProtocolMessage(*(const uint8_t*)ar.peekBytes(1)); }
 };

-#endif
+#endif

@@ -45,8 +45,8 @@ struct OptionalInterface {

	template <class Ar>
	void serialize( Ar& ar ) {
-		ar & iface;
-		if( !iface.present() ) ar & ident;
+		serializer(ar, iface);
+		if( !iface.present() ) serializer(ar, ident);
		else ident = iface.get().id();
	}

@@ -111,7 +111,7 @@ struct TLogSet {

	template <class Ar>
	void serialize( Ar& ar ) {
-		ar & tLogs & logRouters & tLogWriteAntiQuorum & tLogReplicationFactor & tLogPolicy & tLogLocalities & isLocal & locality & startVersion & satelliteTagLocations;
+		serializer(ar, tLogs, logRouters, tLogWriteAntiQuorum, tLogReplicationFactor, tLogPolicy, tLogLocalities, isLocal, locality, startVersion, satelliteTagLocations);
	}
 };

@@ -144,7 +144,7 @@ struct OldTLogConf {

	template <class Ar>
	void serialize( Ar& ar ) {
-		ar & tLogs & epochEnd & logRouterTags;
+		serializer(ar, tLogs, epochEnd, logRouterTags);
	}
 };

@@ -304,7 +304,7 @@ struct LogSystemConfig {

	template <class Ar>
	void serialize( Ar& ar ) {
-		ar & logSystemType & tLogs & logRouterTags & oldTLogs & expectedLogSets & recruitmentID & stopped & recoveredAt;
+		serializer(ar, logSystemType, tLogs, logRouterTags, oldTLogs, expectedLogSets, recruitmentID, stopped, recoveredAt);
	}
 };

@@ -43,7 +43,7 @@ struct MasterInterface {
	template <class Archive>
	void serialize(Archive& ar) {
		ASSERT( ar.protocolVersion() >= 0x0FDB00A200040001LL );
-		ar & locality & waitFailure & getRateInfo & tlogRejoin & changeCoordinators & getCommitVersion;
+		serializer(ar, locality, waitFailure, getRateInfo, tlogRejoin, changeCoordinators, getCommitVersion);
	}

	void initEndpoints() {
@@ -61,7 +61,7 @@ struct GetRateInfoRequest {

	template <class Ar>
	void serialize(Ar& ar) {
-		ar & requesterID & totalReleasedTransactions & reply;
+		serializer(ar, requesterID, totalReleasedTransactions, reply);
	}
 };

@@ -71,7 +71,7 @@ struct GetRateInfoReply {

	template <class Ar>
	void serialize(Ar& ar) {
-		ar & transactionRate & leaseDuration;
+		serializer(ar, transactionRate, leaseDuration);
	}
 };

@@ -83,7 +83,7 @@ struct TLogRejoinRequest {
	explicit TLogRejoinRequest(const TLogInterface &interf) : myInterface(interf) { }
	template <class Ar>
	void serialize(Ar& ar) {
-		ar & myInterface & reply;
+		serializer(ar, myInterface, reply);
	}
 };

@@ -96,7 +96,7 @@ struct ChangeCoordinatorsRequest {

	template <class Ar>
	void serialize(Ar& ar) {
-		ar & newConnectionString & reply;
+		serializer(ar, newConnectionString, reply);
	}
 };

@@ -121,7 +121,7 @@ struct ResolverMoveRef {

	template <class Ar>
	void serialize( Ar& ar ) {
-		ar & range & dest;
+		serializer(ar, range, dest);
	}
 };

@@ -137,7 +137,7 @@ struct GetCommitVersionReply {

	template <class Ar>
	void serialize(Ar& ar) {
-		ar & resolverChanges & resolverChangesVersion & version & prevVersion & requestNum;
+		serializer(ar, resolverChanges, resolverChangesVersion, version, prevVersion, requestNum);
	}
 };

@@ -153,7 +153,7 @@ struct GetCommitVersionRequest {

	template <class Ar>
	void serialize(Ar& ar) {
-		ar & requestNum & mostRecentProcessedRequestNum & requestingProxy & reply;
+		serializer(ar, requestNum, mostRecentProcessedRequestNum, requestingProxy, reply);
	}
 };

@@ -175,7 +175,7 @@ struct LifetimeToken {

	template <class Ar>
	void serialize(Ar& ar) {
-		ar & ccID & count;
+		serializer(ar, ccID, count);
	}
 };

@@ -30,7 +30,7 @@
 struct MoveKeysLock {
	UID prevOwner, myOwner, prevWrite;
	template <class Ar>
-	void serialize(Ar& ar) { ar & prevOwner & myOwner & prevWrite; }
+	void serialize(Ar& ar) { serializer(ar, prevOwner, myOwner, prevWrite); }
 };

 Future<MoveKeysLock> takeMoveKeysLock( Database const& cx, UID const& masterId );

@@ -86,4 +86,4 @@ Future<bool> canRemoveStorageServer( Transaction* const& tr, UID const& serverID
 // Returns true if the given storage server has no keys assigned to it and may be safely removed
 // Obviously that could change later!

-#endif
+#endif

@@ -40,7 +40,7 @@ struct NetworkTestRequest {
	NetworkTestRequest( Key key, uint32_t replySize ) : key(key), replySize(replySize) {}
	template <class Ar>
	void serialize(Ar& ar) {
-		ar & key & replySize & reply;
+		serializer(ar, key, replySize, reply);
	}
 };

@@ -50,7 +50,7 @@ struct NetworkTestReply {
	NetworkTestReply( Value value ) : value(value) {}
	template <class Ar>
	void serialize(Ar& ar) {
-		ar & value;
+		serializer(ar, value);
	}
 };

@@ -58,4 +58,4 @@ Future<Void> networkTestServer();

 Future<Void> networkTestClient( std:: string const& testServers );

-#endif
+#endif

@@ -74,7 +74,7 @@ namespace oldTLog {

	template <class Ar>
	void serialize(Ar& ar) {
-		ar & tag & messageOffsets;
+		serializer(ar, tag, messageOffsets);
	}
 };

@@ -93,9 +93,9 @@ namespace oldTLog {
	template <class Ar>
	void serialize(Ar& ar) {
		if( ar.protocolVersion() >= 0x0FDB00A460010001) {
-			ar & version & messages & tags & knownCommittedVersion & id;
+			serializer(ar, version, messages, tags, knownCommittedVersion, id);
		} else if(ar.isDeserializing) {
-			ar & version & messages & tags;
+			serializer(ar, version, messages, tags);
			knownCommittedVersion = 0;
			id = UID();
		}

@@ -49,7 +49,7 @@ struct ResolverInterface {

	template <class Ar>
	void serialize( Ar& ar ) {
-		ar & uniqueID & locality & resolve & metrics & split & waitFailure;
+		serializer(ar, uniqueID, locality, resolve, metrics, split, waitFailure);
	}
 };

@@ -65,7 +65,7 @@ struct StateTransactionRef {

	template <class Archive>
	void serialize(Archive& ar) {
-		ar & committed & mutations;
+		serializer(ar, committed, mutations);
	}
 };

@@ -77,7 +77,7 @@ struct ResolveTransactionBatchReply {

	template <class Archive>
	void serialize(Archive& ar) {
-		ar & committed & stateMutations & arena & debugID;
+		serializer(ar, committed, stateMutations, arena, debugID);
	}

 };

@@ -95,7 +95,7 @@ struct ResolveTransactionBatchRequest {

	template <class Archive>
	void serialize(Archive& ar) {
-		ar & prevVersion & version & lastReceivedVersion & transactions & txnStateTransactions & reply & arena & debugID;
+		serializer(ar, prevVersion, version, lastReceivedVersion, transactions, txnStateTransactions, reply, arena, debugID);
	}
 };

@@ -104,7 +104,7 @@ struct ResolutionMetricsRequest {

	template <class Archive>
	void serialize(Archive& ar) {
-		ar & reply;
+		serializer(ar, reply);
	}
 };

@@ -113,7 +113,7 @@ struct ResolutionSplitReply {
	int64_t used;
	template <class Archive>
	void serialize(Archive& ar) {
-		ar & key & used;
+		serializer(ar, key, used);
	}

 };

@@ -126,7 +126,7 @@ struct ResolutionSplitRequest {

	template <class Archive>
	void serialize(Archive& ar) {
-		ar & range & offset & front & reply;
+		serializer(ar, range, offset, front, reply);
	}
 };

@@ -41,7 +41,7 @@ struct RestoreInterface {

	template <class Ar>
	void serialize( Ar& ar ) {
-		ar & test;
+		serializer(ar, test);
	}
 };

@@ -54,7 +54,7 @@ struct TestRequest {

	template <class Ar>
	void serialize(Ar& ar) {
-		ar & testData & reply;
+		serializer(ar, testData, reply);
	}
 };

@@ -66,7 +66,7 @@ struct TestReply {

	template <class Ar>
	void serialize(Ar& ar) {
-		ar & replyData;
+		serializer(ar, replyData);
	}
 };

@@ -51,7 +51,7 @@ struct ServerDBInfo {

	template <class Ar>
	void serialize( Ar& ar ) {
-		ar & id & clusterInterface & client & master & resolvers & recoveryCount & masterLifetime & logSystemConfig & priorCommittedLogServers & recoveryState;
+		serializer(ar, id, clusterInterface, client, master, resolvers, recoveryCount, masterLifetime, logSystemConfig, priorCommittedLogServers, recoveryState);
	}
 };

@@ -65,8 +65,8 @@ struct TLogInterface {
	template <class Ar>
	void serialize( Ar& ar ) {
		ASSERT(ar.isDeserializing || uniqueID != UID());
-		ar & uniqueID & sharedTLogID & locality & peekMessages & popMessages
-		   & commit & lock & getQueuingMetrics & confirmRunning & waitFailure & recoveryFinished;
+		serializer(ar, uniqueID, sharedTLogID, locality, peekMessages, popMessages
+		   , commit, lock, getQueuingMetrics, confirmRunning, waitFailure, recoveryFinished);
	}
 };

@@ -77,7 +77,7 @@ struct TLogRecoveryFinishedRequest {

	template <class Ar>
	void serialize( Ar& ar ) {
-		ar & reply;
+		serializer(ar, reply);
	}
 };

@@ -87,7 +87,7 @@ struct TLogLockResult {

	template <class Ar>
	void serialize( Ar& ar ) {
-		ar & end & knownCommittedVersion;
+		serializer(ar, end, knownCommittedVersion);
	}
 };

@@ -100,7 +100,7 @@ struct TLogConfirmRunningRequest {

	template <class Ar>
	void serialize( Ar& ar ) {
-		ar & debugID & reply;
+		serializer(ar, debugID, reply);
	}
 };

@@ -116,7 +116,7 @@ struct VersionUpdateRef {

	template <class Ar>
	void serialize( Ar& ar ) {
-		ar & version & mutations & isPrivateData;
+		serializer(ar, version, mutations, isPrivateData);
	}
 };

@@ -131,7 +131,7 @@ struct VerUpdateRef {

	template <class Ar>
	void serialize( Ar& ar ) {
-		ar & version & mutations & isPrivateData;
+		serializer(ar, version, mutations, isPrivateData);
	}
 };

@@ -146,7 +146,7 @@ struct TLogPeekReply {

	template <class Ar>
	void serialize(Ar& ar) {
-		ar & arena & messages & end & popped & maxKnownVersion & minKnownCommittedVersion & begin;
+		serializer(ar, arena, messages, end, popped, maxKnownVersion, minKnownCommittedVersion, begin);
	}
 };

@@ -163,7 +163,7 @@ struct TLogPeekRequest {

	template <class Ar>
	void serialize(Ar& ar) {
-		ar & arena & begin & tag & returnIfBlocked & sequence & reply;
+		serializer(ar, arena, begin, tag, returnIfBlocked, sequence, reply);
	}
 };

@@ -179,7 +179,7 @@ struct TLogPopRequest {

	template <class Ar>
	void serialize(Ar& ar) {
-		ar & arena & to & durableKnownCommittedVersion & tag & reply;
+		serializer(ar, arena, to, durableKnownCommittedVersion, tag, reply);
	}
 };

@@ -196,7 +196,7 @@ struct TagMessagesRef {

	template <class Ar>
	void serialize(Ar& ar) {
-		ar & tag & messageOffsets;
+		serializer(ar, tag, messageOffsets);
	}
 };

@@ -214,7 +214,7 @@ struct TLogCommitRequest {
		: arena(a), prevVersion(prevVersion), version(version), knownCommittedVersion(knownCommittedVersion), minKnownCommittedVersion(minKnownCommittedVersion), messages(messages), debugID(debugID) {}
	template <class Ar>
	void serialize( Ar& ar ) {
-		ar & prevVersion & version & knownCommittedVersion & minKnownCommittedVersion & messages & reply & arena & debugID;
+		serializer(ar, prevVersion, version, knownCommittedVersion, minKnownCommittedVersion, messages, reply, arena, debugID);
	}
 };

@@ -223,7 +223,7 @@ struct TLogQueuingMetricsRequest {

	template <class Ar>
	void serialize(Ar& ar) {
-		ar & reply;
+		serializer(ar, reply);
	}
 };

@@ -236,7 +236,7 @@ struct TLogQueuingMetricsReply {

	template <class Ar>
	void serialize(Ar& ar) {
-		ar & localTime & instanceID & bytesDurable & bytesInput & storageBytes & v;
+		serializer(ar, localTime, instanceID, bytesDurable, bytesInput, storageBytes, v);
	}
 };

@@ -57,7 +57,7 @@ struct TLogQueueEntryRef {

	template <class Ar>
	void serialize(Ar& ar) {
-		ar & version & messages & knownCommittedVersion & id;
+		serializer(ar, version, messages, knownCommittedVersion, id);
	}
	size_t expectedSize() const {
		return messages.expectedSize();

@@ -76,11 +76,11 @@ struct AlternativeTLogQueueEntryRef {
	void serialize(Ar& ar) {
		ASSERT(!ar.isDeserializing && alternativeMessages);
		uint32_t msgSize = expectedSize();
-		ar & version & msgSize;
+		serializer(ar, version, msgSize);
		for(auto& msg : *alternativeMessages) {
			ar.serializeBytes( msg.message );
		}
-		ar & knownCommittedVersion & id;
+		serializer(ar, knownCommittedVersion, id);
	}

	uint32_t expectedSize() const {

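
AlternativeTLogQueueEntryRef above shows that the migration is not all-or-nothing: framed fields go through serializer() while pre-encoded payload bytes still bypass it via serializeBytes(). A self-contained sketch of that split, with a toy byte-stream archive standing in for the real flow writer (everything here is illustrative):

	#include <cstdint>
	#include <string>
	#include <vector>

	// Toy byte-stream archive; the real flow archives expose serializeBytes()
	// alongside the field-wise path that serializer() uses.
	struct Writer {
		std::string bytes;
		template <class T> Writer& operator&(const T& v) {
			bytes.append(reinterpret_cast<const char*>(&v), sizeof(v));
			return *this;
		}
		void serializeBytes(const std::string& s) { bytes += s; }
	};

	template <class Ar, class T> void serializer(Ar& ar, const T& t) { ar & t; }
	template <class Ar, class T, class... Ts>
	void serializer(Ar& ar, const T& t, const Ts&... rest) { ar & t; serializer(ar, rest...); }

	// Framed fields go through serializer(); pre-encoded message payloads
	// are appended verbatim, mirroring the hunk above.
	void writeEntry(Writer& ar, uint64_t version, const std::vector<std::string>& msgs,
	                uint64_t knownCommittedVersion) {
		uint32_t msgSize = 0;
		for (const auto& m : msgs) msgSize += (uint32_t)m.size();
		serializer(ar, version, msgSize);      // header: version + payload length
		for (const auto& m : msgs)
			ar.serializeBytes(m);              // payload copied as-is, no re-framing
		serializer(ar, knownCommittedVersion); // trailer
	}
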
@@ -37,7 +37,7 @@ struct WorkloadInterface {

	template <class Ar>
	void serialize( Ar& ar ) {
-		ar & setup & start & check & metrics & stop;
+		serializer(ar, setup, start, check, metrics, stop);
	}
 };

@@ -68,7 +68,7 @@ struct WorkloadRequest {

	template <class Ar>
	void serialize( Ar& ar ) {
-		ar & title & timeout & databasePingDelay & sharedRandomNumber & useDatabase & options & clientId & clientCount & reply & arena;
+		serializer(ar, title, timeout, databasePingDelay, sharedRandomNumber, useDatabase, options, clientId, clientCount, reply, arena);
	}
 };

@@ -79,7 +79,7 @@ struct TesterInterface {

	template <class Ar>
	void serialize(Ar& ar) {
-		ar & recruitments;
+		serializer(ar, recruitments);
	}
 };

@@ -93,7 +93,7 @@ struct BTreePage {
				r += format("['%s']", c.getKeyRef().toHexString(20).c_str());

				r += " -> ";
-				if(flags && IS_LEAF)
+				if(flags & IS_LEAF)
					r += format("'%s'", c.getValueRef().toHexString(20).c_str());
				else
					r += format("Page id=%u", *(const uint32_t *)c.getValueRef().begin());

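The final hunk fixes a classic operator slip: `flags && IS_LEAF` is a logical AND, true whenever both operands are nonzero, while `flags & IS_LEAF` tests the leaf bit itself. A minimal demonstration with illustrative flag values:

	#include <cstdio>

	enum PageFlags { IS_LEAF = 0x1, IS_DIRTY = 0x2 }; // illustrative values

	int main() {
		unsigned flags = IS_DIRTY; // an interior (non-leaf) page
		std::printf("flags && IS_LEAF = %d\n", flags && IS_LEAF); // 1: wrongly "leaf"
		std::printf("flags & IS_LEAF  = %u\n", flags & IS_LEAF);  // 0: correct
		return 0;
	}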