commit e05b53d755
Merge remote-tracking branch 'apple/master' into task/tls-upgrade
@@ -76,6 +76,8 @@ foundationdb.VC.db
 foundationdb.VC.VC.opendb
 ipch/
 compile_commands.json
+flow/actorcompiler/obj
+flow/coveragetool/obj

 # Temporary and user configuration files
 *~
CMakeLists.txt (102 lines changed)
@@ -17,17 +17,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 cmake_minimum_required(VERSION 3.12)
-project(fdb
+project(foundationdb
   VERSION 6.1.0
   DESCRIPTION "FoundationDB is a scalable, fault-tolerant, ordered key-value store with full ACID transactions."
   HOMEPAGE_URL "http://www.foundationdb.org/"
-  LANGUAGES ASM C CXX Java)
+  LANGUAGES ASM C CXX)

-if(WIN32)
-  # C# is currently only supported on Windows.
-  # On other platforms we find mono manually
-  enable_language(CSharp)
-endif()
-
 set(CMAKE_MODULE_PATH "${CMAKE_MODULE_PATH};${PROJECT_SOURCE_DIR}/cmake")
 message (STATUS "${PROJECT_SOURCE_DIR} ${PROJECT_BINARY_DIR}")
@@ -45,42 +39,26 @@ endif()
 set(EXECUTABLE_OUTPUT_PATH ${PROJECT_BINARY_DIR}/bin)
 set(LIBRARY_OUTPUT_PATH ${PROJECT_BINARY_DIR}/lib)

+set(OPEN_FOR_IDE OFF CACHE BOOL "Open this in an IDE (won't compile/link)")
+
 ################################################################################
 # Packages used for bindings
 ################################################################################

 set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_CURRENT_SOURCE_DIR}/cmake/")

-find_package(PythonInterp 3.4 REQUIRED)
-set(Python_ADDITIONAL_VERSIONS 3.4 3.5 3.5)
-find_package(PythonLibs 3.4 REQUIRED)
-
-
-################################################################################
-# LibreSSL
-################################################################################
-
-set(DISABLE_TLS OFF CACHE BOOL "Don't try to find LibreSSL and always build without TLS support")
-if(DISABLE_TLS)
-  set(WITH_TLS FALSE)
-else()
-  set(LIBRESSL_USE_STATIC_LIBS TRUE)
-  find_package(LibreSSL)
-  if(LibreSSL_FOUND)
-    set(WITH_TLS TRUE)
-  else()
-    message(STATUS "LibreSSL NOT Found - Will compile without TLS Support")
-    message(STATUS "You can set LibreSSL_ROOT to the LibreSSL install directory to help cmake find it")
-    set(WITH_TLS FALSE)
-  endif()
-endif()
-
 ################################################################################
 # Compiler configuration
 ################################################################################

 include(ConfigureCompiler)

+################################################################################
+# Compiler configuration
+################################################################################
+
+include(FDBComponents)
+
 ################################################################################
 # Get repository information
 ################################################################################
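For orientation (not part of the commit): the LibreSSL detection removed above moves behind `include(FDBComponents)`. Assuming the `DISABLE_TLS` cache option and the `LibreSSL_ROOT` hint keep the names shown above, typical configure invocations would look roughly like this:

``` shell
# illustrative only: configure without TLS, or point cmake at a LibreSSL install
cmake -DDISABLE_TLS=ON ../foundationdb
cmake -DLibreSSL_ROOT=/usr/local/libressl-2.8.3/ ../foundationdb
```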
@@ -127,10 +105,15 @@ configure_file(${CMAKE_CURRENT_SOURCE_DIR}/fdbclient/versions.h.cmake ${CMAKE_CU
 # Flow
 ################################################################################

+# Flow and other tools are written in C# - so we need that dependency
+include(EnableCsharp)
+
 # First thing we need is the actor compiler - and to compile and run the
 # actor compiler, we need mono
 include(CompileActorCompiler)

+include(CompileCoverageTool)
+
 # with the actor compiler, we can now make the flow commands available
 include(FlowCommands)
@@ -140,50 +123,6 @@ include(FlowCommands)

 include(CompileVexillographer)

-# This macro can be used to install symlinks, which turns out to be
-# non-trivial due to CMake version differences and limitations on how
-# files can be installed when building binary packages.
-#
-# The rule for binary packaging is that files (including symlinks) must
-# be installed with the standard CMake install() macro.
-#
-# The rule for non-binary packaging is that CMake 2.6 cannot install()
-# symlinks, but can create the symlink at install-time via scripting.
-# Though, we assume that CMake 2.6 isn't going to be used to generate
-# packages because versions later than 2.8.3 are superior for that purpose.
-#
-# _filepath: the absolute path to the file to symlink
-# _sympath: absolute path of the installed symlink
-
-macro(InstallSymlink _filepath _sympath)
-  get_filename_component(_symname ${_sympath} NAME)
-  get_filename_component(_installdir ${_sympath} PATH)
-
-  if (BINARY_PACKAGING_MODE)
-    execute_process(COMMAND "${CMAKE_COMMAND}" -E create_symlink
-      ${_filepath}
-      ${CMAKE_CURRENT_BINARY_DIR}/${_symname})
-    install(FILES ${CMAKE_CURRENT_BINARY_DIR}/${_symname}
-      DESTINATION ${_installdir}
-      COMPONENT clients)
-  else ()
-    # scripting the symlink installation at install time should work
-    # for CMake 2.6.x and 2.8.x
-    install(CODE "
-      if (\"\$ENV{DESTDIR}\" STREQUAL \"\")
-        execute_process(COMMAND \"${CMAKE_COMMAND}\" -E create_symlink
-          ${_filepath}
-          ${_installdir}/${_symname})
-      else ()
-        execute_process(COMMAND \"${CMAKE_COMMAND}\" -E create_symlink
-          ${_filepath}
-          \$ENV{DESTDIR}/${_installdir}/${_symname})
-      endif ()
-      "
-      COMPONENT clients)
-  endif ()
-endmacro(InstallSymlink)
-
 ################################################################################
 # Generate config file
 ################################################################################
@@ -235,6 +174,9 @@ endif()
 add_subdirectory(bindings)
 add_subdirectory(fdbbackup)
 add_subdirectory(tests)
+if(WITH_DOCUMENTATION)
+  add_subdirectory(documentation)
+endif()

 if(WIN32)
   add_subdirectory(packaging/msi)
@@ -256,3 +198,11 @@ if (CMAKE_EXPORT_COMPILE_COMMANDS)
   )
 add_custom_target(procossed_compile_commands ALL DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/compile_commands.json ${CMAKE_CURRENT_BINARY_DIR}/compile_commands.json)
 endif()
+
+################################################################################
+# Inform user which components we are going to build
+################################################################################
+
+print_components()
+
+message(STATUS "CPACK_COMPONENTS_ALL ${CPACK_COMPONENTS_ALL}")
README.md (39 lines changed)
@@ -104,16 +104,49 @@ cmake -DLibreSSL_ROOT=/usr/local/libressl-2.8.3/ ../foundationdb
 FoundationDB will build just fine without LibreSSL, however, the resulting
 binaries won't support TLS connections.

+### Language Bindings
+
+The language bindings that are supported by cmake will have a corresponding
+`README.md` file in the corresponding `bindings/lang` directory.
+
+Generally, cmake will build all language bindings for which it can find all
+necessary dependencies. After each successful cmake run, cmake will tell you
+which language bindings it is going to build.
+
 ### Generating compile_commands.json

-CMake can build a compilation database for you. However, the default generatd
+CMake can build a compilation database for you. However, the default generated
 one is not too useful as it operates on the generated files. When running make,
 the build system will create another `compile_commands.json` file in the source
 directory. This can than be used for tools like
 [CCLS](https://github.com/MaskRay/ccls),
 [CQuery](https://github.com/cquery-project/cquery), etc. This way you can get
 code-completion and code navigation in flow. It is not yet perfect (it will show
-a few errors) but we are constantly working on improving the developement experience.
+a few errors) but we are constantly working on improving the development experience.
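As an aside (not part of the commit): the compilation database described above is produced when the flag checked in the top-level CMakeLists is enabled at configure time. A hedged sketch of the workflow:

``` shell
# illustrative only: enable the compilation database when configuring
cmake -DCMAKE_EXPORT_COMPILE_COMMANDS=ON <FDB_SOURCE_DIRECTORY>
make
# ccls / cquery can then be pointed at the compile_commands.json in the source tree
```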
+### Using IDEs
+
+CMake has built in support for a number of popular IDEs. However, because flow
+files are precompiled with the actor compiler, an IDE will not be very useful as
+a user will only be presented with the generated code - which is not what she
+wants to edit and get IDE features for.
+
+The good news is, that it is possible to generate project files for editing
+flow with a supported IDE. There is a cmake option called `OPEN_FOR_IDE` which
+will generate a project which can be opened in an IDE for editing. You won't be
+able to build this project, but you will be able to edit the files and get most
+edit and navigation features your IDE supports.
+
+For example, if you want to use XCode to make changes to FoundationDB you can
+create a XCode-project with the following command:
+
+```
+cmake -G Xcode -DOPEN_FOR_IDE=ON <FDB_SOURCE_DIRECTORY>
+```
+
+You should create a second build-directory which you will use for building
+(probably with make or ninja) and debugging.
+
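A sketch of the two-directory workflow this describes (directory names are illustrative, not part of the commit):

``` shell
# illustrative only: keep the IDE project separate from the real build tree
mkdir build && cd build
cmake -G Ninja <FDB_SOURCE_DIRECTORY>   # or -G "Unix Makefiles"
ninja
```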
 ### Linux
@@ -173,7 +206,7 @@ that Visual Studio is used to compile.
 1. This should succeed. In which case you can build using msbuild:
    `msbuild /p:Configuration=Release fdb.sln`. You can also open the resulting
    solution in Visual Studio and compile from there. However, be aware that
-   using Visual Studio for developement is currently not supported as Visual
+   using Visual Studio for development is currently not supported as Visual
    Studio will only know about the generated files.

 If you want TLS support to be enabled under Windows you currently have to build
@@ -1,3 +1,12 @@
 add_subdirectory(c)
+add_subdirectory(flow)
 add_subdirectory(python)
-add_subdirectory(java)
+if(WITH_JAVA)
+  add_subdirectory(java)
+endif()
+if(WITH_GO)
+  add_subdirectory(go)
+endif()
+if(WITH_RUBY)
+  add_subdirectory(ruby)
+endif()
@@ -16,7 +16,7 @@ elseif(WIN32)
 endif()

 add_custom_command(OUTPUT ${asm_file} ${CMAKE_CURRENT_BINARY_DIR}/fdb_c_function_pointers.g.h
-  COMMAND ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/generate_asm.py ${platform}
+  COMMAND $<TARGET_FILE:Python::Interpreter> ${CMAKE_CURRENT_SOURCE_DIR}/generate_asm.py ${platform}
   ${CMAKE_CURRENT_SOURCE_DIR}/fdb_c.cpp
   ${asm_file}
   ${CMAKE_CURRENT_BINARY_DIR}/fdb_c_function_pointers.g.h
@@ -41,6 +41,15 @@ if(WIN32)
   enable_language(ASM_MASM)
   set_property(SOURCE ${asm_file} PROPERTY LANGUAGE ASM_MASM)
 endif()

+# The tests don't build on windows
+if(NOT WIN32)
+  add_executable(fdb_c_performance_test test/performance_test.c test/test.h)
+  target_link_libraries(fdb_c_performance_test PRIVATE fdb_c)
+  add_executable(fdb_c_ryw_benchmark test/ryw_benchmark.c test/test.h)
+  target_link_libraries(fdb_c_ryw_benchmark PRIVATE fdb_c)
+endif()
+
 # TODO: re-enable once the old vcxproj-based build system is removed.
 #generate_export_header(fdb_c EXPORT_MACRO_NAME "DLLEXPORT"
 #  EXPORT_FILE_NAME ${CMAKE_CURRENT_BINARY_DIR}/foundationdb/fdb_c_export.h)
@@ -0,0 +1,42 @@ (new file)
set(SRCS
  DirectoryLayer.actor.cpp
  DirectoryLayer.h
  DirectoryPartition.h
  DirectorySubspace.cpp
  DirectorySubspace.h
  FDBLoanerTypes.h
  HighContentionAllocator.actor.cpp
  HighContentionAllocator.h
  IDirectory.h
  Node.actor.cpp
  Subspace.cpp
  Subspace.h
  Tuple.cpp
  Tuple.h
  fdb_flow.actor.cpp
  fdb_flow.h)

add_flow_target(NAME fdb_flow SRCS ${SRCS} STATIC_LIBRARY)
target_link_libraries(fdb_flow PUBLIC fdb_c)

add_subdirectory(tester)

# generate flow-package
foreach(f IN LISTS SRCS)
  if(f MATCHES ".*\\.h$")
    list(APPEND headers ${CMAKE_CURRENT_SOURCE_DIR}/${f})
  endif()
endforeach()
file(MAKE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/packages)
file(MAKE_DIRECTORY ${CMAKE_BINARY_DIR}/packages)
set(package_dir ${CMAKE_CURRENT_BINARY_DIR}/packages/fdb-flow-${CMAKE_PROJECT_VERSION})
set(tar_file ${CMAKE_BINARY_DIR}/packages/fdb-flow-${CMAKE_PROJECT_VERSION}.tar.gz)
add_custom_command(OUTPUT ${tar_file}
  COMMAND
    ${CMAKE_COMMAND} -E make_directory ${package_dir} &&
    ${CMAKE_COMMAND} -E copy $<TARGET_FILE:fdb_flow> ${headers} ${package_dir} &&
    ${CMAKE_COMMAND} -E tar czf ${tar_file} ${package_dir}
  WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/packages
  COMMENT "Build fdb_flow package")
add_custom_target(package_flow DEPENDS ${tar_file})
add_dependencies(packages package_flow)
@@ -0,0 +1,6 @@ (new file)
set(TEST_SRCS
  DirectoryTester.actor.cpp
  Tester.actor.cpp
  Tester.actor.h)
add_flow_target(NAME fdb_flow_tester EXECUTABLE SRCS ${TEST_SRCS})
target_link_libraries(fdb_flow_tester fdb_flow)
@@ -0,0 +1,121 @@ (new file)
set(SRCS
  src/_stacktester/directory.go
  src/fdb/directory/allocator.go
  src/fdb/directory/node.go
  src/fdb/futures.go
  src/fdb/subspace/subspace.go
  src/_stacktester/stacktester.go
  src/fdb/directory/directory.go
  src/fdb/doc.go
  src/fdb/transaction.go
  src/fdb/directory/directoryLayer.go
  src/fdb/errors.go
  src/fdb/keyselector.go
  src/fdb/tuple/tuple.go
  src/fdb/cluster.go
  src/fdb/directory/directoryPartition.go
  src/fdb/fdb.go
  src/fdb/range.go
  src/fdb/tuple/tuple_test.go
  src/fdb/database.go
  src/fdb/directory/directorySubspace.go
  src/fdb/fdb_test.go
  src/fdb/snapshot.go)

set(GOPATH ${CMAKE_CURRENT_BINARY_DIR})
set(GO_PACKAGE_ROOT github.com/apple/foundationdb/bindings/go)
set(GO_IMPORT_PATH ${GO_PACKAGE_ROOT}/src)
set(GO_DEST ${GOPATH}/src/${GO_PACKAGE_ROOT})

if(APPLE)
  set(GOPLATFORM darwin_amd64)
elseif(WIN32)
  set(GOPLATFORM windows_amd64)
else()
  set(GOPLATFORM linux_amd64)
endif()

set(GO_PACKAGE_OUTDIR ${GOPATH}/pkg/${GOPLATFORM}/${GO_IMPORT_PATH})

file(MAKE_DIRECTORY ${GOPATH}
  ${GO_DEST})
set(go_options_file ${GO_DEST}/src/fdb/generated.go)

set(go_env GOPATH=${GOPATH}
  C_INCLUDE_PATH=${CMAKE_BINARY_DIR}/bindings/c/foundationdb:${CMAKE_SOURCE_DIR}/bindings/c
  CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/lib)

foreach(src_file IN LISTS SRCS)
  set(dest_file ${GO_DEST}/${src_file})
  get_filename_component(dest_dir ${dest_file} DIRECTORY)
  list(APPEND SRCS_OUT ${dest_file})
  add_custom_command(OUTPUT ${dest_file}
    COMMAND ${CMAKE_COMMAND} -E make_directory ${dest_dir} &&
      ${CMAKE_COMMAND} -E copy ${src_file} ${dest_file}
    DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/${src_file}
    WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
    COMMENT "Creating fdb_go_path")
endforeach()
add_custom_target(copy_go_sources DEPENDS ${SRCS_OUT})
add_custom_command(OUTPUT ${go_options_file}
  COMMAND ${GO_EXECUTABLE} run ${CMAKE_CURRENT_SOURCE_DIR}/src/_util/translate_fdb_options.go
    -in ${CMAKE_SOURCE_DIR}/fdbclient/vexillographer/fdb.options
    -out ${go_options_file}
  DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/src/_util/translate_fdb_options.go
    ${CMAKE_SOURCE_DIR}/fdbclient/vexillographer/fdb.options
  COMMENT "Generate FDBOptions for GO")
add_custom_target(go_options_file DEPENDS ${go_options_file})
add_dependencies(go_options_file copy_go_sources)

function(build_go_package)
  set(options LIBRARY EXECUTABLE)
  set(oneValueArgs NAME PATH)
  set(multiValueArgs)
  cmake_parse_arguments(BGP "${options}" "${oneValueArgs}" "${multiValueArgs}" "${ARGN}")

  if(NOT BGP_NAME OR NOT BGP_PATH)
    message(FATAL_ERROR "NAME and PATH arguments are missing")
  endif()
  if(BGP_LIBRARY AND BGP_EXECUTABLE)
    message(FATAL_ERROR "Package can't be a library and an executable")
  endif()
  if(NOT BGP_LIBRARY AND NOT BGP_EXECUTABLE)
    message(FATAL_ERROR "Missing type")
  endif()

  if(BGP_LIBRARY)
    if(WIN32)
      set(outfile ${GO_PACKAGE_OUTDIR}/${BGP_PATH}.lib)
    else()
      set(outfile ${GO_PACKAGE_OUTDIR}/${BGP_PATH}.a)
    endif()
  else()
    get_filename_component(exec_filename ${BGP_PATH} NAME)
    if(WIN32)
      set(outfile ${GOPATH}/bin/${exec_filename}.exe)
    else()
      set(outfile ${GOPATH}/bin/${exec_filename})
    endif()
  endif()
  add_custom_command(OUTPUT ${outfile}
    COMMAND ${CMAKE_COMMAND} -E env ${go_env}
      ${GO_EXECUTABLE} install ${GO_IMPORT_PATH}/${BGP_PATH}
    DEPENDS ${fdb_options_file}
    COMMENT "Building ${BGP_NAME}")
  add_custom_target(${BGP_NAME} ALL DEPENDS ${outfile})
endfunction()

build_go_package(LIBRARY NAME fdb_go PATH fdb)
add_dependencies(fdb_go fdb_c go_options_file)

build_go_package(LIBRARY NAME tuple_go PATH fdb/tuple)
add_dependencies(tuple_go fdb_go)

build_go_package(LIBRARY NAME subspace_go PATH fdb/subspace)
add_dependencies(subspace_go tuple_go)

build_go_package(LIBRARY NAME directory_go PATH fdb/directory)
add_dependencies(directory_go tuple_go)

build_go_package(EXECUTABLE NAME fdb_go_tester PATH _stacktester)
add_dependencies(fdb_go_tester directory_go)
@@ -24,8 +24,10 @@ package main
 import (
     "encoding/xml"
+    "flag"
     "fmt"
     "go/doc"
+    "io"
     "io/ioutil"
     "log"
     "os"
@@ -48,22 +50,22 @@ type Options struct {
     Scope []Scope
 }

-func writeOptString(receiver string, function string, opt Option) {
-    fmt.Printf(`func (o %s) %s(param string) error {
+func writeOptString(w io.Writer, receiver string, function string, opt Option) {
+    fmt.Fprintf(w, `func (o %s) %s(param string) error {
     return o.setOpt(%d, []byte(param))
 }
 `, receiver, function, opt.Code)
 }

-func writeOptBytes(receiver string, function string, opt Option) {
-    fmt.Printf(`func (o %s) %s(param []byte) error {
+func writeOptBytes(w io.Writer, receiver string, function string, opt Option) {
+    fmt.Fprintf(w, `func (o %s) %s(param []byte) error {
     return o.setOpt(%d, param)
 }
 `, receiver, function, opt.Code)
 }

-func writeOptInt(receiver string, function string, opt Option) {
-    fmt.Printf(`func (o %s) %s(param int64) error {
+func writeOptInt(w io.Writer, receiver string, function string, opt Option) {
+    fmt.Fprintf(w, `func (o %s) %s(param int64) error {
     b, e := int64ToBytes(param)
     if e != nil {
         return e
@@ -73,36 +75,36 @@ func writeOptInt(receiver string, function string, opt Option) {
 `, receiver, function, opt.Code)
 }

-func writeOptNone(receiver string, function string, opt Option) {
-    fmt.Printf(`func (o %s) %s() error {
+func writeOptNone(w io.Writer, receiver string, function string, opt Option) {
+    fmt.Fprintf(w, `func (o %s) %s() error {
     return o.setOpt(%d, nil)
 }
 `, receiver, function, opt.Code)
 }

-func writeOpt(receiver string, opt Option) {
+func writeOpt(w io.Writer, receiver string, opt Option) {
     function := "Set" + translateName(opt.Name)

-    fmt.Println()
+    fmt.Fprintln(w)

     if opt.Description != "" {
-        fmt.Printf("// %s\n", opt.Description)
+        fmt.Fprintf(w, "// %s\n", opt.Description)
         if opt.ParamDesc != "" {
-            fmt.Printf("//\n// Parameter: %s\n", opt.ParamDesc)
+            fmt.Fprintf(w, "//\n// Parameter: %s\n", opt.ParamDesc)
         }
     } else {
-        fmt.Printf("// Not yet implemented.\n")
+        fmt.Fprintf(w, "// Not yet implemented.\n")
     }

     switch opt.ParamType {
     case "String":
-        writeOptString(receiver, function, opt)
+        writeOptString(w, receiver, function, opt)
     case "Bytes":
-        writeOptBytes(receiver, function, opt)
+        writeOptBytes(w, receiver, function, opt)
     case "Int":
-        writeOptInt(receiver, function, opt)
+        writeOptInt(w, receiver, function, opt)
     case "":
-        writeOptNone(receiver, function, opt)
+        writeOptNone(w, receiver, function, opt)
     default:
         log.Fatalf("Totally unexpected ParamType %s", opt.ParamType)
     }
@@ -112,9 +114,9 @@ func translateName(old string) string {
     return strings.Replace(strings.Title(strings.Replace(old, "_", " ", -1)), " ", "", -1)
 }

-func writeMutation(opt Option) {
+func writeMutation(w io.Writer, opt Option) {
     tname := translateName(opt.Name)
-    fmt.Printf(`
+    fmt.Fprintf(w, `
 // %s
 func (t Transaction) %s(key KeyConvertible, param []byte) {
     t.atomicOp(key.FDBKey(), param, %d)
@@ -122,23 +124,38 @@ func (t Transaction) %s(key KeyConvertible, param []byte) {
 `, opt.Description, tname, opt.Code)
 }

-func writeEnum(scope Scope, opt Option, delta int) {
-    fmt.Println()
+func writeEnum(w io.Writer, scope Scope, opt Option, delta int) {
+    fmt.Fprintln(w)
     if opt.Description != "" {
-        doc.ToText(os.Stdout, opt.Description, "\t// ", "", 73)
+        doc.ToText(w, opt.Description, "\t// ", "", 73)
         // fmt.Printf(" // %s\n", opt.Description)
     }
-    fmt.Printf(" %s %s = %d\n", scope.Name+translateName(opt.Name), scope.Name, opt.Code+delta)
+    fmt.Fprintf(w, " %s %s = %d\n", scope.Name+translateName(opt.Name), scope.Name, opt.Code+delta)
 }

 func main() {
+    var inFile string
+    var outFile string
+    flag.StringVar(&inFile, "in", "stdin", "Input file")
+    flag.StringVar(&outFile, "out", "stdout", "Output file")
+    flag.Parse()
+
     var err error

     v := Options{}

-    data, err := ioutil.ReadAll(os.Stdin)
-    if err != nil {
-        log.Fatal(err)
+    var data []byte
+
+    if inFile == "stdin" {
+        data, err = ioutil.ReadAll(os.Stdin)
+        if err != nil {
+            log.Fatal(err)
+        }
+    } else {
+        data, err = ioutil.ReadFile(inFile)
+        if err != nil {
+            log.Fatal(err)
+        }
     }

     err = xml.Unmarshal(data, &v)
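The new `-in`/`-out` flags mirror how the Go bindings CMake rules invoke this generator (translate_fdb_options.go). A manual run would look roughly like this, with the paths taken from the CMake command above and shown only as an illustration:

``` shell
# illustrative only: regenerate the Go options file by hand
go run bindings/go/src/_util/translate_fdb_options.go \
    -in fdbclient/vexillographer/fdb.options \
    -out bindings/go/src/fdb/generated.go
```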
@@ -146,7 +163,17 @@
         log.Fatal(err)
     }

-    fmt.Print(`/*
+    var out *os.File
+    if outFile == "stdout" {
+        out = os.Stdout
+    } else {
+        out, err = os.Create(outFile)
+        if err != nil {
+            log.Fatal(err)
+        }
+    }
+
+    fmt.Fprint(out, `/*
 * generated.go
 *
 * This source file is part of the FoundationDB open source project
@@ -197,7 +224,7 @@ func int64ToBytes(i int64) ([]byte, error) {
     for _, opt := range scope.Option {
         if !opt.Hidden {
-            writeOpt(receiver, opt)
+            writeOpt(out, receiver, opt)
         }
     }
     continue
@@ -206,7 +233,7 @@
     if scope.Name == "MutationType" {
         for _, opt := range scope.Option {
             if !opt.Hidden {
-                writeMutation(opt)
+                writeMutation(out, opt)
             }
         }
         continue
@@ -223,16 +250,17 @@
             scope.Name = "conflictRangeType"
         }

-        fmt.Printf(`
+        fmt.Fprintf(out, `
 type %s int

 const (
 `, scope.Name)
         for _, opt := range scope.Option {
             if !opt.Hidden {
-                writeEnum(scope, opt, d)
+                writeEnum(out, scope, opt, d)
             }
         }
-        fmt.Println(")")
+        fmt.Fprintln(out, ")")
     }
+    out.Close()
 }
@@ -1,7 +1,3 @@
-include(UseJava)
-find_package(JNI 1.8 REQUIRED)
-find_package(Java 1.8 COMPONENTS Development REQUIRED)
-
 set(JAVA_BINDING_SRCS
   src/main/com/apple/foundationdb/async/AsyncIterable.java
   src/main/com/apple/foundationdb/async/AsyncIterator.java
@@ -129,11 +125,67 @@ set_target_properties(fdb_java PROPERTIES
 set(CMAKE_JAVA_COMPILE_FLAGS "-source" "1.8" "-target" "1.8")
 set(CMAKE_JNI_TARGET TRUE)
 set(JAR_VERSION "${FDB_MAJOR}.${FDB_MINOR}.${FDB_REVISION}")
-add_jar(fdb-java ${JAVA_BINDING_SRCS} ${GENERATED_JAVA_FILES}
-  OUTPUT_DIR ${PROJECT_BINARY_DIR}/lib)
+add_jar(fdb-java ${JAVA_BINDING_SRCS} ${GENERATED_JAVA_FILES} ${CMAKE_SOURCE_DIR}/LICENSE
+  OUTPUT_DIR ${PROJECT_BINARY_DIR}/lib VERSION ${CMAKE_PROJECT_VERSION})
 add_dependencies(fdb-java fdb_java_options fdb_java)
 add_jar(foundationdb-tests SOURCES ${JAVA_TESTS_SRCS} INCLUDE_JARS fdb-java)
 add_dependencies(foundationdb-tests fdb_java_options)

-install_jar(fdb-java DESTINATION ${FDB_SHARE_DIR}/java COMPONENT clients)
-install(TARGETS fdb_java DESTINATION ${FDB_LIB_DIR} COMPONENT clients)
+# TODO[mpilman]: The java RPM will require some more effort (mostly on debian). However,
+# most people will use the fat-jar, so it is not clear how high this priority is.
+
+#install_jar(fdb-java DESTINATION ${FDB_SHARE_DIR}/java COMPONENT java)
+#install(TARGETS fdb_java DESTINATION ${FDB_LIB_DIR} COMPONENT java)
+
+set(FAT_JAR_BINARIES "NOTFOUND" CACHE STRING
+  "Path of a directory structure with libraries to include in fat jar (a lib directory)")
+
+set(jar_destination ${CMAKE_BINARY_DIR}/packages)
+set(unpack_dir ${CMAKE_CURRENT_BINARY_DIR}/fat_jar)
+file(MAKE_DIRECTORY ${jar_destination})
+file(MAKE_DIRECTORY ${unpack_dir})
+message(STATUS "Building fat jar to ${jar_destination}")
+get_property(jar_path TARGET fdb-java PROPERTY JAR_FILE)
+add_custom_command(OUTPUT ${unpack_dir}/META-INF/MANIFEST.MF
+  COMMAND ${Java_JAR_EXECUTABLE} xf ${jar_path}
+  WORKING_DIRECTORY ${unpack_dir}
+  DEPENDS "${jar_path}"
+  COMMENT "Unpack jar-file")
+add_custom_target(unpack_jar DEPENDS ${unpack_dir}/META-INF/MANIFEST.MF)
+add_dependencies(unpack_jar fdb-java)
+add_custom_command(OUTPUT ${unpack_dir}/LICENSE
+  COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_SOURCE_DIR}/LICENSE ${unpack_dir}
+  COMMENT "copy license")
+add_custom_target(copy_license DEPENDS ${unpack_dir}/LICENSE)
+add_dependencies(unpack_jar copy_license)
+if(FAT_JAR_BINARIES)
+  add_custom_command(OUTPUT ${unpack_dir}/lib
+    COMMAND ${CMAKE_COMMAND} -E copy_directory ${FAT_JAR_BINARIES} ${unpack_dir}
+    COMMENT "copy additional libraries"
+    DEPENDS ${unpack_dir}/META-INF/MANIFEST.MF)
+  add_custom_target(copy_libs DEPENDS ${unpack_dir}/lib)
+  add_dependencies(unpack_jar copy_libs)
+endif()
+if(WIN32)
+  set(lib_destination "windows/amd64")
+elseif(APPLE)
+  set(lib_destination "osx/x86_64")
+else()
+  set(lib_destination "linux/amd64")
+endif()
+set(lib_destination "${unpack_dir}/lib/${lib_destination}")
+file(MAKE_DIRECTORY ${lib_destination})
+add_custom_command(OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/lib_copied
+  COMMAND ${CMAKE_COMMAND} -E copy $<TARGET_FILE:fdb_java> ${lib_destination} &&
+    ${CMAKE_COMMAND} -E touch ${CMAKE_CURRENT_BINARY_DIR}/lib_copied
+  COMMENT "Copy library")
+add_custom_target(copy_lib DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/lib_copied)
+add_dependencies(copy_lib unpack_jar)
+set(target_jar ${jar_destination}/fdb-java-${CMAKE_PROJECT_VERSION}.jar)
+add_custom_command(OUTPUT ${target_jar}
+  COMMAND ${Java_JAR_EXECUTABLE} cf ${target_jar} .
+  WORKING_DIRECTORY ${unpack_dir}
+  COMMENT "Build ${jar_destination}/fdb-java-${CMAKE_PROJECT_VERSION}.jar")
+add_custom_target(fat-jar DEPENDS ${target_jar})
+add_dependencies(fat-jar copy_lib)
+add_dependencies(packages fat-jar)
@@ -0,0 +1,53 @@ (new file)
<img alt="FoundationDB logo" src="documentation/FDB_logo.png?raw=true" width="400">

FoundationDB is a distributed database designed to handle large volumes of structured data across clusters of commodity servers. It organizes data as an ordered key-value store and employs ACID transactions for all operations. It is especially well-suited for read/write workloads but also has excellent performance for write-intensive workloads. Users interact with the database using API language binding.

To learn more about FoundationDB, visit [foundationdb.org](https://www.foundationdb.org/)

## FoundationDB Java Bindings

In order to build the java bindings,
[JDK](http://www.oracle.com/technetwork/java/javase/downloads/index.html) >= 8
has to be installed. CMake will try to find a JDK installation; if it can find
one it will automatically build the java bindings.

If you have Java installed but cmake fails to find it, set the
`JAVA_HOME` environment variable.

### Fat Jar

By default, the generated jar file will depend on an installed libfdb_java
(provided with the generated RPM/DEB file on Linux). However, users usually find
a Jar-file that contains this library more convenient. This is also what you
will get if you download the jar file from Maven.

This file can be generated by compiling the `packages` target. For example with
make, you can run:

``` shell
make packages
```

#### Multi-Platform Jar-File

If you want to create a jar file that can run on more than one supported
architecture (the official one supports MacOS, Linux, and Windows), you can do
that by executing the following steps:

1. Create a directory called `lib` somewhere on your file system.
1. Create a subdirectory for each *additional* platform you want to support
   (`windows` for Windows, `osx` for MacOS, and `linux` for Linux).
1. Under each of those, create a subdirectory with the name of the architecture
   (currently only `amd64` is supported - on MacOS this has to be called
   `x86_64`, `amd64` on all others).
1. Set the cmake variable `FAT_JAR_BINARIES` to this `lib` directory. For
   example, if you created this directory structure under `/foo/bar`, the
   corresponding cmake command would be:

```
cmake -DFAT_JAR_BINARIES=/foo/bar/lib <PATH_TO_FDB_SOURCE>
```

After building the packages (with `make packages` or the packages
target in `Visual Studio`) you will find a jar-file in the `packages`
directory in your build directory.
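To make the multi-platform steps above concrete, here is a sketch of the `lib` layout using the `/foo/bar` example from that list (illustrative only; the matching `libfdb_java` build still has to be copied into each directory):

``` shell
# illustrative only: directory skeleton for FAT_JAR_BINARIES
mkdir -p /foo/bar/lib/linux/amd64 /foo/bar/lib/osx/x86_64 /foo/bar/lib/windows/amd64
cmake -DFAT_JAR_BINARIES=/foo/bar/lib <PATH_TO_FDB_SOURCE>
make packages
```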
@@ -5,7 +5,9 @@ set(SRCS
   fdb/locality.py
   fdb/six.py
   fdb/subspace_impl.py
-  fdb/tuple.py)
+  fdb/tuple.py
+  README.rst
+  MANIFEST.in)

 if(APPLE)
   list(APPEND SRCS fdb/libfdb_c.dylib.pth)
@@ -20,17 +22,10 @@ foreach(src ${SRCS})
   if(NOT EXISTS ${dirname})
     file(MAKE_DIRECTORY ${PROJECT_BINARY_DIR}/bindings/python/${dirname})
   endif()
-  set(copy_command "cp")
   set(from_path ${CMAKE_CURRENT_SOURCE_DIR}/${src})
   set(to_path ${CMAKE_CURRENT_BINARY_DIR}/${src})
-  if (WIN32)
-    set(copy_command "copy")
-    # copy on Windows doesn't understand '/' separators
-    string(REPLACE "/" "\\" from_path "${from_path}")
-    string(REPLACE "/" "\\" to_path "${to_path}")
-  endif()
   add_custom_command(OUTPUT ${PROJECT_BINARY_DIR}/bindings/python/${src}
-    COMMAND ${copy_command} ${from_path} ${to_path}
+    COMMAND ${CMAKE_COMMAND} -E copy ${from_path} ${to_path}
     DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/${src}
     WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
     COMMENT "copy ${src}")
@@ -46,4 +41,31 @@ vexillographer_compile(TARGET fdb_python_options LANG python OUT ${options_file}
 add_dependencies(python_binding fdb_python_options)

 set(out_files "${out_files};${options_file}")
-install(FILES ${out_files} DESTINATION ${FDB_PYTHON_INSTALL_DIR} COMPONENT clients)
+# TODO[mpilman]: it is not clear whether we want to have rpms for python
+#install(FILES ${out_files} DESTINATION ${FDB_PYTHON_INSTALL_DIR} COMPONENT python)
+
+# Create sdist
+configure_file(${CMAKE_CURRENT_SOURCE_DIR}/setup.py.cmake ${CMAKE_CURRENT_BINARY_DIR}/setup.py)
+configure_file(${CMAKE_SOURCE_DIR}/LICENSE ${CMAKE_CURRENT_BINARY_DIR}/LICENSE COPYONLY)
+find_program(pycodestyle pycodestyle)
+if (pycodestyle)
+  add_custom_command(OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/check_py_code_style
+    COMMAND ${pycodestyle} bindings/python --config=${CMAKE_CURRENT_SOURCE_DIR}/setup.cfg &&
+      ${CMAKE_COMMAND} -E ${CMAKE_CURRENT_BINARY_DIR}/check_py_code_style
+    WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
+    DEPENDS ${out_files}
+    COMMENT "Check python code style")
+  add_custom_target(fdb_python_check DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/check_py_code_style)
+else()
+  add_custom_target(fdb_python_check COMMAND ${CMAKE_COMMAND} -E echo "Skipped Python style check! Missing: pycodestyle")
+endif()
+set(package_file_name foundationdb-${FDB_VERSION}.tar.gz)
+set(package_file ${CMAKE_BINARY_DIR}/packages/${package_file_name})
+add_custom_command(OUTPUT ${package_file}
+  COMMAND $<TARGET_FILE:Python::Interpreter> setup.py sdist &&
+    ${CMAKE_COMMAND} -E copy dist/${package_file_name} ${package_file}
+  WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
+  COMMENT "Create Python sdist package")
+add_custom_target(python_package DEPENDS ${package_file})
+add_dependencies(python_package python_binding)
+add_dependencies(packages python_package)
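The rules above wire a `python_package` target into the top-level `packages` target; driving just the Python sdist from a make-based build directory might look like this (illustrative):

``` shell
# illustrative only: build the Python source distribution on its own
make python_package
# result: <build-dir>/packages/foundationdb-<FDB_VERSION>.tar.gz
```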
@@ -0,0 +1,38 @@ (new file)
from distutils.core import setup

try:
    with open("README.rst") as f:
        long_desc = f.read()
except:
    long_desc = ""

setup(name="foundationdb",
      version="${FDB_VERSION}",
      author="FoundationDB",
      author_email="fdb-dist@apple.com",
      description="Python bindings for the FoundationDB database",
      url="https://www.foundationdb.org",
      packages=['fdb'],
      package_data={'fdb': ["fdb/*.py"]},
      long_description=long_desc,
      classifiers=[
          'Development Status :: 5 - Production/Stable',
          'Intended Audience :: Developers',
          'License :: OSI Approved :: Apache Software License',
          'Operating System :: MacOS :: MacOS X',
          'Operating System :: Microsoft :: Windows',
          'Operating System :: POSIX :: Linux',
          'Programming Language :: Python :: 2',
          'Programming Language :: Python :: 2.6',
          'Programming Language :: Python :: 2.7',
          'Programming Language :: Python :: 3',
          'Programming Language :: Python :: 3.0',
          'Programming Language :: Python :: 3.1',
          'Programming Language :: Python :: 3.2',
          'Programming Language :: Python :: 3.3',
          'Programming Language :: Python :: 3.4',
          'Programming Language :: Python :: Implementation :: CPython',
          'Topic :: Database',
          'Topic :: Database :: Front-Ends'
      ]
      )
@@ -0,0 +1,16 @@ (new file)
# we put this generated file into the src dir, as it
# greatly simplifies debugging
vexillographer_compile(TARGET ruby_options LANG ruby
  OUT ${CMAKE_CURRENT_SOURCE_DIR}/lib/fdboptions.rb ALL)
configure_file(fdb.gemspec.cmake fdb.gemspec)

set(gem_file fdb-${FDB_VERSION}.gem)
set(gem_target ${CMAKE_BINARY_DIR}/packages/${gem_file})
add_custom_command(OUTPUT ${gem_target}
  COMMAND ${GEM_COMMAND} build fdb.gemspec &&
    ${CMAKE_COMMAND} -E copy ${gem_file} ${gem_target}
  WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
  COMMENT "Building ruby gem")
add_custom_target(gem_package DEPENDS ${gem_target})
add_dependencies(gem_package ruby_options)
add_dependencies(packages gem_package)
@@ -0,0 +1,22 @@ (new file)
# -*- mode: ruby; -*-

Gem::Specification.new do |s|
  s.name = 'fdb'
  s.version = '${FDB_VERSION}'
  s.date = Time.new.strftime '%Y-%m-%d'
  s.summary = "Ruby bindings for the FoundationDB database"
  s.description = <<-EOF
Ruby bindings for the FoundationDB database.

Complete documentation of the FoundationDB Ruby API can be found at:
https://apple.github.io/foundationdb/api-ruby.html.
EOF
  s.authors = ["FoundationDB"]
  s.email = 'fdb-dist@apple.com'
  s.files = ["${CMAKE_SOURCE_DIR}/LICENSE", "${CMAKE_CURRENT_SOURCE_DIR}/lib/fdb.rb", "${CMAKE_CURRENT_SOURCE_DIR}/lib/fdbdirectory.rb", "${CMAKE_CURRENT_SOURCE_DIR}/lib/fdbimpl.rb", "${CMAKE_CURRENT_SOURCE_DIR}/lib/fdblocality.rb", "${CMAKE_CURRENT_SOURCE_DIR}/lib/fdboptions.rb", "${CMAKE_CURRENT_SOURCE_DIR}/lib/fdbsubspace.rb", "${CMAKE_CURRENT_SOURCE_DIR}/lib/fdbtuple.rb", "${CMAKE_CURRENT_SOURCE_DIR}/lib/fdbimpl_v609.rb"]
  s.homepage = 'https://www.foundationdb.org'
  s.license = 'Apache v2'
  s.add_dependency('ffi', '>= 1.1.5')
  s.required_ruby_version = '>= 1.9.3'
  s.requirements << 'These bindings require the FoundationDB client. The client can be obtained from https://www.foundationdb.org/download/.'
end
@@ -1,9 +1,9 @@
 FROM ubuntu:15.04
-LABEL version=0.0.3
+LABEL version=0.0.5

 RUN sed -i -e 's/archive.ubuntu.com\|security.ubuntu.com/old-releases.ubuntu.com/g' -e 's/us\.old/old/g' /etc/apt/sources.list && apt-get clean

-RUN apt-get update && apt-get --no-install-recommends install -y --force-yes bzip2 ca-certificates=20141019 adduser apt base-files base-passwd bash binutils build-essential cpp cpp-4.9 dpkg dos2unix fakeroot findutils g++=4:4.9.2-2ubuntu2 g++-4.9=4.9.2-10ubuntu13 gawk=1:4.1.1+dfsg-1 gcc-5-base gcc=4:4.9.2-2ubuntu2 gcc-4.9=4.9.2-10ubuntu13 gcc-4.9-base:amd64=4.9.2-10ubuntu13 gcc-5-base:amd64=5.1~rc1-0ubuntu1 gdb git golang golang-go golang-go-linux-amd64 golang-src grep gzip hostname java-common libasan1 liblsan0 libtsan0 libubsan0 libcilkrts5 libgcc-4.9-dev libstdc++-4.9-dev libgl1-mesa-dri libgl1-mesa-glx libmono-system-xml-linq4.0-cil libmono-system-data-datasetextensions4.0-cil libstdc++-4.9-pic locales login m4 make makedev mawk mono-dmcs npm openjdk-8-jdk passwd python-distlib python-gevent python-greenlet python-html5lib python-minimal python-pip python-pkg-resources python-requests python-setuptools python-six python-urllib3 python-yaml python2.7 python2.7-minimal rpm rpm2cpio ruby ruby2.1 rubygems-integration sed tar texinfo tzdata-java udev unzip util-linux valgrind vim wget golang-go.tools curl sphinx-common gnupg python-dev
+RUN apt-get update && apt-get --no-install-recommends install -y --force-yes bzip2 ca-certificates=20141019 adduser apt base-files base-passwd bash binutils build-essential cpp cpp-4.9 dpkg dos2unix fakeroot findutils g++=4:4.9.2-2ubuntu2 g++-4.9=4.9.2-10ubuntu13 gawk=1:4.1.1+dfsg-1 gcc-5-base gcc=4:4.9.2-2ubuntu2 gcc-4.9=4.9.2-10ubuntu13 gcc-4.9-base:amd64=4.9.2-10ubuntu13 gcc-5-base:amd64=5.1~rc1-0ubuntu1 gdb git golang golang-go golang-go-linux-amd64 golang-src grep gzip hostname java-common libasan1 liblsan0 libtsan0 libubsan0 libcilkrts5 libgcc-4.9-dev libstdc++-4.9-dev libgl1-mesa-dri libgl1-mesa-glx libmono-system-xml-linq4.0-cil libmono-system-data-datasetextensions4.0-cil libstdc++-4.9-pic locales login m4 make makedev mawk mono-dmcs npm openjdk-8-jdk passwd python-distlib python-gevent python-greenlet python-html5lib python-minimal python-pip python-pkg-resources python-requests python-setuptools python-six python-urllib3 python-yaml python2.7 python2.7-minimal rpm rpm2cpio ruby ruby2.1 rubygems-integration sed tar texinfo tzdata-java udev unzip util-linux valgrind vim wget golang-go.tools curl sphinx-common gnupg python-dev python3 python3-dev

 RUN adduser --disabled-password --gecos '' fdb && chown -R fdb /opt && chmod -R 0777 /opt
@@ -31,6 +31,8 @@ RUN cd /opt/ && wget https://ftp.openbsd.org/pub/OpenBSD/LibreSSL/libressl-2.6.4
 ./configure CFLAGS="-fPIC -O3" && make -j4 && make install &&\
 cd /opt/ && rm -r libressl-2.6.4/ libressl-2.6.4.tar.gz libressl-2.6.4.tar.gz.asc libressl.asc

+RUN cd /opt && wget https://cmake.org/files/v3.12/cmake-3.12.1-Linux-x86_64.tar.gz -qO - | tar -xz
+
 RUN LANGUAGE=en_US.UTF-8 LANG=en_US.UTF-8 LC_ALL=en_US.UTF-8 locale-gen en_US.UTF-8

 RUN dpkg-reconfigure locales
@@ -43,3 +45,4 @@ ENV CC=$CC

 ARG LIBRARY_PATH=/usr/local/lib
 ENV LIBRARY_PATH=$LD_FLAGS
+ENV PATH=$PATH:/opt/cmake-3.12.1-Linux-x86_64/bin
@@ -0,0 +1,44 @@ (new file)
FROM centos:6
LABEL version=0.0.4

RUN yum install -y yum-utils
RUN yum-config-manager --enable rhel-server-rhscl-7-rpms
RUN yum -y install centos-release-scl
RUN yum install -y devtoolset-7

# install cmake
RUN curl -L https://github.com/Kitware/CMake/releases/download/v3.13.4/cmake-3.13.4-Linux-x86_64.tar.gz > /tmp/cmake.tar.gz &&\
    echo "563a39e0a7c7368f81bfa1c3aff8b590a0617cdfe51177ddc808f66cc0866c76 /tmp/cmake.tar.gz" > /tmp/cmake-sha.txt &&\
    sha256sum -c /tmp/cmake-sha.txt &&\
    cd /tmp && tar xf cmake.tar.gz && cp -r cmake-3.13.4-Linux-x86_64/* /usr/local/

# install boost
RUN curl -L https://dl.bintray.com/boostorg/release/1.67.0/source/boost_1_67_0.tar.bz2 > /tmp/boost.tar.bz2 &&\
    cd /tmp && echo "2684c972994ee57fc5632e03bf044746f6eb45d4920c343937a465fd67a5adba boost.tar.bz2" > boost-sha.txt &&\
    sha256sum -c boost-sha.txt && tar xf boost.tar.bz2 && cp -r boost_1_67_0/boost /usr/local/include/ &&\
    rm -rf boost.tar.bz2 boost_1_67_0

# install mono (for actorcompiler)
RUN yum install -y epel-release
RUN yum install -y mono-core

# install Java
RUN yum install -y java-1.8.0-openjdk-devel

# install LibreSSL
RUN curl https://ftp.openbsd.org/pub/OpenBSD/LibreSSL/libressl-2.8.2.tar.gz > /tmp/libressl.tar.gz &&\
    cd /tmp && echo "b8cb31e59f1294557bfc80f2a662969bc064e83006ceef0574e2553a1c254fd5 libressl.tar.gz" > libressl-sha.txt &&\
    sha256sum -c libressl-sha.txt && tar xf libressl.tar.gz &&\
    cd libressl-2.8.2 && cd /tmp/libressl-2.8.2 && scl enable devtoolset-7 -- ./configure --prefix=/usr/local/stow/libressl CFLAGS="-fPIC -O3" --prefix=/usr/local &&\
    cd /tmp/libressl-2.8.2 && scl enable devtoolset-7 -- make -j`nproc` install &&\
    rm -rf /tmp/libressl-2.8.2 /tmp/libressl.tar.gz

# install dependencies for bindings and documentation
# python 2.7 is required for the documentation
RUN yum install -y rh-python36-python-devel rh-ruby24 golang python27

# install packaging tools
RUN yum install -y rpm-build debbuild

CMD scl enable devtoolset-7 python27 rh-python36 rh-ruby24 -- bash
@ -0,0 +1,236 @@
#!/usr/bin/env bash

arguments_usage() {
    cat <<EOF
usage: build.sh [-h] [commands]
   -h: print this help message and
       abort execution

Will execute the passed commands
in the order they were passed
EOF
}

arguments_parse() {
    local __res=0
    while getopts ":ho:" opt
    do
        case ${opt} in
            h )
                arguments_usage
                __res=2
                break
                ;;
            \? )
                echo "Unknown option ${opt}"
                arguments_usage
                __res=1
                break
                ;;
        esac
    done
    shift $((OPTIND -1))
    commands=("$@")
    return ${__res}
}

configure() {
    local __res=0
    for _ in 1
    do
        cmake ../src
        __res=$?
        if [ ${__res} -ne 0 ]
        then
            break
        fi
    done
    return ${__res}
}

build_fast() {
    local __res=0
    for _ in 1
    do
        make -j`nproc`
        __res=$?
        if [ ${__res} -ne 0 ]
        then
            break
        fi
    done
    return ${__res}
}

build() {
    local __res=0
    for _ in 1
    do
        configure
        __res=$?
        if [ ${__res} -ne 0 ]
        then
            break
        fi
        build_fast
        __res=$?
        if [ ${__res} -ne 0 ]
        then
            break
        fi
    done
    return ${__res}
}

package_fast() {
    local __res=0
    for _ in 1
    do
        make -j`nproc` packages
        __res=$?
        if [ ${__res} -ne 0 ]
        then
            break
        fi
    done
    return ${__res}
}

package() {
    local __res=0
    for _ in 1
    do
        configure
        __res=$?
        if [ ${__res} -ne 0 ]
        then
            break
        fi
        package_fast
        __res=$?
        if [ ${__res} -ne 0 ]
        then
            break
        fi
    done
    return ${__res}
}

rpm() {
    local __res=0
    for _ in 1
    do
        cmake -DINSTALL_LAYOUT=RPM ../src
        __res=$?
        if [ ${__res} -ne 0 ]
        then
            break
        fi
        build_fast
        __res=$?
        if [ ${__res} -ne 0 ]
        then
            break
        fi
        fakeroot cpack
        __res=$?
        if [ ${__res} -ne 0 ]
        then
            break
        fi
    done
    return ${__res}
}

deb() {
    local __res=0
    for _ in 1
    do
        cmake -DINSTALL_LAYOUT=DEB ../src
        __res=$?
        if [ ${__res} -ne 0 ]
        then
            break
        fi
        build_fast
        __res=$?
        if [ ${__res} -ne 0 ]
        then
            break
        fi
        fakeroot cpack
        __res=$?
        if [ ${__res} -ne 0 ]
        then
            break
        fi
    done
    return ${__res}
}

main() {
    local __res=0
    for _ in 1
    do
        arguments_parse "$@"
        __res=$?
        if [ ${__res} -ne 0 ]
        then
            if [ ${__res} -eq 2 ]
            then
                # in this case there was no error
                # We still want to exit the script
                __res=0
            fi
            break
        fi
        echo "Num commands ${#commands[@]}"
        for command in "${commands[@]}"
        do
            echo "Command: ${command}"
            case ${command} in
                configure )
                    configure
                    __res=$?
                    ;;
                build )
                    build
                    __res=$?
                    ;;
                build/fast )
                    build_fast
                    __res=$?
                    ;;
                package )
                    package
                    __res=$?
                    ;;
                package/fast )
                    package_fast
                    __res=$?
                    ;;
                rpm )
                    rpm
                    ;;
                deb )
                    deb
                    ;;
                linux-pkgs)
                    rpm
                    deb
                    ;;
                * )
                    echo "ERROR: Command not found ($command)"
                    __res=1
                    ;;
            esac
            if [ ${__res} -ne 0 ]
            then
                break
            fi
        done
    done
    return ${__res}
}

main "$@"
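A minimal usage sketch, assuming the repository is checked out in a directory named src and the build directory is its sibling (the cmake ../src call above expects this layout, and it matches the docker-compose services in this change):

# configure and compile, then build RPM and DEB packages
mkdir -p build && cd build
bash ../src/build/cmake/build.sh configure build
bash ../src/build/cmake/build.sh linux-pkgs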
@ -0,0 +1,3 @@
FROM centos:6

RUN yum install -y yum-utils
@ -0,0 +1,3 @@
FROM ubuntu:16.04

RUN apt-get update
@ -0,0 +1,57 @@
version: "3"

services:

  common: &common
    image: foundationdb-build:0.0.4
    build:
      context: .
      dockerfile: Dockerfile

  build-setup: &build-setup
    <<: *common
    depends_on: [common]
    #debuginfo builds need the build path to be longer than
    #the path where debuginfo sources are placed. Crazy, yes,
    #see the manual for CPACK_RPM_BUILD_SOURCE_DIRS_PREFIX.
    volumes:
      - ../..:/foundationdb/deep/directory/as/debuginfo/doesnt/work/otherwise/src
      - ${BUILDDIR}:/foundationdb/deep/directory/as/debuginfo/doesnt/work/otherwise/build
    working_dir: /foundationdb/deep/directory/as/debuginfo/doesnt/work/otherwise/build

  configure: &configure
    <<: *build-setup
    command: scl enable devtoolset-7 rh-ruby24 rh-python36 python27 -- bash ../src/build/cmake/build.sh configure

  build: &build
    <<: *build-setup
    command: scl enable devtoolset-7 rh-ruby24 rh-python36 python27 -- bash ../src/build/cmake/build.sh build

  build-fast: &build-fast
    <<: *build-setup
    command: scl enable devtoolset-7 rh-ruby24 rh-python36 python27 -- bash ../src/build/cmake/build.sh build/fast

  rpm: &rpm
    <<: *build-setup
    command: scl enable devtoolset-7 rh-ruby24 rh-python36 python27 -- bash ../src/build/cmake/build.sh rpm

  deb: &deb
    <<: *build-setup
    command: scl enable devtoolset-7 rh-ruby24 rh-python36 python27 -- bash ../src/build/cmake/build.sh deb

  linux-pkgs:
    <<: *build-setup
    command: scl enable devtoolset-7 rh-ruby24 rh-python36 python27 -- bash ../src/build/cmake/build.sh linux-pkgs

  package: &package
    <<: *build-setup
    command: scl enable devtoolset-7 rh-ruby24 rh-python36 python27 -- bash ../src/build/cmake/build.sh package

  package-fast: &package-fast
    <<: *build-setup
    command: scl enable devtoolset-7 rh-ruby24 rh-python36 python27 -- bash ../src/build/cmake/build.sh package/fast

  shell:
    <<: *build-setup
    volumes:
      - ..:/foundationdb
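A hedged invocation sketch for these services (run next to this compose file; BUILDDIR is the host build directory consumed by the volumes section above):

# build RPM and DEB packages inside the dockerized toolchain
export BUILDDIR=$HOME/fdb-build
mkdir -p "$BUILDDIR"
docker-compose run --rm linux-pkgs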
@ -2,10 +2,7 @@ version: "3"
 
 services:
 
   common: &common
-    image: foundationdb-build:0.0.3
-    build:
-      context: .
-      dockerfile: Dockerfile
+    image: foundationdb/foundationdb-build:0.0.5
 
   build-setup: &build-setup
     <<: *common
@ -15,12 +12,14 @@ services:
     working_dir: /foundationdb
     environment:
       - MAKEJOBS=1
+      - BUILD_DIR=./work
 
   release-setup: &release-setup
     <<: *build-setup
     environment:
       - MAKEJOBS=1
       - RELEASE=true
+      - BUILD_DIR=./work
 
   snapshot-setup: &snapshot-setup
     <<: *build-setup
@ -54,6 +53,30 @@ services:
     <<: *snapshot-bindings
 
+
+  snapshot-cmake: &snapshot-cmake
+    <<: *build-setup
+    command: bash -c 'if [ -f CMakeLists.txt ]; then mkdir -p "$${BUILD_DIR}" && cd "$${BUILD_DIR}" && cmake .. && make -j "$${MAKEJOBS}"; fi'
+
+  prb-cmake:
+    <<: *snapshot-cmake
+
+
+  snapshot-ctest: &snapshot-ctest
+    <<: *build-setup
+    command: bash -c 'if [ -f CMakeLists.txt ]; then mkdir -p "$${BUILD_DIR}" && cd "$${BUILD_DIR}" && cmake .. && make -j "$${MAKEJOBS}" && ctest -L fast -j "$${MAKEJOBS}" --output-on-failure; fi'
+
+  prb-ctest:
+    <<: *snapshot-ctest
+
+
+  snapshot-correctness: &snapshot-correctness
+    <<: *build-setup
+    command: bash -c 'if [ -f CMakeLists.txt ]; then mkdir -p "$${BUILD_DIR}" && cd "$${BUILD_DIR}" && cmake .. && make -j "$${MAKEJOBS}" && ctest -j "$${MAKEJOBS}" --output-on-failure; fi'
+
+  prb-correctness:
+    <<: *snapshot-correctness
+
+
   shell:
     <<: *build-setup
     volumes:
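docker-compose collapses each $$ to a literal $ before the command reaches the container, so the snapshot-cmake service above effectively runs a shell fragment like this sketch (BUILD_DIR and MAKEJOBS come from the environment entries added above):

# what runs inside the container for snapshot-cmake
if [ -f CMakeLists.txt ]; then
  mkdir -p "${BUILD_DIR}" && cd "${BUILD_DIR}" && cmake .. && make -j "${MAKEJOBS}"
fi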
@ -22,9 +22,10 @@ TARGETS += packages
|
||||||
CLEAN_TARGETS += packages_clean
|
CLEAN_TARGETS += packages_clean
|
||||||
|
|
||||||
PACKAGE_BINARIES = fdbcli fdbserver fdbbackup fdbmonitor fdbrestore fdbdr dr_agent backup_agent
|
PACKAGE_BINARIES = fdbcli fdbserver fdbbackup fdbmonitor fdbrestore fdbdr dr_agent backup_agent
|
||||||
PACKAGE_CONTENTS := $(addprefix bin/, $(PACKAGE_BINARIES)) $(addprefix bin/, $(addsuffix .debug, $(PACKAGE_BINARIES))) lib/libfdb_c.$(DLEXT) bindings/python/fdb/fdboptions.py bindings/c/foundationdb/fdb_c_options.g.h
|
PROJECT_BINARIES = $(addprefix bin/, $(PACKAGE_BINARIES))
|
||||||
|
PACKAGE_CONTENTS := $(PROJECT_BINARIES) $(addprefix bin/, $(addsuffix .debug, $(PACKAGE_BINARIES))) lib/libfdb_c.$(DLEXT) bindings/python/fdb/fdboptions.py bindings/c/foundationdb/fdb_c_options.g.h
|
||||||
|
|
||||||
packages: TGZ FDBSERVERAPI
|
packages: TGZ BINS FDBSERVERAPI
|
||||||
|
|
||||||
TGZ: $(PACKAGE_CONTENTS) versions.target lib/libfdb_java.$(DLEXT)
|
TGZ: $(PACKAGE_CONTENTS) versions.target lib/libfdb_java.$(DLEXT)
|
||||||
@echo "Archiving tgz"
|
@echo "Archiving tgz"
|
||||||
|
@ -32,9 +33,17 @@ TGZ: $(PACKAGE_CONTENTS) versions.target lib/libfdb_java.$(DLEXT)
|
||||||
@rm -f packages/FoundationDB-$(PLATFORM)-*.tar.gz
|
@rm -f packages/FoundationDB-$(PLATFORM)-*.tar.gz
|
||||||
@bash -c "tar -czf packages/FoundationDB-$(PLATFORM)-$(VERSION)-$(PKGRELEASE).tar.gz bin/{fdbmonitor{,.debug},fdbcli{,.debug},fdbserver{,.debug},fdbbackup{,.debug},fdbdr{,.debug},fdbrestore{,.debug},dr_agent{,.debug},coverage.{fdbclient,fdbserver,fdbrpc,flow}.xml} lib/libfdb_c.$(DLEXT){,-debug} lib/libfdb_java.$(DLEXT)* bindings/python/fdb/*.py bindings/c/*.h"
|
@bash -c "tar -czf packages/FoundationDB-$(PLATFORM)-$(VERSION)-$(PKGRELEASE).tar.gz bin/{fdbmonitor{,.debug},fdbcli{,.debug},fdbserver{,.debug},fdbbackup{,.debug},fdbdr{,.debug},fdbrestore{,.debug},dr_agent{,.debug},coverage.{fdbclient,fdbserver,fdbrpc,flow}.xml} lib/libfdb_c.$(DLEXT){,-debug} lib/libfdb_java.$(DLEXT)* bindings/python/fdb/*.py bindings/c/*.h"
|
||||||
|
|
||||||
|
BINS: packages/foundationdb-binaries-$(VERSION)-$(PLATFORM).tar.gz
|
||||||
|
|
||||||
packages_clean:
|
packages_clean:
|
||||||
@echo "Cleaning packages"
|
@echo "Cleaning packages"
|
||||||
@rm -f packages/FoundationDB-$(PLATFORM)-*.tar.gz packages/fdb-tests-$(VERSION).tar.gz packages/fdb-headers-$(VERSION).tar.gz packages/fdb-bindings-$(VERSION).tar.gz packages/fdb-server-$(VERSION)-$(PLATFORM).tar.gz
|
@rm -f packages/FoundationDB-$(PLATFORM)-*.tar.gz packages/foundationdb-binaries-$(VERSION)-$(PLATFORM).tar.gz packages/fdb-tests-$(VERSION).tar.gz packages/fdb-headers-$(VERSION).tar.gz packages/fdb-bindings-$(VERSION).tar.gz packages/fdb-server-$(VERSION)-$(PLATFORM).tar.gz
|
||||||
|
|
||||||
|
packages/foundationdb-binaries-$(VERSION)-$(PLATFORM).tar.gz: $(PROJECT_BINARIES) versions.target
|
||||||
|
@echo "Packaging binaries"
|
||||||
|
@mkdir -p packages
|
||||||
|
@rm -f packages/foundationdb-binaries-$(VERSION)-$(PLATFORM).tar.gz
|
||||||
|
@bash -c "tar -czf packages/foundationdb-binaries-$(VERSION)-$(PLATFORM).tar.gz $(PROJECT_BINARIES)"
|
||||||
|
|
||||||
packages/fdb-server-$(VERSION)-$(PLATFORM).tar.gz: bin/fdbserver bin/fdbcli lib/libfdb_c.$(DLEXT)
|
packages/fdb-server-$(VERSION)-$(PLATFORM).tar.gz: bin/fdbserver bin/fdbcli lib/libfdb_c.$(DLEXT)
|
||||||
@echo "Packaging fdb server api"
|
@echo "Packaging fdb server api"
|
||||||
|
|
|
@ -101,7 +101,7 @@ function(add_fdb_test)
   endif()
   list(TRANSFORM ADD_FDB_TEST_TEST_FILES PREPEND "${CMAKE_CURRENT_SOURCE_DIR}/")
   add_test(NAME ${test_name}
-    COMMAND ${PYTHON_EXECUTABLE} ${TestRunner}
+    COMMAND $<TARGET_FILE:Python::Interpreter> ${TestRunner}
     -n ${test_name}
     -b ${PROJECT_BINARY_DIR}
     -t ${test_type}
@ -16,27 +16,6 @@ if(WIN32)
     "System.Data"
     "System.Xml")
 else()
-  find_program(MONO_EXECUTABLE mono)
-  find_program(MCS_EXECUTABLE dmcs)
-
-  if (NOT MCS_EXECUTABLE)
-    find_program(MCS_EXECUTABLE mcs)
-  endif()
-
-  set(MONO_FOUND FALSE CACHE INTERNAL "")
-
-  if (NOT MCS_EXECUTABLE)
-    find_program(MCS_EXECUTABLE mcs)
-  endif()
-
-  if (MONO_EXECUTABLE AND MCS_EXECUTABLE)
-    set(MONO_FOUND True CACHE INTERNAL "")
-  endif()
-
-  if (NOT MONO_FOUND)
-    message(FATAL_ERROR "Could not find mono")
-  endif()
-
   set(ACTOR_COMPILER_REFERENCES
     "-r:System,System.Core,System.Xml.Linq,System.Data.DataSetExtensions,Microsoft.CSharp,System.Data,System.Xml")
@ -0,0 +1,25 @@
set(COVERAGETOOL_SRCS
  ${CMAKE_CURRENT_SOURCE_DIR}/flow/coveragetool/Program.cs
  ${CMAKE_CURRENT_SOURCE_DIR}/flow/coveragetool/Properties/AssemblyInfo.cs)
if(WIN32)
  add_executable(coveragetool ${COVERAGETOOL_SRCS})
  target_compile_options(coveragetool PRIVATE "/langversion:6")
  set_property(TARGET coveragetool PROPERTY VS_DOTNET_REFERENCES
    "System"
    "System.Core"
    "System.Xml.Linq"
    "System.Data.DataSetExtensions"
    "Microsoft.CSharp"
    "System.Data"
    "System.Xml")
else()
  set(COVERAGETOOL_COMPILER_REFERENCES
    "-r:System,System.Core,System.Xml.Linq,System.Data.DataSetExtensions,Microsoft.CSharp,System.Data,System.Xml")

  add_custom_command(OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/coveragetool.exe
    COMMAND ${MCS_EXECUTABLE} ARGS ${COVERAGETOOL_COMPILER_REFERENCES} ${COVERAGETOOL_SRCS} "-target:exe" "-out:coveragetool.exe"
    DEPENDS ${COVERAGETOOL_SRCS}
    COMMENT "Compile coveragetool" VERBATIM)
  add_custom_target(coveragetool DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/coveragetool.exe)
  set(coveragetool_exe "${CMAKE_CURRENT_BINARY_DIR}/coveragetool.exe")
endif()
@ -25,10 +25,14 @@ else()
 add_custom_target(vexillographer DEPENDS ${VEXILLOGRAPHER_EXE})
 endif()
 
-macro(vexillographer_compile)
+function(vexillographer_compile)
+  set(CX_OPTIONS ALL)
   set(CX_ONE_VALUE_ARGS TARGET LANG OUT)
   set(CX_MULTI_VALUE_ARGS OUTPUT)
-  cmake_parse_arguments(VX "" "${CX_ONE_VALUE_ARGS}" "${CX_MULTI_VALUE_ARGS}" "${ARGN}")
+  cmake_parse_arguments(VX "${CX_OPTIONS}" "${CX_ONE_VALUE_ARGS}" "${CX_MULTI_VALUE_ARGS}" "${ARGN}")
+  if(NOT VX_OUTPUT)
+    set(VX_OUTPUT ${VX_OUT})
+  endif()
   if(WIN32)
     add_custom_command(
       OUTPUT ${VX_OUTPUT}
@ -42,5 +46,9 @@ macro(vexillographer_compile)
       DEPENDS ${CMAKE_SOURCE_DIR}/fdbclient/vexillographer/fdb.options vexillographer
       COMMENT "Generate FDBOptions ${VX_LANG} files")
   endif()
-  add_custom_target(${VX_TARGET} DEPENDS ${VX_OUTPUT})
-endmacro()
+  if(VX_ALL)
+    add_custom_target(${VX_TARGET} ALL DEPENDS ${VX_OUTPUT})
+  else()
+    add_custom_target(${VX_TARGET} DEPENDS ${VX_OUTPUT})
+  endif()
+endfunction()
@ -4,13 +4,9 @@ set(USE_VALGRIND OFF CACHE BOOL "Compile for valgrind usage")
 set(USE_GOLD_LINKER OFF CACHE BOOL "Use gold linker")
 set(ALLOC_INSTRUMENTATION OFF CACHE BOOL "Instrument alloc")
 set(WITH_UNDODB OFF CACHE BOOL "Use rr or undodb")
-set(OPEN_FOR_IDE OFF CACHE BOOL "Open this in an IDE (won't compile/link)")
 set(FDB_RELEASE OFF CACHE BOOL "This is a building of a final release")
 
 add_compile_options(-DCMAKE_BUILD)
-if(WITH_TLS)
-  add_compile_options(-DHAVE_OPENSSL)
-endif()
 
 find_package(Threads REQUIRED)
 if(ALLOC_INSTRUMENTATION)
@ -0,0 +1,27 @@
if(WIN32)
  # C# is currently only supported on Windows.
  # On other platforms we find mono manually
  enable_language(CSharp)
else()
  # for other platforms we currently use mono
  find_program(MONO_EXECUTABLE mono)
  find_program(MCS_EXECUTABLE dmcs)

  if (NOT MCS_EXECUTABLE)
    find_program(MCS_EXECUTABLE mcs)
  endif()

  set(MONO_FOUND FALSE CACHE INTERNAL "")

  if (NOT MCS_EXECUTABLE)
    find_program(MCS_EXECUTABLE mcs)
  endif()

  if (MONO_EXECUTABLE AND MCS_EXECUTABLE)
    set(MONO_FOUND True CACHE INTERNAL "")
  endif()

  if (NOT MONO_FOUND)
    message(FATAL_ERROR "Could not find mono")
  endif()
endif()
@ -0,0 +1,105 @@
set(FORCE_ALL_COMPONENTS OFF CACHE BOOL "Fails cmake if not all dependencies are found")

################################################################################
# LibreSSL
################################################################################

set(DISABLE_TLS OFF CACHE BOOL "Don't try to find LibreSSL and always build without TLS support")
if(DISABLE_TLS)
  set(WITH_TLS OFF)
else()
  set(LIBRESSL_USE_STATIC_LIBS TRUE)
  find_package(LibreSSL)
  if(LibreSSL_FOUND)
    set(WITH_TLS ON)
    add_compile_options(-DHAVE_OPENSSL)
  else()
    message(STATUS "LibreSSL NOT Found - Will compile without TLS Support")
    message(STATUS "You can set LibreSSL_ROOT to the LibreSSL install directory to help cmake find it")
    set(WITH_TLS OFF)
  endif()
endif()

################################################################################
# Java Bindings
################################################################################

set(WITH_JAVA OFF)
find_package(JNI 1.8 REQUIRED)
find_package(Java 1.8 COMPONENTS Development)
if(JNI_FOUND AND Java_FOUND AND Java_Development_FOUND)
  set(WITH_JAVA ON)
  include(UseJava)
  enable_language(Java)
else()
  set(WITH_JAVA OFF)
endif()

################################################################################
# Python Bindings
################################################################################

find_package(Python COMPONENTS Interpreter)
if(Python_Interpreter_FOUND)
  set(WITH_PYTHON ON)
else()
  message(FATAL_ERROR "Could not find a suitable python interpreter")
  set(WITH_PYTHON OFF)
endif()

################################################################################
# Pip
################################################################################

find_package(Virtualenv)
if (Virtualenv_FOUND)
  set(WITH_DOCUMENTATION ON)
else()
  set(WITH_DOCUMENTATION OFF)
endif()

################################################################################
# GO
################################################################################

find_program(GO_EXECUTABLE go)
# building the go binaries is currently not supported on Windows
if(GO_EXECUTABLE AND NOT WIN32)
  set(WITH_GO ON)
else()
  set(WITH_GO OFF)
endif()

################################################################################
# Ruby
################################################################################

find_program(GEM_EXECUTABLE gem)
set(WITH_RUBY OFF)
if(GEM_EXECUTABLE)
  set(GEM_COMMAND ${RUBY_EXECUTABLE} ${GEM_EXECUTABLE})
  set(WITH_RUBY ON)
endif()

file(MAKE_DIRECTORY ${CMAKE_BINARY_DIR}/packages)
add_custom_target(packages)

function(print_components)
  message(STATUS "=========================================")
  message(STATUS "   Components Build Overview ")
  message(STATUS "=========================================")
  message(STATUS "Build Java Bindings:                ${WITH_JAVA}")
  message(STATUS "Build with TLS support:             ${WITH_TLS}")
  message(STATUS "Build Go bindings:                  ${WITH_GO}")
  message(STATUS "Build Ruby bindings:                ${WITH_RUBY}")
  message(STATUS "Build Python sdist (make package):  ${WITH_PYTHON}")
  message(STATUS "Build Documentation (make html):    ${WITH_DOCUMENTATION}")
  message(STATUS "=========================================")
endfunction()

if(FORCE_ALL_COMPONENTS)
  if(NOT WITH_JAVA OR NOT WITH_TLS OR NOT WITH_GO OR NOT WITH_RUBY OR NOT WITH_PYTHON OR NOT WITH_DOCUMENTATION)
    print_components()
    message(FATAL_ERROR "FORCE_ALL_COMPONENTS is set but not all dependencies could be found")
  endif()
endif()
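A hedged configure sketch showing how the options defined in this file are toggled on the cmake command line (the ../src source path is an assumption about the build layout):

# fail configuration unless every optional component (Java, TLS, Go, Ruby, Python, docs) was found
cmake -DFORCE_ALL_COMPONENTS=ON ../src
# or skip the LibreSSL lookup and build without TLS support
cmake -DDISABLE_TLS=ON ../src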
@ -0,0 +1,7 @@
find_program(SPHINXBUILD
  sphinx-build
  DOC "Sphinx-build tool")

find_package_handle_standard_args(Sphinx
  FOUND_VAR SPHINX_FOUND
  REQUIRED_VARS SPHINXBUILD)
@ -0,0 +1,20 @@
find_program(_VIRTUALENV_EXE virtualenv)

# get version and test that the program actually works
if(_VIRTUALENV_EXE)
  execute_process(
    COMMAND ${_VIRTUALENV_EXE} --version
    RESULT_VARIABLE ret_code
    OUTPUT_VARIABLE version_string
    ERROR_VARIABLE error_output
    OUTPUT_STRIP_TRAILING_WHITESPACE)
  if(ret_code EQUAL 0 AND NOT error_output)
    # we found a working virtualenv
    set(VIRTUALENV_EXE ${_VIRTUALENV_EXE})
    set(VIRTUALENV_VERSION ${version_string})
  endif()
endif()

find_package_handle_standard_args(Virtualenv
  REQUIRED_VARS VIRTUALENV_EXE
  VERSION_VAR VIRTUALENV_VERSION)
@ -1,53 +1,159 @@
|
||||||
macro(actor_set varname srcs)
|
define_property(TARGET PROPERTY SOURCE_FILES
|
||||||
set(${varname})
|
BRIEF_DOCS "Source files a flow target is built off"
|
||||||
foreach(src ${srcs})
|
FULL_DOCS "When compiling a flow target, this property contains a list of the non-generated source files. \
|
||||||
set(tmp "${src}")
|
This property is set by the add_flow_target function")
|
||||||
if(${src} MATCHES ".*\\.h")
|
|
||||||
continue()
|
|
||||||
elseif(${src} MATCHES ".*\\.actor\\.cpp")
|
|
||||||
string(REPLACE ".actor.cpp" ".actor.g.cpp" tmp ${src})
|
|
||||||
set(tmp "${CMAKE_CURRENT_BINARY_DIR}/${tmp}")
|
|
||||||
endif()
|
|
||||||
set(${varname} "${${varname}};${tmp}")
|
|
||||||
endforeach()
|
|
||||||
endmacro()
|
|
||||||
|
|
||||||
set(ACTOR_TARGET_COUNTER "0")
|
define_property(TARGET PROPERTY COVERAGE_FILTERS
|
||||||
macro(actor_compile target srcs)
|
BRIEF_DOCS "List of filters for the coverage tool"
|
||||||
set(options DISABLE_ACTOR_WITHOUT_WAIT)
|
FULL_DOCS "Holds a list of regular expressions. All filenames matching any regular \
|
||||||
set(oneValueArg)
|
expression in this list will be ignored when the coverage.target.xml file is \
|
||||||
set(multiValueArgs)
|
generated. This property is set through the add_flow_target function.")
|
||||||
cmake_parse_arguments(ACTOR_COMPILE "${options}" "${oneValueArgs}" "${multiValueArgs}" "${ARGN}")
|
|
||||||
set(_tmp_out "")
|
function(generate_coverage_xml)
|
||||||
foreach(src ${srcs})
|
if(NOT (${ARGC} EQUAL "1"))
|
||||||
set(tmp "")
|
message(FATAL_ERROR "generate_coverage_xml expects one argument")
|
||||||
if(${src} MATCHES ".*\\.actor\\.h")
|
endif()
|
||||||
string(REPLACE ".actor.h" ".actor.g.h" tmp ${src})
|
set(target_name ${ARGV0})
|
||||||
elseif(${src} MATCHES ".*\\.actor\\.cpp")
|
get_target_property(sources ${target_name} SOURCE_FILES)
|
||||||
string(REPLACE ".actor.cpp" ".actor.g.cpp" tmp ${src})
|
get_target_property(filters ${target_name} COVERAGE_FILTER_OUT)
|
||||||
|
foreach(src IN LISTS sources)
|
||||||
|
set(include TRUE)
|
||||||
|
foreach(f IN LISTS filters)
|
||||||
|
if("${f}" MATCHES "${src}")
|
||||||
|
set(include FALSE)
|
||||||
|
endif()
|
||||||
|
endforeach()
|
||||||
|
if(include)
|
||||||
|
list(APPEND in_files ${src})
|
||||||
endif()
|
endif()
|
||||||
set(actor_compiler_flags "")
|
endforeach()
|
||||||
if(ACTOR_COMPILE_DISABLE_ACTOR_WITHOUT_WAIT)
|
set(target_file ${CMAKE_CURRENT_SOURCE_DIR}/coverage_target_${target_name})
|
||||||
set(actor_compiler_flags "--disable-actor-without-wait-error")
|
# we can't get the targets output dir through a generator expression as this would
|
||||||
|
# create a cyclic dependency.
|
||||||
|
# Instead we follow the following rules:
|
||||||
|
# - For executable we place the coverage file into the directory EXECUTABLE_OUTPUT_PATH
|
||||||
|
# - For static libraries we place it into the directory LIBRARY_OUTPUT_PATH
|
||||||
|
# - For dynamic libraries we place it into LIBRARY_OUTPUT_PATH on Linux and MACOS
|
||||||
|
# and to EXECUTABLE_OUTPUT_PATH on Windows
|
||||||
|
get_target_property(type ${target_name} TYPE)
|
||||||
|
# STATIC_LIBRARY, MODULE_LIBRARY, SHARED_LIBRARY, OBJECT_LIBRARY, INTERFACE_LIBRARY, EXECUTABLE
|
||||||
|
if(type STREQUAL "STATIC_LIBRARY")
|
||||||
|
set(target_file ${LIBRARY_OUTPUT_PATH}/coverage.${target_name}.xml)
|
||||||
|
elseif(type STREQUAL "SHARED_LIBRARY")
|
||||||
|
if(WIN32)
|
||||||
|
set(target_file ${EXECUTABLE_OUTPUT_PATH}/coverage.${target_name}.xml)
|
||||||
|
else()
|
||||||
|
set(target_file ${LIBRARY_OUTPUT_PATH}/coverage.${target_name}.xml)
|
||||||
endif()
|
endif()
|
||||||
if(tmp)
|
elseif(type STREQUAL "EXECUTABLE")
|
||||||
|
set(target_file ${EXECUTABLE_OUTPUT_PATH}/coverage.${target_name}.xml)
|
||||||
|
endif()
|
||||||
|
if(WIN32)
|
||||||
|
add_custom_command(
|
||||||
|
OUTPUT ${target_file}
|
||||||
|
COMMAND $<TARGET_FILE:coveragetool> ${target_file} ${in_files}
|
||||||
|
DEPENDS ${in_files}
|
||||||
|
WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
|
||||||
|
COMMENT "Generate coverage xml")
|
||||||
|
else()
|
||||||
|
add_custom_command(
|
||||||
|
OUTPUT ${target_file}
|
||||||
|
COMMAND ${MONO_EXECUTABLE} ${coveragetool_exe} ${target_file} ${in_files}
|
||||||
|
DEPENDS ${in_files}
|
||||||
|
WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
|
||||||
|
COMMENT "Generate coverage xml")
|
||||||
|
endif()
|
||||||
|
add_custom_target(coverage_${target_name} DEPENDS ${target_file})
|
||||||
|
add_dependencies(coverage_${target_name} coveragetool)
|
||||||
|
add_dependencies(${target_name} coverage_${target_name})
|
||||||
|
endfunction()
|
||||||
|
|
||||||
|
function(add_flow_target)
|
||||||
|
set(options EXECUTABLE STATIC_LIBRARY
|
||||||
|
DYNAMIC_LIBRARY)
|
||||||
|
set(oneValueArgs NAME)
|
||||||
|
set(multiValueArgs SRCS COVERAGE_FILTER_OUT DISABLE_ACTOR_WITHOUT_WAIT_WARNING)
|
||||||
|
cmake_parse_arguments(AFT "${options}" "${oneValueArgs}" "${multiValueArgs}" "${ARGN}")
|
||||||
|
if(NOT AFT_NAME)
|
||||||
|
message(FATAL_ERROR "add_flow_target requires option NAME")
|
||||||
|
endif()
|
||||||
|
if(NOT AFT_SRCS)
|
||||||
|
message(FATAL_ERROR "No sources provided")
|
||||||
|
endif()
|
||||||
|
if(OPEN_FOR_IDE)
|
||||||
|
set(sources ${AFT_SRCS} ${AFT_DISABLE_ACTOR_WRITHOUT_WAIT_WARNING})
|
||||||
|
if(AFT_EXECUTABLE)
|
||||||
|
set(target_type exec)
|
||||||
|
add_executable(${AFT_NAME} ${sources})
|
||||||
|
endif()
|
||||||
|
if(AFT_STATIC_LIBRARY)
|
||||||
|
if(target_type)
|
||||||
|
message(FATAL_ERROR "add_flow_target can only be of one type")
|
||||||
|
endif()
|
||||||
|
add_library(${AFT_NAME} STATIC ${sources})
|
||||||
|
endif()
|
||||||
|
if(AFT_DYNAMIC_LIBRARY)
|
||||||
|
if(target_type)
|
||||||
|
message(FATAL_ERROR "add_flow_target can only be of one type")
|
||||||
|
endif()
|
||||||
|
add_library(${AFT_NAME} DYNAMIC ${sources})
|
||||||
|
endif()
|
||||||
|
else()
|
||||||
|
foreach(src IN LISTS AFT_SRCS AFT_DISABLE_ACTOR_WITHOUT_WAIT_WARNING)
|
||||||
|
if(${src} MATCHES ".*\\.actor\\.(h|cpp)")
|
||||||
|
list(APPEND actors ${src})
|
||||||
|
if(${src} MATCHES ".*\\.h")
|
||||||
|
string(REPLACE ".actor.h" ".actor.g.h" generated ${src})
|
||||||
|
else()
|
||||||
|
string(REPLACE ".actor.cpp" ".actor.g.cpp" generated ${src})
|
||||||
|
endif()
|
||||||
|
set(actor_compiler_flags "")
|
||||||
|
foreach(s IN LISTS AFT_DISABLE_ACTOR_WITHOUT_WAIT_WARNING)
|
||||||
|
if("${s}" STREQUAL "${src}")
|
||||||
|
set(actor_compiler_flags "--disable-actor-without-wait-warning")
|
||||||
|
break()
|
||||||
|
endif()
|
||||||
|
endforeach()
|
||||||
|
list(APPEND sources ${generated})
|
||||||
|
list(APPEND generated_files ${CMAKE_CURRENT_BINARY_DIR}/${generated})
|
||||||
if(WIN32)
|
if(WIN32)
|
||||||
add_custom_command(OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/${tmp}"
|
add_custom_command(OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/${generated}"
|
||||||
COMMAND $<TARGET_FILE:actorcompiler> "${CMAKE_CURRENT_SOURCE_DIR}/${src}" "${CMAKE_CURRENT_BINARY_DIR}/${tmp}" ${actor_compiler_flags}
|
COMMAND $<TARGET_FILE:actorcompiler> "${CMAKE_CURRENT_SOURCE_DIR}/${src}" "${CMAKE_CURRENT_BINARY_DIR}/${generated}" ${actor_compiler_flags} ${actor_compiler_flags}
|
||||||
DEPENDS "${CMAKE_CURRENT_SOURCE_DIR}/${src}" actorcompiler ${actor_exe}
|
DEPENDS "${CMAKE_CURRENT_SOURCE_DIR}/${src}" actorcompiler
|
||||||
COMMENT "Compile actor: ${src}")
|
COMMENT "Compile actor: ${src}")
|
||||||
else()
|
else()
|
||||||
add_custom_command(OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/${tmp}"
|
add_custom_command(OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/${generated}"
|
||||||
COMMAND ${MONO_EXECUTABLE} ${actor_exe} "${CMAKE_CURRENT_SOURCE_DIR}/${src}" "${CMAKE_CURRENT_BINARY_DIR}/${tmp}" ${actor_compiler_flags} > /dev/null
|
COMMAND ${MONO_EXECUTABLE} ${actor_exe} "${CMAKE_CURRENT_SOURCE_DIR}/${src}" "${CMAKE_CURRENT_BINARY_DIR}/${generated}" ${actor_compiler_flags} ${actor_compiler_flags} > /dev/null
|
||||||
DEPENDS "${CMAKE_CURRENT_SOURCE_DIR}/${src}" actorcompiler ${actor_exe}
|
DEPENDS "${CMAKE_CURRENT_SOURCE_DIR}/${src}" actorcompiler
|
||||||
COMMENT "Compile actor: ${src}")
|
COMMENT "Compile actor: ${src}")
|
||||||
endif()
|
endif()
|
||||||
set(_tmp_out "${_tmp_out};${CMAKE_CURRENT_BINARY_DIR}/${tmp}")
|
else()
|
||||||
|
list(APPEND sources ${src})
|
||||||
endif()
|
endif()
|
||||||
endforeach()
|
endforeach()
|
||||||
MATH(EXPR ACTOR_TARGET_COUNTER "${ACTOR_TARGET_COUNTER}+1")
|
if(AFT_EXECUTABLE)
|
||||||
add_custom_target(${target}_actors_${ACTOR_TARGET_COUNTER} DEPENDS ${_tmp_out})
|
set(target_type exec)
|
||||||
add_dependencies(${target} ${target}_actors_${ACTOR_TARGET_COUNTER})
|
add_executable(${AFT_NAME} ${sources})
|
||||||
target_include_directories(${target} PUBLIC ${CMAKE_CURRENT_SOURCE_DIR})
|
endif()
|
||||||
target_include_directories(${target} PUBLIC ${CMAKE_CURRENT_BINARY_DIR})
|
if(AFT_STATIC_LIBRARY)
|
||||||
endmacro()
|
if(target_type)
|
||||||
|
message(FATAL_ERROR "add_flow_target can only be of one type")
|
||||||
|
endif()
|
||||||
|
add_library(${AFT_NAME} STATIC ${sources})
|
||||||
|
endif()
|
||||||
|
if(AFT_DYNAMIC_LIBRARY)
|
||||||
|
if(target_type)
|
||||||
|
message(FATAL_ERROR "add_flow_target can only be of one type")
|
||||||
|
endif()
|
||||||
|
add_library(${AFT_NAME} DYNAMIC ${sources})
|
||||||
|
endif()
|
||||||
|
|
||||||
|
set_property(TARGET ${AFT_NAME} PROPERTY SOURCE_FILES ${AFT_SRCS})
|
||||||
|
set_property(TARGET ${AFT_NAME} PROPERTY COVERAGE_FILTERS ${AFT_SRCS})
|
||||||
|
|
||||||
|
add_custom_target(${AFT_NAME}_actors DEPENDS ${generated_files})
|
||||||
|
add_dependencies(${AFT_NAME} ${AFT_NAME}_actors)
|
||||||
|
generate_coverage_xml(${AFT_NAME})
|
||||||
|
endif()
|
||||||
|
target_include_directories(${AFT_NAME} PUBLIC ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR})
|
||||||
|
endfunction()
|
||||||
|
|
|
@ -1,3 +1,21 @@
|
||||||
|
################################################################################
|
||||||
|
# Helper Functions
|
||||||
|
################################################################################
|
||||||
|
|
||||||
|
function(install_symlink)
|
||||||
|
set(options "")
|
||||||
|
set(one_value_options COMPONENT TO DESTINATION)
|
||||||
|
set(multi_value_options)
|
||||||
|
cmake_parse_arguments(SYM "${options}" "${one_value_options}" "${multi_value_options}" "${ARGN}")
|
||||||
|
|
||||||
|
file(MAKE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/symlinks)
|
||||||
|
get_filename_component(fname ${SYM_DESTINATION} NAME)
|
||||||
|
get_filename_component(dest_dir ${SYM_DESTINATION} DIRECTORY)
|
||||||
|
set(sl ${CMAKE_CURRENT_BINARY_DIR}/symlinks/${fname})
|
||||||
|
execute_process(COMMAND ${CMAKE_COMMAND} -E create_symlink ${SYM_TO} ${sl})
|
||||||
|
install(FILES ${sl} DESTINATION ${dest_dir} COMPONENT ${SYM_COMPONENT})
|
||||||
|
endfunction()
|
||||||
|
|
||||||
if(NOT INSTALL_LAYOUT)
|
if(NOT INSTALL_LAYOUT)
|
||||||
if(WIN32)
|
if(WIN32)
|
||||||
set(DEFAULT_INSTALL_LAYOUT "WIN")
|
set(DEFAULT_INSTALL_LAYOUT "WIN")
|
||||||
|
@ -14,6 +32,9 @@ if(DIR_LAYOUT MATCHES "TARGZ")
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
get_property(LIB64 GLOBAL PROPERTY FIND_LIBRARY_USE_LIB64_PATHS)
|
get_property(LIB64 GLOBAL PROPERTY FIND_LIBRARY_USE_LIB64_PATHS)
|
||||||
|
|
||||||
|
set(CPACK_PACKAGE_CHECKSUM SHA256)
|
||||||
|
|
||||||
set(FDB_CONFIG_DIR "etc/foundationdb")
|
set(FDB_CONFIG_DIR "etc/foundationdb")
|
||||||
if("${LIB64}" STREQUAL "TRUE")
|
if("${LIB64}" STREQUAL "TRUE")
|
||||||
set(LIBSUFFIX 64)
|
set(LIBSUFFIX 64)
|
||||||
|
@ -54,8 +75,24 @@ elseif(DIR_LAYOUT MATCHES "OSX")
|
||||||
set(FDB_PYTHON_INSTALL_DIR "Library/Python/2.7/site-packages/fdb")
|
set(FDB_PYTHON_INSTALL_DIR "Library/Python/2.7/site-packages/fdb")
|
||||||
set(FDB_SHARE_DIR "usr/local/share")
|
set(FDB_SHARE_DIR "usr/local/share")
|
||||||
else()
|
else()
|
||||||
# DEB
|
if(DIR_LAYOUT MATCHES "RPM")
|
||||||
set(CPACK_GENERATOR "DEB")
|
set(CPACK_GENERATOR RPM)
|
||||||
|
else()
|
||||||
|
# DEB
|
||||||
|
set(CPACK_GENERATOR "DEB")
|
||||||
|
set(LIBSUFFIX "")
|
||||||
|
endif()
|
||||||
|
set(CMAKE_INSTALL_PREFIX "/")
|
||||||
|
set(CPACK_PACKAGING_INSTALL_PREFIX "/")
|
||||||
|
set(FDB_CONFIG_DIR "etc/foundationdb")
|
||||||
|
set(FDB_LIB_DIR "usr/lib${LIBSUFFIX}")
|
||||||
|
set(FDB_LIB_NOSUFFIX "usr/lib")
|
||||||
|
set(FDB_LIBEXEC_DIR ${FDB_LIB_DIR})
|
||||||
|
set(FDB_BIN_DIR "usr/bin")
|
||||||
|
set(FDB_SBIN_DIR "usr/sbin")
|
||||||
|
set(FDB_INCLUDE_INSTALL_DIR "usr/include")
|
||||||
|
set(FDB_PYTHON_INSTALL_DIR "${FDB_LIB_DIR}/python2.7/site-packages/fdb")
|
||||||
|
set(FDB_SHARE_DIR "usr/share")
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
if(INSTALL_LAYOUT MATCHES "OSX")
|
if(INSTALL_LAYOUT MATCHES "OSX")
|
||||||
|
@ -104,6 +141,12 @@ endif()
|
||||||
################################################################################
|
################################################################################
|
||||||
# Configuration for RPM
|
# Configuration for RPM
|
||||||
################################################################################
|
################################################################################
|
||||||
|
################################################################################
|
||||||
|
|
||||||
|
if(UNIX AND NOT APPLE)
|
||||||
|
install(DIRECTORY DESTINATION "var/log/foundationdb" COMPONENT server)
|
||||||
|
install(DIRECTORY DESTINATION "var/lib/foundationdb/data" COMPONENT server)
|
||||||
|
endif()
|
||||||
|
|
||||||
if(INSTALL_LAYOUT MATCHES "RPM")
|
if(INSTALL_LAYOUT MATCHES "RPM")
|
||||||
set(CPACK_RPM_server_USER_FILELIST
|
set(CPACK_RPM_server_USER_FILELIST
|
||||||
|
@ -122,7 +165,8 @@ if(INSTALL_LAYOUT MATCHES "RPM")
|
||||||
"/lib/systemd"
|
"/lib/systemd"
|
||||||
"/lib/systemd/system"
|
"/lib/systemd/system"
|
||||||
"/etc/rc.d/init.d")
|
"/etc/rc.d/init.d")
|
||||||
set(CPACK_RPM_DEBUGINFO_PACKAGE ON)
|
set(CPACK_RPM_server_DEBUGINFO_PACKAGE ON)
|
||||||
|
set(CPACK_RPM_clients_DEBUGINFO_PACKAGE ON)
|
||||||
set(CPACK_RPM_BUILD_SOURCE_DIRS_PREFIX /usr/src)
|
set(CPACK_RPM_BUILD_SOURCE_DIRS_PREFIX /usr/src)
|
||||||
set(CPACK_RPM_COMPONENT_INSTALL ON)
|
set(CPACK_RPM_COMPONENT_INSTALL ON)
|
||||||
set(CPACK_RPM_clients_PRE_INSTALL_SCRIPT_FILE
|
set(CPACK_RPM_clients_PRE_INSTALL_SCRIPT_FILE
|
||||||
|
@ -136,6 +180,11 @@ if(INSTALL_LAYOUT MATCHES "RPM")
|
||||||
set(CPACK_RPM_server_PRE_UNINSTALL_SCRIPT_FILE
|
set(CPACK_RPM_server_PRE_UNINSTALL_SCRIPT_FILE
|
||||||
${CMAKE_SOURCE_DIR}/packaging/rpm/scripts/preunserver.sh)
|
${CMAKE_SOURCE_DIR}/packaging/rpm/scripts/preunserver.sh)
|
||||||
set(CPACK_RPM_server_PACKAGE_REQUIRES
|
set(CPACK_RPM_server_PACKAGE_REQUIRES
|
||||||
|
"foundationdb-clients = ${FDB_MAJOR}.${FDB_MINOR}.${FDB_PATCH}, initscripts >= 9.03")
|
||||||
|
set(CPACK_RPM_server_PACKAGE_RE)
|
||||||
|
#set(CPACK_RPM_java_PACKAGE_REQUIRES
|
||||||
|
# "foundationdb-clients = ${FDB_MAJOR}.${FDB_MINOR}.${FDB_PATCH}")
|
||||||
|
set(CPACK_RPM_python_PACKAGE_REQUIRES
|
||||||
"foundationdb-clients = ${FDB_MAJOR}.${FDB_MINOR}.${FDB_PATCH}")
|
"foundationdb-clients = ${FDB_MAJOR}.${FDB_MINOR}.${FDB_PATCH}")
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
|
@ -148,12 +197,12 @@ if(INSTALL_LAYOUT MATCHES "DEB")
|
||||||
set(CPACK_DEBIAN_PACKAGE_SECTION "database")
|
set(CPACK_DEBIAN_PACKAGE_SECTION "database")
|
||||||
set(CPACK_DEBIAN_ENABLE_COMPONENT_DEPENDS ON)
|
set(CPACK_DEBIAN_ENABLE_COMPONENT_DEPENDS ON)
|
||||||
|
|
||||||
set(CPACK_DEBIAN_server_PACKAGE_DEPENDS "adduser, libc6 (>= 2.11), python (>= 2.6)")
|
set(CPACK_DEBIAN_SERVER_PACKAGE_DEPENDS "adduser, libc6 (>= 2.12), python (>= 2.6), foundationdb-clients (= ${FDB_VERSION})")
|
||||||
set(CPACK_DEBIAN_clients_PACKAGE_DEPENDS "adduser, libc6 (>= 2.11)")
|
set(CPACK_DEBIAN_CLIENTS_PACKAGE_DEPENDS "adduser, libc6 (>= 2.12)")
|
||||||
set(CPACK_DEBIAN_PACKAGE_HOMEPAGE "https://www.foundationdb.org")
|
set(CPACK_DEBIAN_PACKAGE_HOMEPAGE "https://www.foundationdb.org")
|
||||||
set(CPACK_DEBIAN_clients_PACKAGE_CONTROL_EXTRA
|
set(CPACK_DEBIAN_CLIENTS_PACKAGE_CONTROL_EXTRA
|
||||||
${CMAKE_SOURCE_DIR}/packaging/deb/DEBIAN-foundationdb-clients/postinst)
|
${CMAKE_SOURCE_DIR}/packaging/deb/DEBIAN-foundationdb-clients/postinst)
|
||||||
set(CPACK_DEBIAN_server_PACKAGE_CONTROL_EXTRA
|
set(CPACK_DEBIAN_SERVER_PACKAGE_CONTROL_EXTRA
|
||||||
${CMAKE_SOURCE_DIR}/packaging/deb/DEBIAN-foundationdb-server/conffiles
|
${CMAKE_SOURCE_DIR}/packaging/deb/DEBIAN-foundationdb-server/conffiles
|
||||||
${CMAKE_SOURCE_DIR}/packaging/deb/DEBIAN-foundationdb-server/preinst
|
${CMAKE_SOURCE_DIR}/packaging/deb/DEBIAN-foundationdb-server/preinst
|
||||||
${CMAKE_SOURCE_DIR}/packaging/deb/DEBIAN-foundationdb-server/postinst
|
${CMAKE_SOURCE_DIR}/packaging/deb/DEBIAN-foundationdb-server/postinst
|
||||||
|
@ -223,36 +272,18 @@ if((INSTALL_LAYOUT MATCHES "RPM") OR (INSTALL_LAYOUT MATCHES "DEB"))
|
||||||
RESULT_VARIABLE IS_SYSTEMD
|
RESULT_VARIABLE IS_SYSTEMD
|
||||||
OUTPUT_QUIET
|
OUTPUT_QUIET
|
||||||
ERROR_QUIET)
|
ERROR_QUIET)
|
||||||
if(IS_SYSTEMD EQUAL "0")
|
install(FILES ${CMAKE_SOURCE_DIR}/packaging/rpm/foundationdb.service
|
||||||
configure_file(${CMAKE_SOURCE_DIR}/packaging/rpm/foundationdb.service
|
DESTINATION "lib/systemd/system"
|
||||||
${CMAKE_BINARY_DIR}/packaging/rpm/foundationdb.service)
|
COMPONENT server)
|
||||||
install(FILES ${CMAKE_BINARY_DIR}/packaging/rpm/foundationdb.service
|
if(INSTALL_LAYOUT MATCHES "RPM")
|
||||||
DESTINATION "lib/systemd/system"
|
install(PROGRAMS ${CMAKE_SOURCE_DIR}/packaging/rpm/foundationdb-init
|
||||||
|
DESTINATION "etc/rc.d/init.d"
|
||||||
|
RENAME "foundationdb"
|
||||||
COMPONENT server)
|
COMPONENT server)
|
||||||
else()
|
else()
|
||||||
if(INSTALL_LAYOUT MATCHES "RPM")
|
install(PROGRAMS ${CMAKE_SOURCE_DIR}/packaging/deb/foundationdb-init
|
||||||
install(PROGRAMS ${CMAKE_SOURCE_DIR}/packaging/rpm/foundationdb-init
|
DESTINATION "etc/init.d"
|
||||||
DESTINATION "etc/rc.d/init.d"
|
RENAME "foundationdb"
|
||||||
RENAME "foundationdb"
|
COMPONENT server)
|
||||||
COMPONENT server)
|
|
||||||
else()
|
|
||||||
install(PROGRAMS ${CMAKE_SOURCE_DIR}/packaging/deb/foundationdb-init
|
|
||||||
DESTINATION "etc/init.d"
|
|
||||||
RENAME "foundationdb"
|
|
||||||
COMPONENT server)
|
|
||||||
endif()
|
|
||||||
endif()
|
endif()
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
################################################################################
|
|
||||||
# Helper Macros
|
|
||||||
################################################################################
|
|
||||||
|
|
||||||
macro(install_symlink filepath sympath compondent)
|
|
||||||
install(CODE "execute_process(COMMAND ${CMAKE_COMMAND} -E create_symlink ${filepath} ${sympath})" COMPONENT ${component})
|
|
||||||
install(CODE "message(\"-- Created symlink: ${sympath} -> ${filepath}\")")
|
|
||||||
endmacro()
|
|
||||||
macro(install_mkdir dirname component)
|
|
||||||
install(CODE "execute_process(COMMAND ${CMAKE_COMMAND} -E make_directory ${dirname})" COMPONENT ${component})
|
|
||||||
install(CODE "message(\"-- Created directory: ${dirname}\")")
|
|
||||||
endmacro()
|
|
||||||
|
|
|
@ -0,0 +1,68 @@
|
||||||
|
# build a virtualenv
|
||||||
|
set(sphinx_dir ${CMAKE_CURRENT_SOURCE_DIR}/sphinx)
|
||||||
|
set(venv_dir ${CMAKE_CURRENT_BINARY_DIR}/venv)
|
||||||
|
set(EXE_SUFFIX "")
|
||||||
|
if(WIN32)
|
||||||
|
set(EXE_SUFFIX ".exe")
|
||||||
|
endif()
|
||||||
|
set(pip_command ${venv_dir}/bin/pip${EXE_SUFFIX})
|
||||||
|
set(python_command ${venv_dir}/bin/python${EXE_SUFFIX})
|
||||||
|
|
||||||
|
add_custom_command(OUTPUT ${venv_dir}/venv_setup
|
||||||
|
COMMAND ${VIRTUALENV_EXE} venv &&
|
||||||
|
${CMAKE_COMMAND} -E copy ${sphinx_dir}/.pip.conf ${venv_dir}/pip.conf &&
|
||||||
|
. ${venv_dir}/bin/activate &&
|
||||||
|
${pip_command} install --upgrade pip &&
|
||||||
|
${pip_command} install --upgrade -r ${sphinx_dir}/requirements.txt &&
|
||||||
|
${pip_command} install sphinx-autobuild && # somehow this is missing in requirements.txt
|
||||||
|
${CMAKE_COMMAND} -E touch ${venv_dir}/venv_setup
|
||||||
|
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
|
||||||
|
COMMENT "Set up virtualenv")
|
||||||
|
add_custom_target(buildsphinx DEPENDS ${venv_dir}/venv_setup)
|
||||||
|
|
||||||
|
file(GLOB_RECURSE SRCS *.rst)
|
||||||
|
|
||||||
|
function(add_documentation_target)
|
||||||
|
set(options)
|
||||||
|
set(oneValueArgs GENERATOR SPHINX_COMMAND DOCTREE)
|
||||||
|
set(multiValueArgs ADDITIONAL_ARGUMENTS)
|
||||||
|
cmake_parse_arguments(ADT "${options}" "${oneValueArgs}" "${multiValueArgs}" "${ARGN}")
|
||||||
|
if(NOT ADT_GENERATOR)
|
||||||
|
message(ERROR "GENERATOR is a required argument to add_documentation_target")
|
||||||
|
endif()
|
||||||
|
set(target ${ADT_GENERATOR})
|
||||||
|
set(SPHINX_COMMAND "${venv_dir}/bin/sphinx-build")
|
||||||
|
if(ADT_SPHINX_COMMAND)
|
||||||
|
set(SPHINX_COMMAND "${venv_dir}/bin/${ADT_SPHINX_COMMAND}")
|
||||||
|
endif()
|
||||||
|
set(doctree "doctree")
|
||||||
|
if (ADT_DOCTREE)
|
||||||
|
set(doctree "${ADT_DOCTREE}")
|
||||||
|
endif()
|
||||||
|
set(out_dir ${CMAKE_CURRENT_BINARY_DIR}/${target})
|
||||||
|
add_custom_command(
|
||||||
|
OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/${target}_done
|
||||||
|
COMMAND ${CMAKE_COMMAND} -E make_directory ${out_dir} &&
|
||||||
|
${python_command} ${SPHINX_COMMAND} -b ${target}
|
||||||
|
-d ${doctree} -c ${sphinx_dir}
|
||||||
|
${sphinx_dir}/source
|
||||||
|
${CMAKE_CURRENT_BINARY_DIR}/${target} &&
|
||||||
|
${CMAKE_COMMAND} -E touch ${CMAKE_CURRENT_BINARY_DIR}/${target}_done
|
||||||
|
DEPENDS ${SRCS}
|
||||||
|
WORKING_DIRECTORY ${venv_dir})
|
||||||
|
message(STATUS "add_custom_target(${target} DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/${target}_done)")
|
||||||
|
add_custom_target(${target} DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/${target}_done)
|
||||||
|
add_dependencies(${target} buildsphinx)
|
||||||
|
endfunction()
|
||||||
|
|
||||||
|
message(STATUS "Add html target")
|
||||||
|
add_documentation_target(GENERATOR html)
|
||||||
|
|
||||||
|
set(tar_file ${CMAKE_BINARY_DIR}/packages/${CMAKE_PROJECT_NAME}-docs-${FDB_VERSION}.tar.gz)
|
||||||
|
add_custom_command(
|
||||||
|
OUTPUT ${tar_file}
|
||||||
|
COMMAND ${CMAKE_COMMAND} -E tar czf ${tar_file} .
|
||||||
|
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/html)
|
||||||
|
add_custom_target(package_html DEPENDS ${tar_file})
|
||||||
|
add_dependencies(package_html html)
|
||||||
|
add_dependencies(packages package_html)
|
|
@ -8,6 +8,8 @@ Release Notes
 Features
 --------
 
+* Get read version, read, and commit requests are counted and aggregated by server-side latency in configurable latency bands and output in JSON status. `(PR #1084) <https://github.com/apple/foundationdb/pull/1084>`_
+
 Performance
 -----------
@ -1,25 +1,23 @@
 set(FDBBACKUP_SRCS
   backup.actor.cpp)
 
-actor_set(FDBBACKUP_BUILD "${FDBBACKUP_SRCS}")
-add_executable(fdbbackup "${FDBBACKUP_BUILD}")
-actor_compile(fdbbackup "${FDBBACKUP_SRCS}")
+add_flow_target(EXECUTABLE NAME fdbbackup SRCS ${FDBBACKUP_SRCS})
 target_link_libraries(fdbbackup PRIVATE fdbclient)
 
 install(TARGETS fdbbackup DESTINATION ${FDB_BIN_DIR} COMPONENT clients)
-install(PROGRAMS $<TARGET_FILE:fdbbackup>
-  DESTINATION ${FDB_LIB_DIR}/foundationdb/backup_agent
-  RENAME backup_agent
+install_symlink(
+  TO /${FDB_BIN_DIR}/fdbbackup
+  DESTINATION ${FDB_LIB_DIR}/foundationdb/backup_agent/backup_agent
   COMPONENT clients)
-install(PROGRAMS $<TARGET_FILE:fdbbackup>
-  DESTINATION ${FDB_BIN_DIR}
-  RENAME fdbrestore
+install_symlink(
+  TO /${FDB_BIN_DIR}/fdbbackup
+  DESTINATION ${FDB_BIN_DIR}/fdbrestore
   COMPONENT clients)
-install(PROGRAMS $<TARGET_FILE:fdbbackup>
-  DESTINATION ${FDB_BIN_DIR}
-  RENAME dr_agent
+install_symlink(
+  TO /${FDB_BIN_DIR}/fdbbackup
+  DESTINATION ${FDB_BIN_DIR}/dr_agent
   COMPONENT clients)
-install(PROGRAMS $<TARGET_FILE:fdbbackup>
-  DESTINATION ${FDB_BIN_DIR}
-  RENAME fdbdr
+install_symlink(
+  TO /${FDB_BIN_DIR}/fdbbackup
+  DESTINATION ${FDB_BIN_DIR}/fdbdr
   COMPONENT clients)
@ -8,9 +8,7 @@ if(NOT WIN32)
   list(APPEND FDBCLI_SRCS linenoise/linenoise.c)
 endif()
 
-actor_set(FDBCLI_BUILD "${FDBCLI_SRCS}")
-add_executable(fdbcli "${FDBCLI_BUILD}")
-actor_compile(fdbcli "${FDBCLI_SRCS}")
+add_flow_target(EXECUTABLE NAME fdbcli SRCS ${FDBCLI_SRCS})
 target_link_libraries(fdbcli PRIVATE fdbclient)
 
 install(TARGETS fdbcli DESTINATION ${FDB_BIN_DIR} COMPONENT clients)
@ -1682,7 +1682,7 @@ ACTOR Future<bool> fileConfigure(Database db, std::string filePath, bool isNewDa
 	StatusObject configJSON = config.get_obj();
 
 	json_spirit::mValue schema;
-	if(!json_spirit::read_string( JSONSchemas::configurationSchema.toString(), schema )) {
+	if(!json_spirit::read_string( JSONSchemas::clusterConfigurationSchema.toString(), schema )) {
 		ASSERT(false);
 	}
@ -87,8 +87,6 @@ set(FDBCLIENT_SRCS
 vexillographer_compile(TARGET fdboptions LANG cpp OUT ${CMAKE_CURRENT_BINARY_DIR}/FDBOptions.g
   OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/FDBOptions.g.h ${CMAKE_CURRENT_BINARY_DIR}/FDBOptions.g.cpp)
 
-actor_set(FDBCLIENT_BUILD "${FDBCLIENT_SRCS}")
-add_library(fdbclient STATIC ${FDBCLIENT_BUILD})
+add_flow_target(STATIC_LIBRARY NAME fdbclient SRCS ${FDBCLIENT_SRCS})
 add_dependencies(fdbclient fdboptions)
-actor_compile(fdbclient "${FDBCLIENT_SRCS}")
 target_link_libraries(fdbclient PUBLIC fdbrpc)
@ -287,6 +287,7 @@ ACTOR Future<ConfigurationResult::Type> changeConfig( Database cx, std::map<std:
|
||||||
}
|
}
|
||||||
|
|
||||||
state Future<Void> tooLong = delay(4.5);
|
state Future<Void> tooLong = delay(4.5);
|
||||||
|
state std::string versionKey = g_random->randomUniqueID().toString();
|
||||||
loop {
|
loop {
|
||||||
try {
|
try {
|
||||||
tr.setOption( FDBTransactionOptions::PRIORITY_SYSTEM_IMMEDIATE );
|
tr.setOption( FDBTransactionOptions::PRIORITY_SYSTEM_IMMEDIATE );
|
||||||
|
@ -432,6 +433,9 @@ ACTOR Future<ConfigurationResult::Type> changeConfig( Database cx, std::map<std:
|
||||||
for(auto i=m.begin(); i!=m.end(); ++i)
|
for(auto i=m.begin(); i!=m.end(); ++i)
|
||||||
tr.set( StringRef(i->first), StringRef(i->second) );
|
tr.set( StringRef(i->first), StringRef(i->second) );
|
||||||
|
|
||||||
|
tr.addReadConflictRange( singleKeyRange(configVersionKey) );
|
||||||
|
tr.set( configVersionKey, versionKey );
|
||||||
|
|
||||||
wait( tr.commit() );
|
wait( tr.commit() );
|
||||||
break;
|
break;
|
||||||
} catch (Error& e) {
|
} catch (Error& e) {
|
||||||
|
@ -698,6 +702,7 @@ ConfigureAutoResult parseConfig( StatusObject const& status ) {
|
||||||
|
|
||||||
ACTOR Future<ConfigurationResult::Type> autoConfig( Database cx, ConfigureAutoResult conf ) {
|
ACTOR Future<ConfigurationResult::Type> autoConfig( Database cx, ConfigureAutoResult conf ) {
|
||||||
state Transaction tr(cx);
|
state Transaction tr(cx);
|
||||||
|
state std::string versionKey = g_random->randomUniqueID().toString();
|
||||||
|
|
||||||
if(!conf.address_class.size())
|
if(!conf.address_class.size())
|
||||||
return ConfigurationResult::INCOMPLETE_CONFIGURATION; //FIXME: correct return type
|
return ConfigurationResult::INCOMPLETE_CONFIGURATION; //FIXME: correct return type
|
||||||
|
@ -747,6 +752,9 @@ ACTOR Future<ConfigurationResult::Type> autoConfig( Database cx, ConfigureAutoRe
|
||||||
tr.set(kv.first, kv.second);
|
tr.set(kv.first, kv.second);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
tr.addReadConflictRange( singleKeyRange(configVersionKey) );
|
||||||
|
tr.set( configVersionKey, versionKey );
|
||||||
|
|
||||||
wait( tr.commit() );
|
wait( tr.commit() );
|
||||||
return ConfigurationResult::SUCCESS;
|
return ConfigurationResult::SUCCESS;
|
||||||
} catch( Error &e ) {
|
} catch( Error &e ) {
|
||||||
|
@ -1125,6 +1133,7 @@ Reference<IQuorumChange> autoQuorumChange( int desired ) { return Reference<IQuo
|
||||||
ACTOR Future<Void> excludeServers( Database cx, vector<AddressExclusion> servers ) {
|
ACTOR Future<Void> excludeServers( Database cx, vector<AddressExclusion> servers ) {
|
||||||
state Transaction tr(cx);
|
state Transaction tr(cx);
|
||||||
state std::string versionKey = g_random->randomUniqueID().toString();
|
state std::string versionKey = g_random->randomUniqueID().toString();
|
||||||
|
state std::string excludeVersionKey = g_random->randomUniqueID().toString();
|
||||||
loop {
|
loop {
|
||||||
try {
|
try {
|
||||||
tr.setOption( FDBTransactionOptions::ACCESS_SYSTEM_KEYS );
|
tr.setOption( FDBTransactionOptions::ACCESS_SYSTEM_KEYS );
|
||||||
|
@ -1132,7 +1141,9 @@ ACTOR Future<Void> excludeServers( Database cx, vector<AddressExclusion> servers
|
||||||
tr.setOption( FDBTransactionOptions::LOCK_AWARE );
|
tr.setOption( FDBTransactionOptions::LOCK_AWARE );
|
||||||
|
|
||||||
tr.addReadConflictRange( singleKeyRange(excludedServersVersionKey) ); //To conflict with parallel includeServers
|
tr.addReadConflictRange( singleKeyRange(excludedServersVersionKey) ); //To conflict with parallel includeServers
|
||||||
tr.set( excludedServersVersionKey, versionKey );
|
tr.addReadConflictRange( singleKeyRange(configVersionKey) );
|
||||||
|
tr.set( configVersionKey, versionKey );
|
||||||
|
tr.set( excludedServersVersionKey, excludeVersionKey );
|
||||||
for(auto& s : servers)
|
for(auto& s : servers)
|
||||||
tr.set( encodeExcludedServersKey(s), StringRef() );
|
tr.set( encodeExcludedServersKey(s), StringRef() );
|
||||||
|
|
||||||
|
@ -1150,6 +1161,7 @@ ACTOR Future<Void> includeServers( Database cx, vector<AddressExclusion> servers
|
||||||
state bool includeAll = false;
|
state bool includeAll = false;
|
||||||
state Transaction tr(cx);
|
state Transaction tr(cx);
|
||||||
state std::string versionKey = g_random->randomUniqueID().toString();
|
state std::string versionKey = g_random->randomUniqueID().toString();
|
||||||
|
state std::string excludeVersionKey = g_random->randomUniqueID().toString();
|
||||||
loop {
|
loop {
|
||||||
try {
|
try {
|
||||||
tr.setOption( FDBTransactionOptions::ACCESS_SYSTEM_KEYS );
|
tr.setOption( FDBTransactionOptions::ACCESS_SYSTEM_KEYS );
|
||||||
|
@ -1159,15 +1171,27 @@ ACTOR Future<Void> includeServers( Database cx, vector<AddressExclusion> servers
|
||||||
// includeServers might be used in an emergency transaction, so make sure it is retry-self-conflicting and CAUSAL_WRITE_RISKY
|
// includeServers might be used in an emergency transaction, so make sure it is retry-self-conflicting and CAUSAL_WRITE_RISKY
|
||||||
tr.setOption( FDBTransactionOptions::CAUSAL_WRITE_RISKY );
|
tr.setOption( FDBTransactionOptions::CAUSAL_WRITE_RISKY );
|
||||||
tr.addReadConflictRange( singleKeyRange(excludedServersVersionKey) );
|
tr.addReadConflictRange( singleKeyRange(excludedServersVersionKey) );
|
||||||
|
tr.addReadConflictRange( singleKeyRange(configVersionKey) );
|
||||||
|
|
||||||
|
tr.set( configVersionKey, versionKey );
|
||||||
|
tr.set( excludedServersVersionKey, excludeVersionKey );
|
||||||
|
|
||||||
tr.set( excludedServersVersionKey, versionKey );
|
|
||||||
for(auto& s : servers ) {
|
for(auto& s : servers ) {
|
||||||
if (!s.isValid()) {
|
if (!s.isValid()) {
|
||||||
tr.clear( excludedServersKeys );
|
tr.clear( excludedServersKeys );
|
||||||
includeAll = true;
|
includeAll = true;
|
||||||
} else if (s.isWholeMachine()) {
|
} else if (s.isWholeMachine()) {
|
||||||
// Eliminate both any ip-level exclusion (1.2.3.4) and any port-level exclusions (1.2.3.4:5)
|
// Eliminate both any ip-level exclusion (1.2.3.4) and any
|
||||||
tr.clear( KeyRangeRef( encodeExcludedServersKey(s), encodeExcludedServersKey(s) + char(':'+1) ) );
|
// port-level exclusions (1.2.3.4:5)
|
||||||
|
// The range ['IP', 'IP;'] was originally deleted. ';' is
|
||||||
|
// char(':' + 1). This does not work, as other for all
|
||||||
|
// x between 0 and 9, 'IPx' will also be in this range.
|
||||||
|
//
|
||||||
|
// This is why we now make two clears: first only of the ip
|
||||||
|
// address, the second will delete all ports.
|
||||||
|
auto addr = encodeExcludedServersKey(s);
|
||||||
|
tr.clear(singleKeyRange(addr));
|
||||||
|
tr.clear(KeyRangeRef(addr + ':', addr + char(':' + 1)));
|
||||||
} else {
|
} else {
|
||||||
tr.clear( encodeExcludedServersKey(s) );
|
tr.clear( encodeExcludedServersKey(s) );
|
||||||
}
|
}
|
||||||
|
@@ -1564,120 +1588,121 @@ void schemaCoverage( std::string const& spath, bool covered ) {
 	}
 }

-bool schemaMatch( StatusObject const schema, StatusObject const result, std::string& errorStr, Severity sev, bool checkCoverage, std::string path, std::string schema_path ) {
-  // Returns true if everything in `result` is permitted by `schema`
-  // Really this should recurse on "values" rather than "objects"?
-
-  bool ok = true;
-
-  try {
-    for(auto& rkv : result) {
-      auto& key = rkv.first;
-      auto& rv = rkv.second;
-      std::string kpath = path + "." + key;
-      std::string spath = schema_path + "." + key;
-
-      if(checkCoverage) schemaCoverage(spath);
-
-      if (!schema.count(key)) {
-        errorStr += format("ERROR: Unknown key `%s'\n", kpath.c_str());
-        TraceEvent(sev, "SchemaMismatch").detail("Path", kpath).detail("SchemaPath", spath);
-        ok = false;
-        continue;
-      }
-      auto& sv = schema.at(key);
-
-      if (sv.type() == json_spirit::obj_type && sv.get_obj().count("$enum")) {
-        auto& enum_values = sv.get_obj().at("$enum").get_array();
-
-        bool any_match = false;
-        for(auto& enum_item : enum_values)
-          if (enum_item == rv) {
-            any_match = true;
-            if(checkCoverage) schemaCoverage(spath + ".$enum." + enum_item.get_str());
-            break;
-          }
-        if (!any_match) {
-          errorStr += format("ERROR: Unknown value `%s' for key `%s'\n", json_spirit::write_string(rv).c_str(), kpath.c_str());
-          TraceEvent(sev, "SchemaMismatch").detail("Path", kpath).detail("SchemaEnumItems", enum_values.size()).detail("Value", json_spirit::write_string(rv));
-          if(checkCoverage) schemaCoverage(spath + ".$enum." + json_spirit::write_string(rv));
-          ok = false;
-        }
-      } else if (sv.type() == json_spirit::obj_type && sv.get_obj().count("$map")) {
-        if (rv.type() != json_spirit::obj_type) {
-          errorStr += format("ERROR: Expected an object as the value for key `%s'\n", kpath.c_str());
-          TraceEvent(sev, "SchemaMismatch").detail("Path", kpath).detail("SchemaType", sv.type()).detail("ValueType", rv.type());
-          ok = false;
-          continue;
-        }
-        if(sv.get_obj().at("$map").type() != json_spirit::obj_type) {
-          continue;
-        }
-        auto& schema_obj = sv.get_obj().at("$map").get_obj();
-        auto& value_obj = rv.get_obj();
-
-        if(checkCoverage) schemaCoverage(spath + ".$map");
-
-        for(auto& value_pair : value_obj) {
-          auto vpath = kpath + "[" + value_pair.first + "]";
-          auto upath = spath + ".$map";
-          if (value_pair.second.type() != json_spirit::obj_type) {
-            errorStr += format("ERROR: Expected an object for `%s'\n", vpath.c_str());
-            TraceEvent(sev, "SchemaMismatch").detail("Path", vpath).detail("ValueType", value_pair.second.type());
-            ok = false;
-            continue;
-          }
-          if (!schemaMatch(schema_obj, value_pair.second.get_obj(), errorStr, sev, checkCoverage, vpath, upath))
-            ok = false;
-        }
-      } else {
-        // The schema entry isn't an operator, so it asserts a type and (depending on the type) recursive schema definition
-        if (normJSONType(sv.type()) != normJSONType(rv.type())) {
-          errorStr += format("ERROR: Incorrect value type for key `%s'\n", kpath.c_str());
-          TraceEvent(sev, "SchemaMismatch").detail("Path", kpath).detail("SchemaType", sv.type()).detail("ValueType", rv.type());
-          ok = false;
-          continue;
-        }
-        if (rv.type() == json_spirit::array_type) {
-          auto& value_array = rv.get_array();
-          auto& schema_array = sv.get_array();
-          if (!schema_array.size()) {
-            // An empty schema array means that the value array is required to be empty
-            if (value_array.size()) {
-              errorStr += format("ERROR: Expected an empty array for key `%s'\n", kpath.c_str());
-              TraceEvent(sev, "SchemaMismatch").detail("Path", kpath).detail("SchemaSize", schema_array.size()).detail("ValueSize", value_array.size());
-              ok = false;
-              continue;
-            }
-          } else if (schema_array.size() == 1 && schema_array[0].type() == json_spirit::obj_type) {
-            // A one item schema array means that all items in the value must match the first item in the schema
-            auto& schema_obj = schema_array[0].get_obj();
-            int index = 0;
-            for(auto &value_item : value_array) {
-              if (value_item.type() != json_spirit::obj_type) {
-                errorStr += format("ERROR: Expected all array elements to be objects for key `%s'\n", kpath.c_str());
-                TraceEvent(sev, "SchemaMismatch").detail("Path", kpath + format("[%d]",index)).detail("ValueType", value_item.type());
-                ok = false;
-                continue;
-              }
-              if (!schemaMatch(schema_obj, value_item.get_obj(), errorStr, sev, checkCoverage, kpath + format("[%d]", index), spath + "[0]"))
-                ok = false;
-              index++;
-            }
-          } else
-            ASSERT(false); // Schema doesn't make sense
-        } else if (rv.type() == json_spirit::obj_type) {
-          auto& schema_obj = sv.get_obj();
-          auto& value_obj = rv.get_obj();
-          if (!schemaMatch(schema_obj, value_obj, errorStr, sev, checkCoverage, kpath, spath))
-            ok = false;
-        }
-      }
-    }
-    return ok;
-  } catch (std::exception& e) {
-    TraceEvent(SevError, "SchemaMatchException").detail("What", e.what()).detail("Path", path).detail("SchemaPath", schema_path);
-    throw unknown_error();
-  }
-}
+bool schemaMatch( json_spirit::mValue const& schemaValue, json_spirit::mValue const& resultValue, std::string& errorStr, Severity sev, bool checkCoverage, std::string path, std::string schemaPath ) {
+  // Returns true if everything in `result` is permitted by `schema`
+  bool ok = true;
+
+  try {
+    if(normJSONType(schemaValue.type()) != normJSONType(resultValue.type())) {
+      errorStr += format("ERROR: Incorrect value type for key `%s'\n", path.c_str());
+      TraceEvent(sev, "SchemaMismatch").detail("Path", path).detail("SchemaType", schemaValue.type()).detail("ValueType", resultValue.type());
+      return false;
+    }
+
+    if(resultValue.type() == json_spirit::obj_type) {
+      auto& result = resultValue.get_obj();
+      auto& schema = schemaValue.get_obj();
+
+      for(auto& rkv : result) {
+        auto& key = rkv.first;
+        auto& rv = rkv.second;
+        std::string kpath = path + "." + key;
+        std::string spath = schemaPath + "." + key;
+
+        if(checkCoverage) {
+          schemaCoverage(spath);
+        }
+
+        if(!schema.count(key)) {
+          errorStr += format("ERROR: Unknown key `%s'\n", kpath.c_str());
+          TraceEvent(sev, "SchemaMismatch").detail("Path", kpath).detail("SchemaPath", spath);
+          ok = false;
+          continue;
+        }
+        auto& sv = schema.at(key);
+
+        if(sv.type() == json_spirit::obj_type && sv.get_obj().count("$enum")) {
+          auto& enum_values = sv.get_obj().at("$enum").get_array();
+
+          bool any_match = false;
+          for(auto& enum_item : enum_values)
+            if(enum_item == rv) {
+              any_match = true;
+              if(checkCoverage) {
+                schemaCoverage(spath + ".$enum." + enum_item.get_str());
+              }
+              break;
+            }
+          if(!any_match) {
+            errorStr += format("ERROR: Unknown value `%s' for key `%s'\n", json_spirit::write_string(rv).c_str(), kpath.c_str());
+            TraceEvent(sev, "SchemaMismatch").detail("Path", kpath).detail("SchemaEnumItems", enum_values.size()).detail("Value", json_spirit::write_string(rv));
+            if(checkCoverage) {
+              schemaCoverage(spath + ".$enum." + json_spirit::write_string(rv));
+            }
+            ok = false;
+          }
+        } else if(sv.type() == json_spirit::obj_type && sv.get_obj().count("$map")) {
+          if(rv.type() != json_spirit::obj_type) {
+            errorStr += format("ERROR: Expected an object as the value for key `%s'\n", kpath.c_str());
+            TraceEvent(sev, "SchemaMismatch").detail("Path", kpath).detail("SchemaType", sv.type()).detail("ValueType", rv.type());
+            ok = false;
+            continue;
+          }
+          if(sv.get_obj().at("$map").type() != json_spirit::obj_type) {
+            continue;
+          }
+          auto& schemaVal = sv.get_obj().at("$map");
+          auto& valueObj = rv.get_obj();
+
+          if(checkCoverage) {
+            schemaCoverage(spath + ".$map");
+          }
+
+          for(auto& valuePair : valueObj) {
+            auto vpath = kpath + "[" + valuePair.first + "]";
+            auto upath = spath + ".$map";
+            if (valuePair.second.type() != json_spirit::obj_type) {
+              errorStr += format("ERROR: Expected an object for `%s'\n", vpath.c_str());
+              TraceEvent(sev, "SchemaMismatch").detail("Path", vpath).detail("ValueType", valuePair.second.type());
+              ok = false;
+              continue;
+            }
+            if(!schemaMatch(schemaVal, valuePair.second, errorStr, sev, checkCoverage, vpath, upath)) {
+              ok = false;
+            }
+          }
+        } else {
+          if(!schemaMatch(sv, rv, errorStr, sev, checkCoverage, kpath, spath)) {
+            ok = false;
+          }
+        }
+      }
+    } else if(resultValue.type() == json_spirit::array_type) {
+      auto& valueArray = resultValue.get_array();
+      auto& schemaArray = schemaValue.get_array();
+      if(!schemaArray.size()) {
+        // An empty schema array means that the value array is required to be empty
+        if(valueArray.size()) {
+          errorStr += format("ERROR: Expected an empty array for key `%s'\n", path.c_str());
+          TraceEvent(sev, "SchemaMismatch").detail("Path", path).detail("SchemaSize", schemaArray.size()).detail("ValueSize", valueArray.size());
+          return false;
+        }
+      } else if(schemaArray.size() == 1) {
+        // A one item schema array means that all items in the value must match the first item in the schema
+        int index = 0;
+        for(auto &valueItem : valueArray) {
+          if(!schemaMatch(schemaArray[0], valueItem, errorStr, sev, checkCoverage, path + format("[%d]", index), schemaPath + "[0]")) {
+            ok = false;
+          }
+          index++;
+        }
+      } else {
+        ASSERT(false); // Schema doesn't make sense
+      }
+    }
+    return ok;
+  } catch (std::exception& e) {
+    TraceEvent(SevError, "SchemaMatchException").detail("What", e.what()).detail("Path", path).detail("SchemaPath", schemaPath);
+    throw unknown_error();
+  }
+}
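A minimal sketch of driving the rewritten, value-based schemaMatch; the schema and result documents here are invented for illustration, and the include path is an assumption (the declaration change appears in the header hunk just below). json_spirit's read_string is assumed available, as elsewhere in the codebase.

#include <iostream>
#include <string>
#include "fdbclient/ManagementAPI.h" // assumed: declares schemaMatch (see header change below)

void checkAgainstSchema() {
  json_spirit::mValue schema, result;
  json_spirit::read_string(std::string(R"({"mode":{"$enum":["on","off"]},"counts":{"$map":{"value":0}}})"), schema);
  json_spirit::read_string(std::string(R"({"mode":"on","counts":{"proxy":{"value":5}}})"), result);

  std::string errors;
  // Values now recurse directly: objects, arrays, $enum and $map are all handled by type.
  if (!schemaMatch(schema, result, errors)) {
    std::cout << errors;
  }
}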
@@ -177,6 +177,6 @@ Future<Void> waitForPrimaryDC( Database const& cx, StringRef const& dcId );
 Future<std::vector<NetworkAddress>> getCoordinators( Database const& cx );

 void schemaCoverage( std::string const& spath, bool covered=true );
-bool schemaMatch( StatusObject const schema, StatusObject const result, std::string& errorStr, Severity sev=SevError, bool checkCoverage=false, std::string path = std::string(), std::string schema_path = std::string() );
+bool schemaMatch( json_spirit::mValue const& schema, json_spirit::mValue const& result, std::string& errorStr, Severity sev=SevError, bool checkCoverage=false, std::string path = std::string(), std::string schema_path = std::string() );

 #endif
@@ -1,3 +1,4 @@
 /*
  * MasterProxyInterface.h
  *

@@ -26,6 +27,8 @@
 #include "fdbclient/StorageServerInterface.h"
 #include "fdbclient/CommitTransaction.h"

+#include "flow/Stats.h"
+
 struct MasterProxyInterface {
 	enum { LocationAwareLoadBalance = 1 };
 	enum { AlwaysFresh = 1 };

@@ -74,7 +77,7 @@ struct CommitID {
 	CommitID( Version version, uint16_t txnBatchId ) : version(version), txnBatchId(txnBatchId) {}
 };

-struct CommitTransactionRequest {
+struct CommitTransactionRequest : TimedRequest {
 	enum {
 		FLAG_IS_LOCK_AWARE = 0x1,
 		FLAG_FIRST_IN_BATCH = 0x2

@@ -120,7 +123,7 @@ struct GetReadVersionReply {
 	}
 };

-struct GetReadVersionRequest {
+struct GetReadVersionRequest : TimedRequest {
 	enum {
 		PRIORITY_SYSTEM_IMMEDIATE = 15 << 24, // Highest possible priority, always executed even if writes are otherwise blocked
 		PRIORITY_DEFAULT = 8 << 24,
@@ -124,6 +124,15 @@ const KeyRef JSONSchemas::statusSchema = LiteralStringRef(R"statusSchema(
 					"hz":0.0,
 					"counter":0,
 					"roughness":0.0
+				},
+				"grv_latency_bands":{
+					"$map": 1
+				},
+				"read_latency_bands":{
+					"$map": 1
+				},
+				"commit_latency_bands":{
+					"$map": 1
 				}
 			}
 		],

@@ -604,7 +613,7 @@ const KeyRef JSONSchemas::statusSchema = LiteralStringRef(R"statusSchema(
 	}
 })statusSchema");

-const KeyRef JSONSchemas::configurationSchema = LiteralStringRef(R"configSchema(
+const KeyRef JSONSchemas::clusterConfigurationSchema = LiteralStringRef(R"configSchema(
 {
 	"create":{
 		"$enum":[

@@ -671,3 +680,25 @@ const KeyRef JSONSchemas::configurationSchema = LiteralStringRef(R"configSchema(
 	"auto_logs":3,
 	"proxies":5
 })configSchema");
+
+const KeyRef JSONSchemas::latencyBandConfigurationSchema = LiteralStringRef(R"configSchema(
+{
+	"get_read_version":{
+		"bands":[
+			0.0
+		]
+	},
+	"read":{
+		"bands":[
+			0.0
+		],
+		"max_key_selector_offset":0,
+		"max_read_bytes":0
+	},
+	"commit":{
+		"bands":[
+			0.0
+		],
+		"max_commit_bytes":0
+	}
+})configSchema");
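For reference, a value accepted by the new latencyBandConfigurationSchema would look like the following. The band thresholds and byte limits are illustrative numbers only, written in the same LiteralStringRef style this file uses:

static const KeyRef exampleLatencyBandConfig = LiteralStringRef(R"example(
{
	"get_read_version":{ "bands":[ 0.005, 0.02, 0.1 ] },
	"read":{ "bands":[ 0.01, 0.05 ], "max_key_selector_offset":1000, "max_read_bytes":1000000 },
	"commit":{ "bands":[ 0.05, 0.25 ], "max_commit_bytes":500000 }
})example");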
@@ -28,7 +28,8 @@

 struct JSONSchemas {
 	static const KeyRef statusSchema;
-	static const KeyRef configurationSchema;
+	static const KeyRef clusterConfigurationSchema;
+	static const KeyRef latencyBandConfigurationSchema;
 };

 #endif /* FDBCLIENT_SCHEMAS_H */
@@ -27,6 +27,7 @@
 #include "fdbrpc/QueueModel.h"
 #include "fdbrpc/fdbrpc.h"
 #include "fdbrpc/LoadBalance.actor.h"
+#include "flow/Stats.h"

 struct StorageServerInterface {
 	enum {

@@ -107,7 +108,7 @@ struct GetValueReply : public LoadBalancedReply {
 	}
 };

-struct GetValueRequest {
+struct GetValueRequest : TimedRequest {
 	Key key;
 	Version version;
 	Optional<UID> debugID;

@@ -150,7 +151,7 @@ struct GetKeyValuesReply : public LoadBalancedReply {
 	}
 };

-struct GetKeyValuesRequest {
+struct GetKeyValuesRequest : TimedRequest {
 	Arena arena;
 	KeySelectorRef begin, end;
 	Version version; // or latestVersion

@@ -178,7 +179,7 @@ struct GetKeyReply : public LoadBalancedReply {
 	}
 };

-struct GetKeyRequest {
+struct GetKeyRequest : TimedRequest {
 	Arena arena;
 	KeySelectorRef sel;
 	Version version; // or latestVersion
@@ -381,6 +381,8 @@ std::string encodeExcludedServersKey( AddressExclusion const& addr ) {
 	return excludedServersPrefix.toString() + as;
 }

+const KeyRef configVersionKey = LiteralStringRef("\xff/conf/confChange");
+
 const KeyRangeRef workerListKeys( LiteralStringRef("\xff/worker/"), LiteralStringRef("\xff/worker0") );
 const KeyRef workerListPrefix = workerListKeys.begin;

@@ -434,6 +436,9 @@ const KeyRangeRef fdbClientInfoPrefixRange(LiteralStringRef("\xff\x02/fdbClientI
 const KeyRef fdbClientInfoTxnSampleRate = LiteralStringRef("\xff\x02/fdbClientInfo/client_txn_sample_rate/");
 const KeyRef fdbClientInfoTxnSizeLimit = LiteralStringRef("\xff\x02/fdbClientInfo/client_txn_size_limit/");

+// Request latency measurement key
+const KeyRef latencyBandConfigKey = LiteralStringRef("\xff\x02/latencyBandConfig");
+
 // Keyspace to maintain wall clock to version map
 const KeyRangeRef timeKeeperPrefixRange(LiteralStringRef("\xff\x02/timeKeeper/map/"), LiteralStringRef("\xff\x02/timeKeeper/map0"));
 const KeyRef timeKeeperVersionKey = LiteralStringRef("\xff\x02/timeKeeper/version");
@@ -133,6 +133,11 @@ extern const KeyRef excludedServersVersionKey; // The value of this key shall b
 const AddressExclusion decodeExcludedServersKey( KeyRef const& key ); // where key.startsWith(excludedServersPrefix)
 std::string encodeExcludedServersKey( AddressExclusion const& );

+// "\xff/conf/confChange" := ""
+// This is the key representing the version of the configuration, which should be updated for each
+// new configuration.
+extern const KeyRef configVersionKey;
+
 // "\xff/workers/[[processID]]" := ""
 // Asynchronously updated by the cluster controller, this is a list of fdbserver processes that have joined the cluster
 // and are currently (recently) available

@@ -212,6 +217,9 @@ extern const KeyRangeRef fdbClientInfoPrefixRange;
 extern const KeyRef fdbClientInfoTxnSampleRate;
 extern const KeyRef fdbClientInfoTxnSizeLimit;

+// Request latency measurement key
+extern const KeyRef latencyBandConfigKey;
+
 // Keyspace to maintain wall clock to version map
 extern const KeyRangeRef timeKeeperPrefixRange;
 extern const KeyRef timeKeeperVersionKey;
@@ -1,5 +1,4 @@
 set(FDBRPC_SRCS
-  ActorFuzz.actor.cpp
   AsyncFileCached.actor.h
   AsyncFileEIO.actor.h
   AsyncFileKAIO.actor.h

@@ -11,9 +10,7 @@ set(FDBRPC_SRCS
   AsyncFileWriteChecker.cpp
   batcher.actor.h
   crc32c.cpp
-  dsltest.actor.cpp
   FailureMonitor.actor.cpp
-  FlowTests.actor.cpp
   FlowTransport.actor.cpp
   genericactors.actor.h
   genericactors.actor.cpp

@@ -55,8 +52,13 @@ if(NOT WIN32)
   list(APPEND FDBRPC_SRCS libcoroutine/context.c libeio/eio.c)
 endif()

-actor_set(FDBRPC_BUILD "${FDBRPC_SRCS}")
-add_library(fdbrpc STATIC ${FDBRPC_BUILD})
-actor_compile(fdbrpc "${FDBRPC_SRCS}")
+set(FDBRPC_SRCS_DISABLE_ACTOR_WITHOUT_WAIT_WARNING
+  ActorFuzz.actor.cpp
+  FlowTests.actor.cpp
+  dsltest.actor.cpp)
+
+add_flow_target(STATIC_LIBRARY NAME fdbrpc
+  SRCS ${FDBRPC_SRCS}
+  DISABLE_ACTOR_WITHOUT_WAIT_WARNING ${FDBRPC_SRCS_DISABLE_ACTOR_WITHOUT_WAIT_WARNING})
 target_include_directories(fdbrpc PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/libeio)
 target_link_libraries(fdbrpc PUBLIC flow)
@@ -148,6 +148,27 @@ ProcessClass::Fitness ProcessClass::machineClassFitness( ClusterRole role ) cons
 		default:
 			return ProcessClass::WorstFit;
 		}
+	case ProcessClass::DataDistributor:
+		switch( _class ) {
+		case ProcessClass::DataDistributorClass:
+			return ProcessClass::BestFit;
+		case ProcessClass::StatelessClass:
+			return ProcessClass::GoodFit;
+		case ProcessClass::MasterClass:
+			return ProcessClass::OkayFit;
+		case ProcessClass::ResolutionClass:
+			return ProcessClass::OkayFit;
+		case ProcessClass::TransactionClass:
+			return ProcessClass::OkayFit;
+		case ProcessClass::ProxyClass:
+			return ProcessClass::OkayFit;
+		case ProcessClass::UnsetClass:
+			return ProcessClass::UnsetFit;
+		case ProcessClass::TesterClass:
+			return ProcessClass::NeverAssign;
+		default:
+			return ProcessClass::WorstFit;
+		}
 	default:
 		return ProcessClass::NeverAssign;
 	}
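A quick reading of the new DataDistributor column in the fitness table above; this is a hypothetical sanity check, assuming ProcessClass's (ClassType, ClassSource) constructor and flow's ASSERT macro:

void checkDataDistributorFitness() {
  ProcessClass dd(ProcessClass::DataDistributorClass, ProcessClass::CommandLineSource);
  ProcessClass stateless(ProcessClass::StatelessClass, ProcessClass::CommandLineSource);
  ProcessClass storage(ProcessClass::StorageClass, ProcessClass::CommandLineSource);

  ASSERT(dd.machineClassFitness(ProcessClass::DataDistributor) == ProcessClass::BestFit);
  ASSERT(stateless.machineClassFitness(ProcessClass::DataDistributor) == ProcessClass::GoodFit);
  // StorageClass is not listed in the new case, so it falls through to the default branch.
  ASSERT(storage.machineClassFitness(ProcessClass::DataDistributor) == ProcessClass::WorstFit);
}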
@@ -26,9 +26,9 @@

 struct ProcessClass {
 	// This enum is stored in restartInfo.ini for upgrade tests, so be very careful about changing the existing items!
-	enum ClassType { UnsetClass, StorageClass, TransactionClass, ResolutionClass, TesterClass, ProxyClass, MasterClass, StatelessClass, LogClass, ClusterControllerClass, LogRouterClass, InvalidClass = -1 };
+	enum ClassType { UnsetClass, StorageClass, TransactionClass, ResolutionClass, TesterClass, ProxyClass, MasterClass, StatelessClass, LogClass, ClusterControllerClass, LogRouterClass, DataDistributorClass, InvalidClass = -1 };
 	enum Fitness { BestFit, GoodFit, UnsetFit, OkayFit, WorstFit, ExcludeFit, NeverAssign }; //cannot be larger than 7 because of leader election mask
-	enum ClusterRole { Storage, TLog, Proxy, Master, Resolver, LogRouter, ClusterController, NoRole };
+	enum ClusterRole { Storage, TLog, Proxy, Master, Resolver, LogRouter, ClusterController, DataDistributor, NoRole };
 	enum ClassSource { CommandLineSource, AutoSource, DBSource, InvalidSource = -1 };
 	int16_t _class;
 	int16_t _source;

@@ -48,6 +48,7 @@ public:
 		else if (s=="log") _class = LogClass;
 		else if (s=="router") _class = LogRouterClass;
 		else if (s=="cluster_controller") _class = ClusterControllerClass;
+		else if (s=="data_distributor") _class = DataDistributorClass;
 		else _class = InvalidClass;
 	}

@@ -63,6 +64,7 @@ public:
 		else if (classStr=="log") _class = LogClass;
 		else if (classStr=="router") _class = LogRouterClass;
 		else if (classStr=="cluster_controller") _class = ClusterControllerClass;
+		else if (classStr=="data_distributor") _class = DataDistributorClass;
 		else _class = InvalidClass;

 		if (sourceStr=="command_line") _source = CommandLineSource;

@@ -93,6 +95,7 @@ public:
 		case LogClass: return "log";
 		case LogRouterClass: return "router";
 		case ClusterControllerClass: return "cluster_controller";
+		case DataDistributorClass: return "data_distributor";
 		default: return "invalid";
 		}
 	}
@@ -98,6 +98,7 @@ public:
 		case ProcessClass::LogClass: return true;
 		case ProcessClass::LogRouterClass: return false;
 		case ProcessClass::ClusterControllerClass: return false;
+		case ProcessClass::DataDistributorClass: return false;
 		default: return false;
 		}
 	}
@@ -149,7 +149,7 @@ static void applyMetadataMutations(UID const& dbgid, Arena &arena, VectorRef<Mut
 			}
 			else if (m.param1.startsWith(configKeysPrefix) || m.param1 == coordinatorsKey) {
 				if(Optional<StringRef>(m.param2) != txnStateStore->readValue(m.param1).get().castTo<StringRef>()) { // FIXME: Make this check more specific, here or by reading configuration whenever there is a change
-					if(!m.param1.startsWith( excludedServersPrefix ) && m.param1 != excludedServersVersionKey) {
+					if(!m.param1.startsWith( excludedServersPrefix ) && m.param1 != excludedServersVersionKey && m.param1 != configVersionKey) {
 						auto t = txnStateStore->readValue(m.param1).get();
 						TraceEvent("MutationRequiresRestart", dbgid).detail("M", m.toString()).detail("PrevValue", t.present() ? printable(t.get()) : "(none)").detail("ToCommit", toCommit!=NULL);
 						if(confChange) *confChange = true;
@@ -13,6 +13,7 @@ set(FDBSERVER_SRCS
   DataDistribution.h
   DataDistributionQueue.actor.cpp
   DataDistributionTracker.actor.cpp
+  DataDistributorInterface.h
   DBCoreState.h
   DiskQueue.actor.cpp
   fdbserver.actor.cpp

@@ -27,6 +28,8 @@ set(FDBSERVER_SRCS
   KeyValueStoreSQLite.actor.cpp
   Knobs.cpp
   Knobs.h
+  LatencyBandConfig.cpp
+  LatencyBandConfig.h
   LeaderElection.actor.cpp
   LeaderElection.h
   LogProtocolMessage.h

@@ -173,9 +176,7 @@ set(FDBSERVER_SRCS

 file(MAKE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/workloads)

-actor_set(FDBSERVER_BUILD "${FDBSERVER_SRCS}")
-add_executable(fdbserver ${FDBSERVER_BUILD})
-actor_compile(fdbserver "${FDBSERVER_SRCS}")
+add_flow_target(EXECUTABLE NAME fdbserver SRCS ${FDBSERVER_SRCS})
 target_include_directories(fdbserver PRIVATE
   ${CMAKE_CURRENT_BINARY_DIR}/workloads
   ${CMAKE_CURRENT_SOURCE_DIR}/workloads)
@@ -22,14 +22,17 @@
 #include "flow/ActorCollection.h"
 #include "fdbclient/NativeAPI.h"
 #include "fdbserver/CoordinationInterface.h"
+#include "fdbserver/DataDistributorInterface.h"
 #include "fdbserver/Knobs.h"
 #include "fdbserver/MoveKeys.h"
 #include "fdbserver/WorkerInterface.h"
 #include "fdbserver/LeaderElection.h"
+#include "fdbserver/LogSystemConfig.h"
 #include "fdbserver/WaitFailure.h"
 #include "fdbserver/ClusterRecruitmentInterface.h"
 #include "fdbserver/ServerDBInfo.h"
 #include "fdbserver/Status.h"
+#include "fdbserver/LatencyBandConfig.h"
 #include <algorithm>
 #include "fdbclient/DatabaseContext.h"
 #include "fdbserver/RecoveryState.h"

@@ -105,7 +108,20 @@ public:
 		serverInfo( new AsyncVar<ServerDBInfo>( ServerDBInfo() ) ),
 		db( DatabaseContext::create( clientInfo, Future<Void>(), LocalityData(), true, TaskDefaultEndpoint, true ) ) // SOMEDAY: Locality!
 	{
 	}
+
+	void setDistributor(const DataDistributorInterface& distributorInterf) {
+		ServerDBInfo newInfo = serverInfo->get();
+		newInfo.id = g_random->randomUniqueID();
+		newInfo.distributor = distributorInterf;
+		serverInfo->set( newInfo );
+	}
+
+	void clearDistributor() {
+		ServerDBInfo newInfo = serverInfo->get();
+		newInfo.id = g_random->randomUniqueID();
+		newInfo.distributor = Optional<DataDistributorInterface>();
+		serverInfo->set( newInfo );
+	}
 };

@@ -502,12 +518,19 @@ public:
 		return result;
 	}

+	void updateKnownIds(std::map< Optional<Standalone<StringRef>>, int>* id_used) {
+		(*id_used)[masterProcessId]++;
+		(*id_used)[clusterControllerProcessId]++;
+		if (db.serverInfo->get().distributor.present()) {
+			(*id_used)[db.serverInfo->get().distributor.get().locality.processId()]++;
+		}
+	}
+
 	RecruitRemoteFromConfigurationReply findRemoteWorkersForConfiguration( RecruitRemoteFromConfigurationRequest const& req ) {
 		RecruitRemoteFromConfigurationReply result;
 		std::map< Optional<Standalone<StringRef>>, int> id_used;

-		id_used[masterProcessId]++;
-		id_used[clusterControllerProcessId]++;
+		updateKnownIds(&id_used);

 		std::set<Optional<Key>> remoteDC;
 		remoteDC.insert(req.dcId);

@@ -545,8 +568,7 @@ public:
 	ErrorOr<RecruitFromConfigurationReply> findWorkersForConfiguration( RecruitFromConfigurationRequest const& req, Optional<Key> dcId ) {
 		RecruitFromConfigurationReply result;
 		std::map< Optional<Standalone<StringRef>>, int> id_used;
-		id_used[masterProcessId]++;
-		id_used[clusterControllerProcessId]++;
+		updateKnownIds(&id_used);

 		ASSERT(dcId.present());

@@ -674,9 +696,7 @@ public:
 		} else {
 			RecruitFromConfigurationReply result;
 			std::map< Optional<Standalone<StringRef>>, int> id_used;
-			id_used[masterProcessId]++;
-			id_used[clusterControllerProcessId]++;
+			updateKnownIds(&id_used);

 			auto tlogs = getWorkersForTlogs( req.configuration, req.configuration.tLogReplicationFactor, req.configuration.getDesiredLogs(), req.configuration.tLogPolicy, id_used );
 			for(int i = 0; i < tlogs.size(); i++) {
 				result.tLogs.push_back(tlogs[i].first);

@@ -898,6 +918,9 @@ public:

 		std::map< Optional<Standalone<StringRef>>, int> id_used;
 		id_used[clusterControllerProcessId]++;
+		if (db.serverInfo->get().distributor.present()) {
+			id_used[db.serverInfo->get().distributor.get().locality.processId()]++;
+		}
 		WorkerFitnessInfo mworker = getWorkerForRoleInDatacenter(clusterControllerDcId, ProcessClass::Master, ProcessClass::NeverAssign, db.config, id_used, true);

 		if ( oldMasterFit < mworker.fitness )

@@ -991,8 +1014,31 @@ public:
 		return false;
 	}

+	std::map< Optional<Standalone<StringRef>>, int> getUsedIds() {
+		std::map<Optional<Standalone<StringRef>>, int> idUsed;
+		updateKnownIds(&idUsed);
+
+		auto dbInfo = db.serverInfo->get();
+		for (const auto& tlogset : dbInfo.logSystemConfig.tLogs) {
+			for (const auto& tlog: tlogset.tLogs) {
+				if (tlog.present()) {
+					idUsed[tlog.interf().locality.processId()]++;
+				}
+			}
+		}
+		for (const MasterProxyInterface& interf : dbInfo.client.proxies) {
+			ASSERT(interf.locality.processId().present());
+			idUsed[interf.locality.processId()]++;
+		}
+		for (const ResolverInterface& interf: dbInfo.resolvers) {
+			ASSERT(interf.locality.processId().present());
+			idUsed[interf.locality.processId()]++;
+		}
+		return idUsed;
+	}
+
 	std::map< Optional<Standalone<StringRef>>, WorkerInfo > id_worker;
 	std::map< Optional<Standalone<StringRef>>, ProcessClass > id_class; //contains the mapping from process id to process class from the database
 	Standalone<RangeResultRef> lastProcessClasses;
 	bool gotProcessClasses;
 	bool gotFullyRecoveredConfig;

@@ -1016,6 +1062,7 @@ public:
 	Optional<double> remoteStartTime;
 	Version datacenterVersionDifference;
 	bool versionDifferenceUpdated;
+	PromiseStream<Future<Void>> addActor;

 	ClusterControllerData( ClusterControllerFullInterface const& ccInterface, LocalityData const& locality )
 		: id(ccInterface.id()), ac(false), outstandingRequestChecker(Void()), gotProcessClasses(false), gotFullyRecoveredConfig(false), startTime(now()), datacenterVersionDifference(0), versionDifferenceUpdated(false)

@@ -1035,14 +1082,6 @@ public:
 	}
 };

-template <class K, class T>
-vector<T> values( std::map<K,T> const& map ) {
-	vector<T> t;
-	for(auto i = map.begin(); i!=map.end(); ++i)
-		t.push_back(i->second);
-	return t;
-}
-
 ACTOR Future<Void> clusterWatchDatabase( ClusterControllerData* cluster, ClusterControllerData::DBInfo* db )
 {
 	state MasterInterface iMaster;

@@ -1064,6 +1103,9 @@ ACTOR Future<Void> clusterWatchDatabase( ClusterControllerData* cluster, Cluster
 			//This should always be possible, because we can recruit the master on the same process as the cluster controller.
 			std::map< Optional<Standalone<StringRef>>, int> id_used;
 			id_used[cluster->clusterControllerProcessId]++;
+			if (cluster->db.serverInfo->get().distributor.present()) {
+				id_used[cluster->db.serverInfo->get().distributor.get().locality.processId()]++;
+			}
 			state WorkerFitnessInfo masterWorker = cluster->getWorkerForRoleInDatacenter(cluster->clusterControllerDcId, ProcessClass::Master, ProcessClass::NeverAssign, db->config, id_used);
 			if( ( masterWorker.worker.second.machineClassFitness( ProcessClass::Master ) > SERVER_KNOBS->EXPECTED_MASTER_FITNESS || masterWorker.worker.first.locality.processId() == cluster->clusterControllerProcessId )
 				&& now() - cluster->startTime < SERVER_KNOBS->WAIT_FOR_GOOD_RECRUITMENT_DELAY ) {

@@ -1099,6 +1141,7 @@ ACTOR Future<Void> clusterWatchDatabase( ClusterControllerData* cluster, Cluster
 				dbInfo.masterLifetime = db->serverInfo->get().masterLifetime;
 				++dbInfo.masterLifetime;
 				dbInfo.clusterInterface = db->serverInfo->get().clusterInterface;
+				dbInfo.distributor = db->serverInfo->get().distributor;

 				TraceEvent("CCWDB", cluster->id).detail("Lifetime", dbInfo.masterLifetime.toString()).detail("ChangeID", dbInfo.id);
 				db->serverInfo->set( dbInfo );

@@ -1704,6 +1747,11 @@ void registerWorker( RegisterWorkerRequest req, ClusterControllerData *self ) {
 		}
 	}

+	if ( req.distributorInterf.present() && !self->db.serverInfo->get().distributor.present() ) {
+		const DataDistributorInterface& di = req.distributorInterf.get();
+		TraceEvent("ClusterController_RegisterDataDistributor", self->id).detail("DDID", di.id());
+		self->db.setDistributor( di );
+	}
 	if( info == self->id_worker.end() ) {
 		self->id_worker[w.locality.processId()] = WorkerInfo( workerAvailabilityWatch( w, newProcessClass, self ), req.reply, req.generation, w, req.initialClass, newProcessClass, newPriorityInfo );
 		checkOutstandingRequests( self );

@@ -1968,6 +2016,43 @@ ACTOR Future<Void> monitorProcessClasses(ClusterControllerData *self) {
 	}
 }

+ACTOR Future<Void> monitorServerInfoConfig(ClusterControllerData::DBInfo* db) {
+	loop {
+		state ReadYourWritesTransaction tr(db->db);
+		loop {
+			try {
+				tr.setOption(FDBTransactionOptions::READ_SYSTEM_KEYS);
+				tr.setOption(FDBTransactionOptions::PRIORITY_SYSTEM_IMMEDIATE);
+				tr.setOption(FDBTransactionOptions::READ_LOCK_AWARE);
+
+				Optional<Value> configVal = wait(tr.get(latencyBandConfigKey));
+				Optional<LatencyBandConfig> config;
+				if(configVal.present()) {
+					config = LatencyBandConfig::parse(configVal.get());
+				}
+
+				ServerDBInfo serverInfo = db->serverInfo->get();
+				if(config != serverInfo.latencyBandConfig) {
+					TraceEvent("LatencyBandConfigChanged").detail("Present", config.present());
+					serverInfo.id = g_random->randomUniqueID();
+					serverInfo.latencyBandConfig = config;
+					db->serverInfo->set(serverInfo);
+				}
+
+				state Future<Void> configChangeFuture = tr.watch(latencyBandConfigKey);
+
+				wait(tr.commit());
+				wait(configChangeFuture);
+
+				break;
+			}
+			catch (Error &e) {
+				wait(tr.onError(e));
+			}
+		}
+	}
+}
+
 ACTOR Future<Void> monitorClientTxnInfoConfigs(ClusterControllerData::DBInfo* db) {
 	loop {
 		state ReadYourWritesTransaction tr(db->db);

@@ -2177,24 +2262,85 @@ ACTOR Future<Void> updateDatacenterVersionDifference( ClusterControllerData *sel
 	}
 }

+ACTOR Future<DataDistributorInterface> startDataDistributor( ClusterControllerData *self ) {
+	state Optional<Key> dcId = self->clusterControllerDcId;
+	while ( !self->clusterControllerProcessId.present() || !self->masterProcessId.present() ) {
+		wait( delay(SERVER_KNOBS->WAIT_FOR_GOOD_RECRUITMENT_DELAY) );
+	}
+
+	loop {
+		try {
+			while ( self->db.serverInfo->get().recoveryState < RecoveryState::ACCEPTING_COMMITS ) {
+				wait( self->db.serverInfo->onChange() );
+			}
+
+			std::map<Optional<Standalone<StringRef>>, int> id_used = self->getUsedIds();
+			state WorkerFitnessInfo data_distributor = self->getWorkerForRoleInDatacenter(dcId, ProcessClass::DataDistributor, ProcessClass::NeverAssign, self->db.config, id_used);
+			state InitializeDataDistributorRequest req;
+			req.reqId = g_random->randomUniqueID();
+			TraceEvent("ClusterController_DataDistributorRecruit", req.reqId).detail("Addr", data_distributor.worker.first.address());
+
+			ErrorOr<DataDistributorInterface> distributor = wait( data_distributor.worker.first.dataDistributor.getReplyUnlessFailedFor(req, SERVER_KNOBS->WAIT_FOR_DISTRIBUTOR_JOIN_DELAY, 0) );
+			if (distributor.present()) {
+				TraceEvent("ClusterController_DataDistributorRecruited", req.reqId).detail("Addr", data_distributor.worker.first.address());
+				return distributor.get();
+			}
+		}
+		catch (Error& e) {
+			TraceEvent("ClusterController_DataDistributorRecruitError", req.reqId).error(e);
+			if ( e.code() != error_code_no_more_servers ) {
+				throw;
+			}
+		}
+		wait( delay(SERVER_KNOBS->ATTEMPT_RECRUITMENT_DELAY) );
+	}
+}
+
+ACTOR Future<Void> waitDDRejoinOrStartDD( ClusterControllerData *self, ClusterControllerFullInterface *clusterInterface ) {
+	state Future<Void> initialDelay = delay(SERVER_KNOBS->WAIT_FOR_DISTRIBUTOR_JOIN_DELAY);
+
+	// wait for a while to see if existing data distributor will join.
+	loop choose {
+		when ( wait(initialDelay) ) { break; }
+		when ( wait(self->db.serverInfo->onChange()) ) { // Rejoins via worker registration
+			if ( self->db.serverInfo->get().distributor.present() ) {
+				TraceEvent("ClusterController_InfoChange", self->id)
+					.detail("DataDistributorID", self->db.serverInfo->get().distributor.get().id());
+				break;
+			}
+		}
+	}
+
+	loop {
+		if ( self->db.serverInfo->get().distributor.present() ) {
+			wait( waitFailureClient( self->db.serverInfo->get().distributor.get().waitFailure, SERVER_KNOBS->DD_FAILURE_TIME ) );
+			TraceEvent("ClusterController", self->id)
+				.detail("DataDistributorDied", self->db.serverInfo->get().distributor.get().id());
+			self->db.clearDistributor();
+		} else {
+			DataDistributorInterface distributorInterf = wait( startDataDistributor(self) );
+			self->db.setDistributor( distributorInterf );
+		}
+	}
+}
+
 ACTOR Future<Void> clusterControllerCore( ClusterControllerFullInterface interf, Future<Void> leaderFail, ServerCoordinators coordinators, LocalityData locality ) {
 	state ClusterControllerData self( interf, locality );
 	state Future<Void> coordinationPingDelay = delay( SERVER_KNOBS->WORKER_COORDINATION_PING_DELAY );
 	state uint64_t step = 0;
-	state PromiseStream<Future<Void>> addActor;
-	state Future<ErrorOr<Void>> error = errorOr( actorCollection( addActor.getFuture() ) );
+	state Future<ErrorOr<Void>> error = errorOr( actorCollection( self.addActor.getFuture() ) );

-	auto pSelf = &self;
-	addActor.send( failureDetectionServer( self.id, &self.db, interf.clientInterface.failureMonitoring.getFuture() ) );
-	addActor.send( clusterWatchDatabase( &self, &self.db ) ); // Start the master database
-	addActor.send( self.updateWorkerList.init( self.db.db ) );
-	addActor.send( statusServer( interf.clientInterface.databaseStatus.getFuture(), &self, coordinators));
-	addActor.send( timeKeeper(&self) );
-	addActor.send( monitorProcessClasses(&self) );
-	addActor.send( monitorClientTxnInfoConfigs(&self.db) );
-	addActor.send( updatedChangingDatacenters(&self) );
-	addActor.send( updatedChangedDatacenters(&self) );
-	addActor.send( updateDatacenterVersionDifference(&self) );
+	self.addActor.send( failureDetectionServer( self.id, &self.db, interf.clientInterface.failureMonitoring.getFuture() ) );
+	self.addActor.send( clusterWatchDatabase( &self, &self.db ) ); // Start the master database
+	self.addActor.send( self.updateWorkerList.init( self.db.db ) );
+	self.addActor.send( statusServer( interf.clientInterface.databaseStatus.getFuture(), &self, coordinators));
+	self.addActor.send( timeKeeper(&self) );
+	self.addActor.send( monitorProcessClasses(&self) );
+	self.addActor.send( monitorClientTxnInfoConfigs(&self.db) );
+	self.addActor.send( updatedChangingDatacenters(&self) );
+	self.addActor.send( updatedChangedDatacenters(&self) );
+	self.addActor.send( updateDatacenterVersionDifference(&self) );
+	self.addActor.send( waitDDRejoinOrStartDD(&self, &interf) );
 	//printf("%s: I am the cluster controller\n", g_network->getLocalAddress().toString().c_str());

 	loop choose {

@@ -2210,13 +2356,13 @@ ACTOR Future<Void> clusterControllerCore( ClusterControllerFullInterface interf,
 			return Void();
 		}
 		when( OpenDatabaseRequest req = waitNext( interf.clientInterface.openDatabase.getFuture() ) ) {
-			addActor.send( clusterOpenDatabase( &self.db, req.knownClientInfoID, req.issues.toString(), req.supportedVersions, req.traceLogGroup, req.reply ) );
+			self.addActor.send( clusterOpenDatabase( &self.db, req.knownClientInfoID, req.issues.toString(), req.supportedVersions, req.traceLogGroup, req.reply ) );
 		}
 		when( RecruitFromConfigurationRequest req = waitNext( interf.recruitFromConfiguration.getFuture() ) ) {
-			addActor.send( clusterRecruitFromConfiguration( &self, req ) );
+			self.addActor.send( clusterRecruitFromConfiguration( &self, req ) );
 		}
 		when( RecruitRemoteFromConfigurationRequest req = waitNext( interf.recruitRemoteFromConfiguration.getFuture() ) ) {
-			addActor.send( clusterRecruitRemoteFromConfiguration( &self, req ) );
+			self.addActor.send( clusterRecruitRemoteFromConfiguration( &self, req ) );
 		}
 		when( RecruitStorageRequest req = waitNext( interf.recruitStorage.getFuture() ) ) {
 			clusterRecruitStorage( &self, req );

@@ -2271,7 +2417,7 @@ ACTOR Future<Void> clusterControllerCore( ClusterControllerFullInterface interf,
 			clusterRegisterMaster( &self, req );
 		}
 		when( GetServerDBInfoRequest req = waitNext( interf.getServerDBInfo.getFuture() ) ) {
-			addActor.send( clusterGetServerInfo( &self.db, req.knownServerInfoID, req.issues.toString(), req.incompatiblePeers, req.reply ) );
+			self.addActor.send( clusterGetServerInfo( &self.db, req.knownServerInfoID, req.issues.toString(), req.incompatiblePeers, req.reply ) );
 		}
 		when( wait( leaderFail ) ) {
 			// We are no longer the leader if this has changed.
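The monitorServerInfoConfig actor added above watches the new latencyBandConfigKey. A hedged sketch of how a tool might install a configuration for it, using the same transaction retry idiom as the surrounding code (the function name is made up; only keys and options shown in this diff are assumed):

ACTOR Future<Void> setLatencyBandConfig(Database cx, Standalone<StringRef> configJson) {
  state Transaction tr(cx);
  loop {
    try {
      tr.setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
      tr.set(latencyBandConfigKey, configJson);
      wait(tr.commit());
      return Void();
    } catch (Error& e) {
      wait(tr.onError(e));
    }
  }
}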
@@ -26,6 +26,7 @@
 #include "fdbclient/StorageServerInterface.h"
 #include "fdbclient/MasterProxyInterface.h"
 #include "fdbclient/DatabaseConfiguration.h"
+#include "fdbserver/DataDistributorInterface.h"
 #include "fdbserver/MasterInterface.h"
 #include "fdbserver/RecoveryState.h"
 #include "fdbserver/TLogInterface.h"

@@ -166,15 +167,16 @@ struct RegisterWorkerRequest {
 	ProcessClass processClass;
 	ClusterControllerPriorityInfo priorityInfo;
 	Generation generation;
+	Optional<DataDistributorInterface> distributorInterf;
 	ReplyPromise<RegisterWorkerReply> reply;

 	RegisterWorkerRequest() : priorityInfo(ProcessClass::UnsetFit, false, ClusterControllerPriorityInfo::FitnessUnknown) {}
-	RegisterWorkerRequest(WorkerInterface wi, ProcessClass initialClass, ProcessClass processClass, ClusterControllerPriorityInfo priorityInfo, Generation generation) :
-		wi(wi), initialClass(initialClass), processClass(processClass), priorityInfo(priorityInfo), generation(generation) {}
+	RegisterWorkerRequest(WorkerInterface wi, ProcessClass initialClass, ProcessClass processClass, ClusterControllerPriorityInfo priorityInfo, Generation generation, Optional<DataDistributorInterface> ddInterf) :
+		wi(wi), initialClass(initialClass), processClass(processClass), priorityInfo(priorityInfo), generation(generation), distributorInterf(ddInterf) {}

 	template <class Ar>
 	void serialize( Ar& ar ) {
-		serializer(ar, wi, initialClass, processClass, priorityInfo, generation, reply);
+		serializer(ar, wi, initialClass, processClass, priorityInfo, generation, distributorInterf, reply);
 	}
 };
File diff suppressed because it is too large
@@ -200,17 +200,6 @@ struct InitialDataDistribution : ReferenceCounted<InitialDataDistribution> {
 	vector<DDShardInfo> shards;
 };

-Future<Void> dataDistribution(
-	Reference<AsyncVar<struct ServerDBInfo>> const& db,
-	MasterInterface const& mi, DatabaseConfiguration const& configuration,
-	PromiseStream< std::pair<UID, Optional<StorageServerInterface>> > const& serverChanges,
-	Reference<ILogSystem> const& logSystem,
-	Version const& recoveryCommitVersion,
-	std::vector<Optional<Key>> const& primaryDcId,
-	std::vector<Optional<Key>> const& remoteDcIds,
-	double* const& lastLimited,
-	Future<Void> const& remoteRecovered);
-
 Future<Void> dataDistributionTracker(
 	Reference<InitialDataDistribution> const& initData,
 	Database const& cx,

@@ -220,7 +209,7 @@ Future<Void> dataDistributionTracker(
 	FutureStream<Promise<int64_t>> const& getAverageShardBytes,
 	Promise<Void> const& readyToStart,
 	Reference<AsyncVar<bool>> const& zeroHealthyTeams,
-	UID const& masterId);
+	UID const& distributorId);

 Future<Void> dataDistributionQueue(
 	Database const& cx,

@@ -232,10 +221,9 @@ Future<Void> dataDistributionQueue(
 	Reference<ShardsAffectedByTeamFailure> const& shardsAffectedByTeamFailure,
 	MoveKeysLock const& lock,
 	PromiseStream<Promise<int64_t>> const& getAverageShardBytes,
-	MasterInterface const& mi,
+	UID const& distributorId,
 	int const& teamSize,
-	double* const& lastLimited,
-	Version const& recoveryVersion);
+	double* const& lastLimited);

 //Holds the permitted size and IO Bounds for a shard
 struct ShardSizeBounds {
@@ -331,10 +331,9 @@ void complete( RelocateData const& relocation, std::map<UID, Busyness> & busymap
 Future<Void> dataDistributionRelocator( struct DDQueueData* const& self, RelocateData const& rd );

 struct DDQueueData {
-	MasterInterface mi;
+	UID distributorId;
 	MoveKeysLock lock;
 	Database cx;
-	Version recoveryVersion;

 	std::vector<TeamCollectionInterface> teamCollections;
 	Reference<ShardsAffectedByTeamFailure> shardsAffectedByTeamFailure;
@@ -394,13 +393,13 @@ struct DDQueueData {
 		priority_relocations[priority]--;
 	}

-	DDQueueData( MasterInterface mi, MoveKeysLock lock, Database cx, std::vector<TeamCollectionInterface> teamCollections,
+	DDQueueData( UID mid, MoveKeysLock lock, Database cx, std::vector<TeamCollectionInterface> teamCollections,
 		Reference<ShardsAffectedByTeamFailure> sABTF, PromiseStream<Promise<int64_t>> getAverageShardBytes,
-		int teamSize, PromiseStream<RelocateShard> output, FutureStream<RelocateShard> input, PromiseStream<GetMetricsRequest> getShardMetrics, double* lastLimited, Version recoveryVersion ) :
+		int teamSize, PromiseStream<RelocateShard> output, FutureStream<RelocateShard> input, PromiseStream<GetMetricsRequest> getShardMetrics, double* lastLimited ) :
 		activeRelocations( 0 ), queuedRelocations( 0 ), bytesWritten ( 0 ), teamCollections( teamCollections ),
-		shardsAffectedByTeamFailure( sABTF ), getAverageShardBytes( getAverageShardBytes ), mi( mi ), lock( lock ),
+		shardsAffectedByTeamFailure( sABTF ), getAverageShardBytes( getAverageShardBytes ), distributorId( mid ), lock( lock ),
 		cx( cx ), teamSize( teamSize ), output( output ), input( input ), getShardMetrics( getShardMetrics ), startMoveKeysParallelismLock( SERVER_KNOBS->DD_MOVE_KEYS_PARALLELISM ),
-		finishMoveKeysParallelismLock( SERVER_KNOBS->DD_MOVE_KEYS_PARALLELISM ), lastLimited(lastLimited), recoveryVersion(recoveryVersion),
+		finishMoveKeysParallelismLock( SERVER_KNOBS->DD_MOVE_KEYS_PARALLELISM ), lastLimited(lastLimited),
 		suppressIntervals(0), lastInterval(0), unhealthyRelocations(0), rawProcessingUnhealthy( new AsyncVar<bool>(false) ) {}

 	void validate() {
@@ -506,7 +505,7 @@ struct DDQueueData {
 		}
 	}

-	ACTOR Future<Void> getSourceServersForRange( Database cx, MasterInterface mi, RelocateData input, PromiseStream<RelocateData> output ) {
+	ACTOR Future<Void> getSourceServersForRange( Database cx, RelocateData input, PromiseStream<RelocateData> output ) {
 		state std::set<UID> servers;
 		state Transaction tr(cx);

@@ -637,14 +636,14 @@ struct DDQueueData {
 				rrs.keys = affectedQueuedItems[r];

 				rrs.interval = TraceInterval("QueuedRelocation");
-				/*TraceEvent(rrs.interval.begin(), mi.id());
+				/*TraceEvent(rrs.interval.begin(), distributorId);
 					.detail("KeyBegin", printable(rrs.keys.begin)).detail("KeyEnd", printable(rrs.keys.end))
 					.detail("Priority", rrs.priority).detail("WantsNewServers", rrs.wantsNewServers);*/
 				queuedRelocations++;
 				startRelocation(rrs.priority);

 				fetchingSourcesQueue.insert( rrs );
-				getSourceActors.insert( rrs.keys, getSourceServersForRange( cx, mi, rrs, fetchSourceServersComplete ) );
+				getSourceActors.insert( rrs.keys, getSourceServersForRange( cx, rrs, fetchSourceServersComplete ) );
 			} else {
 				RelocateData newData( rrs );
 				newData.keys = affectedQueuedItems[r];
@@ -657,7 +656,7 @@ struct DDQueueData {
 					if( serverQueue.erase(rrs) > 0 ) {
 						if( !foundActiveRelocation ) {
 							newData.interval = TraceInterval("QueuedRelocation");
-							/*TraceEvent(newData.interval.begin(), mi.id());
+							/*TraceEvent(newData.interval.begin(), distributorId);
 								.detail("KeyBegin", printable(newData.keys.begin)).detail("KeyEnd", printable(newData.keys.end))
 								.detail("Priority", newData.priority).detail("WantsNewServers", newData.wantsNewServers);*/
 							queuedRelocations++;
@@ -677,14 +676,14 @@ struct DDQueueData {
 			}
 		}

-		/*TraceEvent("ReceivedRelocateShard", mi.id())
+		/*TraceEvent("ReceivedRelocateShard", distributorId)
 			.detail("KeyBegin", printable(rd.keys.begin))
 			.detail("KeyEnd", printable(rd.keys.end))
 			.detail("Priority", rd.priority)
 			.detail("AffectedRanges", affectedQueuedItems.size()); */
 	}

-	void completeSourceFetch( RelocateData results ) {
+	void completeSourceFetch( const RelocateData& results ) {
 		ASSERT( fetchingSourcesQueue.count( results ) );

 		//logRelocation( results, "GotSourceServers" );
@@ -696,12 +695,12 @@ struct DDQueueData {
 		}
 	}

-	void logRelocation( RelocateData rd, const char *title ) {
+	void logRelocation( const RelocateData& rd, const char *title ) {
 		std::string busyString;
 		for(int i = 0; i < rd.src.size() && i < teamSize * 2; i++)
 			busyString += describe(rd.src[i]) + " - (" + busymap[ rd.src[i] ].toString() + "); ";

-		TraceEvent(title, mi.id())
+		TraceEvent(title, distributorId)
 			.detail("KeyBegin", printable(rd.keys.begin))
 			.detail("KeyEnd", printable(rd.keys.end))
 			.detail("Priority", rd.priority)
@@ -759,7 +758,7 @@ struct DDQueueData {
 				!rd.keys.contains( it->range() ) &&
 				it->value().priority >= rd.priority &&
 				rd.priority < PRIORITY_TEAM_UNHEALTHY ) {
-				/*TraceEvent("OverlappingInFlight", mi.id())
+				/*TraceEvent("OverlappingInFlight", distributorId)
 					.detail("KeyBegin", printable(it->value().keys.begin))
 					.detail("KeyEnd", printable(it->value().keys.end))
 					.detail("Priority", it->value().priority); */
@@ -792,7 +791,7 @@ struct DDQueueData {

 			//logRelocation( rd, "LaunchingRelocation" );

-			//TraceEvent(rd.interval.end(), mi.id()).detail("Result","Success");
+			//TraceEvent(rd.interval.end(), distributorId).detail("Result","Success");
 			queuedRelocations--;
 			finishRelocation(rd.priority);

@@ -832,7 +831,7 @@ struct DDQueueData {
 			TraceEvent(SevWarnAlways, "LaunchingQueueSlowx1000").detail("Elapsed", now() - startTime );

 		/*if( startedHere > 0 ) {
-			TraceEvent("StartedDDRelocators", mi.id())
+			TraceEvent("StartedDDRelocators", distributorId)
 				.detail("QueueSize", queuedRelocations)
 				.detail("StartedHere", startedHere)
 				.detail("ActiveRelocations", activeRelocations);
@@ -853,7 +852,7 @@ ACTOR Future<Void> dataDistributionRelocator( DDQueueData *self, RelocateData rd
 	state PromiseStream<RelocateData> dataTransferComplete( self->dataTransferComplete );
 	state PromiseStream<RelocateData> relocationComplete( self->relocationComplete );
 	state bool signalledTransferComplete = false;
-	state UID masterId = self->mi.id();
+	state UID distributorId = self->distributorId;
 	state ParallelTCInfo healthyDestinations;

 	state bool anyHealthy = false;
@@ -867,7 +866,7 @@ ACTOR Future<Void> dataDistributionRelocator( DDQueueData *self, RelocateData rd
 		self->suppressIntervals++;
 	}

-	TraceEvent(relocateShardInterval.begin(), masterId)
+	TraceEvent(relocateShardInterval.begin(), distributorId)
 		.detail("KeyBegin", printable(rd.keys.begin)).detail("KeyEnd", printable(rd.keys.end))
 		.detail("Priority", rd.priority).detail("RelocationID", relocateShardInterval.pairID).detail("SuppressedEventCount", self->suppressIntervals);

@@ -928,7 +927,7 @@ ACTOR Future<Void> dataDistributionRelocator( DDQueueData *self, RelocateData rd

 					TEST(true); //did not find a healthy destination team on the first attempt
 					stuckCount++;
-					TraceEvent(stuckCount > 50 ? SevWarnAlways : SevWarn, "BestTeamStuck", masterId)
+					TraceEvent(stuckCount > 50 ? SevWarnAlways : SevWarn, "BestTeamStuck", distributorId)
 						.suppressFor(1.0)
 						.detail("Count", stuckCount)
 						.detail("TeamCollectionId", tciIndex)
@@ -981,14 +980,14 @@ ACTOR Future<Void> dataDistributionRelocator( DDQueueData *self, RelocateData rd
 			//FIXME: do not add data in flight to servers that were already in the src.
 			healthyDestinations.addDataInFlightToTeam(+metrics.bytes);

-			TraceEvent(relocateShardInterval.severity, "RelocateShardHasDestination", masterId)
+			TraceEvent(relocateShardInterval.severity, "RelocateShardHasDestination", distributorId)
 				.detail("PairId", relocateShardInterval.pairID)
 				.detail("DestinationTeam", describe(destIds))
 				.detail("ExtraIds", describe(extraIds));

 			state Error error = success();
 			state Promise<Void> dataMovementComplete;
-			state Future<Void> doMoveKeys = moveKeys(self->cx, rd.keys, destIds, healthyIds, self->lock, dataMovementComplete, &self->startMoveKeysParallelismLock, &self->finishMoveKeysParallelismLock, self->recoveryVersion, self->teamCollections.size() > 1, relocateShardInterval.pairID );
+			state Future<Void> doMoveKeys = moveKeys(self->cx, rd.keys, destIds, healthyIds, self->lock, dataMovementComplete, &self->startMoveKeysParallelismLock, &self->finishMoveKeysParallelismLock, self->teamCollections.size() > 1, relocateShardInterval.pairID );
 			state Future<Void> pollHealth = signalledTransferComplete ? Never() : delay( SERVER_KNOBS->HEALTH_POLL_TIME, TaskDataDistributionLaunch );
 			try {
 				loop {
@@ -999,7 +998,7 @@ ACTOR Future<Void> dataDistributionRelocator( DDQueueData *self, RelocateData rd
 							healthyIds.insert(healthyIds.end(), extraIds.begin(), extraIds.end());
 							extraIds.clear();
 							ASSERT(totalIds == destIds.size()); // Sanity check the destIDs before we move keys
-							doMoveKeys = moveKeys(self->cx, rd.keys, destIds, healthyIds, self->lock, Promise<Void>(), &self->startMoveKeysParallelismLock, &self->finishMoveKeysParallelismLock, self->recoveryVersion, self->teamCollections.size() > 1, relocateShardInterval.pairID );
+							doMoveKeys = moveKeys(self->cx, rd.keys, destIds, healthyIds, self->lock, Promise<Void>(), &self->startMoveKeysParallelismLock, &self->finishMoveKeysParallelismLock, self->teamCollections.size() > 1, relocateShardInterval.pairID );
 						} else {
 							self->fetchKeysComplete.insert( rd );
 							break;
@@ -1027,7 +1026,7 @@ ACTOR Future<Void> dataDistributionRelocator( DDQueueData *self, RelocateData rd
 				error = e;
 			}

-			//TraceEvent("RelocateShardFinished", masterId).detail("RelocateId", relocateShardInterval.pairID);
+			//TraceEvent("RelocateShardFinished", distributorId).detail("RelocateId", relocateShardInterval.pairID);

 			if( error.code() != error_code_move_to_removed_server ) {
 				if( !error.code() ) {
@@ -1042,7 +1041,7 @@ ACTOR Future<Void> dataDistributionRelocator( DDQueueData *self, RelocateData rd

 					// onFinished.send( rs );
 					if( !error.code() ) {
-						TraceEvent(relocateShardInterval.end(), masterId).detail("Result","Success");
+						TraceEvent(relocateShardInterval.end(), distributorId).detail("Result","Success");
 						if(rd.keys.begin == keyServersPrefix) {
 							TraceEvent("MovedKeyServerKeys").detail("Dest", describe(destIds)).trackLatest("MovedKeyServers");
 						}
@@ -1066,7 +1065,7 @@ ACTOR Future<Void> dataDistributionRelocator( DDQueueData *self, RelocateData rd
 			}
 		}
 	} catch (Error& e) {
-		TraceEvent(relocateShardInterval.end(), masterId).error(e, true);
+		TraceEvent(relocateShardInterval.end(), distributorId).error(e, true);
 		if( !signalledTransferComplete )
 			dataTransferComplete.send( rd );

@@ -1100,7 +1099,7 @@ ACTOR Future<bool> rebalanceTeams( DDQueueData* self, int priority, Reference<ID
 	std::vector<KeyRange> shards = self->shardsAffectedByTeamFailure->getShardsFor( ShardsAffectedByTeamFailure::Team( sourceTeam->getServerIDs(), primary ) );
 	for( int i = 0; i < shards.size(); i++ ) {
 		if( moveShard == shards[i] ) {
-			TraceEvent(priority == PRIORITY_REBALANCE_OVERUTILIZED_TEAM ? "BgDDMountainChopper" : "BgDDValleyFiller", self->mi.id())
+			TraceEvent(priority == PRIORITY_REBALANCE_OVERUTILIZED_TEAM ? "BgDDMountainChopper" : "BgDDValleyFiller", self->distributorId)
 				.detail("SourceBytes", sourceBytes)
 				.detail("DestBytes", destBytes)
 				.detail("ShardBytes", metrics.bytes)
@@ -1195,12 +1194,11 @@ ACTOR Future<Void> dataDistributionQueue(
 	Reference<ShardsAffectedByTeamFailure> shardsAffectedByTeamFailure,
 	MoveKeysLock lock,
 	PromiseStream<Promise<int64_t>> getAverageShardBytes,
-	MasterInterface mi,
+	UID distributorId,
 	int teamSize,
-	double* lastLimited,
-	Version recoveryVersion)
+	double* lastLimited)
 {
-	state DDQueueData self( mi, lock, cx, teamCollections, shardsAffectedByTeamFailure, getAverageShardBytes, teamSize, output, input, getShardMetrics, lastLimited, recoveryVersion );
+	state DDQueueData self( distributorId, lock, cx, teamCollections, shardsAffectedByTeamFailure, getAverageShardBytes, teamSize, output, input, getShardMetrics, lastLimited );
 	state std::set<UID> serversToLaunchFrom;
 	state KeyRange keysToLaunchFrom;
 	state RelocateData launchData;
@@ -1286,7 +1284,7 @@ ACTOR Future<Void> dataDistributionQueue(
 				highPriorityRelocations += it->second;
 			}

-			TraceEvent("MovingData", mi.id())
+			TraceEvent("MovingData", distributorId)
 				.detail( "InFlight", self.activeRelocations )
 				.detail( "InQueue", self.queuedRelocations )
 				.detail( "AverageShardSize", req.getFuture().isReady() ? req.getFuture().get() : -1 )
@@ -1303,7 +1301,7 @@ ACTOR Future<Void> dataDistributionQueue(
 	} catch (Error& e) {
 		if (e.code() != error_code_broken_promise && // FIXME: Get rid of these broken_promise errors every time we are killed by the master dying
 			e.code() != error_code_movekeys_conflict)
-			TraceEvent(SevError, "DataDistributionQueueError", mi.id()).error(e);
+			TraceEvent(SevError, "DataDistributionQueueError", distributorId).error(e);
 		throw e;
 	}
 }

@@ -64,7 +64,7 @@ struct ShardTrackedData {

 struct DataDistributionTracker {
 	Database cx;
-	UID masterId;
+	UID distributorId;
 	KeyRangeMap< ShardTrackedData > shards;
 	ActorCollection sizeChanges;

@@ -79,8 +79,8 @@ struct DataDistributionTracker {
 	Promise<Void> readyToStart;
 	Reference<AsyncVar<bool>> anyZeroHealthyTeams;

-	DataDistributionTracker(Database cx, UID masterId, Promise<Void> const& readyToStart, PromiseStream<RelocateShard> const& output, Reference<ShardsAffectedByTeamFailure> shardsAffectedByTeamFailure, Reference<AsyncVar<bool>> anyZeroHealthyTeams)
-		: cx(cx), masterId( masterId ), dbSizeEstimate( new AsyncVar<int64_t>() ),
+	DataDistributionTracker(Database cx, UID distributorId, Promise<Void> const& readyToStart, PromiseStream<RelocateShard> const& output, Reference<ShardsAffectedByTeamFailure> shardsAffectedByTeamFailure, Reference<AsyncVar<bool>> anyZeroHealthyTeams)
+		: cx(cx), distributorId( distributorId ), dbSizeEstimate( new AsyncVar<int64_t>() ),
 		maxShardSize( new AsyncVar<Optional<int64_t>>() ),
 		sizeChanges(false), readyToStart(readyToStart), output( output ), shardsAffectedByTeamFailure(shardsAffectedByTeamFailure), anyZeroHealthyTeams(anyZeroHealthyTeams) {}

@@ -328,7 +328,7 @@ ACTOR Future<Void> shardSplitter(
 	int numShards = splitKeys.size() - 1;

 	if( g_random->random01() < 0.01 ) {
-		TraceEvent("RelocateShardStartSplitx100", self->masterId)
+		TraceEvent("RelocateShardStartSplitx100", self->distributorId)
 			.detail("Begin", printable(keys.begin))
 			.detail("End", printable(keys.end))
 			.detail("MaxBytes", shardBounds.max.bytes)
@@ -449,7 +449,7 @@ Future<Void> shardMerger(
 	//restarting shard tracker will derefenced values in the shard map, so make a copy
 	KeyRange mergeRange = merged;

-	TraceEvent("RelocateShardMergeMetrics", self->masterId)
+	TraceEvent("RelocateShardMergeMetrics", self->distributorId)
 		.detail("OldKeys", printable(keys))
 		.detail("NewKeys", printable(mergeRange))
 		.detail("EndingSize", endingStats.bytes)
@@ -495,7 +495,7 @@ ACTOR Future<Void> shardEvaluator(
 		}
 	}

-	/*TraceEvent("ShardEvaluator", self->masterId)
+	/*TraceEvent("ShardEvaluator", self->distributorId)
 		.detail("TrackerId", trackerID)
 		.detail("ShouldSplit", shouldSplit)
 		.detail("ShouldMerge", shouldMerge)
@@ -531,7 +531,7 @@ ACTOR Future<Void> shardTracker(
 	// Since maxShardSize will become present for all shards at once, avoid slow tasks with a short delay
 	wait( delay( 0, TaskDataDistribution ) );

-	/*TraceEvent("ShardTracker", self->masterId)
+	/*TraceEvent("ShardTracker", self->distributorId)
 		.detail("Begin", printable(keys.begin))
 		.detail("End", printable(keys.end))
 		.detail("TrackerID", trackerID)
@@ -571,7 +571,7 @@ void restartShardTrackers( DataDistributionTracker* self, KeyRangeRef keys, Opti
 	// we can use the old size if it is available. This will be the case when merging shards.
 	if( startingSize.present() ) {
 		ASSERT( ranges.size() == 1 );
-		/*TraceEvent("ShardTrackerSizePreset", self->masterId)
+		/*TraceEvent("ShardTrackerSizePreset", self->distributorId)
 			.detail("Keys", printable(keys))
 			.detail("Size", startingSize.get().metrics.bytes)
 			.detail("Merges", startingSize.get().merges);*/
@@ -589,7 +589,7 @@ void restartShardTrackers( DataDistributionTracker* self, KeyRangeRef keys, Opti

 ACTOR Future<Void> trackInitialShards(DataDistributionTracker *self, Reference<InitialDataDistribution> initData)
 {
-	TraceEvent("TrackInitialShards", self->masterId).detail("InitialShardCount", initData->shards.size());
+	TraceEvent("TrackInitialShards", self->distributorId).detail("InitialShardCount", initData->shards.size());

 	//This line reduces the priority of shard initialization to prevent interference with failure monitoring.
 	//SOMEDAY: Figure out what this priority should actually be
@@ -659,9 +659,9 @@ ACTOR Future<Void> dataDistributionTracker(
 	FutureStream<Promise<int64_t>> getAverageShardBytes,
 	Promise<Void> readyToStart,
 	Reference<AsyncVar<bool>> anyZeroHealthyTeams,
-	UID masterId)
+	UID distributorId)
 {
-	state DataDistributionTracker self(cx, masterId, readyToStart, output, shardsAffectedByTeamFailure, anyZeroHealthyTeams);
+	state DataDistributionTracker self(cx, distributorId, readyToStart, output, shardsAffectedByTeamFailure, anyZeroHealthyTeams);
 	state Future<Void> loggingTrigger = Void();
 	try {
 		wait( trackInitialShards( &self, initData ) );
@@ -672,7 +672,7 @@ ACTOR Future<Void> dataDistributionTracker(
 				req.send( self.maxShardSize->get().get() / 2 );
 			}
 			when( wait( loggingTrigger ) ) {
-				TraceEvent("DDTrackerStats", self.masterId)
+				TraceEvent("DDTrackerStats", self.distributorId)
 					.detail("Shards", self.shards.size())
 					.detail("TotalSizeBytes", self.dbSizeEstimate->get())
 					.trackLatest( "DDTrackerStats" );
@@ -685,7 +685,7 @@ ACTOR Future<Void> dataDistributionTracker(
 			when( wait( self.sizeChanges.getResult() ) ) {}
 		}
 	} catch (Error& e) {
-		TraceEvent(SevError, "DataDistributionTrackerError", self.masterId).error(e);
+		TraceEvent(SevError, "DataDistributionTrackerError", self.distributorId).error(e);
 		throw e;
 	}
 }

@@ -0,0 +1,75 @@
+/*
+ * DataDistributorInterface.h
+ *
+ * This source file is part of the FoundationDB open source project
+ *
+ * Copyright 2013-2019 Apple Inc. and the FoundationDB project authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FDBSERVER_DATADISTRIBUTORINTERFACE_H
+#define FDBSERVER_DATADISTRIBUTORINTERFACE_H
+
+#include "fdbrpc/fdbrpc.h"
+#include "fdbrpc/Locality.h"
+
+struct DataDistributorInterface {
+	RequestStream<ReplyPromise<Void>> waitFailure;
+	RequestStream<struct GetRateInfoRequest> getRateInfo;
+	struct LocalityData locality;
+
+	DataDistributorInterface() {}
+	explicit DataDistributorInterface(const struct LocalityData& l) : locality(l) {}
+
+	void initEndpoints() {}
+	UID id() const { return getRateInfo.getEndpoint().token; }
+	NetworkAddress address() const { return getRateInfo.getEndpoint().getPrimaryAddress(); }
+	bool operator== (const DataDistributorInterface& r) const {
+		return id() == r.id();
+	}
+	bool operator!= (const DataDistributorInterface& r) const {
+		return !(*this == r);
+	}
+
+	template <class Archive>
+	void serialize(Archive& ar) {
+		serializer(ar, waitFailure, getRateInfo, locality);
+	}
+};
+
+struct GetRateInfoRequest {
+	UID requesterID;
+	int64_t totalReleasedTransactions;
+	ReplyPromise<struct GetRateInfoReply> reply;
+
+	GetRateInfoRequest() {}
+	GetRateInfoRequest( UID const& requesterID, int64_t totalReleasedTransactions ) : requesterID(requesterID), totalReleasedTransactions(totalReleasedTransactions) {}
+
+	template <class Ar>
+	void serialize(Ar& ar) {
+		serializer(ar, requesterID, totalReleasedTransactions, reply);
+	}
+};
+
+struct GetRateInfoReply {
+	double transactionRate;
+	double leaseDuration;
+
+	template <class Ar>
+	void serialize(Ar& ar) {
+		serializer(ar, transactionRate, leaseDuration);
+	}
+};
+
+#endif //FDBSERVER_DATADISTRIBUTORINTERFACE_H

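For illustration only (a hypothetical caller, not part of this commit), a process that knows the distributor's interface could request a rate lease over the getRateInfo stream roughly like this:

	ACTOR Future<Void> fetchRateInfo( DataDistributorInterface dd, UID requesterId, int64_t releasedSoFar ) {
		// Round-trip one GetRateInfoRequest and log the lease that came back.
		GetRateInfoReply rep = wait( dd.getRateInfo.getReply( GetRateInfoRequest( requesterId, releasedSoFar ) ) );
		TraceEvent("FetchedRateInfo", requesterId)
			.detail("TransactionRate", rep.transactionRate)
			.detail("LeaseDuration", rep.leaseDuration);
		return Void();
	}
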
@@ -22,6 +22,7 @@
 #include "fdbrpc/IAsyncFile.h"
 #include "fdbserver/Knobs.h"
 #include "fdbrpc/simulator.h"
+#include "flow/genericactors.actor.h"
 #include "flow/actorcompiler.h" // This must be the last #include.

 typedef bool(*compare_pages)(void*,void*);

@@ -109,7 +110,36 @@ private:
 	}
 };

-class RawDiskQueue_TwoFiles {
+// We use a Tracked instead of a Reference when the shutdown/destructor code would need to wait().
+template <typename T>
+class Tracked {
+protected:
+	struct TrackMe : NonCopyable {
+		T* self;
+		explicit TrackMe( T* self ) : self(self) {
+			self->actorCount++;
+			if (self->actorCount == 1) self->actorCountIsZero.set(false);
+		}
+		~TrackMe() {
+			self->actorCount--;
+			if (self->actorCount == 0) self->actorCountIsZero.set(true);
+		}
+	};
+
+	Future<Void> onSafeToDestruct() {
+		if (actorCountIsZero.get()) {
+			return Void();
+		} else {
+			return actorCountIsZero.onChange();
+		}
+	}
+
+private:
+	int actorCount = 0;
+	AsyncVar<bool> actorCountIsZero = true;
+};
+
+class RawDiskQueue_TwoFiles : public Tracked<RawDiskQueue_TwoFiles> {
 public:
 	RawDiskQueue_TwoFiles( std::string basename, std::string fileExtension, UID dbgid, int64_t fileSizeWarningLimit )
 		: basename(basename), fileExtension(fileExtension), onError(delayed(error.getFuture())), onStopped(stopped.getFuture()),

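The Tracked<T> mixin generalizes the per-class TrackMe bookkeeping that the old recoveryActorCount map provided: each in-flight actor holds a TrackMe, and shutdown code waits on onSafeToDestruct() before deleting the object. A minimal sketch of the pattern with a hypothetical class (not from this commit):

	class Foo : public Tracked<Foo> {
	public:
		ACTOR static Future<Void> doWork( Foo* self ) {
			state TrackMe trackMe( self );     // keeps actorCount above zero while this actor is alive
			wait( delay( 1.0 ) );
			return Void();
		}
		ACTOR static void shutdown( Foo* self ) {
			wait( self->onSafeToDestruct() );  // resolves only once every outstanding TrackMe is gone
			delete self;
		}
	};
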
@@ -121,6 +151,13 @@ public:
 		fileExtensionBytes = 8<<10;
 		files[0].dbgFilename = filename(0);
 		files[1].dbgFilename = filename(1);
+		// We issue reads into firstPages, so it needs to be 4k aligned.
+		firstPages.reserve(firstPages.arena(), 2);
+		void* pageMemory = operator new (sizeof(Page) * 3, firstPages.arena());
+		firstPages[0] = (Page*)((((uintptr_t)pageMemory + 4095) / 4096) * 4096);
+		memset(firstPages[0], 0, sizeof(Page));
+		firstPages[1] = (Page*)((uintptr_t)firstPages[0] + 4096);
+		memset(firstPages[1], 0, sizeof(Page));
 		stallCount.init(LiteralStringRef("RawDiskQueue.StallCount"));
 	}

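The added alignment arithmetic rounds the arena allocation up to the next 4096-byte boundary; reserving three pages' worth of memory guarantees that two aligned pages always fit, wherever the allocation happens to start. A worked example (illustrative values only):

	// If pageMemory were 0x1003, then ((0x1003 + 4095) / 4096) * 4096 == 0x2000,
	// so firstPages[0] sits at 0x2000 and firstPages[1] at 0x3000, both 4k-aligned.
	// The round-up wastes at most just under one page, which the third page absorbs.
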
@@ -143,6 +180,8 @@ public:

 	Future<Void> setPoppedPage( int file, int64_t page, int64_t debugSeq ) { return setPoppedPage(this, file, page, debugSeq); }

+	// FIXME: let the caller pass in where to write the data.
+	Future<Standalone<StringRef>> read(int file, int page, int nPages) { return read(this, file, page, nPages); }
 	Future<Standalone<StringRef>> readNextPage() { return readNextPage(this); }
 	Future<Void> truncateBeforeLastReadPage() { return truncateBeforeLastReadPage(this); }

@@ -178,6 +217,7 @@ public:
 		}
 	};
 	File files[2]; // After readFirstAndLastPages(), files[0] is logically before files[1] (pushes are always into files[1])
+	Standalone<VectorRef<Page*>> firstPages;

 	std::string basename;
 	std::string fileExtension;
@@ -202,20 +242,8 @@ public:

 	int64_t fileExtensionBytes;

-	AsyncMap<bool,int> recoveryActorCount;
-
 	Int64MetricHandle stallCount;

-	struct TrackMe : NonCopyable {
-		RawDiskQueue_TwoFiles* self;
-		TrackMe( RawDiskQueue_TwoFiles* self ) : self(self) {
-			self->recoveryActorCount.set(false, self->recoveryActorCount.get(false)+1);
-		}
-		~TrackMe() {
-			self->recoveryActorCount.set(false, self->recoveryActorCount.get(false)-1);
-		}
-	};
-
 	Future<Void> truncateFile(int file, int64_t pos) { return truncateFile(this, file, pos); }

 	Future<Void> push(StringRef pageData, vector<Reference<SyncQueue>>& toSync) {
@@ -243,6 +271,7 @@ public:

 			dbg_file0BeginSeq += files[0].size;
 			std::swap(files[0], files[1]);
+			std::swap(firstPages[0], firstPages[1]);
 			files[1].popped = 0;
 			writingPos = 0;
 		} else {
@@ -259,6 +288,10 @@ public:
 			}
 		}

+		if (writingPos == 0) {
+			*firstPages[1] = *(const Page*)pageData.begin();
+		}
+
 		/*TraceEvent("RDQWrite", this->dbgid).detail("File1name", files[1].dbgFilename).detail("File1size", files[1].size)
 			.detail("WritingPos", writingPos).detail("WritingBytes", pageData.size());*/
 		files[1].size = std::max( files[1].size, writingPos + pageData.size() );
@@ -299,7 +332,9 @@ public:
 		TEST(2==syncFiles.size()); // push spans both files
 		wait( pushed );

-		delete pageMem;
+		if (!g_network->isSimulated()) {
+			delete pageMem;
+		}
 		pageMem = 0;

 		Future<Void> sync = syncFiles[0]->onSync();
@@ -320,7 +355,9 @@ public:

 			committed.send(Void());
 		} catch (Error& e) {
-			delete pageMem;
+			if (!g_network->isSimulated()) {
+				delete pageMem;
+			}
 			TEST(true); // push error
 			TEST(2==syncFiles.size()); // push spanning both files error
 			TraceEvent(SevError, "RDQPushAndCommitError", dbgid).error(e, true).detail("InitialFilename0", filename);
@@ -411,8 +448,7 @@ public:
 		state Error error = success();
 		try {
 			wait(success(errorOr(self->lastCommit)));
-			while (self->recoveryActorCount.get(false))
-				wait( self->recoveryActorCount.onChange(false) );
+			wait( self->onSafeToDestruct() );

 			for(int i=0; i<2; i++)
 				self->files[i].f.clear();
@@ -443,12 +479,8 @@ public:

 	ACTOR static UNCANCELLABLE Future<Standalone<StringRef>> readFirstAndLastPages(RawDiskQueue_TwoFiles* self, compare_pages compare) {
 		state TrackMe trackMe(self);
-		state StringBuffer result( self->dbgid );

 		try {
-			result.alignReserve( sizeof(Page), sizeof(Page)*3 );
-			state Page* firstPage = (Page*)result.append(sizeof(Page)*3);
-
 			// Open both files or create both files
 			wait( openFiles(self) );

@@ -464,20 +496,19 @@ public:
 			}

 			// Read the first pages
-			memset(firstPage, 0, sizeof(Page)*2);
 			vector<Future<int>> reads;
 			for(int i=0; i<2; i++)
 				if( self->files[i].size > 0)
-					reads.push_back( self->files[i].f->read( &firstPage[i], sizeof(Page), 0 ) );
+					reads.push_back( self->files[i].f->read( self->firstPages[i], sizeof(Page), 0 ) );
 			wait( waitForAll(reads) );

 			// Determine which file comes first
-			if ( compare( &firstPage[1], &firstPage[0] ) ) {
-				std::swap( firstPage[0], firstPage[1] );
+			if ( compare( self->firstPages[1], self->firstPages[0] ) ) {
+				std::swap( self->firstPages[0], self->firstPages[1] );
 				std::swap( self->files[0], self->files[1] );
 			}

-			if ( !compare( &firstPage[1], &firstPage[1] ) ) {
+			if ( !compare( self->firstPages[1], self->firstPages[1] ) ) {
 				// Both files are invalid... the queue is empty!
 				// Begin pushing at the beginning of files[1]

@@ -498,12 +529,13 @@ public:
 				return Standalone<StringRef>();
 			}

-			// A page in files[1] is "valid" iff compare(&firstPage[1], page)
+			// A page in files[1] is "valid" iff compare(self->firstPages[1], page)
 			// Binary search to find a page in files[1] that is "valid" but the next page is not valid
 			// Invariant: the page at begin is valid, and the page at end is invalid
 			state int64_t begin = 0;
 			state int64_t end = self->files[1].size/sizeof(Page);
-			state Page *middlePage = &firstPage[2];
+			state Standalone<StringRef> middlePageAllocation = makeAlignedString(sizeof(Page), sizeof(Page));
+			state Page *middlePage = (Page*)middlePageAllocation.begin();
 			while ( begin + 1 != end ) {
 				state int64_t middle = (begin+end)/2;
 				ASSERT( middle > begin && middle < end ); // So the loop always changes begin or end
@@ -511,7 +543,7 @@ public:
 				int len = wait( self->files[1].f->read( middlePage, sizeof(Page), middle*sizeof(Page) ) );
 				ASSERT( len == sizeof(Page) );

-				bool middleValid = compare( &firstPage[1], middlePage );
+				bool middleValid = compare( self->firstPages[1], middlePage );

 				TraceEvent("RDQBS", self->dbgid).detail("Begin", begin).detail("End", end).detail("Middle", middle).detail("Valid", middleValid).detail("File0Name", self->files[0].dbgFilename);

@@ -522,16 +554,16 @@ public:
 			}
 			// Now by the invariant and the loop condition, begin is a valid page and begin+1 is an invalid page
 			// Check that begin+1 is invalid
-			int len = wait( self->files[1].f->read( &firstPage[2], sizeof(Page), (begin+1)*sizeof(Page) ) );
-			ASSERT( !(len == sizeof(Page) && compare( &firstPage[1], &firstPage[2] )) );
+			int len1 = wait( self->files[1].f->read( middlePage, sizeof(Page), (begin+1)*sizeof(Page) ) );
+			ASSERT( !(len1 == sizeof(Page) && compare( self->firstPages[1], middlePage )) );

 			// Read it
-			int len = wait( self->files[1].f->read( &firstPage[2], sizeof(Page), begin*sizeof(Page) ) );
-			ASSERT( len == sizeof(Page) && compare( &firstPage[1], &firstPage[2] ) );
+			int len2 = wait( self->files[1].f->read( middlePage, sizeof(Page), begin*sizeof(Page) ) );
+			ASSERT( len2 == sizeof(Page) && compare( self->firstPages[1], middlePage ) );

 			TraceEvent("RDQEndFound", self->dbgid).detail("File0Name", self->files[0].dbgFilename).detail("Pos", begin).detail("FileSize", self->files[1].size);

-			return result.str;
+			return middlePageAllocation;
 		} catch (Error& e) {
 			bool ok = e.code() == error_code_file_not_found;
 			TraceEvent(ok ? SevInfo : SevError, "RDQReadFirstAndLastPagesError", self->dbgid).error(e, true).detail("File0Name", self->files[0].dbgFilename);

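Only the scratch page changes in this hunk; the search itself keeps the same invariant-based shape. For illustration (a generic sketch, not from this commit), assuming valid(0) is true and valid(end) is false:

	int64_t begin = 0, end = numPages;        // invariant: valid(begin) && !valid(end)
	while (begin + 1 != end) {
		int64_t middle = (begin + end) / 2;   // begin < middle < end, so the range always shrinks
		if (valid(middle)) begin = middle; else end = middle;
	}
	// On exit, begin is the last valid page and begin + 1 is the first invalid one.
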
@ -540,6 +572,16 @@ public:
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
ACTOR static Future<Standalone<StringRef>> read(RawDiskQueue_TwoFiles* self, int file, int pageOffset, int nPages) {
|
||||||
|
state TrackMe trackMe(self);
|
||||||
|
state const size_t bytesRequested = nPages * sizeof(Page);
|
||||||
|
state Standalone<StringRef> result = makeAlignedString(sizeof(Page), bytesRequested);
|
||||||
|
if (file == 1) ASSERT_WE_THINK(pageOffset * sizeof(Page) + bytesRequested <= self->writingPos );
|
||||||
|
int bytesRead = wait( self->files[file].f->read( mutateString(result), bytesRequested, pageOffset*sizeof(Page) ) );
|
||||||
|
ASSERT_WE_THINK(bytesRead == bytesRequested);
|
||||||
|
return result;
|
||||||
|
}
|
||||||
|
|
||||||
Future<int> fillReadingBuffer() {
|
Future<int> fillReadingBuffer() {
|
||||||
// If we're right at the end of a file...
|
// If we're right at the end of a file...
|
||||||
if ( readingPage*sizeof(Page) >= (size_t)files[readingFile].size ) {
|
if ( readingPage*sizeof(Page) >= (size_t)files[readingFile].size ) {
|
||||||
|
@ -599,6 +641,9 @@ public:
|
||||||
state TrackMe trackMe(self);
|
state TrackMe trackMe(self);
|
||||||
TraceEvent("DQTruncateFile", self->dbgid).detail("File", file).detail("Pos", pos).detail("File0Name", self->files[0].dbgFilename);
|
TraceEvent("DQTruncateFile", self->dbgid).detail("File", file).detail("Pos", pos).detail("File0Name", self->files[0].dbgFilename);
|
||||||
state Reference<IAsyncFile> f = self->files[file].f; // Hold onto a reference in the off-chance that the DQ is removed from underneath us.
|
state Reference<IAsyncFile> f = self->files[file].f; // Hold onto a reference in the off-chance that the DQ is removed from underneath us.
|
||||||
|
if (pos == 0) {
|
||||||
|
memset(self->firstPages[file], 0, _PAGE_SIZE);
|
||||||
|
}
|
||||||
wait( f->zeroRange( pos, self->files[file].size-pos ) );
|
wait( f->zeroRange( pos, self->files[file].size-pos ) );
|
||||||
wait(self->files[file].syncQueue->onSync());
|
wait(self->files[file].syncQueue->onSync());
|
||||||
// We intentionally don't return the f->zero future, so that TrackMe is destructed after f->zero finishes.
|
// We intentionally don't return the f->zero future, so that TrackMe is destructed after f->zero finishes.
|
||||||
|
@ -629,6 +674,7 @@ public:
|
||||||
|
|
||||||
if (swap) {
|
if (swap) {
|
||||||
std::swap(self->files[0], self->files[1]);
|
std::swap(self->files[0], self->files[1]);
|
||||||
|
std::swap(self->firstPages[0], self->firstPages[1]);
|
||||||
self->files[0].popped = self->files[0].size;
|
self->files[0].popped = self->files[0].size;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -641,11 +687,12 @@ public:
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
class DiskQueue : public IDiskQueue {
|
class DiskQueue : public IDiskQueue, public Tracked<DiskQueue> {
|
||||||
public:
|
public:
|
||||||
|
// FIXME: Is setting lastCommittedSeq to -1 instead of 0 necessary?
|
||||||
DiskQueue( std::string basename, std::string fileExtension, UID dbgid, int64_t fileSizeWarningLimit )
|
DiskQueue( std::string basename, std::string fileExtension, UID dbgid, int64_t fileSizeWarningLimit )
|
||||||
: rawQueue( new RawDiskQueue_TwoFiles(basename, fileExtension, dbgid, fileSizeWarningLimit) ), dbgid(dbgid), anyPopped(false), nextPageSeq(0), poppedSeq(0), lastPoppedSeq(0),
|
: rawQueue( new RawDiskQueue_TwoFiles(basename, fileExtension, dbgid, fileSizeWarningLimit) ), dbgid(dbgid), anyPopped(false), nextPageSeq(0), poppedSeq(0), lastPoppedSeq(0),
|
||||||
nextReadLocation(-1), readBufPage(NULL), readBufPos(0), pushed_page_buffer(NULL), recovered(false), lastCommittedSeq(0), warnAlwaysForMemory(true)
|
nextReadLocation(-1), readBufPage(NULL), readBufPos(0), pushed_page_buffer(NULL), recovered(false), initialized(false), lastCommittedSeq(-1), warnAlwaysForMemory(true)
|
||||||
{
|
{
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -666,6 +713,7 @@ public:
|
||||||
}
|
}
|
||||||
return endLocation();
|
return endLocation();
|
||||||
}
|
}
|
||||||
|
|
||||||
virtual void pop( location upTo ) {
|
virtual void pop( location upTo ) {
|
||||||
ASSERT( !upTo.hi );
|
ASSERT( !upTo.hi );
|
||||||
ASSERT( !recovered || upTo.lo <= endLocation() );
|
ASSERT( !recovered || upTo.lo <= endLocation() );
@@ -685,6 +733,8 @@ public:
 		}
 	}
 
+	virtual Future<Standalone<StringRef>> read(location from, location to) { return read(this, from, to); }
+
 	int getMaxPayload() {
 		return Page::maxPayload;
 	}
@@ -728,6 +778,9 @@ public:
 
 		lastCommittedSeq = backPage().endSeq();
 		auto f = rawQueue->pushAndCommit( pushed_page_buffer->ref(), pushed_page_buffer, poppedSeq/sizeof(Page) - lastPoppedSeq/sizeof(Page) );
+		if (g_network->isSimulated()) {
+			verifyCommit(this, f, pushed_page_buffer, ((Page*)pushed_page_buffer->ref().begin())->seq, lastCommittedSeq);
+		}
 		lastPoppedSeq = poppedSeq;
 		pushed_page_buffer = 0;
 		return f;
@@ -737,17 +790,27 @@ public:
 		rawQueue->stall();
 	}
 
+	virtual Future<bool> initializeRecovery() { return initializeRecovery( this ); }
 	virtual Future<Standalone<StringRef>> readNext( int bytes ) { return readNext(this, bytes); }
 
+	// FIXME: getNextReadLocation should ASSERT( initialized ), but the memory storage engine needs
+	// to be changed to understand the new intiailizeRecovery protocol.
 	virtual location getNextReadLocation() { return nextReadLocation; }
 
 	virtual Future<Void> getError() { return rawQueue->getError(); }
 	virtual Future<Void> onClosed() { return rawQueue->onClosed(); }
 
 	virtual void dispose() {
 		TraceEvent("DQDestroy", dbgid).detail("LastPoppedSeq", lastPoppedSeq).detail("PoppedSeq", poppedSeq).detail("NextPageSeq", nextPageSeq).detail("File0Name", rawQueue->files[0].dbgFilename);
-		rawQueue->dispose();
-		delete this;
+		dispose(this);
 	}
+	ACTOR static void dispose(DiskQueue* self) {
+		wait( self->onSafeToDestruct() );
+		TraceEvent("DQDestroyDone", self->dbgid).detail("File0Name", self->rawQueue->files[0].dbgFilename);
+		self->rawQueue->dispose();
+		delete self;
+	}
 
 	virtual void close() {
 		TraceEvent("DQClose", dbgid)
 			.detail("LastPoppedSeq", lastPoppedSeq)
@@ -755,8 +818,13 @@ public:
 			.detail("NextPageSeq", nextPageSeq)
 			.detail("PoppedCommitted", rawQueue->dbg_file0BeginSeq + rawQueue->files[0].popped + rawQueue->files[1].popped)
 			.detail("File0Name", rawQueue->files[0].dbgFilename);
-		rawQueue->close();
-		delete this;
+		close(this);
+	}
+	ACTOR static void close(DiskQueue* self) {
+		wait( self->onSafeToDestruct() );
+		TraceEvent("DQCloseDone", self->dbgid).detail("File0Name", self->rawQueue->files[0].dbgFilename);
+		self->rawQueue->close();
+		delete self;
 	}
 
 	virtual StorageBytes getStorageBytes() {
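
The dispose() and close() changes above defer deleting the DiskQueue until every outstanding operation has drained: the new ACTOR helpers wait on onSafeToDestruct() before deleting the object. A minimal, hedged sketch of that idea in plain C++, using a simple in-flight counter instead of Flow's TrackMe/actor machinery (all names below are illustrative, not the FoundationDB API):

    #include <iostream>

    // Illustrative only: defer destruction until every in-flight operation has finished.
    class Queue {
    public:
        void beginOp() { ++outstanding; }
        void endOp()   { if (--outstanding == 0 && closing) finishClose(); }
        void close()   { closing = true; if (outstanding == 0) finishClose(); }
    private:
        void finishClose() { std::cout << "safe to destruct\n"; delete this; }
        int outstanding = 0;
        bool closing = false;
    };

    int main() {
        Queue* q = new Queue();
        q->beginOp();
        q->close();   // deletion is deferred: an operation is still in flight
        q->endOp();   // last operation completes, the queue deletes itself
    }
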
@@ -828,6 +896,127 @@ private:
 		}
 	}
 
+	ACTOR static void verifyCommit(DiskQueue* self, Future<Void> commitSynced, StringBuffer* buffer, loc_t start, loc_t end) {
+		state TrackMe trackme(self);
+		try {
+			wait( commitSynced );
+			Standalone<StringRef> pagedData = wait( readPages(self, start, end) );
+			const int startOffset = start % _PAGE_SIZE;
+			const int dataLen = end - start;
+			ASSERT( pagedData.substr(startOffset, dataLen).compare( buffer->ref().substr(0, dataLen) ) == 0 );
+		} catch (Error& e) {
+			if (e.code() != error_code_io_error) {
+				delete buffer;
+				throw;
+			}
+		}
+		delete buffer;
+	}
+
+	ACTOR static Future<Standalone<StringRef>> readPages(DiskQueue *self, location start, location end) {
+		state TrackMe trackme(self);
+		state int fromFile;
+		state int toFile;
+		state int64_t fromPage;
+		state int64_t toPage;
+		state uint64_t file0size = self->firstPages(1).seq - self->firstPages(0).seq;
+		ASSERT(end > start);
+		ASSERT(start.lo >= self->firstPages(0).seq);
+		self->findPhysicalLocation(start.lo, &fromFile, &fromPage, nullptr);
+		self->findPhysicalLocation(end.lo-1, &toFile, &toPage, nullptr);
+		if (fromFile == 0) { ASSERT( fromPage < file0size / _PAGE_SIZE ); }
+		if (toFile == 0) { ASSERT( toPage < file0size / _PAGE_SIZE ); }
+		// FIXME I think there's something with nextReadLocation we can do here when initialized && !recovered.
+		if (fromFile == 1 && self->recovered) { ASSERT( fromPage < self->rawQueue->writingPos / _PAGE_SIZE ); }
+		if (toFile == 1 && self->recovered) { ASSERT( toPage < self->rawQueue->writingPos / _PAGE_SIZE ); }
+		if (fromFile == toFile) {
+			ASSERT(toPage >= fromPage);
+			Standalone<StringRef> pagedData = wait( self->rawQueue->read( fromFile, fromPage, toPage - fromPage + 1 ) );
+			if ( self->firstPages(0).seq > start.lo ) {
+				// Simulation allows for reads to be delayed and executed after overlapping subsequent
+				// write operations.  This means that by the time our read was executed, it's possible
+				// that both disk queue files have been completely overwritten.
+				// I'm not clear what is the actual contract for read/write in this case, so simulation
+				// might be a bit overly aggressive here, but it's behavior we need to tolerate.
+				throw io_error();
+			}
+			ASSERT( ((Page*)pagedData.begin())->seq == start.lo / _PAGE_SIZE * _PAGE_SIZE );
+			ASSERT(pagedData.size() == (toPage - fromPage + 1) * _PAGE_SIZE );
+
+			ASSERT( ((Page*)pagedData.end() - 1)->seq == (end.lo - 1) / _PAGE_SIZE * _PAGE_SIZE );
+			return pagedData;
+		} else {
+			ASSERT(fromFile == 0);
+			state Standalone<StringRef> firstChunk;
+			state Standalone<StringRef> secondChunk;
+			wait( store(firstChunk, self->rawQueue->read( fromFile, fromPage, ( file0size / sizeof(Page) ) - fromPage )) &&
+			      store(secondChunk, self->rawQueue->read( toFile, 0, toPage + 1 )) );
+			if ( self->firstPages(0).seq > start.lo ) {
+				// See above.
+				throw io_error();
+			}
+			ASSERT(firstChunk.size() == ( ( file0size / sizeof(Page) ) - fromPage ) * _PAGE_SIZE );
+			ASSERT( ((Page*)firstChunk.begin())->seq == start.lo / _PAGE_SIZE * _PAGE_SIZE );
+			ASSERT(secondChunk.size() == (toPage + 1) * _PAGE_SIZE);
+			ASSERT( ((Page*)secondChunk.end() - 1)->seq == (end.lo - 1) / _PAGE_SIZE * _PAGE_SIZE );
+			return firstChunk.withSuffix(secondChunk);
+		}
+	}
+
+	ACTOR static Future<Standalone<StringRef>> read(DiskQueue *self, location start, location end) {
+		// This `state` is unnecessary, but works around pagedData wrongly becoming const
+		// due to the actor compiler.
+		state Standalone<StringRef> pagedData = wait(readPages(self, start, end));
+		ASSERT(start.lo % sizeof(Page) == 0 ||
+		       start.lo % sizeof(Page) >= sizeof(PageHeader));
+		int startingOffset = start.lo % sizeof(Page);
+		if (startingOffset > 0) startingOffset -= sizeof(PageHeader);
+		ASSERT(end.lo % sizeof(Page) == 0 ||
+		       end.lo % sizeof(Page) > sizeof(PageHeader));
+		int endingOffset = end.lo % sizeof(Page);
+		if (endingOffset == 0) endingOffset = sizeof(Page);
+		if (endingOffset > 0) endingOffset -= sizeof(PageHeader);
+
+		if ((end.lo-1)/sizeof(Page)*sizeof(Page) == start.lo/sizeof(Page)*sizeof(Page)) {
+			// start and end are on the same page
+			ASSERT(pagedData.size() == sizeof(Page));
+			pagedData.contents() = pagedData.substr(sizeof(PageHeader) + startingOffset, endingOffset - startingOffset);
+			return pagedData;
+		} else {
+			// FIXME: This allocation is excessive and unnecessary.  We know the overhead per page that
+			// we'll be stripping out (sizeof(PageHeader)), so we should be able to do a smaller
+			// allocation.  But we should be able to re-use the space allocated for pagedData, which
+			// would mean not having to allocate 2x the space for a read.
+			Standalone<StringRef> unpagedData = makeString(pagedData.size());
+			uint8_t *buf = mutateString(unpagedData);
+			memset(buf, 0, unpagedData.size());
+			const Page *data = reinterpret_cast<const Page*>(pagedData.begin());
+
+			// Only start copying from `start` in the first page.
+			if( data->payloadSize > startingOffset ) {
+				memcpy(buf, data->payload+startingOffset, data->payloadSize-startingOffset);
+				buf += data->payloadSize-startingOffset;
+			}
+			data++;
+
+			// Copy all the middle pages
+			while (data->seq != ((end.lo-1)/sizeof(Page)*sizeof(Page))) {
+				// These pages can have varying amounts of data, as pages with partial
+				// data will be zero-filled when commit is called.
+				memcpy(buf, data->payload, data->payloadSize);
+				buf += data->payloadSize;
+				data++;
+			}
+
+			// Copy only until `end` in the last page.
+			memcpy(buf, data->payload, std::min(endingOffset, data->payloadSize));
+			buf += std::min(endingOffset, data->payloadSize);
+
+			unpagedData.contents() = unpagedData.substr(0, buf - unpagedData.begin());
+			return unpagedData;
+		}
+	}
+
 	void readFromBuffer( StringBuffer* result, int* bytes ) {
 		// extract up to bytes from readBufPage into result
 		int len = std::min( readBufPage->payloadSize - readBufPos, *bytes );
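
The new read() actor above converts byte locations in the pushed stream into offsets inside fixed-size pages by stripping a page header from each page before copying payload bytes out. A small standalone sketch of the same offset arithmetic, with an assumed 4096-byte page and a made-up header size (the real values come from DiskQueue's Page/PageHeader types):

    #include <cassert>
    #include <cstdint>
    #include <iostream>

    // Assumed sizes for illustration only.
    constexpr int64_t PAGE_SIZE = 4096;
    constexpr int64_t HEADER_SIZE = 36;

    // Offset of a stream location within its page's payload, mirroring the logic in read().
    int64_t payloadOffset(int64_t loc) {
        int64_t off = loc % PAGE_SIZE;    // offset within the page, header included
        if (off > 0) off -= HEADER_SIZE;  // drop the header to get a payload offset
        return off;
    }

    int main() {
        assert(payloadOffset(0) == 0);                  // page boundary: payload starts at 0
        assert(payloadOffset(HEADER_SIZE + 10) == 10);  // ten payload bytes into the first page
        std::cout << payloadOffset(PAGE_SIZE + HEADER_SIZE + 100) << "\n"; // prints 100
    }
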
@@ -847,21 +1036,14 @@ private:
 
 		ASSERT( !self->recovered );
 
-		if (self->nextReadLocation < 0) {
-			bool nonempty = wait( findStart(self) );
-			if (!nonempty) {
-				// The constructor has already put everything in the right state for an empty queue
-				self->recovered = true;
-				ASSERT( self->poppedSeq <= self->endLocation() );
-
-				//The next read location isn't necessarily the end of the last commit, but this is sufficient for helping us check an ASSERTion
-				self->lastCommittedSeq = self->nextReadLocation;
+		if (!self->initialized) {
+			bool recoveryComplete = wait( initializeRecovery(self) );
+
+			if (recoveryComplete) {
+				ASSERT( self->poppedSeq <= self->endLocation() );
 
 				return Standalone<StringRef>();
 			}
-			self->readBufPos = self->nextReadLocation % sizeof(Page) - sizeof(PageHeader);
-			if (self->readBufPos < 0) { self->nextReadLocation -= self->readBufPos; self->readBufPos = 0; }
-			TraceEvent("DQRecStart", self->dbgid).detail("ReadBufPos", self->readBufPos).detail("NextReadLoc", self->nextReadLocation).detail("File0Name", self->rawQueue->files[0].dbgFilename);
 		}
 
 		loop {
@@ -909,7 +1091,6 @@ private:
 		TraceEvent("DQRecovered", self->dbgid).detail("LastPoppedSeq", self->lastPoppedSeq).detail("PoppedSeq", self->poppedSeq).detail("NextPageSeq", self->nextPageSeq).detail("File0Name", self->rawQueue->files[0].dbgFilename);
 		self->recovered = true;
 		ASSERT( self->poppedSeq <= self->endLocation() );
-		self->recoveryFirstPages = Standalone<StringRef>();
 
 		TEST( result.size() == 0 ); // End of queue at border between reads
 		TEST( result.size() != 0 ); // Partial read at end of queue
@@ -920,19 +1101,22 @@ private:
 		return result.str;
 	}
 
-	ACTOR static Future<bool> findStart( DiskQueue* self ) {
-		Standalone<StringRef> epbuf = wait( self->rawQueue->readFirstAndLastPages( &comparePages ) );
-		ASSERT( epbuf.size() % sizeof(Page) == 0 );
-		self->recoveryFirstPages = epbuf;
+	ACTOR static Future<bool> initializeRecovery( DiskQueue* self ) {
+		if (self->initialized) {
+			return self->recovered;
+		}
+		Standalone<StringRef> lastPageData = wait( self->rawQueue->readFirstAndLastPages( &comparePages ) );
+		self->initialized = true;
 
-		if (!epbuf.size()) {
+		if (!lastPageData.size()) {
 			// There are no valid pages, so apparently this is a completely empty queue
 			self->nextReadLocation = 0;
-			return false;
+			self->lastCommittedSeq = 0;
+			self->recovered = true;
+			return true;
 		}
 
-		int n = epbuf.size() / sizeof(Page);
-		Page* lastPage = (Page*)epbuf.end() - 1;
+		Page* lastPage = (Page*)lastPageData.begin();
 		self->nextReadLocation = self->poppedSeq = lastPage->popped;
 
 		/*
@@ -951,48 +1135,55 @@ private:
 		self->findPhysicalLocation( self->poppedSeq, &file, &page, "poppedSeq" );
 		self->rawQueue->setStartPage( file, page );
 
-		return true;
+		self->readBufPos = self->nextReadLocation % sizeof(Page) - sizeof(PageHeader);
+		if (self->readBufPos < 0) { self->nextReadLocation -= self->readBufPos; self->readBufPos = 0; }
+		TraceEvent("DQRecStart", self->dbgid).detail("ReadBufPos", self->readBufPos).detail("NextReadLoc", self->nextReadLocation).detail("File0Name", self->rawQueue->files[0].dbgFilename);
+
+		return false;
+	}
+
+	Page& firstPages(int i) {
+		ASSERT( initialized );
+		return *(Page*)rawQueue->firstPages[i];
 	}
 
 	void findPhysicalLocation( loc_t loc, int* file, int64_t* page, const char* context ) {
 		bool ok = false;
-		Page*p = (Page*)recoveryFirstPages.begin();
 
-		TraceEvent(SevInfo, "FindPhysicalLocation", dbgid)
-			.detail("RecoveryFirstPages", recoveryFirstPages.size())
-			.detail("Page0Valid", p[0].checkHash())
-			.detail("Page0Seq", p[0].seq)
-			.detail("Page1Valid", p[1].checkHash())
-			.detail("Page1Seq", p[1].seq)
-			.detail("Location", loc)
-			.detail("Context", context)
-			.detail("File0Name", rawQueue->files[0].dbgFilename);
+		if (context)
+			TraceEvent(SevInfo, "FindPhysicalLocation", dbgid)
+				.detail("Page0Valid", firstPages(0).checkHash())
+				.detail("Page0Seq", firstPages(0).seq)
+				.detail("Page1Valid", firstPages(1).checkHash())
+				.detail("Page1Seq", firstPages(1).seq)
+				.detail("Location", loc)
+				.detail("Context", context)
+				.detail("File0Name", rawQueue->files[0].dbgFilename);
 
-		for(int i=recoveryFirstPages.size() / sizeof(Page) - 2; i>=0; i--)
-			if ( p[i].checkHash() && p[i].seq <= (size_t)loc ) {
+		for(int i = 1; i >= 0; i--)
+			if ( firstPages(i).checkHash() && firstPages(i).seq <= (size_t)loc ) {
 				*file = i;
-				*page = (loc - p[i].seq)/sizeof(Page);
-				TraceEvent("FoundPhysicalLocation", dbgid)
-					.detail("PageIndex", i)
-					.detail("PageLocation", *page)
-					.detail("RecoveryFirstPagesSize", recoveryFirstPages.size())
-					.detail("SizeofPage", sizeof(Page))
-					.detail("PageSequence", p[i].seq)
-					.detail("Location", loc)
-					.detail("Context", context)
-					.detail("File0Name", rawQueue->files[0].dbgFilename);
+				*page = (loc - firstPages(i).seq)/sizeof(Page);
+				if (context)
+					TraceEvent("FoundPhysicalLocation", dbgid)
+						.detail("PageIndex", i)
+						.detail("PageLocation", *page)
+						.detail("SizeofPage", sizeof(Page))
+						.detail("PageSequence", firstPages(i).seq)
+						.detail("Location", loc)
+						.detail("Context", context)
+						.detail("File0Name", rawQueue->files[0].dbgFilename);
 				ok = true;
 				break;
 			}
 		if (!ok)
 			TraceEvent(SevError, "DiskQueueLocationError", dbgid)
-				.detail("RecoveryFirstPages", recoveryFirstPages.size())
-				.detail("Page0Valid", p[0].checkHash())
-				.detail("Page0Seq", p[0].seq)
-				.detail("Page1Valid", p[1].checkHash())
-				.detail("Page1Seq", p[1].seq)
+				.detail("Page0Valid", firstPages(0).checkHash())
+				.detail("Page0Seq", firstPages(0).seq)
+				.detail("Page1Valid", firstPages(1).checkHash())
+				.detail("Page1Seq", firstPages(1).seq)
 				.detail("Location", loc)
-				.detail("Context", context)
+				.detail("Context", context ? context : "")
 				.detail("File0Name", rawQueue->files[0].dbgFilename);
 		ASSERT( ok );
 	}
@@ -1025,11 +1216,11 @@ private:
 
 	// Recovery state
 	bool recovered;
+	bool initialized;
 	loc_t nextReadLocation;
 	Arena readBufArena;
 	Page* readBufPage;
 	int readBufPos;
-	Standalone<StringRef> recoveryFirstPages;
 };
 
 //A class wrapping DiskQueue which durably allows uncommitted data to be popped
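
findPhysicalLocation() above maps a logical sequence location onto one of the two queue files by comparing it against each file's first-page sequence number and dividing by the page size (the real code also validates the page checksum first). A hedged, self-contained restatement of that mapping; the constants and the two-file layout are assumptions made for the example:

    #include <cstdint>
    #include <iostream>
    #include <utility>

    constexpr int64_t PAGE_SIZE = 4096;   // illustrative; the real value is sizeof(Page)

    // firstSeq[i] is the sequence number of the first valid page in file i.
    // Returns {file, page index within that file} for a logical location.
    std::pair<int, int64_t> physicalLocation(int64_t loc, const int64_t firstSeq[2]) {
        for (int i = 1; i >= 0; i--) {
            if (firstSeq[i] >= 0 && firstSeq[i] <= loc) {
                return { i, (loc - firstSeq[i]) / PAGE_SIZE };
            }
        }
        return { -1, -1 }; // not found: the real code traces an error and asserts
    }

    int main() {
        int64_t firstSeq[2] = { 0, 100 * PAGE_SIZE };   // file 1 starts 100 pages in
        auto p = physicalLocation(105 * PAGE_SIZE + 10, firstSeq);
        std::cout << "file " << p.first << ", page " << p.second << "\n"; // file 1, page 5
    }
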
@@ -1048,10 +1239,13 @@ public:
 	void close() { queue->close(); delete this; }
 
 	//IDiskQueue
+	Future<bool> initializeRecovery() { return queue->initializeRecovery(); }
 	Future<Standalone<StringRef>> readNext( int bytes ) { return readNext(this, bytes); }
 
 	virtual location getNextReadLocation() { return queue->getNextReadLocation(); }
 
+	virtual Future<Standalone<StringRef>> read( location start, location end ) { return queue->read( start, end ); }
+
 	virtual location push( StringRef contents ) {
 		pushed = queue->push(contents);
 		return pushed;

fdbserver/IDiskQueue.h

@@ -41,11 +41,20 @@ public:
 	}
 	};
 
+	//! Find the first and last pages in the disk queue, and initialize invariants.
+	//!
+	//! Most importantly, most invariants only hold after this function returns, and
+	//! some functions assert that the IDiskQueue has been initialized.
+	//!
+	//! \returns True, if DiskQueue is now considered in a recovered state.
+	//!          False, if the caller should call readNext until recovered is true.
+	virtual Future<bool> initializeRecovery() = 0;
 	// Before calling push or commit, the caller *must* perform recovery by calling readNext() until it returns less than the requested number of bytes.
 	// Thereafter it may not be called again.
 	virtual Future<Standalone<StringRef>> readNext( int bytes ) = 0; // Return the next bytes in the queue (beginning, the first time called, with the first unpopped byte)
 	virtual location getNextReadLocation() = 0; // Returns a location >= the location of all bytes previously returned by readNext(), and <= the location of all bytes subsequently returned
 
+	virtual Future<Standalone<StringRef>> read( location start, location end ) = 0;
 	virtual location push( StringRef contents ) = 0; // Appends the given bytes to the byte stream.  Returns a location token representing the *end* of the contents.
 	virtual void pop( location upTo ) = 0; // Removes all bytes before the given location token from the byte stream.
 	virtual Future<Void> commit() = 0; // returns when all prior pushes and pops are durable.  If commit does not return (due to close or a crash), any prefix of the pushed bytes and any prefix of the popped bytes may be durable.
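
The new initializeRecovery() comment above describes a two-phase recovery: initialize first, and only if that call does not already report a recovered queue, drain readNext() until it returns less than the requested byte count. A minimal sketch of that calling convention against a simplified, synchronous stand-in for the interface (the SimpleQueue type, chunk size, and byte counts are invented for the example; the real interface returns Flow futures):

    #include <cstddef>
    #include <string>

    // Simplified synchronous stand-in for IDiskQueue's recovery-related calls.
    struct SimpleQueue {
        bool initializeRecovery() { return contents.empty(); }  // true: already recovered (empty queue)
        std::string readNext(size_t bytes) {                    // returns < bytes once drained
            std::string out = contents.substr(pos, bytes);
            pos += out.size();
            return out;
        }
        std::string contents;
        size_t pos = 0;
    };

    // The calling convention the comment describes: initialize, then read until exhausted.
    std::string recover(SimpleQueue& q) {
        std::string recovered;
        if (q.initializeRecovery()) return recovered;           // nothing to replay
        for (;;) {
            std::string chunk = q.readNext(4096);
            recovered += chunk;
            if (chunk.size() < 4096) return recovered;          // short read: recovery complete
        }
    }

    int main() {
        SimpleQueue q;
        q.contents = std::string(10000, 'x');
        return recover(q).size() == 10000 ? 0 : 1;
    }
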
fdbserver/Knobs.cpp

@@ -165,6 +165,8 @@ ServerKnobs::ServerKnobs(bool randomize, ClientKnobs* clientKnobs) {
 	init( DD_LOCATION_CACHE_SIZE, 2000000 ); if( randomize && BUGGIFY ) DD_LOCATION_CACHE_SIZE = 3;
 	init( MOVEKEYS_LOCK_POLLING_DELAY, 5.0 );
 	init( DEBOUNCE_RECRUITING_DELAY, 5.0 );
+	init( DD_FAILURE_TIME, 1.0 ); if( randomize && BUGGIFY ) DD_FAILURE_TIME = 10.0;
+	init( DD_ZERO_HEALTHY_TEAM_DELAY, 1.0 );
 
 	// Redwood Storage Engine
 	init( PREFIX_TREE_IMMEDIATE_KEY_SIZE_LIMIT, 30 );
@@ -295,6 +297,7 @@ ServerKnobs::ServerKnobs(bool randomize, ClientKnobs* clientKnobs) {
 	init( WAIT_FOR_GOOD_RECRUITMENT_DELAY, 1.0 );
 	init( WAIT_FOR_GOOD_REMOTE_RECRUITMENT_DELAY, 5.0 );
 	init( ATTEMPT_RECRUITMENT_DELAY, 0.035 );
+	init( WAIT_FOR_DISTRIBUTOR_JOIN_DELAY, 1.0 );
 	init( WORKER_FAILURE_TIME, 1.0 ); if( randomize && BUGGIFY ) WORKER_FAILURE_TIME = 10.0;
 	init( CHECK_OUTSTANDING_INTERVAL, 0.5 ); if( randomize && BUGGIFY ) CHECK_OUTSTANDING_INTERVAL = 0.001;
 	init( VERSION_LAG_METRIC_INTERVAL, 0.5 ); if( randomize && BUGGIFY ) VERSION_LAG_METRIC_INTERVAL = 10.0;

fdbserver/Knobs.h

@@ -128,6 +128,8 @@ public:
 	int64_t DD_LOCATION_CACHE_SIZE;
 	double MOVEKEYS_LOCK_POLLING_DELAY;
 	double DEBOUNCE_RECRUITING_DELAY;
+	double DD_FAILURE_TIME;
+	double DD_ZERO_HEALTHY_TEAM_DELAY;
 
 	// Redwood Storage Engine
 	int PREFIX_TREE_IMMEDIATE_KEY_SIZE_LIMIT;
@@ -234,6 +236,7 @@ public:
 	double WAIT_FOR_GOOD_RECRUITMENT_DELAY;
 	double WAIT_FOR_GOOD_REMOTE_RECRUITMENT_DELAY;
 	double ATTEMPT_RECRUITMENT_DELAY;
+	double WAIT_FOR_DISTRIBUTOR_JOIN_DELAY;
 	double WORKER_FAILURE_TIME;
 	double CHECK_OUTSTANDING_INTERVAL;
 	double INCOMPATIBLE_PEERS_LOGGING_INTERVAL;
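
The knob initializations above follow the repo's pattern of optionally overriding a default when randomize && BUGGIFY is set, so randomized simulation runs exercise unusual values. A plain-C++ sketch of that idea; the probability and helper names are stand-ins, not Flow's BUGGIFY machinery:

    #include <cstdlib>
    #include <iostream>

    // Stand-in: a default value that a randomized test build may override.
    static bool buggifyEnabled() { return std::rand() % 4 == 0; }  // assumption: ~25% of runs

    double initKnob(double defaultValue, double buggifiedValue, bool randomize) {
        return (randomize && buggifyEnabled()) ? buggifiedValue : defaultValue;
    }

    int main() {
        // Mirrors the shape of: init( DD_FAILURE_TIME, 1.0 ); if( randomize && BUGGIFY ) DD_FAILURE_TIME = 10.0;
        double ddFailureTime = initKnob(1.0, 10.0, /*randomize=*/true);
        std::cout << "DD_FAILURE_TIME = " << ddFailureTime << "\n";
    }
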
fdbserver/LatencyBandConfig.cpp

@@ -0,0 +1,120 @@
+/*
+ * LatencyBandConfig.cpp
+ *
+ * This source file is part of the FoundationDB open source project
+ *
+ * Copyright 2013-2019 Apple Inc. and the FoundationDB project authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "fdbserver/LatencyBandConfig.h"
+
+#include "fdbclient/ManagementAPI.h"
+#include "fdbclient/Schemas.h"
+
+bool operator==(LatencyBandConfig::RequestConfig const& lhs, LatencyBandConfig::RequestConfig const& rhs) {
+	return typeid(lhs) == typeid(rhs) && lhs.isEqual(rhs);
+}
+
+bool operator!=(LatencyBandConfig::RequestConfig const& lhs, LatencyBandConfig::RequestConfig const& rhs) {
+	return !(lhs == rhs);
+}
+
+bool LatencyBandConfig::RequestConfig::isEqual(RequestConfig const& r) const {
+	return bands == r.bands;
+};
+
+void LatencyBandConfig::RequestConfig::fromJson(JSONDoc json) {
+	json_spirit::mArray bandsArray;
+	if(json.get("bands", bandsArray)) {
+		for(auto b : bandsArray) {
+			bands.insert(b.get_real());
+		}
+	}
+}
+
+void LatencyBandConfig::ReadConfig::fromJson(JSONDoc json) {
+	RequestConfig::fromJson(json);
+
+	int value;
+	if(json.get("max_read_bytes", value)) {
+		maxReadBytes = value;
+	}
+	if(json.get("max_key_selector_offset", value)) {
+		maxKeySelectorOffset = value;
+	}
+}
+
+bool LatencyBandConfig::ReadConfig::isEqual(RequestConfig const& r) const {
+	ReadConfig const& other = static_cast<ReadConfig const&>(r);
+	return RequestConfig::isEqual(r) && maxReadBytes == other.maxReadBytes && maxKeySelectorOffset == other.maxKeySelectorOffset;
+}
+
+void LatencyBandConfig::CommitConfig::fromJson(JSONDoc json) {
+	RequestConfig::fromJson(json);
+
+	int value;
+	if(json.get("max_commit_bytes", value)) {
+		maxCommitBytes = value;
+	}
+}
+
+bool LatencyBandConfig::CommitConfig::isEqual(RequestConfig const& r) const {
+	CommitConfig const& other = static_cast<CommitConfig const&>(r);
+	return RequestConfig::isEqual(r) && maxCommitBytes == other.maxCommitBytes;
+}
+
+Optional<LatencyBandConfig> LatencyBandConfig::parse(ValueRef configurationString) {
+	Optional<LatencyBandConfig> config;
+	if(configurationString.size() == 0) {
+		return config;
+	}
+
+	json_spirit::mValue parsedConfig;
+	if(!json_spirit::read_string(configurationString.toString(), parsedConfig)) {
+		TraceEvent(SevWarnAlways, "InvalidLatencyBandConfiguration").detail("Reason", "InvalidJSON").detail("Configuration", printable(configurationString));
+		return config;
+	}
+
+	json_spirit::mObject configJson = parsedConfig.get_obj();
+
+	json_spirit::mValue schema;
+	if(!json_spirit::read_string(JSONSchemas::latencyBandConfigurationSchema.toString(), schema)) {
+		ASSERT(false);
+	}
+
+	std::string errorStr;
+	if(!schemaMatch(schema.get_obj(), configJson, errorStr)) {
+		TraceEvent(SevWarnAlways, "InvalidLatencyBandConfiguration").detail("Reason", "SchemaMismatch").detail("Configuration", printable(configurationString)).detail("Error", errorStr);
+		return config;
+	}
+
+	JSONDoc configDoc(configJson);
+
+	config = LatencyBandConfig();
+
+	config.get().grvConfig.fromJson(configDoc.subDoc("get_read_version"));
+	config.get().readConfig.fromJson(configDoc.subDoc("read"));
+	config.get().commitConfig.fromJson(configDoc.subDoc("commit"));
+
+	return config;
+}
+
+bool LatencyBandConfig::operator==(LatencyBandConfig const& r) const {
+	return grvConfig == r.grvConfig && readConfig == r.readConfig && commitConfig == r.commitConfig;
+}
+
+bool LatencyBandConfig::operator!=(LatencyBandConfig const& r) const {
+	return !(*this == r);
+}
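
LatencyBandConfig::parse() above validates a JSON document against JSONSchemas::latencyBandConfigurationSchema and then reads per-request sections. Based only on the keys that the fromJson() methods in this file read, a configuration of roughly this shape should be accepted; this is an illustrative guess at the JSON, not an excerpt of the schema:

    // Illustrative configuration string; keys mirror the ones fromJson() reads above.
    const char* kExampleLatencyBandConfig = R"JSON(
    {
      "get_read_version": { "bands": [0.01, 0.1, 1.0] },
      "read":   { "bands": [0.01, 0.1, 1.0], "max_read_bytes": 100000, "max_key_selector_offset": 1000 },
      "commit": { "bands": [0.05, 0.5, 2.0], "max_commit_bytes": 100000 }
    }
    )JSON";
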
fdbserver/LatencyBandConfig.h

@@ -0,0 +1,106 @@
+/*
+ * LatencyBandConfig.h
+ *
+ * This source file is part of the FoundationDB open source project
+ *
+ * Copyright 2013-2019 Apple Inc. and the FoundationDB project authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FDBSERVER_LATENCYBANDCONFIG_H
+#define FDBSERVER_LATENCYBANDCONFIG_H
+#pragma once
+
+#include "fdbclient/FDBTypes.h"
+#include "fdbclient/JSONDoc.h"
+
+struct LatencyBandConfig {
+	struct RequestConfig {
+		std::set<double> bands;
+
+		friend bool operator==(RequestConfig const& lhs, RequestConfig const& rhs);
+		friend bool operator!=(RequestConfig const& lhs, RequestConfig const& rhs);
+
+		virtual void fromJson(JSONDoc json);
+
+		template <class Ar>
+		void serialize(Ar& ar) {
+			uint64_t bandsSize = (uint64_t)bands.size();
+			serializer(ar, bandsSize);
+
+			if(ar.isDeserializing) {
+				double band;
+				for(uint64_t i = 0; i < bandsSize; i++) {
+					serializer(ar, band);
+					bands.insert(band);
+				}
+			}
+			else {
+				for(double band : bands) {
+					serializer(ar, band);
+				}
+			}
+		}
+
+	protected:
+		virtual bool isEqual(RequestConfig const& r) const;
+	};
+
+	struct GrvConfig : RequestConfig {};
+
+	struct ReadConfig : RequestConfig {
+		Optional<int> maxReadBytes;
+		Optional<int> maxKeySelectorOffset;
+
+		virtual void fromJson(JSONDoc json);
+
+		template <class Ar>
+		void serialize(Ar& ar) {
+			serializer(ar, *(RequestConfig*)this, maxReadBytes, maxKeySelectorOffset);
+		}
+
+	protected:
+		virtual bool isEqual(RequestConfig const& r) const;
+	};
+
+	struct CommitConfig : RequestConfig {
+		Optional<int> maxCommitBytes;
+
+		virtual void fromJson(JSONDoc json);
+
+		template <class Ar>
+		void serialize(Ar& ar) {
+			serializer(ar, *(RequestConfig*)this, maxCommitBytes);
+		}
+
+	protected:
+		virtual bool isEqual(RequestConfig const& r) const;
+	};
+
+	GrvConfig grvConfig;
+	ReadConfig readConfig;
+	CommitConfig commitConfig;
+
+	template <class Ar>
+	void serialize(Ar& ar) {
+		serializer(ar, grvConfig, readConfig, commitConfig);
+	}
+
+	static Optional<LatencyBandConfig> parse(ValueRef configurationString);
+
+	bool operator==(LatencyBandConfig const& r) const;
+	bool operator!=(LatencyBandConfig const& r) const;
+};
+
+#endif
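
The bands configured above feed per-request latency counters on the proxy, which sort each measured latency into a threshold bucket. A rough sketch of one way such bucketing can work, independent of Flow's metrics types; the threshold handling and the "filtered" bucket here are simplifying assumptions, not the real LatencyBands implementation:

    #include <cstdint>
    #include <iostream>
    #include <map>

    // Count measurements at or below each configured threshold (seconds), plus a
    // catch-all for slower ones and a bucket for filtered-out measurements.
    class SimpleLatencyBands {
    public:
        void addThreshold(double t) { counts[t]; }
        void clearBands() { counts.clear(); slow = filtered = 0; }
        void addMeasurement(double latency, bool filter = false) {
            if (filter) { ++filtered; return; }
            auto it = counts.lower_bound(latency);   // first threshold >= latency
            if (it == counts.end()) ++slow; else ++it->second;
        }
        void print() const {
            for (auto& kv : counts) std::cout << "<=" << kv.first << "s: " << kv.second << "\n";
            std::cout << "slower: " << slow << ", filtered: " << filtered << "\n";
        }
    private:
        std::map<double, uint64_t> counts;
        uint64_t slow = 0, filtered = 0;
    };

    int main() {
        SimpleLatencyBands bands;
        bands.addThreshold(0.01); bands.addThreshold(0.1); bands.addThreshold(1.0);
        bands.addMeasurement(0.004); bands.addMeasurement(0.2); bands.addMeasurement(5.0);
        bands.print();
    }
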
fdbserver/LogSystemDiskQueueAdapter.h

@@ -67,8 +67,10 @@ public:
 	virtual void close();
 
 	// IDiskQueue interface
+	virtual Future<bool> initializeRecovery() { return false; }
 	virtual Future<Standalone<StringRef>> readNext( int bytes );
 	virtual IDiskQueue::location getNextReadLocation();
+	virtual Future<Standalone<StringRef>> read( location start, location end ) { ASSERT(false); throw internal_error(); }
 	virtual IDiskQueue::location push( StringRef contents );
 	virtual void pop( IDiskQueue::location upTo );
 	virtual Future<Void> commit();

fdbserver/MasterInterface.h

@@ -25,6 +25,7 @@
 #include "fdbclient/FDBTypes.h"
 #include "fdbclient/StorageServerInterface.h"
 #include "fdbclient/CommitTransaction.h"
+#include "fdbclient/DatabaseConfiguration.h"
 #include "fdbserver/TLogInterface.h"
 
 typedef uint64_t DBRecoveryCount;
@@ -32,7 +33,6 @@ typedef uint64_t DBRecoveryCount;
 struct MasterInterface {
 	LocalityData locality;
 	RequestStream< ReplyPromise<Void> > waitFailure;
-	RequestStream< struct GetRateInfoRequest > getRateInfo;
 	RequestStream< struct TLogRejoinRequest > tlogRejoin; // sent by tlog (whether or not rebooted) to communicate with a new master
 	RequestStream< struct ChangeCoordinatorsRequest > changeCoordinators;
 	RequestStream< struct GetCommitVersionRequest > getCommitVersion;
@@ -43,7 +43,7 @@ struct MasterInterface {
 	template <class Archive>
 	void serialize(Archive& ar) {
 		ASSERT( ar.protocolVersion() >= 0x0FDB00A200040001LL );
-		serializer(ar, locality, waitFailure, getRateInfo, tlogRejoin, changeCoordinators, getCommitVersion);
+		serializer(ar, locality, waitFailure, tlogRejoin, changeCoordinators, getCommitVersion);
 	}
 
 	void initEndpoints() {
@@ -51,30 +51,6 @@ struct MasterInterface {
 	}
 };
 
-struct GetRateInfoRequest {
-	UID requesterID;
-	int64_t totalReleasedTransactions;
-	ReplyPromise<struct GetRateInfoReply> reply;
-
-	GetRateInfoRequest() {}
-	GetRateInfoRequest( UID const& requesterID, int64_t totalReleasedTransactions ) : requesterID(requesterID), totalReleasedTransactions(totalReleasedTransactions) {}
-
-	template <class Ar>
-	void serialize(Ar& ar) {
-		serializer(ar, requesterID, totalReleasedTransactions, reply);
-	}
-};
-
-struct GetRateInfoReply {
-	double transactionRate;
-	double leaseDuration;
-
-	template <class Ar>
-	void serialize(Ar& ar) {
-		serializer(ar, transactionRate, leaseDuration);
-	}
-};
-
 struct TLogRejoinRequest {
 	TLogInterface myInterface;
 	ReplyPromise<bool> reply; // false means someone else registered, so we should re-register.  true means this master is recovered, so don't send again to the same master.
fdbserver/MasterProxyServer.actor.cpp

@@ -37,6 +37,7 @@
 #include "flow/Stats.h"
 #include "fdbserver/ApplyMetadataMutation.h"
 #include "fdbserver/RecoveryState.h"
+#include "fdbserver/LatencyBandConfig.h"
 #include "fdbclient/Atomic.h"
 #include "flow/TDMetric.actor.h"
 #include "flow/actorcompiler.h"  // This must be the last #include.
@@ -55,13 +56,17 @@ struct ProxyStats {
 	Counter conflictRanges;
 	Version lastCommitVersionAssigned;
 
+	LatencyBands commitLatencyBands;
+	LatencyBands grvLatencyBands;
+
 	Future<Void> logger;
 
 	explicit ProxyStats(UID id, Version* pVersion, NotifiedVersion* pCommittedVersion, int64_t *commitBatchesMemBytesCountPtr)
 	  : cc("ProxyStats", id.toString()),
 		txnStartIn("TxnStartIn", cc), txnStartOut("TxnStartOut", cc), txnStartBatch("TxnStartBatch", cc), txnSystemPriorityStartIn("TxnSystemPriorityStartIn", cc), txnSystemPriorityStartOut("TxnSystemPriorityStartOut", cc), txnBatchPriorityStartIn("TxnBatchPriorityStartIn", cc), txnBatchPriorityStartOut("TxnBatchPriorityStartOut", cc),
 		txnDefaultPriorityStartIn("TxnDefaultPriorityStartIn", cc), txnDefaultPriorityStartOut("TxnDefaultPriorityStartOut", cc), txnCommitIn("TxnCommitIn", cc), txnCommitVersionAssigned("TxnCommitVersionAssigned", cc), txnCommitResolving("TxnCommitResolving", cc), txnCommitResolved("TxnCommitResolved", cc), txnCommitOut("TxnCommitOut", cc),
-		txnCommitOutSuccess("TxnCommitOutSuccess", cc), txnConflicts("TxnConflicts", cc), commitBatchIn("CommitBatchIn", cc), commitBatchOut("CommitBatchOut", cc), mutationBytes("MutationBytes", cc), mutations("Mutations", cc), conflictRanges("ConflictRanges", cc), lastCommitVersionAssigned(0)
+		txnCommitOutSuccess("TxnCommitOutSuccess", cc), txnConflicts("TxnConflicts", cc), commitBatchIn("CommitBatchIn", cc), commitBatchOut("CommitBatchOut", cc), mutationBytes("MutationBytes", cc), mutations("Mutations", cc), conflictRanges("ConflictRanges", cc), lastCommitVersionAssigned(0),
+		commitLatencyBands("CommitLatencyMetrics", id, SERVER_KNOBS->STORAGE_LOGGING_DELAY), grvLatencyBands("GRVLatencyMetrics", id, SERVER_KNOBS->STORAGE_LOGGING_DELAY)
 	{
 		specialCounter(cc, "LastAssignedCommitVersion", [this](){return this->lastCommitVersionAssigned;});
 		specialCounter(cc, "Version", [pVersion](){return *pVersion; });
@@ -82,28 +87,40 @@ Future<Void> forwardValue(Promise<T> out, Future<T> in)
 
 int getBytes(Promise<Version> const& r) { return 0; }
 
-ACTOR Future<Void> getRate(UID myID, MasterInterface master, int64_t* inTransactionCount, double* outTransactionRate) {
-	state Future<Void> nextRequestTimer = Void();
+ACTOR Future<Void> getRate(UID myID, Reference<AsyncVar<ServerDBInfo>> db, int64_t* inTransactionCount, double* outTransactionRate) {
+	state Future<Void> nextRequestTimer = Never();
 	state Future<Void> leaseTimeout = Never();
-	state Future<GetRateInfoReply> reply;
+	state Future<GetRateInfoReply> reply = Never();
 	state int64_t lastTC = 0;
 
-	loop choose{
-		when(wait(nextRequestTimer)) {
-			nextRequestTimer = Never();
-			reply = brokenPromiseToNever(master.getRateInfo.getReply(GetRateInfoRequest(myID, *inTransactionCount)));
+	if (db->get().distributor.present()) nextRequestTimer = Void();
+	loop choose {
+		when ( wait( db->onChange() ) ) {
+			if ( db->get().distributor.present() ) {
+				TraceEvent("Proxy_DataDistributorChanged", myID)
+				.detail("DDID", db->get().distributor.get().id());
+				nextRequestTimer = Void();  // trigger GetRate request
+			} else {
+				TraceEvent("Proxy_DataDistributorDied", myID);
+				nextRequestTimer = Never();
+				reply = Never();
+			}
 		}
-		when(GetRateInfoReply rep = wait(reply)) {
+		when ( wait( nextRequestTimer ) ) {
+			nextRequestTimer = Never();
+			reply = brokenPromiseToNever(db->get().distributor.get().getRateInfo.getReply(GetRateInfoRequest(myID, *inTransactionCount)));
+		}
+		when ( GetRateInfoReply rep = wait(reply) ) {
 			reply = Never();
 			*outTransactionRate = rep.transactionRate;
-			//TraceEvent("MasterProxyRate", myID).detail("Rate", rep.transactionRate).detail("Lease", rep.leaseDuration).detail("ReleasedTransactions", *inTransactionCount - lastTC);
+			// TraceEvent("MasterProxyRate", myID).detail("Rate", rep.transactionRate).detail("Lease", rep.leaseDuration).detail("ReleasedTransactions", *inTransactionCount - lastTC);
 			lastTC = *inTransactionCount;
 			leaseTimeout = delay(rep.leaseDuration);
 			nextRequestTimer = delayJittered(rep.leaseDuration / 2);
 		}
-		when(wait(leaseTimeout)) {
+		when ( wait(leaseTimeout ) ) {
 			*outTransactionRate = 0;
-			//TraceEvent("MasterProxyRate", myID).detail("Rate", 0).detail("Lease", "Expired");
+			// TraceEvent("MasterProxyRate", myID).detail("Rate", 0).detail("Lease", "Expired");
 			leaseTimeout = Never();
 		}
 	}
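
The rewritten getRate() above now obtains its transaction rate from the data distributor rather than the master, while keeping the existing lease rule: if no fresh reply arrives before the lease expires, the admitted rate drops to zero. A compact sketch of that lease rule with plain clocks; the duration values are arbitrary, and the real actor uses Flow timers and GetRateInfoReply:

    #include <chrono>
    #include <iostream>

    using Clock = std::chrono::steady_clock;

    // Holds the most recent rate and the time until which it may be trusted.
    struct RateLease {
        double rate = 0.0;
        Clock::time_point expiry = Clock::time_point::min();

        void onReply(double newRate, std::chrono::milliseconds leaseDuration) {
            rate = newRate;
            expiry = Clock::now() + leaseDuration;
        }
        // The rule the proxy applies: once the lease runs out, stop admitting transactions.
        double currentRate() const {
            return Clock::now() < expiry ? rate : 0.0;
        }
    };

    int main() {
        RateLease lease;
        lease.onReply(1000.0, std::chrono::milliseconds(50));
        std::cout << lease.currentRate() << "\n";  // ~1000 while the lease is valid, 0 afterwards
    }
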
@ -208,6 +225,8 @@ struct ProxyCommitData {
|
||||||
Version lastTxsPop;
|
Version lastTxsPop;
|
||||||
bool popRemoteTxs;
|
bool popRemoteTxs;
|
||||||
|
|
||||||
|
Optional<LatencyBandConfig> latencyBandConfig;
|
||||||
|
|
||||||
//The tag related to a storage server rarely change, so we keep a vector of tags for each key range to be slightly more CPU efficient.
|
//The tag related to a storage server rarely change, so we keep a vector of tags for each key range to be slightly more CPU efficient.
|
||||||
//When a tag related to a storage server does change, we empty out all of these vectors to signify they must be repopulated.
|
//When a tag related to a storage server does change, we empty out all of these vectors to signify they must be repopulated.
|
||||||
//We do not repopulate them immediately to avoid a slow task.
|
//We do not repopulate them immediately to avoid a slow task.
|
||||||
|
@ -458,11 +477,13 @@ ACTOR Future<Void> commitBatch(
|
||||||
|
|
||||||
ResolutionRequestBuilder requests( self, commitVersion, prevVersion, self->version );
|
ResolutionRequestBuilder requests( self, commitVersion, prevVersion, self->version );
|
||||||
int conflictRangeCount = 0;
|
int conflictRangeCount = 0;
|
||||||
|
state int64_t maxTransactionBytes = 0;
|
||||||
for (int t = 0; t<trs.size(); t++) {
|
for (int t = 0; t<trs.size(); t++) {
|
||||||
requests.addTransaction(trs[t].transaction, t);
|
requests.addTransaction(trs[t].transaction, t);
|
||||||
conflictRangeCount += trs[t].transaction.read_conflict_ranges.size() + trs[t].transaction.write_conflict_ranges.size();
|
conflictRangeCount += trs[t].transaction.read_conflict_ranges.size() + trs[t].transaction.write_conflict_ranges.size();
|
||||||
//TraceEvent("MPTransactionDump", self->dbgid).detail("Snapshot", trs[t].transaction.read_snapshot);
|
//TraceEvent("MPTransactionDump", self->dbgid).detail("Snapshot", trs[t].transaction.read_snapshot);
|
||||||
//for(auto& m : trs[t].transaction.mutations)
|
//for(auto& m : trs[t].transaction.mutations)
|
||||||
|
maxTransactionBytes = std::max<int64_t>(maxTransactionBytes, trs[t].transaction.expectedSize());
|
||||||
// TraceEvent("MPTransactionsDump", self->dbgid).detail("Mutation", m.toString());
|
// TraceEvent("MPTransactionsDump", self->dbgid).detail("Mutation", m.toString());
|
||||||
}
|
}
|
||||||
self->stats.conflictRanges += conflictRangeCount;
|
self->stats.conflictRanges += conflictRangeCount;
|
||||||
|
@ -952,16 +973,24 @@ ACTOR Future<Void> commitBatch(
|
||||||
}
|
}
|
||||||
|
|
||||||
// Send replies to clients
|
// Send replies to clients
|
||||||
for (int t = 0; t < trs.size(); t++)
|
double endTime = timer();
|
||||||
{
|
for (int t = 0; t < trs.size(); t++) {
|
||||||
if (committed[t] == ConflictBatch::TransactionCommitted && (!locked || trs[t].isLockAware())) {
|
if (committed[t] == ConflictBatch::TransactionCommitted && (!locked || trs[t].isLockAware())) {
|
||||||
ASSERT_WE_THINK(commitVersion != invalidVersion);
|
ASSERT_WE_THINK(commitVersion != invalidVersion);
|
||||||
trs[t].reply.send(CommitID(commitVersion, t));
|
trs[t].reply.send(CommitID(commitVersion, t));
|
||||||
}
|
}
|
||||||
else if (committed[t] == ConflictBatch::TransactionTooOld)
|
else if (committed[t] == ConflictBatch::TransactionTooOld) {
|
||||||
trs[t].reply.sendError(transaction_too_old());
|
trs[t].reply.sendError(transaction_too_old());
|
||||||
else
|
}
|
||||||
|
else {
|
||||||
trs[t].reply.sendError(not_committed());
|
trs[t].reply.sendError(not_committed());
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO: filter if pipelined with large commit
|
||||||
|
if(self->latencyBandConfig.present()) {
|
||||||
|
bool filter = maxTransactionBytes > self->latencyBandConfig.get().commitConfig.maxCommitBytes.orDefault(std::numeric_limits<int>::max());
|
||||||
|
self->stats.commitLatencyBands.addMeasurement(endTime - trs[t].requestTime, filter);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
++self->stats.commitBatchOut;
|
++self->stats.commitBatchOut;
|
||||||
|
@ -1049,9 +1078,19 @@ ACTOR Future<Void> fetchVersions(ProxyCommitData *commitData) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
ACTOR Future<Void> sendGrvReplies(Future<GetReadVersionReply> replyFuture, std::vector<GetReadVersionRequest> requests, ProxyStats *stats) {
|
||||||
|
GetReadVersionReply reply = wait(replyFuture);
|
||||||
|
double end = timer();
|
||||||
|
for(GetReadVersionRequest const& request : requests) {
|
||||||
|
stats->grvLatencyBands.addMeasurement(end - request.requestTime);
|
||||||
|
request.reply.send(reply);
|
||||||
|
}
|
||||||
|
|
||||||
|
return Void();
|
||||||
|
}
|
||||||
|
|
||||||
ACTOR static Future<Void> transactionStarter(
|
ACTOR static Future<Void> transactionStarter(
|
||||||
MasterProxyInterface proxy,
|
MasterProxyInterface proxy,
|
||||||
MasterInterface master,
|
|
||||||
Reference<AsyncVar<ServerDBInfo>> db,
|
Reference<AsyncVar<ServerDBInfo>> db,
|
||||||
PromiseStream<Future<Void>> addActor,
|
PromiseStream<Future<Void>> addActor,
|
||||||
ProxyCommitData* commitData
|
ProxyCommitData* commitData
|
||||||
|
@ -1068,7 +1107,7 @@ ACTOR static Future<Void> transactionStarter(
|
||||||
state vector<MasterProxyInterface> otherProxies;
|
state vector<MasterProxyInterface> otherProxies;
|
||||||
|
|
||||||
state PromiseStream<double> replyTimes;
|
state PromiseStream<double> replyTimes;
|
||||||
addActor.send(getRate(proxy.id(), master, &transactionCount, &transactionRate));
|
addActor.send( getRate(proxy.id(), db, &transactionCount, &transactionRate) );
|
||||||
addActor.send(queueTransactionStartRequests(&transactionQueue, proxy.getConsistentReadVersion.getFuture(), GRVTimer, &lastGRVTime, &GRVBatchTime, replyTimes.getFuture(), &commitData->stats));
|
addActor.send(queueTransactionStartRequests(&transactionQueue, proxy.getConsistentReadVersion.getFuture(), GRVTimer, &lastGRVTime, &GRVBatchTime, replyTimes.getFuture(), &commitData->stats));
|
||||||
|
|
||||||
// Get a list of the other proxies that go together with us
|
// Get a list of the other proxies that go together with us
|
||||||
|
@ -1098,7 +1137,7 @@ ACTOR static Future<Void> transactionStarter(
|
||||||
int defaultPriTransactionsStarted[2] = { 0, 0 };
|
int defaultPriTransactionsStarted[2] = { 0, 0 };
|
||||||
int batchPriTransactionsStarted[2] = { 0, 0 };
|
int batchPriTransactionsStarted[2] = { 0, 0 };
|
||||||
|
|
||||||
vector<vector<ReplyPromise<GetReadVersionReply>>> start(2); // start[0] is transactions starting with !(flags&CAUSAL_READ_RISKY), start[1] is transactions starting with flags&CAUSAL_READ_RISKY
|
vector<vector<GetReadVersionRequest>> start(2); // start[0] is transactions starting with !(flags&CAUSAL_READ_RISKY), start[1] is transactions starting with flags&CAUSAL_READ_RISKY
|
||||||
Optional<UID> debugID;
|
Optional<UID> debugID;
|
||||||
|
|
||||||
double leftToStart = 0;
|
double leftToStart = 0;
|
||||||
|
@ -1114,7 +1153,6 @@ ACTOR static Future<Void> transactionStarter(
|
||||||
if (!debugID.present()) debugID = g_nondeterministic_random->randomUniqueID();
|
if (!debugID.present()) debugID = g_nondeterministic_random->randomUniqueID();
|
||||||
g_traceBatch.addAttach("TransactionAttachID", req.debugID.get().first(), debugID.get().first());
|
g_traceBatch.addAttach("TransactionAttachID", req.debugID.get().first(), debugID.get().first());
|
||||||
}
|
}
|
||||||
start[req.flags & 1].push_back(std::move(req.reply)); static_assert(GetReadVersionRequest::FLAG_CAUSAL_READ_RISKY == 1, "Implementation dependent on flag value");
|
|
||||||
|
|
||||||
transactionsStarted[req.flags&1] += tc;
|
transactionsStarted[req.flags&1] += tc;
|
||||||
if (req.priority() >= GetReadVersionRequest::PRIORITY_SYSTEM_IMMEDIATE)
|
if (req.priority() >= GetReadVersionRequest::PRIORITY_SYSTEM_IMMEDIATE)
|
||||||
|
@ -1124,6 +1162,7 @@ ACTOR static Future<Void> transactionStarter(
|
||||||
else
|
else
|
||||||
batchPriTransactionsStarted[req.flags & 1] += tc;
|
batchPriTransactionsStarted[req.flags & 1] += tc;
|
||||||
|
|
||||||
|
start[req.flags & 1].push_back(std::move(req)); static_assert(GetReadVersionRequest::FLAG_CAUSAL_READ_RISKY == 1, "Implementation dependent on flag value");
|
||||||
transactionQueue.pop();
|
transactionQueue.pop();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1141,20 +1180,22 @@ ACTOR static Future<Void> transactionStarter(
|
||||||
.detail("TransactionBudget", transactionBudget)
|
.detail("TransactionBudget", transactionBudget)
|
||||||
.detail("LastLeftToStart", leftToStart);*/
|
.detail("LastLeftToStart", leftToStart);*/
|
||||||
|
|
||||||
// dynamic batching
|
|
||||||
ReplyPromise<GetReadVersionReply> GRVReply;
|
|
||||||
if (start[0].size()){
|
|
||||||
start[0].push_back(GRVReply); // for now, base dynamic batching on the time for normal requests (not read_risky)
|
|
||||||
addActor.send(timeReply(GRVReply.getFuture(), replyTimes));
|
|
||||||
}
|
|
||||||
|
|
||||||
transactionCount += transactionsStarted[0] + transactionsStarted[1];
|
transactionCount += transactionsStarted[0] + transactionsStarted[1];
|
||||||
transactionBudget = std::max(std::min(nTransactionsToStart - transactionsStarted[0] - transactionsStarted[1], SERVER_KNOBS->START_TRANSACTION_MAX_BUDGET_SIZE), -SERVER_KNOBS->START_TRANSACTION_MAX_BUDGET_SIZE);
|
transactionBudget = std::max(std::min(nTransactionsToStart - transactionsStarted[0] - transactionsStarted[1], SERVER_KNOBS->START_TRANSACTION_MAX_BUDGET_SIZE), -SERVER_KNOBS->START_TRANSACTION_MAX_BUDGET_SIZE);
|
||||||
if (debugID.present())
|
|
||||||
|
if (debugID.present()) {
|
||||||
g_traceBatch.addEvent("TransactionDebug", debugID.get().first(), "MasterProxyServer.masterProxyServerCore.Broadcast");
|
g_traceBatch.addEvent("TransactionDebug", debugID.get().first(), "MasterProxyServer.masterProxyServerCore.Broadcast");
|
||||||
for (int i = 0; i<start.size(); i++) {
|
}
|
||||||
|
|
||||||
|
for (int i = 0; i < start.size(); i++) {
|
||||||
 			if (start[i].size()) {
-				addActor.send(broadcast(getLiveCommittedVersion(commitData, i, &otherProxies, debugID, transactionsStarted[i], systemTransactionsStarted[i], defaultPriTransactionsStarted[i], batchPriTransactionsStarted[i]), start[i]));
+				Future<GetReadVersionReply> readVersionReply = getLiveCommittedVersion(commitData, i, &otherProxies, debugID, transactionsStarted[i], systemTransactionsStarted[i], defaultPriTransactionsStarted[i], batchPriTransactionsStarted[i]);
+				addActor.send(sendGrvReplies(readVersionReply, start[i], &commitData->stats));
+
+				// for now, base dynamic batching on the time for normal requests (not read_risky)
+				if (i == 0) {
+					addActor.send(timeReply(readVersionReply, replyTimes));
+				}
 			}
 		}
 	}

@@ -1383,7 +1424,7 @@ ACTOR Future<Void> masterProxyServerCore(
 	TraceEvent(SevInfo, "CommitBatchesMemoryLimit").detail("BytesLimit", commitBatchesMemoryLimit);

 	addActor.send(monitorRemoteCommitted(&commitData, db));
-	addActor.send(transactionStarter(proxy, master, db, addActor, &commitData));
+	addActor.send(transactionStarter(proxy, db, addActor, &commitData));
 	addActor.send(readRequestServer(proxy, &commitData));

 	// wait for txnStateStore recovery

@@ -1405,6 +1446,34 @@ ACTOR Future<Void> masterProxyServerCore(
 				}
 				commitData.logSystem->pop(commitData.lastTxsPop, txsTag, 0, tagLocalityRemoteLog);
 			}
+
+			Optional<LatencyBandConfig> newLatencyBandConfig = db->get().latencyBandConfig;
+
+			if(newLatencyBandConfig.present() != commitData.latencyBandConfig.present()
+				|| (newLatencyBandConfig.present() && newLatencyBandConfig.get().grvConfig != commitData.latencyBandConfig.get().grvConfig))
+			{
+				TraceEvent("LatencyBandGrvUpdatingConfig").detail("Present", newLatencyBandConfig.present());
+				commitData.stats.grvLatencyBands.clearBands();
+				if(newLatencyBandConfig.present()) {
+					for(auto band : newLatencyBandConfig.get().grvConfig.bands) {
+						commitData.stats.grvLatencyBands.addThreshold(band);
+					}
+				}
+			}
+
+			if(newLatencyBandConfig.present() != commitData.latencyBandConfig.present()
+				|| (newLatencyBandConfig.present() && newLatencyBandConfig.get().commitConfig != commitData.latencyBandConfig.get().commitConfig))
+			{
+				TraceEvent("LatencyBandCommitUpdatingConfig").detail("Present", newLatencyBandConfig.present());
+				commitData.stats.commitLatencyBands.clearBands();
+				if(newLatencyBandConfig.present()) {
+					for(auto band : newLatencyBandConfig.get().commitConfig.bands) {
+						commitData.stats.commitLatencyBands.addThreshold(band);
+					}
+				}
+			}
+
+			commitData.latencyBandConfig = newLatencyBandConfig;
 		}
 		when(wait(onError)) {}
 		when(std::pair<vector<CommitTransactionRequest>, int> batchedRequests = waitNext(batchedCommits.getFuture())) {
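The grvLatencyBands / commitLatencyBands bookkeeping introduced above amounts to counting samples into configured threshold buckets and rebuilding the buckets whenever the configuration changes. A minimal standalone sketch of that idea in plain C++; LatencyBandsSketch and its members are illustrative stand-ins, not the FoundationDB classes:

#include <cstdint>
#include <iostream>
#include <map>

// Sketch of band-threshold accounting (a stand-in, not FDB's LatencyBands):
// clearBands() drops all buckets, addThreshold() registers an upper bound in
// seconds, addMeasurement() files a sample under the smallest covering threshold.
struct LatencyBandsSketch {
	std::map<double, int64_t> bands;   // threshold (seconds) -> sample count
	int64_t filtered = 0;              // samples larger than every threshold

	void clearBands() { bands.clear(); filtered = 0; }
	void addThreshold(double seconds) { bands.emplace(seconds, 0); }
	void addMeasurement(double seconds) {
		auto it = bands.lower_bound(seconds);
		if (it == bands.end()) ++filtered; else ++it->second;
	}
};

int main() {
	LatencyBandsSketch grv;
	for (double t : {0.005, 0.02, 0.1}) grv.addThreshold(t);        // hypothetical GRV config
	for (double s : {0.001, 0.015, 0.09, 0.5}) grv.addMeasurement(s);
	for (const auto& [threshold, count] : grv.bands)
		std::cout << "<=" << threshold << "s: " << count << "\n";
	std::cout << "filtered: " << grv.filtered << "\n";
}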
@@ -387,11 +387,11 @@ ACTOR Future<Void> startMoveKeys( Database occ, KeyRange keys, vector<UID> serve
 	return Void();
 }

-ACTOR Future<Void> waitForShardReady( StorageServerInterface server, KeyRange keys, Version minVersion, Version recoveryVersion, GetShardStateRequest::waitMode mode){
+ACTOR Future<Void> waitForShardReady( StorageServerInterface server, KeyRange keys, Version minVersion, GetShardStateRequest::waitMode mode ) {
 	loop {
 		try {
 			std::pair<Version,Version> rep = wait( server.getShardState.getReply( GetShardStateRequest(keys, mode), TaskMoveKeys ) );
-			if (rep.first >= minVersion && (recoveryVersion == invalidVersion || rep.second >= recoveryVersion)) {
+			if (rep.first >= minVersion) {
 				return Void();
 			}
 			wait( delayJittered( SERVER_KNOBS->SHARD_READY_DELAY, TaskMoveKeys ) );

@@ -431,7 +431,7 @@ ACTOR Future<Void> checkFetchingState( Database cx, vector<UID> dest, KeyRange k
 			}
 			auto si = decodeServerListValue(serverListValues[s].get());
 			ASSERT( si.id() == dest[s] );
-			requests.push_back( waitForShardReady( si, keys, tr.getReadVersion().get(), invalidVersion, GetShardStateRequest::FETCHING ) );
+			requests.push_back( waitForShardReady( si, keys, tr.getReadVersion().get(), GetShardStateRequest::FETCHING ) );
 		}

 		wait( timeoutError( waitForAll( requests ),

@@ -452,7 +452,7 @@ ACTOR Future<Void> checkFetchingState( Database cx, vector<UID> dest, KeyRange k
 // keyServers[k].dest must be the same for all k in keys
 // Set serverKeys[dest][keys] = true; serverKeys[src][keys] = false for all src not in dest
 // Should be cancelled and restarted if keyServers[keys].dest changes (?so this is no longer true?)
-ACTOR Future<Void> finishMoveKeys( Database occ, KeyRange keys, vector<UID> destinationTeam, MoveKeysLock lock, FlowLock *finishMoveKeysParallelismLock, Version recoveryVersion, bool hasRemote, UID relocationIntervalId )
+ACTOR Future<Void> finishMoveKeys( Database occ, KeyRange keys, vector<UID> destinationTeam, MoveKeysLock lock, FlowLock *finishMoveKeysParallelismLock, bool hasRemote, UID relocationIntervalId )
 {
 	state TraceInterval interval("RelocateShard_FinishMoveKeys");
 	state TraceInterval waitInterval("");

@@ -626,7 +626,7 @@ ACTOR Future<Void> finishMoveKeys( Database occ, KeyRange keys, vector<UID> dest
 			}

 			for(int s=0; s<storageServerInterfaces.size(); s++)
-				serverReady.push_back( waitForShardReady( storageServerInterfaces[s], keys, tr.getReadVersion().get(), recoveryVersion, GetShardStateRequest::READABLE) );
+				serverReady.push_back( waitForShardReady( storageServerInterfaces[s], keys, tr.getReadVersion().get(), GetShardStateRequest::READABLE) );
 			wait( timeout( waitForAll( serverReady ), SERVER_KNOBS->SERVER_READY_QUORUM_TIMEOUT, Void(), TaskMoveKeys ) );
 			int count = dest.size() - newDestinations.size();
 			for(int s=0; s<serverReady.size(); s++)

@@ -881,7 +881,6 @@ ACTOR Future<Void> moveKeys(
 	Promise<Void> dataMovementComplete,
 	FlowLock *startMoveKeysParallelismLock,
 	FlowLock *finishMoveKeysParallelismLock,
-	Version recoveryVersion,
 	bool hasRemote,
 	UID relocationIntervalId)
 {

@@ -891,7 +890,7 @@ ACTOR Future<Void> moveKeys(

 	state Future<Void> completionSignaller = checkFetchingState( cx, healthyDestinations, keys, dataMovementComplete, relocationIntervalId );

-	wait( finishMoveKeys( cx, keys, destinationTeam, lock, finishMoveKeysParallelismLock, recoveryVersion, hasRemote, relocationIntervalId ) );
+	wait( finishMoveKeys( cx, keys, destinationTeam, lock, finishMoveKeysParallelismLock, hasRemote, relocationIntervalId ) );

 	//This is defensive, but make sure that we always say that the movement is complete before moveKeys completes
 	completionSignaller.cancel();

@@ -59,7 +59,6 @@ Future<Void> moveKeys(
 	Promise<Void> const& dataMovementComplete,
 	FlowLock* const& startMoveKeysParallelismLock,
 	FlowLock* const& finishMoveKeysParallelismLock,
-	Version const& recoveryVersion,
 	bool const& hasRemote,
 	UID const& relocationIntervalId); // for logging only
 // Eventually moves the given keys to the given destination team
@@ -64,32 +64,56 @@ ACTOR Future<WorkerInterface> getMasterWorker( Database cx, Reference<AsyncVar<S
 	}
 }

-//Gets the number of bytes in flight from the master
-ACTOR Future<int64_t> getDataInFlight( Database cx, WorkerInterface masterWorker ) {
+// Gets the WorkerInterface representing the data distributor.
+ACTOR Future<WorkerInterface> getDataDistributorWorker( Database cx, Reference<AsyncVar<ServerDBInfo>> dbInfo ) {
+	TraceEvent("GetDataDistributorWorker").detail("Stage", "GettingWorkers");
+
+	loop {
+		state vector<std::pair<WorkerInterface, ProcessClass>> workers = wait( getWorkers( dbInfo ) );
+		if (!dbInfo->get().distributor.present()) continue;
+
+		for( int i = 0; i < workers.size(); i++ ) {
+			if( workers[i].first.address() == dbInfo->get().distributor.get().address() ) {
+				TraceEvent("GetDataDistributorWorker").detail("Stage", "GotWorkers")
+					.detail("DataDistributorId", dbInfo->get().distributor.get().id())
+					.detail("WorkerId", workers[i].first.id());
+				return workers[i].first;
+			}
+		}
+
+		TraceEvent(SevWarn, "GetDataDistributorWorker")
+			.detail("Error", "DataDistributorWorkerNotFound")
+			.detail("DataDistributorId", dbInfo->get().distributor.get().id())
+			.detail("DataDistributorAddress", dbInfo->get().distributor.get().address())
+			.detail("WorkerCount", workers.size());
+	}
+}
+
+// Gets the number of bytes in flight from the data distributor.
+ACTOR Future<int64_t> getDataInFlight( Database cx, WorkerInterface distributorWorker ) {
 	try {
-		TraceEvent("DataInFlight").detail("Stage", "ContactingMaster");
-		TraceEventFields md = wait( timeoutError(masterWorker.eventLogRequest.getReply(
+		TraceEvent("DataInFlight").detail("Stage", "ContactingDataDistributor");
+		TraceEventFields md = wait( timeoutError(distributorWorker.eventLogRequest.getReply(
 			EventLogRequest( LiteralStringRef("TotalDataInFlight") ) ), 1.0 ) );
 		int64_t dataInFlight;
 		sscanf(md.getValue("TotalBytes").c_str(), "%lld", &dataInFlight);
 		return dataInFlight;
 	} catch( Error &e ) {
-		TraceEvent("QuietDatabaseFailure", masterWorker.id()).error(e).detail("Reason", "Failed to extract DataInFlight");
+		TraceEvent("QuietDatabaseFailure", distributorWorker.id()).error(e).detail("Reason", "Failed to extract DataInFlight");
 		throw;
 	}

 }

-//Gets the number of bytes in flight from the master
-//Convenience method that first finds the master worker from a zookeeper interface
+// Gets the number of bytes in flight from the data distributor.
 ACTOR Future<int64_t> getDataInFlight( Database cx, Reference<AsyncVar<ServerDBInfo>> dbInfo ) {
-	WorkerInterface masterWorker = wait(getMasterWorker(cx, dbInfo));
-	int64_t dataInFlight = wait(getDataInFlight(cx, masterWorker));
+	WorkerInterface distributorInterf = wait( getDataDistributorWorker(cx, dbInfo) );
+	int64_t dataInFlight = wait(getDataInFlight(cx, distributorInterf));
 	return dataInFlight;
 }

 //Computes the queue size for storage servers and tlogs using the bytesInput and bytesDurable attributes
-int64_t getQueueSize( TraceEventFields md ) {
+int64_t getQueueSize( const TraceEventFields& md ) {
 	double inputRate, durableRate;
 	double inputRoughness, durableRoughness;
 	int64_t inputBytes, durableBytes;
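The getDataDistributorWorker change above follows a simple pattern: re-read the broadcast ServerDBInfo until it names a data distributor, then match that address against the known workers, retrying if the information is stale. A standalone sketch of the lookup step, using simplified stand-in types (Worker, DbInfo) rather than the real interfaces:

#include <iostream>
#include <optional>
#include <string>
#include <vector>

struct Worker { std::string address; std::string id; };
struct DbInfo { std::optional<std::string> distributorAddress; };

// Returns the worker hosting the distributor, or nothing so the caller can retry.
std::optional<Worker> findDistributorWorker(const DbInfo& db, const std::vector<Worker>& workers) {
	if (!db.distributorAddress) return std::nullopt;           // not elected yet
	for (const auto& w : workers)
		if (w.address == *db.distributorAddress) return w;     // same process hosts the role
	return std::nullopt;                                       // stale info; caller retries
}

int main() {
	DbInfo db{ "10.0.0.7:4500" };
	std::vector<Worker> workers{ {"10.0.0.6:4500", "w1"}, {"10.0.0.7:4500", "w2"} };
	if (auto w = findDistributorWorker(db, workers))
		std::cout << "distributor runs on worker " << w->id << "\n";
}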
@@ -101,7 +125,7 @@ int64_t getQueueSize( TraceEventFields md ) {
 }

 // This is not robust in the face of a TLog failure
-ACTOR Future<int64_t> getMaxTLogQueueSize( Database cx, Reference<AsyncVar<ServerDBInfo>> dbInfo, WorkerInterface masterWorker ) {
+ACTOR Future<int64_t> getMaxTLogQueueSize( Database cx, Reference<AsyncVar<ServerDBInfo>> dbInfo ) {
 	TraceEvent("MaxTLogQueueSize").detail("Stage", "ContactingLogs");

 	state std::vector<std::pair<WorkerInterface, ProcessClass>> workers = wait(getWorkers(dbInfo));

@@ -139,12 +163,6 @@ ACTOR Future<int64_t> getMaxTLogQueueSize( Database cx, Reference<AsyncVar<Serve
 	return maxQueueSize;
 }

-ACTOR Future<int64_t> getMaxTLogQueueSize( Database cx, Reference<AsyncVar<ServerDBInfo>> dbInfo ) {
-	WorkerInterface masterWorker = wait(getMasterWorker(cx, dbInfo));
-	int64_t maxQueueSize = wait(getMaxTLogQueueSize(cx, dbInfo, masterWorker));
-	return maxQueueSize;
-}
-
 ACTOR Future<vector<StorageServerInterface>> getStorageServers( Database cx, bool use_system_priority = false) {
 	state Transaction tr( cx );
 	if (use_system_priority)

@@ -167,7 +185,7 @@ ACTOR Future<vector<StorageServerInterface>> getStorageServers( Database cx, boo
 }

 //Gets the maximum size of all the storage server queues
-ACTOR Future<int64_t> getMaxStorageServerQueueSize( Database cx, Reference<AsyncVar<ServerDBInfo>> dbInfo, WorkerInterface masterWorker ) {
+ACTOR Future<int64_t> getMaxStorageServerQueueSize( Database cx, Reference<AsyncVar<ServerDBInfo>> dbInfo ) {
 	TraceEvent("MaxStorageServerQueueSize").detail("Stage", "ContactingStorageServers");

 	Future<std::vector<StorageServerInterface>> serversFuture = getStorageServers(cx);

@@ -202,7 +220,7 @@ ACTOR Future<int64_t> getMaxStorageServerQueueSize( Database cx, Reference<Async
 			try {
 				maxQueueSize = std::max( maxQueueSize, getQueueSize( messages[i].get() ) );
 			} catch( Error &e ) {
-				TraceEvent("QuietDatabaseFailure", masterWorker.id()).detail("Reason", "Failed to extract MaxStorageServerQueue").detail("SS", servers[i].id());
+				TraceEvent("QuietDatabaseFailure").detail("Reason", "Failed to extract MaxStorageServerQueue").detail("SS", servers[i].id());
 				throw;
 			}
 		}

@@ -210,20 +228,12 @@ ACTOR Future<int64_t> getMaxStorageServerQueueSize( Database cx, Reference<Async
 	return maxQueueSize;
 }

-//Gets the maximum size of all the storage server queues
-//Convenience method that first gets the master worker and system map from a zookeeper interface
-ACTOR Future<int64_t> getMaxStorageServerQueueSize( Database cx, Reference<AsyncVar<ServerDBInfo>> dbInfo ) {
-	WorkerInterface masterWorker = wait(getMasterWorker(cx, dbInfo));
-	int64_t maxQueueSize = wait(getMaxStorageServerQueueSize(cx, dbInfo, masterWorker));
-	return maxQueueSize;
-}
-
 //Gets the size of the data distribution queue. If reportInFlight is true, then data in flight is considered part of the queue
-ACTOR Future<int64_t> getDataDistributionQueueSize( Database cx, WorkerInterface masterWorker, bool reportInFlight) {
+ACTOR Future<int64_t> getDataDistributionQueueSize( Database cx, WorkerInterface distributorWorker, bool reportInFlight) {
 	try {
-		TraceEvent("DataDistributionQueueSize").detail("Stage", "ContactingMaster");
+		TraceEvent("DataDistributionQueueSize").detail("Stage", "ContactingDataDistributor");

-		TraceEventFields movingDataMessage = wait( timeoutError(masterWorker.eventLogRequest.getReply(
+		TraceEventFields movingDataMessage = wait( timeoutError(distributorWorker.eventLogRequest.getReply(
 			EventLogRequest( LiteralStringRef("MovingData") ) ), 1.0 ) );

 		TraceEvent("DataDistributionQueueSize").detail("Stage", "GotString");

@@ -239,7 +249,7 @@ ACTOR Future<int64_t> getDataDistributionQueueSize( Database cx, WorkerInterface

 		return inQueue;
 	} catch( Error &e ) {
-		TraceEvent("QuietDatabaseFailure", masterWorker.id()).detail("Reason", "Failed to extract DataDistributionQueueSize");
+		TraceEvent("QuietDatabaseFailure", distributorWorker.id()).detail("Reason", "Failed to extract DataDistributionQueueSize");
 		throw;
 	}
 }

@@ -247,37 +257,39 @@ ACTOR Future<int64_t> getDataDistributionQueueSize( Database cx, WorkerInterface
 //Gets the size of the data distribution queue. If reportInFlight is true, then data in flight is considered part of the queue
 //Convenience method that first finds the master worker from a zookeeper interface
 ACTOR Future<int64_t> getDataDistributionQueueSize( Database cx, Reference<AsyncVar<ServerDBInfo>> dbInfo, bool reportInFlight ) {
-	WorkerInterface masterWorker = wait(getMasterWorker(cx, dbInfo));
-	int64_t inQueue = wait(getDataDistributionQueueSize( cx, masterWorker, reportInFlight));
+	WorkerInterface distributorInterf = wait( getDataDistributorWorker(cx, dbInfo) );
+	int64_t inQueue = wait( getDataDistributionQueueSize( cx, distributorInterf, reportInFlight) );
 	return inQueue;
 }

-//Checks that data distribution is active
-ACTOR Future<bool> getDataDistributionActive( Database cx, WorkerInterface masterWorker ) {
+// Checks that data distribution is active
+ACTOR Future<bool> getDataDistributionActive( Database cx, WorkerInterface distributorWorker ) {
 	try {
-		TraceEvent("DataDistributionActive").detail("Stage", "ContactingMaster");
+		TraceEvent("DataDistributionActive").detail("Stage", "ContactingDataDistributor");

-		TraceEventFields activeMessage = wait( timeoutError(masterWorker.eventLogRequest.getReply(
+		TraceEventFields activeMessage = wait( timeoutError(distributorWorker.eventLogRequest.getReply(
 			EventLogRequest( LiteralStringRef("DDTrackerStarting") ) ), 1.0 ) );

 		return activeMessage.getValue("State") == "Active";
 	} catch( Error &e ) {
-		TraceEvent("QuietDatabaseFailure", masterWorker.id()).detail("Reason", "Failed to extract DataDistributionActive");
+		TraceEvent("QuietDatabaseFailure", distributorWorker.id()).detail("Reason", "Failed to extract DataDistributionActive");
 		throw;
 	}
 }

-//Checks to see if any storage servers are being recruited
-ACTOR Future<bool> getStorageServersRecruiting( Database cx, Reference<AsyncVar<ServerDBInfo>> dbInfo, WorkerInterface masterWorker ) {
+// Checks to see if any storage servers are being recruited
+ACTOR Future<bool> getStorageServersRecruiting( Database cx, WorkerInterface distributorWorker, UID distributorUID ) {
 	try {
-		TraceEvent("StorageServersRecruiting").detail("Stage", "ContactingMaster");
-		TraceEventFields recruitingMessage = wait( timeoutError(masterWorker.eventLogRequest.getReply(
-			EventLogRequest( StringRef( "StorageServerRecruitment_" + dbInfo->get().master.id().toString()) ) ), 1.0 ) );
+		TraceEvent("StorageServersRecruiting").detail("Stage", "ContactingDataDistributor");
+		TraceEventFields recruitingMessage = wait( timeoutError(distributorWorker.eventLogRequest.getReply(
+			EventLogRequest( StringRef( "StorageServerRecruitment_" + distributorUID.toString()) ) ), 1.0 ) );

+		TraceEvent("StorageServersRecruiting").detail("Message", recruitingMessage.toString());
 		return recruitingMessage.getValue("State") == "Recruiting";
 	} catch( Error &e ) {
-		TraceEvent("QuietDatabaseFailure", masterWorker.id()).detail("Reason", "Failed to extract StorageServersRecruiting").detail("MasterID", dbInfo->get().master.id());
+		TraceEvent("QuietDatabaseFailure", distributorWorker.id())
+			.detail("Reason", "Failed to extract StorageServersRecruiting")
+			.detail("DataDistributorID", distributorUID);
 		throw;
 	}
 }

@@ -323,16 +335,17 @@ ACTOR Future<Void> waitForQuietDatabase( Database cx, Reference<AsyncVar<ServerD

 	loop {
 		try {
-			TraceEvent("QuietDatabaseWaitingOnMaster");
-			WorkerInterface masterWorker = wait(getMasterWorker( cx, dbInfo ));
-			TraceEvent("QuietDatabaseGotMaster");
+			TraceEvent("QuietDatabaseWaitingOnDataDistributor");
+			WorkerInterface distributorWorker = wait( getDataDistributorWorker( cx, dbInfo ) );
+			UID distributorUID = dbInfo->get().distributor.get().id();
+			TraceEvent("QuietDatabaseGotDataDistributor", distributorUID).detail("Locality", distributorWorker.locality.toString());

-			state Future<int64_t> dataInFlight = getDataInFlight( cx, masterWorker);
-			state Future<int64_t> tLogQueueSize = getMaxTLogQueueSize( cx, dbInfo, masterWorker );
-			state Future<int64_t> dataDistributionQueueSize = getDataDistributionQueueSize( cx, masterWorker, dataInFlightGate == 0);
-			state Future<int64_t> storageQueueSize = getMaxStorageServerQueueSize( cx, dbInfo, masterWorker );
-			state Future<bool> dataDistributionActive = getDataDistributionActive( cx, masterWorker );
-			state Future<bool> storageServersRecruiting = getStorageServersRecruiting ( cx, dbInfo, masterWorker );
+			state Future<int64_t> dataInFlight = getDataInFlight( cx, distributorWorker);
+			state Future<int64_t> tLogQueueSize = getMaxTLogQueueSize( cx, dbInfo );
+			state Future<int64_t> dataDistributionQueueSize = getDataDistributionQueueSize( cx, distributorWorker, dataInFlightGate == 0);
+			state Future<int64_t> storageQueueSize = getMaxStorageServerQueueSize( cx, dbInfo );
+			state Future<bool> dataDistributionActive = getDataDistributionActive( cx, distributorWorker );
+			state Future<bool> storageServersRecruiting = getStorageServersRecruiting ( cx, distributorWorker, distributorUID );

 			wait( success( dataInFlight ) && success( tLogQueueSize ) && success( dataDistributionQueueSize )
 				&& success( storageQueueSize ) && success( dataDistributionActive ) && success( storageServersRecruiting ) );
@@ -23,9 +23,11 @@
 #pragma once

 #include "fdbserver/ClusterRecruitmentInterface.h"
+#include "fdbserver/DataDistributorInterface.h"
 #include "fdbserver/MasterInterface.h"
 #include "fdbserver/LogSystemConfig.h"
 #include "fdbserver/RecoveryState.h"
+#include "fdbserver/LatencyBandConfig.h"

 struct ServerDBInfo {
 	// This structure contains transient information which is broadcast to all workers for a database,

@@ -35,6 +37,7 @@ struct ServerDBInfo {
 	UID id; // Changes each time any other member changes
 	ClusterControllerFullInterface clusterInterface;
 	ClientDBInfo client; // After a successful recovery, eventually proxies that communicate with it
+	Optional<DataDistributorInterface> distributor; // The best guess of current data distributor.
 	MasterInterface master; // The best guess as to the most recent master, which might still be recovering
 	vector<ResolverInterface> resolvers;
 	DBRecoveryCount recoveryCount; // A recovery count from DBCoreState. A successful master recovery increments it twice; unsuccessful recoveries may increment it once. Depending on where the current master is in its recovery process, this might not have been written by the current master.

@@ -43,6 +46,7 @@ struct ServerDBInfo {
 	LocalityData myLocality; // (Not serialized) Locality information, if available, for the *local* process
 	LogSystemConfig logSystemConfig;
 	std::vector<UID> priorCommittedLogServers; // If !fullyRecovered and logSystemConfig refers to a new log system which may not have been committed to the coordinated state yet, then priorCommittedLogServers are the previous, fully committed generation which need to stay alive in case this recovery fails
+	Optional<LatencyBandConfig> latencyBandConfig;

 	explicit ServerDBInfo() : recoveryCount(0), recoveryState(RecoveryState::UNINITIALIZED) {}

@@ -51,7 +55,7 @@ struct ServerDBInfo {

 	template <class Ar>
 	void serialize( Ar& ar ) {
-		serializer(ar, id, clusterInterface, client, master, resolvers, recoveryCount, masterLifetime, logSystemConfig, priorCommittedLogServers, recoveryState);
+		serializer(ar, id, clusterInterface, client, distributor, master, resolvers, recoveryCount, recoveryState, masterLifetime, logSystemConfig, priorCommittedLogServers, latencyBandConfig);
 	}
 };
@@ -80,7 +80,8 @@ extern int limitReasonEnd;
 extern const char* limitReasonName[];
 extern const char* limitReasonDesc[];

 struct WorkerEvents : std::map<NetworkAddress, TraceEventFields> {};
+typedef std::map<std::string, TraceEventFields> EventMap;

 ACTOR static Future< Optional<TraceEventFields> > latestEventOnWorker(WorkerInterface worker, std::string eventName) {
 	try {

@@ -160,39 +161,52 @@ static Optional<std::pair<WorkerInterface, ProcessClass>> getWorker(std::map<Net
 }

 class StatusCounter {
 public:
-	StatusCounter(double hz=0.0, double roughness=0.0, int64_t counter=0) : _hz(hz), _roughness(roughness), _counter(counter) {}
-	StatusCounter(const std::string& parsableText) {
-		parseText(parsableText);
-	}
+	StatusCounter() : hz(0), roughness(0), counter(0) {}
+	StatusCounter(double hz, double roughness, int64_t counter) : hz(hz), roughness(roughness), counter(counter) {}
+	StatusCounter(const std::string& parsableText) {
+		parseText(parsableText);
+	}

 	StatusCounter& parseText(const std::string& parsableText) {
-		sscanf(parsableText.c_str(), "%lf %lf %lld", &_hz, &_roughness, &_counter);
+		sscanf(parsableText.c_str(), "%lf %lf %lld", &hz, &roughness, &counter);
 		return *this;
 	}

 	StatusCounter& updateValues(const StatusCounter& statusCounter) {
-		double hzNew = _hz + statusCounter._hz;
-		double roughnessNew = (_hz + statusCounter._hz) ? (_roughness*_hz + statusCounter._roughness*statusCounter._hz) / (_hz + statusCounter._hz) : 0.0;
-		int64_t counterNew = _counter + statusCounter._counter;
-		_hz = hzNew;
-		_roughness = roughnessNew;
-		_counter = counterNew;
+		double hzNew = hz + statusCounter.hz;
+		double roughnessNew = (hz + statusCounter.hz) ? (roughness*hz + statusCounter.roughness*statusCounter.hz) / (hz + statusCounter.hz) : 0.0;
+		int64_t counterNew = counter + statusCounter.counter;
+		hz = hzNew;
+		roughness = roughnessNew;
+		counter = counterNew;
 		return *this;
 	}

 	JsonBuilderObject getStatus() const {
 		JsonBuilderObject statusObject;
-		statusObject["hz"] = _hz;
-		statusObject["roughness"] = _roughness;
-		statusObject["counter"] = _counter;
+		statusObject["hz"] = hz;
+		statusObject["roughness"] = roughness;
+		statusObject["counter"] = counter;
 		return statusObject;
 	}

-protected:
-	double _hz;
-	double _roughness;
-	int64_t _counter;
+	double getHz() {
+		return hz;
+	}
+
+	double getRoughness() {
+		return roughness;
+	}
+
+	int64_t getCounter() {
+		return counter;
+	}
+
+protected:
+	double hz;
+	double roughness;
+	int64_t counter;
 };

 static double parseDouble(std::string const& s, bool permissive = false) {
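The updateValues merge above combines two rate samples: hz and counter add directly, while roughness becomes an hz-weighted average, i.e. roughness' = (r1*hz1 + r2*hz2) / (hz1 + hz2) when the combined rate is nonzero. A standalone sketch of the same arithmetic, with a simplified CounterSample stand-in instead of the real StatusCounter:

#include <cstdint>
#include <cstdio>

// Rates and counters add; roughness is an hz-weighted average so busier sources dominate.
struct CounterSample { double hz = 0, roughness = 0; int64_t counter = 0; };

CounterSample merge(CounterSample a, const CounterSample& b) {
	double hz = a.hz + b.hz;
	a.roughness = hz ? (a.roughness * a.hz + b.roughness * b.hz) / hz : 0.0;
	a.hz = hz;
	a.counter += b.counter;
	return a;
}

int main() {
	CounterSample total = merge({100, 0.5, 1000}, {300, 1.5, 9000});
	// hz = 400, roughness = (0.5*100 + 1.5*300)/400 = 1.25, counter = 10000
	std::printf("hz=%.0f roughness=%.2f counter=%lld\n", total.hz, total.roughness, (long long)total.counter);
}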
@@ -290,7 +304,7 @@ static JsonBuilderObject machineStatusFetcher(WorkerEvents mMetrics, vector<std:
 	std::map<std::string, int32_t> workerContribMap;
 	std::map<std::string, JsonBuilderObject> machineJsonMap;

-	for (auto worker : workers){
+	for (auto const& worker : workers){
 		locality[worker.first.address()] = worker.first.locality;
 		if (worker.first.locality.dcId().present())
 			dcIds[worker.first.address()] = worker.first.locality.dcId().get().printable();

@@ -394,40 +408,65 @@ struct MachineMemoryInfo {

 struct RolesInfo {
 	std::multimap<NetworkAddress, JsonBuilderObject> roles;

+	JsonBuilderObject addLatencyBandInfo(TraceEventFields const& metrics) {
+		JsonBuilderObject latency;
+		std::map<std::string, JsonBuilderObject> bands;
+
+		for(auto itr = metrics.begin(); itr != metrics.end(); ++itr) {
+			std::string band;
+			if(itr->first.substr(0, 4) == "Band") {
+				band = itr->first.substr(4);
+			}
+			else if(itr->first == "Filtered") {
+				band = "filtered";
+			}
+			else {
+				continue;
+			}
+
+			latency[band] = StatusCounter(itr->second).getCounter();
+		}
+
+		return latency;
+	}
+
 	JsonBuilderObject& addRole( NetworkAddress address, std::string const& role, UID id) {
 		JsonBuilderObject obj;
 		obj["id"] = id.shortString();
 		obj["role"] = role;
 		return roles.insert( std::make_pair(address, obj ))->second;
 	}
-	JsonBuilderObject& addRole(std::string const& role, StorageServerInterface& iface, TraceEventFields const& metrics, Version maxTLogVersion, double* pDataLagSeconds) {
+	JsonBuilderObject& addRole(std::string const& role, StorageServerInterface& iface, EventMap const& metrics, Version maxTLogVersion, double* pDataLagSeconds) {
 		JsonBuilderObject obj;
 		double dataLagSeconds = -1.0;
 		obj["id"] = iface.id().shortString();
 		obj["role"] = role;
 		try {
-			obj.setKeyRawNumber("stored_bytes", metrics.getValue("BytesStored"));
-			obj.setKeyRawNumber("kvstore_used_bytes", metrics.getValue("KvstoreBytesUsed"));
-			obj.setKeyRawNumber("kvstore_free_bytes", metrics.getValue("KvstoreBytesFree"));
-			obj.setKeyRawNumber("kvstore_available_bytes", metrics.getValue("KvstoreBytesAvailable"));
-			obj.setKeyRawNumber("kvstore_total_bytes", metrics.getValue("KvstoreBytesTotal"));
-			obj["input_bytes"] = StatusCounter(metrics.getValue("BytesInput")).getStatus();
-			obj["durable_bytes"] = StatusCounter(metrics.getValue("BytesDurable")).getStatus();
-			obj.setKeyRawNumber("query_queue_max", metrics.getValue("QueryQueueMax"));
-			obj["total_queries"] = StatusCounter(metrics.getValue("QueryQueue")).getStatus();
-			obj["finished_queries"] = StatusCounter(metrics.getValue("FinishedQueries")).getStatus();
-			obj["bytes_queried"] = StatusCounter(metrics.getValue("BytesQueried")).getStatus();
-			obj["keys_queried"] = StatusCounter(metrics.getValue("RowsQueried")).getStatus();
-			obj["mutation_bytes"] = StatusCounter(metrics.getValue("MutationBytes")).getStatus();
-			obj["mutations"] = StatusCounter(metrics.getValue("Mutations")).getStatus();
-
-			Version version = parseInt64(metrics.getValue("Version"));
-			Version durableVersion = parseInt64(metrics.getValue("DurableVersion"));
+			TraceEventFields const& storageMetrics = metrics.at("StorageMetrics");
+
+			obj.setKeyRawNumber("stored_bytes", storageMetrics.getValue("BytesStored"));
+			obj.setKeyRawNumber("kvstore_used_bytes", storageMetrics.getValue("KvstoreBytesUsed"));
+			obj.setKeyRawNumber("kvstore_free_bytes", storageMetrics.getValue("KvstoreBytesFree"));
+			obj.setKeyRawNumber("kvstore_available_bytes", storageMetrics.getValue("KvstoreBytesAvailable"));
+			obj.setKeyRawNumber("kvstore_total_bytes", storageMetrics.getValue("KvstoreBytesTotal"));
+			obj["input_bytes"] = StatusCounter(storageMetrics.getValue("BytesInput")).getStatus();
+			obj["durable_bytes"] = StatusCounter(storageMetrics.getValue("BytesDurable")).getStatus();
+			obj.setKeyRawNumber("query_queue_max", storageMetrics.getValue("QueryQueueMax"));
+			obj["total_queries"] = StatusCounter(storageMetrics.getValue("QueryQueue")).getStatus();
+			obj["finished_queries"] = StatusCounter(storageMetrics.getValue("FinishedQueries")).getStatus();
+			obj["bytes_queried"] = StatusCounter(storageMetrics.getValue("BytesQueried")).getStatus();
+			obj["keys_queried"] = StatusCounter(storageMetrics.getValue("RowsQueried")).getStatus();
+			obj["mutation_bytes"] = StatusCounter(storageMetrics.getValue("MutationBytes")).getStatus();
+			obj["mutations"] = StatusCounter(storageMetrics.getValue("Mutations")).getStatus();
+
+			Version version = parseInt64(storageMetrics.getValue("Version"));
+			Version durableVersion = parseInt64(storageMetrics.getValue("DurableVersion"));

 			obj["data_version"] = version;
 			obj["durable_version"] = durableVersion;

-			int64_t versionLag = parseInt64(metrics.getValue("VersionLag"));
+			int64_t versionLag = parseInt64(storageMetrics.getValue("VersionLag"));
 			if(maxTLogVersion > 0) {
 				// It's possible that the storage server hasn't talked to the logs recently, in which case it may not be aware of how far behind it is.
 				// To account for that, we also compute the version difference between each storage server and the tlog with the largest version.

@@ -437,6 +476,11 @@ struct RolesInfo {
 				versionLag = std::max<int64_t>(versionLag, maxTLogVersion - version - SERVER_KNOBS->STORAGE_LOGGING_DELAY * SERVER_KNOBS->VERSIONS_PER_SECOND);
 			}

+			TraceEventFields const& readLatencyMetrics = metrics.at("ReadLatencyMetrics");
+			if(readLatencyMetrics.size()) {
+				obj["read_latency_bands"] = addLatencyBandInfo(readLatencyMetrics);
+			}
+
 			JsonBuilderObject dataLag;
 			dataLag["versions"] = versionLag;
 			dataLagSeconds = versionLag / (double)SERVER_KNOBS->VERSIONS_PER_SECOND;

@@ -453,27 +497,32 @@ struct RolesInfo {
 			if(e.code() != error_code_attribute_not_found)
 				throw e;
 		}
-		if (pDataLagSeconds)
+
+		if (pDataLagSeconds) {
 			*pDataLagSeconds = dataLagSeconds;
+		}
+
 		return roles.insert( std::make_pair(iface.address(), obj ))->second;
 	}
-	JsonBuilderObject& addRole(std::string const& role, TLogInterface& iface, TraceEventFields const& metrics, Version* pMetricVersion) {
+	JsonBuilderObject& addRole(std::string const& role, TLogInterface& iface, EventMap const& metrics, Version* pMetricVersion) {
 		JsonBuilderObject obj;
 		Version metricVersion = 0;
 		obj["id"] = iface.id().shortString();
 		obj["role"] = role;
 		try {
-			obj.setKeyRawNumber("kvstore_used_bytes",metrics.getValue("KvstoreBytesUsed"));
-			obj.setKeyRawNumber("kvstore_free_bytes",metrics.getValue("KvstoreBytesFree"));
-			obj.setKeyRawNumber("kvstore_available_bytes",metrics.getValue("KvstoreBytesAvailable"));
-			obj.setKeyRawNumber("kvstore_total_bytes",metrics.getValue("KvstoreBytesTotal"));
-			obj.setKeyRawNumber("queue_disk_used_bytes",metrics.getValue("QueueDiskBytesUsed"));
-			obj.setKeyRawNumber("queue_disk_free_bytes",metrics.getValue("QueueDiskBytesFree"));
-			obj.setKeyRawNumber("queue_disk_available_bytes",metrics.getValue("QueueDiskBytesAvailable"));
-			obj.setKeyRawNumber("queue_disk_total_bytes",metrics.getValue("QueueDiskBytesTotal"));
-			obj["input_bytes"] = StatusCounter(metrics.getValue("BytesInput")).getStatus();
-			obj["durable_bytes"] = StatusCounter(metrics.getValue("BytesDurable")).getStatus();
-			metricVersion = parseInt64(metrics.getValue("Version"));
+			TraceEventFields const& tlogMetrics = metrics.at("TLogMetrics");
+
+			obj.setKeyRawNumber("kvstore_used_bytes", tlogMetrics.getValue("KvstoreBytesUsed"));
+			obj.setKeyRawNumber("kvstore_free_bytes", tlogMetrics.getValue("KvstoreBytesFree"));
+			obj.setKeyRawNumber("kvstore_available_bytes", tlogMetrics.getValue("KvstoreBytesAvailable"));
+			obj.setKeyRawNumber("kvstore_total_bytes", tlogMetrics.getValue("KvstoreBytesTotal"));
+			obj.setKeyRawNumber("queue_disk_used_bytes", tlogMetrics.getValue("QueueDiskBytesUsed"));
+			obj.setKeyRawNumber("queue_disk_free_bytes", tlogMetrics.getValue("QueueDiskBytesFree"));
+			obj.setKeyRawNumber("queue_disk_available_bytes", tlogMetrics.getValue("QueueDiskBytesAvailable"));
+			obj.setKeyRawNumber("queue_disk_total_bytes", tlogMetrics.getValue("QueueDiskBytesTotal"));
+			obj["input_bytes"] = StatusCounter(tlogMetrics.getValue("BytesInput")).getStatus();
+			obj["durable_bytes"] = StatusCounter(tlogMetrics.getValue("BytesDurable")).getStatus();
+			metricVersion = parseInt64(tlogMetrics.getValue("Version"));
 			obj["data_version"] = metricVersion;
 		} catch (Error& e) {
 			if(e.code() != error_code_attribute_not_found)

@@ -483,6 +532,28 @@ struct RolesInfo {
 		*pMetricVersion = metricVersion;
 		return roles.insert( std::make_pair(iface.address(), obj ))->second;
 	}
+	JsonBuilderObject& addRole(std::string const& role, MasterProxyInterface& iface, EventMap const& metrics) {
+		JsonBuilderObject obj;
+		obj["id"] = iface.id().shortString();
+		obj["role"] = role;
+		try {
+			TraceEventFields const& grvLatencyMetrics = metrics.at("GRVLatencyMetrics");
+			if(grvLatencyMetrics.size()) {
+				obj["grv_latency_bands"] = addLatencyBandInfo(grvLatencyMetrics);
+			}
+
+			TraceEventFields const& commitLatencyMetrics = metrics.at("CommitLatencyMetrics");
+			if(commitLatencyMetrics.size()) {
+				obj["commit_latency_bands"] = addLatencyBandInfo(commitLatencyMetrics);
+			}
+		} catch (Error &e) {
+			if(e.code() != error_code_attribute_not_found) {
+				throw e;
+			}
+		}
+
+		return roles.insert( std::make_pair(iface.address(), obj ))->second;
+	}
 	template <class InterfaceType>
 	JsonBuilderObject& addRole(std::string const& role, InterfaceType& iface) {
 		return addRole(iface.address(), role, iface.id());

@@ -507,8 +578,9 @@ ACTOR static Future<JsonBuilderObject> processStatusFetcher(
 	WorkerEvents traceFileOpenErrors,
 	WorkerEvents programStarts,
 	std::map<std::string, JsonBuilderObject> processIssues,
-	vector<std::pair<StorageServerInterface, TraceEventFields>> storageServers,
-	vector<std::pair<TLogInterface, TraceEventFields>> tLogs,
+	vector<std::pair<StorageServerInterface, EventMap>> storageServers,
+	vector<std::pair<TLogInterface, EventMap>> tLogs,
+	vector<std::pair<MasterProxyInterface, EventMap>> proxies,
 	Database cx,
 	Optional<DatabaseConfiguration> configuration,
 	std::set<std::string> *incomplete_reasons) {

@@ -567,16 +639,13 @@ ACTOR static Future<JsonBuilderObject> processStatusFetcher(
 	roles.addRole("master", db->get().master);
 	roles.addRole("cluster_controller", db->get().clusterInterface.clientInterface);

-	state Reference<ProxyInfo> proxies = cx->getMasterProxies();
-	if (proxies) {
-		state int proxyIndex;
-		for(proxyIndex = 0; proxyIndex < proxies->size(); proxyIndex++) {
-			roles.addRole( "proxy", proxies->getInterface(proxyIndex) );
-			wait(yield());
-		}
+	state std::vector<std::pair<MasterProxyInterface, EventMap>>::iterator proxy;
+	for(proxy = proxies.begin(); proxy != proxies.end(); ++proxy) {
+		roles.addRole( "proxy", proxy->first, proxy->second );
+		wait(yield());
 	}

-	state std::vector<std::pair<TLogInterface, TraceEventFields>>::iterator log;
+	state std::vector<std::pair<TLogInterface, EventMap>>::iterator log;
 	state Version maxTLogVersion = 0;

 	// Get largest TLog version

@@ -587,7 +656,7 @@ ACTOR static Future<JsonBuilderObject> processStatusFetcher(
 		wait(yield());
 	}

-	state std::vector<std::pair<StorageServerInterface, TraceEventFields>>::iterator ss;
+	state std::vector<std::pair<StorageServerInterface, EventMap>>::iterator ss;
 	state std::map<NetworkAddress, double> ssLag;
 	state double lagSeconds;
 	for(ss = storageServers.begin(); ss != storageServers.end(); ++ss) {
@@ -1217,37 +1286,68 @@ namespace std
 }

 ACTOR template <class iface>
-static Future<vector<std::pair<iface, TraceEventFields>>> getServerMetrics(vector<iface> servers, std::unordered_map<NetworkAddress, WorkerInterface> address_workers, std::string suffix) {
+static Future<vector<std::pair<iface, EventMap>>> getServerMetrics(vector<iface> servers, std::unordered_map<NetworkAddress, WorkerInterface> address_workers, std::vector<std::string> eventNames) {
 	state vector<Future<Optional<TraceEventFields>>> futures;
 	for (auto s : servers) {
-		futures.push_back(latestEventOnWorker(address_workers[s.address()], s.id().toString() + suffix));
+		for (auto name : eventNames) {
+			futures.push_back(latestEventOnWorker(address_workers[s.address()], s.id().toString() + "/" + name));
+		}
 	}

 	wait(waitForAll(futures));

-	vector<std::pair<iface, TraceEventFields>> results;
+	vector<std::pair<iface, EventMap>> results;
+	auto futureItr = futures.begin();
+
 	for (int i = 0; i < servers.size(); i++) {
-		results.push_back(std::make_pair(servers[i], futures[i].get().present() ? futures[i].get().get() : TraceEventFields()));
+		EventMap serverResults;
+		for (auto name : eventNames) {
+			ASSERT(futureItr != futures.end());
+			serverResults[name] = futureItr->get().present() ? futureItr->get().get() : TraceEventFields();
+			++futureItr;
+		}
+
+		results.push_back(std::make_pair(servers[i], serverResults));
 	}

 	return results;
 }

-ACTOR static Future<vector<std::pair<StorageServerInterface, TraceEventFields>>> getStorageServersAndMetrics(Database cx, std::unordered_map<NetworkAddress, WorkerInterface> address_workers) {
+ACTOR static Future<vector<std::pair<StorageServerInterface, EventMap>>> getStorageServersAndMetrics(Database cx, std::unordered_map<NetworkAddress, WorkerInterface> address_workers) {
 	vector<StorageServerInterface> servers = wait(timeoutError(getStorageServers(cx, true), 5.0));
-	vector<std::pair<StorageServerInterface, TraceEventFields>> results = wait(getServerMetrics(servers, address_workers, "/StorageMetrics"));
+	vector<std::pair<StorageServerInterface, EventMap>> results = wait(getServerMetrics(servers, address_workers,
+		std::vector<std::string>{ "StorageMetrics", "ReadLatencyMetrics" }));

 	return results;
 }

-ACTOR static Future<vector<std::pair<TLogInterface, TraceEventFields>>> getTLogsAndMetrics(Reference<AsyncVar<struct ServerDBInfo>> db, std::unordered_map<NetworkAddress, WorkerInterface> address_workers) {
+ACTOR static Future<vector<std::pair<TLogInterface, EventMap>>> getTLogsAndMetrics(Reference<AsyncVar<struct ServerDBInfo>> db, std::unordered_map<NetworkAddress, WorkerInterface> address_workers) {
 	vector<TLogInterface> servers = db->get().logSystemConfig.allPresentLogs();
-	vector<std::pair<TLogInterface, TraceEventFields>> results = wait(getServerMetrics(servers, address_workers, "/TLogMetrics"));
+	vector<std::pair<TLogInterface, EventMap>> results = wait(getServerMetrics(servers, address_workers,
+		std::vector<std::string>{ "TLogMetrics" }));

 	return results;
 }

-static int getExtraTLogEligibleMachines(vector<std::pair<WorkerInterface, ProcessClass>> workers, DatabaseConfiguration configuration) {
+ACTOR static Future<vector<std::pair<MasterProxyInterface, EventMap>>> getProxiesAndMetrics(Database cx, std::unordered_map<NetworkAddress, WorkerInterface> address_workers) {
+	Reference<ProxyInfo> proxyInfo = cx->getMasterProxies();
+	std::vector<MasterProxyInterface> servers;
+	if(proxyInfo) {
+		for(int i = 0; i < proxyInfo->size(); ++i) {
+			servers.push_back(proxyInfo->getInterface(i));
+		}
+	}
+
+	vector<std::pair<MasterProxyInterface, EventMap>> results = wait(getServerMetrics(servers, address_workers,
+		std::vector<std::string>{ "GRVLatencyMetrics", "CommitLatencyMetrics" }));
+
+	return results;
+}
+
+static int getExtraTLogEligibleMachines(const vector<std::pair<WorkerInterface, ProcessClass>>& workers, const DatabaseConfiguration& configuration) {
 	std::set<StringRef> allMachines;
 	std::map<Key,std::set<StringRef>> dcId_machine;
-	for(auto worker : workers) {
+	for(auto const& worker : workers) {
 		if(worker.second.machineClassFitness(ProcessClass::TLog) < ProcessClass::NeverAssign
 			&& !configuration.isExcludedServer(worker.first.address()))
 		{
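The getServerMetrics rework above changes the result from one TraceEventFields per server to an EventMap keyed by event name, so callers such as getStorageServersAndMetrics can request several events (for example StorageMetrics plus ReadLatencyMetrics) in a single pass. A standalone sketch of that grouping, using simplified stand-in types rather than the real FDB classes:

#include <iostream>
#include <map>
#include <string>
#include <vector>

// One request per (server, event name); replies stay keyed by event name.
using TraceFields = std::map<std::string, std::string>;
using EventMap = std::map<std::string, TraceFields>;

EventMap collectEvents(const std::string& serverId, const std::vector<std::string>& names) {
	EventMap result;
	for (const auto& name : names) {
		// The real code issues a latest-event request to the hosting worker for
		// "<serverId>/<name>"; here we only record which event would be fetched.
		result[name] = TraceFields{{"RequestedEvent", serverId + "/" + name}};
	}
	return result;
}

int main() {
	EventMap m = collectEvents("ssid-1234", {"StorageMetrics", "ReadLatencyMetrics"});
	std::cout << m.at("StorageMetrics").at("RequestedEvent") << "\n";
}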
@@ -1284,7 +1384,7 @@ static int getExtraTLogEligibleMachines(vector<std::pair<WorkerInterface, Proces
 }

 ACTOR static Future<JsonBuilderObject> workloadStatusFetcher(Reference<AsyncVar<struct ServerDBInfo>> db, vector<std::pair<WorkerInterface, ProcessClass>> workers, std::pair<WorkerInterface, ProcessClass> mWorker,
-	JsonBuilderObject *qos, JsonBuilderObject *data_overlay, std::set<std::string> *incomplete_reasons, Future<ErrorOr<vector<std::pair<StorageServerInterface, TraceEventFields>>>> storageServerFuture)
+	JsonBuilderObject *qos, JsonBuilderObject *data_overlay, std::set<std::string> *incomplete_reasons, Future<ErrorOr<vector<std::pair<StorageServerInterface, EventMap>>>> storageServerFuture)
 {
 	state JsonBuilderObject statusObj;
 	state JsonBuilderObject operationsObj;

@@ -1295,7 +1395,7 @@ ACTOR static Future<JsonBuilderObject> workloadStatusFetcher(Reference<AsyncVar<
 	try {
 		vector<Future<TraceEventFields>> proxyStatFutures;
 		std::map<NetworkAddress, std::pair<WorkerInterface, ProcessClass>> workersMap;
-		for (auto w : workers) {
+		for (auto const& w : workers) {
 			workersMap[w.first.address()] = w;
 		}
 		for (auto &p : db->get().client.proxies) {

@@ -1387,7 +1487,7 @@ ACTOR static Future<JsonBuilderObject> workloadStatusFetcher(Reference<AsyncVar<

 	// Reads
 	try {
-		ErrorOr<vector<std::pair<StorageServerInterface, TraceEventFields>>> storageServers = wait(storageServerFuture);
+		ErrorOr<vector<std::pair<StorageServerInterface, EventMap>>> storageServers = wait(storageServerFuture);
 		if(!storageServers.present()) {
 			throw storageServers.getError();
 		}

@@ -1398,10 +1498,12 @@ ACTOR static Future<JsonBuilderObject> workloadStatusFetcher(Reference<AsyncVar<
 		StatusCounter readBytes;

 		for(auto &ss : storageServers.get()) {
-			readRequests.updateValues( StatusCounter(ss.second.getValue("QueryQueue")));
-			reads.updateValues( StatusCounter(ss.second.getValue("FinishedQueries")));
-			readKeys.updateValues( StatusCounter(ss.second.getValue("RowsQueried")));
-			readBytes.updateValues( StatusCounter(ss.second.getValue("BytesQueried")));
+			TraceEventFields const& storageMetrics = ss.second.at("StorageMetrics");
+
+			readRequests.updateValues( StatusCounter(storageMetrics.getValue("QueryQueue")));
+			reads.updateValues( StatusCounter(storageMetrics.getValue("FinishedQueries")));
+			readKeys.updateValues( StatusCounter(storageMetrics.getValue("RowsQueried")));
+			readBytes.updateValues( StatusCounter(storageMetrics.getValue("BytesQueried")));
 		}

 		operationsObj["read_requests"] = readRequests.getStatus();

@@ -1778,8 +1880,9 @@ ACTOR Future<StatusReply> clusterGetStatus(
 			}

 			state std::map<std::string, JsonBuilderObject> processIssues = getProcessIssuesAsMessages(workerIssues);
-			state vector<std::pair<StorageServerInterface, TraceEventFields>> storageServers;
-			state vector<std::pair<TLogInterface, TraceEventFields>> tLogs;
+			state vector<std::pair<StorageServerInterface, EventMap>> storageServers;
+			state vector<std::pair<TLogInterface, EventMap>> tLogs;
+			state vector<std::pair<MasterProxyInterface, EventMap>> proxies;
 			state JsonBuilderObject qos;
 			state JsonBuilderObject data_overlay;

@@ -1814,10 +1917,13 @@
 			// Start getting storage servers now (using system priority) concurrently. Using sys priority because having storage servers
 			// in status output is important to give context to error messages in status that reference a storage server role ID.
 			state std::unordered_map<NetworkAddress, WorkerInterface> address_workers;
-			for (auto worker : workers)
+			for (auto const& worker : workers) {
 				address_workers[worker.first.address()] = worker.first;
-			state Future<ErrorOr<vector<std::pair<StorageServerInterface, TraceEventFields>>>> storageServerFuture = errorOr(getStorageServersAndMetrics(cx, address_workers));
-			state Future<ErrorOr<vector<std::pair<TLogInterface, TraceEventFields>>>> tLogFuture = errorOr(getTLogsAndMetrics(db, address_workers));
+			}
+
+			state Future<ErrorOr<vector<std::pair<StorageServerInterface, EventMap>>>> storageServerFuture = errorOr(getStorageServersAndMetrics(cx, address_workers));
+			state Future<ErrorOr<vector<std::pair<TLogInterface, EventMap>>>> tLogFuture = errorOr(getTLogsAndMetrics(db, address_workers));
+			state Future<ErrorOr<vector<std::pair<MasterProxyInterface, EventMap>>>> proxyFuture = errorOr(getProxiesAndMetrics(cx, address_workers));

 			state int minReplicasRemaining = -1;
 			std::vector<Future<JsonBuilderObject>> futures2;

@@ -1870,20 +1976,31 @@
 			}

 			// Need storage servers now for processStatusFetcher() below.
-			ErrorOr<vector<std::pair<StorageServerInterface, TraceEventFields>>> _storageServers = wait(storageServerFuture);
+			ErrorOr<vector<std::pair<StorageServerInterface, EventMap>>> _storageServers = wait(storageServerFuture);
 			if (_storageServers.present()) {
 				storageServers = _storageServers.get();
 			}
-			else
+			else {
 				messages.push_back(JsonBuilder::makeMessage("storage_servers_error", "Timed out trying to retrieve storage servers."));
+			}

 			// ...also tlogs
-			ErrorOr<vector<std::pair<TLogInterface, TraceEventFields>>> _tLogs = wait(tLogFuture);
+			ErrorOr<vector<std::pair<TLogInterface, EventMap>>> _tLogs = wait(tLogFuture);
 			if (_tLogs.present()) {
 				tLogs = _tLogs.get();
 			}
-			else
+			else {
 				messages.push_back(JsonBuilder::makeMessage("log_servers_error", "Timed out trying to retrieve log servers."));
+			}
+
+			// ...also proxies
+			ErrorOr<vector<std::pair<MasterProxyInterface, EventMap>>> _proxies = wait(proxyFuture);
+			if (_proxies.present()) {
+				proxies = _proxies.get();
+			}
+			else {
+				messages.push_back(JsonBuilder::makeMessage("proxies_error", "Timed out trying to retrieve proxies."));
|
||||||
|
}
|
||||||
}
|
}
|
||||||
else {
|
else {
|
||||||
// Set layers status to { _valid: false, error: "configurationMissing"}
|
// Set layers status to { _valid: false, error: "configurationMissing"}
|
||||||
|
@ -1893,7 +2010,7 @@ ACTOR Future<StatusReply> clusterGetStatus(
|
||||||
statusObj["layers"] = layers;
|
statusObj["layers"] = layers;
|
||||||
}
|
}
|
||||||
|
|
||||||
JsonBuilderObject processStatus = wait(processStatusFetcher(db, workers, pMetrics, mMetrics, latestError, traceFileOpenErrors, programStarts, processIssues, storageServers, tLogs, cx, configuration, &status_incomplete_reasons));
|
JsonBuilderObject processStatus = wait(processStatusFetcher(db, workers, pMetrics, mMetrics, latestError, traceFileOpenErrors, programStarts, processIssues, storageServers, tLogs, proxies, cx, configuration, &status_incomplete_reasons));
|
||||||
statusObj["processes"] = processStatus;
|
statusObj["processes"] = processStatus;
|
||||||
statusObj["clients"] = clientStatusFetcher(clientVersionMap, traceLogGroupMap);
|
statusObj["clients"] = clientStatusFetcher(clientVersionMap, traceLogGroupMap);
|
||||||
|
|
||||||
|
|
|
@@ -1909,7 +1909,6 @@ ACTOR Future<Void> updateLogSystem(TLogData* self, Reference<LogData> logData, L

 ACTOR Future<Void> tLogStart( TLogData* self, InitializeTLogRequest req, LocalityData locality ) {
 	state TLogInterface recruited(self->dbgid, locality);
-	recruited.locality = locality;
 	recruited.initEndpoints();

 	DUMPTOKEN( recruited.peekMessages );
@@ -22,6 +22,7 @@
 #define FDBSERVER_WORKERINTERFACE_H
 #pragma once

+#include "fdbserver/DataDistributorInterface.h"
 #include "fdbserver/MasterInterface.h"
 #include "fdbserver/TLogInterface.h"
 #include "fdbserver/ResolverInterface.h"
@@ -40,6 +41,7 @@ struct WorkerInterface {
 	RequestStream< struct InitializeTLogRequest > tLog;
 	RequestStream< struct RecruitMasterRequest > master;
 	RequestStream< struct InitializeMasterProxyRequest > masterProxy;
+	RequestStream< struct InitializeDataDistributorRequest > dataDistributor;
 	RequestStream< struct InitializeResolverRequest > resolver;
 	RequestStream< struct InitializeStorageRequest > storage;
 	RequestStream< struct InitializeLogRouterRequest > logRouter;
@@ -58,11 +60,11 @@ struct WorkerInterface {
 	NetworkAddress address() const { return tLog.getEndpoint().getPrimaryAddress(); }

 	WorkerInterface() {}
-	WorkerInterface( LocalityData locality ) : locality( locality ) {}
+	WorkerInterface( const LocalityData& locality ) : locality( locality ) {}

 	template <class Ar>
 	void serialize(Ar& ar) {
-		serializer(ar, clientInterface, locality, tLog, master, masterProxy, resolver, storage, logRouter, debugPing, coordinationPing, waitFailure, setMetricsRate, eventLogRequest, traceBatchDumpRequest, testerInterface, diskStoreRequest);
+		serializer(ar, clientInterface, locality, tLog, master, masterProxy, dataDistributor, resolver, storage, logRouter, debugPing, coordinationPing, waitFailure, setMetricsRate, eventLogRequest, traceBatchDumpRequest, testerInterface, diskStoreRequest);
 	}
 };

@@ -133,6 +135,16 @@ struct InitializeMasterProxyRequest {
 	}
 };

+struct InitializeDataDistributorRequest {
+	UID reqId;
+	ReplyPromise<DataDistributorInterface> reply;
+
+	template <class Ar>
+	void serialize( Ar& ar ) {
+		serializer(ar, reqId, reply);
+	}
+};
+
 struct InitializeResolverRequest {
 	uint64_t recoveryCount;
 	int proxyCount;
@@ -281,6 +293,7 @@ struct Role {
 	static const Role CLUSTER_CONTROLLER;
 	static const Role TESTER;
 	static const Role LOG_ROUTER;
+	static const Role DATA_DISTRIBUTOR;

 	std::string roleName;
 	std::string abbreviation;
@@ -330,6 +343,7 @@ Future<Void> tLog( class IKeyValueStore* const& persistentData, class IDiskQueue
 Future<Void> monitorServerDBInfo( Reference<AsyncVar<Optional<ClusterControllerFullInterface>>> const& ccInterface, Reference<ClusterConnectionFile> const&, LocalityData const&, Reference<AsyncVar<ServerDBInfo>> const& dbInfo );
 Future<Void> resolver( ResolverInterface const& proxy, InitializeResolverRequest const&, Reference<AsyncVar<ServerDBInfo>> const& db );
 Future<Void> logRouter( TLogInterface const& interf, InitializeLogRouterRequest const& req, Reference<AsyncVar<ServerDBInfo>> const& db );
+Future<Void> dataDistributor( DataDistributorInterface const& ddi, Reference<AsyncVar<ServerDBInfo>> const& db );

 void registerThreadForProfiling();
 void updateCpuProfiler(ProfilerRequest req);
@@ -1,4 +1,4 @@
 <?xml version="1.0" encoding="utf-8"?>
 <Project DefaultTargets="Build" ToolsVersion="14.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
   <Import Project="$(SolutionDir)versions.target" />
   <PropertyGroup Condition="'$(Release)' != 'true' ">
@@ -58,6 +58,7 @@
     <ActorCompiler Include="MemoryPager.actor.cpp" />
     <ActorCompiler Include="LogRouter.actor.cpp" />
     <ActorCompiler Include="OldTLogServer.actor.cpp" />
+    <ClCompile Include="LatencyBandConfig.cpp" />
     <ClCompile Include="SkipList.cpp" />
     <ActorCompiler Include="WaitFailure.actor.cpp" />
     <ActorCompiler Include="tester.actor.cpp" />
@@ -157,12 +158,14 @@
     <ClInclude Include="CoordinationInterface.h" />
     <ClInclude Include="CoroFlow.h" />
     <ClInclude Include="DataDistribution.h" />
+    <ClInclude Include="DataDistributorInterface.h" />
     <ClInclude Include="DBCoreState.h" />
     <ClInclude Include="IDiskQueue.h" />
     <ClInclude Include="IKeyValueStore.h" />
     <ClInclude Include="IndirectShadowPager.h" />
     <ClInclude Include="IPager.h" />
     <ClInclude Include="IVersionedStore.h" />
+    <ClInclude Include="LatencyBandConfig.h" />
     <ClInclude Include="LeaderElection.h" />
     <ClInclude Include="LogProtocolMessage.h" />
     <ClInclude Include="LogSystem.h" />
@@ -1,4 +1,4 @@
 <?xml version="1.0" encoding="utf-8"?>
 <Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
   <ItemGroup>
     <ActorCompiler Include="ClusterController.actor.cpp" />
@@ -295,7 +295,6 @@
       <Filter>sqlite</Filter>
     </ClCompile>
     <ClCompile Include="VFSAsync.cpp" />
-    <ClCompile Include="DatabaseConfiguration.cpp" />
     <ClCompile Include="workloads\AsyncFile.cpp">
       <Filter>workloads</Filter>
     </ClCompile>
@@ -303,6 +302,7 @@
     <ClCompile Include="workloads\MemoryKeyValueStore.cpp">
       <Filter>workloads</Filter>
     </ClCompile>
+    <ClCompile Include="LatencyBandConfig.cpp" />
   </ItemGroup>
   <ItemGroup>
     <ClInclude Include="ConflictSet.h" />
@@ -369,6 +369,7 @@
     <ClInclude Include="MemoryPager.h" />
     <ClInclude Include="IndirectShadowPager.h" />
     <ClInclude Include="template_fdb.h" />
+    <ClInclude Include="LatencyBandConfig.h" />
   </ItemGroup>
   <ItemGroup>
     <Filter Include="workloads">
@@ -1031,7 +1031,6 @@ static std::set<int> const& normalMasterErrors() {
 	s.insert( error_code_no_more_servers );
 	s.insert( error_code_master_recovery_failed );
 	s.insert( error_code_coordinated_state_conflict );
-	s.insert( error_code_movekeys_conflict );
 	s.insert( error_code_master_max_versions_in_flight );
 	s.insert( error_code_worker_removed );
 	s.insert( error_code_new_coordinators_timed_out );
@@ -1155,7 +1154,7 @@ ACTOR Future<Void> configurationMonitor( Reference<MasterData> self ) {
 				self->registrationTrigger.trigger();
 			}

-			state Future<Void> watchFuture = tr.watch(excludedServersVersionKey);
+			state Future<Void> watchFuture = tr.watch(configVersionKey);
 			wait(tr.commit());
 			wait(watchFuture);
 			break;
@@ -1349,14 +1348,6 @@ ACTOR Future<Void> masterCore( Reference<MasterData> self ) {
 		.detail("RecoveryDuration", recoveryDuration)
 		.trackLatest("MasterRecoveryState");

-	// Now that the master is recovered we can start auxiliary services that happen to run here
-	{
-		PromiseStream< std::pair<UID, Optional<StorageServerInterface>> > ddStorageServerChanges;
-		state double lastLimited = 0;
-		self->addActor.send( reportErrorsExcept( dataDistribution( self->dbInfo, self->myInterface, self->configuration, ddStorageServerChanges, self->logSystem, self->recoveryTransactionVersion, self->primaryDcId, self->remoteDcIds, &lastLimited, remoteRecovered.getFuture() ), "DataDistribution", self->dbgid, &normalMasterErrors() ) );
-		self->addActor.send( reportErrors( rateKeeper( self->dbInfo, ddStorageServerChanges, self->myInterface.getRateInfo.getFuture(), self->configuration, &lastLimited ), "Ratekeeper", self->dbgid) );
-	}
-
 	if( self->resolvers.size() > 1 )
 		self->addActor.send( resolutionBalancing(self) );

@@ -47,6 +47,7 @@
 #include "fdbserver/LogSystem.h"
 #include "fdbserver/RecoveryState.h"
 #include "fdbserver/LogProtocolMessage.h"
+#include "fdbserver/LatencyBandConfig.h"
 #include "flow/TDMetric.actor.h"
 #include "flow/actorcompiler.h" // This must be the last #include.

@@ -431,6 +432,8 @@ public:
 		return val;
 	}

+	Optional<LatencyBandConfig> latencyBandConfig;
+
 	struct Counters {
 		CounterCollection cc;
 		Counter allQueries, getKeyQueries, getValueQueries, getRangeQueries, finishedQueries, rowsQueried, bytesQueried, watchQueries;
@@ -441,6 +444,8 @@ public:
 		Counter loops;
 		Counter fetchWaitingMS, fetchWaitingCount, fetchExecutingMS, fetchExecutingCount;

+		LatencyBands readLatencyBands;
+
 		Counters(StorageServer* self)
 			: cc("StorageServer", self->thisServerID.toString()),
 			getKeyQueries("GetKeyQueries", cc),
@@ -465,7 +470,8 @@ public:
 			fetchWaitingMS("FetchWaitingMS", cc),
 			fetchWaitingCount("FetchWaitingCount", cc),
 			fetchExecutingMS("FetchExecutingMS", cc),
-			fetchExecutingCount("FetchExecutingCount", cc)
+			fetchExecutingCount("FetchExecutingCount", cc),
+			readLatencyBands("ReadLatencyMetrics", self->thisServerID, SERVER_KNOBS->STORAGE_LOGGING_DELAY)
 		{
 			specialCounter(cc, "LastTLogVersion", [self](){ return self->lastTLogVersion; });
 			specialCounter(cc, "Version", [self](){ return self->version.get(); });
@@ -733,15 +739,16 @@ ACTOR Future<Version> waitForVersionNoTooOld( StorageServer* data, Version versi
 }

 ACTOR Future<Void> getValueQ( StorageServer* data, GetValueRequest req ) {
-	state double startTime = timer();
+	state int64_t resultSize = 0;

 	try {
-		// Active load balancing runs at a very high priority (to obtain accurate queue lengths)
-		// so we need to downgrade here
 		++data->counters.getValueQueries;
 		++data->counters.allQueries;
 		++data->readQueueSizeMetric;
 		data->maxQueryQueue = std::max<int>( data->maxQueryQueue, data->counters.allQueries.getValue() - data->counters.finishedQueries.getValue());

+		// Active load balancing runs at a very high priority (to obtain accurate queue lengths)
+		// so we need to downgrade here
 		wait( delay(0, TaskDefaultEndpoint) );

 		if( req.debugID.present() )
@@ -788,7 +795,8 @@ ACTOR Future<Void> getValueQ( StorageServer* data, GetValueRequest req ) {

 		if (v.present()) {
 			++data->counters.rowsQueried;
-			data->counters.bytesQueried += v.get().size();
+			resultSize = v.get().size();
+			data->counters.bytesQueried += resultSize;
 		}

 		if( req.debugID.present() )
@@ -805,6 +813,10 @@ ACTOR Future<Void> getValueQ( StorageServer* data, GetValueRequest req ) {

 	++data->counters.finishedQueries;
 	--data->readQueueSizeMetric;
+	if(data->latencyBandConfig.present()) {
+		int maxReadBytes = data->latencyBandConfig.get().readConfig.maxReadBytes.orDefault(std::numeric_limits<int>::max());
+		data->counters.readLatencyBands.addMeasurement(timer()-req.requestTime, resultSize > maxReadBytes);
+	}

 	return Void();
 };
@@ -1241,6 +1253,8 @@ ACTOR Future<Void> getKeyValues( StorageServer* data, GetKeyValuesRequest req )
// Throws a wrong_shard_server if the keys in the request or result depend on data outside this server OR if a large selector offset prevents
// all data from being read in one range read
{
+	state int64_t resultSize = 0;
+
 	++data->counters.getRangeQueries;
 	++data->counters.allQueries;
 	++data->readQueueSizeMetric;
@@ -1329,8 +1343,9 @@ ACTOR Future<Void> getKeyValues( StorageServer* data, GetKeyValuesRequest req )
 			r.penalty = data->getPenalty();
 			req.reply.send( r );

+			resultSize = req.limitBytes - remainingLimitBytes;
+			data->counters.bytesQueried += resultSize;
 			data->counters.rowsQueried += r.data.size();
-			data->counters.bytesQueried += req.limitBytes - remainingLimitBytes;
 		}
 	} catch (Error& e) {
 		if(!canReplyWith(e))
@@ -1340,11 +1355,19 @@ ACTOR Future<Void> getKeyValues( StorageServer* data, GetKeyValuesRequest req )

 	++data->counters.finishedQueries;
 	--data->readQueueSizeMetric;

+	if(data->latencyBandConfig.present()) {
+		int maxReadBytes = data->latencyBandConfig.get().readConfig.maxReadBytes.orDefault(std::numeric_limits<int>::max());
+		int maxSelectorOffset = data->latencyBandConfig.get().readConfig.maxKeySelectorOffset.orDefault(std::numeric_limits<int>::max());
+		data->counters.readLatencyBands.addMeasurement(timer()-req.requestTime, resultSize > maxReadBytes || abs(req.begin.offset) > maxSelectorOffset || abs(req.end.offset) > maxSelectorOffset);
+	}
+
 	return Void();
 }

 ACTOR Future<Void> getKey( StorageServer* data, GetKeyRequest req ) {
+	state int64_t resultSize = 0;
+
 	++data->counters.getKeyQueries;
 	++data->counters.allQueries;
 	++data->readQueueSizeMetric;
@@ -1371,8 +1394,10 @@ ACTOR Future<Void> getKey( StorageServer* data, GetKeyRequest req ) {
 			updated = firstGreaterOrEqual(k)+offset-1; // first thing on next shard OR (large offset case) keyAfter largest key retrieved in range read
 		else
 			updated = KeySelectorRef(k,true,0); //found
+
+		resultSize = k.size();
+		data->counters.bytesQueried += resultSize;
 		++data->counters.rowsQueried;
-		data->counters.bytesQueried += k.size();

 		GetKeyReply reply(updated);
 		reply.penalty = data->getPenalty();
@@ -1387,6 +1412,11 @@ ACTOR Future<Void> getKey( StorageServer* data, GetKeyRequest req ) {

 	++data->counters.finishedQueries;
 	--data->readQueueSizeMetric;
+	if(data->latencyBandConfig.present()) {
+		int maxReadBytes = data->latencyBandConfig.get().readConfig.maxReadBytes.orDefault(std::numeric_limits<int>::max());
+		int maxSelectorOffset = data->latencyBandConfig.get().readConfig.maxKeySelectorOffset.orDefault(std::numeric_limits<int>::max());
+		data->counters.readLatencyBands.addMeasurement(timer()-req.requestTime, resultSize > maxReadBytes || abs(req.sel.offset) > maxSelectorOffset);
+	}

 	return Void();
 }
@@ -3310,6 +3340,20 @@ ACTOR Future<Void> storageServerCore( StorageServer* self, StorageServerInterfac
 					doUpdate = Void();
 				}
 			}
+
+			Optional<LatencyBandConfig> newLatencyBandConfig = self->db->get().latencyBandConfig;
+			if(newLatencyBandConfig.present() != self->latencyBandConfig.present()
+				|| (newLatencyBandConfig.present() && newLatencyBandConfig.get().readConfig != self->latencyBandConfig.get().readConfig))
+			{
+				self->latencyBandConfig = newLatencyBandConfig;
+				self->counters.readLatencyBands.clearBands();
+				TraceEvent("LatencyBandReadUpdatingConfig").detail("Present", newLatencyBandConfig.present());
+				if(self->latencyBandConfig.present()) {
+					for(auto band : self->latencyBandConfig.get().readConfig.bands) {
+						self->counters.readLatencyBands.addThreshold(band);
+					}
+				}
+			}
 		}
 		when( GetValueRequest req = waitNext(ssi.getValue.getFuture()) ) {
 			// Warning: This code is executed at extremely high priority (TaskLoadBalancedEndpoint), so downgrade before doing real work
@@ -31,6 +31,7 @@
 #include "fdbserver/IDiskQueue.h"
 #include "fdbclient/DatabaseContext.h"
 #include "fdbserver/ClusterRecruitmentInterface.h"
+#include "fdbserver/DataDistributorInterface.h"
 #include "fdbserver/ServerDBInfo.h"
 #include "fdbserver/CoordinationInterface.h"
 #include "fdbclient/FailureMonitorClient.h"
@@ -266,19 +267,27 @@ std::vector< DiskStore > getDiskStores( std::string folder ) {
 	return result;
 }

-ACTOR Future<Void> registrationClient( Reference<AsyncVar<Optional<ClusterControllerFullInterface>>> ccInterface, WorkerInterface interf, Reference<AsyncVar<ClusterControllerPriorityInfo>> asyncPriorityInfo, ProcessClass initialClass) {
+ACTOR Future<Void> registrationClient(
+		Reference<AsyncVar<Optional<ClusterControllerFullInterface>>> ccInterface,
+		WorkerInterface interf,
+		Reference<AsyncVar<ClusterControllerPriorityInfo>> asyncPriorityInfo,
+		ProcessClass initialClass,
+		Reference<AsyncVar<Optional<DataDistributorInterface>>> ddInterf) {
 	// Keeps the cluster controller (as it may be re-elected) informed that this worker exists
 	// The cluster controller uses waitFailureClient to find out if we die, and returns from registrationReply (requiring us to re-register)
+	// The registration request piggybacks optional distributor interface if it exists.
 	state Generation requestGeneration = 0;
 	state ProcessClass processClass = initialClass;
 	loop {
-		Future<RegisterWorkerReply> registrationReply = ccInterface->get().present() ? brokenPromiseToNever( ccInterface->get().get().registerWorker.getReply( RegisterWorkerRequest(interf, initialClass, processClass, asyncPriorityInfo->get(), requestGeneration++) ) ) : Never();
+		RegisterWorkerRequest request(interf, initialClass, processClass, asyncPriorityInfo->get(), requestGeneration++, ddInterf->get());
+		Future<RegisterWorkerReply> registrationReply = ccInterface->get().present() ? brokenPromiseToNever( ccInterface->get().get().registerWorker.getReply(request) ) : Never();
 		choose {
 			when ( RegisterWorkerReply reply = wait( registrationReply )) {
 				processClass = reply.processClass;
 				asyncPriorityInfo->set( reply.priorityInfo );
 			}
 			when ( wait( ccInterface->onChange() )) { }
+			when ( wait( ddInterf->onChange() ) ) {}
 		}
 	}
 }
@@ -504,7 +513,8 @@ ACTOR Future<Void> monitorServerDBInfo( Reference<AsyncVar<Optional<ClusterContr

 		choose {
 			when( ServerDBInfo ni = wait( ccInterface->get().present() ? brokenPromiseToNever( ccInterface->get().get().getServerDBInfo.getReply( req ) ) : Never() ) ) {
-				TraceEvent("GotServerDBInfoChange").detail("ChangeID", ni.id).detail("MasterID", ni.master.id());
+				TraceEvent("GotServerDBInfoChange").detail("ChangeID", ni.id).detail("MasterID", ni.master.id())
+					.detail("DataDistributorID", ni.distributor.present() ? ni.distributor.get().id() : UID());
 				ServerDBInfo localInfo = ni;
 				localInfo.myLocality = locality;
 				dbInfo->set(localInfo);
@@ -520,6 +530,7 @@ ACTOR Future<Void> monitorServerDBInfo( Reference<AsyncVar<Optional<ClusterContr
 ACTOR Future<Void> workerServer( Reference<ClusterConnectionFile> connFile, Reference<AsyncVar<Optional<ClusterControllerFullInterface>>> ccInterface, LocalityData locality,
 	Reference<AsyncVar<ClusterControllerPriorityInfo>> asyncPriorityInfo, ProcessClass initialClass, std::string folder, int64_t memoryLimit, std::string metricsConnFile, std::string metricsPrefix, Promise<Void> recoveredDiskFiles) {
 	state PromiseStream< ErrorInfo > errors;
+	state Reference<AsyncVar<Optional<DataDistributorInterface>>> ddInterf( new AsyncVar<Optional<DataDistributorInterface>>() );
 	state Future<Void> handleErrors = workerHandleErrors( errors.getFuture() ); // Needs to be stopped last
 	state ActorCollection errorForwarders(false);
 	state Future<Void> loggingTrigger = Void();
@@ -648,7 +659,7 @@ ACTOR Future<Void> workerServer( Reference<ClusterConnectionFile> connFile, Refe
 	wait(waitForAll(recoveries));
 	recoveredDiskFiles.send(Void());

-	errorForwarders.add( registrationClient( ccInterface, interf, asyncPriorityInfo, initialClass ) );
+	errorForwarders.add( registrationClient( ccInterface, interf, asyncPriorityInfo, initialClass, ddInterf ) );

 	TraceEvent("RecoveriesComplete", interf.id());

@@ -703,7 +714,6 @@ ACTOR Future<Void> workerServer( Reference<ClusterConnectionFile> connFile, Refe
 				startRole( Role::MASTER, recruited.id(), interf.id() );

 				DUMPTOKEN( recruited.waitFailure );
-				DUMPTOKEN( recruited.getRateInfo );
 				DUMPTOKEN( recruited.tlogRejoin );
 				DUMPTOKEN( recruited.changeCoordinators );
 				DUMPTOKEN( recruited.getCommitVersion );
@@ -713,6 +723,23 @@ ACTOR Future<Void> workerServer( Reference<ClusterConnectionFile> connFile, Refe
 				errorForwarders.add( zombie(recruited, forwardError( errors, Role::MASTER, recruited.id(), masterProcess )) );
 				req.reply.send(recruited);
 			}
+			when ( InitializeDataDistributorRequest req = waitNext(interf.dataDistributor.getFuture()) ) {
+				DataDistributorInterface recruited(locality);
+				recruited.initEndpoints();
+
+				if ( ddInterf->get().present() ) {
+					recruited = ddInterf->get().get();
+					TEST(true); // Recruited while already a data distributor.
+				} else {
+					startRole( Role::DATA_DISTRIBUTOR, recruited.id(), interf.id() );
+
+					Future<Void> dataDistributorProcess = dataDistributor( recruited, dbInfo );
+					errorForwarders.add( forwardError( errors, Role::DATA_DISTRIBUTOR, recruited.id(), setWhenDoneOrError( dataDistributorProcess, ddInterf, Optional<DataDistributorInterface>() ) ) );
+					ddInterf->set(Optional<DataDistributorInterface>(recruited));
+				}
+				TraceEvent("DataDistributorReceived", req.reqId).detail("DataDistributorId", recruited.id());
+				req.reply.send(recruited);
+			}
 			when( InitializeTLogRequest req = waitNext(interf.tLog.getFuture()) ) {
 				auto& logData = sharedLogs[req.storeType];
 				logData.second.send(req);
@@ -1086,3 +1113,4 @@ const Role Role::RESOLVER("Resolver", "RV");
 const Role Role::CLUSTER_CONTROLLER("ClusterController", "CC");
 const Role Role::TESTER("Tester", "TS");
 const Role Role::LOG_ROUTER("LogRouter", "LR");
+const Role Role::DATA_DISTRIBUTOR("DataDistributor", "DD");
@@ -133,7 +133,7 @@ struct MoveKeysWorkload : TestWorkload {

 		try {
 			state Promise<Void> signal;
-			wait( moveKeys( cx, keys, destinationTeamIDs, destinationTeamIDs, lock, signal, &fl1, &fl2, invalidVersion, false, relocateShardInterval.pairID ) );
+			wait( moveKeys( cx, keys, destinationTeamIDs, destinationTeamIDs, lock, signal, &fl1, &fl2, false, relocateShardInterval.pairID ) );
 			TraceEvent(relocateShardInterval.end()).detail("Result","Success");
 			return Void();
 		} catch (Error& e) {
@@ -31,6 +31,9 @@ extern bool noUnseed;

 struct StatusWorkload : TestWorkload {
 	double testDuration, requestsPerSecond;
+	bool enableLatencyBands;
+
+	Future<Void> latencyBandActor;

 	PerfIntCounter requests, replies, errors, totalSize;
 	Optional<StatusObject> parsedSchema;
@@ -41,6 +44,7 @@ struct StatusWorkload : TestWorkload {
 	{
 		testDuration = getOption(options, LiteralStringRef("testDuration"), 10.0);
 		requestsPerSecond = getOption(options, LiteralStringRef("requestsPerSecond"), 0.5);
+		enableLatencyBands = getOption(options, LiteralStringRef("enableLatencyBands"), g_random->random01() < 0.5);
 		auto statusSchemaStr = getOption(options, LiteralStringRef("schema"), JSONSchemas::statusSchema);
 		if (statusSchemaStr.size()) {
 			json_spirit::mValue schema = readJSONStrictly(statusSchemaStr.toString());
@@ -55,6 +59,10 @@ struct StatusWorkload : TestWorkload {

 	virtual std::string description() { return "StatusWorkload"; }
 	virtual Future<Void> setup(Database const& cx) {
+		if(enableLatencyBands) {
+			latencyBandActor = configureLatencyBands(this, cx);
+		}
+
 		return Void();
 	}
 	virtual Future<Void> start(Database const& cx) {
@@ -103,6 +111,56 @@ struct StatusWorkload : TestWorkload {
 		}
 	}

+	static std::string generateBands() {
+		int numBands = g_random->randomInt(0, 10);
+		std::vector<double> bands;
+
+		while(bands.size() < numBands) {
+			bands.push_back(g_random->random01() * pow(10, g_random->randomInt(-5, 1)));
+		}
+
+		std::string result = "\"bands\":[";
+		for(int i = 0; i < bands.size(); ++i) {
+			if(i > 0) {
+				result += ",";
+			}
+
+			result += format("%f", bands[i]);
+		}
+
+		return result + "]";
+	}
+
+	ACTOR Future<Void> configureLatencyBands(StatusWorkload *self, Database cx) {
+		loop {
+			state Transaction tr(cx);
+			loop {
+				try {
+					tr.setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
+					tr.setOption(FDBTransactionOptions::LOCK_AWARE);
+
+					std::string config = "{"
+						"\"get_read_version\":{" + generateBands() + "},"
+						"\"read\":{" + generateBands() + format(", \"max_key_selector_offset\":%d, \"max_read_bytes\":%d},", g_random->randomInt(0, 10000), g_random->randomInt(0, 1000000)) + ""
+						"\"commit\":{" + generateBands() + format(", \"max_commit_bytes\":%d", g_random->randomInt(0, 1000000)) + "}"
+					"}";
+
+					tr.set(latencyBandConfigKey, ValueRef(config));
+					wait(tr.commit());
+
+					if(g_random->random01() < 0.3) {
+						return Void();
+					}
+
+					wait(delay(g_random->random01() * 120));
+				}
+				catch(Error &e) {
+					wait(tr.onError(e));
+				}
+			}
+		}
+	}
+
 	ACTOR Future<Void> fetcher(Reference<ClusterConnectionFile> connFile, StatusWorkload *self) {
 		state double lastTime = now();

@@ -131,7 +189,6 @@ struct StatusWorkload : TestWorkload {
 			}
 		}
 	}
-
 };

 WorkloadFactory<StatusWorkload> StatusWorkloadFactory("Status");
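Aside: the configureLatencyBands actor above builds a small JSON document and writes it to latencyBandConfigKey. Purely as an illustration of the shape that string-building produces (hand-picked thresholds in place of the randomly generated ones; not output captured from the workload):

// Illustrative only: one plausible value written to latencyBandConfigKey by the workload above.
const char* exampleLatencyBandConfig =
    "{"
      "\"get_read_version\":{\"bands\":[0.001000,0.010000]},"
      "\"read\":{\"bands\":[0.000250,0.002500], \"max_key_selector_offset\":1000, \"max_read_bytes\":100000},"
      "\"commit\":{\"bands\":[0.005000,0.050000], \"max_commit_bytes\":500000}"
    "}";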
@@ -547,6 +547,14 @@ inline static Standalone<StringRef> makeString( int length ) {
 	return returnString;
 }

+inline static Standalone<StringRef> makeAlignedString( int alignment, int length ) {
+	Standalone<StringRef> returnString;
+	uint8_t *outData = new (returnString.arena()) uint8_t[alignment + length];
+	outData = (uint8_t*)((((uintptr_t)outData + (alignment - 1)) / alignment) * alignment);
+	((StringRef&)returnString) = StringRef(outData, length);
+	return returnString;
+}
+
 inline static StringRef makeString( int length, Arena& arena ) {
 	uint8_t *outData = new (arena) uint8_t[length];
 	return StringRef(outData, length);
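Aside: the new makeAlignedString over-allocates by `alignment` bytes and rounds the pointer up to the next multiple of `alignment`. A minimal standalone sketch of that round-up arithmetic (hypothetical helper name, not part of this commit):

#include <cassert>
#include <cstdint>

// Round p up to the next multiple of alignment. This mirrors the division-based
// form used in makeAlignedString, so alignment need not be a power of two.
inline uintptr_t roundUpTo(uintptr_t p, uintptr_t alignment) {
    return ((p + (alignment - 1)) / alignment) * alignment;
}

int main() {
    assert(roundUpTo(13, 8) == 16);  // advances by at most alignment - 1 bytes
    assert(roundUpTo(16, 8) == 16);  // already-aligned values are unchanged
    // Allocating alignment + length bytes therefore always leaves at least
    // length usable bytes after the rounded-up pointer.
    return 0;
}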
@@ -77,9 +77,7 @@ set(FLOW_SRCS

 configure_file(${CMAKE_CURRENT_SOURCE_DIR}/hgVersion.h.cmake ${CMAKE_CURRENT_BINARY_DIR}/hgVersion.h)

-actor_set(FLOW_BUILD "${FLOW_SRCS}")
-add_library(flow STATIC ${FLOW_BUILD})
-actor_compile(flow "${FLOW_SRCS}")
+add_flow_target(STATIC_LIBRARY NAME flow SRCS ${FLOW_SRCS})
 target_include_directories(flow SYSTEM PUBLIC ${CMAKE_THREAD_LIBS_INIT})
 target_include_directories(flow PUBLIC ${CMAKE_CURRENT_BINARY_DIR} ${CMAKE_CURRENT_SOURCE_DIR})
 if (NOT APPLE AND NOT WIN32)
@@ -117,3 +117,7 @@ void ErrorCodeTable::addCode(int code, const char *name, const char *description
 bool isAssertDisabled(int line) {
 	return FLOW_KNOBS && (FLOW_KNOBS->DISABLE_ASSERTS == -1 || FLOW_KNOBS->DISABLE_ASSERTS == line);
 }
+
+void breakpoint_me() {
+	return;
+}
@@ -98,6 +98,8 @@ extern bool isAssertDisabled( int line );
 	catch(Error &e) { criticalError(FDB_EXIT_ABORT, "AbortOnError", e.what()); } \
 	catch(...) { criticalError(FDB_EXIT_ABORT, "AbortOnError", "Aborted due to unknown error"); }

+EXTERNC void breakpoint_me();
+
 #ifdef FDB_CLEAN_BUILD
 # define NOT_IN_CLEAN BOOST_STATIC_ASSERT_MSG(0, "This code can not be enabled in a clean build.");
 #else
@@ -55,7 +55,7 @@ using namespace boost::asio::ip;
 //
 // xyzdev
 // vvvv
-const uint64_t currentProtocolVersion = 0x0FDB00B061020001LL;
+const uint64_t currentProtocolVersion = 0x0FDB00B061030001LL;
 const uint64_t compatibleProtocolVersionMask = 0xffffffffffff0000LL;
 const uint64_t minValidProtocolVersion = 0x0FDB00A200060001LL;

flow/Stats.h (76 lines changed)
@@ -38,6 +38,14 @@ MyCounters() : foo("foo", cc), bar("bar", cc), baz("baz", cc) {}
 #include "flow/flow.h"
 #include "flow/TDMetric.actor.h"

+struct TimedRequest {
+	double requestTime;
+
+	TimedRequest() {
+		requestTime = timer();
+	}
+};
+
 struct ICounter {
 	// All counters have a name and value
 	virtual std::string const& getName() const = 0;
@@ -62,7 +70,7 @@ struct CounterCollection {
 	std::string id;
 };

-struct Counter : ICounter {
+struct Counter : ICounter, NonCopyable {
 public:
 	typedef int64_t Value;

@@ -90,7 +98,7 @@ private:
 };

 template <class F>
-struct SpecialCounter : ICounter, FastAllocated<SpecialCounter<F>> {
+struct SpecialCounter : ICounter, FastAllocated<SpecialCounter<F>>, NonCopyable {
 	SpecialCounter(CounterCollection& collection, std::string const& name, F && f) : name(name), f(f) { collection.counters.push_back(this); collection.counters_to_remove.push_back(this); }
 	virtual void remove() { delete this; }
@@ -112,4 +120,68 @@ static void specialCounter(CounterCollection& collection, std::string const& nam

 Future<Void> traceCounters(std::string const& traceEventName, UID const& traceEventID, double const& interval, CounterCollection* const& counters, std::string const& trackLatestName = std::string());

+class LatencyBands {
+public:
+	LatencyBands(std::string name, UID id, double loggingInterval) : name(name), id(id), loggingInterval(loggingInterval), cc(nullptr), filteredCount(nullptr) {}
+
+	void addThreshold(double value) {
+		if(value > 0 && bands.count(value) == 0) {
+			if(bands.size() == 0) {
+				ASSERT(!cc && !filteredCount);
+				cc = new CounterCollection(name, id.toString());
+				logger = traceCounters(name, id, loggingInterval, cc, id.toString() + "/" + name);
+				filteredCount = new Counter("Filtered", *cc);
+				insertBand(std::numeric_limits<double>::infinity());
+			}
+
+			insertBand(value);
+		}
+	}
+
+	void addMeasurement(double measurement, bool filtered=false) {
+		if(filtered && filteredCount) {
+			++(*filteredCount);
+		}
+		else if(bands.size() > 0) {
+			auto itr = bands.upper_bound(measurement);
+			ASSERT(itr != bands.end());
+			++(*itr->second);
+		}
+	}
+
+	void clearBands() {
+		logger = Void();
+
+		for(auto itr : bands) {
+			delete itr.second;
+		}
+
+		bands.clear();
+
+		delete filteredCount;
+		delete cc;
+
+		filteredCount = nullptr;
+		cc = nullptr;
+	}
+
+	~LatencyBands() {
+		clearBands();
+	}
+
+private:
+	std::map<double, Counter*> bands;
+	Counter *filteredCount;
+
+	std::string name;
+	UID id;
+	double loggingInterval;
+
+	CounterCollection *cc;
+	Future<Void> logger;
+
+	void insertBand(double value) {
+		bands.insert(std::make_pair(value, new Counter(format("Band%f", value), *cc)));
+	}
+};
 #endif
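Aside: LatencyBands above buckets each measurement into the first configured threshold greater than it (with an implicit +infinity catch-all band) and counts filtered measurements separately. A minimal standalone analogue of that bucketing logic (hypothetical names, not FDB code, no tracing or CounterCollection plumbing):

#include <cassert>
#include <cstdio>
#include <limits>
#include <map>

// Thresholds become map keys; upper_bound(measurement) picks the first band whose
// threshold is greater than the measurement, and the +infinity band catches the rest.
struct SimpleLatencyBands {
    std::map<double, long> bands;  // threshold -> count
    long filtered = 0;

    void addThreshold(double value) {
        if (value > 0 && bands.count(value) == 0) {
            if (bands.empty())
                bands[std::numeric_limits<double>::infinity()] = 0;  // catch-all band
            bands[value] = 0;
        }
    }

    void addMeasurement(double measurement, bool isFiltered = false) {
        if (isFiltered)
            ++filtered;  // e.g. an oversized read excluded from the bands
        else if (!bands.empty()) {
            auto itr = bands.upper_bound(measurement);
            assert(itr != bands.end());
            ++itr->second;
        }
    }
};

int main() {
    SimpleLatencyBands b;
    b.addThreshold(0.001);
    b.addThreshold(0.010);
    b.addMeasurement(0.0004);                    // lands in the 0.001 band
    b.addMeasurement(0.5);                       // lands in the +inf band
    b.addMeasurement(2.0, /*isFiltered=*/true);  // counted as filtered only
    for (auto& kv : b.bands)
        std::printf("band %g: %ld\n", kv.first, kv.second);
    return 0;
}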
@@ -36,6 +36,19 @@ namespace actorcompiler
        }
    };

+   class ErrorMessagePolicy
+   {
+       public bool DisableActorWithoutWaitWarning = false;
+       public void HandleActorWithoutWait(String sourceFile, Actor actor)
+       {
+           if (!DisableActorWithoutWaitWarning && !actor.isTestCase)
+           {
+               // TODO(atn34): Once cmake is the only build system we can make this an error instead of a warning.
+               Console.Error.WriteLine("{0}:{1}: warning: ACTOR {2} does not contain a wait() statement", sourceFile, actor.SourceLine, actor.name);
+           }
+       }
+   }
+
    class Token
    {
        public string Value;
@@ -200,10 +213,12 @@ namespace actorcompiler

        Token[] tokens;
        string sourceFile;
+       ErrorMessagePolicy errorMessagePolicy;

-       public ActorParser(string text, string sourceFile)
+       public ActorParser(string text, string sourceFile, ErrorMessagePolicy errorMessagePolicy)
        {
            this.sourceFile = sourceFile;
+           this.errorMessagePolicy = errorMessagePolicy;
            tokens = Tokenize(text).Select(t=>new Token{ Value=t }).ToArray();
            CountParens();
            //if (sourceFile.EndsWith(".h")) LineNumbersEnabled = false;
@@ -872,21 +887,21 @@ namespace actorcompiler
            var body = range(heading.End+1, tokens.Length)
                .TakeWhile(t => t.BraceDepth > toks.First().BraceDepth);

-           bool warnOnNoWait = false;
            if (head_token.Value == "ACTOR")
            {
                ParseActorHeading(actor, heading);
-               warnOnNoWait = true;
            }
-           else if (head_token.Value == "TEST_CASE")
+           else if (head_token.Value == "TEST_CASE") {
                ParseTestCaseHeading(actor, heading);
+               actor.isTestCase = true;
+           }
            else
                head_token.Assert("ACTOR or TEST_CASE expected!", t => false);

            actor.body = ParseCodeBlock(body);

-           if (!actor.body.containsWait() && warnOnNoWait)
-               Console.Error.WriteLine("{0}:{1}: warning: ACTOR {2} does not contain a wait() statement", sourceFile, actor.SourceLine, actor.name);
+           if (!actor.body.containsWait())
+               this.errorMessagePolicy.HandleActorWithoutWait(sourceFile, actor);

            end = body.End + 1;
        }
@@ -234,6 +234,7 @@ namespace actorcompiler
        public string testCaseParameters = null;
        public string nameSpace = null;
        public bool isForwardDeclaration = false;
+       public bool isTestCase = false;
    };

    class Descr
@@ -33,16 +33,21 @@ namespace actorcompiler
            if (args.Length < 2)
            {
                Console.WriteLine("Usage:");
-               Console.WriteLine(" actorcompiler [input] [output]");
+               Console.WriteLine(" actorcompiler <input> <output> [--disable-actor-without-wait-warning]");
                return 100;
            }
            Console.WriteLine("actorcompiler {0}", string.Join(" ", args));
            string input = args[0], output = args[1], outputtmp = args[1] + ".tmp";
+           ErrorMessagePolicy errorMessagePolicy = new ErrorMessagePolicy();
+           if (args.Contains("--disable-actor-without-wait-warning"))
+           {
+               errorMessagePolicy.DisableActorWithoutWaitWarning = true;
+           }
            try
            {
                var inputData = File.ReadAllText(input);
                using (var outputStream = new StreamWriter(outputtmp))
-                   new ActorParser(inputData, input.Replace('\\', '/')).Write(outputStream, output.Replace('\\', '/'));
+                   new ActorParser(inputData, input.Replace('\\', '/'), errorMessagePolicy).Write(outputStream, output.Replace('\\', '/'));
                if (File.Exists(output))
                {
                    File.SetAttributes(output, FileAttributes.Normal);
@@ -12,7 +12,6 @@
    <TargetFrameworkVersion>v4.0</TargetFrameworkVersion>
    <FileAlignment>512</FileAlignment>
    <OutputPath>$(SolutionDir)bin\$(Configuration)\</OutputPath>
-   <IntermediateOutputPath>$(SystemDrive)\temp\msvcfdb\$(Configuration)\actorcompiler\</IntermediateOutputPath>
    <PublishUrl>publish\</PublishUrl>
    <Install>true</Install>
    <InstallFrom>Disk</InstallFrom>
@@ -12,7 +12,6 @@
    <TargetFrameworkVersion>v4.0</TargetFrameworkVersion>
    <FileAlignment>512</FileAlignment>
    <OutputPath>$(SolutionDir)bin\$(Configuration)\</OutputPath>
-   <IntermediateOutputPath>$(SystemDrive)\temp\msvcfdb\$(Configuration)\coveragetool\</IntermediateOutputPath>
  </PropertyGroup>
  <PropertyGroup Condition="'$(Configuration)|$(Platform)' == 'Debug|AnyCPU'">
    <DebugSymbols>true</DebugSymbols>
@@ -775,6 +775,18 @@ Future<Void> setAfter( Reference<AsyncVar<T>> var, double time, T val ) {
 	return Void();
 }

+ACTOR template <class T>
+Future<Void> setWhenDoneOrError( Future<Void> condition, Reference<AsyncVar<T>> var, T val ) {
+	try {
+		wait( condition );
+	}
+	catch ( Error& e ) {
+		if (e.code() == error_code_actor_cancelled) throw;
+	}
+	var->set( val );
+	return Void();
+}
+
 Future<bool> allTrue( const std::vector<Future<bool>>& all );
 Future<Void> anyTrue( std::vector<Reference<AsyncVar<bool>>> const& input, Reference<AsyncVar<bool>> const& output );
 Future<Void> cancelOnly( std::vector<Future<Void>> const& futures );