Merge remote-tracking branch 'apple/master' into task/tls-upgrade
Commit: e05b53d755
@ -76,6 +76,8 @@ foundationdb.VC.db
|
|||
foundationdb.VC.VC.opendb
|
||||
ipch/
|
||||
compile_commands.json
|
||||
flow/actorcompiler/obj
|
||||
flow/coveragetool/obj
|
||||
|
||||
# Temporary and user configuration files
|
||||
*~
|
||||
|
|
CMakeLists.txt
|
@ -17,17 +17,11 @@
|
|||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
cmake_minimum_required(VERSION 3.12)
|
||||
project(fdb
|
||||
project(foundationdb
|
||||
VERSION 6.1.0
|
||||
DESCRIPTION "FoundationDB is a scalable, fault-tolerant, ordered key-value store with full ACID transactions."
|
||||
HOMEPAGE_URL "http://www.foundationdb.org/"
|
||||
LANGUAGES ASM C CXX Java)
|
||||
|
||||
if(WIN32)
|
||||
# C# is currently only supported on Windows.
|
||||
# On other platforms we find mono manually
|
||||
enable_language(CSharp)
|
||||
endif()
|
||||
LANGUAGES ASM C CXX)
|
||||
|
||||
set(CMAKE_MODULE_PATH "${CMAKE_MODULE_PATH};${PROJECT_SOURCE_DIR}/cmake")
|
||||
message (STATUS "${PROJECT_SOURCE_DIR} ${PROJECT_BINARY_DIR}")
|
||||
|
@ -45,42 +39,26 @@ endif()
|
|||
set(EXECUTABLE_OUTPUT_PATH ${PROJECT_BINARY_DIR}/bin)
|
||||
set(LIBRARY_OUTPUT_PATH ${PROJECT_BINARY_DIR}/lib)
|
||||
|
||||
set(OPEN_FOR_IDE OFF CACHE BOOL "Open this in an IDE (won't compile/link)")
|
||||
|
||||
################################################################################
|
||||
# Packages used for bindings
|
||||
################################################################################
|
||||
|
||||
set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_CURRENT_SOURCE_DIR}/cmake/")
|
||||
|
||||
find_package(PythonInterp 3.4 REQUIRED)
|
||||
set(Python_ADDITIONAL_VERSIONS 3.4 3.5 3.5)
|
||||
find_package(PythonLibs 3.4 REQUIRED)
|
||||
|
||||
|
||||
################################################################################
|
||||
# LibreSSL
|
||||
################################################################################
|
||||
|
||||
set(DISABLE_TLS OFF CACHE BOOL "Don't try to find LibreSSL and always build without TLS support")
|
||||
if(DISABLE_TLS)
|
||||
set(WITH_TLS FALSE)
|
||||
else()
|
||||
set(LIBRESSL_USE_STATIC_LIBS TRUE)
|
||||
find_package(LibreSSL)
|
||||
if(LibreSSL_FOUND)
|
||||
set(WITH_TLS TRUE)
|
||||
else()
|
||||
message(STATUS "LibreSSL NOT Found - Will compile without TLS Support")
|
||||
message(STATUS "You can set LibreSSL_ROOT to the LibreSSL install directory to help cmake find it")
|
||||
set(WITH_TLS FALSE)
|
||||
endif()
|
||||
endif()
|
||||
|
||||
################################################################################
|
||||
# Compiler configuration
|
||||
################################################################################
|
||||
|
||||
include(ConfigureCompiler)
|
||||
|
||||
################################################################################
|
||||
# Compiler configuration
|
||||
################################################################################
|
||||
|
||||
include(FDBComponents)
|
||||
|
||||
################################################################################
|
||||
# Get repository information
|
||||
################################################################################
|
||||
|
@ -127,10 +105,15 @@ configure_file(${CMAKE_CURRENT_SOURCE_DIR}/fdbclient/versions.h.cmake ${CMAKE_CU
|
|||
# Flow
|
||||
################################################################################
|
||||
|
||||
# Flow and other tools are written in C# - so we need that dependency
|
||||
include(EnableCsharp)
|
||||
|
||||
# First thing we need is the actor compiler - and to compile and run the
|
||||
# actor compiler, we need mono
|
||||
include(CompileActorCompiler)
|
||||
|
||||
include(CompileCoverageTool)
|
||||
|
||||
# with the actor compiler, we can now make the flow commands available
|
||||
include(FlowCommands)
|
||||
|
||||
|
@ -140,50 +123,6 @@ include(FlowCommands)
|
|||
|
||||
include(CompileVexillographer)
|
||||
|
||||
# This macro can be used to install symlinks, which turns out to be
|
||||
# non-trivial due to CMake version differences and limitations on how
|
||||
# files can be installed when building binary packages.
|
||||
#
|
||||
# The rule for binary packaging is that files (including symlinks) must
|
||||
# be installed with the standard CMake install() macro.
|
||||
#
|
||||
# The rule for non-binary packaging is that CMake 2.6 cannot install()
|
||||
# symlinks, but can create the symlink at install-time via scripting.
|
||||
# Though, we assume that CMake 2.6 isn't going to be used to generate
|
||||
# packages because versions later than 2.8.3 are superior for that purpose.
|
||||
#
|
||||
# _filepath: the absolute path to the file to symlink
|
||||
# _sympath: absolute path of the installed symlink
|
||||
|
||||
macro(InstallSymlink _filepath _sympath)
|
||||
get_filename_component(_symname ${_sympath} NAME)
|
||||
get_filename_component(_installdir ${_sympath} PATH)
|
||||
|
||||
if (BINARY_PACKAGING_MODE)
|
||||
execute_process(COMMAND "${CMAKE_COMMAND}" -E create_symlink
|
||||
${_filepath}
|
||||
${CMAKE_CURRENT_BINARY_DIR}/${_symname})
|
||||
install(FILES ${CMAKE_CURRENT_BINARY_DIR}/${_symname}
|
||||
DESTINATION ${_installdir}
|
||||
COMPONENT clients)
|
||||
else ()
|
||||
# scripting the symlink installation at install time should work
|
||||
# for CMake 2.6.x and 2.8.x
|
||||
install(CODE "
|
||||
if (\"\$ENV{DESTDIR}\" STREQUAL \"\")
|
||||
execute_process(COMMAND \"${CMAKE_COMMAND}\" -E create_symlink
|
||||
${_filepath}
|
||||
${_installdir}/${_symname})
|
||||
else ()
|
||||
execute_process(COMMAND \"${CMAKE_COMMAND}\" -E create_symlink
|
||||
${_filepath}
|
||||
\$ENV{DESTDIR}/${_installdir}/${_symname})
|
||||
endif ()
|
||||
"
|
||||
COMPONENT clients)
|
||||
endif ()
|
||||
endmacro(InstallSymlink)
|
||||
|
||||
################################################################################
|
||||
# Generate config file
|
||||
################################################################################
|
||||
|
@ -235,6 +174,9 @@ endif()
|
|||
add_subdirectory(bindings)
|
||||
add_subdirectory(fdbbackup)
|
||||
add_subdirectory(tests)
|
||||
if(WITH_DOCUMENTATION)
|
||||
add_subdirectory(documentation)
|
||||
endif()
|
||||
|
||||
if(WIN32)
|
||||
add_subdirectory(packaging/msi)
|
||||
|
@ -256,3 +198,11 @@ if (CMAKE_EXPORT_COMPILE_COMMANDS)
|
|||
)
|
||||
add_custom_target(procossed_compile_commands ALL DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/compile_commands.json ${CMAKE_CURRENT_BINARY_DIR}/compile_commands.json)
|
||||
endif()
|
||||
|
||||
################################################################################
|
||||
# Inform user which components we are going to build
|
||||
################################################################################
|
||||
|
||||
print_components()
|
||||
|
||||
message(STATUS "CPACK_COMPONENTS_ALL ${CPACK_COMPONENTS_ALL}")
|
||||
|
|
README.md
|
@ -104,16 +104,49 @@ cmake -DLibreSSL_ROOT=/usr/local/libressl-2.8.3/ ../foundationdb
|
|||
FoundationDB will build just fine without LibreSSL, however, the resulting
|
||||
binaries won't support TLS connections.
|
||||
|
||||
### Language Bindings
|
||||
|
||||
The language bindings that are supported by cmake each have a
`README.md` file in the corresponding `bindings/lang` directory.
|
||||
|
||||
Generally, cmake will build all language bindings for which it can find all
|
||||
necessary dependencies. After each successful cmake run, cmake will tell you
|
||||
which language bindings it is going to build.
|
||||
|
||||
|
||||
### Generating compile_commands.json
|
||||
|
||||
CMake can build a compilation database for you. However, the default generatd
|
||||
CMake can build a compilation database for you. However, the default generated
|
||||
one is not too useful as it operates on the generated files. When running make,
|
||||
the build system will create another `compile_commands.json` file in the source
|
||||
directory. This can then be used for tools like
|
||||
[CCLS](https://github.com/MaskRay/ccls),
|
||||
[CQuery](https://github.com/cquery-project/cquery), etc. This way you can get
|
||||
code-completion and code navigation in flow. It is not yet perfect (it will show
|
||||
a few errors) but we are constantly working on improving the developement experience.
|
||||
a few errors) but we are constantly working on improving the development experience.
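As a concrete, illustrative workflow (the paths below are only examples; the key point is to point the tools at the `compile_commands.json` written into the source directory, not the one in the build directory):

```
# out-of-source build; adjust paths to your checkout
mkdir -p ~/build/foundationdb && cd ~/build/foundationdb
cmake ~/src/foundationdb
make
# the build writes compile_commands.json into the source directory,
# where ccls/cquery pick it up from the project root
ls ~/src/foundationdb/compile_commands.json
```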
### Using IDEs
|
||||
|
||||
CMake has built-in support for a number of popular IDEs. However, because flow
|
||||
files are precompiled with the actor compiler, an IDE will not be very useful as
|
||||
a user will only be presented with the generated code - which is not what she
|
||||
wants to edit and get IDE features for.
|
||||
|
||||
The good news is that it is possible to generate project files for editing
|
||||
flow with a supported IDE. There is a cmake option called `OPEN_FOR_IDE` which
|
||||
will generate a project which can be opened in an IDE for editing. You won't be
|
||||
able to build this project, but you will be able to edit the files and get most
editing and navigation features your IDE supports.
|
||||
|
||||
For example, if you want to use Xcode to make changes to FoundationDB you can
create an Xcode project with the following command:
|
||||
|
||||
```
|
||||
cmake -G Xcode -DOPEN_FOR_IDE=ON <FDB_SOURCE_DIRECTORY>
|
||||
```
|
||||
|
||||
You should create a second build-directory which you will use for building
|
||||
(probably with make or ninja) and debugging.
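A minimal sketch of that two-directory setup (directory names are only examples):

```
# IDE project: only for editing and navigation, it won't build
mkdir ide-build && cd ide-build
cmake -G Xcode -DOPEN_FOR_IDE=ON ../foundationdb
cd ..
# regular build directory: used for actual builds and debugging
mkdir build && cd build
cmake ../foundationdb && make
```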
### Linux
|
||||
|
||||
|
@ -173,7 +206,7 @@ that Visual Studio is used to compile.
|
|||
1. This should succeed. In which case you can build using msbuild:
|
||||
`msbuild /p:Configuration=Release fdb.sln`. You can also open the resulting
|
||||
solution in Visual Studio and compile from there. However, be aware that
|
||||
using Visual Studio for developement is currently not supported as Visual
|
||||
using Visual Studio for development is currently not supported as Visual
|
||||
Studio will only know about the generated files.
|
||||
|
||||
If you want TLS support to be enabled under Windows you currently have to build
|
||||
|
|
|
@ -1,3 +1,12 @@
|
|||
add_subdirectory(c)
|
||||
add_subdirectory(flow)
|
||||
add_subdirectory(python)
|
||||
add_subdirectory(java)
|
||||
if(WITH_JAVA)
|
||||
add_subdirectory(java)
|
||||
endif()
|
||||
if(WITH_GO)
|
||||
add_subdirectory(go)
|
||||
endif()
|
||||
if(WITH_RUBY)
|
||||
add_subdirectory(ruby)
|
||||
endif()
|
||||
|
|
|
@ -16,7 +16,7 @@ elseif(WIN32)
|
|||
endif()
|
||||
|
||||
add_custom_command(OUTPUT ${asm_file} ${CMAKE_CURRENT_BINARY_DIR}/fdb_c_function_pointers.g.h
|
||||
COMMAND ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/generate_asm.py ${platform}
|
||||
COMMAND $<TARGET_FILE:Python::Interpreter> ${CMAKE_CURRENT_SOURCE_DIR}/generate_asm.py ${platform}
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/fdb_c.cpp
|
||||
${asm_file}
|
||||
${CMAKE_CURRENT_BINARY_DIR}/fdb_c_function_pointers.g.h
|
||||
|
@ -41,6 +41,15 @@ if(WIN32)
|
|||
enable_language(ASM_MASM)
|
||||
set_property(SOURCE ${asm_file} PROPERTY LANGUAGE ASM_MASM)
|
||||
endif()
|
||||
|
||||
# The tests don't build on windows
|
||||
if(NOT WIN32)
|
||||
add_executable(fdb_c_performance_test test/performance_test.c test/test.h)
|
||||
target_link_libraries(fdb_c_performance_test PRIVATE fdb_c)
|
||||
add_executable(fdb_c_ryw_benchmark test/ryw_benchmark.c test/test.h)
|
||||
target_link_libraries(fdb_c_ryw_benchmark PRIVATE fdb_c)
|
||||
endif()
|
||||
|
||||
# TODO: re-enable once the old vcxproj-based build system is removed.
|
||||
#generate_export_header(fdb_c EXPORT_MACRO_NAME "DLLEXPORT"
|
||||
# EXPORT_FILE_NAME ${CMAKE_CURRENT_BINARY_DIR}/foundationdb/fdb_c_export.h)
|
||||
|
|
|
@ -0,0 +1,42 @@
|
|||
set(SRCS
|
||||
DirectoryLayer.actor.cpp
|
||||
DirectoryLayer.h
|
||||
DirectoryPartition.h
|
||||
DirectorySubspace.cpp
|
||||
DirectorySubspace.h
|
||||
FDBLoanerTypes.h
|
||||
HighContentionAllocator.actor.cpp
|
||||
HighContentionAllocator.h
|
||||
IDirectory.h
|
||||
Node.actor.cpp
|
||||
Subspace.cpp
|
||||
Subspace.h
|
||||
Tuple.cpp
|
||||
Tuple.h
|
||||
fdb_flow.actor.cpp
|
||||
fdb_flow.h)
|
||||
|
||||
add_flow_target(NAME fdb_flow SRCS ${SRCS} STATIC_LIBRARY)
|
||||
target_link_libraries(fdb_flow PUBLIC fdb_c)
|
||||
|
||||
add_subdirectory(tester)
|
||||
|
||||
# generate flow-package
|
||||
foreach(f IN LISTS SRCS)
|
||||
if(f MATCHES ".*\\.h$")
|
||||
list(APPEND headers ${CMAKE_CURRENT_SOURCE_DIR}/${f})
|
||||
endif()
|
||||
endforeach()
|
||||
file(MAKE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/packages)
|
||||
file(MAKE_DIRECTORY ${CMAKE_BINARY_DIR}/packages)
|
||||
set(package_dir ${CMAKE_CURRENT_BINARY_DIR}/packages/fdb-flow-${CMAKE_PROJECT_VERSION})
|
||||
set(tar_file ${CMAKE_BINARY_DIR}/packages/fdb-flow-${CMAKE_PROJECT_VERSION}.tar.gz)
|
||||
add_custom_command(OUTPUT ${tar_file}
|
||||
COMMAND
|
||||
${CMAKE_COMMAND} -E make_directory ${package_dir} &&
|
||||
${CMAKE_COMMAND} -E copy $<TARGET_FILE:fdb_flow> ${headers} ${package_dir} &&
|
||||
${CMAKE_COMMAND} -E tar czf ${tar_file} ${package_dir}
|
||||
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/packages
|
||||
COMMENT "Build fdb_flow package")
|
||||
add_custom_target(package_flow DEPENDS ${tar_file})
|
||||
add_dependencies(packages package_flow)
|
|
@ -0,0 +1,6 @@
|
|||
set(TEST_SRCS
|
||||
DirectoryTester.actor.cpp
|
||||
Tester.actor.cpp
|
||||
Tester.actor.h)
|
||||
add_flow_target(NAME fdb_flow_tester EXECUTABLE SRCS ${TEST_SRCS})
|
||||
target_link_libraries(fdb_flow_tester fdb_flow)
|
|
@ -0,0 +1,121 @@
|
|||
set(SRCS
|
||||
src/_stacktester/directory.go
|
||||
src/fdb/directory/allocator.go
|
||||
src/fdb/directory/node.go
|
||||
src/fdb/futures.go
|
||||
src/fdb/subspace/subspace.go
|
||||
src/_stacktester/stacktester.go
|
||||
src/fdb/directory/directory.go
|
||||
src/fdb/doc.go
|
||||
src/fdb/transaction.go
|
||||
src/fdb/directory/directoryLayer.go
|
||||
src/fdb/errors.go
|
||||
src/fdb/keyselector.go
|
||||
src/fdb/tuple/tuple.go
|
||||
src/fdb/cluster.go
|
||||
src/fdb/directory/directoryPartition.go
|
||||
src/fdb/fdb.go
|
||||
src/fdb/range.go
|
||||
src/fdb/tuple/tuple_test.go
|
||||
src/fdb/database.go
|
||||
src/fdb/directory/directorySubspace.go
|
||||
src/fdb/fdb_test.go
|
||||
src/fdb/snapshot.go)
|
||||
|
||||
set(GOPATH ${CMAKE_CURRENT_BINARY_DIR})
|
||||
set(GO_PACKAGE_ROOT github.com/apple/foundationdb/bindings/go)
|
||||
set(GO_IMPORT_PATH ${GO_PACKAGE_ROOT}/src)
|
||||
set(GO_DEST ${GOPATH}/src/${GO_PACKAGE_ROOT})
|
||||
|
||||
if(APPLE)
|
||||
set(GOPLATFORM darwin_amd64)
|
||||
elseif(WIN32)
|
||||
set(GOPLATFORM windows_amd64)
|
||||
else()
|
||||
set(GOPLATFORM linux_amd64)
|
||||
endif()
|
||||
|
||||
set(GO_PACKAGE_OUTDIR ${GOPATH}/pkg/${GOPLATFORM}/${GO_IMPORT_PATH})
|
||||
|
||||
file(MAKE_DIRECTORY ${GOPATH}
|
||||
${GO_DEST})
|
||||
set(go_options_file ${GO_DEST}/src/fdb/generated.go)
|
||||
|
||||
set(go_env GOPATH=${GOPATH}
|
||||
C_INCLUDE_PATH=${CMAKE_BINARY_DIR}/bindings/c/foundationdb:${CMAKE_SOURCE_DIR}/bindings/c
|
||||
CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/lib)
|
||||
|
||||
foreach(src_file IN LISTS SRCS)
|
||||
set(dest_file ${GO_DEST}/${src_file})
|
||||
get_filename_component(dest_dir ${dest_file} DIRECTORY)
|
||||
list(APPEND SRCS_OUT ${dest_file})
|
||||
add_custom_command(OUTPUT ${dest_file}
|
||||
COMMAND ${CMAKE_COMMAND} -E make_directory ${dest_dir} &&
|
||||
${CMAKE_COMMAND} -E copy ${src_file} ${dest_file}
|
||||
DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/${src_file}
|
||||
WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
|
||||
COMMENT "Creating fdb_go_path")
|
||||
endforeach()
|
||||
add_custom_target(copy_go_sources DEPENDS ${SRCS_OUT})
|
||||
add_custom_command(OUTPUT ${go_options_file}
|
||||
COMMAND ${GO_EXECUTABLE} run ${CMAKE_CURRENT_SOURCE_DIR}/src/_util/translate_fdb_options.go
|
||||
-in ${CMAKE_SOURCE_DIR}/fdbclient/vexillographer/fdb.options
|
||||
-out ${go_options_file}
|
||||
DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/src/_util/translate_fdb_options.go
|
||||
${CMAKE_SOURCE_DIR}/fdbclient/vexillographer/fdb.options
|
||||
COMMENT "Generate FDBOptions for GO")
|
||||
add_custom_target(go_options_file DEPENDS ${go_options_file})
|
||||
add_dependencies(go_options_file copy_go_sources)
|
||||
|
||||
function(build_go_package)
|
||||
set(options LIBRARY EXECUTABLE)
|
||||
set(oneValueArgs NAME PATH)
|
||||
set(multiValueArgs)
|
||||
cmake_parse_arguments(BGP "${options}" "${oneValueArgs}" "${multiValueArgs}" "${ARGN}")
|
||||
|
||||
if(NOT BGP_NAME OR NOT BGP_PATH)
|
||||
message(FATAL_ERROR "NAME and PATH arguments are missing")
|
||||
endif()
|
||||
if(BGP_LIBRARY AND BGP_EXECUTABLE)
|
||||
message(FATAL_ERROR "Package can't be a library and an executable")
|
||||
endif()
|
||||
if(NOT BGP_LIBRARY AND NOT BGP_EXECUTABLE)
|
||||
message(FATAL_ERROR "Missing type")
|
||||
endif()
|
||||
|
||||
if(BGP_LIBRARY)
|
||||
if(WIN32)
|
||||
set(outfile ${GO_PACKAGE_OUTDIR}/${BGP_PATH}.lib)
|
||||
else()
|
||||
set(outfile ${GO_PACKAGE_OUTDIR}/${BGP_PATH}.a)
|
||||
endif()
|
||||
else()
|
||||
get_filename_component(exec_filename ${BGP_PATH} NAME)
|
||||
if(WIN32)
|
||||
set(outfile ${GOPATH}/bin/${exec_filename}.exe)
|
||||
else()
|
||||
set(outfile ${GOPATH}/bin/${exec_filename})
|
||||
endif()
|
||||
endif()
|
||||
add_custom_command(OUTPUT ${outfile}
|
||||
COMMAND ${CMAKE_COMMAND} -E env ${go_env}
|
||||
${GO_EXECUTABLE} install ${GO_IMPORT_PATH}/${BGP_PATH}
|
||||
DEPENDS ${fdb_options_file}
|
||||
COMMENT "Building ${BGP_NAME}")
|
||||
add_custom_target(${BGP_NAME} ALL DEPENDS ${outfile})
|
||||
endfunction()
|
||||
|
||||
build_go_package(LIBRARY NAME fdb_go PATH fdb)
|
||||
add_dependencies(fdb_go fdb_c go_options_file)
|
||||
|
||||
build_go_package(LIBRARY NAME tuple_go PATH fdb/tuple)
|
||||
add_dependencies(tuple_go fdb_go)
|
||||
|
||||
build_go_package(LIBRARY NAME subspace_go PATH fdb/subspace)
|
||||
add_dependencies(subspace_go tuple_go)
|
||||
|
||||
build_go_package(LIBRARY NAME directory_go PATH fdb/directory)
|
||||
add_dependencies(directory_go tuple_go)
|
||||
|
||||
build_go_package(EXECUTABLE NAME fdb_go_tester PATH _stacktester)
|
||||
add_dependencies(fdb_go_tester directory_go)
|
|
@ -24,8 +24,10 @@ package main
|
|||
|
||||
import (
|
||||
"encoding/xml"
|
||||
"flag"
|
||||
"fmt"
|
||||
"go/doc"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
|
@ -48,22 +50,22 @@ type Options struct {
|
|||
Scope []Scope
|
||||
}
|
||||
|
||||
func writeOptString(receiver string, function string, opt Option) {
|
||||
fmt.Printf(`func (o %s) %s(param string) error {
|
||||
func writeOptString(w io.Writer, receiver string, function string, opt Option) {
|
||||
fmt.Fprintf(w, `func (o %s) %s(param string) error {
|
||||
return o.setOpt(%d, []byte(param))
|
||||
}
|
||||
`, receiver, function, opt.Code)
|
||||
}
|
||||
|
||||
func writeOptBytes(receiver string, function string, opt Option) {
|
||||
fmt.Printf(`func (o %s) %s(param []byte) error {
|
||||
func writeOptBytes(w io.Writer, receiver string, function string, opt Option) {
|
||||
fmt.Fprintf(w, `func (o %s) %s(param []byte) error {
|
||||
return o.setOpt(%d, param)
|
||||
}
|
||||
`, receiver, function, opt.Code)
|
||||
}
|
||||
|
||||
func writeOptInt(receiver string, function string, opt Option) {
|
||||
fmt.Printf(`func (o %s) %s(param int64) error {
|
||||
func writeOptInt(w io.Writer, receiver string, function string, opt Option) {
|
||||
fmt.Fprintf(w, `func (o %s) %s(param int64) error {
|
||||
b, e := int64ToBytes(param)
|
||||
if e != nil {
|
||||
return e
|
||||
|
@ -73,36 +75,36 @@ func writeOptInt(receiver string, function string, opt Option) {
|
|||
`, receiver, function, opt.Code)
|
||||
}
|
||||
|
||||
func writeOptNone(receiver string, function string, opt Option) {
|
||||
fmt.Printf(`func (o %s) %s() error {
|
||||
func writeOptNone(w io.Writer, receiver string, function string, opt Option) {
|
||||
fmt.Fprintf(w, `func (o %s) %s() error {
|
||||
return o.setOpt(%d, nil)
|
||||
}
|
||||
`, receiver, function, opt.Code)
|
||||
}
|
||||
|
||||
func writeOpt(receiver string, opt Option) {
|
||||
func writeOpt(w io.Writer, receiver string, opt Option) {
|
||||
function := "Set" + translateName(opt.Name)
|
||||
|
||||
fmt.Println()
|
||||
fmt.Fprintln(w)
|
||||
|
||||
if opt.Description != "" {
|
||||
fmt.Printf("// %s\n", opt.Description)
|
||||
fmt.Fprintf(w, "// %s\n", opt.Description)
|
||||
if opt.ParamDesc != "" {
|
||||
fmt.Printf("//\n// Parameter: %s\n", opt.ParamDesc)
|
||||
fmt.Fprintf(w, "//\n// Parameter: %s\n", opt.ParamDesc)
|
||||
}
|
||||
} else {
|
||||
fmt.Printf("// Not yet implemented.\n")
|
||||
fmt.Fprintf(w, "// Not yet implemented.\n")
|
||||
}
|
||||
|
||||
switch opt.ParamType {
|
||||
case "String":
|
||||
writeOptString(receiver, function, opt)
|
||||
writeOptString(w, receiver, function, opt)
|
||||
case "Bytes":
|
||||
writeOptBytes(receiver, function, opt)
|
||||
writeOptBytes(w, receiver, function, opt)
|
||||
case "Int":
|
||||
writeOptInt(receiver, function, opt)
|
||||
writeOptInt(w, receiver, function, opt)
|
||||
case "":
|
||||
writeOptNone(receiver, function, opt)
|
||||
writeOptNone(w, receiver, function, opt)
|
||||
default:
|
||||
log.Fatalf("Totally unexpected ParamType %s", opt.ParamType)
|
||||
}
|
||||
|
@ -112,9 +114,9 @@ func translateName(old string) string {
|
|||
return strings.Replace(strings.Title(strings.Replace(old, "_", " ", -1)), " ", "", -1)
|
||||
}
|
||||
|
||||
func writeMutation(opt Option) {
|
||||
func writeMutation(w io.Writer, opt Option) {
|
||||
tname := translateName(opt.Name)
|
||||
fmt.Printf(`
|
||||
fmt.Fprintf(w, `
|
||||
// %s
|
||||
func (t Transaction) %s(key KeyConvertible, param []byte) {
|
||||
t.atomicOp(key.FDBKey(), param, %d)
|
||||
|
@ -122,23 +124,38 @@ func (t Transaction) %s(key KeyConvertible, param []byte) {
|
|||
`, opt.Description, tname, opt.Code)
|
||||
}
|
||||
|
||||
func writeEnum(scope Scope, opt Option, delta int) {
|
||||
fmt.Println()
|
||||
func writeEnum(w io.Writer, scope Scope, opt Option, delta int) {
|
||||
fmt.Fprintln(w)
|
||||
if opt.Description != "" {
|
||||
doc.ToText(os.Stdout, opt.Description, "\t// ", "", 73)
|
||||
doc.ToText(w, opt.Description, "\t// ", "", 73)
|
||||
// fmt.Printf(" // %s\n", opt.Description)
|
||||
}
|
||||
fmt.Printf(" %s %s = %d\n", scope.Name+translateName(opt.Name), scope.Name, opt.Code+delta)
|
||||
fmt.Fprintf(w, " %s %s = %d\n", scope.Name+translateName(opt.Name), scope.Name, opt.Code+delta)
|
||||
}
|
||||
|
||||
func main() {
|
||||
var inFile string
|
||||
var outFile string
|
||||
flag.StringVar(&inFile, "in", "stdin", "Input file")
|
||||
flag.StringVar(&outFile, "out", "stdout", "Output file")
|
||||
flag.Parse()
|
||||
|
||||
var err error
|
||||
|
||||
v := Options{}
|
||||
|
||||
data, err := ioutil.ReadAll(os.Stdin)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
var data []byte
|
||||
|
||||
if inFile == "stdin" {
|
||||
data, err = ioutil.ReadAll(os.Stdin)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
} else {
|
||||
data, err = ioutil.ReadFile(inFile)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
err = xml.Unmarshal(data, &v)
|
||||
|
@ -146,7 +163,17 @@ func main() {
|
|||
log.Fatal(err)
|
||||
}
|
||||
|
||||
fmt.Print(`/*
|
||||
var out *os.File
|
||||
if outFile == "stdout" {
|
||||
out = os.Stdout
|
||||
} else {
|
||||
out, err = os.Create(outFile)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
fmt.Fprint(out, `/*
|
||||
* generated.go
|
||||
*
|
||||
* This source file is part of the FoundationDB open source project
|
||||
|
@ -197,7 +224,7 @@ func int64ToBytes(i int64) ([]byte, error) {
|
|||
|
||||
for _, opt := range scope.Option {
|
||||
if !opt.Hidden {
|
||||
writeOpt(receiver, opt)
|
||||
writeOpt(out, receiver, opt)
|
||||
}
|
||||
}
|
||||
continue
|
||||
|
@ -206,7 +233,7 @@ func int64ToBytes(i int64) ([]byte, error) {
|
|||
if scope.Name == "MutationType" {
|
||||
for _, opt := range scope.Option {
|
||||
if !opt.Hidden {
|
||||
writeMutation(opt)
|
||||
writeMutation(out, opt)
|
||||
}
|
||||
}
|
||||
continue
|
||||
|
@ -223,16 +250,17 @@ func int64ToBytes(i int64) ([]byte, error) {
|
|||
scope.Name = "conflictRangeType"
|
||||
}
|
||||
|
||||
fmt.Printf(`
|
||||
fmt.Fprintf(out, `
|
||||
type %s int
|
||||
|
||||
const (
|
||||
`, scope.Name)
|
||||
for _, opt := range scope.Option {
|
||||
if !opt.Hidden {
|
||||
writeEnum(scope, opt, d)
|
||||
writeEnum(out, scope, opt, d)
|
||||
}
|
||||
}
|
||||
fmt.Println(")")
|
||||
fmt.Fprintln(out, ")")
|
||||
}
|
||||
out.Close()
|
||||
}
|
||||
|
|
|
@ -1,7 +1,3 @@
|
|||
include(UseJava)
|
||||
find_package(JNI 1.8 REQUIRED)
|
||||
find_package(Java 1.8 COMPONENTS Development REQUIRED)
|
||||
|
||||
set(JAVA_BINDING_SRCS
|
||||
src/main/com/apple/foundationdb/async/AsyncIterable.java
|
||||
src/main/com/apple/foundationdb/async/AsyncIterator.java
|
||||
|
@ -129,11 +125,67 @@ set_target_properties(fdb_java PROPERTIES
|
|||
set(CMAKE_JAVA_COMPILE_FLAGS "-source" "1.8" "-target" "1.8")
|
||||
set(CMAKE_JNI_TARGET TRUE)
|
||||
set(JAR_VERSION "${FDB_MAJOR}.${FDB_MINOR}.${FDB_REVISION}")
|
||||
add_jar(fdb-java ${JAVA_BINDING_SRCS} ${GENERATED_JAVA_FILES}
|
||||
OUTPUT_DIR ${PROJECT_BINARY_DIR}/lib)
|
||||
add_jar(fdb-java ${JAVA_BINDING_SRCS} ${GENERATED_JAVA_FILES} ${CMAKE_SOURCE_DIR}/LICENSE
|
||||
OUTPUT_DIR ${PROJECT_BINARY_DIR}/lib VERSION ${CMAKE_PROJECT_VERSION})
|
||||
add_dependencies(fdb-java fdb_java_options fdb_java)
|
||||
add_jar(foundationdb-tests SOURCES ${JAVA_TESTS_SRCS} INCLUDE_JARS fdb-java)
|
||||
add_dependencies(foundationdb-tests fdb_java_options)
|
||||
|
||||
install_jar(fdb-java DESTINATION ${FDB_SHARE_DIR}/java COMPONENT clients)
|
||||
install(TARGETS fdb_java DESTINATION ${FDB_LIB_DIR} COMPONENT clients)
|
||||
# TODO[mpilman]: The java RPM will require some more effort (mostly on debian). However,
|
||||
# most people will use the fat-jar, so it is not clear how high this priority is.
|
||||
|
||||
#install_jar(fdb-java DESTINATION ${FDB_SHARE_DIR}/java COMPONENT java)
|
||||
#install(TARGETS fdb_java DESTINATION ${FDB_LIB_DIR} COMPONENT java)
|
||||
|
||||
set(FAT_JAR_BINARIES "NOTFOUND" CACHE STRING
|
||||
"Path of a directory structure with libraries to include in fat jar (a lib directory)")
|
||||
|
||||
set(jar_destination ${CMAKE_BINARY_DIR}/packages)
|
||||
set(unpack_dir ${CMAKE_CURRENT_BINARY_DIR}/fat_jar)
|
||||
file(MAKE_DIRECTORY ${jar_destination})
|
||||
file(MAKE_DIRECTORY ${unpack_dir})
|
||||
message(STATUS "Building fat jar to ${jar_destination}")
|
||||
get_property(jar_path TARGET fdb-java PROPERTY JAR_FILE)
|
||||
add_custom_command(OUTPUT ${unpack_dir}/META-INF/MANIFEST.MF
|
||||
COMMAND ${Java_JAR_EXECUTABLE} xf ${jar_path}
|
||||
WORKING_DIRECTORY ${unpack_dir}
|
||||
DEPENDS "${jar_path}"
|
||||
COMMENT "Unpack jar-file")
|
||||
add_custom_target(unpack_jar DEPENDS ${unpack_dir}/META-INF/MANIFEST.MF)
|
||||
add_dependencies(unpack_jar fdb-java)
|
||||
add_custom_command(OUTPUT ${unpack_dir}/LICENSE
|
||||
COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_SOURCE_DIR}/LICENSE ${unpack_dir}
|
||||
COMMENT "copy license")
|
||||
add_custom_target(copy_license DEPENDS ${unpack_dir}/LICENSE)
|
||||
add_dependencies(unpack_jar copy_license)
|
||||
if(FAT_JAR_BINARIES)
|
||||
add_custom_command(OUTPUT ${unpack_dir}/lib
|
||||
COMMAND ${CMAKE_COMMAND} -E copy_directory ${FAT_JAR_BINARIES} ${unpack_dir}
|
||||
COMMENT "copy additional libraries"
|
||||
DEPENDS ${unpack_dir}/META-INF/MANIFEST.MF)
|
||||
add_custom_target(copy_libs DEPENDS ${unpack_dir}/lib)
|
||||
add_dependencies(unpack_jar copy_libs)
|
||||
endif()
|
||||
if(WIN32)
|
||||
set(lib_destination "windows/amd64")
|
||||
elseif(APPLE)
|
||||
set(lib_destination "osx/x86_64")
|
||||
else()
|
||||
set(lib_destination "linux/amd64")
|
||||
endif()
|
||||
set(lib_destination "${unpack_dir}/lib/${lib_destination}")
|
||||
file(MAKE_DIRECTORY ${lib_destination})
|
||||
add_custom_command(OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/lib_copied
|
||||
COMMAND ${CMAKE_COMMAND} -E copy $<TARGET_FILE:fdb_java> ${lib_destination} &&
|
||||
${CMAKE_COMMAND} -E touch ${CMAKE_CURRENT_BINARY_DIR}/lib_copied
|
||||
COMMENT "Copy library")
|
||||
add_custom_target(copy_lib DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/lib_copied)
|
||||
add_dependencies(copy_lib unpack_jar)
|
||||
set(target_jar ${jar_destination}/fdb-java-${CMAKE_PROJECT_VERSION}.jar)
|
||||
add_custom_command(OUTPUT ${target_jar}
|
||||
COMMAND ${Java_JAR_EXECUTABLE} cf ${target_jar} .
|
||||
WORKING_DIRECTORY ${unpack_dir}
|
||||
COMMENT "Build ${jar_destination}/fdb-java-${CMAKE_PROJECT_VERSION}.jar")
|
||||
add_custom_target(fat-jar DEPENDS ${target_jar})
|
||||
add_dependencies(fat-jar copy_lib)
|
||||
add_dependencies(packages fat-jar)
|
||||
|
|
|
@ -0,0 +1,53 @@
|
|||
<img alt="FoundationDB logo" src="documentation/FDB_logo.png?raw=true" width="400">
|
||||
|
||||
FoundationDB is a distributed database designed to handle large volumes of structured data across clusters of commodity servers. It organizes data as an ordered key-value store and employs ACID transactions for all operations. It is especially well-suited for read/write workloads but also has excellent performance for write-intensive workloads. Users interact with the database using an API language binding.
|
||||
|
||||
To learn more about FoundationDB, visit [foundationdb.org](https://www.foundationdb.org/)
|
||||
|
||||
## FoundationDB Java Bindings
|
||||
|
||||
In order to build the java bindings,
|
||||
[JDK](http://www.oracle.com/technetwork/java/javase/downloads/index.html) >= 8
|
||||
has to be installed. CMake will try to find a JDK installation; if it can find
|
||||
one it will automatically build the java bindings.
|
||||
|
||||
If you have Java installed but cmake fails to find it, set the
`JAVA_HOME` environment variable.
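For example (the JDK path below is illustrative and depends on your system):

``` shell
export JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64  # adjust to your JDK location
cmake <PATH_TO_FDB_SOURCE>
```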
### Fat Jar
|
||||
|
||||
By default, the generated jar file will depend on an installed libfdb_java
|
||||
(provided with the generated RPM/DEB file on Linux). However, users usually find
|
||||
a Jar-file that contains this library more convenient. This is also what you
|
||||
will get if you download the jar file from Maven.
|
||||
|
||||
This file can be generated by compiling the `packages` target. For example with
|
||||
make, you can run:
|
||||
|
||||
``` shell
|
||||
make packages
|
||||
```
|
||||
|
||||
#### Multi-Platform Jar-File
|
||||
|
||||
If you want to create a jar file that can run on more than one supported
|
||||
architecture (the official one supports MacOS, Linux, and Windows), you can do
|
||||
that by executing the following steps:
|
||||
|
||||
1. Create a directory called `lib` somewhere on your file system.
|
||||
1. Create a subdirectory for each *additional* platform you want to support
|
||||
(`windows` for windows, `osx` for MacOS, and `linux` for Linux).
|
||||
1. Under each of those create a subdirectory with the name of the architecture
|
||||
(currently only `amd64` is supported - on MacOS this has to be called
|
||||
`x86_64` - `amd64` on all others).
|
||||
1. Set the cmake variable `FAT_JAR_BINARIES` to this `lib` directory. For
|
||||
example, if you created this directory structure under `/foo/bar`, the
|
||||
corresponding cmake command would be:
|
||||
|
||||
```
|
||||
cmake -DFAT_JAR_BINARIES=/foo/bar/lib <PATH_TO_FDB_SOURCE>
|
||||
```
|
||||
|
||||
After building the packages (with `make packages` or the packages
|
||||
target in `Visual Studio`) you will find a jar-file in the `packages`
|
||||
directory in your build directory.
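As an illustrative sketch, building on Linux while bundling additional MacOS and Windows libraries would look roughly like this (the `/foo/bar` prefix and the library file names are only examples):

``` shell
mkdir -p /foo/bar/lib/osx/x86_64 /foo/bar/lib/windows/amd64
cp libfdb_java.jnilib /foo/bar/lib/osx/x86_64/     # MacOS JNI library (example name)
cp fdb_java.dll       /foo/bar/lib/windows/amd64/  # Windows JNI library (example name)
cmake -DFAT_JAR_BINARIES=/foo/bar/lib <PATH_TO_FDB_SOURCE>
make packages
```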
@ -5,7 +5,9 @@ set(SRCS
|
|||
fdb/locality.py
|
||||
fdb/six.py
|
||||
fdb/subspace_impl.py
|
||||
fdb/tuple.py)
|
||||
fdb/tuple.py
|
||||
README.rst
|
||||
MANIFEST.in)
|
||||
|
||||
if(APPLE)
|
||||
list(APPEND SRCS fdb/libfdb_c.dylib.pth)
|
||||
|
@ -20,17 +22,10 @@ foreach(src ${SRCS})
|
|||
if(NOT EXISTS ${dirname})
|
||||
file(MAKE_DIRECTORY ${PROJECT_BINARY_DIR}/bindings/python/${dirname})
|
||||
endif()
|
||||
set(copy_command "cp")
|
||||
set(from_path ${CMAKE_CURRENT_SOURCE_DIR}/${src})
|
||||
set(to_path ${CMAKE_CURRENT_BINARY_DIR}/${src})
|
||||
if (WIN32)
|
||||
set(copy_command "copy")
|
||||
# copy on Windows doesn't understand '/' separators
|
||||
string(REPLACE "/" "\\" from_path "${from_path}")
|
||||
string(REPLACE "/" "\\" to_path "${to_path}")
|
||||
endif()
|
||||
add_custom_command(OUTPUT ${PROJECT_BINARY_DIR}/bindings/python/${src}
|
||||
COMMAND ${copy_command} ${from_path} ${to_path}
|
||||
COMMAND ${CMAKE_COMMAND} -E copy ${from_path} ${to_path}
|
||||
DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/${src}
|
||||
WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
|
||||
COMMENT "copy ${src}")
|
||||
|
@ -46,4 +41,31 @@ vexillographer_compile(TARGET fdb_python_options LANG python OUT ${options_file}
|
|||
add_dependencies(python_binding fdb_python_options)
|
||||
|
||||
set(out_files "${out_files};${options_file}")
|
||||
install(FILES ${out_files} DESTINATION ${FDB_PYTHON_INSTALL_DIR} COMPONENT clients)
|
||||
# TODO[mpilman]: it is not clear whether we want to have rpms for python
|
||||
#install(FILES ${out_files} DESTINATION ${FDB_PYTHON_INSTALL_DIR} COMPONENT python)
|
||||
|
||||
# Create sdist
|
||||
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/setup.py.cmake ${CMAKE_CURRENT_BINARY_DIR}/setup.py)
|
||||
configure_file(${CMAKE_SOURCE_DIR}/LICENSE ${CMAKE_CURRENT_BINARY_DIR}/LICENSE COPYONLY)
|
||||
find_program(pycodestyle pycodestyle)
|
||||
if (pycodestyle)
|
||||
add_custom_command(OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/check_py_code_style
|
||||
COMMAND ${pycodestyle} bindings/python --config=${CMAKE_CURRENT_SOURCE_DIR}/setup.cfg &&
|
||||
${CMAKE_COMMAND} -E touch ${CMAKE_CURRENT_BINARY_DIR}/check_py_code_style
|
||||
WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
|
||||
DEPENDS ${out_files}
|
||||
COMMENT "Check python code style")
|
||||
add_custom_target(fdb_python_check DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/check_py_code_style)
|
||||
else()
|
||||
add_custom_target(fdb_python_check COMMAND ${CMAKE_COMMAND} -E echo "Skipped Python style check! Missing: pycodestyle")
|
||||
endif()
|
||||
set(package_file_name foundationdb-${FDB_VERSION}.tar.gz)
|
||||
set(package_file ${CMAKE_BINARY_DIR}/packages/${package_file_name})
|
||||
add_custom_command(OUTPUT ${package_file}
|
||||
COMMAND $<TARGET_FILE:Python::Interpreter> setup.py sdist &&
|
||||
${CMAKE_COMMAND} -E copy dist/${package_file_name} ${package_file}
|
||||
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
|
||||
COMMENT "Create Python sdist package")
|
||||
add_custom_target(python_package DEPENDS ${package_file})
|
||||
add_dependencies(python_package python_binding)
|
||||
add_dependencies(packages python_package)
|
||||
|
|
|
@ -0,0 +1,38 @@
|
|||
from distutils.core import setup
|
||||
|
||||
try:
|
||||
with open("README.rst") as f:
|
||||
long_desc = f.read()
|
||||
except:
|
||||
long_desc = ""
|
||||
|
||||
setup(name="foundationdb",
|
||||
version="${FDB_VERSION}",
|
||||
author="FoundationDB",
|
||||
author_email="fdb-dist@apple.com",
|
||||
description="Python bindings for the FoundationDB database",
|
||||
url="https://www.foundationdb.org",
|
||||
packages=['fdb'],
|
||||
package_data={'fdb': ["fdb/*.py"]},
|
||||
long_description=long_desc,
|
||||
classifiers=[
|
||||
'Development Status :: 5 - Production/Stable',
|
||||
'Intended Audience :: Developers',
|
||||
'License :: OSI Approved :: Apache Software License',
|
||||
'Operating System :: MacOS :: MacOS X',
|
||||
'Operating System :: Microsoft :: Windows',
|
||||
'Operating System :: POSIX :: Linux',
|
||||
'Programming Language :: Python :: 2',
|
||||
'Programming Language :: Python :: 2.6',
|
||||
'Programming Language :: Python :: 2.7',
|
||||
'Programming Language :: Python :: 3',
|
||||
'Programming Language :: Python :: 3.0',
|
||||
'Programming Language :: Python :: 3.1',
|
||||
'Programming Language :: Python :: 3.2',
|
||||
'Programming Language :: Python :: 3.3',
|
||||
'Programming Language :: Python :: 3.4',
|
||||
'Programming Language :: Python :: Implementation :: CPython',
|
||||
'Topic :: Database',
|
||||
'Topic :: Database :: Front-Ends'
|
||||
]
|
||||
)
|
|
@ -0,0 +1,16 @@
|
|||
# we put this generated file into the src dir, as it
|
||||
# greatly simplifies debugging
|
||||
vexillographer_compile(TARGET ruby_options LANG ruby
|
||||
OUT ${CMAKE_CURRENT_SOURCE_DIR}/lib/fdboptions.rb ALL)
|
||||
configure_file(fdb.gemspec.cmake fdb.gemspec)
|
||||
|
||||
set(gem_file fdb-${FDB_VERSION}.gem)
|
||||
set(gem_target ${CMAKE_BINARY_DIR}/packages/${gem_file})
|
||||
add_custom_command(OUTPUT ${gem_target}
|
||||
COMMAND ${GEM_COMMAND} build fdb.gemspec &&
|
||||
${CMAKE_COMMAND} -E copy ${gem_file} ${gem_target}
|
||||
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
|
||||
COMMENT "Building ruby gem")
|
||||
add_custom_target(gem_package DEPENDS ${gem_target})
|
||||
add_dependencies(gem_package ruby_options)
|
||||
add_dependencies(packages gem_package)
|
|
@ -0,0 +1,22 @@
|
|||
# -*- mode: ruby; -*-
|
||||
|
||||
Gem::Specification.new do |s|
|
||||
s.name = 'fdb'
|
||||
s.version = '${FDB_VERSION}'
|
||||
s.date = Time.new.strftime '%Y-%m-%d'
|
||||
s.summary = "Ruby bindings for the FoundationDB database"
|
||||
s.description = <<-EOF
|
||||
Ruby bindings for the FoundationDB database.
|
||||
|
||||
Complete documentation of the FoundationDB Ruby API can be found at:
|
||||
https://apple.github.io/foundationdb/api-ruby.html.
|
||||
EOF
|
||||
s.authors = ["FoundationDB"]
|
||||
s.email = 'fdb-dist@apple.com'
|
||||
s.files = ["${CMAKE_SOURCE_DIR}/LICENSE", "${CMAKE_CURRENT_SOURCE_DIR}/lib/fdb.rb", "${CMAKE_CURRENT_SOURCE_DIR}/lib/fdbdirectory.rb", "${CMAKE_CURRENT_SOURCE_DIR}/lib/fdbimpl.rb", "${CMAKE_CURRENT_SOURCE_DIR}/lib/fdblocality.rb", "${CMAKE_CURRENT_SOURCE_DIR}/lib/fdboptions.rb", "${CMAKE_CURRENT_SOURCE_DIR}/lib/fdbsubspace.rb", "${CMAKE_CURRENT_SOURCE_DIR}/lib/fdbtuple.rb", "${CMAKE_CURRENT_SOURCE_DIR}/lib/fdbimpl_v609.rb"]
|
||||
s.homepage = 'https://www.foundationdb.org'
|
||||
s.license = 'Apache v2'
|
||||
s.add_dependency('ffi', '>= 1.1.5')
|
||||
s.required_ruby_version = '>= 1.9.3'
|
||||
s.requirements << 'These bindings require the FoundationDB client. The client can be obtained from https://www.foundationdb.org/download/.'
|
||||
end
|
|
@ -1,9 +1,9 @@
|
|||
FROM ubuntu:15.04
|
||||
LABEL version=0.0.3
|
||||
LABEL version=0.0.5
|
||||
|
||||
RUN sed -i -e 's/archive.ubuntu.com\|security.ubuntu.com/old-releases.ubuntu.com/g' -e 's/us\.old/old/g' /etc/apt/sources.list && apt-get clean
|
||||
|
||||
RUN apt-get update && apt-get --no-install-recommends install -y --force-yes bzip2 ca-certificates=20141019 adduser apt base-files base-passwd bash binutils build-essential cpp cpp-4.9 dpkg dos2unix fakeroot findutils g++=4:4.9.2-2ubuntu2 g++-4.9=4.9.2-10ubuntu13 gawk=1:4.1.1+dfsg-1 gcc-5-base gcc=4:4.9.2-2ubuntu2 gcc-4.9=4.9.2-10ubuntu13 gcc-4.9-base:amd64=4.9.2-10ubuntu13 gcc-5-base:amd64=5.1~rc1-0ubuntu1 gdb git golang golang-go golang-go-linux-amd64 golang-src grep gzip hostname java-common libasan1 liblsan0 libtsan0 libubsan0 libcilkrts5 libgcc-4.9-dev libstdc++-4.9-dev libgl1-mesa-dri libgl1-mesa-glx libmono-system-xml-linq4.0-cil libmono-system-data-datasetextensions4.0-cil libstdc++-4.9-pic locales login m4 make makedev mawk mono-dmcs npm openjdk-8-jdk passwd python-distlib python-gevent python-greenlet python-html5lib python-minimal python-pip python-pkg-resources python-requests python-setuptools python-six python-urllib3 python-yaml python2.7 python2.7-minimal rpm rpm2cpio ruby ruby2.1 rubygems-integration sed tar texinfo tzdata-java udev unzip util-linux valgrind vim wget golang-go.tools curl sphinx-common gnupg python-dev
|
||||
RUN apt-get update && apt-get --no-install-recommends install -y --force-yes bzip2 ca-certificates=20141019 adduser apt base-files base-passwd bash binutils build-essential cpp cpp-4.9 dpkg dos2unix fakeroot findutils g++=4:4.9.2-2ubuntu2 g++-4.9=4.9.2-10ubuntu13 gawk=1:4.1.1+dfsg-1 gcc-5-base gcc=4:4.9.2-2ubuntu2 gcc-4.9=4.9.2-10ubuntu13 gcc-4.9-base:amd64=4.9.2-10ubuntu13 gcc-5-base:amd64=5.1~rc1-0ubuntu1 gdb git golang golang-go golang-go-linux-amd64 golang-src grep gzip hostname java-common libasan1 liblsan0 libtsan0 libubsan0 libcilkrts5 libgcc-4.9-dev libstdc++-4.9-dev libgl1-mesa-dri libgl1-mesa-glx libmono-system-xml-linq4.0-cil libmono-system-data-datasetextensions4.0-cil libstdc++-4.9-pic locales login m4 make makedev mawk mono-dmcs npm openjdk-8-jdk passwd python-distlib python-gevent python-greenlet python-html5lib python-minimal python-pip python-pkg-resources python-requests python-setuptools python-six python-urllib3 python-yaml python2.7 python2.7-minimal rpm rpm2cpio ruby ruby2.1 rubygems-integration sed tar texinfo tzdata-java udev unzip util-linux valgrind vim wget golang-go.tools curl sphinx-common gnupg python-dev python3 python3-dev
|
||||
|
||||
RUN adduser --disabled-password --gecos '' fdb && chown -R fdb /opt && chmod -R 0777 /opt
|
||||
|
||||
|
@ -31,6 +31,8 @@ RUN cd /opt/ && wget https://ftp.openbsd.org/pub/OpenBSD/LibreSSL/libressl-2.6.4
|
|||
./configure CFLAGS="-fPIC -O3" && make -j4 && make install &&\
|
||||
cd /opt/ && rm -r libressl-2.6.4/ libressl-2.6.4.tar.gz libressl-2.6.4.tar.gz.asc libressl.asc
|
||||
|
||||
RUN cd /opt && wget https://cmake.org/files/v3.12/cmake-3.12.1-Linux-x86_64.tar.gz -qO - | tar -xz
|
||||
|
||||
RUN LANGUAGE=en_US.UTF-8 LANG=en_US.UTF-8 LC_ALL=en_US.UTF-8 locale-gen en_US.UTF-8
|
||||
|
||||
RUN dpkg-reconfigure locales
|
||||
|
@ -43,3 +45,4 @@ ENV CC=$CC
|
|||
|
||||
ARG LIBRARY_PATH=/usr/local/lib
|
||||
ENV LIBRARY_PATH=$LD_FLAGS
|
||||
ENV PATH=$PATH:/opt/cmake-3.12.1-Linux-x86_64/bin
|
||||
|
|
|
@ -0,0 +1,44 @@
|
|||
FROM centos:6
|
||||
LABEL version=0.0.4
|
||||
|
||||
RUN yum install -y yum-utils
|
||||
RUN yum-config-manager --enable rhel-server-rhscl-7-rpms
|
||||
RUN yum -y install centos-release-scl
|
||||
RUN yum install -y devtoolset-7
|
||||
|
||||
# install cmake
|
||||
RUN curl -L https://github.com/Kitware/CMake/releases/download/v3.13.4/cmake-3.13.4-Linux-x86_64.tar.gz > /tmp/cmake.tar.gz &&\
|
||||
echo "563a39e0a7c7368f81bfa1c3aff8b590a0617cdfe51177ddc808f66cc0866c76 /tmp/cmake.tar.gz" > /tmp/cmake-sha.txt &&\
|
||||
sha256sum -c /tmp/cmake-sha.txt &&\
|
||||
cd /tmp && tar xf cmake.tar.gz && cp -r cmake-3.13.4-Linux-x86_64/* /usr/local/
|
||||
|
||||
# install boost
|
||||
RUN curl -L https://dl.bintray.com/boostorg/release/1.67.0/source/boost_1_67_0.tar.bz2 > /tmp/boost.tar.bz2 &&\
|
||||
cd /tmp && echo "2684c972994ee57fc5632e03bf044746f6eb45d4920c343937a465fd67a5adba boost.tar.bz2" > boost-sha.txt &&\
|
||||
sha256sum -c boost-sha.txt && tar xf boost.tar.bz2 && cp -r boost_1_67_0/boost /usr/local/include/ &&\
|
||||
rm -rf boost.tar.bz2 boost_1_67_0
|
||||
|
||||
# install mono (for actorcompiler)
|
||||
RUN yum install -y epel-release
|
||||
RUN yum install -y mono-core
|
||||
|
||||
# install Java
|
||||
RUN yum install -y java-1.8.0-openjdk-devel
|
||||
|
||||
# install LibreSSL
|
||||
RUN curl https://ftp.openbsd.org/pub/OpenBSD/LibreSSL/libressl-2.8.2.tar.gz > /tmp/libressl.tar.gz &&\
|
||||
cd /tmp && echo "b8cb31e59f1294557bfc80f2a662969bc064e83006ceef0574e2553a1c254fd5 libressl.tar.gz" > libressl-sha.txt &&\
|
||||
sha256sum -c libressl-sha.txt && tar xf libressl.tar.gz &&\
|
||||
cd libressl-2.8.2 && cd /tmp/libressl-2.8.2 && scl enable devtoolset-7 -- ./configure --prefix=/usr/local/stow/libressl CFLAGS="-fPIC -O3" --prefix=/usr/local &&\
|
||||
cd /tmp/libressl-2.8.2 && scl enable devtoolset-7 -- make -j`nproc` install &&\
|
||||
rm -rf /tmp/libressl-2.8.2 /tmp/libressl.tar.gz
|
||||
|
||||
|
||||
# install dependencies for bindings and documentation
|
||||
# python 2.7 is required for the documentation
|
||||
RUN yum install -y rh-python36-python-devel rh-ruby24 golang python27
|
||||
|
||||
# install packaging tools
|
||||
RUN yum install -y rpm-build debbuild
|
||||
|
||||
CMD scl enable devtoolset-7 python27 rh-python36 rh-ruby24 -- bash
|
|
@ -0,0 +1,236 @@
|
|||
#!/usr/bin/env bash
|
||||
|
||||
arguments_usage() {
|
||||
cat <<EOF
|
||||
usage: build.sh [-h] [commands]
|
||||
-h: print this help message and
|
||||
abort execution
|
||||
|
||||
Will execute the passed commands
|
||||
in the order they were passed
|
||||
EOF
|
||||
}
|
||||
|
||||
arguments_parse() {
|
||||
local __res=0
|
||||
while getopts ":ho:" opt
|
||||
do
|
||||
case ${opt} in
|
||||
h )
|
||||
arguments_usage
|
||||
__res=2
|
||||
break
|
||||
;;
|
||||
\? )
|
||||
echo "Unknown option ${opt}"
|
||||
arguments_usage
|
||||
__res=1
|
||||
break
|
||||
;;
|
||||
esac
|
||||
done
|
||||
shift $((OPTIND -1))
|
||||
commands=("$@")
|
||||
return ${__res}
|
||||
}
|
||||
|
||||
configure() {
|
||||
local __res=0
|
||||
for _ in 1
|
||||
do
|
||||
cmake ../src
|
||||
__res=$?
|
||||
if [ ${__res} -ne 0 ]
|
||||
then
|
||||
break
|
||||
fi
|
||||
done
|
||||
return ${__res}
|
||||
}
|
||||
|
||||
build_fast() {
|
||||
local __res=0
|
||||
for _ in 1
|
||||
do
|
||||
make -j`nproc`
|
||||
__res=$?
|
||||
if [ ${__res} -ne 0 ]
|
||||
then
|
||||
break
|
||||
fi
|
||||
done
|
||||
return ${__res}
|
||||
}
|
||||
|
||||
build() {
|
||||
local __res=0
|
||||
for _ in 1
|
||||
do
|
||||
configure
|
||||
__res=$?
|
||||
if [ ${__res} -ne 0 ]
|
||||
then
|
||||
break
|
||||
fi
|
||||
build_fast
|
||||
__res=$?
|
||||
if [ ${__res} -ne 0 ]
|
||||
then
|
||||
break
|
||||
fi
|
||||
done
|
||||
return ${__res}
|
||||
}
|
||||
|
||||
package_fast() {
|
||||
local __res=0
|
||||
for _ in 1
|
||||
do
|
||||
make -j`nproc` packages
|
||||
__res=$?
|
||||
if [ ${__res} -ne 0 ]
|
||||
then
|
||||
break
|
||||
fi
|
||||
done
|
||||
return ${__res}
|
||||
}
|
||||
|
||||
package() {
|
||||
local __res=0
|
||||
for _ in 1
|
||||
do
|
||||
configure
|
||||
__res=$?
|
||||
if [ ${__res} -ne 0 ]
|
||||
then
|
||||
break
|
||||
fi
|
||||
package_fast
|
||||
__res=$?
|
||||
if [ ${__res} -ne 0 ]
|
||||
then
|
||||
break
|
||||
fi
|
||||
done
|
||||
return ${__res}
|
||||
}
|
||||
|
||||
rpm() {
|
||||
local __res=0
|
||||
for _ in 1
|
||||
do
|
||||
cmake -DINSTALL_LAYOUT=RPM ../src
|
||||
__res=$?
|
||||
if [ ${__res} -ne 0 ]
|
||||
then
|
||||
break
|
||||
fi
|
||||
build_fast
|
||||
__res=$?
|
||||
if [ ${__res} -ne 0 ]
|
||||
then
|
||||
break
|
||||
fi
|
||||
fakeroot cpack
|
||||
__res=$?
|
||||
if [ ${__res} -ne 0 ]
|
||||
then
|
||||
break
|
||||
fi
|
||||
done
|
||||
return ${__res}
|
||||
}
|
||||
|
||||
deb() {
|
||||
local __res=0
|
||||
for _ in 1
|
||||
do
|
||||
cmake -DINSTALL_LAYOUT=DEB ../src
|
||||
__res=$?
|
||||
if [ ${__res} -ne 0 ]
|
||||
then
|
||||
break
|
||||
fi
|
||||
build_fast
|
||||
__res=$?
|
||||
if [ ${__res} -ne 0 ]
|
||||
then
|
||||
break
|
||||
fi
|
||||
fakeroot cpack
|
||||
__res=$?
|
||||
if [ ${__res} -ne 0 ]
|
||||
then
|
||||
break
|
||||
fi
|
||||
done
|
||||
return ${__res}
|
||||
}
|
||||
|
||||
main() {
|
||||
local __res=0
|
||||
for _ in 1
|
||||
do
|
||||
arguments_parse "$@"
|
||||
__res=$?
|
||||
if [ ${__res} -ne 0 ]
|
||||
then
|
||||
if [ ${__res} -eq 2 ]
|
||||
then
|
||||
# in this case there was no error
|
||||
# We still want to exit the script
|
||||
__res=0
|
||||
fi
|
||||
break
|
||||
fi
|
||||
echo "Num commands ${#commands[@]}"
|
||||
for command in "${commands[@]}"
|
||||
do
|
||||
echo "Command: ${command}"
|
||||
case ${command} in
|
||||
configure )
|
||||
configure
|
||||
__res=$?
|
||||
;;
|
||||
build )
|
||||
build
|
||||
__res=$?
|
||||
;;
|
||||
build/fast )
|
||||
build_fast
|
||||
__res=$?
|
||||
;;
|
||||
package )
|
||||
package
|
||||
__res=$?
|
||||
;;
|
||||
package/fast )
|
||||
package_fast
|
||||
__res=$?
|
||||
;;
|
||||
rpm )
|
||||
rpm
|
||||
;;
|
||||
deb )
|
||||
deb
|
||||
;;
|
||||
linux-pkgs)
|
||||
rpm
|
||||
deb
|
||||
;;
|
||||
* )
|
||||
echo "ERROR: Command not found ($command)"
|
||||
__res=1
|
||||
;;
|
||||
esac
|
||||
if [ ${__res} -ne 0 ]
|
||||
then
|
||||
break
|
||||
fi
|
||||
done
|
||||
done
|
||||
return ${__res}
|
||||
}
|
||||
|
||||
main "$@"
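The commands accepted by the script above map to invocations like the following (a sketch; as in the docker-compose file further below, the script expects to run from a build directory whose sibling `../src` contains the source checkout):

``` shell
# from an empty build directory, with the sources located at ../src
bash ../src/build/cmake/build.sh configure build   # configure, then full build
bash ../src/build/cmake/build.sh package/fast      # just re-run `make packages`
bash ../src/build/cmake/build.sh rpm deb           # build RPM and DEB packages
```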
@ -0,0 +1,3 @@
|
|||
FROM centos:6
|
||||
|
||||
RUN yum install -y yum-utils
|
|
@ -0,0 +1,3 @@
|
|||
FROM ubuntu:16.04
|
||||
|
||||
RUN apt-get update
|
|
@ -0,0 +1,57 @@
|
|||
version: "3"
|
||||
|
||||
services:
|
||||
|
||||
common: &common
|
||||
image: foundationdb-build:0.0.4
|
||||
build:
|
||||
context: .
|
||||
dockerfile: Dockerfile
|
||||
|
||||
build-setup: &build-setup
|
||||
<<: *common
|
||||
depends_on: [common]
|
||||
#debuginfo builds need the build path to be longer than
|
||||
#the path where debuginfo sources are placed. Crazy, yes,
|
||||
#see the manual for CPACK_RPM_BUILD_SOURCE_DIRS_PREFIX.
|
||||
volumes:
|
||||
- ../..:/foundationdb/deep/directory/as/debuginfo/doesnt/work/otherwise/src
|
||||
- ${BUILDDIR}:/foundationdb/deep/directory/as/debuginfo/doesnt/work/otherwise/build
|
||||
working_dir: /foundationdb/deep/directory/as/debuginfo/doesnt/work/otherwise/build
|
||||
|
||||
configure: &configure
|
||||
<<: *build-setup
|
||||
command: scl enable devtoolset-7 rh-ruby24 rh-python36 python27 -- bash ../src/build/cmake/build.sh configure
|
||||
|
||||
build: &build
|
||||
<<: *build-setup
|
||||
command: scl enable devtoolset-7 rh-ruby24 rh-python36 python27 -- bash ../src/build/cmake/build.sh build
|
||||
|
||||
build-fast: &build-fast
|
||||
<<: *build-setup
|
||||
command: scl enable devtoolset-7 rh-ruby24 rh-python36 python27 -- bash ../src/build/cmake/build.sh build/fast
|
||||
|
||||
rpm: &rpm
|
||||
<<: *build-setup
|
||||
command: scl enable devtoolset-7 rh-ruby24 rh-python36 python27 -- bash ../src/build/cmake/build.sh rpm
|
||||
|
||||
deb: &deb
|
||||
<<: *build-setup
|
||||
command: scl enable devtoolset-7 rh-ruby24 rh-python36 python27 -- bash ../src/build/cmake/build.sh deb
|
||||
|
||||
linux-pkgs:
|
||||
<<: *build-setup
|
||||
command: scl enable devtoolset-7 rh-ruby24 rh-python36 python27 -- bash ../src/build/cmake/build.sh linux-pkgs
|
||||
|
||||
package: &package
|
||||
<<: *build-setup
|
||||
command: scl enable devtoolset-7 rh-ruby24 rh-python36 python27 -- bash ../src/build/cmake/build.sh package
|
||||
|
||||
package-fast: &package-fast
|
||||
<<: *build-setup
|
||||
command: scl enable devtoolset-7 rh-ruby24 rh-python36 python27 -- bash ../src/build/cmake/build.sh package/fast
|
||||
|
||||
shell:
|
||||
<<: *build-setup
|
||||
volumes:
|
||||
- ..:/foundationdb
|
|
@ -2,10 +2,7 @@ version: "3"
|
|||
|
||||
services:
|
||||
common: &common
|
||||
image: foundationdb-build:0.0.3
|
||||
build:
|
||||
context: .
|
||||
dockerfile: Dockerfile
|
||||
image: foundationdb/foundationdb-build:0.0.5
|
||||
|
||||
build-setup: &build-setup
|
||||
<<: *common
|
||||
|
@ -15,12 +12,14 @@ services:
|
|||
working_dir: /foundationdb
|
||||
environment:
|
||||
- MAKEJOBS=1
|
||||
- BUILD_DIR=./work
|
||||
|
||||
release-setup: &release-setup
|
||||
<<: *build-setup
|
||||
environment:
|
||||
- MAKEJOBS=1
|
||||
- RELEASE=true
|
||||
- BUILD_DIR=./work
|
||||
|
||||
snapshot-setup: &snapshot-setup
|
||||
<<: *build-setup
|
||||
|
@ -54,6 +53,30 @@ services:
|
|||
<<: *snapshot-bindings
|
||||
|
||||
|
||||
snapshot-cmake: &snapshot-cmake
|
||||
<<: *build-setup
|
||||
command: bash -c 'if [ -f CMakeLists.txt ]; then mkdir -p "$${BUILD_DIR}" && cd "$${BUILD_DIR}" && cmake .. && make -j "$${MAKEJOBS}"; fi'
|
||||
|
||||
prb-cmake:
|
||||
<<: *snapshot-cmake
|
||||
|
||||
|
||||
snapshot-ctest: &snapshot-ctest
|
||||
<<: *build-setup
|
||||
command: bash -c 'if [ -f CMakeLists.txt ]; then mkdir -p "$${BUILD_DIR}" && cd "$${BUILD_DIR}" && cmake .. && make -j "$${MAKEJOBS}" && ctest -L fast -j "$${MAKEJOBS}" --output-on-failure; fi'
|
||||
|
||||
prb-ctest:
|
||||
<<: *snapshot-ctest
|
||||
|
||||
|
||||
snapshot-correctness: &snapshot-correctness
|
||||
<<: *build-setup
|
||||
command: bash -c 'if [ -f CMakeLists.txt ]; then mkdir -p "$${BUILD_DIR}" && cd "$${BUILD_DIR}" && cmake .. && make -j "$${MAKEJOBS}" && ctest -j "$${MAKEJOBS}" --output-on-failure; fi'
|
||||
|
||||
prb-correctness:
|
||||
<<: *snapshot-correctness
|
||||
|
||||
|
||||
shell:
|
||||
<<: *build-setup
|
||||
volumes:
|
||||
|
|
|
@ -22,9 +22,10 @@ TARGETS += packages
|
|||
CLEAN_TARGETS += packages_clean
|
||||
|
||||
PACKAGE_BINARIES = fdbcli fdbserver fdbbackup fdbmonitor fdbrestore fdbdr dr_agent backup_agent
|
||||
PACKAGE_CONTENTS := $(addprefix bin/, $(PACKAGE_BINARIES)) $(addprefix bin/, $(addsuffix .debug, $(PACKAGE_BINARIES))) lib/libfdb_c.$(DLEXT) bindings/python/fdb/fdboptions.py bindings/c/foundationdb/fdb_c_options.g.h
|
||||
PROJECT_BINARIES = $(addprefix bin/, $(PACKAGE_BINARIES))
|
||||
PACKAGE_CONTENTS := $(PROJECT_BINARIES) $(addprefix bin/, $(addsuffix .debug, $(PACKAGE_BINARIES))) lib/libfdb_c.$(DLEXT) bindings/python/fdb/fdboptions.py bindings/c/foundationdb/fdb_c_options.g.h
|
||||
|
||||
packages: TGZ FDBSERVERAPI
|
||||
packages: TGZ BINS FDBSERVERAPI
|
||||
|
||||
TGZ: $(PACKAGE_CONTENTS) versions.target lib/libfdb_java.$(DLEXT)
|
||||
@echo "Archiving tgz"
|
||||
|
@ -32,9 +33,17 @@ TGZ: $(PACKAGE_CONTENTS) versions.target lib/libfdb_java.$(DLEXT)
|
|||
@rm -f packages/FoundationDB-$(PLATFORM)-*.tar.gz
|
||||
@bash -c "tar -czf packages/FoundationDB-$(PLATFORM)-$(VERSION)-$(PKGRELEASE).tar.gz bin/{fdbmonitor{,.debug},fdbcli{,.debug},fdbserver{,.debug},fdbbackup{,.debug},fdbdr{,.debug},fdbrestore{,.debug},dr_agent{,.debug},coverage.{fdbclient,fdbserver,fdbrpc,flow}.xml} lib/libfdb_c.$(DLEXT){,-debug} lib/libfdb_java.$(DLEXT)* bindings/python/fdb/*.py bindings/c/*.h"
|
||||
|
||||
BINS: packages/foundationdb-binaries-$(VERSION)-$(PLATFORM).tar.gz
|
||||
|
||||
packages_clean:
|
||||
@echo "Cleaning packages"
|
||||
@rm -f packages/FoundationDB-$(PLATFORM)-*.tar.gz packages/fdb-tests-$(VERSION).tar.gz packages/fdb-headers-$(VERSION).tar.gz packages/fdb-bindings-$(VERSION).tar.gz packages/fdb-server-$(VERSION)-$(PLATFORM).tar.gz
|
||||
@rm -f packages/FoundationDB-$(PLATFORM)-*.tar.gz packages/foundationdb-binaries-$(VERSION)-$(PLATFORM).tar.gz packages/fdb-tests-$(VERSION).tar.gz packages/fdb-headers-$(VERSION).tar.gz packages/fdb-bindings-$(VERSION).tar.gz packages/fdb-server-$(VERSION)-$(PLATFORM).tar.gz
|
||||
|
||||
packages/foundationdb-binaries-$(VERSION)-$(PLATFORM).tar.gz: $(PROJECT_BINARIES) versions.target
|
||||
@echo "Packaging binaries"
|
||||
@mkdir -p packages
|
||||
@rm -f packages/foundationdb-binaries-$(VERSION)-$(PLATFORM).tar.gz
|
||||
@bash -c "tar -czf packages/foundationdb-binaries-$(VERSION)-$(PLATFORM).tar.gz $(PROJECT_BINARIES)"
|
||||
|
||||
packages/fdb-server-$(VERSION)-$(PLATFORM).tar.gz: bin/fdbserver bin/fdbcli lib/libfdb_c.$(DLEXT)
|
||||
@echo "Packaging fdb server api"
|
||||
|
|
|
@ -101,7 +101,7 @@ function(add_fdb_test)
|
|||
endif()
|
||||
list(TRANSFORM ADD_FDB_TEST_TEST_FILES PREPEND "${CMAKE_CURRENT_SOURCE_DIR}/")
|
||||
add_test(NAME ${test_name}
|
||||
COMMAND ${PYTHON_EXECUTABLE} ${TestRunner}
|
||||
COMMAND $<TARGET_FILE:Python::Interpreter> ${TestRunner}
|
||||
-n ${test_name}
|
||||
-b ${PROJECT_BINARY_DIR}
|
||||
-t ${test_type}
|
||||
|
|
|
@ -16,27 +16,6 @@ if(WIN32)
|
|||
"System.Data"
|
||||
"System.Xml")
|
||||
else()
|
||||
find_program(MONO_EXECUTABLE mono)
|
||||
find_program(MCS_EXECUTABLE dmcs)
|
||||
|
||||
if (NOT MCS_EXECUTABLE)
|
||||
find_program(MCS_EXECUTABLE mcs)
|
||||
endif()
|
||||
|
||||
set(MONO_FOUND FALSE CACHE INTERNAL "")
|
||||
|
||||
if (NOT MCS_EXECUTABLE)
|
||||
find_program(MCS_EXECUTABLE mcs)
|
||||
endif()
|
||||
|
||||
if (MONO_EXECUTABLE AND MCS_EXECUTABLE)
|
||||
set(MONO_FOUND True CACHE INTERNAL "")
|
||||
endif()
|
||||
|
||||
if (NOT MONO_FOUND)
|
||||
message(FATAL_ERROR "Could not find mono")
|
||||
endif()
|
||||
|
||||
set(ACTOR_COMPILER_REFERENCES
|
||||
"-r:System,System.Core,System.Xml.Linq,System.Data.DataSetExtensions,Microsoft.CSharp,System.Data,System.Xml")
|
||||
|
||||
|
|
|
@ -0,0 +1,25 @@
|
|||
set(COVERAGETOOL_SRCS
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/flow/coveragetool/Program.cs
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/flow/coveragetool/Properties/AssemblyInfo.cs)
|
||||
if(WIN32)
|
||||
add_executable(coveragetool ${COVERAGETOOL_SRCS})
|
||||
target_compile_options(coveragetool PRIVATE "/langversion:6")
|
||||
set_property(TARGET coveragetool PROPERTY VS_DOTNET_REFERENCES
|
||||
"System"
|
||||
"ystem.Core"
|
||||
"System.Xml.Linq"
|
||||
"ystem.Data.DataSetExtensions"
|
||||
"Microsoft.CSharp"
|
||||
"ystem.Data"
|
||||
"System.Xml")
|
||||
else()
|
||||
set(COVERAGETOOL_COMPILER_REFERENCES
|
||||
"-r:System,System.Core,System.Xml.Linq,System.Data.DataSetExtensions,Microsoft.CSharp,System.Data,System.Xml")
|
||||
|
||||
add_custom_command(OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/coveragetool.exe
|
||||
COMMAND ${MCS_EXECUTABLE} ARGS ${COVERAGETOOL_COMPILER_REFERENCES} ${COVERAGETOOL_SRCS} "-target:exe" "-out:coveragetool.exe"
|
||||
DEPENDS ${COVERAGETOOL_SRCS}
|
||||
COMMENT "Compile coveragetool" VERBATIM)
|
||||
add_custom_target(coveragetool DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/coveragetool.exe)
|
||||
set(coveragetool_exe "${CMAKE_CURRENT_BINARY_DIR}/coveragetool.exe")
|
||||
endif()
|
|
@ -25,10 +25,14 @@ else()
|
|||
add_custom_target(vexillographer DEPENDS ${VEXILLOGRAPHER_EXE})
|
||||
endif()
|
||||
|
||||
macro(vexillographer_compile)
|
||||
function(vexillographer_compile)
|
||||
set(CX_OPTIONS ALL)
|
||||
set(CX_ONE_VALUE_ARGS TARGET LANG OUT)
|
||||
set(CX_MULTI_VALUE_ARGS OUTPUT)
|
||||
cmake_parse_arguments(VX "" "${CX_ONE_VALUE_ARGS}" "${CX_MULTI_VALUE_ARGS}" "${ARGN}")
|
||||
cmake_parse_arguments(VX "${CX_OPTIONS}" "${CX_ONE_VALUE_ARGS}" "${CX_MULTI_VALUE_ARGS}" "${ARGN}")
|
||||
if(NOT VX_OUTPUT)
|
||||
set(VX_OUTPUT ${VX_OUT})
|
||||
endif()
|
||||
if(WIN32)
|
||||
add_custom_command(
|
||||
OUTPUT ${VX_OUTPUT}
|
||||
|
@ -42,5 +46,9 @@ macro(vexillographer_compile)
|
|||
DEPENDS ${CMAKE_SOURCE_DIR}/fdbclient/vexillographer/fdb.options vexillographer
|
||||
COMMENT "Generate FDBOptions ${VX_LANG} files")
|
||||
endif()
|
||||
add_custom_target(${VX_TARGET} DEPENDS ${VX_OUTPUT})
|
||||
endmacro()
|
||||
if(VX_ALL)
|
||||
add_custom_target(${VX_TARGET} ALL DEPENDS ${VX_OUTPUT})
|
||||
else()
|
||||
add_custom_target(${VX_TARGET} DEPENDS ${VX_OUTPUT})
|
||||
endif()
|
||||
endfunction()
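A minimal usage sketch of the new function form (argument values here are illustrative; fdbclient's CMakeLists further down shows the real invocation):

vexillographer_compile(TARGET exampleoptions LANG cpp
  OUT ${CMAKE_CURRENT_BINARY_DIR}/ExampleOptions.g
  OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/ExampleOptions.g.h
         ${CMAKE_CURRENT_BINARY_DIR}/ExampleOptions.g.cpp)
# Passing the new ALL option attaches the generated target to the default build:
# vexillographer_compile(ALL TARGET exampleoptions LANG cpp OUT ... OUTPUT ...)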
|
||||
|
|
|
@ -4,13 +4,9 @@ set(USE_VALGRIND OFF CACHE BOOL "Compile for valgrind usage")
|
|||
set(USE_GOLD_LINKER OFF CACHE BOOL "Use gold linker")
|
||||
set(ALLOC_INSTRUMENTATION OFF CACHE BOOL "Instrument alloc")
|
||||
set(WITH_UNDODB OFF CACHE BOOL "Use rr or undodb")
|
||||
set(OPEN_FOR_IDE OFF CACHE BOOL "Open this in an IDE (won't compile/link)")
|
||||
set(FDB_RELEASE OFF CACHE BOOL "This is a building of a final release")
|
||||
|
||||
add_compile_options(-DCMAKE_BUILD)
|
||||
if(WITH_TLS)
|
||||
add_compile_options(-DHAVE_OPENSSL)
|
||||
endif()
|
||||
|
||||
find_package(Threads REQUIRED)
|
||||
if(ALLOC_INSTRUMENTATION)
|
||||
|
|
|
@ -0,0 +1,27 @@
|
|||
if(WIN32)
|
||||
# C# is currently only supported on Windows.
|
||||
# On other platforms we find mono manually
|
||||
enable_language(CSharp)
|
||||
else()
|
||||
# for other platforms we currently use mono
|
||||
find_program(MONO_EXECUTABLE mono)
|
||||
find_program(MCS_EXECUTABLE dmcs)
|
||||
|
||||
if (NOT MCS_EXECUTABLE)
|
||||
find_program(MCS_EXECUTABLE mcs)
|
||||
endif()
|
||||
|
||||
set(MONO_FOUND FALSE CACHE INTERNAL "")
|
||||
|
||||
if (NOT MCS_EXECUTABLE)
|
||||
find_program(MCS_EXECUTABLE mcs)
|
||||
endif()
|
||||
|
||||
if (MONO_EXECUTABLE AND MCS_EXECUTABLE)
|
||||
set(MONO_FOUND True CACHE INTERNAL "")
|
||||
endif()
|
||||
|
||||
if (NOT MONO_FOUND)
|
||||
message(FATAL_ERROR "Could not find mono")
|
||||
endif()
|
||||
endif()
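On non-Windows platforms the cached MONO_EXECUTABLE and MCS_EXECUTABLE values are what the C# tool rules consume; a condensed sketch of that pattern (mirroring the coveragetool rule above, with illustrative names):

set(EXAMPLETOOL_SRCS ${CMAKE_CURRENT_SOURCE_DIR}/Program.cs)  # illustrative source list
add_custom_command(OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/exampletool.exe
  COMMAND ${MCS_EXECUTABLE} ARGS "-r:System,System.Core" ${EXAMPLETOOL_SRCS} "-target:exe" "-out:exampletool.exe"
  DEPENDS ${EXAMPLETOOL_SRCS}
  COMMENT "Compile exampletool" VERBATIM)
add_custom_target(exampletool DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/exampletool.exe)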
|
|
@ -0,0 +1,105 @@
|
|||
set(FORCE_ALL_COMPONENTS OFF CACHE BOOL "Fails cmake if not all dependencies are found")
|
||||
|
||||
################################################################################
|
||||
# LibreSSL
|
||||
################################################################################
|
||||
|
||||
set(DISABLE_TLS OFF CACHE BOOL "Don't try to find LibreSSL and always build without TLS support")
|
||||
if(DISABLE_TLS)
|
||||
set(WITH_TLS OFF)
|
||||
else()
|
||||
set(LIBRESSL_USE_STATIC_LIBS TRUE)
|
||||
find_package(LibreSSL)
|
||||
if(LibreSSL_FOUND)
|
||||
set(WITH_TLS ON)
|
||||
add_compile_options(-DHAVE_OPENSSL)
|
||||
else()
|
||||
message(STATUS "LibreSSL NOT Found - Will compile without TLS Support")
|
||||
message(STATUS "You can set LibreSSL_ROOT to the LibreSSL install directory to help cmake find it")
|
||||
set(WITH_TLS OFF)
|
||||
endif()
|
||||
endif()
|
||||
|
||||
################################################################################
|
||||
# Java Bindings
|
||||
################################################################################
|
||||
|
||||
set(WITH_JAVA OFF)
|
||||
find_package(JNI 1.8 REQUIRED)
|
||||
find_package(Java 1.8 COMPONENTS Development)
|
||||
if(JNI_FOUND AND Java_FOUND AND Java_Development_FOUND)
|
||||
set(WITH_JAVA ON)
|
||||
include(UseJava)
|
||||
enable_language(Java)
|
||||
else()
|
||||
set(WITH_JAVA OFF)
|
||||
endif()
|
||||
|
||||
################################################################################
|
||||
# Python Bindings
|
||||
################################################################################
|
||||
|
||||
find_package(Python COMPONENTS Interpreter)
|
||||
if(Python_Interpreter_FOUND)
|
||||
set(WITH_PYTHON ON)
|
||||
else()
|
||||
message(FATAL_ERROR "Could not found a suitable python interpreter")
|
||||
set(WITH_PYTHON OFF)
|
||||
endif()
|
||||
|
||||
################################################################################
|
||||
# Pip
|
||||
################################################################################
|
||||
|
||||
find_package(Virtualenv)
|
||||
if (Virtualenv_FOUND)
|
||||
set(WITH_DOCUMENTATION ON)
|
||||
else()
|
||||
set(WITH_DOCUMENTATION OFF)
|
||||
endif()
|
||||
|
||||
################################################################################
|
||||
# GO
|
||||
################################################################################
|
||||
|
||||
find_program(GO_EXECUTABLE go)
|
||||
# building the go binaries is currently not supported on Windows
|
||||
if(GO_EXECUTABLE AND NOT WIN32)
|
||||
set(WITH_GO ON)
|
||||
else()
|
||||
set(WITH_GO OFF)
|
||||
endif()
|
||||
|
||||
################################################################################
|
||||
# Ruby
|
||||
################################################################################
|
||||
|
||||
find_program(GEM_EXECUTABLE gem)
|
||||
set(WITH_RUBY OFF)
|
||||
if(GEM_EXECUTABLE)
|
||||
set(GEM_COMMAND ${RUBY_EXECUTABLE} ${GEM_EXECUTABLE})
|
||||
set(WITH_RUBY ON)
|
||||
endif()
|
||||
|
||||
file(MAKE_DIRECTORY ${CMAKE_BINARY_DIR}/packages)
|
||||
add_custom_target(packages)
|
||||
|
||||
function(print_components)
|
||||
message(STATUS "=========================================")
|
||||
message(STATUS " Components Build Overview ")
|
||||
message(STATUS "=========================================")
|
||||
message(STATUS "Build Java Bindings: ${WITH_JAVA}")
|
||||
message(STATUS "Build with TLS support: ${WITH_TLS}")
|
||||
message(STATUS "Build Go bindings: ${WITH_GO}")
|
||||
message(STATUS "Build Ruby bindings: ${WITH_RUBY}")
|
||||
message(STATUS "Build Python sdist (make package): ${WITH_PYTHON}")
|
||||
message(STATUS "Build Documentation (make html): ${WITH_DOCUMENTATION}")
|
||||
message(STATUS "=========================================")
|
||||
endfunction()
|
||||
|
||||
if(FORCE_ALL_COMPONENTS)
|
||||
if(NOT WITH_JAVA OR NOT WITH_TLS OR NOT WITH_GO OR NOT WITH_RUBY OR NOT WITH_PYTHON OR NOT WITH_DOCUMENTATION)
|
||||
print_components()
|
||||
message(FATAL_ERROR "FORCE_ALL_COMPONENTS is set but not all dependencies could be found")
|
||||
endif()
|
||||
endif()
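The WITH_* flags computed here are meant to gate optional targets elsewhere in the build; a hedged sketch of the intended consumption pattern (directory names are illustrative, not taken from this change):

if(WITH_JAVA)
  add_subdirectory(bindings/java)   # only built when JNI and a JDK were found
endif()
if(WITH_DOCUMENTATION)
  add_subdirectory(documentation)   # only built when virtualenv is available
endif()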
|
|
@ -0,0 +1,7 @@
|
|||
find_program(SPHINXBUILD
|
||||
sphinx-build
|
||||
DOC "Sphinx-build tool")
|
||||
|
||||
find_package_handle_standard_args(Sphinx
|
||||
FOUND_VAR SPHINX_FOUND
|
||||
REQUIRED_VARS SPHINXBUILD)
|
|
@ -0,0 +1,20 @@
|
|||
find_program(_VIRTUALENV_EXE virtualenv)
|
||||
|
||||
# get version and test that program actually works
|
||||
if(_VIRTUALENV_EXE)
|
||||
execute_process(
|
||||
COMMAND ${_VIRTUALENV_EXE} --version
|
||||
RESULT_VARIABLE ret_code
|
||||
OUTPUT_VARIABLE version_string
|
||||
ERROR_VARIABLE error_output
|
||||
OUTPUT_STRIP_TRAILING_WHITESPACE)
|
||||
if(ret_code EQUAL 0 AND NOT error_output)
|
||||
# we found a working virtualenv
|
||||
set(VIRTUALENV_EXE ${_VIRTUALENV_EXE})
|
||||
set(VIRTUALENV_VERSION ${version_string})
|
||||
endif()
|
||||
endif()
|
||||
|
||||
find_package_handle_standard_args(Virtualenv
|
||||
REQUIRED_VARS VIRTUALENV_EXE
|
||||
VERSION_VAR VIRTUALENV_VERSION)
|
|
@ -1,53 +1,159 @@
|
|||
macro(actor_set varname srcs)
|
||||
set(${varname})
|
||||
foreach(src ${srcs})
|
||||
set(tmp "${src}")
|
||||
if(${src} MATCHES ".*\\.h")
|
||||
continue()
|
||||
elseif(${src} MATCHES ".*\\.actor\\.cpp")
|
||||
string(REPLACE ".actor.cpp" ".actor.g.cpp" tmp ${src})
|
||||
set(tmp "${CMAKE_CURRENT_BINARY_DIR}/${tmp}")
|
||||
endif()
|
||||
set(${varname} "${${varname}};${tmp}")
|
||||
endforeach()
|
||||
endmacro()
|
||||
define_property(TARGET PROPERTY SOURCE_FILES
|
||||
BRIEF_DOCS "Source files a flow target is built off"
|
||||
FULL_DOCS "When compiling a flow target, this property contains a list of the non-generated source files. \
|
||||
This property is set by the add_flow_target function")
|
||||
|
||||
set(ACTOR_TARGET_COUNTER "0")
|
||||
macro(actor_compile target srcs)
|
||||
set(options DISABLE_ACTOR_WITHOUT_WAIT)
|
||||
set(oneValueArg)
|
||||
set(multiValueArgs)
|
||||
cmake_parse_arguments(ACTOR_COMPILE "${options}" "${oneValueArgs}" "${multiValueArgs}" "${ARGN}")
|
||||
set(_tmp_out "")
|
||||
foreach(src ${srcs})
|
||||
set(tmp "")
|
||||
if(${src} MATCHES ".*\\.actor\\.h")
|
||||
string(REPLACE ".actor.h" ".actor.g.h" tmp ${src})
|
||||
elseif(${src} MATCHES ".*\\.actor\\.cpp")
|
||||
string(REPLACE ".actor.cpp" ".actor.g.cpp" tmp ${src})
|
||||
define_property(TARGET PROPERTY COVERAGE_FILTERS
|
||||
BRIEF_DOCS "List of filters for the coverage tool"
|
||||
FULL_DOCS "Holds a list of regular expressions. All filenames matching any regular \
|
||||
expression in this list will be ignored when the coverage.target.xml file is \
|
||||
generated. This property is set through the add_flow_target function.")
|
||||
|
||||
function(generate_coverage_xml)
|
||||
if(NOT (${ARGC} EQUAL "1"))
|
||||
message(FATAL_ERROR "generate_coverage_xml expects one argument")
|
||||
endif()
|
||||
set(target_name ${ARGV0})
|
||||
get_target_property(sources ${target_name} SOURCE_FILES)
|
||||
get_target_property(filters ${target_name} COVERAGE_FILTERS)
|
||||
foreach(src IN LISTS sources)
|
||||
set(include TRUE)
|
||||
foreach(f IN LISTS filters)
|
||||
if("${f}" MATCHES "${src}")
|
||||
set(include FALSE)
|
||||
endif()
|
||||
endforeach()
|
||||
if(include)
|
||||
list(APPEND in_files ${src})
|
||||
endif()
|
||||
set(actor_compiler_flags "")
|
||||
if(ACTOR_COMPILE_DISABLE_ACTOR_WITHOUT_WAIT)
|
||||
set(actor_compiler_flags "--disable-actor-without-wait-error")
|
||||
endforeach()
|
||||
set(target_file ${CMAKE_CURRENT_SOURCE_DIR}/coverage_target_${target_name})
|
||||
# we can't get the target's output dir through a generator expression as this would
|
||||
# create a cyclic dependency.
|
||||
# Instead we follow the following rules:
|
||||
# - For executable we place the coverage file into the directory EXECUTABLE_OUTPUT_PATH
|
||||
# - For static libraries we place it into the directory LIBRARY_OUTPUT_PATH
|
||||
# - For dynamic libraries we place it into LIBRARY_OUTPUT_PATH on Linux and MACOS
|
||||
# and to EXECUTABLE_OUTPUT_PATH on Windows
|
||||
get_target_property(type ${target_name} TYPE)
|
||||
# STATIC_LIBRARY, MODULE_LIBRARY, SHARED_LIBRARY, OBJECT_LIBRARY, INTERFACE_LIBRARY, EXECUTABLE
|
||||
if(type STREQUAL "STATIC_LIBRARY")
|
||||
set(target_file ${LIBRARY_OUTPUT_PATH}/coverage.${target_name}.xml)
|
||||
elseif(type STREQUAL "SHARED_LIBRARY")
|
||||
if(WIN32)
|
||||
set(target_file ${EXECUTABLE_OUTPUT_PATH}/coverage.${target_name}.xml)
|
||||
else()
|
||||
set(target_file ${LIBRARY_OUTPUT_PATH}/coverage.${target_name}.xml)
|
||||
endif()
|
||||
if(tmp)
|
||||
elseif(type STREQUAL "EXECUTABLE")
|
||||
set(target_file ${EXECUTABLE_OUTPUT_PATH}/coverage.${target_name}.xml)
|
||||
endif()
|
||||
if(WIN32)
|
||||
add_custom_command(
|
||||
OUTPUT ${target_file}
|
||||
COMMAND $<TARGET_FILE:coveragetool> ${target_file} ${in_files}
|
||||
DEPENDS ${in_files}
|
||||
WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
|
||||
COMMENT "Generate coverage xml")
|
||||
else()
|
||||
add_custom_command(
|
||||
OUTPUT ${target_file}
|
||||
COMMAND ${MONO_EXECUTABLE} ${coveragetool_exe} ${target_file} ${in_files}
|
||||
DEPENDS ${in_files}
|
||||
WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
|
||||
COMMENT "Generate coverage xml")
|
||||
endif()
|
||||
add_custom_target(coverage_${target_name} DEPENDS ${target_file})
|
||||
add_dependencies(coverage_${target_name} coveragetool)
|
||||
add_dependencies(${target_name} coverage_${target_name})
|
||||
endfunction()
|
||||
|
||||
function(add_flow_target)
|
||||
set(options EXECUTABLE STATIC_LIBRARY
|
||||
DYNAMIC_LIBRARY)
|
||||
set(oneValueArgs NAME)
|
||||
set(multiValueArgs SRCS COVERAGE_FILTER_OUT DISABLE_ACTOR_WITHOUT_WAIT_WARNING)
|
||||
cmake_parse_arguments(AFT "${options}" "${oneValueArgs}" "${multiValueArgs}" "${ARGN}")
|
||||
if(NOT AFT_NAME)
|
||||
message(FATAL_ERROR "add_flow_target requires option NAME")
|
||||
endif()
|
||||
if(NOT AFT_SRCS)
|
||||
message(FATAL_ERROR "No sources provided")
|
||||
endif()
|
||||
if(OPEN_FOR_IDE)
|
||||
set(sources ${AFT_SRCS} ${AFT_DISABLE_ACTOR_WITHOUT_WAIT_WARNING})
|
||||
if(AFT_EXECUTABLE)
|
||||
set(target_type exec)
|
||||
add_executable(${AFT_NAME} ${sources})
|
||||
endif()
|
||||
if(AFT_STATIC_LIBRARY)
|
||||
if(target_type)
|
||||
message(FATAL_ERROR "add_flow_target can only be of one type")
|
||||
endif()
|
||||
add_library(${AFT_NAME} STATIC ${sources})
|
||||
endif()
|
||||
if(AFT_DYNAMIC_LIBRARY)
|
||||
if(target_type)
|
||||
message(FATAL_ERROR "add_flow_target can only be of one type")
|
||||
endif()
|
||||
add_library(${AFT_NAME} DYNAMIC ${sources})
|
||||
endif()
|
||||
else()
|
||||
foreach(src IN LISTS AFT_SRCS AFT_DISABLE_ACTOR_WITHOUT_WAIT_WARNING)
|
||||
if(${src} MATCHES ".*\\.actor\\.(h|cpp)")
|
||||
list(APPEND actors ${src})
|
||||
if(${src} MATCHES ".*\\.h")
|
||||
string(REPLACE ".actor.h" ".actor.g.h" generated ${src})
|
||||
else()
|
||||
string(REPLACE ".actor.cpp" ".actor.g.cpp" generated ${src})
|
||||
endif()
|
||||
set(actor_compiler_flags "")
|
||||
foreach(s IN LISTS AFT_DISABLE_ACTOR_WITHOUT_WAIT_WARNING)
|
||||
if("${s}" STREQUAL "${src}")
|
||||
set(actor_compiler_flags "--disable-actor-without-wait-warning")
|
||||
break()
|
||||
endif()
|
||||
endforeach()
|
||||
list(APPEND sources ${generated})
|
||||
list(APPEND generated_files ${CMAKE_CURRENT_BINARY_DIR}/${generated})
|
||||
if(WIN32)
|
||||
add_custom_command(OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/${tmp}"
|
||||
COMMAND $<TARGET_FILE:actorcompiler> "${CMAKE_CURRENT_SOURCE_DIR}/${src}" "${CMAKE_CURRENT_BINARY_DIR}/${tmp}" ${actor_compiler_flags}
|
||||
DEPENDS "${CMAKE_CURRENT_SOURCE_DIR}/${src}" actorcompiler ${actor_exe}
|
||||
add_custom_command(OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/${generated}"
|
||||
COMMAND $<TARGET_FILE:actorcompiler> "${CMAKE_CURRENT_SOURCE_DIR}/${src}" "${CMAKE_CURRENT_BINARY_DIR}/${generated}" ${actor_compiler_flags} ${actor_compiler_flags}
|
||||
DEPENDS "${CMAKE_CURRENT_SOURCE_DIR}/${src}" actorcompiler
|
||||
COMMENT "Compile actor: ${src}")
|
||||
else()
|
||||
add_custom_command(OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/${tmp}"
|
||||
COMMAND ${MONO_EXECUTABLE} ${actor_exe} "${CMAKE_CURRENT_SOURCE_DIR}/${src}" "${CMAKE_CURRENT_BINARY_DIR}/${tmp}" ${actor_compiler_flags} > /dev/null
|
||||
DEPENDS "${CMAKE_CURRENT_SOURCE_DIR}/${src}" actorcompiler ${actor_exe}
|
||||
add_custom_command(OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/${generated}"
|
||||
COMMAND ${MONO_EXECUTABLE} ${actor_exe} "${CMAKE_CURRENT_SOURCE_DIR}/${src}" "${CMAKE_CURRENT_BINARY_DIR}/${generated}" ${actor_compiler_flags} ${actor_compiler_flags} > /dev/null
|
||||
DEPENDS "${CMAKE_CURRENT_SOURCE_DIR}/${src}" actorcompiler
|
||||
COMMENT "Compile actor: ${src}")
|
||||
endif()
|
||||
set(_tmp_out "${_tmp_out};${CMAKE_CURRENT_BINARY_DIR}/${tmp}")
|
||||
else()
|
||||
list(APPEND sources ${src})
|
||||
endif()
|
||||
endforeach()
|
||||
MATH(EXPR ACTOR_TARGET_COUNTER "${ACTOR_TARGET_COUNTER}+1")
|
||||
add_custom_target(${target}_actors_${ACTOR_TARGET_COUNTER} DEPENDS ${_tmp_out})
|
||||
add_dependencies(${target} ${target}_actors_${ACTOR_TARGET_COUNTER})
|
||||
target_include_directories(${target} PUBLIC ${CMAKE_CURRENT_SOURCE_DIR})
|
||||
target_include_directories(${target} PUBLIC ${CMAKE_CURRENT_BINARY_DIR})
|
||||
endmacro()
|
||||
if(AFT_EXECUTABLE)
|
||||
set(target_type exec)
|
||||
add_executable(${AFT_NAME} ${sources})
|
||||
endif()
|
||||
if(AFT_STATIC_LIBRARY)
|
||||
if(target_type)
|
||||
message(FATAL_ERROR "add_flow_target can only be of one type")
|
||||
endif()
|
||||
add_library(${AFT_NAME} STATIC ${sources})
|
||||
endif()
|
||||
if(AFT_DYNAMIC_LIBRARY)
|
||||
if(target_type)
|
||||
message(FATAL_ERROR "add_flow_target can only be of one type")
|
||||
endif()
|
||||
add_library(${AFT_NAME} DYNAMIC ${sources})
|
||||
endif()
|
||||
|
||||
set_property(TARGET ${AFT_NAME} PROPERTY SOURCE_FILES ${AFT_SRCS})
|
||||
set_property(TARGET ${AFT_NAME} PROPERTY COVERAGE_FILTERS ${AFT_COVERAGE_FILTER_OUT})
|
||||
|
||||
add_custom_target(${AFT_NAME}_actors DEPENDS ${generated_files})
|
||||
add_dependencies(${AFT_NAME} ${AFT_NAME}_actors)
|
||||
generate_coverage_xml(${AFT_NAME})
|
||||
endif()
|
||||
target_include_directories(${AFT_NAME} PUBLIC ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR})
|
||||
endfunction()
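A short usage sketch with hypothetical names, matching the real invocations further down (fdbrpc, fdbcli, fdbserver):

add_flow_target(STATIC_LIBRARY NAME exampleflowlib
  SRCS Example.actor.cpp Example.actor.h ExamplePlain.cpp
  COVERAGE_FILTER_OUT ".*ExamplePlain.*"
  DISABLE_ACTOR_WITHOUT_WAIT_WARNING Example.actor.cpp)
target_link_libraries(exampleflowlib PUBLIC flow)
# .actor.cpp/.actor.h sources are run through the actor compiler into .actor.g.*
# files, and a coverage.exampleflowlib.xml rule is generated alongside the library.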
|
||||
|
|
|
@ -1,3 +1,21 @@
|
|||
################################################################################
|
||||
# Helper Functions
|
||||
################################################################################
|
||||
|
||||
function(install_symlink)
|
||||
set(options "")
|
||||
set(one_value_options COMPONENT TO DESTINATION)
|
||||
set(multi_value_options)
|
||||
cmake_parse_arguments(SYM "${options}" "${one_value_options}" "${multi_value_options}" "${ARGN}")
|
||||
|
||||
file(MAKE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/symlinks)
|
||||
get_filename_component(fname ${SYM_DESTINATION} NAME)
|
||||
get_filename_component(dest_dir ${SYM_DESTINATION} DIRECTORY)
|
||||
set(sl ${CMAKE_CURRENT_BINARY_DIR}/symlinks/${fname})
|
||||
execute_process(COMMAND ${CMAKE_COMMAND} -E create_symlink ${SYM_TO} ${sl})
|
||||
install(FILES ${sl} DESTINATION ${dest_dir} COMPONENT ${SYM_COMPONENT})
|
||||
endfunction()
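For reference, a call takes TO (the link target), DESTINATION (the symlink path inside the install tree) and COMPONENT; a sketch with illustrative paths (the fdbbackup CMakeLists below contains the real calls):

install_symlink(
  TO /usr/bin/fdbbackup
  DESTINATION usr/bin/fdbrestore
  COMPONENT clients)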
|
||||
|
||||
if(NOT INSTALL_LAYOUT)
|
||||
if(WIN32)
|
||||
set(DEFAULT_INSTALL_LAYOUT "WIN")
|
||||
|
@ -14,6 +32,9 @@ if(DIR_LAYOUT MATCHES "TARGZ")
|
|||
endif()
|
||||
|
||||
get_property(LIB64 GLOBAL PROPERTY FIND_LIBRARY_USE_LIB64_PATHS)
|
||||
|
||||
set(CPACK_PACKAGE_CHECKSUM SHA256)
|
||||
|
||||
set(FDB_CONFIG_DIR "etc/foundationdb")
|
||||
if("${LIB64}" STREQUAL "TRUE")
|
||||
set(LIBSUFFIX 64)
|
||||
|
@ -54,8 +75,24 @@ elseif(DIR_LAYOUT MATCHES "OSX")
|
|||
set(FDB_PYTHON_INSTALL_DIR "Library/Python/2.7/site-packages/fdb")
|
||||
set(FDB_SHARE_DIR "usr/local/share")
|
||||
else()
|
||||
# DEB
|
||||
set(CPACK_GENERATOR "DEB")
|
||||
if(DIR_LAYOUT MATCHES "RPM")
|
||||
set(CPACK_GENERATOR RPM)
|
||||
else()
|
||||
# DEB
|
||||
set(CPACK_GENERATOR "DEB")
|
||||
set(LIBSUFFIX "")
|
||||
endif()
|
||||
set(CMAKE_INSTALL_PREFIX "/")
|
||||
set(CPACK_PACKAGING_INSTALL_PREFIX "/")
|
||||
set(FDB_CONFIG_DIR "etc/foundationdb")
|
||||
set(FDB_LIB_DIR "usr/lib${LIBSUFFIX}")
|
||||
set(FDB_LIB_NOSUFFIX "usr/lib")
|
||||
set(FDB_LIBEXEC_DIR ${FDB_LIB_DIR})
|
||||
set(FDB_BIN_DIR "usr/bin")
|
||||
set(FDB_SBIN_DIR "usr/sbin")
|
||||
set(FDB_INCLUDE_INSTALL_DIR "usr/include")
|
||||
set(FDB_PYTHON_INSTALL_DIR "${FDB_LIB_DIR}/python2.7/site-packages/fdb")
|
||||
set(FDB_SHARE_DIR "usr/share")
|
||||
endif()
|
||||
|
||||
if(INSTALL_LAYOUT MATCHES "OSX")
|
||||
|
@ -104,6 +141,12 @@ endif()
|
|||
################################################################################
|
||||
# Configuration for RPM
|
||||
################################################################################
|
||||
################################################################################
|
||||
|
||||
if(UNIX AND NOT APPLE)
|
||||
install(DIRECTORY DESTINATION "var/log/foundationdb" COMPONENT server)
|
||||
install(DIRECTORY DESTINATION "var/lib/foundationdb/data" COMPONENT server)
|
||||
endif()
|
||||
|
||||
if(INSTALL_LAYOUT MATCHES "RPM")
|
||||
set(CPACK_RPM_server_USER_FILELIST
|
||||
|
@ -122,7 +165,8 @@ if(INSTALL_LAYOUT MATCHES "RPM")
|
|||
"/lib/systemd"
|
||||
"/lib/systemd/system"
|
||||
"/etc/rc.d/init.d")
|
||||
set(CPACK_RPM_DEBUGINFO_PACKAGE ON)
|
||||
set(CPACK_RPM_server_DEBUGINFO_PACKAGE ON)
|
||||
set(CPACK_RPM_clients_DEBUGINFO_PACKAGE ON)
|
||||
set(CPACK_RPM_BUILD_SOURCE_DIRS_PREFIX /usr/src)
|
||||
set(CPACK_RPM_COMPONENT_INSTALL ON)
|
||||
set(CPACK_RPM_clients_PRE_INSTALL_SCRIPT_FILE
|
||||
|
@ -136,6 +180,11 @@ if(INSTALL_LAYOUT MATCHES "RPM")
|
|||
set(CPACK_RPM_server_PRE_UNINSTALL_SCRIPT_FILE
|
||||
${CMAKE_SOURCE_DIR}/packaging/rpm/scripts/preunserver.sh)
|
||||
set(CPACK_RPM_server_PACKAGE_REQUIRES
|
||||
"foundationdb-clients = ${FDB_MAJOR}.${FDB_MINOR}.${FDB_PATCH}, initscripts >= 9.03")
|
||||
set(CPACK_RPM_server_PACKAGE_RE)
|
||||
#set(CPACK_RPM_java_PACKAGE_REQUIRES
|
||||
# "foundationdb-clients = ${FDB_MAJOR}.${FDB_MINOR}.${FDB_PATCH}")
|
||||
set(CPACK_RPM_python_PACKAGE_REQUIRES
|
||||
"foundationdb-clients = ${FDB_MAJOR}.${FDB_MINOR}.${FDB_PATCH}")
|
||||
endif()
|
||||
|
||||
|
@ -148,12 +197,12 @@ if(INSTALL_LAYOUT MATCHES "DEB")
|
|||
set(CPACK_DEBIAN_PACKAGE_SECTION "database")
|
||||
set(CPACK_DEBIAN_ENABLE_COMPONENT_DEPENDS ON)
|
||||
|
||||
set(CPACK_DEBIAN_server_PACKAGE_DEPENDS "adduser, libc6 (>= 2.11), python (>= 2.6)")
|
||||
set(CPACK_DEBIAN_clients_PACKAGE_DEPENDS "adduser, libc6 (>= 2.11)")
|
||||
set(CPACK_DEBIAN_SERVER_PACKAGE_DEPENDS "adduser, libc6 (>= 2.12), python (>= 2.6), foundationdb-clients (= ${FDB_VERSION})")
|
||||
set(CPACK_DEBIAN_CLIENTS_PACKAGE_DEPENDS "adduser, libc6 (>= 2.12)")
|
||||
set(CPACK_DEBIAN_PACKAGE_HOMEPAGE "https://www.foundationdb.org")
|
||||
set(CPACK_DEBIAN_clients_PACKAGE_CONTROL_EXTRA
|
||||
set(CPACK_DEBIAN_CLIENTS_PACKAGE_CONTROL_EXTRA
|
||||
${CMAKE_SOURCE_DIR}/packaging/deb/DEBIAN-foundationdb-clients/postinst)
|
||||
set(CPACK_DEBIAN_server_PACKAGE_CONTROL_EXTRA
|
||||
set(CPACK_DEBIAN_SERVER_PACKAGE_CONTROL_EXTRA
|
||||
${CMAKE_SOURCE_DIR}/packaging/deb/DEBIAN-foundationdb-server/conffiles
|
||||
${CMAKE_SOURCE_DIR}/packaging/deb/DEBIAN-foundationdb-server/preinst
|
||||
${CMAKE_SOURCE_DIR}/packaging/deb/DEBIAN-foundationdb-server/postinst
|
||||
|
@ -223,36 +272,18 @@ if((INSTALL_LAYOUT MATCHES "RPM") OR (INSTALL_LAYOUT MATCHES "DEB"))
|
|||
RESULT_VARIABLE IS_SYSTEMD
|
||||
OUTPUT_QUIET
|
||||
ERROR_QUIET)
|
||||
if(IS_SYSTEMD EQUAL "0")
|
||||
configure_file(${CMAKE_SOURCE_DIR}/packaging/rpm/foundationdb.service
|
||||
${CMAKE_BINARY_DIR}/packaging/rpm/foundationdb.service)
|
||||
install(FILES ${CMAKE_BINARY_DIR}/packaging/rpm/foundationdb.service
|
||||
DESTINATION "lib/systemd/system"
|
||||
install(FILES ${CMAKE_SOURCE_DIR}/packaging/rpm/foundationdb.service
|
||||
DESTINATION "lib/systemd/system"
|
||||
COMPONENT server)
|
||||
if(INSTALL_LAYOUT MATCHES "RPM")
|
||||
install(PROGRAMS ${CMAKE_SOURCE_DIR}/packaging/rpm/foundationdb-init
|
||||
DESTINATION "etc/rc.d/init.d"
|
||||
RENAME "foundationdb"
|
||||
COMPONENT server)
|
||||
else()
|
||||
if(INSTALL_LAYOUT MATCHES "RPM")
|
||||
install(PROGRAMS ${CMAKE_SOURCE_DIR}/packaging/rpm/foundationdb-init
|
||||
DESTINATION "etc/rc.d/init.d"
|
||||
RENAME "foundationdb"
|
||||
COMPONENT server)
|
||||
else()
|
||||
install(PROGRAMS ${CMAKE_SOURCE_DIR}/packaging/deb/foundationdb-init
|
||||
DESTINATION "etc/init.d"
|
||||
RENAME "foundationdb"
|
||||
COMPONENT server)
|
||||
endif()
|
||||
install(PROGRAMS ${CMAKE_SOURCE_DIR}/packaging/deb/foundationdb-init
|
||||
DESTINATION "etc/init.d"
|
||||
RENAME "foundationdb"
|
||||
COMPONENT server)
|
||||
endif()
|
||||
endif()
|
||||
|
||||
################################################################################
|
||||
# Helper Macros
|
||||
################################################################################
|
||||
|
||||
macro(install_symlink filepath sympath compondent)
|
||||
install(CODE "execute_process(COMMAND ${CMAKE_COMMAND} -E create_symlink ${filepath} ${sympath})" COMPONENT ${component})
|
||||
install(CODE "message(\"-- Created symlink: ${sympath} -> ${filepath}\")")
|
||||
endmacro()
|
||||
macro(install_mkdir dirname component)
|
||||
install(CODE "execute_process(COMMAND ${CMAKE_COMMAND} -E make_directory ${dirname})" COMPONENT ${component})
|
||||
install(CODE "message(\"-- Created directory: ${dirname}\")")
|
||||
endmacro()
|
||||
|
|
|
@ -0,0 +1,68 @@
|
|||
# build a virtualenv
|
||||
set(sphinx_dir ${CMAKE_CURRENT_SOURCE_DIR}/sphinx)
|
||||
set(venv_dir ${CMAKE_CURRENT_BINARY_DIR}/venv)
|
||||
set(EXE_SUFFIX "")
|
||||
if(WIN32)
|
||||
set(EXE_SUFFIX ".exe")
|
||||
endif()
|
||||
set(pip_command ${venv_dir}/bin/pip${EXE_SUFFIX})
|
||||
set(python_command ${venv_dir}/bin/python${EXE_SUFFIX})
|
||||
|
||||
add_custom_command(OUTPUT ${venv_dir}/venv_setup
|
||||
COMMAND ${VIRTUALENV_EXE} venv &&
|
||||
${CMAKE_COMMAND} -E copy ${sphinx_dir}/.pip.conf ${venv_dir}/pip.conf &&
|
||||
. ${venv_dir}/bin/activate &&
|
||||
${pip_command} install --upgrade pip &&
|
||||
${pip_command} install --upgrade -r ${sphinx_dir}/requirements.txt &&
|
||||
${pip_command} install sphinx-autobuild && # somehow this is missing in requirements.txt
|
||||
${CMAKE_COMMAND} -E touch ${venv_dir}/venv_setup
|
||||
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
|
||||
COMMENT "Set up virtualenv")
|
||||
add_custom_target(buildsphinx DEPENDS ${venv_dir}/venv_setup)
|
||||
|
||||
file(GLOB_RECURSE SRCS *.rst)
|
||||
|
||||
function(add_documentation_target)
|
||||
set(options)
|
||||
set(oneValueArgs GENERATOR SPHINX_COMMAND DOCTREE)
|
||||
set(multiValueArgs ADDITIONAL_ARGUMENTS)
|
||||
cmake_parse_arguments(ADT "${options}" "${oneValueArgs}" "${multiValueArgs}" "${ARGN}")
|
||||
if(NOT ADT_GENERATOR)
|
||||
message(ERROR "GENERATOR is a required argument to add_documentation_target")
|
||||
endif()
|
||||
set(target ${ADT_GENERATOR})
|
||||
set(SPHINX_COMMAND "${venv_dir}/bin/sphinx-build")
|
||||
if(ADT_SPHINX_COMMAND)
|
||||
set(SPHINX_COMMAND "${venv_dir}/bin/${ADT_SPHINX_COMMAND}")
|
||||
endif()
|
||||
set(doctree "doctree")
|
||||
if (ADT_DOCTREE)
|
||||
set(doctree "${ADT_DOCTREE}")
|
||||
endif()
|
||||
set(out_dir ${CMAKE_CURRENT_BINARY_DIR}/${target})
|
||||
add_custom_command(
|
||||
OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/${target}_done
|
||||
COMMAND ${CMAKE_COMMAND} -E make_directory ${out_dir} &&
|
||||
${python_command} ${SPHINX_COMMAND} -b ${target}
|
||||
-d ${doctree} -c ${sphinx_dir}
|
||||
${sphinx_dir}/source
|
||||
${CMAKE_CURRENT_BINARY_DIR}/${target} &&
|
||||
${CMAKE_COMMAND} -E touch ${CMAKE_CURRENT_BINARY_DIR}/${target}_done
|
||||
DEPENDS ${SRCS}
|
||||
WORKING_DIRECTORY ${venv_dir})
|
||||
message(STATUS "add_custom_target(${target} DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/${target}_done)")
|
||||
add_custom_target(${target} DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/${target}_done)
|
||||
add_dependencies(${target} buildsphinx)
|
||||
endfunction()
|
||||
|
||||
message(STATUS "Add html target")
|
||||
add_documentation_target(GENERATOR html)
|
||||
|
||||
set(tar_file ${CMAKE_BINARY_DIR}/packages/${CMAKE_PROJECT_NAME}-docs-${FDB_VERSION}.tar.gz)
|
||||
add_custom_command(
|
||||
OUTPUT ${tar_file}
|
||||
COMMAND ${CMAKE_COMMAND} -E tar czf ${tar_file} .
|
||||
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/html)
|
||||
add_custom_target(package_html DEPENDS ${tar_file})
|
||||
add_dependencies(package_html html)
|
||||
add_dependencies(packages package_html)
|
|
@ -8,6 +8,8 @@ Release Notes
|
|||
Features
|
||||
--------
|
||||
|
||||
* Get read version, read, and commit requests are counted and aggregated by server-side latency in configurable latency bands and output in JSON status. `(PR #1084) <https://github.com/apple/foundationdb/pull/1084>`_
|
||||
|
||||
Performance
|
||||
-----------
|
||||
|
||||
|
|
|
@ -1,25 +1,23 @@
|
|||
set(FDBBACKUP_SRCS
|
||||
backup.actor.cpp)
|
||||
|
||||
actor_set(FDBBACKUP_BUILD "${FDBBACKUP_SRCS}")
|
||||
add_executable(fdbbackup "${FDBBACKUP_BUILD}")
|
||||
actor_compile(fdbbackup "${FDBBACKUP_SRCS}")
|
||||
add_flow_target(EXECUTABLE NAME fdbbackup SRCS ${FDBBACKUP_SRCS})
|
||||
target_link_libraries(fdbbackup PRIVATE fdbclient)
|
||||
|
||||
install(TARGETS fdbbackup DESTINATION ${FDB_BIN_DIR} COMPONENT clients)
|
||||
install(PROGRAMS $<TARGET_FILE:fdbbackup>
|
||||
DESTINATION ${FDB_LIB_DIR}/foundationdb/backup_agent
|
||||
RENAME backup_agent
|
||||
install_symlink(
|
||||
TO /${FDB_BIN_DIR}/fdbbackup
|
||||
DESTINATION ${FDB_LIB_DIR}/foundationdb/backup_agent/backup_agent
|
||||
COMPONENT clients)
|
||||
install(PROGRAMS $<TARGET_FILE:fdbbackup>
|
||||
DESTINATION ${FDB_BIN_DIR}
|
||||
RENAME fdbrestore
|
||||
install_symlink(
|
||||
TO /${FDB_BIN_DIR}/fdbbackup
|
||||
DESTINATION ${FDB_BIN_DIR}/fdbrestore
|
||||
COMPONENT clients)
|
||||
install(PROGRAMS $<TARGET_FILE:fdbbackup>
|
||||
DESTINATION ${FDB_BIN_DIR}
|
||||
RENAME dr_agent
|
||||
install_symlink(
|
||||
TO /${FDB_BIN_DIR}/fdbbackup
|
||||
DESTINATION ${FDB_BIN_DIR}/dr_agent
|
||||
COMPONENT clients)
|
||||
install(PROGRAMS $<TARGET_FILE:fdbbackup>
|
||||
DESTINATION ${FDB_BIN_DIR}
|
||||
RENAME fdbdr
|
||||
install_symlink(
|
||||
TO /${FDB_BIN_DIR}/fdbbackup
|
||||
DESTINATION ${FDB_BIN_DIR}/fdbdr
|
||||
COMPONENT clients)
|
||||
|
|
|
@ -8,9 +8,7 @@ if(NOT WIN32)
|
|||
list(APPEND FDBCLI_SRCS linenoise/linenoise.c)
|
||||
endif()
|
||||
|
||||
actor_set(FDBCLI_BUILD "${FDBCLI_SRCS}")
|
||||
add_executable(fdbcli "${FDBCLI_BUILD}")
|
||||
actor_compile(fdbcli "${FDBCLI_SRCS}")
|
||||
add_flow_target(EXECUTABLE NAME fdbcli SRCS ${FDBCLI_SRCS})
|
||||
target_link_libraries(fdbcli PRIVATE fdbclient)
|
||||
|
||||
install(TARGETS fdbcli DESTINATION ${FDB_BIN_DIR} COMPONENT clients)
|
||||
|
|
|
@ -1682,7 +1682,7 @@ ACTOR Future<bool> fileConfigure(Database db, std::string filePath, bool isNewDa
|
|||
StatusObject configJSON = config.get_obj();
|
||||
|
||||
json_spirit::mValue schema;
|
||||
if(!json_spirit::read_string( JSONSchemas::configurationSchema.toString(), schema )) {
|
||||
if(!json_spirit::read_string( JSONSchemas::clusterConfigurationSchema.toString(), schema )) {
|
||||
ASSERT(false);
|
||||
}
|
||||
|
||||
|
|
|
@ -87,8 +87,6 @@ set(FDBCLIENT_SRCS
|
|||
vexillographer_compile(TARGET fdboptions LANG cpp OUT ${CMAKE_CURRENT_BINARY_DIR}/FDBOptions.g
|
||||
OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/FDBOptions.g.h ${CMAKE_CURRENT_BINARY_DIR}/FDBOptions.g.cpp)
|
||||
|
||||
actor_set(FDBCLIENT_BUILD "${FDBCLIENT_SRCS}")
|
||||
add_library(fdbclient STATIC ${FDBCLIENT_BUILD})
|
||||
add_flow_target(STATIC_LIBRARY NAME fdbclient SRCS ${FDBCLIENT_SRCS})
|
||||
add_dependencies(fdbclient fdboptions)
|
||||
actor_compile(fdbclient "${FDBCLIENT_SRCS}")
|
||||
target_link_libraries(fdbclient PUBLIC fdbrpc)
|
||||
|
|
|
@ -287,6 +287,7 @@ ACTOR Future<ConfigurationResult::Type> changeConfig( Database cx, std::map<std:
|
|||
}
|
||||
|
||||
state Future<Void> tooLong = delay(4.5);
|
||||
state std::string versionKey = g_random->randomUniqueID().toString();
|
||||
loop {
|
||||
try {
|
||||
tr.setOption( FDBTransactionOptions::PRIORITY_SYSTEM_IMMEDIATE );
|
||||
|
@ -432,6 +433,9 @@ ACTOR Future<ConfigurationResult::Type> changeConfig( Database cx, std::map<std:
|
|||
for(auto i=m.begin(); i!=m.end(); ++i)
|
||||
tr.set( StringRef(i->first), StringRef(i->second) );
|
||||
|
||||
tr.addReadConflictRange( singleKeyRange(configVersionKey) );
|
||||
tr.set( configVersionKey, versionKey );
|
||||
|
||||
wait( tr.commit() );
|
||||
break;
|
||||
} catch (Error& e) {
|
||||
|
@ -698,6 +702,7 @@ ConfigureAutoResult parseConfig( StatusObject const& status ) {
|
|||
|
||||
ACTOR Future<ConfigurationResult::Type> autoConfig( Database cx, ConfigureAutoResult conf ) {
|
||||
state Transaction tr(cx);
|
||||
state std::string versionKey = g_random->randomUniqueID().toString();
|
||||
|
||||
if(!conf.address_class.size())
|
||||
return ConfigurationResult::INCOMPLETE_CONFIGURATION; //FIXME: correct return type
|
||||
|
@ -747,6 +752,9 @@ ACTOR Future<ConfigurationResult::Type> autoConfig( Database cx, ConfigureAutoRe
|
|||
tr.set(kv.first, kv.second);
|
||||
}
|
||||
|
||||
tr.addReadConflictRange( singleKeyRange(configVersionKey) );
|
||||
tr.set( configVersionKey, versionKey );
|
||||
|
||||
wait( tr.commit() );
|
||||
return ConfigurationResult::SUCCESS;
|
||||
} catch( Error &e ) {
|
||||
|
@ -1125,6 +1133,7 @@ Reference<IQuorumChange> autoQuorumChange( int desired ) { return Reference<IQuo
|
|||
ACTOR Future<Void> excludeServers( Database cx, vector<AddressExclusion> servers ) {
|
||||
state Transaction tr(cx);
|
||||
state std::string versionKey = g_random->randomUniqueID().toString();
|
||||
state std::string excludeVersionKey = g_random->randomUniqueID().toString();
|
||||
loop {
|
||||
try {
|
||||
tr.setOption( FDBTransactionOptions::ACCESS_SYSTEM_KEYS );
|
||||
|
@ -1132,7 +1141,9 @@ ACTOR Future<Void> excludeServers( Database cx, vector<AddressExclusion> servers
|
|||
tr.setOption( FDBTransactionOptions::LOCK_AWARE );
|
||||
|
||||
tr.addReadConflictRange( singleKeyRange(excludedServersVersionKey) ); //To conflict with parallel includeServers
|
||||
tr.set( excludedServersVersionKey, versionKey );
|
||||
tr.addReadConflictRange( singleKeyRange(configVersionKey) );
|
||||
tr.set( configVersionKey, versionKey );
|
||||
tr.set( excludedServersVersionKey, excludeVersionKey );
|
||||
for(auto& s : servers)
|
||||
tr.set( encodeExcludedServersKey(s), StringRef() );
|
||||
|
||||
|
@ -1150,6 +1161,7 @@ ACTOR Future<Void> includeServers( Database cx, vector<AddressExclusion> servers
|
|||
state bool includeAll = false;
|
||||
state Transaction tr(cx);
|
||||
state std::string versionKey = g_random->randomUniqueID().toString();
|
||||
state std::string excludeVersionKey = g_random->randomUniqueID().toString();
|
||||
loop {
|
||||
try {
|
||||
tr.setOption( FDBTransactionOptions::ACCESS_SYSTEM_KEYS );
|
||||
|
@ -1159,15 +1171,27 @@ ACTOR Future<Void> includeServers( Database cx, vector<AddressExclusion> servers
|
|||
// includeServers might be used in an emergency transaction, so make sure it is retry-self-conflicting and CAUSAL_WRITE_RISKY
|
||||
tr.setOption( FDBTransactionOptions::CAUSAL_WRITE_RISKY );
|
||||
tr.addReadConflictRange( singleKeyRange(excludedServersVersionKey) );
|
||||
tr.addReadConflictRange( singleKeyRange(configVersionKey) );
|
||||
|
||||
tr.set( configVersionKey, versionKey );
|
||||
tr.set( excludedServersVersionKey, excludeVersionKey );
|
||||
|
||||
tr.set( excludedServersVersionKey, versionKey );
|
||||
for(auto& s : servers ) {
|
||||
if (!s.isValid()) {
|
||||
tr.clear( excludedServersKeys );
|
||||
includeAll = true;
|
||||
} else if (s.isWholeMachine()) {
|
||||
// Eliminate both any ip-level exclusion (1.2.3.4) and any port-level exclusions (1.2.3.4:5)
|
||||
tr.clear( KeyRangeRef( encodeExcludedServersKey(s), encodeExcludedServersKey(s) + char(':'+1) ) );
|
||||
// Eliminate both any ip-level exclusion (1.2.3.4) and any
|
||||
// port-level exclusions (1.2.3.4:5)
|
||||
// The range ['IP', 'IP;'] was originally deleted. ';' is
|
||||
// char(':' + 1). This does not work, as otherwise for all
|
||||
// x between 0 and 9, 'IPx' will also be in this range.
|
||||
//
|
||||
// This is why we now make two clears: first only of the ip
|
||||
// address, the second will delete all ports.
|
||||
auto addr = encodeExcludedServersKey(s);
|
||||
tr.clear(singleKeyRange(addr));
|
||||
tr.clear(KeyRangeRef(addr + ':', addr + char(':' + 1)));
|
||||
} else {
|
||||
tr.clear( encodeExcludedServersKey(s) );
|
||||
}
|
||||
|
@ -1564,120 +1588,121 @@ void schemaCoverage( std::string const& spath, bool covered ) {
|
|||
}
|
||||
}
|
||||
|
||||
bool schemaMatch( StatusObject const schema, StatusObject const result, std::string& errorStr, Severity sev, bool checkCoverage, std::string path, std::string schema_path ) {
|
||||
bool schemaMatch( json_spirit::mValue const& schemaValue, json_spirit::mValue const& resultValue, std::string& errorStr, Severity sev, bool checkCoverage, std::string path, std::string schemaPath ) {
|
||||
// Returns true if everything in `result` is permitted by `schema`
|
||||
|
||||
// Really this should recurse on "values" rather than "objects"?
|
||||
|
||||
bool ok = true;
|
||||
|
||||
try {
|
||||
for(auto& rkv : result) {
|
||||
auto& key = rkv.first;
|
||||
auto& rv = rkv.second;
|
||||
std::string kpath = path + "." + key;
|
||||
std::string spath = schema_path + "." + key;
|
||||
if(normJSONType(schemaValue.type()) != normJSONType(resultValue.type())) {
|
||||
errorStr += format("ERROR: Incorrect value type for key `%s'\n", path.c_str());
|
||||
TraceEvent(sev, "SchemaMismatch").detail("Path", path).detail("SchemaType", schemaValue.type()).detail("ValueType", resultValue.type());
|
||||
return false;
|
||||
}
|
||||
|
||||
if(checkCoverage) schemaCoverage(spath);
|
||||
if(resultValue.type() == json_spirit::obj_type) {
|
||||
auto& result = resultValue.get_obj();
|
||||
auto& schema = schemaValue.get_obj();
|
||||
|
||||
if (!schema.count(key)) {
|
||||
errorStr += format("ERROR: Unknown key `%s'\n", kpath.c_str());
|
||||
TraceEvent(sev, "SchemaMismatch").detail("Path", kpath).detail("SchemaPath", spath);
|
||||
ok = false;
|
||||
continue;
|
||||
}
|
||||
auto& sv = schema.at(key);
|
||||
for(auto& rkv : result) {
|
||||
auto& key = rkv.first;
|
||||
auto& rv = rkv.second;
|
||||
std::string kpath = path + "." + key;
|
||||
std::string spath = schemaPath + "." + key;
|
||||
|
||||
if (sv.type() == json_spirit::obj_type && sv.get_obj().count("$enum")) {
|
||||
auto& enum_values = sv.get_obj().at("$enum").get_array();
|
||||
if(checkCoverage) {
|
||||
schemaCoverage(spath);
|
||||
}
|
||||
|
||||
bool any_match = false;
|
||||
for(auto& enum_item : enum_values)
|
||||
if (enum_item == rv) {
|
||||
any_match = true;
|
||||
if(checkCoverage) schemaCoverage(spath + ".$enum." + enum_item.get_str());
|
||||
break;
|
||||
if(!schema.count(key)) {
|
||||
errorStr += format("ERROR: Unknown key `%s'\n", kpath.c_str());
|
||||
TraceEvent(sev, "SchemaMismatch").detail("Path", kpath).detail("SchemaPath", spath);
|
||||
ok = false;
|
||||
continue;
|
||||
}
|
||||
auto& sv = schema.at(key);
|
||||
|
||||
if(sv.type() == json_spirit::obj_type && sv.get_obj().count("$enum")) {
|
||||
auto& enum_values = sv.get_obj().at("$enum").get_array();
|
||||
|
||||
bool any_match = false;
|
||||
for(auto& enum_item : enum_values)
|
||||
if(enum_item == rv) {
|
||||
any_match = true;
|
||||
if(checkCoverage) {
|
||||
schemaCoverage(spath + ".$enum." + enum_item.get_str());
|
||||
}
|
||||
break;
|
||||
}
|
||||
if(!any_match) {
|
||||
errorStr += format("ERROR: Unknown value `%s' for key `%s'\n", json_spirit::write_string(rv).c_str(), kpath.c_str());
|
||||
TraceEvent(sev, "SchemaMismatch").detail("Path", kpath).detail("SchemaEnumItems", enum_values.size()).detail("Value", json_spirit::write_string(rv));
|
||||
if(checkCoverage) {
|
||||
schemaCoverage(spath + ".$enum." + json_spirit::write_string(rv));
|
||||
}
|
||||
ok = false;
|
||||
}
|
||||
if (!any_match) {
|
||||
errorStr += format("ERROR: Unknown value `%s' for key `%s'\n", json_spirit::write_string(rv).c_str(), kpath.c_str());
|
||||
TraceEvent(sev, "SchemaMismatch").detail("Path", kpath).detail("SchemaEnumItems", enum_values.size()).detail("Value", json_spirit::write_string(rv));
|
||||
if(checkCoverage) schemaCoverage(spath + ".$enum." + json_spirit::write_string(rv));
|
||||
ok = false;
|
||||
}
|
||||
} else if (sv.type() == json_spirit::obj_type && sv.get_obj().count("$map")) {
|
||||
if (rv.type() != json_spirit::obj_type) {
|
||||
errorStr += format("ERROR: Expected an object as the value for key `%s'\n", kpath.c_str());
|
||||
TraceEvent(sev, "SchemaMismatch").detail("Path", kpath).detail("SchemaType", sv.type()).detail("ValueType", rv.type());
|
||||
ok = false;
|
||||
continue;
|
||||
}
|
||||
if(sv.get_obj().at("$map").type() != json_spirit::obj_type) {
|
||||
continue;
|
||||
}
|
||||
auto& schema_obj = sv.get_obj().at("$map").get_obj();
|
||||
auto& value_obj = rv.get_obj();
|
||||
|
||||
if(checkCoverage) schemaCoverage(spath + ".$map");
|
||||
|
||||
for(auto& value_pair : value_obj) {
|
||||
auto vpath = kpath + "[" + value_pair.first + "]";
|
||||
auto upath = spath + ".$map";
|
||||
if (value_pair.second.type() != json_spirit::obj_type) {
|
||||
errorStr += format("ERROR: Expected an object for `%s'\n", vpath.c_str());
|
||||
TraceEvent(sev, "SchemaMismatch").detail("Path", vpath).detail("ValueType", value_pair.second.type());
|
||||
} else if(sv.type() == json_spirit::obj_type && sv.get_obj().count("$map")) {
|
||||
if(rv.type() != json_spirit::obj_type) {
|
||||
errorStr += format("ERROR: Expected an object as the value for key `%s'\n", kpath.c_str());
|
||||
TraceEvent(sev, "SchemaMismatch").detail("Path", kpath).detail("SchemaType", sv.type()).detail("ValueType", rv.type());
|
||||
ok = false;
|
||||
continue;
|
||||
}
|
||||
if (!schemaMatch(schema_obj, value_pair.second.get_obj(), errorStr, sev, checkCoverage, vpath, upath))
|
||||
ok = false;
|
||||
}
|
||||
} else {
|
||||
// The schema entry isn't an operator, so it asserts a type and (depending on the type) recursive schema definition
|
||||
if (normJSONType(sv.type()) != normJSONType(rv.type())) {
|
||||
errorStr += format("ERROR: Incorrect value type for key `%s'\n", kpath.c_str());
|
||||
TraceEvent(sev, "SchemaMismatch").detail("Path", kpath).detail("SchemaType", sv.type()).detail("ValueType", rv.type());
|
||||
ok = false;
|
||||
continue;
|
||||
}
|
||||
if (rv.type() == json_spirit::array_type) {
|
||||
auto& value_array = rv.get_array();
|
||||
auto& schema_array = sv.get_array();
|
||||
if (!schema_array.size()) {
|
||||
// An empty schema array means that the value array is required to be empty
|
||||
if (value_array.size()) {
|
||||
errorStr += format("ERROR: Expected an empty array for key `%s'\n", kpath.c_str());
|
||||
TraceEvent(sev, "SchemaMismatch").detail("Path", kpath).detail("SchemaSize", schema_array.size()).detail("ValueSize", value_array.size());
|
||||
if(sv.get_obj().at("$map").type() != json_spirit::obj_type) {
|
||||
continue;
|
||||
}
|
||||
auto& schemaVal = sv.get_obj().at("$map");
|
||||
auto& valueObj = rv.get_obj();
|
||||
|
||||
if(checkCoverage) {
|
||||
schemaCoverage(spath + ".$map");
|
||||
}
|
||||
|
||||
for(auto& valuePair : valueObj) {
|
||||
auto vpath = kpath + "[" + valuePair.first + "]";
|
||||
auto upath = spath + ".$map";
|
||||
if (valuePair.second.type() != json_spirit::obj_type) {
|
||||
errorStr += format("ERROR: Expected an object for `%s'\n", vpath.c_str());
|
||||
TraceEvent(sev, "SchemaMismatch").detail("Path", vpath).detail("ValueType", valuePair.second.type());
|
||||
ok = false;
|
||||
continue;
|
||||
}
|
||||
} else if (schema_array.size() == 1 && schema_array[0].type() == json_spirit::obj_type) {
|
||||
// A one item schema array means that all items in the value must match the first item in the schema
|
||||
auto& schema_obj = schema_array[0].get_obj();
|
||||
int index = 0;
|
||||
for(auto &value_item : value_array) {
|
||||
if (value_item.type() != json_spirit::obj_type) {
|
||||
errorStr += format("ERROR: Expected all array elements to be objects for key `%s'\n", kpath.c_str());
|
||||
TraceEvent(sev, "SchemaMismatch").detail("Path", kpath + format("[%d]",index)).detail("ValueType", value_item.type());
|
||||
ok = false;
|
||||
continue;
|
||||
}
|
||||
if (!schemaMatch(schema_obj, value_item.get_obj(), errorStr, sev, checkCoverage, kpath + format("[%d]", index), spath + "[0]"))
|
||||
ok = false;
|
||||
index++;
|
||||
if(!schemaMatch(schemaVal, valuePair.second, errorStr, sev, checkCoverage, vpath, upath)) {
|
||||
ok = false;
|
||||
}
|
||||
} else
|
||||
ASSERT(false); // Schema doesn't make sense
|
||||
} else if (rv.type() == json_spirit::obj_type) {
|
||||
auto& schema_obj = sv.get_obj();
|
||||
auto& value_obj = rv.get_obj();
|
||||
if (!schemaMatch(schema_obj, value_obj, errorStr, sev, checkCoverage, kpath, spath))
|
||||
}
|
||||
} else {
|
||||
if(!schemaMatch(sv, rv, errorStr, sev, checkCoverage, kpath, spath)) {
|
||||
ok = false;
|
||||
}
|
||||
}
|
||||
}
|
||||
} else if(resultValue.type() == json_spirit::array_type) {
|
||||
auto& valueArray = resultValue.get_array();
|
||||
auto& schemaArray = schemaValue.get_array();
|
||||
if(!schemaArray.size()) {
|
||||
// An empty schema array means that the value array is required to be empty
|
||||
if(valueArray.size()) {
|
||||
errorStr += format("ERROR: Expected an empty array for key `%s'\n", path.c_str());
|
||||
TraceEvent(sev, "SchemaMismatch").detail("Path", path).detail("SchemaSize", schemaArray.size()).detail("ValueSize", valueArray.size());
|
||||
return false;
|
||||
}
|
||||
} else if(schemaArray.size() == 1) {
|
||||
// A one item schema array means that all items in the value must match the first item in the schema
|
||||
int index = 0;
|
||||
for(auto &valueItem : valueArray) {
|
||||
if(!schemaMatch(schemaArray[0], valueItem, errorStr, sev, checkCoverage, path + format("[%d]", index), schemaPath + "[0]")) {
|
||||
ok = false;
|
||||
}
|
||||
index++;
|
||||
}
|
||||
} else {
|
||||
ASSERT(false); // Schema doesn't make sense
|
||||
}
|
||||
}
|
||||
return ok;
|
||||
} catch (std::exception& e) {
|
||||
TraceEvent(SevError, "SchemaMatchException").detail("What", e.what()).detail("Path", path).detail("SchemaPath", schema_path);
|
||||
TraceEvent(SevError, "SchemaMatchException").detail("What", e.what()).detail("Path", path).detail("SchemaPath", schemaPath);
|
||||
throw unknown_error();
|
||||
}
|
||||
}
|
||||
|
|
|
@ -177,6 +177,6 @@ Future<Void> waitForPrimaryDC( Database const& cx, StringRef const& dcId );
|
|||
Future<std::vector<NetworkAddress>> getCoordinators( Database const& cx );
|
||||
|
||||
void schemaCoverage( std::string const& spath, bool covered=true );
|
||||
bool schemaMatch( StatusObject const schema, StatusObject const result, std::string& errorStr, Severity sev=SevError, bool checkCoverage=false, std::string path = std::string(), std::string schema_path = std::string() );
|
||||
bool schemaMatch( json_spirit::mValue const& schema, json_spirit::mValue const& result, std::string& errorStr, Severity sev=SevError, bool checkCoverage=false, std::string path = std::string(), std::string schema_path = std::string() );
|
||||
|
||||
#endif
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
|
||||
/*
|
||||
* MasterProxyInterface.h
|
||||
*
|
||||
|
@ -26,6 +27,8 @@
|
|||
#include "fdbclient/StorageServerInterface.h"
|
||||
#include "fdbclient/CommitTransaction.h"
|
||||
|
||||
#include "flow/Stats.h"
|
||||
|
||||
struct MasterProxyInterface {
|
||||
enum { LocationAwareLoadBalance = 1 };
|
||||
enum { AlwaysFresh = 1 };
|
||||
|
@ -74,7 +77,7 @@ struct CommitID {
|
|||
CommitID( Version version, uint16_t txnBatchId ) : version(version), txnBatchId(txnBatchId) {}
|
||||
};
|
||||
|
||||
struct CommitTransactionRequest {
|
||||
struct CommitTransactionRequest : TimedRequest {
|
||||
enum {
|
||||
FLAG_IS_LOCK_AWARE = 0x1,
|
||||
FLAG_FIRST_IN_BATCH = 0x2
|
||||
|
@ -120,7 +123,7 @@ struct GetReadVersionReply {
|
|||
}
|
||||
};
|
||||
|
||||
struct GetReadVersionRequest {
|
||||
struct GetReadVersionRequest : TimedRequest {
|
||||
enum {
|
||||
PRIORITY_SYSTEM_IMMEDIATE = 15 << 24, // Highest possible priority, always executed even if writes are otherwise blocked
|
||||
PRIORITY_DEFAULT = 8 << 24,
|
||||
|
|
|
@ -124,6 +124,15 @@ const KeyRef JSONSchemas::statusSchema = LiteralStringRef(R"statusSchema(
|
|||
"hz":0.0,
|
||||
"counter":0,
|
||||
"roughness":0.0
|
||||
},
|
||||
"grv_latency_bands":{
|
||||
"$map": 1
|
||||
},
|
||||
"read_latency_bands":{
|
||||
"$map": 1
|
||||
},
|
||||
"commit_latency_bands":{
|
||||
"$map": 1
|
||||
}
|
||||
}
|
||||
],
|
||||
|
@ -604,7 +613,7 @@ const KeyRef JSONSchemas::statusSchema = LiteralStringRef(R"statusSchema(
|
|||
}
|
||||
})statusSchema");
|
||||
|
||||
const KeyRef JSONSchemas::configurationSchema = LiteralStringRef(R"configSchema(
|
||||
const KeyRef JSONSchemas::clusterConfigurationSchema = LiteralStringRef(R"configSchema(
|
||||
{
|
||||
"create":{
|
||||
"$enum":[
|
||||
|
@ -671,3 +680,25 @@ const KeyRef JSONSchemas::configurationSchema = LiteralStringRef(R"configSchema(
|
|||
"auto_logs":3,
|
||||
"proxies":5
|
||||
})configSchema");
|
||||
|
||||
const KeyRef JSONSchemas::latencyBandConfigurationSchema = LiteralStringRef(R"configSchema(
|
||||
{
|
||||
"get_read_version":{
|
||||
"bands":[
|
||||
0.0
|
||||
]
|
||||
},
|
||||
"read":{
|
||||
"bands":[
|
||||
0.0
|
||||
],
|
||||
"max_key_selector_offset":0,
|
||||
"max_read_bytes":0
|
||||
},
|
||||
"commit":{
|
||||
"bands":[
|
||||
0.0
|
||||
],
|
||||
"max_commit_bytes":0
|
||||
}
|
||||
})configSchema");
|
||||
|
|
|
@ -28,7 +28,8 @@
|
|||
|
||||
struct JSONSchemas {
|
||||
static const KeyRef statusSchema;
|
||||
static const KeyRef configurationSchema;
|
||||
static const KeyRef clusterConfigurationSchema;
|
||||
static const KeyRef latencyBandConfigurationSchema;
|
||||
};
|
||||
|
||||
#endif /* FDBCLIENT_SCHEMAS_H */
|
||||
|
|
|
@ -27,6 +27,7 @@
|
|||
#include "fdbrpc/QueueModel.h"
|
||||
#include "fdbrpc/fdbrpc.h"
|
||||
#include "fdbrpc/LoadBalance.actor.h"
|
||||
#include "flow/Stats.h"
|
||||
|
||||
struct StorageServerInterface {
|
||||
enum {
|
||||
|
@ -107,7 +108,7 @@ struct GetValueReply : public LoadBalancedReply {
|
|||
}
|
||||
};
|
||||
|
||||
struct GetValueRequest {
|
||||
struct GetValueRequest : TimedRequest {
|
||||
Key key;
|
||||
Version version;
|
||||
Optional<UID> debugID;
|
||||
|
@ -150,7 +151,7 @@ struct GetKeyValuesReply : public LoadBalancedReply {
|
|||
}
|
||||
};
|
||||
|
||||
struct GetKeyValuesRequest {
|
||||
struct GetKeyValuesRequest : TimedRequest {
|
||||
Arena arena;
|
||||
KeySelectorRef begin, end;
|
||||
Version version; // or latestVersion
|
||||
|
@ -178,7 +179,7 @@ struct GetKeyReply : public LoadBalancedReply {
|
|||
}
|
||||
};
|
||||
|
||||
struct GetKeyRequest {
|
||||
struct GetKeyRequest : TimedRequest {
|
||||
Arena arena;
|
||||
KeySelectorRef sel;
|
||||
Version version; // or latestVersion
|
||||
|
|
|
@ -381,6 +381,8 @@ std::string encodeExcludedServersKey( AddressExclusion const& addr ) {
|
|||
return excludedServersPrefix.toString() + as;
|
||||
}
|
||||
|
||||
const KeyRef configVersionKey = LiteralStringRef("\xff/conf/confChange");
|
||||
|
||||
const KeyRangeRef workerListKeys( LiteralStringRef("\xff/worker/"), LiteralStringRef("\xff/worker0") );
|
||||
const KeyRef workerListPrefix = workerListKeys.begin;
|
||||
|
||||
|
@ -434,6 +436,9 @@ const KeyRangeRef fdbClientInfoPrefixRange(LiteralStringRef("\xff\x02/fdbClientI
|
|||
const KeyRef fdbClientInfoTxnSampleRate = LiteralStringRef("\xff\x02/fdbClientInfo/client_txn_sample_rate/");
|
||||
const KeyRef fdbClientInfoTxnSizeLimit = LiteralStringRef("\xff\x02/fdbClientInfo/client_txn_size_limit/");
|
||||
|
||||
// Request latency measurement key
|
||||
const KeyRef latencyBandConfigKey = LiteralStringRef("\xff\x02/latencyBandConfig");
|
||||
|
||||
// Keyspace to maintain wall clock to version map
|
||||
const KeyRangeRef timeKeeperPrefixRange(LiteralStringRef("\xff\x02/timeKeeper/map/"), LiteralStringRef("\xff\x02/timeKeeper/map0"));
|
||||
const KeyRef timeKeeperVersionKey = LiteralStringRef("\xff\x02/timeKeeper/version");
|
||||
|
|
|
@ -133,6 +133,11 @@ extern const KeyRef excludedServersVersionKey; // The value of this key shall b
|
|||
const AddressExclusion decodeExcludedServersKey( KeyRef const& key ); // where key.startsWith(excludedServersPrefix)
|
||||
std::string encodeExcludedServersKey( AddressExclusion const& );
|
||||
|
||||
// "\xff/conf/confChange" := ""
|
||||
// This is the key representing the version of the configuration, which should be updated for each
|
||||
// new configuration.
|
||||
extern const KeyRef configVersionKey;
|
||||
|
||||
// "\xff/workers/[[processID]]" := ""
|
||||
// Asynchronously updated by the cluster controller, this is a list of fdbserver processes that have joined the cluster
|
||||
// and are currently (recently) available
|
||||
|
@ -212,6 +217,9 @@ extern const KeyRangeRef fdbClientInfoPrefixRange;
|
|||
extern const KeyRef fdbClientInfoTxnSampleRate;
|
||||
extern const KeyRef fdbClientInfoTxnSizeLimit;
|
||||
|
||||
// Request latency measurement key
|
||||
extern const KeyRef latencyBandConfigKey;
|
||||
|
||||
// Keyspace to maintain wall clock to version map
|
||||
extern const KeyRangeRef timeKeeperPrefixRange;
|
||||
extern const KeyRef timeKeeperVersionKey;
|
||||
|
|
|
@ -1,5 +1,4 @@
|
|||
set(FDBRPC_SRCS
|
||||
ActorFuzz.actor.cpp
|
||||
AsyncFileCached.actor.h
|
||||
AsyncFileEIO.actor.h
|
||||
AsyncFileKAIO.actor.h
|
||||
|
@ -11,9 +10,7 @@ set(FDBRPC_SRCS
|
|||
AsyncFileWriteChecker.cpp
|
||||
batcher.actor.h
|
||||
crc32c.cpp
|
||||
dsltest.actor.cpp
|
||||
FailureMonitor.actor.cpp
|
||||
FlowTests.actor.cpp
|
||||
FlowTransport.actor.cpp
|
||||
genericactors.actor.h
|
||||
genericactors.actor.cpp
|
||||
|
@ -55,8 +52,13 @@ if(NOT WIN32)
|
|||
list(APPEND FDBRPC_SRCS libcoroutine/context.c libeio/eio.c)
|
||||
endif()
|
||||
|
||||
actor_set(FDBRPC_BUILD "${FDBRPC_SRCS}")
|
||||
add_library(fdbrpc STATIC ${FDBRPC_BUILD})
|
||||
actor_compile(fdbrpc "${FDBRPC_SRCS}")
|
||||
set(FDBRPC_SRCS_DISABLE_ACTOR_WITHOUT_WAIT_WARNING
|
||||
ActorFuzz.actor.cpp
|
||||
FlowTests.actor.cpp
|
||||
dsltest.actor.cpp)
|
||||
|
||||
add_flow_target(STATIC_LIBRARY NAME fdbrpc
|
||||
SRCS ${FDBRPC_SRCS}
|
||||
DISABLE_ACTOR_WITHOUT_WAIT_WARNING ${FDBRPC_SRCS_DISABLE_ACTOR_WITHOUT_WAIT_WARNING})
|
||||
target_include_directories(fdbrpc PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/libeio)
|
||||
target_link_libraries(fdbrpc PUBLIC flow)
|
||||
|
|
|
@ -148,6 +148,27 @@ ProcessClass::Fitness ProcessClass::machineClassFitness( ClusterRole role ) cons
|
|||
default:
|
||||
return ProcessClass::WorstFit;
|
||||
}
|
||||
case ProcessClass::DataDistributor:
|
||||
switch( _class ) {
|
||||
case ProcessClass::DataDistributorClass:
|
||||
return ProcessClass::BestFit;
|
||||
case ProcessClass::StatelessClass:
|
||||
return ProcessClass::GoodFit;
|
||||
case ProcessClass::MasterClass:
|
||||
return ProcessClass::OkayFit;
|
||||
case ProcessClass::ResolutionClass:
|
||||
return ProcessClass::OkayFit;
|
||||
case ProcessClass::TransactionClass:
|
||||
return ProcessClass::OkayFit;
|
||||
case ProcessClass::ProxyClass:
|
||||
return ProcessClass::OkayFit;
|
||||
case ProcessClass::UnsetClass:
|
||||
return ProcessClass::UnsetFit;
|
||||
case ProcessClass::TesterClass:
|
||||
return ProcessClass::NeverAssign;
|
||||
default:
|
||||
return ProcessClass::WorstFit;
|
||||
}
|
||||
default:
|
||||
return ProcessClass::NeverAssign;
|
||||
}
|
||||
|
|
|

@@ -26,9 +26,9 @@
struct ProcessClass {
// This enum is stored in restartInfo.ini for upgrade tests, so be very careful about changing the existing items!
enum ClassType { UnsetClass, StorageClass, TransactionClass, ResolutionClass, TesterClass, ProxyClass, MasterClass, StatelessClass, LogClass, ClusterControllerClass, LogRouterClass, InvalidClass = -1 };
enum ClassType { UnsetClass, StorageClass, TransactionClass, ResolutionClass, TesterClass, ProxyClass, MasterClass, StatelessClass, LogClass, ClusterControllerClass, LogRouterClass, DataDistributorClass, InvalidClass = -1 };
enum Fitness { BestFit, GoodFit, UnsetFit, OkayFit, WorstFit, ExcludeFit, NeverAssign }; //cannot be larger than 7 because of leader election mask
enum ClusterRole { Storage, TLog, Proxy, Master, Resolver, LogRouter, ClusterController, NoRole };
enum ClusterRole { Storage, TLog, Proxy, Master, Resolver, LogRouter, ClusterController, DataDistributor, NoRole };
enum ClassSource { CommandLineSource, AutoSource, DBSource, InvalidSource = -1 };
int16_t _class;
int16_t _source;
@@ -48,6 +48,7 @@ public:
else if (s=="log") _class = LogClass;
else if (s=="router") _class = LogRouterClass;
else if (s=="cluster_controller") _class = ClusterControllerClass;
else if (s=="data_distributor") _class = DataDistributorClass;
else _class = InvalidClass;
}

@@ -63,6 +64,7 @@ public:
else if (classStr=="log") _class = LogClass;
else if (classStr=="router") _class = LogRouterClass;
else if (classStr=="cluster_controller") _class = ClusterControllerClass;
else if (classStr=="data_distributor") _class = DataDistributorClass;
else _class = InvalidClass;

if (sourceStr=="command_line") _source = CommandLineSource;
@@ -93,6 +95,7 @@ public:
case LogClass: return "log";
case LogRouterClass: return "router";
case ClusterControllerClass: return "cluster_controller";
case DataDistributorClass: return "data_distributor";
default: return "invalid";
}
}
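Because ClassType values are persisted (see the restartInfo.ini comment above), DataDistributorClass has to be appended after LogRouterClass rather than inserted earlier, so integer values already written by older binaries keep their meaning. A quick illustrative check of that property (not part of the diff):

// Illustrative only: implicit enumerator values start at 0 and must not shift for pre-existing entries.
static_assert( ProcessClass::UnsetClass == 0, "persisted value must not change" );
static_assert( ProcessClass::LogRouterClass == 10, "persisted value must not change" );
static_assert( ProcessClass::DataDistributorClass == 11, "new class is appended at the end" );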

@@ -98,6 +98,7 @@ public:
case ProcessClass::LogClass: return true;
case ProcessClass::LogRouterClass: return false;
case ProcessClass::ClusterControllerClass: return false;
case ProcessClass::DataDistributorClass: return false;
default: return false;
}
}

@@ -149,7 +149,7 @@ static void applyMetadataMutations(UID const& dbgid, Arena &arena, VectorRef<Mut
}
else if (m.param1.startsWith(configKeysPrefix) || m.param1 == coordinatorsKey) {
if(Optional<StringRef>(m.param2) != txnStateStore->readValue(m.param1).get().castTo<StringRef>()) { // FIXME: Make this check more specific, here or by reading configuration whenever there is a change
if(!m.param1.startsWith( excludedServersPrefix ) && m.param1 != excludedServersVersionKey) {
if(!m.param1.startsWith( excludedServersPrefix ) && m.param1 != excludedServersVersionKey && m.param1 != configVersionKey) {
auto t = txnStateStore->readValue(m.param1).get();
TraceEvent("MutationRequiresRestart", dbgid).detail("M", m.toString()).detail("PrevValue", t.present() ? printable(t.get()) : "(none)").detail("ToCommit", toCommit!=NULL);
if(confChange) *confChange = true;

@@ -13,6 +13,7 @@ set(FDBSERVER_SRCS
DataDistribution.h
DataDistributionQueue.actor.cpp
DataDistributionTracker.actor.cpp
DataDistributorInterface.h
DBCoreState.h
DiskQueue.actor.cpp
fdbserver.actor.cpp
@@ -27,6 +28,8 @@ set(FDBSERVER_SRCS
KeyValueStoreSQLite.actor.cpp
Knobs.cpp
Knobs.h
LatencyBandConfig.cpp
LatencyBandConfig.h
LeaderElection.actor.cpp
LeaderElection.h
LogProtocolMessage.h
@@ -173,9 +176,7 @@ set(FDBSERVER_SRCS

file(MAKE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/workloads)

actor_set(FDBSERVER_BUILD "${FDBSERVER_SRCS}")
add_executable(fdbserver ${FDBSERVER_BUILD})
actor_compile(fdbserver "${FDBSERVER_SRCS}")
add_flow_target(EXECUTABLE NAME fdbserver SRCS ${FDBSERVER_SRCS})
target_include_directories(fdbserver PRIVATE
${CMAKE_CURRENT_BINARY_DIR}/workloads
${CMAKE_CURRENT_SOURCE_DIR}/workloads)

@@ -22,14 +22,17 @@
#include "flow/ActorCollection.h"
#include "fdbclient/NativeAPI.h"
#include "fdbserver/CoordinationInterface.h"
#include "fdbserver/DataDistributorInterface.h"
#include "fdbserver/Knobs.h"
#include "fdbserver/MoveKeys.h"
#include "fdbserver/WorkerInterface.h"
#include "fdbserver/LeaderElection.h"
#include "fdbserver/LogSystemConfig.h"
#include "fdbserver/WaitFailure.h"
#include "fdbserver/ClusterRecruitmentInterface.h"
#include "fdbserver/ServerDBInfo.h"
#include "fdbserver/Status.h"
#include "fdbserver/LatencyBandConfig.h"
#include <algorithm>
#include "fdbclient/DatabaseContext.h"
#include "fdbserver/RecoveryState.h"
@@ -105,7 +108,20 @@ public:
serverInfo( new AsyncVar<ServerDBInfo>( ServerDBInfo() ) ),
db( DatabaseContext::create( clientInfo, Future<Void>(), LocalityData(), true, TaskDefaultEndpoint, true ) ) // SOMEDAY: Locality!
{
}

void setDistributor(const DataDistributorInterface& distributorInterf) {
ServerDBInfo newInfo = serverInfo->get();
newInfo.id = g_random->randomUniqueID();
newInfo.distributor = distributorInterf;
serverInfo->set( newInfo );
}

void clearDistributor() {
ServerDBInfo newInfo = serverInfo->get();
newInfo.id = g_random->randomUniqueID();
newInfo.distributor = Optional<DataDistributorInterface>();
serverInfo->set( newInfo );
}
};
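Because setDistributor() and clearDistributor() publish through the serverInfo AsyncVar and bump the id with a fresh randomUniqueID(), any actor can track distributor arrivals and departures with the usual onChange() loop (waitDDRejoinOrStartDD further down in this commit uses this pattern). A minimal illustrative observer, assuming only the types shown in this hunk:

// Illustrative only: "db" is a ClusterControllerData::DBInfo as in the hunk above.
ACTOR Future<Void> watchDistributor( ClusterControllerData::DBInfo* db ) {
	loop {
		if (db->serverInfo->get().distributor.present()) {
			TraceEvent("DistributorKnown").detail("DDID", db->serverInfo->get().distributor.get().id());
		}
		wait( db->serverInfo->onChange() );  // wakes whenever setDistributor()/clearDistributor() publish a new ServerDBInfo
	}
}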

@@ -502,12 +518,19 @@ public:
return result;
}

void updateKnownIds(std::map< Optional<Standalone<StringRef>>, int>* id_used) {
(*id_used)[masterProcessId]++;
(*id_used)[clusterControllerProcessId]++;
if (db.serverInfo->get().distributor.present()) {
(*id_used)[db.serverInfo->get().distributor.get().locality.processId()]++;
}
}

RecruitRemoteFromConfigurationReply findRemoteWorkersForConfiguration( RecruitRemoteFromConfigurationRequest const& req ) {
RecruitRemoteFromConfigurationReply result;
std::map< Optional<Standalone<StringRef>>, int> id_used;

id_used[masterProcessId]++;
id_used[clusterControllerProcessId]++;
updateKnownIds(&id_used);

std::set<Optional<Key>> remoteDC;
remoteDC.insert(req.dcId);
@@ -545,8 +568,7 @@ public:
ErrorOr<RecruitFromConfigurationReply> findWorkersForConfiguration( RecruitFromConfigurationRequest const& req, Optional<Key> dcId ) {
RecruitFromConfigurationReply result;
std::map< Optional<Standalone<StringRef>>, int> id_used;
id_used[masterProcessId]++;
id_used[clusterControllerProcessId]++;
updateKnownIds(&id_used);

ASSERT(dcId.present());

@@ -674,9 +696,7 @@ public:
} else {
RecruitFromConfigurationReply result;
std::map< Optional<Standalone<StringRef>>, int> id_used;
id_used[masterProcessId]++;
id_used[clusterControllerProcessId]++;

updateKnownIds(&id_used);
auto tlogs = getWorkersForTlogs( req.configuration, req.configuration.tLogReplicationFactor, req.configuration.getDesiredLogs(), req.configuration.tLogPolicy, id_used );
for(int i = 0; i < tlogs.size(); i++) {
result.tLogs.push_back(tlogs[i].first);
@@ -898,6 +918,9 @@ public:

std::map< Optional<Standalone<StringRef>>, int> id_used;
id_used[clusterControllerProcessId]++;
if (db.serverInfo->get().distributor.present()) {
id_used[db.serverInfo->get().distributor.get().locality.processId()]++;
}
WorkerFitnessInfo mworker = getWorkerForRoleInDatacenter(clusterControllerDcId, ProcessClass::Master, ProcessClass::NeverAssign, db.config, id_used, true);

if ( oldMasterFit < mworker.fitness )
@@ -991,8 +1014,31 @@ public:
return false;
}

std::map< Optional<Standalone<StringRef>>, int> getUsedIds() {
std::map<Optional<Standalone<StringRef>>, int> idUsed;
updateKnownIds(&idUsed);

auto dbInfo = db.serverInfo->get();
for (const auto& tlogset : dbInfo.logSystemConfig.tLogs) {
for (const auto& tlog: tlogset.tLogs) {
if (tlog.present()) {
idUsed[tlog.interf().locality.processId()]++;
}
}
}
for (const MasterProxyInterface& interf : dbInfo.client.proxies) {
ASSERT(interf.locality.processId().present());
idUsed[interf.locality.processId()]++;
}
for (const ResolverInterface& interf: dbInfo.resolvers) {
ASSERT(interf.locality.processId().present());
idUsed[interf.locality.processId()]++;
}
return idUsed;
}

std::map< Optional<Standalone<StringRef>>, WorkerInfo > id_worker;
std::map< Optional<Standalone<StringRef>>, ProcessClass > id_class; //contains the mapping from process id to process class from the database
std::map< Optional<Standalone<StringRef>>, ProcessClass > id_class; //contains the mapping from process id to process class from the database
Standalone<RangeResultRef> lastProcessClasses;
bool gotProcessClasses;
bool gotFullyRecoveredConfig;
@@ -1016,6 +1062,7 @@ public:
Optional<double> remoteStartTime;
Version datacenterVersionDifference;
bool versionDifferenceUpdated;
PromiseStream<Future<Void>> addActor;

ClusterControllerData( ClusterControllerFullInterface const& ccInterface, LocalityData const& locality )
: id(ccInterface.id()), ac(false), outstandingRequestChecker(Void()), gotProcessClasses(false), gotFullyRecoveredConfig(false), startTime(now()), datacenterVersionDifference(0), versionDifferenceUpdated(false)
@@ -1035,14 +1082,6 @@ public:
}
};

template <class K, class T>
vector<T> values( std::map<K,T> const& map ) {
vector<T> t;
for(auto i = map.begin(); i!=map.end(); ++i)
t.push_back(i->second);
return t;
}

ACTOR Future<Void> clusterWatchDatabase( ClusterControllerData* cluster, ClusterControllerData::DBInfo* db )
{
state MasterInterface iMaster;
@@ -1064,6 +1103,9 @@ ACTOR Future<Void> clusterWatchDatabase( ClusterControllerData* cluster, Cluster
//This should always be possible, because we can recruit the master on the same process as the cluster controller.
std::map< Optional<Standalone<StringRef>>, int> id_used;
id_used[cluster->clusterControllerProcessId]++;
if (cluster->db.serverInfo->get().distributor.present()) {
id_used[cluster->db.serverInfo->get().distributor.get().locality.processId()]++;
}
state WorkerFitnessInfo masterWorker = cluster->getWorkerForRoleInDatacenter(cluster->clusterControllerDcId, ProcessClass::Master, ProcessClass::NeverAssign, db->config, id_used);
if( ( masterWorker.worker.second.machineClassFitness( ProcessClass::Master ) > SERVER_KNOBS->EXPECTED_MASTER_FITNESS || masterWorker.worker.first.locality.processId() == cluster->clusterControllerProcessId )
&& now() - cluster->startTime < SERVER_KNOBS->WAIT_FOR_GOOD_RECRUITMENT_DELAY ) {
@@ -1099,6 +1141,7 @@ ACTOR Future<Void> clusterWatchDatabase( ClusterControllerData* cluster, Cluster
dbInfo.masterLifetime = db->serverInfo->get().masterLifetime;
++dbInfo.masterLifetime;
dbInfo.clusterInterface = db->serverInfo->get().clusterInterface;
dbInfo.distributor = db->serverInfo->get().distributor;

TraceEvent("CCWDB", cluster->id).detail("Lifetime", dbInfo.masterLifetime.toString()).detail("ChangeID", dbInfo.id);
db->serverInfo->set( dbInfo );
@@ -1704,6 +1747,11 @@ void registerWorker( RegisterWorkerRequest req, ClusterControllerData *self ) {
}
}

if ( req.distributorInterf.present() && !self->db.serverInfo->get().distributor.present() ) {
const DataDistributorInterface& di = req.distributorInterf.get();
TraceEvent("ClusterController_RegisterDataDistributor", self->id).detail("DDID", di.id());
self->db.setDistributor( di );
}
if( info == self->id_worker.end() ) {
self->id_worker[w.locality.processId()] = WorkerInfo( workerAvailabilityWatch( w, newProcessClass, self ), req.reply, req.generation, w, req.initialClass, newProcessClass, newPriorityInfo );
checkOutstandingRequests( self );
@@ -1968,6 +2016,43 @@ ACTOR Future<Void> monitorProcessClasses(ClusterControllerData *self) {
}
}

ACTOR Future<Void> monitorServerInfoConfig(ClusterControllerData::DBInfo* db) {
loop {
state ReadYourWritesTransaction tr(db->db);
loop {
try {
tr.setOption(FDBTransactionOptions::READ_SYSTEM_KEYS);
tr.setOption(FDBTransactionOptions::PRIORITY_SYSTEM_IMMEDIATE);
tr.setOption(FDBTransactionOptions::READ_LOCK_AWARE);

Optional<Value> configVal = wait(tr.get(latencyBandConfigKey));
Optional<LatencyBandConfig> config;
if(configVal.present()) {
config = LatencyBandConfig::parse(configVal.get());
}

ServerDBInfo serverInfo = db->serverInfo->get();
if(config != serverInfo.latencyBandConfig) {
TraceEvent("LatencyBandConfigChanged").detail("Present", config.present());
serverInfo.id = g_random->randomUniqueID();
serverInfo.latencyBandConfig = config;
db->serverInfo->set(serverInfo);
}

state Future<Void> configChangeFuture = tr.watch(latencyBandConfigKey);

wait(tr.commit());
wait(configChangeFuture);

break;
}
catch (Error &e) {
wait(tr.onError(e));
}
}
}
}

ACTOR Future<Void> monitorClientTxnInfoConfigs(ClusterControllerData::DBInfo* db) {
loop {
state ReadYourWritesTransaction tr(db->db);
@@ -2177,24 +2262,85 @@ ACTOR Future<Void> updateDatacenterVersionDifference( ClusterControllerData *sel
}
}

ACTOR Future<DataDistributorInterface> startDataDistributor( ClusterControllerData *self ) {
state Optional<Key> dcId = self->clusterControllerDcId;
while ( !self->clusterControllerProcessId.present() || !self->masterProcessId.present() ) {
wait( delay(SERVER_KNOBS->WAIT_FOR_GOOD_RECRUITMENT_DELAY) );
}

loop {
try {
while ( self->db.serverInfo->get().recoveryState < RecoveryState::ACCEPTING_COMMITS ) {
wait( self->db.serverInfo->onChange() );
}

std::map<Optional<Standalone<StringRef>>, int> id_used = self->getUsedIds();
state WorkerFitnessInfo data_distributor = self->getWorkerForRoleInDatacenter(dcId, ProcessClass::DataDistributor, ProcessClass::NeverAssign, self->db.config, id_used);
state InitializeDataDistributorRequest req;
req.reqId = g_random->randomUniqueID();
TraceEvent("ClusterController_DataDistributorRecruit", req.reqId).detail("Addr", data_distributor.worker.first.address());

ErrorOr<DataDistributorInterface> distributor = wait( data_distributor.worker.first.dataDistributor.getReplyUnlessFailedFor(req, SERVER_KNOBS->WAIT_FOR_DISTRIBUTOR_JOIN_DELAY, 0) );
if (distributor.present()) {
TraceEvent("ClusterController_DataDistributorRecruited", req.reqId).detail("Addr", data_distributor.worker.first.address());
return distributor.get();
}
}
catch (Error& e) {
TraceEvent("ClusterController_DataDistributorRecruitError", req.reqId).error(e);
if ( e.code() != error_code_no_more_servers ) {
throw;
}
}
wait( delay(SERVER_KNOBS->ATTEMPT_RECRUITMENT_DELAY) );
}
}

ACTOR Future<Void> waitDDRejoinOrStartDD( ClusterControllerData *self, ClusterControllerFullInterface *clusterInterface ) {
state Future<Void> initialDelay = delay(SERVER_KNOBS->WAIT_FOR_DISTRIBUTOR_JOIN_DELAY);

// wait for a while to see if existing data distributor will join.
loop choose {
when ( wait(initialDelay) ) { break; }
when ( wait(self->db.serverInfo->onChange()) ) { // Rejoins via worker registration
if ( self->db.serverInfo->get().distributor.present() ) {
TraceEvent("ClusterController_InfoChange", self->id)
.detail("DataDistributorID", self->db.serverInfo->get().distributor.get().id());
break;
}
}
}

loop {
if ( self->db.serverInfo->get().distributor.present() ) {
wait( waitFailureClient( self->db.serverInfo->get().distributor.get().waitFailure, SERVER_KNOBS->DD_FAILURE_TIME ) );
TraceEvent("ClusterController", self->id)
.detail("DataDistributorDied", self->db.serverInfo->get().distributor.get().id());
self->db.clearDistributor();
} else {
DataDistributorInterface distributorInterf = wait( startDataDistributor(self) );
self->db.setDistributor( distributorInterf );
}
}
}

ACTOR Future<Void> clusterControllerCore( ClusterControllerFullInterface interf, Future<Void> leaderFail, ServerCoordinators coordinators, LocalityData locality ) {
state ClusterControllerData self( interf, locality );
state Future<Void> coordinationPingDelay = delay( SERVER_KNOBS->WORKER_COORDINATION_PING_DELAY );
state uint64_t step = 0;
state PromiseStream<Future<Void>> addActor;
state Future<ErrorOr<Void>> error = errorOr( actorCollection( addActor.getFuture() ) );
state Future<ErrorOr<Void>> error = errorOr( actorCollection( self.addActor.getFuture() ) );

auto pSelf = &self;
addActor.send( failureDetectionServer( self.id, &self.db, interf.clientInterface.failureMonitoring.getFuture() ) );
addActor.send( clusterWatchDatabase( &self, &self.db ) ); // Start the master database
addActor.send( self.updateWorkerList.init( self.db.db ) );
addActor.send( statusServer( interf.clientInterface.databaseStatus.getFuture(), &self, coordinators));
addActor.send( timeKeeper(&self) );
addActor.send( monitorProcessClasses(&self) );
addActor.send( monitorClientTxnInfoConfigs(&self.db) );
addActor.send( updatedChangingDatacenters(&self) );
addActor.send( updatedChangedDatacenters(&self) );
addActor.send( updateDatacenterVersionDifference(&self) );
self.addActor.send( failureDetectionServer( self.id, &self.db, interf.clientInterface.failureMonitoring.getFuture() ) );
self.addActor.send( clusterWatchDatabase( &self, &self.db ) ); // Start the master database
self.addActor.send( self.updateWorkerList.init( self.db.db ) );
self.addActor.send( statusServer( interf.clientInterface.databaseStatus.getFuture(), &self, coordinators));
self.addActor.send( timeKeeper(&self) );
self.addActor.send( monitorProcessClasses(&self) );
self.addActor.send( monitorClientTxnInfoConfigs(&self.db) );
self.addActor.send( updatedChangingDatacenters(&self) );
self.addActor.send( updatedChangedDatacenters(&self) );
self.addActor.send( updateDatacenterVersionDifference(&self) );
self.addActor.send( waitDDRejoinOrStartDD(&self, &interf) );
//printf("%s: I am the cluster controller\n", g_network->getLocalAddress().toString().c_str());

loop choose {
@@ -2210,13 +2356,13 @@ ACTOR Future<Void> clusterControllerCore( ClusterControllerFullInterface interf,
return Void();
}
when( OpenDatabaseRequest req = waitNext( interf.clientInterface.openDatabase.getFuture() ) ) {
addActor.send( clusterOpenDatabase( &self.db, req.knownClientInfoID, req.issues.toString(), req.supportedVersions, req.traceLogGroup, req.reply ) );
self.addActor.send( clusterOpenDatabase( &self.db, req.knownClientInfoID, req.issues.toString(), req.supportedVersions, req.traceLogGroup, req.reply ) );
}
when( RecruitFromConfigurationRequest req = waitNext( interf.recruitFromConfiguration.getFuture() ) ) {
addActor.send( clusterRecruitFromConfiguration( &self, req ) );
self.addActor.send( clusterRecruitFromConfiguration( &self, req ) );
}
when( RecruitRemoteFromConfigurationRequest req = waitNext( interf.recruitRemoteFromConfiguration.getFuture() ) ) {
addActor.send( clusterRecruitRemoteFromConfiguration( &self, req ) );
self.addActor.send( clusterRecruitRemoteFromConfiguration( &self, req ) );
}
when( RecruitStorageRequest req = waitNext( interf.recruitStorage.getFuture() ) ) {
clusterRecruitStorage( &self, req );
@@ -2271,7 +2417,7 @@ ACTOR Future<Void> clusterControllerCore( ClusterControllerFullInterface interf,
clusterRegisterMaster( &self, req );
}
when( GetServerDBInfoRequest req = waitNext( interf.getServerDBInfo.getFuture() ) ) {
addActor.send( clusterGetServerInfo( &self.db, req.knownServerInfoID, req.issues.toString(), req.incompatiblePeers, req.reply ) );
self.addActor.send( clusterGetServerInfo( &self.db, req.knownServerInfoID, req.issues.toString(), req.incompatiblePeers, req.reply ) );
}
when( wait( leaderFail ) ) {
// We are no longer the leader if this has changed.
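The switch from a function-local addActor stream to the self.addActor member means helper code anywhere on ClusterControllerData can launch background work whose lifetime and errors are owned by the single actorCollection created in clusterControllerCore. The pattern in isolation looks like this (a generic sketch using the same flow primitives, not FoundationDB-specific logic):

// Generic sketch of the PromiseStream<Future<Void>> + actorCollection ownership pattern.
struct Owner {
	PromiseStream<Future<Void>> addActor;            // anyone holding Owner can enqueue background work
};

ACTOR Future<Void> runOwner( Owner* self ) {
	// actorCollection drains the stream, keeps every enqueued future alive, and surfaces the first error.
	state Future<ErrorOr<Void>> error = errorOr( actorCollection( self->addActor.getFuture() ) );
	self->addActor.send( delay(1.0) );               // background task #1
	self->addActor.send( delay(2.0) );               // background task #2
	ErrorOr<Void> result = wait( error );            // resolves only if some task throws
	if (result.isError()) throw result.getError();
	return Void();
}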

@@ -26,6 +26,7 @@
#include "fdbclient/StorageServerInterface.h"
#include "fdbclient/MasterProxyInterface.h"
#include "fdbclient/DatabaseConfiguration.h"
#include "fdbserver/DataDistributorInterface.h"
#include "fdbserver/MasterInterface.h"
#include "fdbserver/RecoveryState.h"
#include "fdbserver/TLogInterface.h"
@@ -166,15 +167,16 @@ struct RegisterWorkerRequest {
ProcessClass processClass;
ClusterControllerPriorityInfo priorityInfo;
Generation generation;
Optional<DataDistributorInterface> distributorInterf;
ReplyPromise<RegisterWorkerReply> reply;

RegisterWorkerRequest() : priorityInfo(ProcessClass::UnsetFit, false, ClusterControllerPriorityInfo::FitnessUnknown) {}
RegisterWorkerRequest(WorkerInterface wi, ProcessClass initialClass, ProcessClass processClass, ClusterControllerPriorityInfo priorityInfo, Generation generation) :
wi(wi), initialClass(initialClass), processClass(processClass), priorityInfo(priorityInfo), generation(generation) {}
RegisterWorkerRequest(WorkerInterface wi, ProcessClass initialClass, ProcessClass processClass, ClusterControllerPriorityInfo priorityInfo, Generation generation, Optional<DataDistributorInterface> ddInterf) :
wi(wi), initialClass(initialClass), processClass(processClass), priorityInfo(priorityInfo), generation(generation), distributorInterf(ddInterf) {}

template <class Ar>
void serialize( Ar& ar ) {
serializer(ar, wi, initialClass, processClass, priorityInfo, generation, reply);
serializer(ar, wi, initialClass, processClass, priorityInfo, generation, distributorInterf, reply);
}
};
(File diff suppressed because it is too large.)

@@ -200,17 +200,6 @@ struct InitialDataDistribution : ReferenceCounted<InitialDataDistribution> {
vector<DDShardInfo> shards;
};

Future<Void> dataDistribution(
Reference<AsyncVar<struct ServerDBInfo>> const& db,
MasterInterface const& mi, DatabaseConfiguration const& configuration,
PromiseStream< std::pair<UID, Optional<StorageServerInterface>> > const& serverChanges,
Reference<ILogSystem> const& logSystem,
Version const& recoveryCommitVersion,
std::vector<Optional<Key>> const& primaryDcId,
std::vector<Optional<Key>> const& remoteDcIds,
double* const& lastLimited,
Future<Void> const& remoteRecovered);

Future<Void> dataDistributionTracker(
Reference<InitialDataDistribution> const& initData,
Database const& cx,
@@ -220,7 +209,7 @@ Future<Void> dataDistributionTracker(
FutureStream<Promise<int64_t>> const& getAverageShardBytes,
Promise<Void> const& readyToStart,
Reference<AsyncVar<bool>> const& zeroHealthyTeams,
UID const& masterId);
UID const& distributorId);

Future<Void> dataDistributionQueue(
Database const& cx,
@@ -232,10 +221,9 @@ Future<Void> dataDistributionQueue(
Reference<ShardsAffectedByTeamFailure> const& shardsAffectedByTeamFailure,
MoveKeysLock const& lock,
PromiseStream<Promise<int64_t>> const& getAverageShardBytes,
MasterInterface const& mi,
UID const& distributorId,
int const& teamSize,
double* const& lastLimited,
Version const& recoveryVersion);
double* const& lastLimited);

//Holds the permitted size and IO Bounds for a shard
struct ShardSizeBounds {

@@ -331,10 +331,9 @@ void complete( RelocateData const& relocation, std::map<UID, Busyness> & busymap
Future<Void> dataDistributionRelocator( struct DDQueueData* const& self, RelocateData const& rd );

struct DDQueueData {
MasterInterface mi;
UID distributorId;
MoveKeysLock lock;
Database cx;
Version recoveryVersion;

std::vector<TeamCollectionInterface> teamCollections;
Reference<ShardsAffectedByTeamFailure> shardsAffectedByTeamFailure;
@@ -394,13 +393,13 @@ struct DDQueueData {
priority_relocations[priority]--;
}

DDQueueData( MasterInterface mi, MoveKeysLock lock, Database cx, std::vector<TeamCollectionInterface> teamCollections,
DDQueueData( UID mid, MoveKeysLock lock, Database cx, std::vector<TeamCollectionInterface> teamCollections,
Reference<ShardsAffectedByTeamFailure> sABTF, PromiseStream<Promise<int64_t>> getAverageShardBytes,
int teamSize, PromiseStream<RelocateShard> output, FutureStream<RelocateShard> input, PromiseStream<GetMetricsRequest> getShardMetrics, double* lastLimited, Version recoveryVersion ) :
int teamSize, PromiseStream<RelocateShard> output, FutureStream<RelocateShard> input, PromiseStream<GetMetricsRequest> getShardMetrics, double* lastLimited ) :
activeRelocations( 0 ), queuedRelocations( 0 ), bytesWritten ( 0 ), teamCollections( teamCollections ),
shardsAffectedByTeamFailure( sABTF ), getAverageShardBytes( getAverageShardBytes ), mi( mi ), lock( lock ),
shardsAffectedByTeamFailure( sABTF ), getAverageShardBytes( getAverageShardBytes ), distributorId( mid ), lock( lock ),
cx( cx ), teamSize( teamSize ), output( output ), input( input ), getShardMetrics( getShardMetrics ), startMoveKeysParallelismLock( SERVER_KNOBS->DD_MOVE_KEYS_PARALLELISM ),
finishMoveKeysParallelismLock( SERVER_KNOBS->DD_MOVE_KEYS_PARALLELISM ), lastLimited(lastLimited), recoveryVersion(recoveryVersion),
finishMoveKeysParallelismLock( SERVER_KNOBS->DD_MOVE_KEYS_PARALLELISM ), lastLimited(lastLimited),
suppressIntervals(0), lastInterval(0), unhealthyRelocations(0), rawProcessingUnhealthy( new AsyncVar<bool>(false) ) {}

void validate() {
@@ -506,7 +505,7 @@ struct DDQueueData {
}
}

ACTOR Future<Void> getSourceServersForRange( Database cx, MasterInterface mi, RelocateData input, PromiseStream<RelocateData> output ) {
ACTOR Future<Void> getSourceServersForRange( Database cx, RelocateData input, PromiseStream<RelocateData> output ) {
state std::set<UID> servers;
state Transaction tr(cx);

@@ -637,14 +636,14 @@ struct DDQueueData {
rrs.keys = affectedQueuedItems[r];

rrs.interval = TraceInterval("QueuedRelocation");
/*TraceEvent(rrs.interval.begin(), mi.id());
/*TraceEvent(rrs.interval.begin(), distributorId);
.detail("KeyBegin", printable(rrs.keys.begin)).detail("KeyEnd", printable(rrs.keys.end))
.detail("Priority", rrs.priority).detail("WantsNewServers", rrs.wantsNewServers);*/
queuedRelocations++;
startRelocation(rrs.priority);

fetchingSourcesQueue.insert( rrs );
getSourceActors.insert( rrs.keys, getSourceServersForRange( cx, mi, rrs, fetchSourceServersComplete ) );
getSourceActors.insert( rrs.keys, getSourceServersForRange( cx, rrs, fetchSourceServersComplete ) );
} else {
RelocateData newData( rrs );
newData.keys = affectedQueuedItems[r];
@@ -657,7 +656,7 @@ struct DDQueueData {
if( serverQueue.erase(rrs) > 0 ) {
if( !foundActiveRelocation ) {
newData.interval = TraceInterval("QueuedRelocation");
/*TraceEvent(newData.interval.begin(), mi.id());
/*TraceEvent(newData.interval.begin(), distributorId);
.detail("KeyBegin", printable(newData.keys.begin)).detail("KeyEnd", printable(newData.keys.end))
.detail("Priority", newData.priority).detail("WantsNewServers", newData.wantsNewServers);*/
queuedRelocations++;
@@ -677,14 +676,14 @@ struct DDQueueData {
}
}

/*TraceEvent("ReceivedRelocateShard", mi.id())
/*TraceEvent("ReceivedRelocateShard", distributorId)
.detail("KeyBegin", printable(rd.keys.begin))
.detail("KeyEnd", printable(rd.keys.end))
.detail("Priority", rd.priority)
.detail("AffectedRanges", affectedQueuedItems.size()); */
}

void completeSourceFetch( RelocateData results ) {
void completeSourceFetch( const RelocateData& results ) {
ASSERT( fetchingSourcesQueue.count( results ) );

//logRelocation( results, "GotSourceServers" );
@@ -696,12 +695,12 @@ struct DDQueueData {
}
}

void logRelocation( RelocateData rd, const char *title ) {
void logRelocation( const RelocateData& rd, const char *title ) {
std::string busyString;
for(int i = 0; i < rd.src.size() && i < teamSize * 2; i++)
busyString += describe(rd.src[i]) + " - (" + busymap[ rd.src[i] ].toString() + "); ";

TraceEvent(title, mi.id())
TraceEvent(title, distributorId)
.detail("KeyBegin", printable(rd.keys.begin))
.detail("KeyEnd", printable(rd.keys.end))
.detail("Priority", rd.priority)
@@ -759,7 +758,7 @@ struct DDQueueData {
!rd.keys.contains( it->range() ) &&
it->value().priority >= rd.priority &&
rd.priority < PRIORITY_TEAM_UNHEALTHY ) {
/*TraceEvent("OverlappingInFlight", mi.id())
/*TraceEvent("OverlappingInFlight", distributorId)
.detail("KeyBegin", printable(it->value().keys.begin))
.detail("KeyEnd", printable(it->value().keys.end))
.detail("Priority", it->value().priority); */
@@ -792,7 +791,7 @@ struct DDQueueData {

//logRelocation( rd, "LaunchingRelocation" );

//TraceEvent(rd.interval.end(), mi.id()).detail("Result","Success");
//TraceEvent(rd.interval.end(), distributorId).detail("Result","Success");
queuedRelocations--;
finishRelocation(rd.priority);

@@ -832,7 +831,7 @@ struct DDQueueData {
TraceEvent(SevWarnAlways, "LaunchingQueueSlowx1000").detail("Elapsed", now() - startTime );

/*if( startedHere > 0 ) {
TraceEvent("StartedDDRelocators", mi.id())
TraceEvent("StartedDDRelocators", distributorId)
.detail("QueueSize", queuedRelocations)
.detail("StartedHere", startedHere)
.detail("ActiveRelocations", activeRelocations);
@@ -853,7 +852,7 @@ ACTOR Future<Void> dataDistributionRelocator( DDQueueData *self, RelocateData rd
state PromiseStream<RelocateData> dataTransferComplete( self->dataTransferComplete );
state PromiseStream<RelocateData> relocationComplete( self->relocationComplete );
state bool signalledTransferComplete = false;
state UID masterId = self->mi.id();
state UID distributorId = self->distributorId;
state ParallelTCInfo healthyDestinations;

state bool anyHealthy = false;
@@ -867,7 +866,7 @@ ACTOR Future<Void> dataDistributionRelocator( DDQueueData *self, RelocateData rd
self->suppressIntervals++;
}

TraceEvent(relocateShardInterval.begin(), masterId)
TraceEvent(relocateShardInterval.begin(), distributorId)
.detail("KeyBegin", printable(rd.keys.begin)).detail("KeyEnd", printable(rd.keys.end))
.detail("Priority", rd.priority).detail("RelocationID", relocateShardInterval.pairID).detail("SuppressedEventCount", self->suppressIntervals);

@@ -928,7 +927,7 @@ ACTOR Future<Void> dataDistributionRelocator( DDQueueData *self, RelocateData rd

TEST(true); //did not find a healthy destination team on the first attempt
stuckCount++;
TraceEvent(stuckCount > 50 ? SevWarnAlways : SevWarn, "BestTeamStuck", masterId)
TraceEvent(stuckCount > 50 ? SevWarnAlways : SevWarn, "BestTeamStuck", distributorId)
.suppressFor(1.0)
.detail("Count", stuckCount)
.detail("TeamCollectionId", tciIndex)
@@ -981,14 +980,14 @@ ACTOR Future<Void> dataDistributionRelocator( DDQueueData *self, RelocateData rd
//FIXME: do not add data in flight to servers that were already in the src.
healthyDestinations.addDataInFlightToTeam(+metrics.bytes);

TraceEvent(relocateShardInterval.severity, "RelocateShardHasDestination", masterId)
TraceEvent(relocateShardInterval.severity, "RelocateShardHasDestination", distributorId)
.detail("PairId", relocateShardInterval.pairID)
.detail("DestinationTeam", describe(destIds))
.detail("ExtraIds", describe(extraIds));

state Error error = success();
state Promise<Void> dataMovementComplete;
state Future<Void> doMoveKeys = moveKeys(self->cx, rd.keys, destIds, healthyIds, self->lock, dataMovementComplete, &self->startMoveKeysParallelismLock, &self->finishMoveKeysParallelismLock, self->recoveryVersion, self->teamCollections.size() > 1, relocateShardInterval.pairID );
state Future<Void> doMoveKeys = moveKeys(self->cx, rd.keys, destIds, healthyIds, self->lock, dataMovementComplete, &self->startMoveKeysParallelismLock, &self->finishMoveKeysParallelismLock, self->teamCollections.size() > 1, relocateShardInterval.pairID );
state Future<Void> pollHealth = signalledTransferComplete ? Never() : delay( SERVER_KNOBS->HEALTH_POLL_TIME, TaskDataDistributionLaunch );
try {
loop {
@@ -999,7 +998,7 @@ ACTOR Future<Void> dataDistributionRelocator( DDQueueData *self, RelocateData rd
healthyIds.insert(healthyIds.end(), extraIds.begin(), extraIds.end());
extraIds.clear();
ASSERT(totalIds == destIds.size()); // Sanity check the destIDs before we move keys
doMoveKeys = moveKeys(self->cx, rd.keys, destIds, healthyIds, self->lock, Promise<Void>(), &self->startMoveKeysParallelismLock, &self->finishMoveKeysParallelismLock, self->recoveryVersion, self->teamCollections.size() > 1, relocateShardInterval.pairID );
doMoveKeys = moveKeys(self->cx, rd.keys, destIds, healthyIds, self->lock, Promise<Void>(), &self->startMoveKeysParallelismLock, &self->finishMoveKeysParallelismLock, self->teamCollections.size() > 1, relocateShardInterval.pairID );
} else {
self->fetchKeysComplete.insert( rd );
break;
@@ -1027,7 +1026,7 @@ ACTOR Future<Void> dataDistributionRelocator( DDQueueData *self, RelocateData rd
error = e;
}

//TraceEvent("RelocateShardFinished", masterId).detail("RelocateId", relocateShardInterval.pairID);
//TraceEvent("RelocateShardFinished", distributorId).detail("RelocateId", relocateShardInterval.pairID);

if( error.code() != error_code_move_to_removed_server ) {
if( !error.code() ) {
@@ -1042,7 +1041,7 @@ ACTOR Future<Void> dataDistributionRelocator( DDQueueData *self, RelocateData rd

// onFinished.send( rs );
if( !error.code() ) {
TraceEvent(relocateShardInterval.end(), masterId).detail("Result","Success");
TraceEvent(relocateShardInterval.end(), distributorId).detail("Result","Success");
if(rd.keys.begin == keyServersPrefix) {
TraceEvent("MovedKeyServerKeys").detail("Dest", describe(destIds)).trackLatest("MovedKeyServers");
}
@@ -1066,7 +1065,7 @@ ACTOR Future<Void> dataDistributionRelocator( DDQueueData *self, RelocateData rd
}
}
} catch (Error& e) {
TraceEvent(relocateShardInterval.end(), masterId).error(e, true);
TraceEvent(relocateShardInterval.end(), distributorId).error(e, true);
if( !signalledTransferComplete )
dataTransferComplete.send( rd );

@@ -1100,7 +1099,7 @@ ACTOR Future<bool> rebalanceTeams( DDQueueData* self, int priority, Reference<ID
std::vector<KeyRange> shards = self->shardsAffectedByTeamFailure->getShardsFor( ShardsAffectedByTeamFailure::Team( sourceTeam->getServerIDs(), primary ) );
for( int i = 0; i < shards.size(); i++ ) {
if( moveShard == shards[i] ) {
TraceEvent(priority == PRIORITY_REBALANCE_OVERUTILIZED_TEAM ? "BgDDMountainChopper" : "BgDDValleyFiller", self->mi.id())
TraceEvent(priority == PRIORITY_REBALANCE_OVERUTILIZED_TEAM ? "BgDDMountainChopper" : "BgDDValleyFiller", self->distributorId)
.detail("SourceBytes", sourceBytes)
.detail("DestBytes", destBytes)
.detail("ShardBytes", metrics.bytes)
@@ -1195,12 +1194,11 @@ ACTOR Future<Void> dataDistributionQueue(
Reference<ShardsAffectedByTeamFailure> shardsAffectedByTeamFailure,
MoveKeysLock lock,
PromiseStream<Promise<int64_t>> getAverageShardBytes,
MasterInterface mi,
UID distributorId,
int teamSize,
double* lastLimited,
Version recoveryVersion)
double* lastLimited)
{
state DDQueueData self( mi, lock, cx, teamCollections, shardsAffectedByTeamFailure, getAverageShardBytes, teamSize, output, input, getShardMetrics, lastLimited, recoveryVersion );
state DDQueueData self( distributorId, lock, cx, teamCollections, shardsAffectedByTeamFailure, getAverageShardBytes, teamSize, output, input, getShardMetrics, lastLimited );
state std::set<UID> serversToLaunchFrom;
state KeyRange keysToLaunchFrom;
state RelocateData launchData;
@@ -1286,7 +1284,7 @@ ACTOR Future<Void> dataDistributionQueue(
highPriorityRelocations += it->second;
}

TraceEvent("MovingData", mi.id())
TraceEvent("MovingData", distributorId)
.detail( "InFlight", self.activeRelocations )
.detail( "InQueue", self.queuedRelocations )
.detail( "AverageShardSize", req.getFuture().isReady() ? req.getFuture().get() : -1 )
@@ -1303,7 +1301,7 @@ ACTOR Future<Void> dataDistributionQueue(
} catch (Error& e) {
if (e.code() != error_code_broken_promise && // FIXME: Get rid of these broken_promise errors every time we are killed by the master dying
e.code() != error_code_movekeys_conflict)
TraceEvent(SevError, "DataDistributionQueueError", mi.id()).error(e);
TraceEvent(SevError, "DataDistributionQueueError", distributorId).error(e);
throw e;
}
}

@@ -64,7 +64,7 @@ struct ShardTrackedData {

struct DataDistributionTracker {
Database cx;
UID masterId;
UID distributorId;
KeyRangeMap< ShardTrackedData > shards;
ActorCollection sizeChanges;

@@ -79,8 +79,8 @@ struct DataDistributionTracker {
Promise<Void> readyToStart;
Reference<AsyncVar<bool>> anyZeroHealthyTeams;

DataDistributionTracker(Database cx, UID masterId, Promise<Void> const& readyToStart, PromiseStream<RelocateShard> const& output, Reference<ShardsAffectedByTeamFailure> shardsAffectedByTeamFailure, Reference<AsyncVar<bool>> anyZeroHealthyTeams)
: cx(cx), masterId( masterId ), dbSizeEstimate( new AsyncVar<int64_t>() ),
DataDistributionTracker(Database cx, UID distributorId, Promise<Void> const& readyToStart, PromiseStream<RelocateShard> const& output, Reference<ShardsAffectedByTeamFailure> shardsAffectedByTeamFailure, Reference<AsyncVar<bool>> anyZeroHealthyTeams)
: cx(cx), distributorId( distributorId ), dbSizeEstimate( new AsyncVar<int64_t>() ),
maxShardSize( new AsyncVar<Optional<int64_t>>() ),
sizeChanges(false), readyToStart(readyToStart), output( output ), shardsAffectedByTeamFailure(shardsAffectedByTeamFailure), anyZeroHealthyTeams(anyZeroHealthyTeams) {}

@@ -328,7 +328,7 @@ ACTOR Future<Void> shardSplitter(
int numShards = splitKeys.size() - 1;

if( g_random->random01() < 0.01 ) {
TraceEvent("RelocateShardStartSplitx100", self->masterId)
TraceEvent("RelocateShardStartSplitx100", self->distributorId)
.detail("Begin", printable(keys.begin))
.detail("End", printable(keys.end))
.detail("MaxBytes", shardBounds.max.bytes)
@@ -449,7 +449,7 @@ Future<Void> shardMerger(
//restarting shard tracker will derefenced values in the shard map, so make a copy
KeyRange mergeRange = merged;

TraceEvent("RelocateShardMergeMetrics", self->masterId)
TraceEvent("RelocateShardMergeMetrics", self->distributorId)
.detail("OldKeys", printable(keys))
.detail("NewKeys", printable(mergeRange))
.detail("EndingSize", endingStats.bytes)
@@ -495,7 +495,7 @@ ACTOR Future<Void> shardEvaluator(
}
}

/*TraceEvent("ShardEvaluator", self->masterId)
/*TraceEvent("ShardEvaluator", self->distributorId)
.detail("TrackerId", trackerID)
.detail("ShouldSplit", shouldSplit)
.detail("ShouldMerge", shouldMerge)
@@ -531,7 +531,7 @@ ACTOR Future<Void> shardTracker(
// Since maxShardSize will become present for all shards at once, avoid slow tasks with a short delay
wait( delay( 0, TaskDataDistribution ) );

/*TraceEvent("ShardTracker", self->masterId)
/*TraceEvent("ShardTracker", self->distributorId)
.detail("Begin", printable(keys.begin))
.detail("End", printable(keys.end))
.detail("TrackerID", trackerID)
@@ -571,7 +571,7 @@ void restartShardTrackers( DataDistributionTracker* self, KeyRangeRef keys, Opti
// we can use the old size if it is available. This will be the case when merging shards.
if( startingSize.present() ) {
ASSERT( ranges.size() == 1 );
/*TraceEvent("ShardTrackerSizePreset", self->masterId)
/*TraceEvent("ShardTrackerSizePreset", self->distributorId)
.detail("Keys", printable(keys))
.detail("Size", startingSize.get().metrics.bytes)
.detail("Merges", startingSize.get().merges);*/
@@ -589,7 +589,7 @@ void restartShardTrackers( DataDistributionTracker* self, KeyRangeRef keys, Opti

ACTOR Future<Void> trackInitialShards(DataDistributionTracker *self, Reference<InitialDataDistribution> initData)
{
TraceEvent("TrackInitialShards", self->masterId).detail("InitialShardCount", initData->shards.size());
TraceEvent("TrackInitialShards", self->distributorId).detail("InitialShardCount", initData->shards.size());

//This line reduces the priority of shard initialization to prevent interference with failure monitoring.
//SOMEDAY: Figure out what this priority should actually be
@@ -659,9 +659,9 @@ ACTOR Future<Void> dataDistributionTracker(
FutureStream<Promise<int64_t>> getAverageShardBytes,
Promise<Void> readyToStart,
Reference<AsyncVar<bool>> anyZeroHealthyTeams,
UID masterId)
UID distributorId)
{
state DataDistributionTracker self(cx, masterId, readyToStart, output, shardsAffectedByTeamFailure, anyZeroHealthyTeams);
state DataDistributionTracker self(cx, distributorId, readyToStart, output, shardsAffectedByTeamFailure, anyZeroHealthyTeams);
state Future<Void> loggingTrigger = Void();
try {
wait( trackInitialShards( &self, initData ) );
@@ -672,7 +672,7 @@ ACTOR Future<Void> dataDistributionTracker(
req.send( self.maxShardSize->get().get() / 2 );
}
when( wait( loggingTrigger ) ) {
TraceEvent("DDTrackerStats", self.masterId)
TraceEvent("DDTrackerStats", self.distributorId)
.detail("Shards", self.shards.size())
.detail("TotalSizeBytes", self.dbSizeEstimate->get())
.trackLatest( "DDTrackerStats" );
@@ -685,7 +685,7 @@ ACTOR Future<Void> dataDistributionTracker(
when( wait( self.sizeChanges.getResult() ) ) {}
}
} catch (Error& e) {
TraceEvent(SevError, "DataDistributionTrackerError", self.masterId).error(e);
TraceEvent(SevError, "DataDistributionTrackerError", self.distributorId).error(e);
throw e;
}
}

@@ -0,0 +1,75 @@
/*
 * DataDistributorInterface.h
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2019 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef FDBSERVER_DATADISTRIBUTORINTERFACE_H
#define FDBSERVER_DATADISTRIBUTORINTERFACE_H

#include "fdbrpc/fdbrpc.h"
#include "fdbrpc/Locality.h"

struct DataDistributorInterface {
RequestStream<ReplyPromise<Void>> waitFailure;
RequestStream<struct GetRateInfoRequest> getRateInfo;
struct LocalityData locality;

DataDistributorInterface() {}
explicit DataDistributorInterface(const struct LocalityData& l) : locality(l) {}

void initEndpoints() {}
UID id() const { return getRateInfo.getEndpoint().token; }
NetworkAddress address() const { return getRateInfo.getEndpoint().getPrimaryAddress(); }
bool operator== (const DataDistributorInterface& r) const {
return id() == r.id();
}
bool operator!= (const DataDistributorInterface& r) const {
return !(*this == r);
}

template <class Archive>
void serialize(Archive& ar) {
serializer(ar, waitFailure, getRateInfo, locality);
}
};

struct GetRateInfoRequest {
UID requesterID;
int64_t totalReleasedTransactions;
ReplyPromise<struct GetRateInfoReply> reply;

GetRateInfoRequest() {}
GetRateInfoRequest( UID const& requesterID, int64_t totalReleasedTransactions ) : requesterID(requesterID), totalReleasedTransactions(totalReleasedTransactions) {}

template <class Ar>
void serialize(Ar& ar) {
serializer(ar, requesterID, totalReleasedTransactions, reply);
}
};

struct GetRateInfoReply {
double transactionRate;
double leaseDuration;

template <class Ar>
void serialize(Ar& ar) {
serializer(ar, transactionRate, leaseDuration);
}
};

#endif //FDBSERVER_DATADISTRIBUTORINTERFACE_H
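The getRateInfo stream pairs GetRateInfoRequest with GetRateInfoReply, so a caller that wants a transaction-rate lease talks to the distributor like any other flow endpoint. A hypothetical caller loop (only the interface and message types above come from this file; rateFetcher and its parameters are illustrative):

// Illustrative caller: ask the data distributor for a rate, honor the lease, repeat.
ACTOR Future<Void> rateFetcher( DataDistributorInterface dd, UID myId, int64_t* totalReleased, double* rate ) {
	loop {
		GetRateInfoReply reply = wait( dd.getRateInfo.getReply( GetRateInfoRequest( myId, *totalReleased ) ) );
		*rate = reply.transactionRate;            // transactions per second the caller may release
		wait( delay( reply.leaseDuration / 2 ) ); // refresh well before the lease expires
	}
}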

@@ -22,6 +22,7 @@
#include "fdbrpc/IAsyncFile.h"
#include "fdbserver/Knobs.h"
#include "fdbrpc/simulator.h"
#include "flow/genericactors.actor.h"
#include "flow/actorcompiler.h" // This must be the last #include.

typedef bool(*compare_pages)(void*,void*);
@@ -109,7 +110,36 @@ private:
}
};

class RawDiskQueue_TwoFiles {
// We use a Tracked instead of a Reference when the shutdown/destructor code would need to wait().
template <typename T>
class Tracked {
protected:
struct TrackMe : NonCopyable {
T* self;
explicit TrackMe( T* self ) : self(self) {
self->actorCount++;
if (self->actorCount == 1) self->actorCountIsZero.set(false);
}
~TrackMe() {
self->actorCount--;
if (self->actorCount == 0) self->actorCountIsZero.set(true);
}
};

Future<Void> onSafeToDestruct() {
if (actorCountIsZero.get()) {
return Void();
} else {
return actorCountIsZero.onChange();
}
}

private:
int actorCount = 0;
AsyncVar<bool> actorCountIsZero = true;
};
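Tracked<T> generalizes the old recoveryActorCount bookkeeping: every actor that touches the queue pins it with a TrackMe for its whole lifetime, and the shutdown path simply waits for the count to drain. A sketch of the intended usage as a static member actor of the derived class (the actor body is hypothetical; readFirstAndLastPages and the shutdown hunk later in this diff use exactly this shape):

// Hypothetical static member actor of a class derived from Tracked<Self>, e.g. RawDiskQueue_TwoFiles:
ACTOR static Future<Void> exampleWork( RawDiskQueue_TwoFiles* self ) {
	state TrackMe trackMe( self );   // actorCount++, and actorCountIsZero flips to false
	wait( delay(0) );                // ... real work against self->files goes here ...
	return Void();                   // ~TrackMe(): actorCount--, possibly flipping actorCountIsZero back to true
}

// Shutdown side (see the hunk below that replaces the recoveryActorCount loop):
//   wait( self->onSafeToDestruct() );  // resolves once every TrackMe has been destroyed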
class RawDiskQueue_TwoFiles : public Tracked<RawDiskQueue_TwoFiles> {
public:
RawDiskQueue_TwoFiles( std::string basename, std::string fileExtension, UID dbgid, int64_t fileSizeWarningLimit )
: basename(basename), fileExtension(fileExtension), onError(delayed(error.getFuture())), onStopped(stopped.getFuture()),
@@ -121,6 +151,13 @@ public:
fileExtensionBytes = 8<<10;
files[0].dbgFilename = filename(0);
files[1].dbgFilename = filename(1);
// We issue reads into firstPages, so it needs to be 4k aligned.
firstPages.reserve(firstPages.arena(), 2);
void* pageMemory = operator new (sizeof(Page) * 3, firstPages.arena());
firstPages[0] = (Page*)((((uintptr_t)pageMemory + 4095) / 4096) * 4096);
memset(firstPages[0], 0, sizeof(Page));
firstPages[1] = (Page*)((uintptr_t)firstPages[0] + 4096);
memset(firstPages[1], 0, sizeof(Page));
stallCount.init(LiteralStringRef("RawDiskQueue.StallCount"));
}
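The alignment expression above rounds the raw allocation up to the next 4096-byte boundary; allocating three pages' worth of memory guarantees that two aligned, non-overlapping pages fit inside. A worked example with an assumed raw pointer value (and assuming sizeof(Page) is 4096, which the constants in this hunk imply):

If operator new returns pageMemory = 0x10000064 (not page aligned):
  firstPages[0] = ((0x10000064 + 4095) / 4096) * 4096 = 0x10001000   (next 4 KiB boundary)
  firstPages[1] = firstPages[0] + 4096                = 0x10002000
Both pages lie entirely within the 3 * sizeof(Page) = 12 KiB allocation starting at 0x10000064.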
@@ -143,6 +180,8 @@ public:

Future<Void> setPoppedPage( int file, int64_t page, int64_t debugSeq ) { return setPoppedPage(this, file, page, debugSeq); }

// FIXME: let the caller pass in where to write the data.
Future<Standalone<StringRef>> read(int file, int page, int nPages) { return read(this, file, page, nPages); }
Future<Standalone<StringRef>> readNextPage() { return readNextPage(this); }
Future<Void> truncateBeforeLastReadPage() { return truncateBeforeLastReadPage(this); }

@@ -178,6 +217,7 @@ public:
}
};
File files[2]; // After readFirstAndLastPages(), files[0] is logically before files[1] (pushes are always into files[1])
Standalone<VectorRef<Page*>> firstPages;

std::string basename;
std::string fileExtension;
@@ -202,20 +242,8 @@ public:

int64_t fileExtensionBytes;

AsyncMap<bool,int> recoveryActorCount;

Int64MetricHandle stallCount;

struct TrackMe : NonCopyable {
RawDiskQueue_TwoFiles* self;
TrackMe( RawDiskQueue_TwoFiles* self ) : self(self) {
self->recoveryActorCount.set(false, self->recoveryActorCount.get(false)+1);
}
~TrackMe() {
self->recoveryActorCount.set(false, self->recoveryActorCount.get(false)-1);
}
};

Future<Void> truncateFile(int file, int64_t pos) { return truncateFile(this, file, pos); }

Future<Void> push(StringRef pageData, vector<Reference<SyncQueue>>& toSync) {
@@ -243,6 +271,7 @@ public:

dbg_file0BeginSeq += files[0].size;
std::swap(files[0], files[1]);
std::swap(firstPages[0], firstPages[1]);
files[1].popped = 0;
writingPos = 0;
} else {
@@ -259,6 +288,10 @@ public:
}
}

if (writingPos == 0) {
*firstPages[1] = *(const Page*)pageData.begin();
}

/*TraceEvent("RDQWrite", this->dbgid).detail("File1name", files[1].dbgFilename).detail("File1size", files[1].size)
.detail("WritingPos", writingPos).detail("WritingBytes", pageData.size());*/
files[1].size = std::max( files[1].size, writingPos + pageData.size() );
@@ -299,7 +332,9 @@ public:
TEST(2==syncFiles.size()); // push spans both files
wait( pushed );

delete pageMem;
if (!g_network->isSimulated()) {
delete pageMem;
}
pageMem = 0;

Future<Void> sync = syncFiles[0]->onSync();
@@ -320,7 +355,9 @@ public:

committed.send(Void());
} catch (Error& e) {
delete pageMem;
if (!g_network->isSimulated()) {
delete pageMem;
}
TEST(true); // push error
TEST(2==syncFiles.size()); // push spanning both files error
TraceEvent(SevError, "RDQPushAndCommitError", dbgid).error(e, true).detail("InitialFilename0", filename);
@@ -411,8 +448,7 @@ public:
state Error error = success();
try {
wait(success(errorOr(self->lastCommit)));
while (self->recoveryActorCount.get(false))
wait( self->recoveryActorCount.onChange(false) );
wait( self->onSafeToDestruct() );

for(int i=0; i<2; i++)
self->files[i].f.clear();
@@ -443,12 +479,8 @@ public:

ACTOR static UNCANCELLABLE Future<Standalone<StringRef>> readFirstAndLastPages(RawDiskQueue_TwoFiles* self, compare_pages compare) {
state TrackMe trackMe(self);
state StringBuffer result( self->dbgid );

try {
result.alignReserve( sizeof(Page), sizeof(Page)*3 );
state Page* firstPage = (Page*)result.append(sizeof(Page)*3);

// Open both files or create both files
wait( openFiles(self) );

@@ -464,20 +496,19 @@ public:
}

// Read the first pages
memset(firstPage, 0, sizeof(Page)*2);
vector<Future<int>> reads;
for(int i=0; i<2; i++)
if( self->files[i].size > 0)
reads.push_back( self->files[i].f->read( &firstPage[i], sizeof(Page), 0 ) );
reads.push_back( self->files[i].f->read( self->firstPages[i], sizeof(Page), 0 ) );
wait( waitForAll(reads) );

// Determine which file comes first
if ( compare( &firstPage[1], &firstPage[0] ) ) {
std::swap( firstPage[0], firstPage[1] );
if ( compare( self->firstPages[1], self->firstPages[0] ) ) {
std::swap( self->firstPages[0], self->firstPages[1] );
std::swap( self->files[0], self->files[1] );
}

if ( !compare( &firstPage[1], &firstPage[1] ) ) {
if ( !compare( self->firstPages[1], self->firstPages[1] ) ) {
// Both files are invalid... the queue is empty!
// Begin pushing at the beginning of files[1]

@@ -498,12 +529,13 @@ public:
return Standalone<StringRef>();
}

// A page in files[1] is "valid" iff compare(&firstPage[1], page)
// A page in files[1] is "valid" iff compare(self->firstPages[1], page)
// Binary search to find a page in files[1] that is "valid" but the next page is not valid
// Invariant: the page at begin is valid, and the page at end is invalid
state int64_t begin = 0;
state int64_t end = self->files[1].size/sizeof(Page);
state Page *middlePage = &firstPage[2];
state Standalone<StringRef> middlePageAllocation = makeAlignedString(sizeof(Page), sizeof(Page));
state Page *middlePage = (Page*)middlePageAllocation.begin();
while ( begin + 1 != end ) {
state int64_t middle = (begin+end)/2;
ASSERT( middle > begin && middle < end ); // So the loop always changes begin or end
@@ -511,7 +543,7 @@ public:
int len = wait( self->files[1].f->read( middlePage, sizeof(Page), middle*sizeof(Page) ) );
ASSERT( len == sizeof(Page) );

bool middleValid = compare( &firstPage[1], middlePage );
bool middleValid = compare( self->firstPages[1], middlePage );

TraceEvent("RDQBS", self->dbgid).detail("Begin", begin).detail("End", end).detail("Middle", middle).detail("Valid", middleValid).detail("File0Name", self->files[0].dbgFilename);

@@ -522,16 +554,16 @@ public:
}
// Now by the invariant and the loop condition, begin is a valid page and begin+1 is an invalid page
// Check that begin+1 is invalid
int len = wait( self->files[1].f->read( &firstPage[2], sizeof(Page), (begin+1)*sizeof(Page) ) );
ASSERT( !(len == sizeof(Page) && compare( &firstPage[1], &firstPage[2] )) );
int len1 = wait( self->files[1].f->read( middlePage, sizeof(Page), (begin+1)*sizeof(Page) ) );
ASSERT( !(len1 == sizeof(Page) && compare( self->firstPages[1], middlePage )) );

// Read it
int len = wait( self->files[1].f->read( &firstPage[2], sizeof(Page), begin*sizeof(Page) ) );
ASSERT( len == sizeof(Page) && compare( &firstPage[1], &firstPage[2] ) );
int len2 = wait( self->files[1].f->read( middlePage, sizeof(Page), begin*sizeof(Page) ) );
ASSERT( len2 == sizeof(Page) && compare( self->firstPages[1], middlePage ) );

TraceEvent("RDQEndFound", self->dbgid).detail("File0Name", self->files[0].dbgFilename).detail("Pos", begin).detail("FileSize", self->files[1].size);

return result.str;
return middlePageAllocation;
} catch (Error& e) {
bool ok = e.code() == error_code_file_not_found;
TraceEvent(ok ? SevInfo : SevError, "RDQReadFirstAndLastPagesError", self->dbgid).error(e, true).detail("File0Name", self->files[0].dbgFilename);
@ -540,6 +572,16 @@ public:
|
|||
}
|
||||
}
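The recovery path above is an invariant-maintaining binary search: begin always points at a valid page and end at an invalid one, so each iteration halves the gap and the loop ends on the last valid page of files[1]. A minimal standalone sketch of that search; the valid() predicate stands in for comparePages against firstPages[1], and it assumes page 0 has already been checked to be valid:

#include <cstdint>
#include <functional>

int64_t lastValidPage( int64_t pageCount, const std::function<bool(int64_t)>& valid ) {
    int64_t begin = 0;          // invariant: valid(begin) holds
    int64_t end = pageCount;    // invariant: treated as invalid (one past the last page)
    while (begin + 1 != end) {
        int64_t middle = (begin + end) / 2;   // begin < middle < end, so progress is guaranteed
        if (valid(middle)) begin = middle;
        else               end = middle;
    }
    return begin;               // index of the last page for which valid() held
}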
|
||||
|
||||
ACTOR static Future<Standalone<StringRef>> read(RawDiskQueue_TwoFiles* self, int file, int pageOffset, int nPages) {
state TrackMe trackMe(self);
state const size_t bytesRequested = nPages * sizeof(Page);
state Standalone<StringRef> result = makeAlignedString(sizeof(Page), bytesRequested);
if (file == 1) ASSERT_WE_THINK(pageOffset * sizeof(Page) + bytesRequested <= self->writingPos );
int bytesRead = wait( self->files[file].f->read( mutateString(result), bytesRequested, pageOffset*sizeof(Page) ) );
ASSERT_WE_THINK(bytesRead == bytesRequested);
return result;
}
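The new read() helper requests whole pages at a page-aligned offset into an aligned buffer. For orientation only, the same request shape expressed with plain POSIX calls, assuming 4KiB pages; the actual code allocates with makeAlignedString and reads through IAsyncFile:

#include <unistd.h>
#include <cstdlib>

static constexpr size_t kPageSize = 4096;   // assumption; the real size is sizeof(Page)

ssize_t readWholePages( int fd, long pageOffset, int nPages, void** outBuf ) {
    void* buf = nullptr;
    if (posix_memalign(&buf, kPageSize, nPages * kPageSize) != 0) return -1;  // page-aligned buffer
    ssize_t n = pread(fd, buf, nPages * kPageSize, (off_t)pageOffset * kPageSize);
    *outBuf = buf;              // caller frees; n is expected to equal nPages * kPageSize
    return n;
}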
|
||||
|
||||
Future<int> fillReadingBuffer() {
|
||||
// If we're right at the end of a file...
|
||||
if ( readingPage*sizeof(Page) >= (size_t)files[readingFile].size ) {
|
||||
|
@ -599,6 +641,9 @@ public:
|
|||
state TrackMe trackMe(self);
|
||||
TraceEvent("DQTruncateFile", self->dbgid).detail("File", file).detail("Pos", pos).detail("File0Name", self->files[0].dbgFilename);
|
||||
state Reference<IAsyncFile> f = self->files[file].f; // Hold onto a reference in the off-chance that the DQ is removed from underneath us.
|
||||
if (pos == 0) {
|
||||
memset(self->firstPages[file], 0, _PAGE_SIZE);
|
||||
}
|
||||
wait( f->zeroRange( pos, self->files[file].size-pos ) );
|
||||
wait(self->files[file].syncQueue->onSync());
|
||||
// We intentionally don't return the f->zero future, so that TrackMe is destructed after f->zero finishes.
|
||||
|
@ -629,6 +674,7 @@ public:
|
|||
|
||||
if (swap) {
|
||||
std::swap(self->files[0], self->files[1]);
|
||||
std::swap(self->firstPages[0], self->firstPages[1]);
|
||||
self->files[0].popped = self->files[0].size;
|
||||
}
|
||||
|
||||
|
@ -641,11 +687,12 @@ public:
|
|||
}
|
||||
};
|
||||
|
||||
class DiskQueue : public IDiskQueue {
|
||||
class DiskQueue : public IDiskQueue, public Tracked<DiskQueue> {
|
||||
public:
|
||||
// FIXME: Is setting lastCommittedSeq to -1 instead of 0 necessary?
|
||||
DiskQueue( std::string basename, std::string fileExtension, UID dbgid, int64_t fileSizeWarningLimit )
|
||||
: rawQueue( new RawDiskQueue_TwoFiles(basename, fileExtension, dbgid, fileSizeWarningLimit) ), dbgid(dbgid), anyPopped(false), nextPageSeq(0), poppedSeq(0), lastPoppedSeq(0),
|
||||
nextReadLocation(-1), readBufPage(NULL), readBufPos(0), pushed_page_buffer(NULL), recovered(false), lastCommittedSeq(0), warnAlwaysForMemory(true)
|
||||
nextReadLocation(-1), readBufPage(NULL), readBufPos(0), pushed_page_buffer(NULL), recovered(false), initialized(false), lastCommittedSeq(-1), warnAlwaysForMemory(true)
|
||||
{
|
||||
}
|
||||
|
||||
|
@ -666,6 +713,7 @@ public:
|
|||
}
|
||||
return endLocation();
|
||||
}
|
||||
|
||||
virtual void pop( location upTo ) {
|
||||
ASSERT( !upTo.hi );
|
||||
ASSERT( !recovered || upTo.lo <= endLocation() );
|
||||
|
@ -685,6 +733,8 @@ public:
|
|||
}
|
||||
}
|
||||
|
||||
virtual Future<Standalone<StringRef>> read(location from, location to) { return read(this, from, to); }
|
||||
|
||||
int getMaxPayload() {
|
||||
return Page::maxPayload;
|
||||
}
|
||||
|
@ -728,6 +778,9 @@ public:
|
|||
|
||||
lastCommittedSeq = backPage().endSeq();
|
||||
auto f = rawQueue->pushAndCommit( pushed_page_buffer->ref(), pushed_page_buffer, poppedSeq/sizeof(Page) - lastPoppedSeq/sizeof(Page) );
|
||||
if (g_network->isSimulated()) {
|
||||
verifyCommit(this, f, pushed_page_buffer, ((Page*)pushed_page_buffer->ref().begin())->seq, lastCommittedSeq);
|
||||
}
|
||||
lastPoppedSeq = poppedSeq;
|
||||
pushed_page_buffer = 0;
|
||||
return f;
|
||||
|
@ -737,17 +790,27 @@ public:
|
|||
rawQueue->stall();
|
||||
}
|
||||
|
||||
virtual Future<bool> initializeRecovery() { return initializeRecovery( this ); }
|
||||
virtual Future<Standalone<StringRef>> readNext( int bytes ) { return readNext(this, bytes); }
|
||||
|
||||
// FIXME: getNextReadLocation should ASSERT( initialized ), but the memory storage engine needs
|
||||
// to be changed to understand the new initializeRecovery protocol.
|
||||
virtual location getNextReadLocation() { return nextReadLocation; }
|
||||
|
||||
virtual Future<Void> getError() { return rawQueue->getError(); }
|
||||
virtual Future<Void> onClosed() { return rawQueue->onClosed(); }
|
||||
|
||||
virtual void dispose() {
|
||||
TraceEvent("DQDestroy", dbgid).detail("LastPoppedSeq", lastPoppedSeq).detail("PoppedSeq", poppedSeq).detail("NextPageSeq", nextPageSeq).detail("File0Name", rawQueue->files[0].dbgFilename);
|
||||
rawQueue->dispose();
|
||||
delete this;
|
||||
dispose(this);
|
||||
}
|
||||
ACTOR static void dispose(DiskQueue* self) {
|
||||
wait( self->onSafeToDestruct() );
|
||||
TraceEvent("DQDestroyDone", self->dbgid).detail("File0Name", self->rawQueue->files[0].dbgFilename);
|
||||
self->rawQueue->dispose();
|
||||
delete self;
|
||||
}
|
||||
|
||||
virtual void close() {
|
||||
TraceEvent("DQClose", dbgid)
|
||||
.detail("LastPoppedSeq", lastPoppedSeq)
|
||||
|
@ -755,8 +818,13 @@ public:
|
|||
.detail("NextPageSeq", nextPageSeq)
|
||||
.detail("PoppedCommitted", rawQueue->dbg_file0BeginSeq + rawQueue->files[0].popped + rawQueue->files[1].popped)
|
||||
.detail("File0Name", rawQueue->files[0].dbgFilename);
|
||||
rawQueue->close();
|
||||
delete this;
|
||||
close(this);
|
||||
}
|
||||
ACTOR static void close(DiskQueue* self) {
|
||||
wait( self->onSafeToDestruct() );
|
||||
TraceEvent("DQCloseDone", self->dbgid).detail("File0Name", self->rawQueue->files[0].dbgFilename);
|
||||
self->rawQueue->close();
|
||||
delete self;
|
||||
}
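Both dispose() and close() now defer deletion until onSafeToDestruct(), that is, until every outstanding TrackMe-guarded actor has finished. A minimal single-threaded sketch of that latch; the real Tracked<DiskQueue> is built on Flow futures, so the names here are illustrative:

#include <future>

class SafeDestructLatch {
public:
    void enter() { ++outstanding; }                   // TrackMe constructor
    void leave() { --outstanding; maybeSignal(); }    // TrackMe destructor
    std::future<void> onSafeToDestruct() {
        closing = true;
        maybeSignal();
        return done.get_future();
    }
private:
    void maybeSignal() {
        if (closing && outstanding == 0 && !signalled) { signalled = true; done.set_value(); }
    }
    int outstanding = 0;
    bool closing = false;
    bool signalled = false;
    std::promise<void> done;
};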
|
||||
|
||||
virtual StorageBytes getStorageBytes() {
|
||||
|
@ -828,6 +896,127 @@ private:
|
|||
}
|
||||
}
|
||||
|
||||
ACTOR static void verifyCommit(DiskQueue* self, Future<Void> commitSynced, StringBuffer* buffer, loc_t start, loc_t end) {
|
||||
state TrackMe trackme(self);
|
||||
try {
|
||||
wait( commitSynced );
|
||||
Standalone<StringRef> pagedData = wait( readPages(self, start, end) );
|
||||
const int startOffset = start % _PAGE_SIZE;
|
||||
const int dataLen = end - start;
|
||||
ASSERT( pagedData.substr(startOffset, dataLen).compare( buffer->ref().substr(0, dataLen) ) == 0 );
|
||||
} catch (Error& e) {
|
||||
if (e.code() != error_code_io_error) {
|
||||
delete buffer;
|
||||
throw;
|
||||
}
|
||||
}
|
||||
delete buffer;
|
||||
}
|
||||
|
||||
ACTOR static Future<Standalone<StringRef>> readPages(DiskQueue *self, location start, location end) {
|
||||
state TrackMe trackme(self);
|
||||
state int fromFile;
|
||||
state int toFile;
|
||||
state int64_t fromPage;
|
||||
state int64_t toPage;
|
||||
state uint64_t file0size = self->firstPages(1).seq - self->firstPages(0).seq;
|
||||
ASSERT(end > start);
|
||||
ASSERT(start.lo >= self->firstPages(0).seq);
|
||||
self->findPhysicalLocation(start.lo, &fromFile, &fromPage, nullptr);
|
||||
self->findPhysicalLocation(end.lo-1, &toFile, &toPage, nullptr);
|
||||
if (fromFile == 0) { ASSERT( fromPage < file0size / _PAGE_SIZE ); }
|
||||
if (toFile == 0) { ASSERT( toPage < file0size / _PAGE_SIZE ); }
|
||||
// FIXME I think there's something with nextReadLocation we can do here when initialized && !recovered.
|
||||
if (fromFile == 1 && self->recovered) { ASSERT( fromPage < self->rawQueue->writingPos / _PAGE_SIZE ); }
|
||||
if (toFile == 1 && self->recovered) { ASSERT( toPage < self->rawQueue->writingPos / _PAGE_SIZE ); }
|
||||
if (fromFile == toFile) {
|
||||
ASSERT(toPage >= fromPage);
|
||||
Standalone<StringRef> pagedData = wait( self->rawQueue->read( fromFile, fromPage, toPage - fromPage + 1 ) );
|
||||
if ( self->firstPages(0).seq > start.lo ) {
|
||||
// Simulation allows for reads to be delayed and executed after overlapping subsequent
|
||||
// write operations. This means that by the time our read was executed, it's possible
|
||||
// that both disk queue files have been completely overwritten.
|
||||
// I'm not clear what the actual contract for read/write is in this case, so simulation
|
||||
// might be a bit overly aggressive here, but it's behavior we need to tolerate.
|
||||
throw io_error();
|
||||
}
|
||||
ASSERT( ((Page*)pagedData.begin())->seq == start.lo / _PAGE_SIZE * _PAGE_SIZE );
|
||||
ASSERT(pagedData.size() == (toPage - fromPage + 1) * _PAGE_SIZE );
|
||||
|
||||
ASSERT( ((Page*)pagedData.end() - 1)->seq == (end.lo - 1) / _PAGE_SIZE * _PAGE_SIZE );
|
||||
return pagedData;
|
||||
} else {
|
||||
ASSERT(fromFile == 0);
|
||||
state Standalone<StringRef> firstChunk;
|
||||
state Standalone<StringRef> secondChunk;
|
||||
wait( store(firstChunk, self->rawQueue->read( fromFile, fromPage, ( file0size / sizeof(Page) ) - fromPage )) &&
|
||||
store(secondChunk, self->rawQueue->read( toFile, 0, toPage + 1 )) );
|
||||
if ( self->firstPages(0).seq > start.lo ) {
|
||||
// See above.
|
||||
throw io_error();
|
||||
}
|
||||
ASSERT(firstChunk.size() == ( ( file0size / sizeof(Page) ) - fromPage ) * _PAGE_SIZE );
|
||||
ASSERT( ((Page*)firstChunk.begin())->seq == start.lo / _PAGE_SIZE * _PAGE_SIZE );
|
||||
ASSERT(secondChunk.size() == (toPage + 1) * _PAGE_SIZE);
|
||||
ASSERT( ((Page*)secondChunk.end() - 1)->seq == (end.lo - 1) / _PAGE_SIZE * _PAGE_SIZE );
|
||||
return firstChunk.withSuffix(secondChunk);
|
||||
}
|
||||
}
|
||||
|
||||
ACTOR static Future<Standalone<StringRef>> read(DiskQueue *self, location start, location end) {
|
||||
// This `state` is unnecessary, but works around pagedData wrongly becoming const
|
||||
// due to the actor compiler.
|
||||
state Standalone<StringRef> pagedData = wait(readPages(self, start, end));
|
||||
ASSERT(start.lo % sizeof(Page) == 0 ||
|
||||
start.lo % sizeof(Page) >= sizeof(PageHeader));
|
||||
int startingOffset = start.lo % sizeof(Page);
|
||||
if (startingOffset > 0) startingOffset -= sizeof(PageHeader);
|
||||
ASSERT(end.lo % sizeof(Page) == 0 ||
|
||||
end.lo % sizeof(Page) > sizeof(PageHeader));
|
||||
int endingOffset = end.lo % sizeof(Page);
|
||||
if (endingOffset == 0) endingOffset = sizeof(Page);
|
||||
if (endingOffset > 0) endingOffset -= sizeof(PageHeader);
|
||||
|
||||
if ((end.lo-1)/sizeof(Page)*sizeof(Page) == start.lo/sizeof(Page)*sizeof(Page)) {
|
||||
// start and end are on the same page
|
||||
ASSERT(pagedData.size() == sizeof(Page));
|
||||
pagedData.contents() = pagedData.substr(sizeof(PageHeader) + startingOffset, endingOffset - startingOffset);
|
||||
return pagedData;
|
||||
} else {
|
||||
// FIXME: This allocation is excessive and unnecessary. We know the overhead per page that
|
||||
// we'll be stripping out (sizeof(PageHeader)), so we should be able to do a smaller
|
||||
// allocation. But we should be able to re-use the space allocated for pagedData, which
|
||||
// would mean not having to allocate 2x the space for a read.
|
||||
Standalone<StringRef> unpagedData = makeString(pagedData.size());
|
||||
uint8_t *buf = mutateString(unpagedData);
|
||||
memset(buf, 0, unpagedData.size());
|
||||
const Page *data = reinterpret_cast<const Page*>(pagedData.begin());
|
||||
|
||||
// Only start copying from `start` in the first page.
|
||||
if( data->payloadSize > startingOffset ) {
|
||||
memcpy(buf, data->payload+startingOffset, data->payloadSize-startingOffset);
|
||||
buf += data->payloadSize-startingOffset;
|
||||
}
|
||||
data++;
|
||||
|
||||
// Copy all the middle pages
|
||||
while (data->seq != ((end.lo-1)/sizeof(Page)*sizeof(Page))) {
|
||||
// These pages can have varying amounts of data, as pages with partial
|
||||
// data will be zero-filled when commit is called.
|
||||
memcpy(buf, data->payload, data->payloadSize);
|
||||
buf += data->payloadSize;
|
||||
data++;
|
||||
}
|
||||
|
||||
// Copy only until `end` in the last page.
|
||||
memcpy(buf, data->payload, std::min(endingOffset, data->payloadSize));
|
||||
buf += std::min(endingOffset, data->payloadSize);
|
||||
|
||||
unpagedData.contents() = unpagedData.substr(0, buf - unpagedData.begin());
|
||||
return unpagedData;
|
||||
}
|
||||
}
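The else branch above rebuilds a contiguous byte stream from paged data by skipping each page's header and copying only the bytes the page actually holds. A self-contained sketch of that unpaging step, with made-up sizes; the real Page/PageHeader layout is defined elsewhere in DiskQueue:

#include <cstdint>
#include <vector>

constexpr size_t kPageSize   = 4096;   // assumption
constexpr size_t kHeaderSize = 36;     // assumption

struct RawPage {
    uint8_t header[kHeaderSize];                 // checksum, seq, payloadSize, ...
    uint8_t payload[kPageSize - kHeaderSize];
};

std::vector<uint8_t> unpage( const uint8_t* paged, size_t nPages,
                             const std::vector<size_t>& payloadSize ) {
    std::vector<uint8_t> out;
    out.reserve(nPages * (kPageSize - kHeaderSize));
    for (size_t i = 0; i < nPages; i++) {
        const RawPage* p = reinterpret_cast<const RawPage*>(paged + i * kPageSize);
        out.insert(out.end(), p->payload, p->payload + payloadSize[i]);   // headers dropped
    }
    return out;
}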
|
||||
|
||||
void readFromBuffer( StringBuffer* result, int* bytes ) {
|
||||
// extract up to bytes from readBufPage into result
|
||||
int len = std::min( readBufPage->payloadSize - readBufPos, *bytes );
|
||||
|
@ -847,21 +1036,14 @@ private:
|
|||
|
||||
ASSERT( !self->recovered );
|
||||
|
||||
if (self->nextReadLocation < 0) {
|
||||
bool nonempty = wait( findStart(self) );
|
||||
if (!nonempty) {
|
||||
// The constructor has already put everything in the right state for an empty queue
|
||||
self->recovered = true;
|
||||
ASSERT( self->poppedSeq <= self->endLocation() );
|
||||
if (!self->initialized) {
|
||||
bool recoveryComplete = wait( initializeRecovery(self) );
|
||||
|
||||
//The next read location isn't necessarily the end of the last commit, but this is sufficient for helping us check an ASSERTion
|
||||
self->lastCommittedSeq = self->nextReadLocation;
|
||||
if (recoveryComplete) {
|
||||
ASSERT( self->poppedSeq <= self->endLocation() );
|
||||
|
||||
return Standalone<StringRef>();
|
||||
}
|
||||
self->readBufPos = self->nextReadLocation % sizeof(Page) - sizeof(PageHeader);
|
||||
if (self->readBufPos < 0) { self->nextReadLocation -= self->readBufPos; self->readBufPos = 0; }
|
||||
TraceEvent("DQRecStart", self->dbgid).detail("ReadBufPos", self->readBufPos).detail("NextReadLoc", self->nextReadLocation).detail("File0Name", self->rawQueue->files[0].dbgFilename);
|
||||
}
|
||||
|
||||
loop {
|
||||
|
@ -909,7 +1091,6 @@ private:
|
|||
TraceEvent("DQRecovered", self->dbgid).detail("LastPoppedSeq", self->lastPoppedSeq).detail("PoppedSeq", self->poppedSeq).detail("NextPageSeq", self->nextPageSeq).detail("File0Name", self->rawQueue->files[0].dbgFilename);
|
||||
self->recovered = true;
|
||||
ASSERT( self->poppedSeq <= self->endLocation() );
|
||||
self->recoveryFirstPages = Standalone<StringRef>();
|
||||
|
||||
TEST( result.size() == 0 ); // End of queue at border between reads
|
||||
TEST( result.size() != 0 ); // Partial read at end of queue
|
||||
|
@ -920,19 +1101,22 @@ private:
|
|||
return result.str;
|
||||
}
|
||||
|
||||
ACTOR static Future<bool> findStart( DiskQueue* self ) {
|
||||
Standalone<StringRef> epbuf = wait( self->rawQueue->readFirstAndLastPages( &comparePages ) );
|
||||
ASSERT( epbuf.size() % sizeof(Page) == 0 );
|
||||
self->recoveryFirstPages = epbuf;
|
||||
ACTOR static Future<bool> initializeRecovery( DiskQueue* self ) {
|
||||
if (self->initialized) {
|
||||
return self->recovered;
|
||||
}
|
||||
Standalone<StringRef> lastPageData = wait( self->rawQueue->readFirstAndLastPages( &comparePages ) );
|
||||
self->initialized = true;
|
||||
|
||||
if (!epbuf.size()) {
|
||||
if (!lastPageData.size()) {
|
||||
// There are no valid pages, so apparently this is a completely empty queue
|
||||
self->nextReadLocation = 0;
|
||||
return false;
|
||||
self->lastCommittedSeq = 0;
|
||||
self->recovered = true;
|
||||
return true;
|
||||
}
|
||||
|
||||
int n = epbuf.size() / sizeof(Page);
|
||||
Page* lastPage = (Page*)epbuf.end() - 1;
|
||||
Page* lastPage = (Page*)lastPageData.begin();
|
||||
self->nextReadLocation = self->poppedSeq = lastPage->popped;
|
||||
|
||||
/*
|
||||
|
@ -951,48 +1135,55 @@ private:
|
|||
self->findPhysicalLocation( self->poppedSeq, &file, &page, "poppedSeq" );
|
||||
self->rawQueue->setStartPage( file, page );
|
||||
|
||||
return true;
|
||||
self->readBufPos = self->nextReadLocation % sizeof(Page) - sizeof(PageHeader);
|
||||
if (self->readBufPos < 0) { self->nextReadLocation -= self->readBufPos; self->readBufPos = 0; }
|
||||
TraceEvent("DQRecStart", self->dbgid).detail("ReadBufPos", self->readBufPos).detail("NextReadLoc", self->nextReadLocation).detail("File0Name", self->rawQueue->files[0].dbgFilename);
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
Page& firstPages(int i) {
|
||||
ASSERT( initialized );
|
||||
return *(Page*)rawQueue->firstPages[i];
|
||||
}
|
||||
|
||||
void findPhysicalLocation( loc_t loc, int* file, int64_t* page, const char* context ) {
|
||||
bool ok = false;
|
||||
Page*p = (Page*)recoveryFirstPages.begin();
|
||||
|
||||
TraceEvent(SevInfo, "FindPhysicalLocation", dbgid)
|
||||
.detail("RecoveryFirstPages", recoveryFirstPages.size())
|
||||
.detail("Page0Valid", p[0].checkHash())
|
||||
.detail("Page0Seq", p[0].seq)
|
||||
.detail("Page1Valid", p[1].checkHash())
|
||||
.detail("Page1Seq", p[1].seq)
|
||||
if (context)
|
||||
TraceEvent(SevInfo, "FindPhysicalLocation", dbgid)
|
||||
.detail("Page0Valid", firstPages(0).checkHash())
|
||||
.detail("Page0Seq", firstPages(0).seq)
|
||||
.detail("Page1Valid", firstPages(1).checkHash())
|
||||
.detail("Page1Seq", firstPages(1).seq)
|
||||
.detail("Location", loc)
|
||||
.detail("Context", context)
|
||||
.detail("File0Name", rawQueue->files[0].dbgFilename);
|
||||
|
||||
for(int i=recoveryFirstPages.size() / sizeof(Page) - 2; i>=0; i--)
|
||||
if ( p[i].checkHash() && p[i].seq <= (size_t)loc ) {
|
||||
for(int i = 1; i >= 0; i--)
|
||||
if ( firstPages(i).checkHash() && firstPages(i).seq <= (size_t)loc ) {
|
||||
*file = i;
|
||||
*page = (loc - p[i].seq)/sizeof(Page);
|
||||
TraceEvent("FoundPhysicalLocation", dbgid)
|
||||
.detail("PageIndex", i)
|
||||
.detail("PageLocation", *page)
|
||||
.detail("RecoveryFirstPagesSize", recoveryFirstPages.size())
|
||||
.detail("SizeofPage", sizeof(Page))
|
||||
.detail("PageSequence", p[i].seq)
|
||||
.detail("Location", loc)
|
||||
.detail("Context", context)
|
||||
.detail("File0Name", rawQueue->files[0].dbgFilename);
|
||||
*page = (loc - firstPages(i).seq)/sizeof(Page);
|
||||
if (context)
|
||||
TraceEvent("FoundPhysicalLocation", dbgid)
|
||||
.detail("PageIndex", i)
|
||||
.detail("PageLocation", *page)
|
||||
.detail("SizeofPage", sizeof(Page))
|
||||
.detail("PageSequence", firstPages(i).seq)
|
||||
.detail("Location", loc)
|
||||
.detail("Context", context)
|
||||
.detail("File0Name", rawQueue->files[0].dbgFilename);
|
||||
ok = true;
|
||||
break;
|
||||
}
|
||||
if (!ok)
|
||||
TraceEvent(SevError, "DiskQueueLocationError", dbgid)
|
||||
.detail("RecoveryFirstPages", recoveryFirstPages.size())
|
||||
.detail("Page0Valid", p[0].checkHash())
|
||||
.detail("Page0Seq", p[0].seq)
|
||||
.detail("Page1Valid", p[1].checkHash())
|
||||
.detail("Page1Seq", p[1].seq)
|
||||
.detail("Page0Valid", firstPages(0).checkHash())
|
||||
.detail("Page0Seq", firstPages(0).seq)
|
||||
.detail("Page1Valid", firstPages(1).checkHash())
|
||||
.detail("Page1Seq", firstPages(1).seq)
|
||||
.detail("Location", loc)
|
||||
.detail("Context", context)
|
||||
.detail("Context", context ? context : "")
|
||||
.detail("File0Name", rawQueue->files[0].dbgFilename);
|
||||
ASSERT( ok );
|
||||
}
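findPhysicalLocation() now works directly off the two cached first pages: it picks the newer file whose first sequence number is at or below the requested location and converts the remainder into a page index. The mapping in isolation, under an assumed 4KiB page size; the real code also requires the candidate page's checksum to pass:

#include <cstdint>
#include <utility>

std::pair<int, int64_t> physicalLocation( uint64_t loc, const uint64_t firstSeq[2] ) {
    constexpr uint64_t kPageSize = 4096;
    for (int i = 1; i >= 0; i--)
        if (firstSeq[i] <= loc)
            return { i, int64_t((loc - firstSeq[i]) / kPageSize) };
    return { -1, -1 };   // no file covers loc; the real code ASSERTs instead
}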
|
||||
|
@ -1025,11 +1216,11 @@ private:
|
|||
|
||||
// Recovery state
|
||||
bool recovered;
|
||||
bool initialized;
|
||||
loc_t nextReadLocation;
|
||||
Arena readBufArena;
|
||||
Page* readBufPage;
|
||||
int readBufPos;
|
||||
Standalone<StringRef> recoveryFirstPages;
|
||||
};
|
||||
|
||||
//A class wrapping DiskQueue which durably allows uncommitted data to be popped
|
||||
|
@ -1048,10 +1239,13 @@ public:
|
|||
void close() { queue->close(); delete this; }
|
||||
|
||||
//IDiskQueue
|
||||
Future<bool> initializeRecovery() { return queue->initializeRecovery(); }
|
||||
Future<Standalone<StringRef>> readNext( int bytes ) { return readNext(this, bytes); }
|
||||
|
||||
virtual location getNextReadLocation() { return queue->getNextReadLocation(); }
|
||||
|
||||
virtual Future<Standalone<StringRef>> read( location start, location end ) { return queue->read( start, end ); }
|
||||
|
||||
virtual location push( StringRef contents ) {
|
||||
pushed = queue->push(contents);
|
||||
return pushed;
|
||||
|
|
|
@ -41,11 +41,20 @@ public:
|
|||
}
|
||||
};
|
||||
|
||||
//! Find the first and last pages in the disk queue, and initialize invariants.
//!
//! Most importantly, most invariants only hold after this function returns, and
//! some functions assert that the IDiskQueue has been initialized.
//!
//! \returns True, if DiskQueue is now considered in a recovered state.
//! False, if the caller should call readNext until recovered is true.
virtual Future<bool> initializeRecovery() = 0;
// Before calling push or commit, the caller *must* perform recovery by calling readNext() until it returns less than the requested number of bytes.
// Thereafter it may not be called again.
virtual Future<Standalone<StringRef>> readNext( int bytes ) = 0; // Return the next bytes in the queue (beginning, the first time called, with the first unpopped byte)
virtual location getNextReadLocation() = 0; // Returns a location >= the location of all bytes previously returned by readNext(), and <= the location of all bytes subsequently returned

virtual Future<Standalone<StringRef>> read( location start, location end ) = 0;
virtual location push( StringRef contents ) = 0; // Appends the given bytes to the byte stream. Returns a location token representing the *end* of the contents.
virtual void pop( location upTo ) = 0; // Removes all bytes before the given location token from the byte stream.
virtual Future<Void> commit() = 0; // returns when all prior pushes and pops are durable. If commit does not return (due to close or a crash), any prefix of the pushed bytes and any prefix of the popped bytes may be durable.
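A hedged sketch of how a caller might drive recovery under this contract: call initializeRecovery() once, then drain readNext() until a short read signals that the queue has been fully replayed. The chunk size and the helper name are illustrative, not part of the interface:

#include "fdbserver/IDiskQueue.h"
#include "flow/actorcompiler.h"  // must be the last include

ACTOR Future<Void> recoverDiskQueue( IDiskQueue* queue ) {
    state int chunkSize = 1 << 16;
    state bool recovered = wait( queue->initializeRecovery() );
    while (!recovered) {
        Standalone<StringRef> chunk = wait( queue->readNext( chunkSize ) );
        // A real caller would apply chunk to its in-memory state here.
        if (chunk.size() < chunkSize) recovered = true;   // short read: end of queue reached
    }
    return Void();
}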
|
||||
|
|
|
@ -165,6 +165,8 @@ ServerKnobs::ServerKnobs(bool randomize, ClientKnobs* clientKnobs) {
|
|||
init( DD_LOCATION_CACHE_SIZE, 2000000 ); if( randomize && BUGGIFY ) DD_LOCATION_CACHE_SIZE = 3;
|
||||
init( MOVEKEYS_LOCK_POLLING_DELAY, 5.0 );
|
||||
init( DEBOUNCE_RECRUITING_DELAY, 5.0 );
|
||||
init( DD_FAILURE_TIME, 1.0 ); if( randomize && BUGGIFY ) DD_FAILURE_TIME = 10.0;
|
||||
init( DD_ZERO_HEALTHY_TEAM_DELAY, 1.0 );
|
||||
|
||||
// Redwood Storage Engine
|
||||
init( PREFIX_TREE_IMMEDIATE_KEY_SIZE_LIMIT, 30 );
|
||||
|
@ -295,6 +297,7 @@ ServerKnobs::ServerKnobs(bool randomize, ClientKnobs* clientKnobs) {
|
|||
init( WAIT_FOR_GOOD_RECRUITMENT_DELAY, 1.0 );
|
||||
init( WAIT_FOR_GOOD_REMOTE_RECRUITMENT_DELAY, 5.0 );
|
||||
init( ATTEMPT_RECRUITMENT_DELAY, 0.035 );
|
||||
init( WAIT_FOR_DISTRIBUTOR_JOIN_DELAY, 1.0 );
|
||||
init( WORKER_FAILURE_TIME, 1.0 ); if( randomize && BUGGIFY ) WORKER_FAILURE_TIME = 10.0;
|
||||
init( CHECK_OUTSTANDING_INTERVAL, 0.5 ); if( randomize && BUGGIFY ) CHECK_OUTSTANDING_INTERVAL = 0.001;
|
||||
init( VERSION_LAG_METRIC_INTERVAL, 0.5 ); if( randomize && BUGGIFY ) VERSION_LAG_METRIC_INTERVAL = 10.0;
|
||||
|
|
|
@ -128,6 +128,8 @@ public:
|
|||
int64_t DD_LOCATION_CACHE_SIZE;
|
||||
double MOVEKEYS_LOCK_POLLING_DELAY;
|
||||
double DEBOUNCE_RECRUITING_DELAY;
|
||||
double DD_FAILURE_TIME;
|
||||
double DD_ZERO_HEALTHY_TEAM_DELAY;
|
||||
|
||||
// Redwood Storage Engine
|
||||
int PREFIX_TREE_IMMEDIATE_KEY_SIZE_LIMIT;
|
||||
|
@ -234,6 +236,7 @@ public:
|
|||
double WAIT_FOR_GOOD_RECRUITMENT_DELAY;
|
||||
double WAIT_FOR_GOOD_REMOTE_RECRUITMENT_DELAY;
|
||||
double ATTEMPT_RECRUITMENT_DELAY;
|
||||
double WAIT_FOR_DISTRIBUTOR_JOIN_DELAY;
|
||||
double WORKER_FAILURE_TIME;
|
||||
double CHECK_OUTSTANDING_INTERVAL;
|
||||
double INCOMPATIBLE_PEERS_LOGGING_INTERVAL;
|
||||
|
|
|
@ -0,0 +1,120 @@
|
|||
/*
|
||||
* LatencyBandConfig.cpp
|
||||
*
|
||||
* This source file is part of the FoundationDB open source project
|
||||
*
|
||||
* Copyright 2013-2019 Apple Inc. and the FoundationDB project authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include "fdbserver/LatencyBandConfig.h"
|
||||
|
||||
#include "fdbclient/ManagementAPI.h"
|
||||
#include "fdbclient/Schemas.h"
|
||||
|
||||
bool operator==(LatencyBandConfig::RequestConfig const& lhs, LatencyBandConfig::RequestConfig const& rhs) {
|
||||
return typeid(lhs) == typeid(rhs) && lhs.isEqual(rhs);
|
||||
}
|
||||
|
||||
bool operator!=(LatencyBandConfig::RequestConfig const& lhs, LatencyBandConfig::RequestConfig const& rhs) {
|
||||
return !(lhs == rhs);
|
||||
}
|
||||
|
||||
bool LatencyBandConfig::RequestConfig::isEqual(RequestConfig const& r) const {
|
||||
return bands == r.bands;
|
||||
};
|
||||
|
||||
void LatencyBandConfig::RequestConfig::fromJson(JSONDoc json) {
|
||||
json_spirit::mArray bandsArray;
|
||||
if(json.get("bands", bandsArray)) {
|
||||
for(auto b : bandsArray) {
|
||||
bands.insert(b.get_real());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void LatencyBandConfig::ReadConfig::fromJson(JSONDoc json) {
|
||||
RequestConfig::fromJson(json);
|
||||
|
||||
int value;
|
||||
if(json.get("max_read_bytes", value)) {
|
||||
maxReadBytes = value;
|
||||
}
|
||||
if(json.get("max_key_selector_offset", value)) {
|
||||
maxKeySelectorOffset = value;
|
||||
}
|
||||
}
|
||||
|
||||
bool LatencyBandConfig::ReadConfig::isEqual(RequestConfig const& r) const {
|
||||
ReadConfig const& other = static_cast<ReadConfig const&>(r);
|
||||
return RequestConfig::isEqual(r) && maxReadBytes == other.maxReadBytes && maxKeySelectorOffset == other.maxKeySelectorOffset;
|
||||
}
|
||||
|
||||
void LatencyBandConfig::CommitConfig::fromJson(JSONDoc json) {
|
||||
RequestConfig::fromJson(json);
|
||||
|
||||
int value;
|
||||
if(json.get("max_commit_bytes", value)) {
|
||||
maxCommitBytes = value;
|
||||
}
|
||||
}
|
||||
|
||||
bool LatencyBandConfig::CommitConfig::isEqual(RequestConfig const& r) const {
|
||||
CommitConfig const& other = static_cast<CommitConfig const&>(r);
|
||||
return RequestConfig::isEqual(r) && maxCommitBytes == other.maxCommitBytes;
|
||||
}
|
||||
|
||||
Optional<LatencyBandConfig> LatencyBandConfig::parse(ValueRef configurationString) {
Optional<LatencyBandConfig> config;
if(configurationString.size() == 0) {
return config;
}

json_spirit::mValue parsedConfig;
if(!json_spirit::read_string(configurationString.toString(), parsedConfig)) {
TraceEvent(SevWarnAlways, "InvalidLatencyBandConfiguration").detail("Reason", "InvalidJSON").detail("Configuration", printable(configurationString));
return config;
}

json_spirit::mObject configJson = parsedConfig.get_obj();

json_spirit::mValue schema;
if(!json_spirit::read_string(JSONSchemas::latencyBandConfigurationSchema.toString(), schema)) {
ASSERT(false);
}

std::string errorStr;
if(!schemaMatch(schema.get_obj(), configJson, errorStr)) {
TraceEvent(SevWarnAlways, "InvalidLatencyBandConfiguration").detail("Reason", "SchemaMismatch").detail("Configuration", printable(configurationString)).detail("Error", errorStr);
return config;
}

JSONDoc configDoc(configJson);

config = LatencyBandConfig();

config.get().grvConfig.fromJson(configDoc.subDoc("get_read_version"));
config.get().readConfig.fromJson(configDoc.subDoc("read"));
config.get().commitConfig.fromJson(configDoc.subDoc("commit"));

return config;
}

bool LatencyBandConfig::operator==(LatencyBandConfig const& r) const {
return grvConfig == r.grvConfig && readConfig == r.readConfig && commitConfig == r.commitConfig;
}

bool LatencyBandConfig::operator!=(LatencyBandConfig const& r) const {
return !(*this == r);
}
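A hedged usage sketch for parse(). The JSON shape below is inferred from the field names consumed in fromJson() above ("bands", "max_read_bytes", "max_commit_bytes") and from the subdocument names in parse(); the authoritative schema is JSONSchemas::latencyBandConfigurationSchema.

#include "fdbserver/LatencyBandConfig.h"

// Example configuration value (illustrative):
//   { "get_read_version": { "bands": [0.005, 0.02] },
//     "read":             { "bands": [0.01, 0.1], "max_read_bytes": 1000000 },
//     "commit":           { "bands": [0.05, 0.5], "max_commit_bytes": 500000 } }
void applyLatencyBandConfig( ValueRef configValue ) {
    Optional<LatencyBandConfig> config = LatencyBandConfig::parse( configValue );
    if (!config.present()) return;          // empty or invalid configuration
    for (double band : config.get().commitConfig.bands) {
        // e.g. stats.commitLatencyBands.addThreshold(band);
        (void)band;
    }
}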
|
|
@ -0,0 +1,106 @@
|
|||
/*
|
||||
* LatencyBandConfig.h
|
||||
*
|
||||
* This source file is part of the FoundationDB open source project
|
||||
*
|
||||
* Copyright 2013-2019 Apple Inc. and the FoundationDB project authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#ifndef FDBSERVER_LATENCYBANDCONFIG_H
|
||||
#define FDBSERVER_LATENCYBANDCONFIG_H
|
||||
#pragma once
|
||||
|
||||
#include "fdbclient/FDBTypes.h"
|
||||
#include "fdbclient/JSONDoc.h"
|
||||
|
||||
struct LatencyBandConfig {
|
||||
struct RequestConfig {
|
||||
std::set<double> bands;
|
||||
|
||||
friend bool operator==(RequestConfig const& lhs, RequestConfig const& rhs);
|
||||
friend bool operator!=(RequestConfig const& lhs, RequestConfig const& rhs);
|
||||
|
||||
virtual void fromJson(JSONDoc json);
|
||||
|
||||
template <class Ar>
|
||||
void serialize(Ar& ar) {
|
||||
uint64_t bandsSize = (uint64_t)bands.size();
|
||||
serializer(ar, bandsSize);
|
||||
|
||||
if(ar.isDeserializing) {
|
||||
double band;
|
||||
for(uint64_t i = 0; i < bandsSize; i++) {
|
||||
serializer(ar, band);
|
||||
bands.insert(band);
|
||||
}
|
||||
}
|
||||
else {
|
||||
for(double band : bands) {
|
||||
serializer(ar, band);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
protected:
|
||||
virtual bool isEqual(RequestConfig const& r) const;
|
||||
};
|
||||
|
||||
struct GrvConfig : RequestConfig {};
|
||||
|
||||
struct ReadConfig : RequestConfig {
|
||||
Optional<int> maxReadBytes;
|
||||
Optional<int> maxKeySelectorOffset;
|
||||
|
||||
virtual void fromJson(JSONDoc json);
|
||||
|
||||
template <class Ar>
|
||||
void serialize(Ar& ar) {
|
||||
serializer(ar, *(RequestConfig*)this, maxReadBytes, maxKeySelectorOffset);
|
||||
}
|
||||
|
||||
protected:
|
||||
virtual bool isEqual(RequestConfig const& r) const;
|
||||
};
|
||||
|
||||
struct CommitConfig : RequestConfig {
|
||||
Optional<int> maxCommitBytes;
|
||||
|
||||
virtual void fromJson(JSONDoc json);
|
||||
|
||||
template <class Ar>
|
||||
void serialize(Ar& ar) {
|
||||
serializer(ar, *(RequestConfig*)this, maxCommitBytes);
|
||||
}
|
||||
|
||||
protected:
|
||||
virtual bool isEqual(RequestConfig const& r) const;
|
||||
};
|
||||
|
||||
GrvConfig grvConfig;
|
||||
ReadConfig readConfig;
|
||||
CommitConfig commitConfig;
|
||||
|
||||
template <class Ar>
|
||||
void serialize(Ar& ar) {
|
||||
serializer(ar, grvConfig, readConfig, commitConfig);
|
||||
}
|
||||
|
||||
static Optional<LatencyBandConfig> parse(ValueRef configurationString);
|
||||
|
||||
bool operator==(LatencyBandConfig const& r) const;
|
||||
bool operator!=(LatencyBandConfig const& r) const;
|
||||
};
|
||||
|
||||
#endif
|
|
@ -67,8 +67,10 @@ public:
|
|||
virtual void close();
|
||||
|
||||
// IDiskQueue interface
|
||||
virtual Future<bool> initializeRecovery() { return false; }
|
||||
virtual Future<Standalone<StringRef>> readNext( int bytes );
|
||||
virtual IDiskQueue::location getNextReadLocation();
|
||||
virtual Future<Standalone<StringRef>> read( location start, location end ) { ASSERT(false); throw internal_error(); }
|
||||
virtual IDiskQueue::location push( StringRef contents );
|
||||
virtual void pop( IDiskQueue::location upTo );
|
||||
virtual Future<Void> commit();
|
||||
|
|
|
@ -25,6 +25,7 @@
|
|||
#include "fdbclient/FDBTypes.h"
|
||||
#include "fdbclient/StorageServerInterface.h"
|
||||
#include "fdbclient/CommitTransaction.h"
|
||||
#include "fdbclient/DatabaseConfiguration.h"
|
||||
#include "fdbserver/TLogInterface.h"
|
||||
|
||||
typedef uint64_t DBRecoveryCount;
|
||||
|
@ -32,7 +33,6 @@ typedef uint64_t DBRecoveryCount;
|
|||
struct MasterInterface {
|
||||
LocalityData locality;
|
||||
RequestStream< ReplyPromise<Void> > waitFailure;
|
||||
RequestStream< struct GetRateInfoRequest > getRateInfo;
|
||||
RequestStream< struct TLogRejoinRequest > tlogRejoin; // sent by tlog (whether or not rebooted) to communicate with a new master
|
||||
RequestStream< struct ChangeCoordinatorsRequest > changeCoordinators;
|
||||
RequestStream< struct GetCommitVersionRequest > getCommitVersion;
|
||||
|
@ -43,7 +43,7 @@ struct MasterInterface {
|
|||
template <class Archive>
|
||||
void serialize(Archive& ar) {
|
||||
ASSERT( ar.protocolVersion() >= 0x0FDB00A200040001LL );
|
||||
serializer(ar, locality, waitFailure, getRateInfo, tlogRejoin, changeCoordinators, getCommitVersion);
|
||||
serializer(ar, locality, waitFailure, tlogRejoin, changeCoordinators, getCommitVersion);
|
||||
}
|
||||
|
||||
void initEndpoints() {
|
||||
|
@ -51,30 +51,6 @@ struct MasterInterface {
|
|||
}
|
||||
};
|
||||
|
||||
struct GetRateInfoRequest {
|
||||
UID requesterID;
|
||||
int64_t totalReleasedTransactions;
|
||||
ReplyPromise<struct GetRateInfoReply> reply;
|
||||
|
||||
GetRateInfoRequest() {}
|
||||
GetRateInfoRequest( UID const& requesterID, int64_t totalReleasedTransactions ) : requesterID(requesterID), totalReleasedTransactions(totalReleasedTransactions) {}
|
||||
|
||||
template <class Ar>
|
||||
void serialize(Ar& ar) {
|
||||
serializer(ar, requesterID, totalReleasedTransactions, reply);
|
||||
}
|
||||
};
|
||||
|
||||
struct GetRateInfoReply {
|
||||
double transactionRate;
|
||||
double leaseDuration;
|
||||
|
||||
template <class Ar>
|
||||
void serialize(Ar& ar) {
|
||||
serializer(ar, transactionRate, leaseDuration);
|
||||
}
|
||||
};
|
||||
|
||||
struct TLogRejoinRequest {
|
||||
TLogInterface myInterface;
|
||||
ReplyPromise<bool> reply; // false means someone else registered, so we should re-register. true means this master is recovered, so don't send again to the same master.
|
||||
|
|
|
@ -37,6 +37,7 @@
|
|||
#include "flow/Stats.h"
|
||||
#include "fdbserver/ApplyMetadataMutation.h"
|
||||
#include "fdbserver/RecoveryState.h"
|
||||
#include "fdbserver/LatencyBandConfig.h"
|
||||
#include "fdbclient/Atomic.h"
|
||||
#include "flow/TDMetric.actor.h"
|
||||
#include "flow/actorcompiler.h" // This must be the last #include.
|
||||
|
@ -55,13 +56,17 @@ struct ProxyStats {
|
|||
Counter conflictRanges;
|
||||
Version lastCommitVersionAssigned;
|
||||
|
||||
LatencyBands commitLatencyBands;
|
||||
LatencyBands grvLatencyBands;
|
||||
|
||||
Future<Void> logger;
|
||||
|
||||
explicit ProxyStats(UID id, Version* pVersion, NotifiedVersion* pCommittedVersion, int64_t *commitBatchesMemBytesCountPtr)
|
||||
: cc("ProxyStats", id.toString()),
|
||||
txnStartIn("TxnStartIn", cc), txnStartOut("TxnStartOut", cc), txnStartBatch("TxnStartBatch", cc), txnSystemPriorityStartIn("TxnSystemPriorityStartIn", cc), txnSystemPriorityStartOut("TxnSystemPriorityStartOut", cc), txnBatchPriorityStartIn("TxnBatchPriorityStartIn", cc), txnBatchPriorityStartOut("TxnBatchPriorityStartOut", cc),
|
||||
txnDefaultPriorityStartIn("TxnDefaultPriorityStartIn", cc), txnDefaultPriorityStartOut("TxnDefaultPriorityStartOut", cc), txnCommitIn("TxnCommitIn", cc), txnCommitVersionAssigned("TxnCommitVersionAssigned", cc), txnCommitResolving("TxnCommitResolving", cc), txnCommitResolved("TxnCommitResolved", cc), txnCommitOut("TxnCommitOut", cc),
|
||||
txnCommitOutSuccess("TxnCommitOutSuccess", cc), txnConflicts("TxnConflicts", cc), commitBatchIn("CommitBatchIn", cc), commitBatchOut("CommitBatchOut", cc), mutationBytes("MutationBytes", cc), mutations("Mutations", cc), conflictRanges("ConflictRanges", cc), lastCommitVersionAssigned(0)
|
||||
txnCommitOutSuccess("TxnCommitOutSuccess", cc), txnConflicts("TxnConflicts", cc), commitBatchIn("CommitBatchIn", cc), commitBatchOut("CommitBatchOut", cc), mutationBytes("MutationBytes", cc), mutations("Mutations", cc), conflictRanges("ConflictRanges", cc), lastCommitVersionAssigned(0),
|
||||
commitLatencyBands("CommitLatencyMetrics", id, SERVER_KNOBS->STORAGE_LOGGING_DELAY), grvLatencyBands("GRVLatencyMetrics", id, SERVER_KNOBS->STORAGE_LOGGING_DELAY)
|
||||
{
|
||||
specialCounter(cc, "LastAssignedCommitVersion", [this](){return this->lastCommitVersionAssigned;});
|
||||
specialCounter(cc, "Version", [pVersion](){return *pVersion; });
|
||||
|
@ -82,28 +87,40 @@ Future<Void> forwardValue(Promise<T> out, Future<T> in)
|
|||
|
||||
int getBytes(Promise<Version> const& r) { return 0; }
|
||||
|
||||
ACTOR Future<Void> getRate(UID myID, MasterInterface master, int64_t* inTransactionCount, double* outTransactionRate) {
|
||||
state Future<Void> nextRequestTimer = Void();
|
||||
ACTOR Future<Void> getRate(UID myID, Reference<AsyncVar<ServerDBInfo>> db, int64_t* inTransactionCount, double* outTransactionRate) {
|
||||
state Future<Void> nextRequestTimer = Never();
|
||||
state Future<Void> leaseTimeout = Never();
|
||||
state Future<GetRateInfoReply> reply;
|
||||
state Future<GetRateInfoReply> reply = Never();
|
||||
state int64_t lastTC = 0;
|
||||
|
||||
loop choose{
|
||||
when(wait(nextRequestTimer)) {
|
||||
nextRequestTimer = Never();
|
||||
reply = brokenPromiseToNever(master.getRateInfo.getReply(GetRateInfoRequest(myID, *inTransactionCount)));
|
||||
if (db->get().distributor.present()) nextRequestTimer = Void();
|
||||
loop choose {
|
||||
when ( wait( db->onChange() ) ) {
|
||||
if ( db->get().distributor.present() ) {
|
||||
TraceEvent("Proxy_DataDistributorChanged", myID)
|
||||
.detail("DDID", db->get().distributor.get().id());
|
||||
nextRequestTimer = Void(); // trigger GetRate request
|
||||
} else {
|
||||
TraceEvent("Proxy_DataDistributorDied", myID);
|
||||
nextRequestTimer = Never();
|
||||
reply = Never();
|
||||
}
|
||||
}
|
||||
when(GetRateInfoReply rep = wait(reply)) {
|
||||
when ( wait( nextRequestTimer ) ) {
|
||||
nextRequestTimer = Never();
|
||||
reply = brokenPromiseToNever(db->get().distributor.get().getRateInfo.getReply(GetRateInfoRequest(myID, *inTransactionCount)));
|
||||
}
|
||||
when ( GetRateInfoReply rep = wait(reply) ) {
|
||||
reply = Never();
|
||||
*outTransactionRate = rep.transactionRate;
|
||||
//TraceEvent("MasterProxyRate", myID).detail("Rate", rep.transactionRate).detail("Lease", rep.leaseDuration).detail("ReleasedTransactions", *inTransactionCount - lastTC);
|
||||
// TraceEvent("MasterProxyRate", myID).detail("Rate", rep.transactionRate).detail("Lease", rep.leaseDuration).detail("ReleasedTransactions", *inTransactionCount - lastTC);
|
||||
lastTC = *inTransactionCount;
|
||||
leaseTimeout = delay(rep.leaseDuration);
|
||||
nextRequestTimer = delayJittered(rep.leaseDuration / 2);
|
||||
}
|
||||
when(wait(leaseTimeout)) {
|
||||
when ( wait(leaseTimeout ) ) {
|
||||
*outTransactionRate = 0;
|
||||
//TraceEvent("MasterProxyRate", myID).detail("Rate", 0).detail("Lease", "Expired");
|
||||
// TraceEvent("MasterProxyRate", myID).detail("Rate", 0).detail("Lease", "Expired");
|
||||
leaseTimeout = Never();
|
||||
}
|
||||
}
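The rewritten getRate() follows a lease pattern: ask the data distributor for a rate, refresh at half the lease duration, and fall back to a zero rate if the lease expires without a reply. The same protocol reduced to plain numbers; the names and the clock are illustrative:

struct RateLease {
    double rate = 0.0;
    double expiresAt = 0.0;                 // seconds on some monotonic clock

    void onReply( double newRate, double leaseDuration, double now ) {
        rate = newRate;
        expiresAt = now + leaseDuration;
    }
    double currentRate( double now ) const {
        return now < expiresAt ? rate : 0.0;        // expired lease: stop issuing transactions
    }
    static double nextRequestTime( double grantedAt, double leaseDuration ) {
        return grantedAt + leaseDuration / 2;       // refresh halfway through the lease
    }
};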
|
||||
|
@ -208,6 +225,8 @@ struct ProxyCommitData {
|
|||
Version lastTxsPop;
|
||||
bool popRemoteTxs;
|
||||
|
||||
Optional<LatencyBandConfig> latencyBandConfig;
|
||||
|
||||
//The tags related to a storage server rarely change, so we keep a vector of tags for each key range to be slightly more CPU efficient.
|
||||
//When a tag related to a storage server does change, we empty out all of these vectors to signify they must be repopulated.
|
||||
//We do not repopulate them immediately to avoid a slow task.
|
||||
|
@ -458,11 +477,13 @@ ACTOR Future<Void> commitBatch(
|
|||
|
||||
ResolutionRequestBuilder requests( self, commitVersion, prevVersion, self->version );
|
||||
int conflictRangeCount = 0;
|
||||
state int64_t maxTransactionBytes = 0;
|
||||
for (int t = 0; t<trs.size(); t++) {
|
||||
requests.addTransaction(trs[t].transaction, t);
|
||||
conflictRangeCount += trs[t].transaction.read_conflict_ranges.size() + trs[t].transaction.write_conflict_ranges.size();
|
||||
//TraceEvent("MPTransactionDump", self->dbgid).detail("Snapshot", trs[t].transaction.read_snapshot);
|
||||
//for(auto& m : trs[t].transaction.mutations)
|
||||
maxTransactionBytes = std::max<int64_t>(maxTransactionBytes, trs[t].transaction.expectedSize());
|
||||
// TraceEvent("MPTransactionsDump", self->dbgid).detail("Mutation", m.toString());
|
||||
}
|
||||
self->stats.conflictRanges += conflictRangeCount;
|
||||
|
@ -952,16 +973,24 @@ ACTOR Future<Void> commitBatch(
|
|||
}
|
||||
|
||||
// Send replies to clients
|
||||
for (int t = 0; t < trs.size(); t++)
|
||||
{
|
||||
double endTime = timer();
|
||||
for (int t = 0; t < trs.size(); t++) {
|
||||
if (committed[t] == ConflictBatch::TransactionCommitted && (!locked || trs[t].isLockAware())) {
|
||||
ASSERT_WE_THINK(commitVersion != invalidVersion);
|
||||
trs[t].reply.send(CommitID(commitVersion, t));
|
||||
}
|
||||
else if (committed[t] == ConflictBatch::TransactionTooOld)
|
||||
else if (committed[t] == ConflictBatch::TransactionTooOld) {
|
||||
trs[t].reply.sendError(transaction_too_old());
|
||||
else
|
||||
}
|
||||
else {
|
||||
trs[t].reply.sendError(not_committed());
|
||||
}
|
||||
|
||||
// TODO: filter if pipelined with large commit
|
||||
if(self->latencyBandConfig.present()) {
|
||||
bool filter = maxTransactionBytes > self->latencyBandConfig.get().commitConfig.maxCommitBytes.orDefault(std::numeric_limits<int>::max());
|
||||
self->stats.commitLatencyBands.addMeasurement(endTime - trs[t].requestTime, filter);
|
||||
}
|
||||
}
|
||||
|
||||
++self->stats.commitBatchOut;
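Each commit reply now feeds commitLatencyBands, with oversized transactions filtered by maxCommitBytes. A sketch of what a band counter of this kind does with those measurements; the real LatencyBands implementation may differ in detail:

#include <cstdint>
#include <map>

class SimpleLatencyBands {
public:
    void addThreshold( double seconds ) { counts[seconds]; }        // create an empty bucket
    void addMeasurement( double seconds, bool filtered = false ) {
        if (filtered) { ++filteredCount; return; }                  // e.g. oversized commits
        for (auto& band : counts)
            if (seconds <= band.first) ++band.second;
    }
private:
    std::map<double, int64_t> counts;   // threshold -> number of samples at or under it
    int64_t filteredCount = 0;
};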
|
||||
|
@ -1049,9 +1078,19 @@ ACTOR Future<Void> fetchVersions(ProxyCommitData *commitData) {
|
|||
}
|
||||
}
|
||||
|
||||
ACTOR Future<Void> sendGrvReplies(Future<GetReadVersionReply> replyFuture, std::vector<GetReadVersionRequest> requests, ProxyStats *stats) {
|
||||
GetReadVersionReply reply = wait(replyFuture);
|
||||
double end = timer();
|
||||
for(GetReadVersionRequest const& request : requests) {
|
||||
stats->grvLatencyBands.addMeasurement(end - request.requestTime);
|
||||
request.reply.send(reply);
|
||||
}
|
||||
|
||||
return Void();
|
||||
}
|
||||
|
||||
ACTOR static Future<Void> transactionStarter(
|
||||
MasterProxyInterface proxy,
|
||||
MasterInterface master,
|
||||
Reference<AsyncVar<ServerDBInfo>> db,
|
||||
PromiseStream<Future<Void>> addActor,
|
||||
ProxyCommitData* commitData
|
||||
|
@ -1068,7 +1107,7 @@ ACTOR static Future<Void> transactionStarter(
|
|||
state vector<MasterProxyInterface> otherProxies;
|
||||
|
||||
state PromiseStream<double> replyTimes;
|
||||
addActor.send(getRate(proxy.id(), master, &transactionCount, &transactionRate));
|
||||
addActor.send( getRate(proxy.id(), db, &transactionCount, &transactionRate) );
|
||||
addActor.send(queueTransactionStartRequests(&transactionQueue, proxy.getConsistentReadVersion.getFuture(), GRVTimer, &lastGRVTime, &GRVBatchTime, replyTimes.getFuture(), &commitData->stats));
|
||||
|
||||
// Get a list of the other proxies that go together with us
|
||||
|
@ -1098,7 +1137,7 @@ ACTOR static Future<Void> transactionStarter(
|
|||
int defaultPriTransactionsStarted[2] = { 0, 0 };
|
||||
int batchPriTransactionsStarted[2] = { 0, 0 };
|
||||
|
||||
vector<vector<ReplyPromise<GetReadVersionReply>>> start(2); // start[0] is transactions starting with !(flags&CAUSAL_READ_RISKY), start[1] is transactions starting with flags&CAUSAL_READ_RISKY
|
||||
vector<vector<GetReadVersionRequest>> start(2); // start[0] is transactions starting with !(flags&CAUSAL_READ_RISKY), start[1] is transactions starting with flags&CAUSAL_READ_RISKY
|
||||
Optional<UID> debugID;
|
||||
|
||||
double leftToStart = 0;
|
||||
|
@ -1114,7 +1153,6 @@ ACTOR static Future<Void> transactionStarter(
|
|||
if (!debugID.present()) debugID = g_nondeterministic_random->randomUniqueID();
|
||||
g_traceBatch.addAttach("TransactionAttachID", req.debugID.get().first(), debugID.get().first());
|
||||
}
|
||||
start[req.flags & 1].push_back(std::move(req.reply)); static_assert(GetReadVersionRequest::FLAG_CAUSAL_READ_RISKY == 1, "Implementation dependent on flag value");
|
||||
|
||||
transactionsStarted[req.flags&1] += tc;
|
||||
if (req.priority() >= GetReadVersionRequest::PRIORITY_SYSTEM_IMMEDIATE)
|
||||
|
@ -1124,6 +1162,7 @@ ACTOR static Future<Void> transactionStarter(
|
|||
else
|
||||
batchPriTransactionsStarted[req.flags & 1] += tc;
|
||||
|
||||
start[req.flags & 1].push_back(std::move(req)); static_assert(GetReadVersionRequest::FLAG_CAUSAL_READ_RISKY == 1, "Implementation dependent on flag value");
|
||||
transactionQueue.pop();
|
||||
}
|
||||
|
||||
|
@ -1141,20 +1180,22 @@ ACTOR static Future<Void> transactionStarter(
|
|||
.detail("TransactionBudget", transactionBudget)
|
||||
.detail("LastLeftToStart", leftToStart);*/
|
||||
|
||||
// dynamic batching
|
||||
ReplyPromise<GetReadVersionReply> GRVReply;
|
||||
if (start[0].size()){
|
||||
start[0].push_back(GRVReply); // for now, base dynamic batching on the time for normal requests (not read_risky)
|
||||
addActor.send(timeReply(GRVReply.getFuture(), replyTimes));
|
||||
}
|
||||
|
||||
transactionCount += transactionsStarted[0] + transactionsStarted[1];
|
||||
transactionBudget = std::max(std::min(nTransactionsToStart - transactionsStarted[0] - transactionsStarted[1], SERVER_KNOBS->START_TRANSACTION_MAX_BUDGET_SIZE), -SERVER_KNOBS->START_TRANSACTION_MAX_BUDGET_SIZE);
|
||||
if (debugID.present())
|
||||
|
||||
if (debugID.present()) {
|
||||
g_traceBatch.addEvent("TransactionDebug", debugID.get().first(), "MasterProxyServer.masterProxyServerCore.Broadcast");
|
||||
for (int i = 0; i<start.size(); i++) {
|
||||
}
|
||||
|
||||
for (int i = 0; i < start.size(); i++) {
|
||||
if (start[i].size()) {
|
||||
addActor.send(broadcast(getLiveCommittedVersion(commitData, i, &otherProxies, debugID, transactionsStarted[i], systemTransactionsStarted[i], defaultPriTransactionsStarted[i], batchPriTransactionsStarted[i]), start[i]));
|
||||
Future<GetReadVersionReply> readVersionReply = getLiveCommittedVersion(commitData, i, &otherProxies, debugID, transactionsStarted[i], systemTransactionsStarted[i], defaultPriTransactionsStarted[i], batchPriTransactionsStarted[i]);
|
||||
addActor.send(sendGrvReplies(readVersionReply, start[i], &commitData->stats));
|
||||
|
||||
// for now, base dynamic batching on the time for normal requests (not read_risky)
|
||||
if (i == 0) {
|
||||
addActor.send(timeReply(readVersionReply, replyTimes));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -1383,7 +1424,7 @@ ACTOR Future<Void> masterProxyServerCore(
|
|||
TraceEvent(SevInfo, "CommitBatchesMemoryLimit").detail("BytesLimit", commitBatchesMemoryLimit);
|
||||
|
||||
addActor.send(monitorRemoteCommitted(&commitData, db));
|
||||
addActor.send(transactionStarter(proxy, master, db, addActor, &commitData));
|
||||
addActor.send(transactionStarter(proxy, db, addActor, &commitData));
|
||||
addActor.send(readRequestServer(proxy, &commitData));
|
||||
|
||||
// wait for txnStateStore recovery
|
||||
|
@ -1405,6 +1446,34 @@ ACTOR Future<Void> masterProxyServerCore(
|
|||
}
|
||||
commitData.logSystem->pop(commitData.lastTxsPop, txsTag, 0, tagLocalityRemoteLog);
|
||||
}
|
||||
|
||||
Optional<LatencyBandConfig> newLatencyBandConfig = db->get().latencyBandConfig;
|
||||
|
||||
if(newLatencyBandConfig.present() != commitData.latencyBandConfig.present()
|
||||
|| (newLatencyBandConfig.present() && newLatencyBandConfig.get().grvConfig != commitData.latencyBandConfig.get().grvConfig))
|
||||
{
|
||||
TraceEvent("LatencyBandGrvUpdatingConfig").detail("Present", newLatencyBandConfig.present());
|
||||
commitData.stats.grvLatencyBands.clearBands();
|
||||
if(newLatencyBandConfig.present()) {
|
||||
for(auto band : newLatencyBandConfig.get().grvConfig.bands) {
|
||||
commitData.stats.grvLatencyBands.addThreshold(band);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if(newLatencyBandConfig.present() != commitData.latencyBandConfig.present()
|
||||
|| (newLatencyBandConfig.present() && newLatencyBandConfig.get().commitConfig != commitData.latencyBandConfig.get().commitConfig))
|
||||
{
|
||||
TraceEvent("LatencyBandCommitUpdatingConfig").detail("Present", newLatencyBandConfig.present());
|
||||
commitData.stats.commitLatencyBands.clearBands();
|
||||
if(newLatencyBandConfig.present()) {
|
||||
for(auto band : newLatencyBandConfig.get().commitConfig.bands) {
|
||||
commitData.stats.commitLatencyBands.addThreshold(band);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
commitData.latencyBandConfig = newLatencyBandConfig;
|
||||
}
|
||||
when(wait(onError)) {}
|
||||
when(std::pair<vector<CommitTransactionRequest>, int> batchedRequests = waitNext(batchedCommits.getFuture())) {
|
||||
|
|
|
@ -387,11 +387,11 @@ ACTOR Future<Void> startMoveKeys( Database occ, KeyRange keys, vector<UID> serve
|
|||
return Void();
|
||||
}
|
||||
|
||||
ACTOR Future<Void> waitForShardReady( StorageServerInterface server, KeyRange keys, Version minVersion, Version recoveryVersion, GetShardStateRequest::waitMode mode){
|
||||
ACTOR Future<Void> waitForShardReady( StorageServerInterface server, KeyRange keys, Version minVersion, GetShardStateRequest::waitMode mode ) {
|
||||
loop {
|
||||
try {
|
||||
std::pair<Version,Version> rep = wait( server.getShardState.getReply( GetShardStateRequest(keys, mode), TaskMoveKeys ) );
|
||||
if (rep.first >= minVersion && (recoveryVersion == invalidVersion || rep.second >= recoveryVersion)) {
|
||||
if (rep.first >= minVersion) {
|
||||
return Void();
|
||||
}
|
||||
wait( delayJittered( SERVER_KNOBS->SHARD_READY_DELAY, TaskMoveKeys ) );
|
||||
|
@ -431,7 +431,7 @@ ACTOR Future<Void> checkFetchingState( Database cx, vector<UID> dest, KeyRange k
|
|||
}
|
||||
auto si = decodeServerListValue(serverListValues[s].get());
|
||||
ASSERT( si.id() == dest[s] );
|
||||
requests.push_back( waitForShardReady( si, keys, tr.getReadVersion().get(), invalidVersion, GetShardStateRequest::FETCHING ) );
|
||||
requests.push_back( waitForShardReady( si, keys, tr.getReadVersion().get(), GetShardStateRequest::FETCHING ) );
|
||||
}
|
||||
|
||||
wait( timeoutError( waitForAll( requests ),
|
||||
|
@ -452,7 +452,7 @@ ACTOR Future<Void> checkFetchingState( Database cx, vector<UID> dest, KeyRange k
|
|||
// keyServers[k].dest must be the same for all k in keys
|
||||
// Set serverKeys[dest][keys] = true; serverKeys[src][keys] = false for all src not in dest
|
||||
// Should be cancelled and restarted if keyServers[keys].dest changes (?so this is no longer true?)
|
||||
ACTOR Future<Void> finishMoveKeys( Database occ, KeyRange keys, vector<UID> destinationTeam, MoveKeysLock lock, FlowLock *finishMoveKeysParallelismLock, Version recoveryVersion, bool hasRemote, UID relocationIntervalId )
|
||||
ACTOR Future<Void> finishMoveKeys( Database occ, KeyRange keys, vector<UID> destinationTeam, MoveKeysLock lock, FlowLock *finishMoveKeysParallelismLock, bool hasRemote, UID relocationIntervalId )
|
||||
{
|
||||
state TraceInterval interval("RelocateShard_FinishMoveKeys");
|
||||
state TraceInterval waitInterval("");
|
||||
|
@ -626,7 +626,7 @@ ACTOR Future<Void> finishMoveKeys( Database occ, KeyRange keys, vector<UID> dest
|
|||
}
|
||||
|
||||
for(int s=0; s<storageServerInterfaces.size(); s++)
|
||||
serverReady.push_back( waitForShardReady( storageServerInterfaces[s], keys, tr.getReadVersion().get(), recoveryVersion, GetShardStateRequest::READABLE) );
|
||||
serverReady.push_back( waitForShardReady( storageServerInterfaces[s], keys, tr.getReadVersion().get(), GetShardStateRequest::READABLE) );
|
||||
wait( timeout( waitForAll( serverReady ), SERVER_KNOBS->SERVER_READY_QUORUM_TIMEOUT, Void(), TaskMoveKeys ) );
|
||||
int count = dest.size() - newDestinations.size();
|
||||
for(int s=0; s<serverReady.size(); s++)
|
||||
|
@ -881,7 +881,6 @@ ACTOR Future<Void> moveKeys(
|
|||
Promise<Void> dataMovementComplete,
|
||||
FlowLock *startMoveKeysParallelismLock,
|
||||
FlowLock *finishMoveKeysParallelismLock,
|
||||
Version recoveryVersion,
|
||||
bool hasRemote,
|
||||
UID relocationIntervalId)
|
||||
{
|
||||
|
@ -891,7 +890,7 @@ ACTOR Future<Void> moveKeys(
|
|||
|
||||
state Future<Void> completionSignaller = checkFetchingState( cx, healthyDestinations, keys, dataMovementComplete, relocationIntervalId );
|
||||
|
||||
wait( finishMoveKeys( cx, keys, destinationTeam, lock, finishMoveKeysParallelismLock, recoveryVersion, hasRemote, relocationIntervalId ) );
|
||||
wait( finishMoveKeys( cx, keys, destinationTeam, lock, finishMoveKeysParallelismLock, hasRemote, relocationIntervalId ) );
|
||||
|
||||
//This is defensive, but make sure that we always say that the movement is complete before moveKeys completes
|
||||
completionSignaller.cancel();
|
||||
|
|
|
@ -59,7 +59,6 @@ Future<Void> moveKeys(
|
|||
Promise<Void> const& dataMovementComplete,
|
||||
FlowLock* const& startMoveKeysParallelismLock,
|
||||
FlowLock* const& finishMoveKeysParallelismLock,
|
||||
Version const& recoveryVersion,
|
||||
bool const& hasRemote,
|
||||
UID const& relocationIntervalId); // for logging only
|
||||
// Eventually moves the given keys to the given destination team
|
||||
|
|
|
@ -64,32 +64,56 @@ ACTOR Future<WorkerInterface> getMasterWorker( Database cx, Reference<AsyncVar<S
|
|||
}
|
||||
}
|
||||
|
||||
//Gets the number of bytes in flight from the master
|
||||
ACTOR Future<int64_t> getDataInFlight( Database cx, WorkerInterface masterWorker ) {
|
||||
// Gets the WorkerInterface representing the data distributor.
|
||||
ACTOR Future<WorkerInterface> getDataDistributorWorker( Database cx, Reference<AsyncVar<ServerDBInfo>> dbInfo ) {
|
||||
TraceEvent("GetDataDistributorWorker").detail("Stage", "GettingWorkers");
|
||||
|
||||
loop {
|
||||
state vector<std::pair<WorkerInterface, ProcessClass>> workers = wait( getWorkers( dbInfo ) );
|
||||
if (!dbInfo->get().distributor.present()) continue;
|
||||
|
||||
for( int i = 0; i < workers.size(); i++ ) {
|
||||
if( workers[i].first.address() == dbInfo->get().distributor.get().address() ) {
|
||||
TraceEvent("GetDataDistributorWorker").detail("Stage", "GotWorkers")
|
||||
.detail("DataDistributorId", dbInfo->get().distributor.get().id())
|
||||
.detail("WorkerId", workers[i].first.id());
|
||||
return workers[i].first;
|
||||
}
|
||||
}
|
||||
|
||||
TraceEvent(SevWarn, "GetDataDistributorWorker")
|
||||
.detail("Error", "DataDistributorWorkerNotFound")
|
||||
.detail("DataDistributorId", dbInfo->get().distributor.get().id())
|
||||
.detail("DataDistributorAddress", dbInfo->get().distributor.get().address())
|
||||
.detail("WorkerCount", workers.size());
|
||||
}
|
||||
}
|
||||
|
||||
// Gets the number of bytes in flight from the data distributor.
|
||||
ACTOR Future<int64_t> getDataInFlight( Database cx, WorkerInterface distributorWorker ) {
|
||||
try {
|
||||
TraceEvent("DataInFlight").detail("Stage", "ContactingMaster");
|
||||
TraceEventFields md = wait( timeoutError(masterWorker.eventLogRequest.getReply(
|
||||
TraceEvent("DataInFlight").detail("Stage", "ContactingDataDistributor");
|
||||
TraceEventFields md = wait( timeoutError(distributorWorker.eventLogRequest.getReply(
|
||||
EventLogRequest( LiteralStringRef("TotalDataInFlight") ) ), 1.0 ) );
|
||||
int64_t dataInFlight;
|
||||
sscanf(md.getValue("TotalBytes").c_str(), "%lld", &dataInFlight);
|
||||
return dataInFlight;
|
||||
} catch( Error &e ) {
|
||||
TraceEvent("QuietDatabaseFailure", masterWorker.id()).error(e).detail("Reason", "Failed to extract DataInFlight");
|
||||
TraceEvent("QuietDatabaseFailure", distributorWorker.id()).error(e).detail("Reason", "Failed to extract DataInFlight");
|
||||
throw;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
//Gets the number of bytes in flight from the master
|
||||
//Convenience method that first finds the master worker from a zookeeper interface
|
||||
// Gets the number of bytes in flight from the data distributor.
|
||||
ACTOR Future<int64_t> getDataInFlight( Database cx, Reference<AsyncVar<ServerDBInfo>> dbInfo ) {
|
||||
WorkerInterface masterWorker = wait(getMasterWorker(cx, dbInfo));
|
||||
int64_t dataInFlight = wait(getDataInFlight(cx, masterWorker));
|
||||
WorkerInterface distributorInterf = wait( getDataDistributorWorker(cx, dbInfo) );
|
||||
int64_t dataInFlight = wait(getDataInFlight(cx, distributorInterf));
|
||||
return dataInFlight;
|
||||
}
|
||||
|
||||
//Computes the queue size for storage servers and tlogs using the bytesInput and bytesDurable attributes
int64_t getQueueSize( TraceEventFields md ) {
int64_t getQueueSize( const TraceEventFields& md ) {
double inputRate, durableRate;
double inputRoughness, durableRoughness;
int64_t inputBytes, durableBytes;
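Only the declarations of getQueueSize() are visible in this hunk, but per the comment it derives a queue size from the bytesInput and bytesDurable counters; in effect the backlog is bytes received minus bytes made durable. A trivial standalone statement of that idea, under that assumption:

int64_t queueSizeFromCounters( int64_t bytesInput, int64_t bytesDurable ) {
    return bytesInput - bytesDurable;   // bytes still sitting in the queue
}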
|
||||
|
@ -101,7 +125,7 @@ int64_t getQueueSize( TraceEventFields md ) {
}

// This is not robust in the face of a TLog failure
ACTOR Future<int64_t> getMaxTLogQueueSize( Database cx, Reference<AsyncVar<ServerDBInfo>> dbInfo, WorkerInterface masterWorker ) {
ACTOR Future<int64_t> getMaxTLogQueueSize( Database cx, Reference<AsyncVar<ServerDBInfo>> dbInfo ) {
TraceEvent("MaxTLogQueueSize").detail("Stage", "ContactingLogs");

state std::vector<std::pair<WorkerInterface, ProcessClass>> workers = wait(getWorkers(dbInfo));

@ -139,12 +163,6 @@ ACTOR Future<int64_t> getMaxTLogQueueSize( Database cx, Reference<AsyncVar<Serve
return maxQueueSize;
}

ACTOR Future<int64_t> getMaxTLogQueueSize( Database cx, Reference<AsyncVar<ServerDBInfo>> dbInfo ) {
WorkerInterface masterWorker = wait(getMasterWorker(cx, dbInfo));
int64_t maxQueueSize = wait(getMaxTLogQueueSize(cx, dbInfo, masterWorker));
return maxQueueSize;
}

ACTOR Future<vector<StorageServerInterface>> getStorageServers( Database cx, bool use_system_priority = false) {
state Transaction tr( cx );
if (use_system_priority)

@ -167,7 +185,7 @@ ACTOR Future<vector<StorageServerInterface>> getStorageServers( Database cx, boo
}

//Gets the maximum size of all the storage server queues
ACTOR Future<int64_t> getMaxStorageServerQueueSize( Database cx, Reference<AsyncVar<ServerDBInfo>> dbInfo, WorkerInterface masterWorker ) {
ACTOR Future<int64_t> getMaxStorageServerQueueSize( Database cx, Reference<AsyncVar<ServerDBInfo>> dbInfo ) {
TraceEvent("MaxStorageServerQueueSize").detail("Stage", "ContactingStorageServers");

Future<std::vector<StorageServerInterface>> serversFuture = getStorageServers(cx);

@ -202,7 +220,7 @@ ACTOR Future<int64_t> getMaxStorageServerQueueSize( Database cx, Reference<Async
try {
maxQueueSize = std::max( maxQueueSize, getQueueSize( messages[i].get() ) );
} catch( Error &e ) {
TraceEvent("QuietDatabaseFailure", masterWorker.id()).detail("Reason", "Failed to extract MaxStorageServerQueue").detail("SS", servers[i].id());
TraceEvent("QuietDatabaseFailure").detail("Reason", "Failed to extract MaxStorageServerQueue").detail("SS", servers[i].id());
throw;
}
}

@ -210,20 +228,12 @@ ACTOR Future<int64_t> getMaxStorageServerQueueSize( Database cx, Reference<Async
return maxQueueSize;
}

//Gets the maximum size of all the storage server queues
//Convenience method that first gets the master worker and system map from a zookeeper interface
ACTOR Future<int64_t> getMaxStorageServerQueueSize( Database cx, Reference<AsyncVar<ServerDBInfo>> dbInfo ) {
WorkerInterface masterWorker = wait(getMasterWorker(cx, dbInfo));
int64_t maxQueueSize = wait(getMaxStorageServerQueueSize(cx, dbInfo, masterWorker));
return maxQueueSize;
}

//Gets the size of the data distribution queue. If reportInFlight is true, then data in flight is considered part of the queue
ACTOR Future<int64_t> getDataDistributionQueueSize( Database cx, WorkerInterface masterWorker, bool reportInFlight) {
ACTOR Future<int64_t> getDataDistributionQueueSize( Database cx, WorkerInterface distributorWorker, bool reportInFlight) {
try {
TraceEvent("DataDistributionQueueSize").detail("Stage", "ContactingMaster");
TraceEvent("DataDistributionQueueSize").detail("Stage", "ContactingDataDistributor");

TraceEventFields movingDataMessage = wait( timeoutError(masterWorker.eventLogRequest.getReply(
TraceEventFields movingDataMessage = wait( timeoutError(distributorWorker.eventLogRequest.getReply(
EventLogRequest( LiteralStringRef("MovingData") ) ), 1.0 ) );

TraceEvent("DataDistributionQueueSize").detail("Stage", "GotString");

@ -239,7 +249,7 @@ ACTOR Future<int64_t> getDataDistributionQueueSize( Database cx, WorkerInterface

return inQueue;
} catch( Error &e ) {
TraceEvent("QuietDatabaseFailure", masterWorker.id()).detail("Reason", "Failed to extract DataDistributionQueueSize");
TraceEvent("QuietDatabaseFailure", distributorWorker.id()).detail("Reason", "Failed to extract DataDistributionQueueSize");
throw;
}
}

@ -247,37 +257,39 @@ ACTOR Future<int64_t> getDataDistributionQueueSize( Database cx, WorkerInterface
//Gets the size of the data distribution queue. If reportInFlight is true, then data in flight is considered part of the queue
//Convenience method that first finds the master worker from a zookeeper interface
ACTOR Future<int64_t> getDataDistributionQueueSize( Database cx, Reference<AsyncVar<ServerDBInfo>> dbInfo, bool reportInFlight ) {
WorkerInterface masterWorker = wait(getMasterWorker(cx, dbInfo));
int64_t inQueue = wait(getDataDistributionQueueSize( cx, masterWorker, reportInFlight));
WorkerInterface distributorInterf = wait( getDataDistributorWorker(cx, dbInfo) );
int64_t inQueue = wait( getDataDistributionQueueSize( cx, distributorInterf, reportInFlight) );
return inQueue;
}

//Checks that data distribution is active
ACTOR Future<bool> getDataDistributionActive( Database cx, WorkerInterface masterWorker ) {
// Checks that data distribution is active
ACTOR Future<bool> getDataDistributionActive( Database cx, WorkerInterface distributorWorker ) {
try {
TraceEvent("DataDistributionActive").detail("Stage", "ContactingMaster");
TraceEvent("DataDistributionActive").detail("Stage", "ContactingDataDistributor");

TraceEventFields activeMessage = wait( timeoutError(masterWorker.eventLogRequest.getReply(
TraceEventFields activeMessage = wait( timeoutError(distributorWorker.eventLogRequest.getReply(
EventLogRequest( LiteralStringRef("DDTrackerStarting") ) ), 1.0 ) );

return activeMessage.getValue("State") == "Active";
} catch( Error &e ) {
TraceEvent("QuietDatabaseFailure", masterWorker.id()).detail("Reason", "Failed to extract DataDistributionActive");
TraceEvent("QuietDatabaseFailure", distributorWorker.id()).detail("Reason", "Failed to extract DataDistributionActive");
throw;
}
}

//Checks to see if any storage servers are being recruited
ACTOR Future<bool> getStorageServersRecruiting( Database cx, Reference<AsyncVar<ServerDBInfo>> dbInfo, WorkerInterface masterWorker ) {
// Checks to see if any storage servers are being recruited
ACTOR Future<bool> getStorageServersRecruiting( Database cx, WorkerInterface distributorWorker, UID distributorUID ) {
try {
TraceEvent("StorageServersRecruiting").detail("Stage", "ContactingMaster");

TraceEventFields recruitingMessage = wait( timeoutError(masterWorker.eventLogRequest.getReply(
EventLogRequest( StringRef( "StorageServerRecruitment_" + dbInfo->get().master.id().toString()) ) ), 1.0 ) );
TraceEvent("StorageServersRecruiting").detail("Stage", "ContactingDataDistributor");
TraceEventFields recruitingMessage = wait( timeoutError(distributorWorker.eventLogRequest.getReply(
EventLogRequest( StringRef( "StorageServerRecruitment_" + distributorUID.toString()) ) ), 1.0 ) );

TraceEvent("StorageServersRecruiting").detail("Message", recruitingMessage.toString());
return recruitingMessage.getValue("State") == "Recruiting";
} catch( Error &e ) {
TraceEvent("QuietDatabaseFailure", masterWorker.id()).detail("Reason", "Failed to extract StorageServersRecruiting").detail("MasterID", dbInfo->get().master.id());
TraceEvent("QuietDatabaseFailure", distributorWorker.id())
.detail("Reason", "Failed to extract StorageServersRecruiting")
.detail("DataDistributorID", distributorUID);
throw;
}
}

@ -323,16 +335,17 @@ ACTOR Future<Void> waitForQuietDatabase( Database cx, Reference<AsyncVar<ServerD

loop {
try {
TraceEvent("QuietDatabaseWaitingOnMaster");
WorkerInterface masterWorker = wait(getMasterWorker( cx, dbInfo ));
TraceEvent("QuietDatabaseGotMaster");
TraceEvent("QuietDatabaseWaitingOnDataDistributor");
WorkerInterface distributorWorker = wait( getDataDistributorWorker( cx, dbInfo ) );
UID distributorUID = dbInfo->get().distributor.get().id();
TraceEvent("QuietDatabaseGotDataDistributor", distributorUID).detail("Locality", distributorWorker.locality.toString());

state Future<int64_t> dataInFlight = getDataInFlight( cx, masterWorker);
state Future<int64_t> tLogQueueSize = getMaxTLogQueueSize( cx, dbInfo, masterWorker );
state Future<int64_t> dataDistributionQueueSize = getDataDistributionQueueSize( cx, masterWorker, dataInFlightGate == 0);
state Future<int64_t> storageQueueSize = getMaxStorageServerQueueSize( cx, dbInfo, masterWorker );
state Future<bool> dataDistributionActive = getDataDistributionActive( cx, masterWorker );
state Future<bool> storageServersRecruiting = getStorageServersRecruiting ( cx, dbInfo, masterWorker );
state Future<int64_t> dataInFlight = getDataInFlight( cx, distributorWorker);
state Future<int64_t> tLogQueueSize = getMaxTLogQueueSize( cx, dbInfo );
state Future<int64_t> dataDistributionQueueSize = getDataDistributionQueueSize( cx, distributorWorker, dataInFlightGate == 0);
state Future<int64_t> storageQueueSize = getMaxStorageServerQueueSize( cx, dbInfo );
state Future<bool> dataDistributionActive = getDataDistributionActive( cx, distributorWorker );
state Future<bool> storageServersRecruiting = getStorageServersRecruiting ( cx, distributorWorker, distributorUID );

wait( success( dataInFlight ) && success( tLogQueueSize ) && success( dataDistributionQueueSize )
&& success( storageQueueSize ) && success( dataDistributionActive ) && success( storageServersRecruiting ) );

@ -23,9 +23,11 @@
#pragma once

#include "fdbserver/ClusterRecruitmentInterface.h"
#include "fdbserver/DataDistributorInterface.h"
#include "fdbserver/MasterInterface.h"
#include "fdbserver/LogSystemConfig.h"
#include "fdbserver/RecoveryState.h"
#include "fdbserver/LatencyBandConfig.h"

struct ServerDBInfo {
// This structure contains transient information which is broadcast to all workers for a database,

@ -35,6 +37,7 @@ struct ServerDBInfo {
UID id; // Changes each time any other member changes
ClusterControllerFullInterface clusterInterface;
ClientDBInfo client; // After a successful recovery, eventually proxies that communicate with it
Optional<DataDistributorInterface> distributor; // The best guess of current data distributor.
MasterInterface master; // The best guess as to the most recent master, which might still be recovering
vector<ResolverInterface> resolvers;
DBRecoveryCount recoveryCount; // A recovery count from DBCoreState. A successful master recovery increments it twice; unsuccessful recoveries may increment it once. Depending on where the current master is in its recovery process, this might not have been written by the current master.

@ -43,6 +46,7 @@ struct ServerDBInfo {
LocalityData myLocality; // (Not serialized) Locality information, if available, for the *local* process
LogSystemConfig logSystemConfig;
std::vector<UID> priorCommittedLogServers; // If !fullyRecovered and logSystemConfig refers to a new log system which may not have been committed to the coordinated state yet, then priorCommittedLogServers are the previous, fully committed generation which need to stay alive in case this recovery fails
Optional<LatencyBandConfig> latencyBandConfig;

explicit ServerDBInfo() : recoveryCount(0), recoveryState(RecoveryState::UNINITIALIZED) {}

@ -51,7 +55,7 @@ struct ServerDBInfo {

template <class Ar>
void serialize( Ar& ar ) {
serializer(ar, id, clusterInterface, client, master, resolvers, recoveryCount, masterLifetime, logSystemConfig, priorCommittedLogServers, recoveryState);
serializer(ar, id, clusterInterface, client, distributor, master, resolvers, recoveryCount, recoveryState, masterLifetime, logSystemConfig, priorCommittedLogServers, latencyBandConfig);
}
};

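// Since the new distributor member is an Optional<>, every reader of ServerDBInfo has to handle
// the case where no data distributor is known yet, as the present() checks elsewhere in this
// commit do. A minimal standalone analogue using std::optional as a stand-in for Flow's Optional;
// illustrative only, the names below are hypothetical and not from the FoundationDB sources:
#include <iostream>
#include <optional>
#include <string>

struct DataDistributorInterfaceStub { std::string id; }; // hypothetical stand-in type

int main() {
	std::optional<DataDistributorInterfaceStub> distributor; // empty until a distributor registers
	// Mirrors the "distributor.present() ? distributor.get().id() : UID()" pattern in the diff.
	std::string shownId = distributor.has_value() ? distributor->id : std::string("0000");
	std::cout << "DataDistributorID=" << shownId << "\n";
	return 0;
}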
@ -80,7 +80,8 @@ extern int limitReasonEnd;
extern const char* limitReasonName[];
extern const char* limitReasonDesc[];

struct WorkerEvents : std::map<NetworkAddress, TraceEventFields> {};
struct WorkerEvents : std::map<NetworkAddress, TraceEventFields> {};
typedef std::map<std::string, TraceEventFields> EventMap;

ACTOR static Future< Optional<TraceEventFields> > latestEventOnWorker(WorkerInterface worker, std::string eventName) {
try {

@ -160,39 +161,52 @@ static Optional<std::pair<WorkerInterface, ProcessClass>> getWorker(std::map<Net
}

class StatusCounter {
public:
StatusCounter(double hz=0.0, double roughness=0.0, int64_t counter=0) : _hz(hz), _roughness(roughness), _counter(counter) {}
StatusCounter(const std::string& parsableText) {
parseText(parsableText);
}
public:
StatusCounter() : hz(0), roughness(0), counter(0) {}
StatusCounter(double hz, double roughness, int64_t counter) : hz(hz), roughness(roughness), counter(counter) {}
StatusCounter(const std::string& parsableText) {
parseText(parsableText);
}

StatusCounter& parseText(const std::string& parsableText) {
sscanf(parsableText.c_str(), "%lf %lf %lld", &_hz, &_roughness, &_counter);
return *this;
}
StatusCounter& parseText(const std::string& parsableText) {
sscanf(parsableText.c_str(), "%lf %lf %lld", &hz, &roughness, &counter);
return *this;
}

StatusCounter& updateValues(const StatusCounter& statusCounter) {
double hzNew = _hz + statusCounter._hz;
double roughnessNew = (_hz + statusCounter._hz) ? (_roughness*_hz + statusCounter._roughness*statusCounter._hz) / (_hz + statusCounter._hz) : 0.0;
int64_t counterNew = _counter + statusCounter._counter;
_hz = hzNew;
_roughness = roughnessNew;
_counter = counterNew;
return *this;
}
StatusCounter& updateValues(const StatusCounter& statusCounter) {
double hzNew = hz + statusCounter.hz;
double roughnessNew = (hz + statusCounter.hz) ? (roughness*hz + statusCounter.roughness*statusCounter.hz) / (hz + statusCounter.hz) : 0.0;
int64_t counterNew = counter + statusCounter.counter;
hz = hzNew;
roughness = roughnessNew;
counter = counterNew;
return *this;
}

JsonBuilderObject getStatus() const {
JsonBuilderObject statusObject;
statusObject["hz"] = _hz;
statusObject["roughness"] = _roughness;
statusObject["counter"] = _counter;
return statusObject;
}
JsonBuilderObject getStatus() const {
JsonBuilderObject statusObject;
statusObject["hz"] = hz;
statusObject["roughness"] = roughness;
statusObject["counter"] = counter;
return statusObject;
}

protected:
double _hz;
double _roughness;
int64_t _counter;
double getHz() {
return hz;
}

double getRoughness() {
return roughness;
}

int64_t getCounter() {
return counter;
}

protected:
double hz;
double roughness;
int64_t counter;
};

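// The hz-weighted merge in updateValues() above can be checked with a small standalone sketch
// (a simplified copy of the arithmetic, not the class itself): roughness is averaged weighted by
// each side's hz, while hz and counter simply add.
#include <cassert>
#include <cstdint>
#include <cstdio>

struct CounterSketch { double hz = 0, roughness = 0; int64_t counter = 0; };

CounterSketch merge(CounterSketch a, const CounterSketch& b) {
	double hzNew = a.hz + b.hz;
	double roughnessNew = hzNew ? (a.roughness * a.hz + b.roughness * b.hz) / hzNew : 0.0;
	a.hz = hzNew;
	a.roughness = roughnessNew;
	a.counter += b.counter;
	return a;
}

int main() {
	// Merging (hz=2, roughness=0.5, counter=10) with (hz=3, roughness=1.0, counter=20)
	// gives roughness (0.5*2 + 1.0*3) / (2+3) = 0.8, hz 5 and counter 30.
	CounterSketch merged = merge({2.0, 0.5, 10}, {3.0, 1.0, 20});
	printf("hz=%g roughness=%g counter=%lld\n", merged.hz, merged.roughness, (long long)merged.counter);
	assert(merged.hz == 5.0 && merged.counter == 30);
	return 0;
}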
static double parseDouble(std::string const& s, bool permissive = false) {
|
||||
|
@ -290,7 +304,7 @@ static JsonBuilderObject machineStatusFetcher(WorkerEvents mMetrics, vector<std:
|
|||
std::map<std::string, int32_t> workerContribMap;
|
||||
std::map<std::string, JsonBuilderObject> machineJsonMap;
|
||||
|
||||
for (auto worker : workers){
|
||||
for (auto const& worker : workers){
|
||||
locality[worker.first.address()] = worker.first.locality;
|
||||
if (worker.first.locality.dcId().present())
|
||||
dcIds[worker.first.address()] = worker.first.locality.dcId().get().printable();
|
||||
|
@ -394,40 +408,65 @@ struct MachineMemoryInfo {
|
|||
|
||||
struct RolesInfo {
|
||||
std::multimap<NetworkAddress, JsonBuilderObject> roles;
|
||||
|
||||
JsonBuilderObject addLatencyBandInfo(TraceEventFields const& metrics) {
|
||||
JsonBuilderObject latency;
|
||||
std::map<std::string, JsonBuilderObject> bands;
|
||||
|
||||
for(auto itr = metrics.begin(); itr != metrics.end(); ++itr) {
|
||||
std::string band;
|
||||
if(itr->first.substr(0, 4) == "Band") {
|
||||
band = itr->first.substr(4);
|
||||
}
|
||||
else if(itr->first == "Filtered") {
|
||||
band = "filtered";
|
||||
}
|
||||
else {
|
||||
continue;
|
||||
}
|
||||
|
||||
latency[band] = StatusCounter(itr->second).getCounter();
|
||||
}
|
||||
|
||||
return latency;
|
||||
}
|
||||
|
||||
JsonBuilderObject& addRole( NetworkAddress address, std::string const& role, UID id) {
|
||||
JsonBuilderObject obj;
|
||||
obj["id"] = id.shortString();
|
||||
obj["role"] = role;
|
||||
return roles.insert( std::make_pair(address, obj ))->second;
|
||||
}
|
||||
JsonBuilderObject& addRole(std::string const& role, StorageServerInterface& iface, TraceEventFields const& metrics, Version maxTLogVersion, double* pDataLagSeconds) {
|
||||
JsonBuilderObject& addRole(std::string const& role, StorageServerInterface& iface, EventMap const& metrics, Version maxTLogVersion, double* pDataLagSeconds) {
|
||||
JsonBuilderObject obj;
|
||||
double dataLagSeconds = -1.0;
|
||||
obj["id"] = iface.id().shortString();
|
||||
obj["role"] = role;
|
||||
try {
|
||||
obj.setKeyRawNumber("stored_bytes", metrics.getValue("BytesStored"));
|
||||
obj.setKeyRawNumber("kvstore_used_bytes", metrics.getValue("KvstoreBytesUsed"));
|
||||
obj.setKeyRawNumber("kvstore_free_bytes", metrics.getValue("KvstoreBytesFree"));
|
||||
obj.setKeyRawNumber("kvstore_available_bytes", metrics.getValue("KvstoreBytesAvailable"));
|
||||
obj.setKeyRawNumber("kvstore_total_bytes", metrics.getValue("KvstoreBytesTotal"));
|
||||
obj["input_bytes"] = StatusCounter(metrics.getValue("BytesInput")).getStatus();
|
||||
obj["durable_bytes"] = StatusCounter(metrics.getValue("BytesDurable")).getStatus();
|
||||
obj.setKeyRawNumber("query_queue_max", metrics.getValue("QueryQueueMax"));
|
||||
obj["total_queries"] = StatusCounter(metrics.getValue("QueryQueue")).getStatus();
|
||||
obj["finished_queries"] = StatusCounter(metrics.getValue("FinishedQueries")).getStatus();
|
||||
obj["bytes_queried"] = StatusCounter(metrics.getValue("BytesQueried")).getStatus();
|
||||
obj["keys_queried"] = StatusCounter(metrics.getValue("RowsQueried")).getStatus();
|
||||
obj["mutation_bytes"] = StatusCounter(metrics.getValue("MutationBytes")).getStatus();
|
||||
obj["mutations"] = StatusCounter(metrics.getValue("Mutations")).getStatus();
|
||||
TraceEventFields const& storageMetrics = metrics.at("StorageMetrics");
|
||||
|
||||
Version version = parseInt64(metrics.getValue("Version"));
|
||||
Version durableVersion = parseInt64(metrics.getValue("DurableVersion"));
|
||||
obj.setKeyRawNumber("stored_bytes", storageMetrics.getValue("BytesStored"));
|
||||
obj.setKeyRawNumber("kvstore_used_bytes", storageMetrics.getValue("KvstoreBytesUsed"));
|
||||
obj.setKeyRawNumber("kvstore_free_bytes", storageMetrics.getValue("KvstoreBytesFree"));
|
||||
obj.setKeyRawNumber("kvstore_available_bytes", storageMetrics.getValue("KvstoreBytesAvailable"));
|
||||
obj.setKeyRawNumber("kvstore_total_bytes", storageMetrics.getValue("KvstoreBytesTotal"));
|
||||
obj["input_bytes"] = StatusCounter(storageMetrics.getValue("BytesInput")).getStatus();
|
||||
obj["durable_bytes"] = StatusCounter(storageMetrics.getValue("BytesDurable")).getStatus();
|
||||
obj.setKeyRawNumber("query_queue_max", storageMetrics.getValue("QueryQueueMax"));
|
||||
obj["total_queries"] = StatusCounter(storageMetrics.getValue("QueryQueue")).getStatus();
|
||||
obj["finished_queries"] = StatusCounter(storageMetrics.getValue("FinishedQueries")).getStatus();
|
||||
obj["bytes_queried"] = StatusCounter(storageMetrics.getValue("BytesQueried")).getStatus();
|
||||
obj["keys_queried"] = StatusCounter(storageMetrics.getValue("RowsQueried")).getStatus();
|
||||
obj["mutation_bytes"] = StatusCounter(storageMetrics.getValue("MutationBytes")).getStatus();
|
||||
obj["mutations"] = StatusCounter(storageMetrics.getValue("Mutations")).getStatus();
|
||||
|
||||
Version version = parseInt64(storageMetrics.getValue("Version"));
|
||||
Version durableVersion = parseInt64(storageMetrics.getValue("DurableVersion"));
|
||||
|
||||
obj["data_version"] = version;
|
||||
obj["durable_version"] = durableVersion;
|
||||
|
||||
int64_t versionLag = parseInt64(metrics.getValue("VersionLag"));
|
||||
int64_t versionLag = parseInt64(storageMetrics.getValue("VersionLag"));
|
||||
if(maxTLogVersion > 0) {
|
||||
// It's possible that the storage server hasn't talked to the logs recently, in which case it may not be aware of how far behind it is.
|
||||
// To account for that, we also compute the version difference between each storage server and the tlog with the largest version.
|
||||
|
@ -437,6 +476,11 @@ struct RolesInfo {
|
|||
versionLag = std::max<int64_t>(versionLag, maxTLogVersion - version - SERVER_KNOBS->STORAGE_LOGGING_DELAY * SERVER_KNOBS->VERSIONS_PER_SECOND);
|
||||
}
|
||||
|
||||
TraceEventFields const& readLatencyMetrics = metrics.at("ReadLatencyMetrics");
|
||||
if(readLatencyMetrics.size()) {
|
||||
obj["read_latency_bands"] = addLatencyBandInfo(readLatencyMetrics);
|
||||
}
|
||||
|
||||
JsonBuilderObject dataLag;
|
||||
dataLag["versions"] = versionLag;
|
||||
dataLagSeconds = versionLag / (double)SERVER_KNOBS->VERSIONS_PER_SECOND;
|
||||
|
@ -453,27 +497,32 @@ struct RolesInfo {
|
|||
if(e.code() != error_code_attribute_not_found)
|
||||
throw e;
|
||||
}
|
||||
if (pDataLagSeconds)
|
||||
|
||||
if (pDataLagSeconds) {
|
||||
*pDataLagSeconds = dataLagSeconds;
|
||||
}
|
||||
|
||||
return roles.insert( std::make_pair(iface.address(), obj ))->second;
|
||||
}
|
||||
JsonBuilderObject& addRole(std::string const& role, TLogInterface& iface, TraceEventFields const& metrics, Version* pMetricVersion) {
|
||||
JsonBuilderObject& addRole(std::string const& role, TLogInterface& iface, EventMap const& metrics, Version* pMetricVersion) {
|
||||
JsonBuilderObject obj;
|
||||
Version metricVersion = 0;
|
||||
obj["id"] = iface.id().shortString();
|
||||
obj["role"] = role;
|
||||
try {
|
||||
obj.setKeyRawNumber("kvstore_used_bytes",metrics.getValue("KvstoreBytesUsed"));
|
||||
obj.setKeyRawNumber("kvstore_free_bytes",metrics.getValue("KvstoreBytesFree"));
|
||||
obj.setKeyRawNumber("kvstore_available_bytes",metrics.getValue("KvstoreBytesAvailable"));
|
||||
obj.setKeyRawNumber("kvstore_total_bytes",metrics.getValue("KvstoreBytesTotal"));
|
||||
obj.setKeyRawNumber("queue_disk_used_bytes",metrics.getValue("QueueDiskBytesUsed"));
|
||||
obj.setKeyRawNumber("queue_disk_free_bytes",metrics.getValue("QueueDiskBytesFree"));
|
||||
obj.setKeyRawNumber("queue_disk_available_bytes",metrics.getValue("QueueDiskBytesAvailable"));
|
||||
obj.setKeyRawNumber("queue_disk_total_bytes",metrics.getValue("QueueDiskBytesTotal"));
|
||||
obj["input_bytes"] = StatusCounter(metrics.getValue("BytesInput")).getStatus();
|
||||
obj["durable_bytes"] = StatusCounter(metrics.getValue("BytesDurable")).getStatus();
|
||||
metricVersion = parseInt64(metrics.getValue("Version"));
|
||||
TraceEventFields const& tlogMetrics = metrics.at("TLogMetrics");
|
||||
|
||||
obj.setKeyRawNumber("kvstore_used_bytes", tlogMetrics.getValue("KvstoreBytesUsed"));
|
||||
obj.setKeyRawNumber("kvstore_free_bytes", tlogMetrics.getValue("KvstoreBytesFree"));
|
||||
obj.setKeyRawNumber("kvstore_available_bytes", tlogMetrics.getValue("KvstoreBytesAvailable"));
|
||||
obj.setKeyRawNumber("kvstore_total_bytes", tlogMetrics.getValue("KvstoreBytesTotal"));
|
||||
obj.setKeyRawNumber("queue_disk_used_bytes", tlogMetrics.getValue("QueueDiskBytesUsed"));
|
||||
obj.setKeyRawNumber("queue_disk_free_bytes", tlogMetrics.getValue("QueueDiskBytesFree"));
|
||||
obj.setKeyRawNumber("queue_disk_available_bytes", tlogMetrics.getValue("QueueDiskBytesAvailable"));
|
||||
obj.setKeyRawNumber("queue_disk_total_bytes", tlogMetrics.getValue("QueueDiskBytesTotal"));
|
||||
obj["input_bytes"] = StatusCounter(tlogMetrics.getValue("BytesInput")).getStatus();
|
||||
obj["durable_bytes"] = StatusCounter(tlogMetrics.getValue("BytesDurable")).getStatus();
|
||||
metricVersion = parseInt64(tlogMetrics.getValue("Version"));
|
||||
obj["data_version"] = metricVersion;
|
||||
} catch (Error& e) {
|
||||
if(e.code() != error_code_attribute_not_found)
|
||||
|
@ -483,6 +532,28 @@ struct RolesInfo {
|
|||
*pMetricVersion = metricVersion;
|
||||
return roles.insert( std::make_pair(iface.address(), obj ))->second;
|
||||
}
|
||||
JsonBuilderObject& addRole(std::string const& role, MasterProxyInterface& iface, EventMap const& metrics) {
|
||||
JsonBuilderObject obj;
|
||||
obj["id"] = iface.id().shortString();
|
||||
obj["role"] = role;
|
||||
try {
|
||||
TraceEventFields const& grvLatencyMetrics = metrics.at("GRVLatencyMetrics");
|
||||
if(grvLatencyMetrics.size()) {
|
||||
obj["grv_latency_bands"] = addLatencyBandInfo(grvLatencyMetrics);
|
||||
}
|
||||
|
||||
TraceEventFields const& commitLatencyMetrics = metrics.at("CommitLatencyMetrics");
|
||||
if(commitLatencyMetrics.size()) {
|
||||
obj["commit_latency_bands"] = addLatencyBandInfo(commitLatencyMetrics);
|
||||
}
|
||||
} catch (Error &e) {
|
||||
if(e.code() != error_code_attribute_not_found) {
|
||||
throw e;
|
||||
}
|
||||
}
|
||||
|
||||
return roles.insert( std::make_pair(iface.address(), obj ))->second;
|
||||
}
|
||||
template <class InterfaceType>
|
||||
JsonBuilderObject& addRole(std::string const& role, InterfaceType& iface) {
|
||||
return addRole(iface.address(), role, iface.id());
|
||||
|
@ -507,8 +578,9 @@ ACTOR static Future<JsonBuilderObject> processStatusFetcher(
|
|||
WorkerEvents traceFileOpenErrors,
|
||||
WorkerEvents programStarts,
|
||||
std::map<std::string, JsonBuilderObject> processIssues,
|
||||
vector<std::pair<StorageServerInterface, TraceEventFields>> storageServers,
|
||||
vector<std::pair<TLogInterface, TraceEventFields>> tLogs,
|
||||
vector<std::pair<StorageServerInterface, EventMap>> storageServers,
|
||||
vector<std::pair<TLogInterface, EventMap>> tLogs,
|
||||
vector<std::pair<MasterProxyInterface, EventMap>> proxies,
|
||||
Database cx,
|
||||
Optional<DatabaseConfiguration> configuration,
|
||||
std::set<std::string> *incomplete_reasons) {
|
||||
|
@ -567,16 +639,13 @@ ACTOR static Future<JsonBuilderObject> processStatusFetcher(
|
|||
roles.addRole("master", db->get().master);
|
||||
roles.addRole("cluster_controller", db->get().clusterInterface.clientInterface);
|
||||
|
||||
state Reference<ProxyInfo> proxies = cx->getMasterProxies();
|
||||
if (proxies) {
|
||||
state int proxyIndex;
|
||||
for(proxyIndex = 0; proxyIndex < proxies->size(); proxyIndex++) {
|
||||
roles.addRole( "proxy", proxies->getInterface(proxyIndex) );
|
||||
wait(yield());
|
||||
}
|
||||
state std::vector<std::pair<MasterProxyInterface, EventMap>>::iterator proxy;
|
||||
for(proxy = proxies.begin(); proxy != proxies.end(); ++proxy) {
|
||||
roles.addRole( "proxy", proxy->first, proxy->second );
|
||||
wait(yield());
|
||||
}
|
||||
|
||||
state std::vector<std::pair<TLogInterface, TraceEventFields>>::iterator log;
|
||||
state std::vector<std::pair<TLogInterface, EventMap>>::iterator log;
|
||||
state Version maxTLogVersion = 0;
|
||||
|
||||
// Get largest TLog version
|
||||
|
@ -587,7 +656,7 @@ ACTOR static Future<JsonBuilderObject> processStatusFetcher(
|
|||
wait(yield());
|
||||
}
|
||||
|
||||
state std::vector<std::pair<StorageServerInterface, TraceEventFields>>::iterator ss;
|
||||
state std::vector<std::pair<StorageServerInterface, EventMap>>::iterator ss;
|
||||
state std::map<NetworkAddress, double> ssLag;
|
||||
state double lagSeconds;
|
||||
for(ss = storageServers.begin(); ss != storageServers.end(); ++ss) {
|
||||
|
@ -1217,37 +1286,68 @@ namespace std
|
|||
}
|
||||
|
||||
ACTOR template <class iface>
|
||||
static Future<vector<std::pair<iface, TraceEventFields>>> getServerMetrics(vector<iface> servers, std::unordered_map<NetworkAddress, WorkerInterface> address_workers, std::string suffix) {
|
||||
static Future<vector<std::pair<iface, EventMap>>> getServerMetrics(vector<iface> servers, std::unordered_map<NetworkAddress, WorkerInterface> address_workers, std::vector<std::string> eventNames) {
|
||||
state vector<Future<Optional<TraceEventFields>>> futures;
|
||||
for (auto s : servers) {
|
||||
futures.push_back(latestEventOnWorker(address_workers[s.address()], s.id().toString() + suffix));
|
||||
for (auto name : eventNames) {
|
||||
futures.push_back(latestEventOnWorker(address_workers[s.address()], s.id().toString() + "/" + name));
|
||||
}
|
||||
}
|
||||
|
||||
wait(waitForAll(futures));
|
||||
|
||||
vector<std::pair<iface, TraceEventFields>> results;
|
||||
vector<std::pair<iface, EventMap>> results;
|
||||
auto futureItr = futures.begin();
|
||||
|
||||
for (int i = 0; i < servers.size(); i++) {
|
||||
results.push_back(std::make_pair(servers[i], futures[i].get().present() ? futures[i].get().get() : TraceEventFields()));
|
||||
EventMap serverResults;
|
||||
for (auto name : eventNames) {
|
||||
ASSERT(futureItr != futures.end());
|
||||
serverResults[name] = futureItr->get().present() ? futureItr->get().get() : TraceEventFields();
|
||||
++futureItr;
|
||||
}
|
||||
|
||||
results.push_back(std::make_pair(servers[i], serverResults));
|
||||
}
|
||||
|
||||
return results;
|
||||
}
|
||||
|
||||
ACTOR static Future<vector<std::pair<StorageServerInterface, TraceEventFields>>> getStorageServersAndMetrics(Database cx, std::unordered_map<NetworkAddress, WorkerInterface> address_workers) {
|
||||
ACTOR static Future<vector<std::pair<StorageServerInterface, EventMap>>> getStorageServersAndMetrics(Database cx, std::unordered_map<NetworkAddress, WorkerInterface> address_workers) {
|
||||
vector<StorageServerInterface> servers = wait(timeoutError(getStorageServers(cx, true), 5.0));
|
||||
vector<std::pair<StorageServerInterface, TraceEventFields>> results = wait(getServerMetrics(servers, address_workers, "/StorageMetrics"));
|
||||
vector<std::pair<StorageServerInterface, EventMap>> results = wait(getServerMetrics(servers, address_workers,
|
||||
std::vector<std::string>{ "StorageMetrics", "ReadLatencyMetrics" }));
|
||||
|
||||
return results;
|
||||
}
|
||||
|
||||
ACTOR static Future<vector<std::pair<TLogInterface, TraceEventFields>>> getTLogsAndMetrics(Reference<AsyncVar<struct ServerDBInfo>> db, std::unordered_map<NetworkAddress, WorkerInterface> address_workers) {
|
||||
ACTOR static Future<vector<std::pair<TLogInterface, EventMap>>> getTLogsAndMetrics(Reference<AsyncVar<struct ServerDBInfo>> db, std::unordered_map<NetworkAddress, WorkerInterface> address_workers) {
|
||||
vector<TLogInterface> servers = db->get().logSystemConfig.allPresentLogs();
|
||||
vector<std::pair<TLogInterface, TraceEventFields>> results = wait(getServerMetrics(servers, address_workers, "/TLogMetrics"));
|
||||
vector<std::pair<TLogInterface, EventMap>> results = wait(getServerMetrics(servers, address_workers,
|
||||
std::vector<std::string>{ "TLogMetrics" }));
|
||||
|
||||
return results;
|
||||
}
|
||||
|
||||
static int getExtraTLogEligibleMachines(vector<std::pair<WorkerInterface, ProcessClass>> workers, DatabaseConfiguration configuration) {
|
||||
ACTOR static Future<vector<std::pair<MasterProxyInterface, EventMap>>> getProxiesAndMetrics(Database cx, std::unordered_map<NetworkAddress, WorkerInterface> address_workers) {
|
||||
Reference<ProxyInfo> proxyInfo = cx->getMasterProxies();
|
||||
std::vector<MasterProxyInterface> servers;
|
||||
if(proxyInfo) {
|
||||
for(int i = 0; i < proxyInfo->size(); ++i) {
|
||||
servers.push_back(proxyInfo->getInterface(i));
|
||||
}
|
||||
}
|
||||
|
||||
vector<std::pair<MasterProxyInterface, EventMap>> results = wait(getServerMetrics(servers, address_workers,
|
||||
std::vector<std::string>{ "GRVLatencyMetrics", "CommitLatencyMetrics" }));
|
||||
|
||||
return results;
|
||||
}
|
||||
|
||||
static int getExtraTLogEligibleMachines(const vector<std::pair<WorkerInterface, ProcessClass>>& workers, const DatabaseConfiguration& configuration) {
|
||||
std::set<StringRef> allMachines;
|
||||
std::map<Key,std::set<StringRef>> dcId_machine;
|
||||
for(auto worker : workers) {
|
||||
for(auto const& worker : workers) {
|
||||
if(worker.second.machineClassFitness(ProcessClass::TLog) < ProcessClass::NeverAssign
|
||||
&& !configuration.isExcludedServer(worker.first.address()))
|
||||
{
|
||||
|
@ -1284,7 +1384,7 @@ static int getExtraTLogEligibleMachines(vector<std::pair<WorkerInterface, Proces
|
|||
}
|
||||
|
||||
ACTOR static Future<JsonBuilderObject> workloadStatusFetcher(Reference<AsyncVar<struct ServerDBInfo>> db, vector<std::pair<WorkerInterface, ProcessClass>> workers, std::pair<WorkerInterface, ProcessClass> mWorker,
|
||||
JsonBuilderObject *qos, JsonBuilderObject *data_overlay, std::set<std::string> *incomplete_reasons, Future<ErrorOr<vector<std::pair<StorageServerInterface, TraceEventFields>>>> storageServerFuture)
|
||||
JsonBuilderObject *qos, JsonBuilderObject *data_overlay, std::set<std::string> *incomplete_reasons, Future<ErrorOr<vector<std::pair<StorageServerInterface, EventMap>>>> storageServerFuture)
|
||||
{
|
||||
state JsonBuilderObject statusObj;
|
||||
state JsonBuilderObject operationsObj;
|
||||
|
@ -1295,7 +1395,7 @@ ACTOR static Future<JsonBuilderObject> workloadStatusFetcher(Reference<AsyncVar<
|
|||
try {
|
||||
vector<Future<TraceEventFields>> proxyStatFutures;
|
||||
std::map<NetworkAddress, std::pair<WorkerInterface, ProcessClass>> workersMap;
|
||||
for (auto w : workers) {
|
||||
for (auto const& w : workers) {
|
||||
workersMap[w.first.address()] = w;
|
||||
}
|
||||
for (auto &p : db->get().client.proxies) {
|
||||
|
@ -1387,7 +1487,7 @@ ACTOR static Future<JsonBuilderObject> workloadStatusFetcher(Reference<AsyncVar<
|
|||
|
||||
// Reads
|
||||
try {
|
||||
ErrorOr<vector<std::pair<StorageServerInterface, TraceEventFields>>> storageServers = wait(storageServerFuture);
|
||||
ErrorOr<vector<std::pair<StorageServerInterface, EventMap>>> storageServers = wait(storageServerFuture);
|
||||
if(!storageServers.present()) {
|
||||
throw storageServers.getError();
|
||||
}
|
||||
|
@ -1398,10 +1498,12 @@ ACTOR static Future<JsonBuilderObject> workloadStatusFetcher(Reference<AsyncVar<
|
|||
StatusCounter readBytes;
|
||||
|
||||
for(auto &ss : storageServers.get()) {
|
||||
readRequests.updateValues( StatusCounter(ss.second.getValue("QueryQueue")));
|
||||
reads.updateValues( StatusCounter(ss.second.getValue("FinishedQueries")));
|
||||
readKeys.updateValues( StatusCounter(ss.second.getValue("RowsQueried")));
|
||||
readBytes.updateValues( StatusCounter(ss.second.getValue("BytesQueried")));
|
||||
TraceEventFields const& storageMetrics = ss.second.at("StorageMetrics");
|
||||
|
||||
readRequests.updateValues( StatusCounter(storageMetrics.getValue("QueryQueue")));
|
||||
reads.updateValues( StatusCounter(storageMetrics.getValue("FinishedQueries")));
|
||||
readKeys.updateValues( StatusCounter(storageMetrics.getValue("RowsQueried")));
|
||||
readBytes.updateValues( StatusCounter(storageMetrics.getValue("BytesQueried")));
|
||||
}
|
||||
|
||||
operationsObj["read_requests"] = readRequests.getStatus();
|
||||
|
@ -1778,8 +1880,9 @@ ACTOR Future<StatusReply> clusterGetStatus(
|
|||
}
|
||||
|
||||
state std::map<std::string, JsonBuilderObject> processIssues = getProcessIssuesAsMessages(workerIssues);
|
||||
state vector<std::pair<StorageServerInterface, TraceEventFields>> storageServers;
|
||||
state vector<std::pair<TLogInterface, TraceEventFields>> tLogs;
|
||||
state vector<std::pair<StorageServerInterface, EventMap>> storageServers;
|
||||
state vector<std::pair<TLogInterface, EventMap>> tLogs;
|
||||
state vector<std::pair<MasterProxyInterface, EventMap>> proxies;
|
||||
state JsonBuilderObject qos;
|
||||
state JsonBuilderObject data_overlay;
|
||||
|
||||
|
@ -1814,10 +1917,13 @@ ACTOR Future<StatusReply> clusterGetStatus(
|
|||
// Start getting storage servers now (using system priority) concurrently. Using sys priority because having storage servers
|
||||
// in status output is important to give context to error messages in status that reference a storage server role ID.
|
||||
state std::unordered_map<NetworkAddress, WorkerInterface> address_workers;
|
||||
for (auto worker : workers)
|
||||
for (auto const& worker : workers) {
|
||||
address_workers[worker.first.address()] = worker.first;
|
||||
state Future<ErrorOr<vector<std::pair<StorageServerInterface, TraceEventFields>>>> storageServerFuture = errorOr(getStorageServersAndMetrics(cx, address_workers));
|
||||
state Future<ErrorOr<vector<std::pair<TLogInterface, TraceEventFields>>>> tLogFuture = errorOr(getTLogsAndMetrics(db, address_workers));
|
||||
}
|
||||
|
||||
state Future<ErrorOr<vector<std::pair<StorageServerInterface, EventMap>>>> storageServerFuture = errorOr(getStorageServersAndMetrics(cx, address_workers));
|
||||
state Future<ErrorOr<vector<std::pair<TLogInterface, EventMap>>>> tLogFuture = errorOr(getTLogsAndMetrics(db, address_workers));
|
||||
state Future<ErrorOr<vector<std::pair<MasterProxyInterface, EventMap>>>> proxyFuture = errorOr(getProxiesAndMetrics(cx, address_workers));
|
||||
|
||||
state int minReplicasRemaining = -1;
|
||||
std::vector<Future<JsonBuilderObject>> futures2;
|
||||
|
@ -1870,20 +1976,31 @@ ACTOR Future<StatusReply> clusterGetStatus(
|
|||
}
|
||||
|
||||
// Need storage servers now for processStatusFetcher() below.
|
||||
ErrorOr<vector<std::pair<StorageServerInterface, TraceEventFields>>> _storageServers = wait(storageServerFuture);
|
||||
ErrorOr<vector<std::pair<StorageServerInterface, EventMap>>> _storageServers = wait(storageServerFuture);
|
||||
if (_storageServers.present()) {
|
||||
storageServers = _storageServers.get();
|
||||
}
|
||||
else
|
||||
else {
|
||||
messages.push_back(JsonBuilder::makeMessage("storage_servers_error", "Timed out trying to retrieve storage servers."));
|
||||
}
|
||||
|
||||
// ...also tlogs
|
||||
ErrorOr<vector<std::pair<TLogInterface, TraceEventFields>>> _tLogs = wait(tLogFuture);
|
||||
ErrorOr<vector<std::pair<TLogInterface, EventMap>>> _tLogs = wait(tLogFuture);
|
||||
if (_tLogs.present()) {
|
||||
tLogs = _tLogs.get();
|
||||
}
|
||||
else
|
||||
else {
|
||||
messages.push_back(JsonBuilder::makeMessage("log_servers_error", "Timed out trying to retrieve log servers."));
|
||||
}
|
||||
|
||||
// ...also proxies
|
||||
ErrorOr<vector<std::pair<MasterProxyInterface, EventMap>>> _proxies = wait(proxyFuture);
|
||||
if (_proxies.present()) {
|
||||
proxies = _proxies.get();
|
||||
}
|
||||
else {
|
||||
messages.push_back(JsonBuilder::makeMessage("proxies_error", "Timed out trying to retrieve proxies."));
|
||||
}
|
||||
}
|
||||
else {
|
||||
// Set layers status to { _valid: false, error: "configurationMissing"}
|
||||
|
@ -1893,7 +2010,7 @@ ACTOR Future<StatusReply> clusterGetStatus(
|
|||
statusObj["layers"] = layers;
|
||||
}
|
||||
|
||||
JsonBuilderObject processStatus = wait(processStatusFetcher(db, workers, pMetrics, mMetrics, latestError, traceFileOpenErrors, programStarts, processIssues, storageServers, tLogs, cx, configuration, &status_incomplete_reasons));
|
||||
JsonBuilderObject processStatus = wait(processStatusFetcher(db, workers, pMetrics, mMetrics, latestError, traceFileOpenErrors, programStarts, processIssues, storageServers, tLogs, proxies, cx, configuration, &status_incomplete_reasons));
|
||||
statusObj["processes"] = processStatus;
|
||||
statusObj["clients"] = clientStatusFetcher(clientVersionMap, traceLogGroupMap);
|
||||
|
||||
|
|
|
@ -1909,7 +1909,6 @@ ACTOR Future<Void> updateLogSystem(TLogData* self, Reference<LogData> logData, L
|
|||
|
||||
ACTOR Future<Void> tLogStart( TLogData* self, InitializeTLogRequest req, LocalityData locality ) {
|
||||
state TLogInterface recruited(self->dbgid, locality);
|
||||
recruited.locality = locality;
|
||||
recruited.initEndpoints();
|
||||
|
||||
DUMPTOKEN( recruited.peekMessages );
|
||||
|
|
|
@ -22,6 +22,7 @@
#define FDBSERVER_WORKERINTERFACE_H
#pragma once

#include "fdbserver/DataDistributorInterface.h"
#include "fdbserver/MasterInterface.h"
#include "fdbserver/TLogInterface.h"
#include "fdbserver/ResolverInterface.h"

@ -40,6 +41,7 @@ struct WorkerInterface {
RequestStream< struct InitializeTLogRequest > tLog;
RequestStream< struct RecruitMasterRequest > master;
RequestStream< struct InitializeMasterProxyRequest > masterProxy;
RequestStream< struct InitializeDataDistributorRequest > dataDistributor;
RequestStream< struct InitializeResolverRequest > resolver;
RequestStream< struct InitializeStorageRequest > storage;
RequestStream< struct InitializeLogRouterRequest > logRouter;

@ -58,11 +60,11 @@ struct WorkerInterface {
NetworkAddress address() const { return tLog.getEndpoint().getPrimaryAddress(); }

WorkerInterface() {}
WorkerInterface( LocalityData locality ) : locality( locality ) {}
WorkerInterface( const LocalityData& locality ) : locality( locality ) {}

template <class Ar>
void serialize(Ar& ar) {
serializer(ar, clientInterface, locality, tLog, master, masterProxy, resolver, storage, logRouter, debugPing, coordinationPing, waitFailure, setMetricsRate, eventLogRequest, traceBatchDumpRequest, testerInterface, diskStoreRequest);
serializer(ar, clientInterface, locality, tLog, master, masterProxy, dataDistributor, resolver, storage, logRouter, debugPing, coordinationPing, waitFailure, setMetricsRate, eventLogRequest, traceBatchDumpRequest, testerInterface, diskStoreRequest);
}
};

@ -133,6 +135,16 @@ struct InitializeMasterProxyRequest {
}
};

struct InitializeDataDistributorRequest {
UID reqId;
ReplyPromise<DataDistributorInterface> reply;

template <class Ar>
void serialize( Ar& ar ) {
serializer(ar, reqId, reply);
}
};

struct InitializeResolverRequest {
uint64_t recoveryCount;
int proxyCount;

@ -281,6 +293,7 @@ struct Role {
static const Role CLUSTER_CONTROLLER;
static const Role TESTER;
static const Role LOG_ROUTER;
static const Role DATA_DISTRIBUTOR;

std::string roleName;
std::string abbreviation;

@ -330,6 +343,7 @@ Future<Void> tLog( class IKeyValueStore* const& persistentData, class IDiskQueue
Future<Void> monitorServerDBInfo( Reference<AsyncVar<Optional<ClusterControllerFullInterface>>> const& ccInterface, Reference<ClusterConnectionFile> const&, LocalityData const&, Reference<AsyncVar<ServerDBInfo>> const& dbInfo );
Future<Void> resolver( ResolverInterface const& proxy, InitializeResolverRequest const&, Reference<AsyncVar<ServerDBInfo>> const& db );
Future<Void> logRouter( TLogInterface const& interf, InitializeLogRouterRequest const& req, Reference<AsyncVar<ServerDBInfo>> const& db );
Future<Void> dataDistributor( DataDistributorInterface const& ddi, Reference<AsyncVar<ServerDBInfo>> const& db );

void registerThreadForProfiling();
void updateCpuProfiler(ProfilerRequest req);

@ -1,4 +1,4 @@
|
|||
<?xml version="1.0" encoding="utf-8"?>
|
||||
<?xml version="1.0" encoding="utf-8"?>
|
||||
<Project DefaultTargets="Build" ToolsVersion="14.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
|
||||
<Import Project="$(SolutionDir)versions.target" />
|
||||
<PropertyGroup Condition="'$(Release)' != 'true' ">
|
||||
|
@ -58,6 +58,7 @@
|
|||
<ActorCompiler Include="MemoryPager.actor.cpp" />
|
||||
<ActorCompiler Include="LogRouter.actor.cpp" />
|
||||
<ActorCompiler Include="OldTLogServer.actor.cpp" />
|
||||
<ClCompile Include="LatencyBandConfig.cpp" />
|
||||
<ClCompile Include="SkipList.cpp" />
|
||||
<ActorCompiler Include="WaitFailure.actor.cpp" />
|
||||
<ActorCompiler Include="tester.actor.cpp" />
|
||||
|
@ -157,12 +158,14 @@
|
|||
<ClInclude Include="CoordinationInterface.h" />
|
||||
<ClInclude Include="CoroFlow.h" />
|
||||
<ClInclude Include="DataDistribution.h" />
|
||||
<ClInclude Include="DataDistributorInterface.h" />
|
||||
<ClInclude Include="DBCoreState.h" />
|
||||
<ClInclude Include="IDiskQueue.h" />
|
||||
<ClInclude Include="IKeyValueStore.h" />
|
||||
<ClInclude Include="IndirectShadowPager.h" />
|
||||
<ClInclude Include="IPager.h" />
|
||||
<ClInclude Include="IVersionedStore.h" />
|
||||
<ClInclude Include="LatencyBandConfig.h" />
|
||||
<ClInclude Include="LeaderElection.h" />
|
||||
<ClInclude Include="LogProtocolMessage.h" />
|
||||
<ClInclude Include="LogSystem.h" />
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
<?xml version="1.0" encoding="utf-8"?>
|
||||
<?xml version="1.0" encoding="utf-8"?>
|
||||
<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
|
||||
<ItemGroup>
|
||||
<ActorCompiler Include="ClusterController.actor.cpp" />
|
||||
|
@ -295,7 +295,6 @@
|
|||
<Filter>sqlite</Filter>
|
||||
</ClCompile>
|
||||
<ClCompile Include="VFSAsync.cpp" />
|
||||
<ClCompile Include="DatabaseConfiguration.cpp" />
|
||||
<ClCompile Include="workloads\AsyncFile.cpp">
|
||||
<Filter>workloads</Filter>
|
||||
</ClCompile>
|
||||
|
@ -303,6 +302,7 @@
|
|||
<ClCompile Include="workloads\MemoryKeyValueStore.cpp">
|
||||
<Filter>workloads</Filter>
|
||||
</ClCompile>
|
||||
<ClCompile Include="LatencyBandConfig.cpp" />
|
||||
</ItemGroup>
|
||||
<ItemGroup>
|
||||
<ClInclude Include="ConflictSet.h" />
|
||||
|
@ -369,6 +369,7 @@
|
|||
<ClInclude Include="MemoryPager.h" />
|
||||
<ClInclude Include="IndirectShadowPager.h" />
|
||||
<ClInclude Include="template_fdb.h" />
|
||||
<ClInclude Include="LatencyBandConfig.h" />
|
||||
</ItemGroup>
|
||||
<ItemGroup>
|
||||
<Filter Include="workloads">
|
||||
|
|
|
@ -1031,7 +1031,6 @@ static std::set<int> const& normalMasterErrors() {
s.insert( error_code_no_more_servers );
s.insert( error_code_master_recovery_failed );
s.insert( error_code_coordinated_state_conflict );
s.insert( error_code_movekeys_conflict );
s.insert( error_code_master_max_versions_in_flight );
s.insert( error_code_worker_removed );
s.insert( error_code_new_coordinators_timed_out );

@ -1155,7 +1154,7 @@ ACTOR Future<Void> configurationMonitor( Reference<MasterData> self ) {
self->registrationTrigger.trigger();
}

state Future<Void> watchFuture = tr.watch(excludedServersVersionKey);
state Future<Void> watchFuture = tr.watch(configVersionKey);
wait(tr.commit());
wait(watchFuture);
break;

@ -1349,14 +1348,6 @@ ACTOR Future<Void> masterCore( Reference<MasterData> self ) {
.detail("RecoveryDuration", recoveryDuration)
.trackLatest("MasterRecoveryState");

// Now that the master is recovered we can start auxiliary services that happen to run here
{
PromiseStream< std::pair<UID, Optional<StorageServerInterface>> > ddStorageServerChanges;
state double lastLimited = 0;
self->addActor.send( reportErrorsExcept( dataDistribution( self->dbInfo, self->myInterface, self->configuration, ddStorageServerChanges, self->logSystem, self->recoveryTransactionVersion, self->primaryDcId, self->remoteDcIds, &lastLimited, remoteRecovered.getFuture() ), "DataDistribution", self->dbgid, &normalMasterErrors() ) );
self->addActor.send( reportErrors( rateKeeper( self->dbInfo, ddStorageServerChanges, self->myInterface.getRateInfo.getFuture(), self->configuration, &lastLimited ), "Ratekeeper", self->dbgid) );
}

if( self->resolvers.size() > 1 )
self->addActor.send( resolutionBalancing(self) );

@ -47,6 +47,7 @@
|
|||
#include "fdbserver/LogSystem.h"
|
||||
#include "fdbserver/RecoveryState.h"
|
||||
#include "fdbserver/LogProtocolMessage.h"
|
||||
#include "fdbserver/LatencyBandConfig.h"
|
||||
#include "flow/TDMetric.actor.h"
|
||||
#include "flow/actorcompiler.h" // This must be the last #include.
|
||||
|
||||
|
@ -431,6 +432,8 @@ public:
|
|||
return val;
|
||||
}
|
||||
|
||||
Optional<LatencyBandConfig> latencyBandConfig;
|
||||
|
||||
struct Counters {
|
||||
CounterCollection cc;
|
||||
Counter allQueries, getKeyQueries, getValueQueries, getRangeQueries, finishedQueries, rowsQueried, bytesQueried, watchQueries;
|
||||
|
@ -441,6 +444,8 @@ public:
|
|||
Counter loops;
|
||||
Counter fetchWaitingMS, fetchWaitingCount, fetchExecutingMS, fetchExecutingCount;
|
||||
|
||||
LatencyBands readLatencyBands;
|
||||
|
||||
Counters(StorageServer* self)
|
||||
: cc("StorageServer", self->thisServerID.toString()),
|
||||
getKeyQueries("GetKeyQueries", cc),
|
||||
|
@ -465,7 +470,8 @@ public:
|
|||
fetchWaitingMS("FetchWaitingMS", cc),
|
||||
fetchWaitingCount("FetchWaitingCount", cc),
|
||||
fetchExecutingMS("FetchExecutingMS", cc),
|
||||
fetchExecutingCount("FetchExecutingCount", cc)
|
||||
fetchExecutingCount("FetchExecutingCount", cc),
|
||||
readLatencyBands("ReadLatencyMetrics", self->thisServerID, SERVER_KNOBS->STORAGE_LOGGING_DELAY)
|
||||
{
|
||||
specialCounter(cc, "LastTLogVersion", [self](){ return self->lastTLogVersion; });
|
||||
specialCounter(cc, "Version", [self](){ return self->version.get(); });
|
||||
|
@ -733,15 +739,16 @@ ACTOR Future<Version> waitForVersionNoTooOld( StorageServer* data, Version versi
|
|||
}
|
||||
|
||||
ACTOR Future<Void> getValueQ( StorageServer* data, GetValueRequest req ) {
|
||||
state double startTime = timer();
|
||||
state int64_t resultSize = 0;
|
||||
|
||||
try {
|
||||
// Active load balancing runs at a very high priority (to obtain accurate queue lengths)
|
||||
// so we need to downgrade here
|
||||
++data->counters.getValueQueries;
|
||||
++data->counters.allQueries;
|
||||
++data->readQueueSizeMetric;
|
||||
data->maxQueryQueue = std::max<int>( data->maxQueryQueue, data->counters.allQueries.getValue() - data->counters.finishedQueries.getValue());
|
||||
|
||||
// Active load balancing runs at a very high priority (to obtain accurate queue lengths)
|
||||
// so we need to downgrade here
|
||||
wait( delay(0, TaskDefaultEndpoint) );
|
||||
|
||||
if( req.debugID.present() )
|
||||
|
@ -788,7 +795,8 @@ ACTOR Future<Void> getValueQ( StorageServer* data, GetValueRequest req ) {
|
|||
|
||||
if (v.present()) {
|
||||
++data->counters.rowsQueried;
|
||||
data->counters.bytesQueried += v.get().size();
|
||||
resultSize = v.get().size();
|
||||
data->counters.bytesQueried += resultSize;
|
||||
}
|
||||
|
||||
if( req.debugID.present() )
|
||||
|
@ -805,6 +813,10 @@ ACTOR Future<Void> getValueQ( StorageServer* data, GetValueRequest req ) {
|
|||
|
||||
++data->counters.finishedQueries;
|
||||
--data->readQueueSizeMetric;
|
||||
if(data->latencyBandConfig.present()) {
|
||||
int maxReadBytes = data->latencyBandConfig.get().readConfig.maxReadBytes.orDefault(std::numeric_limits<int>::max());
|
||||
data->counters.readLatencyBands.addMeasurement(timer()-req.requestTime, resultSize > maxReadBytes);
|
||||
}
|
||||
|
||||
return Void();
|
||||
};
|
||||
|
@ -1241,6 +1253,8 @@ ACTOR Future<Void> getKeyValues( StorageServer* data, GetKeyValuesRequest req )
|
|||
// Throws a wrong_shard_server if the keys in the request or result depend on data outside this server OR if a large selector offset prevents
|
||||
// all data from being read in one range read
|
||||
{
|
||||
state int64_t resultSize = 0;
|
||||
|
||||
++data->counters.getRangeQueries;
|
||||
++data->counters.allQueries;
|
||||
++data->readQueueSizeMetric;
|
||||
|
@ -1329,8 +1343,9 @@ ACTOR Future<Void> getKeyValues( StorageServer* data, GetKeyValuesRequest req )
|
|||
r.penalty = data->getPenalty();
|
||||
req.reply.send( r );
|
||||
|
||||
resultSize = req.limitBytes - remainingLimitBytes;
|
||||
data->counters.bytesQueried += resultSize;
|
||||
data->counters.rowsQueried += r.data.size();
|
||||
data->counters.bytesQueried += req.limitBytes - remainingLimitBytes;
|
||||
}
|
||||
} catch (Error& e) {
|
||||
if(!canReplyWith(e))
|
||||
|
@ -1340,11 +1355,19 @@ ACTOR Future<Void> getKeyValues( StorageServer* data, GetKeyValuesRequest req )
|
|||
|
||||
++data->counters.finishedQueries;
|
||||
--data->readQueueSizeMetric;
|
||||
|
||||
if(data->latencyBandConfig.present()) {
|
||||
int maxReadBytes = data->latencyBandConfig.get().readConfig.maxReadBytes.orDefault(std::numeric_limits<int>::max());
|
||||
int maxSelectorOffset = data->latencyBandConfig.get().readConfig.maxKeySelectorOffset.orDefault(std::numeric_limits<int>::max());
|
||||
data->counters.readLatencyBands.addMeasurement(timer()-req.requestTime, resultSize > maxReadBytes || abs(req.begin.offset) > maxSelectorOffset || abs(req.end.offset) > maxSelectorOffset);
|
||||
}
|
||||
|
||||
return Void();
|
||||
}
|
||||
|
||||
ACTOR Future<Void> getKey( StorageServer* data, GetKeyRequest req ) {
|
||||
state int64_t resultSize = 0;
|
||||
|
||||
++data->counters.getKeyQueries;
|
||||
++data->counters.allQueries;
|
||||
++data->readQueueSizeMetric;
|
||||
|
@ -1371,8 +1394,10 @@ ACTOR Future<Void> getKey( StorageServer* data, GetKeyRequest req ) {
|
|||
updated = firstGreaterOrEqual(k)+offset-1; // first thing on next shard OR (large offset case) keyAfter largest key retrieved in range read
|
||||
else
|
||||
updated = KeySelectorRef(k,true,0); //found
|
||||
|
||||
resultSize = k.size();
|
||||
data->counters.bytesQueried += resultSize;
|
||||
++data->counters.rowsQueried;
|
||||
data->counters.bytesQueried += k.size();
|
||||
|
||||
GetKeyReply reply(updated);
|
||||
reply.penalty = data->getPenalty();
|
||||
|
@ -1387,6 +1412,11 @@ ACTOR Future<Void> getKey( StorageServer* data, GetKeyRequest req ) {
|
|||
|
||||
++data->counters.finishedQueries;
|
||||
--data->readQueueSizeMetric;
|
||||
if(data->latencyBandConfig.present()) {
|
||||
int maxReadBytes = data->latencyBandConfig.get().readConfig.maxReadBytes.orDefault(std::numeric_limits<int>::max());
|
||||
int maxSelectorOffset = data->latencyBandConfig.get().readConfig.maxKeySelectorOffset.orDefault(std::numeric_limits<int>::max());
|
||||
data->counters.readLatencyBands.addMeasurement(timer()-req.requestTime, resultSize > maxReadBytes || abs(req.sel.offset) > maxSelectorOffset);
|
||||
}
|
||||
|
||||
return Void();
|
||||
}
|
||||
|
@ -3310,6 +3340,20 @@ ACTOR Future<Void> storageServerCore( StorageServer* self, StorageServerInterfac
|
|||
doUpdate = Void();
|
||||
}
|
||||
}
|
||||
|
||||
Optional<LatencyBandConfig> newLatencyBandConfig = self->db->get().latencyBandConfig;
|
||||
if(newLatencyBandConfig.present() != self->latencyBandConfig.present()
|
||||
|| (newLatencyBandConfig.present() && newLatencyBandConfig.get().readConfig != self->latencyBandConfig.get().readConfig))
|
||||
{
|
||||
self->latencyBandConfig = newLatencyBandConfig;
|
||||
self->counters.readLatencyBands.clearBands();
|
||||
TraceEvent("LatencyBandReadUpdatingConfig").detail("Present", newLatencyBandConfig.present());
|
||||
if(self->latencyBandConfig.present()) {
|
||||
for(auto band : self->latencyBandConfig.get().readConfig.bands) {
|
||||
self->counters.readLatencyBands.addThreshold(band);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
when( GetValueRequest req = waitNext(ssi.getValue.getFuture()) ) {
|
||||
// Warning: This code is executed at extremely high priority (TaskLoadBalancedEndpoint), so downgrade before doing real work
|
||||
|
|
|
@@ -31,6 +31,7 @@
#include "fdbserver/IDiskQueue.h"
#include "fdbclient/DatabaseContext.h"
#include "fdbserver/ClusterRecruitmentInterface.h"
#include "fdbserver/DataDistributorInterface.h"
#include "fdbserver/ServerDBInfo.h"
#include "fdbserver/CoordinationInterface.h"
#include "fdbclient/FailureMonitorClient.h"

@@ -266,19 +267,27 @@ std::vector< DiskStore > getDiskStores( std::string folder ) {
    return result;
}

ACTOR Future<Void> registrationClient( Reference<AsyncVar<Optional<ClusterControllerFullInterface>>> ccInterface, WorkerInterface interf, Reference<AsyncVar<ClusterControllerPriorityInfo>> asyncPriorityInfo, ProcessClass initialClass) {
ACTOR Future<Void> registrationClient(
        Reference<AsyncVar<Optional<ClusterControllerFullInterface>>> ccInterface,
        WorkerInterface interf,
        Reference<AsyncVar<ClusterControllerPriorityInfo>> asyncPriorityInfo,
        ProcessClass initialClass,
        Reference<AsyncVar<Optional<DataDistributorInterface>>> ddInterf) {
    // Keeps the cluster controller (as it may be re-elected) informed that this worker exists
    // The cluster controller uses waitFailureClient to find out if we die, and returns from registrationReply (requiring us to re-register)
    // The registration request piggybacks optional distributor interface if it exists.
    state Generation requestGeneration = 0;
    state ProcessClass processClass = initialClass;
    loop {
        Future<RegisterWorkerReply> registrationReply = ccInterface->get().present() ? brokenPromiseToNever( ccInterface->get().get().registerWorker.getReply( RegisterWorkerRequest(interf, initialClass, processClass, asyncPriorityInfo->get(), requestGeneration++) ) ) : Never();
        RegisterWorkerRequest request(interf, initialClass, processClass, asyncPriorityInfo->get(), requestGeneration++, ddInterf->get());
        Future<RegisterWorkerReply> registrationReply = ccInterface->get().present() ? brokenPromiseToNever( ccInterface->get().get().registerWorker.getReply(request) ) : Never();
        choose {
            when ( RegisterWorkerReply reply = wait( registrationReply )) {
                processClass = reply.processClass;
                asyncPriorityInfo->set( reply.priorityInfo );
            }
            when ( wait( ccInterface->onChange() )) { }
            when ( wait( ddInterf->onChange() ) ) {}
        }
    }
}

@@ -504,7 +513,8 @@ ACTOR Future<Void> monitorServerDBInfo( Reference<AsyncVar<Optional<ClusterContr

        choose {
            when( ServerDBInfo ni = wait( ccInterface->get().present() ? brokenPromiseToNever( ccInterface->get().get().getServerDBInfo.getReply( req ) ) : Never() ) ) {
                TraceEvent("GotServerDBInfoChange").detail("ChangeID", ni.id).detail("MasterID", ni.master.id());
                TraceEvent("GotServerDBInfoChange").detail("ChangeID", ni.id).detail("MasterID", ni.master.id())
                    .detail("DataDistributorID", ni.distributor.present() ? ni.distributor.get().id() : UID());
                ServerDBInfo localInfo = ni;
                localInfo.myLocality = locality;
                dbInfo->set(localInfo);

@@ -520,6 +530,7 @@ ACTOR Future<Void> monitorServerDBInfo( Reference<AsyncVar<Optional<ClusterContr
ACTOR Future<Void> workerServer( Reference<ClusterConnectionFile> connFile, Reference<AsyncVar<Optional<ClusterControllerFullInterface>>> ccInterface, LocalityData locality,
    Reference<AsyncVar<ClusterControllerPriorityInfo>> asyncPriorityInfo, ProcessClass initialClass, std::string folder, int64_t memoryLimit, std::string metricsConnFile, std::string metricsPrefix, Promise<Void> recoveredDiskFiles) {
    state PromiseStream< ErrorInfo > errors;
    state Reference<AsyncVar<Optional<DataDistributorInterface>>> ddInterf( new AsyncVar<Optional<DataDistributorInterface>>() );
    state Future<Void> handleErrors = workerHandleErrors( errors.getFuture() ); // Needs to be stopped last
    state ActorCollection errorForwarders(false);
    state Future<Void> loggingTrigger = Void();

@@ -648,7 +659,7 @@ ACTOR Future<Void> workerServer( Reference<ClusterConnectionFile> connFile, Refe
    wait(waitForAll(recoveries));
    recoveredDiskFiles.send(Void());

    errorForwarders.add( registrationClient( ccInterface, interf, asyncPriorityInfo, initialClass ) );
    errorForwarders.add( registrationClient( ccInterface, interf, asyncPriorityInfo, initialClass, ddInterf ) );

    TraceEvent("RecoveriesComplete", interf.id());

@@ -703,7 +714,6 @@ ACTOR Future<Void> workerServer( Reference<ClusterConnectionFile> connFile, Refe
    startRole( Role::MASTER, recruited.id(), interf.id() );

    DUMPTOKEN( recruited.waitFailure );
    DUMPTOKEN( recruited.getRateInfo );
    DUMPTOKEN( recruited.tlogRejoin );
    DUMPTOKEN( recruited.changeCoordinators );
    DUMPTOKEN( recruited.getCommitVersion );

@@ -713,6 +723,23 @@ ACTOR Future<Void> workerServer( Reference<ClusterConnectionFile> connFile, Refe
    errorForwarders.add( zombie(recruited, forwardError( errors, Role::MASTER, recruited.id(), masterProcess )) );
    req.reply.send(recruited);
}
when ( InitializeDataDistributorRequest req = waitNext(interf.dataDistributor.getFuture()) ) {
    DataDistributorInterface recruited(locality);
    recruited.initEndpoints();

    if ( ddInterf->get().present() ) {
        recruited = ddInterf->get().get();
        TEST(true); // Recruited while already a data distributor.
    } else {
        startRole( Role::DATA_DISTRIBUTOR, recruited.id(), interf.id() );

        Future<Void> dataDistributorProcess = dataDistributor( recruited, dbInfo );
        errorForwarders.add( forwardError( errors, Role::DATA_DISTRIBUTOR, recruited.id(), setWhenDoneOrError( dataDistributorProcess, ddInterf, Optional<DataDistributorInterface>() ) ) );
        ddInterf->set(Optional<DataDistributorInterface>(recruited));
    }
    TraceEvent("DataDistributorReceived", req.reqId).detail("DataDistributorId", recruited.id());
    req.reply.send(recruited);
}
when( InitializeTLogRequest req = waitNext(interf.tLog.getFuture()) ) {
    auto& logData = sharedLogs[req.storeType];
    logData.second.send(req);

@@ -1086,3 +1113,4 @@ const Role Role::RESOLVER("Resolver", "RV");
const Role Role::CLUSTER_CONTROLLER("ClusterController", "CC");
const Role Role::TESTER("Tester", "TS");
const Role Role::LOG_ROUTER("LogRouter", "LR");
const Role Role::DATA_DISTRIBUTOR("DataDistributor", "DD");

@@ -133,7 +133,7 @@ struct MoveKeysWorkload : TestWorkload {

    try {
        state Promise<Void> signal;
        wait( moveKeys( cx, keys, destinationTeamIDs, destinationTeamIDs, lock, signal, &fl1, &fl2, invalidVersion, false, relocateShardInterval.pairID ) );
        wait( moveKeys( cx, keys, destinationTeamIDs, destinationTeamIDs, lock, signal, &fl1, &fl2, false, relocateShardInterval.pairID ) );
        TraceEvent(relocateShardInterval.end()).detail("Result","Success");
        return Void();
    } catch (Error& e) {

@@ -31,6 +31,9 @@ extern bool noUnseed;

struct StatusWorkload : TestWorkload {
    double testDuration, requestsPerSecond;
    bool enableLatencyBands;

    Future<Void> latencyBandActor;

    PerfIntCounter requests, replies, errors, totalSize;
    Optional<StatusObject> parsedSchema;

@@ -41,6 +44,7 @@ struct StatusWorkload : TestWorkload {
    {
        testDuration = getOption(options, LiteralStringRef("testDuration"), 10.0);
        requestsPerSecond = getOption(options, LiteralStringRef("requestsPerSecond"), 0.5);
        enableLatencyBands = getOption(options, LiteralStringRef("enableLatencyBands"), g_random->random01() < 0.5);
        auto statusSchemaStr = getOption(options, LiteralStringRef("schema"), JSONSchemas::statusSchema);
        if (statusSchemaStr.size()) {
            json_spirit::mValue schema = readJSONStrictly(statusSchemaStr.toString());

@@ -55,6 +59,10 @@ struct StatusWorkload : TestWorkload {

    virtual std::string description() { return "StatusWorkload"; }
    virtual Future<Void> setup(Database const& cx) {
        if(enableLatencyBands) {
            latencyBandActor = configureLatencyBands(this, cx);
        }

        return Void();
    }
    virtual Future<Void> start(Database const& cx) {

@@ -103,6 +111,56 @@ struct StatusWorkload : TestWorkload {
        }
    }

    static std::string generateBands() {
        int numBands = g_random->randomInt(0, 10);
        std::vector<double> bands;

        while(bands.size() < numBands) {
            bands.push_back(g_random->random01() * pow(10, g_random->randomInt(-5, 1)));
        }

        std::string result = "\"bands\":[";
        for(int i = 0; i < bands.size(); ++i) {
            if(i > 0) {
                result += ",";
            }

            result += format("%f", bands[i]);
        }

        return result + "]";
    }

    ACTOR Future<Void> configureLatencyBands(StatusWorkload *self, Database cx) {
        loop {
            state Transaction tr(cx);
            loop {
                try {
                    tr.setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
                    tr.setOption(FDBTransactionOptions::LOCK_AWARE);

                    std::string config = "{"
                        "\"get_read_version\":{" + generateBands() + "},"
                        "\"read\":{" + generateBands() + format(", \"max_key_selector_offset\":%d, \"max_read_bytes\":%d},", g_random->randomInt(0, 10000), g_random->randomInt(0, 1000000)) + ""
                        "\"commit\":{" + generateBands() + format(", \"max_commit_bytes\":%d", g_random->randomInt(0, 1000000)) + "}"
                    "}";

                    tr.set(latencyBandConfigKey, ValueRef(config));
                    wait(tr.commit());

                    if(g_random->random01() < 0.3) {
                        return Void();
                    }

                    wait(delay(g_random->random01() * 120));
                }
                catch(Error &e) {
                    wait(tr.onError(e));
                }
            }
        }
    }

    ACTOR Future<Void> fetcher(Reference<ClusterConnectionFile> connFile, StatusWorkload *self) {
        state double lastTime = now();

@@ -131,7 +189,6 @@ struct StatusWorkload : TestWorkload {
            }
        }
    }

};

WorkloadFactory<StatusWorkload> StatusWorkloadFactory("Status");

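For orientation only: the value that configureLatencyBands writes under latencyBandConfigKey has roughly the shape shown below. The thresholds and limits are randomized on every run and the real string is emitted on a single line, so these particular numbers are purely illustrative.

{"get_read_version":{"bands":[0.000420,0.003100]},
 "read":{"bands":[0.001500], "max_key_selector_offset":1000, "max_read_bytes":80000},
 "commit":{"bands":[0.010000], "max_commit_bytes":200000}}
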
@@ -547,6 +547,14 @@ inline static Standalone<StringRef> makeString( int length ) {
    return returnString;
}

inline static Standalone<StringRef> makeAlignedString( int alignment, int length ) {
    Standalone<StringRef> returnString;
    uint8_t *outData = new (returnString.arena()) uint8_t[alignment + length];
    outData = (uint8_t*)((((uintptr_t)outData + (alignment - 1)) / alignment) * alignment);
    ((StringRef&)returnString) = StringRef(outData, length);
    return returnString;
}

inline static StringRef makeString( int length, Arena& arena ) {
    uint8_t *outData = new (arena) uint8_t[length];
    return StringRef(outData, length);

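The new makeAlignedString over-allocates `alignment` extra bytes and then rounds the raw pointer up to the next multiple of `alignment`, so the `length` usable bytes always stay inside the allocation. A small standalone sketch of the same round-up arithmetic, with made-up values (this is not code from the diff):

#include <cassert>
#include <cstdint>

// Round p up to the next multiple of alignment (alignment assumed > 0).
static inline uintptr_t roundUp(uintptr_t p, uintptr_t alignment) {
    return ((p + (alignment - 1)) / alignment) * alignment;
}

int main() {
    assert(roundUp(100, 8) == 104); // 100 -> next multiple of 8
    assert(roundUp(104, 8) == 104); // already-aligned values are unchanged
    return 0;
}

Since the rounded pointer can move forward by at most alignment - 1 bytes, the aligned region of `length` bytes never runs past the end of the alignment + length byte allocation.
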
@@ -77,9 +77,7 @@ set(FLOW_SRCS

configure_file(${CMAKE_CURRENT_SOURCE_DIR}/hgVersion.h.cmake ${CMAKE_CURRENT_BINARY_DIR}/hgVersion.h)

actor_set(FLOW_BUILD "${FLOW_SRCS}")
add_library(flow STATIC ${FLOW_BUILD})
actor_compile(flow "${FLOW_SRCS}")
add_flow_target(STATIC_LIBRARY NAME flow SRCS ${FLOW_SRCS})
target_include_directories(flow SYSTEM PUBLIC ${CMAKE_THREAD_LIBS_INIT})
target_include_directories(flow PUBLIC ${CMAKE_CURRENT_BINARY_DIR} ${CMAKE_CURRENT_SOURCE_DIR})
if (NOT APPLE AND NOT WIN32)

@@ -117,3 +117,7 @@ void ErrorCodeTable::addCode(int code, const char *name, const char *description

bool isAssertDisabled(int line) {
    return FLOW_KNOBS && (FLOW_KNOBS->DISABLE_ASSERTS == -1 || FLOW_KNOBS->DISABLE_ASSERTS == line);
}

void breakpoint_me() {
    return;
}

@@ -98,6 +98,8 @@ extern bool isAssertDisabled( int line );
    catch(Error &e) { criticalError(FDB_EXIT_ABORT, "AbortOnError", e.what()); } \
    catch(...) { criticalError(FDB_EXIT_ABORT, "AbortOnError", "Aborted due to unknown error"); }

EXTERNC void breakpoint_me();

#ifdef FDB_CLEAN_BUILD
#  define NOT_IN_CLEAN BOOST_STATIC_ASSERT_MSG(0, "This code can not be enabled in a clean build.");
#else

@@ -55,7 +55,7 @@ using namespace boost::asio::ip;
//
//                                                xyzdev
//                                                vvvv
const uint64_t currentProtocolVersion        = 0x0FDB00B061020001LL;
const uint64_t currentProtocolVersion        = 0x0FDB00B061030001LL;
const uint64_t compatibleProtocolVersionMask = 0xffffffffffff0000LL;
const uint64_t minValidProtocolVersion       = 0x0FDB00A200060001LL;

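The bump from 0x...61020001 to 0x...61030001 matters because compatibleProtocolVersionMask blanks out only the low 16 bits: changes in the last four hex digits stay within the same compatibility window, while this change does not. A minimal sketch of such a masked comparison (the helper name is invented here; it is not code from the diff):

#include <cassert>
#include <cstdint>

// Two protocol versions count as compatible when they agree on the bits
// selected by the mask (the low 16 bits are ignored).
static bool maskedEqual(uint64_t a, uint64_t b, uint64_t mask) {
    return (a & mask) == (b & mask);
}

int main() {
    const uint64_t mask = 0xffffffffffff0000LL;
    assert( maskedEqual(0x0FDB00B061030001LL, 0x0FDB00B061030002LL, mask)); // same masked version
    assert(!maskedEqual(0x0FDB00B061020001LL, 0x0FDB00B061030001LL, mask)); // differ within masked bits
    return 0;
}
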
76  flow/Stats.h

@@ -38,6 +38,14 @@ MyCounters() : foo("foo", cc), bar("bar", cc), baz("baz", cc) {}
#include "flow/flow.h"
#include "flow/TDMetric.actor.h"

struct TimedRequest {
    double requestTime;

    TimedRequest() {
        requestTime = timer();
    }
};

struct ICounter {
    // All counters have a name and value
    virtual std::string const& getName() const = 0;

@@ -62,7 +70,7 @@ struct CounterCollection {
    std::string id;
};

struct Counter : ICounter {
struct Counter : ICounter, NonCopyable {
public:
    typedef int64_t Value;

@@ -90,7 +98,7 @@ private:
};

template <class F>
struct SpecialCounter : ICounter, FastAllocated<SpecialCounter<F>> {
struct SpecialCounter : ICounter, FastAllocated<SpecialCounter<F>>, NonCopyable {
    SpecialCounter(CounterCollection& collection, std::string const& name, F && f) : name(name), f(f) { collection.counters.push_back(this); collection.counters_to_remove.push_back(this); }
    virtual void remove() { delete this; }

@@ -112,4 +120,68 @@ static void specialCounter(CounterCollection& collection, std::string const& nam

Future<Void> traceCounters(std::string const& traceEventName, UID const& traceEventID, double const& interval, CounterCollection* const& counters, std::string const& trackLatestName = std::string());

class LatencyBands {
public:
    LatencyBands(std::string name, UID id, double loggingInterval) : name(name), id(id), loggingInterval(loggingInterval), cc(nullptr), filteredCount(nullptr) {}

    void addThreshold(double value) {
        if(value > 0 && bands.count(value) == 0) {
            if(bands.size() == 0) {
                ASSERT(!cc && !filteredCount);
                cc = new CounterCollection(name, id.toString());
                logger = traceCounters(name, id, loggingInterval, cc, id.toString() + "/" + name);
                filteredCount = new Counter("Filtered", *cc);
                insertBand(std::numeric_limits<double>::infinity());
            }

            insertBand(value);
        }
    }

    void addMeasurement(double measurement, bool filtered=false) {
        if(filtered && filteredCount) {
            ++(*filteredCount);
        }
        else if(bands.size() > 0) {
            auto itr = bands.upper_bound(measurement);
            ASSERT(itr != bands.end());
            ++(*itr->second);
        }
    }

    void clearBands() {
        logger = Void();

        for(auto itr : bands) {
            delete itr.second;
        }

        bands.clear();

        delete filteredCount;
        delete cc;

        filteredCount = nullptr;
        cc = nullptr;
    }

    ~LatencyBands() {
        clearBands();
    }

private:
    std::map<double, Counter*> bands;
    Counter *filteredCount;

    std::string name;
    UID id;
    double loggingInterval;

    CounterCollection *cc;
    Future<Void> logger;

    void insertBand(double value) {
        bands.insert(std::make_pair(value, new Counter(format("Band%f", value), *cc)));
    }
};
#endif

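As a quick orientation for the new class, the fragment below sketches how it is meant to be driven: thresholds partition measurements into counters keyed by their upper bound, with an implicit infinity band and a separate Filtered bucket. The name, UID, interval, and threshold values here are made up for illustration, and the fragment assumes flow/Stats.h plus a running flow trace/logging setup rather than being a standalone program.

#include "flow/Stats.h"

// Hypothetical wiring; real thresholds come from LatencyBandConfig as in storageserver.actor.cpp.
LatencyBands readBands("ReadLatencyMetrics", UID(1, 2), 5.0);
readBands.addThreshold(0.001);          // first threshold also creates the Filtered counter and the infinity band
readBands.addThreshold(0.01);

readBands.addMeasurement(0.0004);       // lands in the 0.001 band (bands.upper_bound picks the next larger key)
readBands.addMeasurement(0.5);          // lands in the implicit infinity band
readBands.addMeasurement(0.002, true);  // filtered (e.g. oversized read), counted only under "Filtered"

readBands.clearBands();                 // drop all counters before applying a new configuration
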
@@ -36,6 +36,19 @@ namespace actorcompiler
        }
    };

    class ErrorMessagePolicy
    {
        public bool DisableActorWithoutWaitWarning = false;
        public void HandleActorWithoutWait(String sourceFile, Actor actor)
        {
            if (!DisableActorWithoutWaitWarning && !actor.isTestCase)
            {
                // TODO(atn34): Once cmake is the only build system we can make this an error instead of a warning.
                Console.Error.WriteLine("{0}:{1}: warning: ACTOR {2} does not contain a wait() statement", sourceFile, actor.SourceLine, actor.name);
            }
        }
    }

    class Token
    {
        public string Value;

@@ -200,10 +213,12 @@ namespace actorcompiler

        Token[] tokens;
        string sourceFile;
        ErrorMessagePolicy errorMessagePolicy;

        public ActorParser(string text, string sourceFile)
        public ActorParser(string text, string sourceFile, ErrorMessagePolicy errorMessagePolicy)
        {
            this.sourceFile = sourceFile;
            this.errorMessagePolicy = errorMessagePolicy;
            tokens = Tokenize(text).Select(t=>new Token{ Value=t }).ToArray();
            CountParens();
            //if (sourceFile.EndsWith(".h")) LineNumbersEnabled = false;

@@ -872,21 +887,21 @@ namespace actorcompiler
            var body = range(heading.End+1, tokens.Length)
                .TakeWhile(t => t.BraceDepth > toks.First().BraceDepth);

            bool warnOnNoWait = false;
            if (head_token.Value == "ACTOR")
            {
                ParseActorHeading(actor, heading);
                warnOnNoWait = true;
            }
            else if (head_token.Value == "TEST_CASE")
            else if (head_token.Value == "TEST_CASE") {
                ParseTestCaseHeading(actor, heading);
                actor.isTestCase = true;
            }
            else
                head_token.Assert("ACTOR or TEST_CASE expected!", t => false);

            actor.body = ParseCodeBlock(body);

            if (!actor.body.containsWait() && warnOnNoWait)
                Console.Error.WriteLine("{0}:{1}: warning: ACTOR {2} does not contain a wait() statement", sourceFile, actor.SourceLine, actor.name);
            if (!actor.body.containsWait())
                this.errorMessagePolicy.HandleActorWithoutWait(sourceFile, actor);

            end = body.End + 1;
        }

@@ -234,6 +234,7 @@ namespace actorcompiler
        public string testCaseParameters = null;
        public string nameSpace = null;
        public bool isForwardDeclaration = false;
        public bool isTestCase = false;
    };

    class Descr

@@ -33,16 +33,21 @@ namespace actorcompiler
            if (args.Length < 2)
            {
                Console.WriteLine("Usage:");
                Console.WriteLine("  actorcompiler [input] [output]");
                Console.WriteLine("  actorcompiler <input> <output> [--disable-actor-without-wait-warning]");
                return 100;
            }
            Console.WriteLine("actorcompiler {0}", string.Join(" ", args));
            string input = args[0], output = args[1], outputtmp = args[1] + ".tmp";
            ErrorMessagePolicy errorMessagePolicy = new ErrorMessagePolicy();
            if (args.Contains("--disable-actor-without-wait-warning"))
            {
                errorMessagePolicy.DisableActorWithoutWaitWarning = true;
            }
            try
            {
                var inputData = File.ReadAllText(input);
                using (var outputStream = new StreamWriter(outputtmp))
                    new ActorParser(inputData, input.Replace('\\', '/')).Write(outputStream, output.Replace('\\', '/'));
                    new ActorParser(inputData, input.Replace('\\', '/'), errorMessagePolicy).Write(outputStream, output.Replace('\\', '/'));
                if (File.Exists(output))
                {
                    File.SetAttributes(output, FileAttributes.Normal);

@@ -12,7 +12,6 @@
    <TargetFrameworkVersion>v4.0</TargetFrameworkVersion>
    <FileAlignment>512</FileAlignment>
    <OutputPath>$(SolutionDir)bin\$(Configuration)\</OutputPath>
    <IntermediateOutputPath>$(SystemDrive)\temp\msvcfdb\$(Configuration)\actorcompiler\</IntermediateOutputPath>
    <PublishUrl>publish\</PublishUrl>
    <Install>true</Install>
    <InstallFrom>Disk</InstallFrom>

@@ -12,7 +12,6 @@
    <TargetFrameworkVersion>v4.0</TargetFrameworkVersion>
    <FileAlignment>512</FileAlignment>
    <OutputPath>$(SolutionDir)bin\$(Configuration)\</OutputPath>
    <IntermediateOutputPath>$(SystemDrive)\temp\msvcfdb\$(Configuration)\coveragetool\</IntermediateOutputPath>
  </PropertyGroup>
  <PropertyGroup Condition="'$(Configuration)|$(Platform)' == 'Debug|AnyCPU'">
    <DebugSymbols>true</DebugSymbols>

@@ -775,6 +775,18 @@ Future<Void> setAfter( Reference<AsyncVar<T>> var, double time, T val ) {
    return Void();
}

ACTOR template <class T>
Future<Void> setWhenDoneOrError( Future<Void> condition, Reference<AsyncVar<T>> var, T val ) {
    try {
        wait( condition );
    }
    catch ( Error& e ) {
        if (e.code() == error_code_actor_cancelled) throw;
    }
    var->set( val );
    return Void();
}

Future<bool> allTrue( const std::vector<Future<bool>>& all );
Future<Void> anyTrue( std::vector<Reference<AsyncVar<bool>>> const& input, Reference<AsyncVar<bool>> const& output );
Future<Void> cancelOnly( std::vector<Future<Void>> const& futures );

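For context on how this helper is used by the worker changes above: the data distributor interface is published through an AsyncVar, and setWhenDoneOrError resets it to an empty Optional once the role actor finishes or fails (cancellation excepted), so registrationClient stops advertising it. The fragment below simply restates that wiring from the worker.actor.cpp hunk; it is a sketch, not additional API.

// Sketch (mirrors worker.actor.cpp): publish the interface, then clear the AsyncVar
// when the data distributor actor ends for any reason other than cancellation.
Future<Void> dataDistributorProcess = dataDistributor( recruited, dbInfo );
errorForwarders.add( forwardError( errors, Role::DATA_DISTRIBUTOR, recruited.id(),
    setWhenDoneOrError( dataDistributorProcess, ddInterf, Optional<DataDistributorInterface>() ) ) );
ddInterf->set(Optional<DataDistributorInterface>(recruited));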