commit c77d9e4abe
    merge conflicts
@@ -95,3 +95,6 @@ flow/coveragetool/obj
 .DS_Store
 temp/
 /versions.target
+/compile_commands.json
+/.ccls-cache
+.clangd/
@@ -293,12 +293,12 @@ bool FDBLibTLSPolicy::set_verify_peers(int count, const uint8_t* verify_peers[],
 				break;
 			}
 			if(split == start || verifyString[split-1] != '\\') {
-				Reference<FDBLibTLSVerify> verify = Reference<FDBLibTLSVerify>(new FDBLibTLSVerify(verifyString.substr(start,split-start)));
+				auto verify = makeReference<FDBLibTLSVerify>(verifyString.substr(start, split - start));
 				verify_rules.push_back(verify);
 				start = split+1;
 			}
 		}
-		Reference<FDBLibTLSVerify> verify = Reference<FDBLibTLSVerify>(new FDBLibTLSVerify(verifyString.substr(start)));
+		auto verify = makeReference<FDBLibTLSVerify>(verifyString.substr(start));
 		verify_rules.push_back(verify);
 	} catch ( const std::runtime_error& ) {
 		verify_rules.clear();
@@ -66,7 +66,7 @@ static void logf(const char* event, void* uid, bool is_error, ...) {
 int FDBLibTLSVerifyTest::run() {
 	Reference<FDBLibTLSVerify> verify;
 	try {
-		verify = Reference<FDBLibTLSVerify>(new FDBLibTLSVerify(input));
+		verify = makeReference<FDBLibTLSVerify>(input);
 	} catch ( const std::runtime_error& e ) {
 		if (valid) {
 			std::cerr << "FAIL: Verify test failed, but should have succeeded - '" << input << "'\n";
@@ -102,8 +102,8 @@ int FDBLibTLSVerifyTest::run() {
 }
 
 static int policy_verify_test() {
-	Reference<FDBLibTLSPlugin> plugin = Reference<FDBLibTLSPlugin>(new FDBLibTLSPlugin());
-	Reference<FDBLibTLSPolicy> policy = Reference<FDBLibTLSPolicy>(new FDBLibTLSPolicy(plugin, (ITLSLogFunc)logf));
+	auto plugin = makeReference<FDBLibTLSPlugin>();
+	auto policy = makeReference<FDBLibTLSPolicy>(plugin, (ITLSLogFunc)logf);
 
 	const char *verify_peers[] = {
 		"S.CN=abc",
@@ -116,9 +116,9 @@ static int policy_verify_test() {
 		(int)strlen(verify_peers[2]),
 	};
 	Reference<FDBLibTLSVerify> verify_rules[] = {
-		Reference<FDBLibTLSVerify>(new FDBLibTLSVerify(std::string(verify_peers[0], verify_peers_len[0]))),
-		Reference<FDBLibTLSVerify>(new FDBLibTLSVerify(std::string(verify_peers[1], verify_peers_len[1]))),
-		Reference<FDBLibTLSVerify>(new FDBLibTLSVerify(std::string(verify_peers[2], verify_peers_len[2]))),
+		makeReference<FDBLibTLSVerify>(std::string(verify_peers[0], verify_peers_len[0])),
+		makeReference<FDBLibTLSVerify>(std::string(verify_peers[1], verify_peers_len[1])),
+		makeReference<FDBLibTLSVerify>(std::string(verify_peers[2], verify_peers_len[2])),
 	};
 
 	if (!policy->set_verify_peers(3, (const uint8_t **)verify_peers, verify_peers_len)) {
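Reviewer note: the pattern in the three hunks above repeats throughout this commit; every two-step `Reference<T>(new T(...))` construction collapses into `makeReference<T>(...)`. A minimal, self-contained sketch of such a factory follows, using simplified stand-ins for flow's `Reference`/`ReferenceCounted` (the real definitions live in flow/FastRef.h and differ in detail):

    #include <string>
    #include <utility>

    // Simplified stand-ins for flow's Reference<T> / ReferenceCounted<T>.
    template <class T>
    class Reference {
    public:
        explicit Reference(T* p = nullptr) : ptr(p) {} // adopts one reference
        Reference(const Reference& o) : ptr(o.ptr) { if (ptr) ptr->addref(); }
        Reference& operator=(Reference o) { std::swap(ptr, o.ptr); return *this; }
        ~Reference() { if (ptr) ptr->delref(); }
        T* operator->() const { return ptr; }
    private:
        T* ptr;
    };

    template <class T>
    class ReferenceCounted {
    public:
        void addref() { ++count; }
        void delref() { if (--count == 0) delete static_cast<T*>(this); }
    private:
        int count = 1;
    };

    // The factory style adopted by this commit: allocation and wrapping happen
    // in one expression, so call sites never spell out a raw `new` (compare
    // std::make_shared for non-intrusive counting).
    template <class T, class... Args>
    Reference<T> makeReference(Args&&... args) {
        return Reference<T>(new T(std::forward<Args>(args)...));
    }

    struct Verify : ReferenceCounted<Verify> {
        explicit Verify(std::string rule) : rule(std::move(rule)) {}
        std::string rule;
    };

    int main() {
        // Old style, as removed throughout this commit:
        Reference<Verify> a = Reference<Verify>(new Verify("S.CN=abc"));
        // New style, as introduced throughout this commit:
        auto b = makeReference<Verify>("S.CN=abc");
        return (a->rule == b->rule) ? 0 : 1;
    }

Besides hiding the raw `new`, the single factory keeps call sites short and gives one place to change the allocation strategy later.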
@@ -124,9 +124,18 @@ fdb_error_t fdb_run_network() {
 	CATCH_AND_RETURN( API->runNetwork(); );
 }
 
+#ifdef ADDRESS_SANITIZER
+extern "C" void __lsan_do_leak_check();
+#endif
+
 extern "C" DLLEXPORT
 fdb_error_t fdb_stop_network() {
-	CATCH_AND_RETURN( API->stopNetwork(); );
+#ifdef ADDRESS_SANITIZER
+	// fdb_stop_network intentionally leaks a bunch of memory, so let's do the
+	// leak check before that so it's meaningful
+	__lsan_do_leak_check();
+#endif
+	CATCH_AND_RETURN(API->stopNetwork(););
 }
 
 extern "C" DLLEXPORT
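Reviewer note: the `__lsan_do_leak_check()` call runs LeakSanitizer's check before `fdb_stop_network()` deliberately leaks memory on shutdown, so the report only contains real leaks. A standalone sketch of the same idea (build with `-fsanitize=address`; `ADDRESS_SANITIZER` is the project-defined macro used in the hunk above, and `shutdown_with_intentional_leak` is a hypothetical stand-in for `fdb_stop_network`):

    #include <cstdlib>

    // Provided by the sanitizer runtime; this matches the declaration added to
    // fdb_c.cpp above (also available via <sanitizer/lsan_interface.h>).
    #ifdef ADDRESS_SANITIZER
    extern "C" void __lsan_do_leak_check();
    #endif

    // Hypothetical stand-in for fdb_stop_network(): a fast shutdown path that
    // leaks on purpose and should not pollute the leak report.
    static void shutdown_with_intentional_leak() {
        (void)new char[1024]; // deliberate, known leak
    }

    int main() {
        int* lost = new int[10]; // a real bug that LSan should still catch
        lost = nullptr;
        (void)lost;

    #ifdef ADDRESS_SANITIZER
        // Run the check *before* the intentional leak, mirroring the commit.
        // Per the sanitizer interface, an explicit call also disables the
        // end-of-process check, so the deliberate leak below goes unreported.
        __lsan_do_leak_check();
    #endif
        shutdown_with_intentional_leak();
        return 0;
    }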
@@ -8,10 +8,10 @@ RUN yum install -y yum-utils &&\
 	http://opensource.wandisco.com/centos/6/git/x86_64/wandisco-git-release-6-1.noarch.rpm &&\
 	yum -y install devtoolset-8-8.1-1.el6 java-1.8.0-openjdk-devel \
 	devtoolset-8-gcc-8.3.1 devtoolset-8-gcc-c++-8.3.1 \
-	devtoolset-8-libubsan-devel devtoolset-8-valgrind-devel \
+	devtoolset-8-libubsan-devel devtoolset-8-libasan-devel devtoolset-8-valgrind-devel \
 	rh-python36-python-devel rh-ruby24 golang python27 rpm-build \
 	mono-core debbuild python-pip dos2unix valgrind-devel ccache \
-	distcc wget git &&\
+	distcc wget git lz4 lz4-devel lz4-static &&\
 	pip install boto3==1.1.1
 
 USER root
@@ -61,8 +61,8 @@ RUN cd /opt/ && curl -L https://github.com/facebook/rocksdb/archive/v6.10.1.tar.gz ...
 ARG TIMEZONEINFO=America/Los_Angeles
 RUN rm -f /etc/localtime && ln -s /usr/share/zoneinfo/${TIMEZONEINFO} /etc/localtime
 
-LABEL version=0.1.17
-ENV DOCKER_IMAGEVER=0.1.17
+LABEL version=0.1.19
+ENV DOCKER_IMAGEVER=0.1.19
 ENV JAVA_HOME=/usr/lib/jvm/java-1.8.0
 ENV CC=/opt/rh/devtoolset-8/root/usr/bin/gcc
 ENV CXX=/opt/rh/devtoolset-8/root/usr/bin/g++
@@ -1,4 +1,4 @@
-FROM foundationdb/foundationdb-build:0.1.17
+FROM foundationdb/foundationdb-build:0.1.19
 
 USER root
 
@@ -50,8 +50,8 @@ RUN cp -iv /usr/local/bin/clang++ /usr/local/bin/clang++.deref &&\
 	ldconfig &&\
 	rm -rf /mnt/artifacts
 
-LABEL version=0.11.9
-ENV DOCKER_IMAGEVER=0.11.9
+LABEL version=0.11.10
+ENV DOCKER_IMAGEVER=0.11.10
 
 ENV CLANGCC=/usr/local/bin/clang.de8a65ef
 ENV CLANGCXX=/usr/local/bin/clang++.de8a65ef
@@ -2,7 +2,7 @@ version: "3"
 
 services:
   common: &common
-    image: foundationdb/foundationdb-build:0.1.17
+    image: foundationdb/foundationdb-build:0.1.19
 
   build-setup: &build-setup
     <<: *common
@@ -1,24 +1,12 @@
-if ((NOT GENERATE_EL6) AND (NOT "$ENV{GENERATE_EL6}" STREQUAL ""))
-  if (("$ENV{GENERATE_EL6}" STREQUAL "ON") OR ("$ENV{GENERATE_EL6}" STREQUAL "1") OR ("$ENV{GENERATE_EL6}" STREQUAL "YES"))
-    set(GENERATE_EL6 ON)
-  endif()
-endif()
-
 # RPM specifics
 if(CPACK_GENERATOR MATCHES "RPM")
   set(CPACK_PACKAGING_INSTALL_PREFIX "/")
-  if(GENERATE_EL6)
-    message(STATUS "Building EL6 components")
-    set(CPACK_COMPONENTS_ALL clients-el6 server-el6)
-  else()
-    message(STATUS "Building EL7 components")
-    set(CPACK_COMPONENTS_ALL clients-el7 server-el7)
-  endif()
+  set(CPACK_COMPONENTS_ALL clients-el7 server-el7 clients-versioned server-versioned)
   set(CPACK_RESOURCE_FILE_README ${CMAKE_SOURCE_DIR}/README.md)
   set(CPACK_RESOURCE_FILE_LICENSE ${CMAKE_SOURCE_DIR}/LICENSE)
 elseif(CPACK_GENERATOR MATCHES "DEB")
   set(CPACK_PACKAGING_INSTALL_PREFIX "/")
-  set(CPACK_COMPONENTS_ALL clients-deb server-deb)
+  set(CPACK_COMPONENTS_ALL clients-deb server-deb clients-versioned server-versioned)
   set(CPACK_RESOURCE_FILE_README ${CMAKE_SOURCE_DIR}/README.md)
   set(CPACK_RESOURCE_FILE_LICENSE ${CMAKE_SOURCE_DIR}/LICENSE)
 elseif(CPACK_GENERATOR MATCHES "productbuild")
@@ -18,7 +18,7 @@ if (RocksDB_FOUND)
     -DWITH_CORE_TOOLS=OFF
     -DWITH_BENCHMARK_TOOLS=OFF
     -DWITH_BZ2=OFF
-    -DWITH_LZ4=OFF
+    -DWITH_LZ4=ON
     -DWITH_SNAPPY=OFF
     -DWITH_ZLIB=OFF
     -DWITH_ZSTD=OFF
@@ -45,7 +45,7 @@ else()
     -DWITH_CORE_TOOLS=OFF
     -DWITH_BENCHMARK_TOOLS=OFF
     -DWITH_BZ2=OFF
-    -DWITH_LZ4=OFF
+    -DWITH_LZ4=ON
     -DWITH_SNAPPY=OFF
     -DWITH_ZLIB=OFF
     -DWITH_ZSTD=OFF
@@ -106,10 +106,10 @@ endif()
 # RocksDB
 ################################################################################
 
-set(SSD_ROCKSDB_EXPERIMENTAL OFF CACHE BOOL "Build with experimental RocksDB support")
+set(SSD_ROCKSDB_EXPERIMENTAL ON CACHE BOOL "Build with experimental RocksDB support")
 # RocksDB is currently enabled by default for GCC but does not build with the latest
 # Clang.
-if (SSD_ROCKSDB_EXPERIMENTAL OR GCC)
+if (SSD_ROCKSDB_EXPERIMENTAL AND GCC)
   set(WITH_ROCKSDB_EXPERIMENTAL ON)
 else()
   set(WITH_ROCKSDB_EXPERIMENTAL OFF)
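Reviewer note on the two edits above: previously RocksDB was built for any GCC build regardless of the flag (`OFF OR GCC`). Now the flag defaults to `ON` but is gated with `AND GCC`, so GCC builds still get RocksDB by default and can opt out, while Clang builds never attempt it, which is consistent with the comment about RocksDB not building under recent Clang.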
@@ -0,0 +1,260 @@
+function(fdb_install_packages)
+  set(FDB_INSTALL_PACKAGES ${ARGV} PARENT_SCOPE)
+endfunction()
+
+function(fdb_install_dirs)
+  set(FDB_INSTALL_DIRS ${ARGV} PARENT_SCOPE)
+endfunction()
+
+function(install_symlink_impl)
+  if(WIN32)
+    return()
+  endif()
+  set(options "")
+  set(one_value_options TO DESTINATION)
+  set(multi_value_options COMPONENTS)
+  cmake_parse_arguments(SYM "${options}" "${one_value_options}" "${multi_value_options}" "${ARGN}")
+
+  file(MAKE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/symlinks)
+  get_filename_component(fname ${SYM_DESTINATION} NAME)
+  get_filename_component(dest_dir ${SYM_DESTINATION} DIRECTORY)
+  set(sl ${CMAKE_CURRENT_BINARY_DIR}/symlinks/${fname})
+  execute_process(COMMAND ${CMAKE_COMMAND} -E create_symlink ${SYM_TO} ${sl})
+  foreach(component IN LISTS SYM_COMPONENTS)
+    install(FILES ${sl} DESTINATION ${dest_dir} COMPONENT ${component})
+  endforeach()
+endfunction()
+
+function(install_symlink)
+  if(WIN32 AND NOT OPEN_FOR_IDE)
+    return()
+  endif()
+  set(options "")
+  set(one_value_options COMPONENT LINK_DIR FILE_DIR LINK_NAME FILE_NAME)
+  set(multi_value_options "")
+  cmake_parse_arguments(IN "${options}" "${one_value_options}" "${multi_value_options}" "${ARGN}")
+
+  set(rel_path "")
+  string(REGEX MATCHALL "\\/" slashes "${IN_LINK_NAME}")
+  foreach(ignored IN LISTS slashes)
+    set(rel_path "../${rel_path}")
+  endforeach()
+  if("${IN_FILE_DIR}" MATCHES "bin")
+    if("${IN_LINK_DIR}" MATCHES "lib")
+      install_symlink_impl(
+        TO "../${rel_path}bin/${IN_FILE_NAME}"
+        DESTINATION "lib/${IN_LINK_NAME}"
+        COMPONENTS "${IN_COMPONENT}-tgz")
+      install_symlink_impl(
+        TO "../${rel_path}bin/${IN_FILE_NAME}"
+        DESTINATION "usr/lib64/${IN_LINK_NAME}"
+        COMPONENTS
+          "${IN_COMPONENT}-el7"
+          "${IN_COMPONENT}-deb")
+      install_symlink_impl(
+        TO "../${rel_path}bin/${IN_FILE_NAME}"
+        DESTINATION "usr/lib64/${IN_LINK_NAME}"
+        COMPONENTS "${IN_COMPONENT}-deb")
+    elseif("${IN_LINK_DIR}" MATCHES "bin")
+      install_symlink_impl(
+        TO "../${rel_path}bin/${IN_FILE_NAME}"
+        DESTINATION "bin/${IN_LINK_NAME}"
+        COMPONENTS "${IN_COMPONENT}-tgz")
+      install_symlink_impl(
+        TO "../${rel_path}bin/${IN_FILE_NAME}"
+        DESTINATION "usr/bin/${IN_LINK_NAME}"
+        COMPONENTS
+          "${IN_COMPONENT}-el7"
+          "${IN_COMPONENT}-deb")
+    elseif("${IN_LINK_DIR}" MATCHES "fdbmonitor")
+      install_symlink_impl(
+        TO "../../${rel_path}bin/${IN_FILE_NAME}"
+        DESTINATION "lib/foundationdb/${IN_LINK_NAME}"
+        COMPONENTS "${IN_COMPONENT}-tgz")
+      install_symlink_impl(
+        TO "../../${rel_path}bin/${IN_FILE_NAME}"
+        DESTINATION "usr/lib/foundationdb/${IN_LINK_NAME}"
+        COMPONENTS
+          "${IN_COMPONENT}-el7"
+          "${IN_COMPONENT}-deb")
+    else()
+      message(FATAL_ERROR "Unknown LINK_DIR ${IN_LINK_DIR}")
+    endif()
+  else()
+    message(FATAL_ERROR "Unknown FILE_DIR ${IN_FILE_DIR}")
+  endif()
+endfunction()
+
+function(symlink_files)
+  if(NOT WIN32)
+    set(options "")
+    set(one_value_options LOCATION SOURCE)
+    set(multi_value_options TARGETS)
+    cmake_parse_arguments(SYM "${options}" "${one_value_options}" "${multi_value_options}" "${ARGN}")
+
+    file(MAKE_DIRECTORY ${CMAKE_BINARY_DIR}/${SYM_LOCATION})
+    foreach(component IN LISTS SYM_TARGETS)
+      execute_process(COMMAND ${CMAKE_COMMAND} -E create_symlink ${SYM_SOURCE} ${CMAKE_BINARY_DIR}/${SYM_LOCATION}/${component} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}/${SYM_LOCATION})
+    endforeach()
+  endif()
+endfunction()
+
+function(pop_front)
+  if(ARGC LESS 2)
+    message(FATAL_ERROR "USAGE: pop_front(<list> <out-var> [<count>])")
+  endif()
+  set(count ${ARGV2})
+  if(NOT count)
+    set(count 1)
+  endif()
+  set(result)
+  foreach(elem IN LISTS ${ARGV0})
+    if(count GREATER 0)
+      math(EXPR count "${count} - 1")
+    else()
+      list(APPEND result ${elem})
+    endif()
+  endforeach()
+  set(${ARGV1} ${result} PARENT_SCOPE)
+endfunction()
+
+function(install_destinations)
+  if(NOT ARGV0)
+    message(FATAL_ERROR "No package passed")
+  endif()
+  set(package ${ARGV0})
+  set(REST_ARGS ${ARGV})
+  pop_front(REST_ARGS REST_ARGS)
+  list(FIND FDB_INSTALL_PACKAGES ${package} idx)
+  if(idx LESS 0)
+    message(FATAL_ERROR "Package ${package} does not exist")
+  endif()
+  cmake_parse_arguments(MY "" "${FDB_INSTALL_DIRS}" "" ${REST_ARGS})
+  foreach(dir IN LISTS FDB_INSTALL_DIRS)
+    if(MY_${dir})
+      set(var ${MY_${dir}})
+      set(__install_dest_${package}_${dir} ${MY_${dir}} PARENT_SCOPE)
+    endif()
+  endforeach()
+endfunction()
+
+function(get_install_dest)
+  if(ARGC LESS 3)
+    message(FATAL_ERROR "USAGE: get_install_dest(<pkg> <dir> <out-var> [<var-name>])")
+  endif()
+  set(package ${ARGV0})
+  set(dir ${ARGV1})
+  set(out ${ARGV2})
+  set(${out} ${__install_dest_${package}_${dir}} PARENT_SCOPE)
+  if(ARGV3)
+    set(${ARGV3} "__install_dest_${package}_${dir}")
+  endif()
+endfunction()
+
+function(print_install_destinations)
+  foreach(pkg IN LISTS FDB_INSTALL_PACKAGES)
+    message(STATUS "Destinations for ${pkg}")
+    set(old_indent ${CMAKE_MESSAGE_INDENT})
+    set(CMAKE_MESSAGE_INDENT "${CMAKE_MESSAGE_INDENT}  ")
+    foreach(dir IN LISTS FDB_INSTALL_DIRS)
+      get_install_dest(${pkg} ${dir} d)
+      message(STATUS "${dir} -> ${d}")
+    endforeach()
+    set(CMAKE_MESSAGE_INDENT ${old_indent})
+  endforeach()
+endfunction()
+
+function(get_install_var)
+  if(NOT ARGC EQUAL 3)
+    message(FATAL_ERROR "USAGE: get_install_var(<pkg> <dir> <out-var>)")
+  endif()
+  set(${ARGV2} "__install_dest_${ARGV0}_${ARGV1}" PARENT_SCOPE)
+endfunction()
+
+function(copy_install_destinations)
+  if(ARGC LESS 2)
+    message(FATAL_ERROR "USAGE: copy_install_destinations(<from> <to> [PREFIX prefix])")
+  endif()
+  set(from ${ARGV0})
+  set(to ${ARGV1})
+  set(REST_ARGS ${ARGV})
+  pop_front(REST_ARGS REST_ARGS 2)
+  cmake_parse_arguments(MY "" "PREFIX" "" ${REST_ARGS})
+  foreach(dir IN LISTS FDB_INSTALL_DIRS)
+    get_install_dest(${from} ${dir} d)
+    get_install_var(${to} ${dir} name)
+    if(MY_PREFIX)
+      set(d "${MY_PREFIX}${d}")
+    endif()
+    set(${name} ${d} PARENT_SCOPE)
+  endforeach()
+endfunction()
+
+function(fdb_configure_and_install)
+  if(NOT WIN32 AND NOT OPEN_FOR_IDE)
+    set(one_value_options COMPONENT DESTINATION FILE DESTINATION_SUFFIX)
+    cmake_parse_arguments(IN "${options}" "${one_value_options}" "${multi_value_options}" "${ARGN}")
+    foreach(pkg IN LISTS FDB_INSTALL_PACKAGES)
+      string(TOLOWER "${pkg}" package)
+      string(TOUPPER "${IN_DESTINATION}" destination)
+      get_install_dest(${pkg} INCLUDE INCLUDE_DIR)
+      get_install_dest(${pkg} LIB LIB_DIR)
+      get_install_dest(${pkg} ${destination} install_path)
+      string(REGEX REPLACE "\.in$" "" name "${IN_FILE}")
+      get_filename_component(name "${name}" NAME)
+      set(generated_file_name "${generated_dir}/${package}/${name}")
+      configure_file("${IN_FILE}" "${generated_file_name}" @ONLY)
+      install(
+        FILES "${generated_file_name}"
+        DESTINATION "${install_path}${IN_DESTINATION_SUFFIX}"
+        COMPONENT "${IN_COMPONENT}-${package}")
+    endforeach()
+  endif()
+endfunction()
+
+function(fdb_install)
+  if(NOT WIN32 AND NOT OPEN_FOR_IDE)
+    set(one_value_options COMPONENT DESTINATION EXPORT DESTINATION_SUFFIX)
+    set(multi_value_options TARGETS FILES PROGRAMS DIRECTORY)
+    cmake_parse_arguments(IN "${options}" "${one_value_options}" "${multi_value_options}" "${ARGN}")
+
+    set(install_export 0)
+    if(IN_TARGETS)
+      set(args TARGETS ${IN_TARGETS})
+    elseif(IN_FILES)
+      set(args FILES ${IN_FILES})
+    elseif(IN_PROGRAMS)
+      set(args PROGRAMS ${IN_PROGRAMS})
+    elseif(IN_DIRECTORY)
+      set(args DIRECTORY ${IN_DIRECTORY})
+    elseif(IN_EXPORT)
+      set(install_export 1)
+    else()
+      message(FATAL_ERROR "Expected FILES, PROGRAMS, DIRECTORY, or TARGETS")
+    endif()
+    string(TOUPPER "${IN_DESTINATION}" destination)
+    foreach(pkg IN LISTS FDB_INSTALL_PACKAGES)
+      get_install_dest(${pkg} ${destination} install_path)
+      string(TOLOWER "${pkg}" package)
+      if(install_export)
+        install(
+          EXPORT "${IN_EXPORT}-${package}"
+          DESTINATION "${install_path}${IN_DESTINATION_SUFFIX}"
+          FILE "${IN_EXPORT}.cmake"
+          COMPONENT "${IN_COMPONENT}-${package}")
+      else()
+        set(export_args "")
+        if(IN_EXPORT)
+          set(export_args EXPORT "${IN_EXPORT}-${package}")
+        endif()
+        if(NOT ${install_path} STREQUAL "")
+          install(
+            ${args}
+            ${export_args}
+            DESTINATION "${install_path}${IN_DESTINATION_SUFFIX}"
+            COMPONENT "${IN_COMPONENT}-${package}")
+        endif()
+      endif()
+    endforeach()
+  endif()
+endfunction()
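Reviewer note: two edits above are corrections to what the extraction showed, not to the design. The early-return guards read as `if (NOT WIN32) return()` in the rendered page, which would disable symlink installation everywhere except Windows; they are restored here as `if(WIN32)` / `if(WIN32 AND NOT OPEN_FOR_IDE)` to match the guard-wrapped `if(NOT WIN32 ...)` bodies used elsewhere in the file. Likewise `get_install_dest(${pkg} INCLUDE LIB_DIR)` is assumed to be a rendering duplicate of the preceding line and is restored as `get_install_dest(${pkg} LIB LIB_DIR)`. Taken together, the new module replaces the old `install_destination_for_<dir>_<package>` variable convention with a small registry: `fdb_install_packages()` and `fdb_install_dirs()` declare the two axes, `install_destinations()` records a path per (package, dir) pair in `__install_dest_<pkg>_<dir>` variables, `copy_install_destinations()` clones one package's layout into another (optionally re-rooted with PREFIX, which is how the versioned packages get their `usr/lib/foundationdb-<version>/` root), and `fdb_install()` / `fdb_configure_and_install()` fan one logical rule out into one CPack component per package.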
@@ -1,6 +1,4 @@
 ################################################################################
 # Helper Functions
 ################################################################################
-
-function(install_symlink_impl)
-  if (NOT WIN32)
+include(FDBInstall)
@@ -48,6 +46,10 @@ function(install_symlink)
       TO "../${rel_path}bin/${IN_FILE_NAME}"
       DESTINATION "usr/lib64/${IN_LINK_NAME}"
       COMPONENTS "${IN_COMPONENT}-deb")
+    install_symlink_impl(
+      TO "../${rel_path}local/bin/${IN_FILE_NAME}"
+      DESTINATION "usr/lib64/${IN_LINK_NAME}"
+      COMPONENTS "${IN_COMPONENT}-pm")
   elseif("${IN_LINK_DIR}" MATCHES "bin")
     install_symlink_impl(
       TO "../${rel_path}bin/${IN_FILE_NAME}"
@@ -59,6 +61,10 @@ function(install_symlink)
       COMPONENTS "${IN_COMPONENT}-el6"
                  "${IN_COMPONENT}-el7"
                  "${IN_COMPONENT}-deb")
+    install_symlink_impl(
+      TO "../${rel_path}/bin/${IN_FILE_NAME}"
+      DESTINATION "usr/local/bin/${IN_LINK_NAME}"
+      COMPONENTS "${IN_COMPONENT}-pm")
   elseif("${IN_LINK_DIR}" MATCHES "fdbmonitor")
     install_symlink_impl(
       TO "../../${rel_path}bin/${IN_FILE_NAME}"
@@ -70,6 +76,10 @@ function(install_symlink)
       COMPONENTS "${IN_COMPONENT}-el6"
                  "${IN_COMPONENT}-el7"
                  "${IN_COMPONENT}-deb")
+    install_symlink_impl(
+      TO "../../${rel_path}/bin/${IN_FILE_NAME}"
+      DESTINATION "usr/local/lib/foundationdb/${IN_LINK_NAME}"
+      COMPONENTS "${IN_COMPONENT}-pm")
   else()
     message(FATAL_ERROR "Unknown LINK_DIR ${IN_LINK_DIR}")
   endif()
@@ -109,7 +119,7 @@ set(install_destination_for_lib_tgz "lib")
 set(install_destination_for_lib_deb "usr/lib")
 set(install_destination_for_lib_el6 "usr/lib64")
 set(install_destination_for_lib_el7 "usr/lib64")
-set(install_destination_for_lib_pm "lib")
+set(install_destination_for_lib_pm "usr/local/lib")
 set(install_destination_for_fdbmonitor_tgz "sbin")
 set(install_destination_for_fdbmonitor_deb "usr/lib/foundationdb")
 set(install_destination_for_fdbmonitor_el6 "usr/lib/foundationdb")
@@ -129,78 +139,51 @@ set(install_destination_for_log_tgz "log/foundationdb")
 set(install_destination_for_log_deb "var/log/foundationdb")
 set(install_destination_for_log_el6 "var/log/foundationdb")
 set(install_destination_for_log_el7 "var/log/foundationdb")
-set(install_destination_for_log_pm "")
+set(install_destination_for_log_pm "usr/local/foundationdb/logs")
 set(install_destination_for_data_tgz "lib/foundationdb")
 set(install_destination_for_data_deb "var/lib/foundationdb/data")
 set(install_destination_for_data_el6 "var/lib/foundationdb/data")
 set(install_destination_for_data_el7 "var/lib/foundationdb/data")
-set(install_destination_for_data_pm "")
+set(install_destination_for_data_pm "usr/local/foundationdb/data")
+fdb_install_packages(TGZ DEB EL7 PM VERSIONED)
+fdb_install_dirs(BIN SBIN LIB FDBMONITOR INCLUDE ETC LOG DATA)
+message(STATUS "FDB_INSTALL_DIRS -> ${FDB_INSTALL_DIRS}")
+
+# 'map' from (destination, package) to path
+# format vars like install_destination_for_${destination}_${package}
+install_destinations(TGZ
+  BIN bin
+  SBIN sbin
+  LIB lib
+  FDBMONITOR sbin
+  INCLUDE include
+  ETC etc/foundationdb
+  LOG log/foundationdb
+  DATA lib/foundationdb)
+copy_install_destinations(TGZ VERSIONED PREFIX "usr/lib/foundationdb-${PROJECT_VERSION}/")
+install_destinations(DEB
+  BIN usr/bin
+  SBIN usr/sbin
+  LIB usr/lib
+  FDBMONITOR usr/lib/foundationdb
+  INCLUDE usr/include
+  ETC etc/foundationdb
+  LOG var/log/foundationdb
+  DATA var/lib/foundationdb)
+copy_install_destinations(DEB EL7)
+install_destinations(EL7 LIB usr/lib64)
+install_destinations(PM
+  BIN usr/local/bin
+  SBIN usr/local/sbin
+  LIB lib
+  FDBMONITOR usr/local/libexec
+  INCLUDE usr/local/include
+  ETC usr/local/etc/foundationdb)
+
+# This can be used for debugging in case above is behaving funky
+#print_install_destinations()
 
 set(generated_dir "${CMAKE_CURRENT_BINARY_DIR}/generated")
-function(fdb_configure_and_install)
-  if(NOT WIN32 AND NOT OPEN_FOR_IDE)
-    set(one_value_options COMPONENT DESTINATION FILE DESTINATION_SUFFIX)
-    cmake_parse_arguments(IN "${options}" "${one_value_options}" "${multi_value_options}" "${ARGN}")
-    foreach(package tgz deb el6 el7 pm)
-      set(INCLUDE_DIR "${install_destination_for_include_${package}}")
-      set(LIB_DIR "${install_destination_for_lib_${package}}")
-      set(install_path "${install_destination_for_${IN_DESTINATION}_${package}}")
-      string(REGEX REPLACE "\.in$" "" name "${IN_FILE}")
-      get_filename_component(name "${name}" NAME)
-      set(generated_file_name "${generated_dir}/${package}/${name}")
-      configure_file("${IN_FILE}" "${generated_file_name}" @ONLY)
-      install(
-        FILES "${generated_file_name}"
-        DESTINATION "${install_path}${IN_DESTINATION_SUFFIX}"
-        COMPONENT "${IN_COMPONENT}-${package}")
-    endforeach()
-  endif()
-endfunction()
-
-function(fdb_install)
-  if(NOT WIN32 AND NOT OPEN_FOR_IDE)
-    set(one_value_options COMPONENT DESTINATION EXPORT DESTINATION_SUFFIX)
-    set(multi_value_options TARGETS FILES PROGRAMS DIRECTORY)
-    cmake_parse_arguments(IN "${options}" "${one_value_options}" "${multi_value_options}" "${ARGN}")
-
-    set(install_export 0)
-    if(IN_TARGETS)
-      set(args TARGETS ${IN_TARGETS})
-    elseif(IN_FILES)
-      set(args FILES ${IN_FILES})
-    elseif(IN_PROGRAMS)
-      set(args PROGRAMS ${IN_PROGRAMS})
-    elseif(IN_DIRECTORY)
-      set(args DIRECTORY ${IN_DIRECTORY})
-    elseif(IN_EXPORT)
-      set(install_export 1)
-    else()
-      message(FATAL_ERROR "Expected FILES, PROGRAMS, DIRECTORY, or TARGETS")
-    endif()
-    foreach(package tgz deb el6 el7 pm)
-      set(install_path "${install_destination_for_${IN_DESTINATION}_${package}}")
-      if(install_export)
-        install(
-          EXPORT "${IN_EXPORT}-${package}"
-          DESTINATION "${install_path}${IN_DESTINATION_SUFFIX}"
-          FILE "${IN_EXPORT}.cmake"
-          COMPONENT "${IN_COMPONENT}-${package}")
-      else()
-        set(export_args "")
-        if (IN_EXPORT)
-          set(export_args EXPORT "${IN_EXPORT}-${package}")
-        endif()
-        if(NOT ${install_path} STREQUAL "")
-          install(
-            ${args}
-            ${export_args}
-            DESTINATION "${install_path}${IN_DESTINATION_SUFFIX}"
-            COMPONENT "${IN_COMPONENT}-${package}")
-        endif()
-      endif()
-    endforeach()
-  endif()
-endfunction()
 
 if(APPLE)
   set(CPACK_GENERATOR TGZ productbuild)
@@ -228,6 +211,22 @@ list(GET FDB_VERSION_LIST 0 FDB_MAJOR)
 list(GET FDB_VERSION_LIST 1 FDB_MINOR)
 list(GET FDB_VERSION_LIST 2 FDB_PATCH)
 
+
+################################################################################
+# Alternatives config
+################################################################################
+
+math(EXPR ALTERNATIVES_PRIORITY "(${PROJECT_VERSION_MAJOR} * 1000) + (${PROJECT_VERSION_MINOR} * 100) + ${PROJECT_VERSION_PATCH}")
+set(script_dir "${PROJECT_BINARY_DIR}/packaging/multiversion/")
+file(MAKE_DIRECTORY "${script_dir}/server" "${script_dir}/clients")
+configure_file("${PROJECT_SOURCE_DIR}/packaging/multiversion/server/postinst" "${script_dir}/server" @ONLY)
+configure_file("${PROJECT_SOURCE_DIR}/packaging/multiversion/server/prerm" "${script_dir}/server" @ONLY)
+set(LIB_DIR lib)
+configure_file("${PROJECT_SOURCE_DIR}/packaging/multiversion/clients/postinst" "${script_dir}/clients" @ONLY)
+set(LIB_DIR lib64)
+configure_file("${PROJECT_SOURCE_DIR}/packaging/multiversion/clients/postinst" "${script_dir}/clients/postinst-el7" @ONLY)
+configure_file("${PROJECT_SOURCE_DIR}/packaging/multiversion/clients/prerm" "${script_dir}/clients" @ONLY)
+
 ################################################################################
 # General CPack configuration
 ################################################################################
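Reviewer note, as a worked example of the priority formula: for version 6.3.10 the expression evaluates to 6 * 1000 + 3 * 100 + 10 = 6310, so update-alternatives in automatic mode will prefer a later-installed 6.3.11 (priority 6311) over 6.3.10.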
@@ -247,23 +246,23 @@ set(CPACK_PACKAGE_DESCRIPTION_SUMMARY
 set(CPACK_PACKAGE_ICON ${CMAKE_SOURCE_DIR}/packaging/foundationdb.ico)
 set(CPACK_PACKAGE_CONTACT "The FoundationDB Community")
 
-set(CPACK_COMPONENT_SERVER-EL6_DEPENDS clients-el6)
 set(CPACK_COMPONENT_SERVER-EL7_DEPENDS clients-el7)
 set(CPACK_COMPONENT_SERVER-DEB_DEPENDS clients-deb)
 set(CPACK_COMPONENT_SERVER-TGZ_DEPENDS clients-tgz)
 set(CPACK_COMPONENT_SERVER-PM_DEPENDS clients-pm)
+set(CPACK_COMPONENT_SERVER-VERSIONED_DEPENDS clients-versioned)
 
-set(CPACK_COMPONENT_SERVER-EL6_DISPLAY_NAME "foundationdb-server")
 set(CPACK_COMPONENT_SERVER-EL7_DISPLAY_NAME "foundationdb-server")
 set(CPACK_COMPONENT_SERVER-DEB_DISPLAY_NAME "foundationdb-server")
 set(CPACK_COMPONENT_SERVER-TGZ_DISPLAY_NAME "foundationdb-server")
 set(CPACK_COMPONENT_SERVER-PM_DISPLAY_NAME "foundationdb-server")
+set(CPACK_COMPONENT_SERVER-VERSIONED_DISPLAY_NAME "foundationdb-server-${PROJECT_VERSION}")
 
-set(CPACK_COMPONENT_CLIENTS-EL6_DISPLAY_NAME "foundationdb-clients")
 set(CPACK_COMPONENT_CLIENTS-EL7_DISPLAY_NAME "foundationdb-clients")
 set(CPACK_COMPONENT_CLIENTS-DEB_DISPLAY_NAME "foundationdb-clients")
 set(CPACK_COMPONENT_CLIENTS-TGZ_DISPLAY_NAME "foundationdb-clients")
 set(CPACK_COMPONENT_CLIENTS-PM_DISPLAY_NAME "foundationdb-clients")
+set(CPACK_COMPONENT_CLIENTS-VERSIONED_DISPLAY_NAME "foundationdb-clients-${PROJECT_VERSION}")
 
 
 # MacOS needs a file extension for the LICENSE file
@@ -300,39 +299,34 @@ set(deb-server-filename "foundationdb-server_${PROJECT_VERSION}${prerelease_string}")
 set(CPACK_RPM_PACKAGE_LICENSE "Apache 2.0")
 
 set(CPACK_RPM_PACKAGE_NAME "foundationdb")
-set(CPACK_RPM_CLIENTS-EL6_PACKAGE_NAME "foundationdb-clients")
 set(CPACK_RPM_CLIENTS-EL7_PACKAGE_NAME "foundationdb-clients")
-set(CPACK_RPM_SERVER-EL6_PACKAGE_NAME "foundationdb-server")
 set(CPACK_RPM_SERVER-EL7_PACKAGE_NAME "foundationdb-server")
+set(CPACK_RPM_SERVER-VERSIONED_PACKAGE_NAME "foundationdb-server-${PROJECT_VERSION}")
 
-set(CPACK_RPM_CLIENTS-EL6_FILE_NAME "${rpm-clients-filename}.el6.x86_64.rpm")
 set(CPACK_RPM_CLIENTS-EL7_FILE_NAME "${rpm-clients-filename}.el7.x86_64.rpm")
-set(CPACK_RPM_SERVER-EL6_FILE_NAME "${rpm-server-filename}.el6.x86_64.rpm")
+set(CPACK_RPM_CLIENTS-VERSIONED_FILE_NAME "${rpm-clients-filename}.versioned.x86_64.rpm")
 set(CPACK_RPM_SERVER-EL7_FILE_NAME "${rpm-server-filename}.el7.x86_64.rpm")
+set(CPACK_RPM_SERVER-VERSIONED_FILE_NAME "${rpm-server-filename}.versioned.x86_64.rpm")
 
-set(CPACK_RPM_CLIENTS-EL6_DEBUGINFO_FILE_NAME "${rpm-clients-filename}.el6-debuginfo.x86_64.rpm")
 set(CPACK_RPM_CLIENTS-EL7_DEBUGINFO_FILE_NAME "${rpm-clients-filename}.el7-debuginfo.x86_64.rpm")
-set(CPACK_RPM_SERVER-EL6_DEBUGINFO_FILE_NAME "${rpm-server-filename}.el6-debuginfo.x86_64.rpm")
+set(CPACK_RPM_CLIENTS-VERSIONED_DEBUGINFO_FILE_NAME "${rpm-clients-filename}.versioned-debuginfo.x86_64.rpm")
 set(CPACK_RPM_SERVER-EL7_DEBUGINFO_FILE_NAME "${rpm-server-filename}.el7-debuginfo.x86_64.rpm")
+set(CPACK_RPM_SERVER-VERSIONED_DEBUGINFO_FILE_NAME "${rpm-server-filename}.versioned-debuginfo.x86_64.rpm")
 
 file(MAKE_DIRECTORY "${CMAKE_BINARY_DIR}/packaging/emptydir")
 fdb_install(DIRECTORY "${CMAKE_BINARY_DIR}/packaging/emptydir/" DESTINATION data COMPONENT server)
 fdb_install(DIRECTORY "${CMAKE_BINARY_DIR}/packaging/emptydir/" DESTINATION log COMPONENT server)
 fdb_install(DIRECTORY "${CMAKE_BINARY_DIR}/packaging/emptydir/" DESTINATION etc COMPONENT clients)
 
-set(CPACK_RPM_SERVER-EL6_USER_FILELIST
-  "%config(noreplace) /etc/foundationdb/foundationdb.conf"
-  "%attr(0700,foundationdb,foundationdb) /var/log/foundationdb"
-  "%attr(0700, foundationdb, foundationdb) /var/lib/foundationdb")
 set(CPACK_RPM_SERVER-EL7_USER_FILELIST
   "%config(noreplace) /etc/foundationdb/foundationdb.conf"
   "%attr(0700,foundationdb,foundationdb) /var/log/foundationdb"
   "%attr(0700, foundationdb, foundationdb) /var/lib/foundationdb")
-set(CPACK_RPM_CLIENTS-EL6_USER_FILELIST "%dir /etc/foundationdb")
 set(CPACK_RPM_CLIENTS-EL7_USER_FILELIST "%dir /etc/foundationdb")
 set(CPACK_RPM_EXCLUDE_FROM_AUTO_FILELIST_ADDITION
   "/usr/sbin"
   "/usr/share/java"
   "/usr/lib"
   "/usr/lib64/cmake"
   "/etc/foundationdb"
   "/usr/lib64/pkgconfig"
@@ -346,42 +340,38 @@ set(CPACK_RPM_EXCLUDE_FROM_AUTO_FILELIST_ADDITION
   "/lib/systemd/system"
   "/etc/rc.d/init.d")
 set(CPACK_RPM_DEBUGINFO_PACKAGE ${GENERATE_DEBUG_PACKAGES})
-#set(CPACK_RPM_BUILD_SOURCE_DIRS_PREFIX /usr/src)
+#set(CPACK_RPM_BUILD_SOURCE_FDB_INSTALL_DIRS_PREFIX /usr/src)
 set(CPACK_RPM_COMPONENT_INSTALL ON)
 
-set(CPACK_RPM_CLIENTS-EL6_PRE_INSTALL_SCRIPT_FILE
-  ${CMAKE_SOURCE_DIR}/packaging/rpm/scripts/preclients.sh)
 set(CPACK_RPM_CLIENTS-EL7_PRE_INSTALL_SCRIPT_FILE
   ${CMAKE_SOURCE_DIR}/packaging/rpm/scripts/preclients.sh)
 
-set(CPACK_RPM_CLIENTS-EL6_POST_INSTALL_SCRIPT_FILE
-  ${CMAKE_SOURCE_DIR}/packaging/rpm/scripts/postclients.sh)
 set(CPACK_RPM_CLIENTS-EL7_POST_INSTALL_SCRIPT_FILE
   ${CMAKE_SOURCE_DIR}/packaging/rpm/scripts/postclients.sh)
 
-set(CPACK_RPM_SERVER-EL6_PRE_INSTALL_SCRIPT_FILE
-  ${CMAKE_SOURCE_DIR}/packaging/rpm/scripts/preserver.sh)
 set(CPACK_RPM_SERVER-EL7_PRE_INSTALL_SCRIPT_FILE
   ${CMAKE_SOURCE_DIR}/packaging/rpm/scripts/preserver.sh)
 
-set(CPACK_RPM_SERVER-EL6_POST_INSTALL_SCRIPT_FILE
-  ${CMAKE_SOURCE_DIR}/packaging/rpm/scripts/postserver-el6.sh)
 set(CPACK_RPM_SERVER-EL7_POST_INSTALL_SCRIPT_FILE
   ${CMAKE_SOURCE_DIR}/packaging/rpm/scripts/postserver.sh)
 
-set(CPACK_RPM_SERVER-EL6_PRE_UNINSTALL_SCRIPT_FILE
-  ${CMAKE_SOURCE_DIR}/packaging/rpm/scripts/preunserver.sh)
 set(CPACK_RPM_SERVER-EL7_PRE_UNINSTALL_SCRIPT_FILE
   ${CMAKE_SOURCE_DIR}/packaging/rpm/scripts/preunserver.sh)
 
-set(CPACK_RPM_SERVER-EL6_PACKAGE_REQUIRES
-  "foundationdb-clients = ${FDB_MAJOR}.${FDB_MINOR}.${FDB_PATCH}")
 set(CPACK_RPM_SERVER-EL7_PACKAGE_REQUIRES
   "foundationdb-clients = ${FDB_MAJOR}.${FDB_MINOR}.${FDB_PATCH}")
 #set(CPACK_RPM_java_PACKAGE_REQUIRES
 #  "foundationdb-clients = ${FDB_MAJOR}.${FDB_MINOR}.${FDB_PATCH}")
 #set(CPACK_RPM_python_PACKAGE_REQUIRES
 #  "foundationdb-clients = ${FDB_MAJOR}.${FDB_MINOR}.${FDB_PATCH}")
 
+set(CPACK_RPM_SERVER-VERSIONED_POST_INSTALL_SCRIPT_FILE
+  ${CMAKE_BINARY_DIR}/packaging/multiversion/server/postinst)
+
+set(CPACK_RPM_SERVER-VERSIONED_PRE_UNINSTALL_SCRIPT_FILE
+  ${CMAKE_BINARY_DIR}/packaging/multiversion/server/prerm)
+
+set(CPACK_RPM_CLIENTS-VERSIONED_POST_INSTALL_SCRIPT_FILE
+  ${CMAKE_BINARY_DIR}/packaging/multiversion/clients/postinst-el7)
+
+set(CPACK_RPM_CLIENTS-VERSIONED_PRE_UNINSTALL_SCRIPT_FILE
+  ${CMAKE_BINARY_DIR}/packaging/multiversion/clients/prerm)
+
 ################################################################################
 # Configuration for DEB
@@ -396,6 +386,8 @@ set(CPACK_DEBIAN_ENABLE_COMPONENT_DEPENDS ON)
 
 set(CPACK_DEBIAN_SERVER-DEB_PACKAGE_NAME "foundationdb-server")
 set(CPACK_DEBIAN_CLIENTS-DEB_PACKAGE_NAME "foundationdb-clients")
+set(CPACK_DEBIAN_SERVER-VERSIONED_PACKAGE_NAME "foundationdb-server-${PROJECT_VERSION}")
+set(CPACK_DEBIAN_CLIENTS-VERSIONED_PACKAGE_NAME "foundationdb-clients-${PROJECT_VERSION}")
 
 set(CPACK_DEBIAN_SERVER-DEB_PACKAGE_DEPENDS "adduser, libc6 (>= 2.12), foundationdb-clients (= ${FDB_VERSION})")
 set(CPACK_DEBIAN_SERVER-DEB_PACKAGE_RECOMMENDS "python (>= 2.6)")
@@ -410,6 +402,13 @@ set(CPACK_DEBIAN_SERVER-DEB_PACKAGE_CONTROL_EXTRA
   ${CMAKE_SOURCE_DIR}/packaging/deb/DEBIAN-foundationdb-server/prerm
   ${CMAKE_SOURCE_DIR}/packaging/deb/DEBIAN-foundationdb-server/postrm)
 
+set(CPACK_DEBIAN_CLIENTS-VERSIONED_PACKAGE_CONTROL_EXTRA
+  ${CMAKE_BINARY_DIR}/packaging/multiversion/clients/postinst
+  ${CMAKE_BINARY_DIR}/packaging/multiversion/clients/prerm)
+set(CPACK_DEBIAN_SERVER-VERSIONED_PACKAGE_CONTROL_EXTRA
+  ${CMAKE_BINARY_DIR}/packaging/multiversion/server/postinst
+  ${CMAKE_BINARY_DIR}/packaging/multiversion/server/prerm)
+
 ################################################################################
 # MacOS configuration
 ################################################################################
@@ -447,21 +446,21 @@ if(NOT WIN32)
 fdb_install(FILES ${CMAKE_SOURCE_DIR}/packaging/foundationdb.conf
   DESTINATION etc
   COMPONENT server)
-install(FILES ${CMAKE_SOURCE_DIR}/packaging/make_public.py
-  DESTINATION "usr/lib/foundationdb"
-  COMPONENT server-el6)
 install(FILES ${CMAKE_SOURCE_DIR}/packaging/make_public.py
   DESTINATION "usr/lib/foundationdb"
   COMPONENT server-deb)
 install(FILES ${CMAKE_SOURCE_DIR}/packaging/rpm/foundationdb.service
   DESTINATION "lib/systemd/system"
   COMPONENT server-el7)
-install(PROGRAMS ${CMAKE_SOURCE_DIR}/packaging/rpm/foundationdb-init
-  DESTINATION "etc/rc.d/init.d"
-  RENAME "foundationdb"
-  COMPONENT server-el6)
 install(PROGRAMS ${CMAKE_SOURCE_DIR}/packaging/deb/foundationdb-init
   DESTINATION "etc/init.d"
   RENAME "foundationdb"
   COMPONENT server-deb)
+install(FILES ${CMAKE_SOURCE_DIR}/packaging/rpm/foundationdb.service
+  DESTINATION "usr/lib/foundationdb-${PROJECT_VERSION}/lib/systemd/system"
+  COMPONENT server-versioned)
+install(PROGRAMS ${CMAKE_SOURCE_DIR}/packaging/deb/foundationdb-init
+  DESTINATION "usr/lib/foundationdb-${PROJECT_VERSION}/etc/init.d"
+  RENAME "foundationdb"
+  COMPONENT server-versioned)
 endif()
(File diff suppressed because it is too large.)
@@ -22,6 +22,8 @@ FoundationDB supports language bindings for application development using the original...
 
 * :doc:`api-version-upgrade-guide` contains information about upgrading client code to a new API version.
 
+* :doc:`transaction-profiler-analyzer` contains information about enabling transaction profiling and analyzing.
+
 * :doc:`known-limitations` describes both long-term design limitations of FoundationDB and short-term limitations applicable to the current version.
 
 .. toctree::
@@ -38,4 +40,5 @@ FoundationDB supports language bindings for application development using the original...
    api-general
    transaction-tagging
    known-limitations
+   transaction-profiler-analyzer
    api-version-upgrade-guide
@@ -313,7 +313,7 @@ client
 
 ``profile client <get|set>``
 
-Reads or sets parameters of client transaction sampling. Use ``get`` to list the current parameters, and ``set <RATE|default> <SIZE|default>`` to set them. ``RATE`` is the fraction of transactions to be sampled, and ``SIZE`` is the amount (in bytes) of sampled data to store in the database.
+Reads or sets parameters of client transaction sampling. Use ``get`` to list the current parameters, and ``set <RATE|default> <SIZE|default>`` to set them. ``RATE`` is the fraction of transactions to be sampled, and ``SIZE`` is the amount (in bytes) of sampled data to store in the database. For more information, see :doc:`transaction-profiler-analyzer`.
 
 list
 ^^^^
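Reviewer note: for completeness, the same two sampling parameters can also be set per client through the generic knob network option in the C API. A hedged sketch follows; the knob names come from the transaction profiling documentation added in this commit, but the lowercase `name=value` form accepted by `FDB_NET_OPTION_KNOB` is an assumption about the knob plumbing, so verify it against your client version:

    #define FDB_API_VERSION 630
    #include <foundationdb/fdb_c.h>

    #include <cstdio>
    #include <cstring>

    // Apply one "name=value" client knob through the generic KNOB network option.
    static fdb_error_t set_knob(const char* kv) {
        return fdb_network_set_option(FDB_NET_OPTION_KNOB,
                                      (const uint8_t*)kv, (int)std::strlen(kv));
    }

    int main() {
        fdb_error_t err = fdb_select_api_version(FDB_API_VERSION);
        if (err) { std::fprintf(stderr, "%s\n", fdb_get_error(err)); return 1; }

        // Sample 1% of transactions and keep ~100MB of history (104857600 bytes),
        // the client-side analog of `fdbcli> profile client set 0.01 100MB`.
        if ((err = set_knob("csi_sampling_probability=0.01")) ||
            (err = set_knob("csi_size_limit=104857600"))) {
            std::fprintf(stderr, "%s\n", fdb_get_error(err));
            return 1;
        }

        // ... continue with fdb_setup_network(), fdb_run_network() on a thread,
        // fdb_create_database(), and so on.
        return 0;
    }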
@@ -866,7 +866,7 @@ Some of this information is also available in ``\xff\xff/status/json``, but these keys are cheaper to read.
 >>> for k, v in db.get_range_startswith('\xff\xff/metrics/health/'):
 ...     print(k, v)
 ...
-('\xff\xff/metrics/health/aggregate', '{"batch_limited":false,"tps_limit":483988.66315011407,"worst_storage_durability_lag":5000001,"worst_storage_queue":2036,"worst_log_queue":300}')
+('\xff\xff/metrics/health/aggregate', '{"batch_limited":false,"limiting_storage_durability_lag":5000000,"limiting_storage_queue":1000,"tps_limit":483988.66315011407,"worst_storage_durability_lag":5000001,"worst_storage_queue":2036,"worst_log_queue":300}')
 ('\xff\xff/metrics/health/log/e639a9ad0373367784cc550c615c469b', '{"log_queue":300}')
 ('\xff\xff/metrics/health/storage/ab2ce4caf743c9c1ae57063629c6678a', '{"cpu_usage":2.398696781487125,"disk_usage":0.059995917598039405,"storage_durability_lag":5000001,"storage_queue":2036}')
@@ -874,15 +874,17 @@ Some of this information is also available in ``\xff\xff/status/json``, but these keys are cheaper to read.
 
 Aggregate stats about cluster health. Reading this key alone is slightly cheaper than reading any of the per-process keys.
 
-============================ ======== ===============
-**Field**                    **Type** **Description**
----------------------------- -------- ---------------
-batch_limited                boolean  Whether or not the cluster is limiting batch priority transactions
-tps_limit                    number   The rate at which normal priority transactions are allowed to start
-worst_storage_durability_lag number   See the description for storage_durability_lag
-worst_storage_queue          number   See the description for storage_queue
-worst_log_queue              number   See the description for log_queue
-============================ ======== ===============
+=================================== ======== ===============
+**Field**                           **Type** **Description**
+----------------------------------- -------- ---------------
+batch_limited                       boolean  Whether or not the cluster is limiting batch priority transactions
+limiting_storage_durability_lag     number   The storage_durability_lag that ratekeeper is using to determine throttling (see the description for storage_durability_lag)
+limiting_storage_queue              number   The storage_queue that ratekeeper is using to determine throttling (see the description for storage_queue)
+tps_limit                           number   The rate at which normal priority transactions are allowed to start
+worst_storage_durability_lag        number   See the description for storage_durability_lag
+worst_storage_queue                 number   See the description for storage_queue
+worst_log_queue                     number   See the description for log_queue
+=================================== ======== ===============
 
 ``\xff\xff/metrics/health/log/<id>``
 
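Reviewer note: a rough C-API equivalent of the Python snippet above, reading the ``\xff\xff/metrics/health/`` range in one shot. Error handling is minimal, and depending on the client version the special-key read may need additional transaction options; treat this as a sketch rather than a reference implementation:

    #define FDB_API_VERSION 630
    #include <foundationdb/fdb_c.h>

    #include <cstdio>
    #include <cstdlib>
    #include <cstring>
    #include <thread>

    // Abort on any FDB error; fine for a throwaway sketch.
    static void check(fdb_error_t e) {
        if (e) { std::fprintf(stderr, "fdb: %s\n", fdb_get_error(e)); std::exit(1); }
    }

    int main() {
        check(fdb_select_api_version(FDB_API_VERSION));
        check(fdb_setup_network());
        std::thread net([] { (void)fdb_run_network(); });

        FDBDatabase* db = nullptr;
        check(fdb_create_database(nullptr, &db)); // default cluster file

        FDBTransaction* tr = nullptr;
        check(fdb_database_create_transaction(db, &tr));

        // Same range get_range_startswith() covers: '/' + 1 == '0'.
        const char* begin = "\xff\xff/metrics/health/";
        const char* end   = "\xff\xff/metrics/health0";
        FDBFuture* f = fdb_transaction_get_range(
            tr,
            FDB_KEYSEL_FIRST_GREATER_OR_EQUAL((const uint8_t*)begin, (int)std::strlen(begin)),
            FDB_KEYSEL_FIRST_GREATER_OR_EQUAL((const uint8_t*)end, (int)std::strlen(end)),
            /*limit*/ 0, /*target_bytes*/ 0, FDB_STREAMING_MODE_WANT_ALL,
            /*iteration*/ 0, /*snapshot*/ 0, /*reverse*/ 0);
        check(fdb_future_block_until_ready(f));
        check(fdb_future_get_error(f));

        const FDBKeyValue* kvs = nullptr;
        int count = 0;
        fdb_bool_t more = 0;
        check(fdb_future_get_keyvalue_array(f, &kvs, &count, &more));
        for (int i = 0; i < count; ++i) {
            std::printf("%.*s -> %.*s\n",
                        kvs[i].key_length, (const char*)kvs[i].key,
                        kvs[i].value_length, (const char*)kvs[i].value);
        }

        fdb_future_destroy(f);
        fdb_transaction_destroy(tr);
        fdb_database_destroy(db);
        check(fdb_stop_network());
        net.join();
        return 0;
    }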
@@ -2,6 +2,12 @@
 Release Notes
 #############
 
+6.2.28
+======
+* Log detailed team collection information when median available space ratio of all teams is too low. `(PR #3912) <https://github.com/apple/foundationdb/pull/3912>`_
+* Bug fix, blob client did not support authentication key sizes over 64 bytes. `(PR #3964) <https://github.com/apple/foundationdb/pull/3964>`_
+
+
 6.2.27
 ======
 * For clusters with a large number of shards, avoid slow tasks in the data distributor by adding yields to the shard map destruction. `(PR #3834) <https://github.com/apple/foundationdb/pull/3834>`_
@@ -2,6 +2,15 @@
 Release Notes
 #############
 
+6.3.10
+======
+
+Packaging
+---------
+
+* Create versioned RPM and DEB packages. This allows users to install multiple versions of FoundationDB on the same machine and use alternatives to switch between them. `(PR #3983) <https://github.com/apple/foundationdb/pull/3983>`_
+* Remove support for RHEL 6 and CentOS 6. These releases have reached end of life and are no longer officially supported by FoundationDB. `(PR #3983) <https://github.com/apple/foundationdb/pull/3983>`_
+
 6.3.9
 =====
 
@@ -63,6 +72,7 @@ Fixes
 * Fix an issue where ``fdbcli --exec 'exclude no_wait ...'`` would incorrectly report that processes can safely be removed from the cluster. [6.3.5] `(PR #3566) <https://github.com/apple/foundationdb/pull/3566>`_
 * Commit latencies could become large because of inaccurate compute estimates. [6.3.9] `(PR #3845) <https://github.com/apple/foundationdb/pull/3845>`_
 * Added a timeout on TLS handshakes to prevent them from hanging indefinitely. [6.3.9] `(PR #3850) <https://github.com/apple/foundationdb/pull/3850>`_
+* Bug fix, blob client did not support authentication key sizes over 64 bytes. `(PR #3964) <https://github.com/apple/foundationdb/pull/3964>`_
 
 Status
 ------
@@ -121,6 +131,8 @@ Fixes from previous versions
 * The 6.3.1 patch release includes all fixes from the patch releases 6.2.21 and 6.2.22. :doc:`(6.2 Release Notes) </release-notes/release-notes-620>`
 * The 6.3.3 patch release includes all fixes from the patch release 6.2.23. :doc:`(6.2 Release Notes) </release-notes/release-notes-620>`
 * The 6.3.5 patch release includes all fixes from the patch releases 6.2.24 and 6.2.25. :doc:`(6.2 Release Notes) </release-notes/release-notes-620>`
+* The 6.3.9 patch release includes all fixes from the patch release 6.2.26. :doc:`(6.2 Release Notes) </release-notes/release-notes-620>`
+* The 6.3.10 patch release includes all fixes from the patch release 6.2.27. :doc:`(6.2 Release Notes) </release-notes/release-notes-620>`
 
 Fixes only impacting 6.3.0+
 ---------------------------
@@ -0,0 +1,47 @@
+.. _transaction-profiler-analyzer:
+
+###################################
+Transaction profiling and analyzing
+###################################
+
+FoundationDB natively implements transaction profiling and analyzing. There are two ways to enable transaction profiling in FoundationDB. The first is globally through the database, via an ``fdbcli`` command that sets keys in the database which the clients pick up.
+
+``e.g. fdbcli> profile client set 0.01 100MB`` profiles 1% of transactions and maintains 100MB worth of history in the database.
+
+The second way is through the client-side knobs ``CSI_SAMPLING_PROBABILITY`` and ``CSI_SIZE_LIMIT``, which have to be set at every client that you want to profile. Enabling transaction profiling through the database setting has higher precedence and overrides any client knob settings.
+
+There are only two inputs for transaction profiling: the sampling rate and the size limit.
+
+Transactions are sampled at the specified rate, and all the events for a sampled transaction are recorded. At a 30-second interval, the data for all the transactions sampled during that interval is flushed to the database. The sampled data is written into the special key space ``"\xff\x02/fdbClientInfo/ - \xff\x02/fdbClientInfo0"``.
+
+The second part of transaction profiling involves deleting old sampled data to restrict the size. Retention is purely based on the input size limit. If the size of all the recorded data exceeds the limit, the oldest entries are deleted. Note that this is a soft limit, so the data may temporarily exceed it.
+
+There are many ways that this data can be exposed for analysis. One can imagine building a client that reads the data from the database and streams it to external tools such as Wavefront.
+
+One such tool that's available as part of open source FDB is a python script called ``transaction_profiling_analyzer.py``, available on `GitHub <https://github.com/apple/foundationdb/blob/master/contrib/transaction_profiling_analyzer/transaction_profiling_analyzer.py>`_. It reads the sampled data from the database and outputs it in a user-friendly format. Currently it's most useful in identifying hot key-ranges (for both reading and writing).
+
+Prerequisites
+=============
+
+* ``python3``
+* ``fdb python bindings`` - If you don't have the Python bindings installed, you can append $BUILDDIR/bindings/python to the PYTHONPATH environment variable, after which you should be able to ``import fdb``
+
+Additional packages
+===================
+
+* ``dateparser`` - for human date parsing
+* ``sortedcontainers`` - for estimating key range read/write density
+
+Sample usage
+============
+
+* ``$python3 transaction_profiling_analyzer.py --help`` - Shows the help message and exits
+
+* ``python3 transaction_profiling_analyzer.py -C fdb.cluster --start-time "17:00 2020/07/07 PDT" --end-time "17:50 2020/07/07 PDT"`` - Analyzes and prints full information between a start and end time frame
+
+Using filters
+=============
+
+* ``python3 ~/transaction_profiling_analyzer.py -C fdb.cluster --filter-get --start-time "17:00 2020/07/07 PDT" --end-time "17:50 2020/07/07 PDT"`` - Analyzes and prints information about gets between a start and end time frame
+
+* ``python3 ~/transaction_profiling_analyzer.py -C fdb.cluster --filter-get --start-time "17:00 2020/07/07 PDT" --end-time "17:50 2020/07/07 PDT" --top-requests 5`` - Analyzes and prints information about the top 5 keys for gets between a start and end time frame
@@ -298,7 +298,7 @@ struct MutationFilesReadProgress : public ReferenceCounted<MutationFilesReadProgress>
 	// Attempt to decode the first few blocks of log files until beginVersion is consumed
 	std::vector<Future<Void>> fileDecodes;
 	for (int i = 0; i < asyncFiles.size(); i++) {
-		Reference<FileProgress> fp(new FileProgress(asyncFiles[i].get(), i));
+		auto fp = makeReference<FileProgress>(asyncFiles[i].get(), i);
 		progress->fileProgress.push_back(fp);
 		fileDecodes.push_back(
 		    decodeToVersion(fp, progress->beginVersion, progress->endVersion, progress->getLogFile(i)));
@@ -3433,8 +3433,7 @@ int main(int argc, char* argv[]) {
 			usePartitionedLog = true;
 			break;
 		case OPT_INCREMENTALONLY:
-			// TODO: Enable this command-line argument once atomics are supported
-			// incrementalBackupOnly = true;
+			incrementalBackupOnly = true;
 			break;
 		case OPT_RESTORECONTAINER:
 			restoreContainer = args->OptionArg();
@@ -3790,7 +3789,7 @@ int main(int argc, char* argv[]) {
 	auto initCluster = [&](bool quiet = false) {
 		auto resolvedClusterFile = ClusterConnectionFile::lookupClusterFileName(clusterFile);
 		try {
-			ccf = Reference<ClusterConnectionFile>(new ClusterConnectionFile(resolvedClusterFile.first));
+			ccf = makeReference<ClusterConnectionFile>(resolvedClusterFile.first);
 		}
 		catch (Error& e) {
 			if(!quiet)
@@ -3813,7 +3812,7 @@ int main(int argc, char* argv[]) {
 	if(sourceClusterFile.size()) {
 		auto resolvedSourceClusterFile = ClusterConnectionFile::lookupClusterFileName(sourceClusterFile);
 		try {
-			sourceCcf = Reference<ClusterConnectionFile>(new ClusterConnectionFile(resolvedSourceClusterFile.first));
+			sourceCcf = makeReference<ClusterConnectionFile>(resolvedSourceClusterFile.first);
 		}
 		catch (Error& e) {
 			fprintf(stderr, "%s\n", ClusterConnectionFile::getErrorString(resolvedSourceClusterFile, e).c_str());
|
@ -2509,7 +2509,7 @@ ACTOR Future<bool> setClass( Database db, std::vector<StringRef> tokens ) {
|
|||
|
||||
Reference<ReadYourWritesTransaction> getTransaction(Database db, Reference<ReadYourWritesTransaction> &tr, FdbOptions *options, bool intrans) {
|
||||
if(!tr || !intrans) {
|
||||
tr = Reference<ReadYourWritesTransaction>(new ReadYourWritesTransaction(db));
|
||||
tr = makeReference<ReadYourWritesTransaction>(db);
|
||||
options->apply(tr);
|
||||
}
|
||||
|
||||
|
@@ -3011,7 +3011,7 @@ ACTOR Future<int> cli(CLIOptions opt, LineNoise* plinenoise) {
 
 	state std::pair<std::string, bool> resolvedClusterFile = ClusterConnectionFile::lookupClusterFileName( opt.clusterFile );
 	try {
-		ccf = Reference<ClusterConnectionFile>( new ClusterConnectionFile( resolvedClusterFile.first ) );
+		ccf = makeReference<ClusterConnectionFile>(resolvedClusterFile.first);
 	} catch (Error& e) {
 		fprintf(stderr, "%s\n", ClusterConnectionFile::getErrorString(resolvedClusterFile, e).c_str());
 		return 1;
@@ -3472,7 +3472,7 @@ ACTOR Future<int> cli(CLIOptions opt, LineNoise* plinenoise) {
 				                             LiteralStringRef("\xff\xff/worker_interfaces0")),
 				                 CLIENT_KNOBS->TOO_MANY)));
 				ASSERT(!kvs.more);
-				Reference<FlowLock> connectLock(new FlowLock(CLIENT_KNOBS->CLI_CONNECT_PARALLELISM));
+				auto connectLock = makeReference<FlowLock>(CLIENT_KNOBS->CLI_CONNECT_PARALLELISM);
 				std::vector<Future<Void>> addInterfs;
 				for( auto it : kvs ) {
 					addInterfs.push_back(addInterface(&address_interface, connectLock, it));
@@ -3537,7 +3537,7 @@ ACTOR Future<int> cli(CLIOptions opt, LineNoise* plinenoise) {
 				                             LiteralStringRef("\xff\xff/worker_interfaces0")),
 				                 CLIENT_KNOBS->TOO_MANY)));
 				ASSERT(!kvs.more);
-				Reference<FlowLock> connectLock(new FlowLock(CLIENT_KNOBS->CLI_CONNECT_PARALLELISM));
+				auto connectLock = makeReference<FlowLock>(CLIENT_KNOBS->CLI_CONNECT_PARALLELISM);
 				std::vector<Future<Void>> addInterfs;
 				for( auto it : kvs ) {
 					addInterfs.push_back(addInterface(&address_interface, connectLock, it));
@@ -3875,7 +3875,7 @@ ACTOR Future<int> cli(CLIOptions opt, LineNoise* plinenoise) {
 				                             LiteralStringRef("\xff\xff/worker_interfaces0")),
 				                 CLIENT_KNOBS->TOO_MANY)));
 				ASSERT(!kvs.more);
-				Reference<FlowLock> connectLock(new FlowLock(CLIENT_KNOBS->CLI_CONNECT_PARALLELISM));
+				auto connectLock = makeReference<FlowLock>(CLIENT_KNOBS->CLI_CONNECT_PARALLELISM);
 				std::vector<Future<Void>> addInterfs;
 				for( auto it : kvs ) {
 					addInterfs.push_back(addInterface(&address_interface, connectLock, it));
@@ -269,6 +269,7 @@ public:
 
 	enum ERestoreState { UNITIALIZED = 0, QUEUED = 1, STARTING = 2, RUNNING = 3, COMPLETED = 4, ABORTED = 5 };
 	static StringRef restoreStateText(ERestoreState id);
+	static Key getPauseKey();
 
 	// parallel restore
 	Future<Void> parallelRestoreFinish(Database cx, UID randomUID, bool unlockDB = true);
@@ -427,7 +428,8 @@ public:
 		return runRYWTransaction(cx, [=](Reference<ReadYourWritesTransaction> tr){ return discontinueBackup(tr, tagName); });
 	}
 
-	Future<Void> abortBackup(Database cx, Key tagName, bool partial = false, bool abortOldBackup = false, bool dstOnly = false);
+	Future<Void> abortBackup(Database cx, Key tagName, bool partial = false, bool abortOldBackup = false,
+	                         bool dstOnly = false, bool waitForDestUID = false);
 
 	Future<std::string> getStatus(Database cx, int errorLimit, Key tagName);
 
@@ -638,7 +638,8 @@ ACTOR Future<Void> applyMutations(Database cx, Key uid, Key addPrefix, Key removePrefix, ...
 
 	for (int i = 0; i < ranges.size(); ++i) {
 		results.push_back(PromiseStream<RCGroup>());
-		locks.push_back(Reference<FlowLock>( new FlowLock(std::max(CLIENT_KNOBS->APPLY_MAX_LOCK_BYTES/ranges.size(), CLIENT_KNOBS->APPLY_MIN_LOCK_BYTES))));
+		locks.push_back(makeReference<FlowLock>(
+		    std::max(CLIENT_KNOBS->APPLY_MAX_LOCK_BYTES / ranges.size(), CLIENT_KNOBS->APPLY_MIN_LOCK_BYTES)));
 		rc.push_back(readCommitted(cx, results[i], locks[i], ranges[i], decodeBKMutationLogKey));
 	}
 
@@ -363,8 +363,7 @@ Future<std::vector<std::string>> IBackupContainer::listContainers(const std::string& baseURL) {
 
 ACTOR Future<Version> timeKeeperVersionFromDatetime(std::string datetime, Database db) {
 	state KeyBackedMap<int64_t, Version> versionMap(timeKeeperPrefixRange.begin);
-	state Reference<ReadYourWritesTransaction> tr =
-	    Reference<ReadYourWritesTransaction>(new ReadYourWritesTransaction(db));
+	state Reference<ReadYourWritesTransaction> tr = makeReference<ReadYourWritesTransaction>(db);
 
 	state int64_t time = BackupAgentBase::parseTime(datetime);
 	if (time < 0) {
(File diff suppressed because it is too large.)
@@ -41,8 +41,6 @@ set(FDBCLIENT_SRCS
 	ManagementAPI.actor.cpp
 	ManagementAPI.actor.h
-	CommitProxyInterface.h
 	MetricLogger.actor.cpp
-	MetricLogger.h
 	MonitorLeader.actor.cpp
 	MonitorLeader.h
 	MultiVersionAssignmentVars.h
@@ -159,9 +159,9 @@ namespace dbBackup {
 
 	ACTOR static Future<Key> addTask(Reference<ReadYourWritesTransaction> tr, Reference<TaskBucket> taskBucket, Reference<Task> parentTask, Key begin, Key end, TaskCompletionKey completionKey, Reference<TaskFuture> waitFor = Reference<TaskFuture>()) {
 		Key doneKey = wait(completionKey.get(tr, taskBucket));
-		Reference<Task> task(new Task(BackupRangeTaskFunc::name, BackupRangeTaskFunc::version, doneKey));
+		auto task = makeReference<Task>(BackupRangeTaskFunc::name, BackupRangeTaskFunc::version, doneKey);
 
 		copyDefaultParameters(parentTask, task);
 
 		task->params[BackupAgentBase::keyBeginKey] = begin;
 		task->params[BackupAgentBase::keyEndKey] = end;
|
|||
|
||||
state int valueLoc = 0;
|
||||
state int committedValueLoc = 0;
|
||||
state Reference<ReadYourWritesTransaction> tr = Reference<ReadYourWritesTransaction>( new ReadYourWritesTransaction(cx) );
|
||||
loop{
|
||||
state Reference<ReadYourWritesTransaction> tr = makeReference<ReadYourWritesTransaction>(cx);
|
||||
loop{
|
||||
try {
|
||||
tr->reset();
|
||||
tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
|
||||
|
@@ -447,9 +447,9 @@ namespace dbBackup {
 	ACTOR static Future<Key> addTask(Reference<ReadYourWritesTransaction> tr, Reference<TaskBucket> taskBucket, Reference<Task> parentTask, TaskCompletionKey completionKey, Reference<TaskFuture> waitFor = Reference<TaskFuture>()) {
 		// After the BackupRangeTask completes, set the stop key which will stop the BackupLogsTask
 		Key doneKey = wait(completionKey.get(tr, taskBucket));
-		Reference<Task> task(new Task(FinishFullBackupTaskFunc::name, FinishFullBackupTaskFunc::version, doneKey));
+		auto task = makeReference<Task>(FinishFullBackupTaskFunc::name, FinishFullBackupTaskFunc::version, doneKey);
 
 		copyDefaultParameters(parentTask, task);
 
 		if (!waitFor) {
 			return taskBucket->addTask(tr, task, parentTask->params[Task::reservedTaskParamValidKey], task->params[BackupAgentBase::keyFolderId]);
@ -497,9 +497,9 @@ namespace dbBackup {
|
|||
|
||||
ACTOR static Future<Key> addTask(Reference<ReadYourWritesTransaction> tr, Reference<TaskBucket> taskBucket, Reference<Task> parentTask, Version endVersion, TaskCompletionKey completionKey, Reference<TaskFuture> waitFor = Reference<TaskFuture>()) {
|
||||
Key doneKey = wait(completionKey.get(tr, taskBucket));
|
||||
Reference<Task> task(new Task(EraseLogRangeTaskFunc::name, EraseLogRangeTaskFunc::version, doneKey, 1));
|
||||
auto task = makeReference<Task>(EraseLogRangeTaskFunc::name, EraseLogRangeTaskFunc::version, doneKey, 1);
|
||||
|
||||
copyDefaultParameters(parentTask, task);
|
||||
copyDefaultParameters(parentTask, task);
|
||||
|
||||
task->params[DatabaseBackupAgent::keyBeginVersion] = BinaryWriter::toValue(1, Unversioned()); //FIXME: remove in 6.X, only needed for 5.2 backward compatibility
|
||||
task->params[DatabaseBackupAgent::keyEndVersion] = BinaryWriter::toValue(endVersion, Unversioned());
|
||||
|
@ -692,8 +692,8 @@ namespace dbBackup {
|
|||
|
||||
for (int j = results.size(); j < prefetchTo; j ++) {
|
||||
results.push_back(PromiseStream<RCGroup>());
|
||||
locks.push_back(Reference<FlowLock>(new FlowLock(CLIENT_KNOBS->COPY_LOG_READ_AHEAD_BYTES)));
|
||||
rc.push_back(readCommitted(taskBucket->src, results[j], Future<Void>(Void()), locks[j], ranges[j], decodeBKMutationLogKey, true, true, true));
|
||||
locks.push_back(makeReference<FlowLock>(CLIENT_KNOBS->COPY_LOG_READ_AHEAD_BYTES));
|
||||
rc.push_back(readCommitted(taskBucket->src, results[j], Future<Void>(Void()), locks[j], ranges[j], decodeBKMutationLogKey, true, true, true));
|
||||
}
|
||||
|
||||
// copy the range
|
||||
|
@ -731,9 +731,9 @@ namespace dbBackup {
|
|||
|
||||
ACTOR static Future<Key> addTask(Reference<ReadYourWritesTransaction> tr, Reference<TaskBucket> taskBucket, Reference<Task> parentTask, Version beginVersion, Version endVersion, TaskCompletionKey completionKey, Reference<TaskFuture> waitFor = Reference<TaskFuture>()) {
|
||||
Key doneKey = wait(completionKey.get(tr, taskBucket));
|
||||
Reference<Task> task(new Task(CopyLogRangeTaskFunc::name, CopyLogRangeTaskFunc::version, doneKey, 1));
|
||||
auto task = makeReference<Task>(CopyLogRangeTaskFunc::name, CopyLogRangeTaskFunc::version, doneKey, 1);
|
||||
|
||||
copyDefaultParameters(parentTask, task);
|
||||
copyDefaultParameters(parentTask, task);
|
||||
|
||||
task->params[DatabaseBackupAgent::keyBeginVersion] = BinaryWriter::toValue(beginVersion, Unversioned());
|
||||
task->params[DatabaseBackupAgent::keyEndVersion] = BinaryWriter::toValue(endVersion, Unversioned());
|
||||
|
@ -852,9 +852,9 @@ namespace dbBackup {
|
|||
|
||||
ACTOR static Future<Key> addTask(Reference<ReadYourWritesTransaction> tr, Reference<TaskBucket> taskBucket, Reference<Task> parentTask, Version prevBeginVersion, Version beginVersion, TaskCompletionKey completionKey, Reference<TaskFuture> waitFor = Reference<TaskFuture>()) {
|
||||
Key doneKey = wait(completionKey.get(tr, taskBucket));
|
||||
Reference<Task> task(new Task(CopyLogsTaskFunc::name, CopyLogsTaskFunc::version, doneKey, 1));
|
||||
auto task = makeReference<Task>(CopyLogsTaskFunc::name, CopyLogsTaskFunc::version, doneKey, 1);
|
||||
|
||||
copyDefaultParameters(parentTask, task);
|
||||
copyDefaultParameters(parentTask, task);
|
||||
task->params[BackupAgentBase::keyBeginVersion] = BinaryWriter::toValue(beginVersion, Unversioned());
|
||||
task->params[DatabaseBackupAgent::keyPrevBeginVersion] = BinaryWriter::toValue(prevBeginVersion, Unversioned());
|
||||
|
||||
|
@ -931,9 +931,10 @@ namespace dbBackup {

    ACTOR static Future<Key> addTask(Reference<ReadYourWritesTransaction> tr, Reference<TaskBucket> taskBucket, Reference<Task> parentTask, TaskCompletionKey completionKey, Reference<TaskFuture> waitFor = Reference<TaskFuture>()) {
        Key doneKey = wait(completionKey.get(tr, taskBucket));
        Reference<Task> task(new Task(FinishedFullBackupTaskFunc::name, FinishedFullBackupTaskFunc::version, doneKey));
        auto task =
            makeReference<Task>(FinishedFullBackupTaskFunc::name, FinishedFullBackupTaskFunc::version, doneKey);

        copyDefaultParameters(parentTask, task);

        if (!waitFor) {
            return taskBucket->addTask(tr, task, parentTask->params[Task::reservedTaskParamValidKey], task->params[BackupAgentBase::keyFolderId]);

@ -1032,9 +1033,9 @@ namespace dbBackup {

    ACTOR static Future<Key> addTask(Reference<ReadYourWritesTransaction> tr, Reference<TaskBucket> taskBucket, Reference<Task> parentTask, Version prevBeginVersion, Version beginVersion, TaskCompletionKey completionKey, Reference<TaskFuture> waitFor = Reference<TaskFuture>()) {
        Key doneKey = wait(completionKey.get(tr, taskBucket));
        Reference<Task> task(new Task(CopyDiffLogsTaskFunc::name, CopyDiffLogsTaskFunc::version, doneKey, 1));
        auto task = makeReference<Task>(CopyDiffLogsTaskFunc::name, CopyDiffLogsTaskFunc::version, doneKey, 1);

        copyDefaultParameters(parentTask, task);

        task->params[DatabaseBackupAgent::keyBeginVersion] = BinaryWriter::toValue(beginVersion, Unversioned());
        task->params[DatabaseBackupAgent::keyPrevBeginVersion] = BinaryWriter::toValue(prevBeginVersion, Unversioned());

@ -1210,9 +1211,10 @@ namespace dbBackup {

    ACTOR static Future<Key> addTask(Reference<ReadYourWritesTransaction> tr, Reference<TaskBucket> taskBucket, Reference<Task> parentTask, Version beginVersion, Version endVersion, TaskCompletionKey completionKey, Reference<TaskFuture> waitFor = Reference<TaskFuture>()) {
        Key doneKey = wait(completionKey.get(tr, taskBucket));
        Reference<Task> task(new Task(OldCopyLogRangeTaskFunc::name, OldCopyLogRangeTaskFunc::version, doneKey, 1));
        auto task =
            makeReference<Task>(OldCopyLogRangeTaskFunc::name, OldCopyLogRangeTaskFunc::version, doneKey, 1);

        copyDefaultParameters(parentTask, task);

        task->params[DatabaseBackupAgent::keyBeginVersion] = BinaryWriter::toValue(beginVersion, Unversioned());
        task->params[DatabaseBackupAgent::keyEndVersion] = BinaryWriter::toValue(endVersion, Unversioned());

@ -1289,9 +1291,9 @@ namespace dbBackup {

    ACTOR static Future<Key> addTask(Reference<ReadYourWritesTransaction> tr, Reference<TaskBucket> taskBucket, Reference<Task> parentTask, TaskCompletionKey completionKey, Reference<TaskFuture> waitFor = Reference<TaskFuture>()) {
        Key doneKey = wait(completionKey.get(tr, taskBucket));
        Reference<Task> task(new Task(AbortOldBackupTaskFunc::name, AbortOldBackupTaskFunc::version, doneKey, 1));
        auto task = makeReference<Task>(AbortOldBackupTaskFunc::name, AbortOldBackupTaskFunc::version, doneKey, 1);

        copyDefaultParameters(parentTask, task);

        if (!waitFor) {
            return taskBucket->addTask(tr, task, parentTask->params[Task::reservedTaskParamValidKey], task->params[BackupAgentBase::keyFolderId]);

@ -1498,9 +1500,9 @@ namespace dbBackup {

    ACTOR static Future<Key> addTask(Reference<ReadYourWritesTransaction> tr, Reference<TaskBucket> taskBucket, Reference<Task> parentTask, TaskCompletionKey completionKey, Reference<TaskFuture> waitFor = Reference<TaskFuture>()) {
        Key doneKey = wait(completionKey.get(tr, taskBucket));
        Reference<Task> task(new Task(BackupRestorableTaskFunc::name, BackupRestorableTaskFunc::version, doneKey));
        auto task = makeReference<Task>(BackupRestorableTaskFunc::name, BackupRestorableTaskFunc::version, doneKey);

        copyDefaultParameters(parentTask, task);

        if (!waitFor) {
            return taskBucket->addTask(tr, task, parentTask->params[Task::reservedTaskParamValidKey], task->params[BackupAgentBase::keyFolderId]);

@ -1597,6 +1599,7 @@ namespace dbBackup {
                wait(tr->commit());
                break;
            } catch (Error &e) {
                TraceEvent("SetDestUidOrBeginVersionError").error(e, true);
                wait(tr->onError(e));
            }
        }

@ -1690,9 +1693,9 @@ namespace dbBackup {
    ACTOR static Future<Key> addTask(Reference<ReadYourWritesTransaction> tr, Reference<TaskBucket> taskBucket, Key logUid, Key backupUid, Key keyAddPrefix, Key keyRemovePrefix, Key keyConfigBackupRanges, Key tagName, TaskCompletionKey completionKey, Reference<TaskFuture> waitFor = Reference<TaskFuture>(), bool databasesInSync=false)
    {
        Key doneKey = wait(completionKey.get(tr, taskBucket));
        Reference<Task> task(new Task(StartFullBackupTaskFunc::name, StartFullBackupTaskFunc::version, doneKey));
        auto task = makeReference<Task>(StartFullBackupTaskFunc::name, StartFullBackupTaskFunc::version, doneKey);

        task->params[BackupAgentBase::keyFolderId] = backupUid;
        task->params[BackupAgentBase::keyConfigLogUid] = logUid;
        task->params[DatabaseBackupAgent::keyAddPrefix] = keyAddPrefix;
        task->params[DatabaseBackupAgent::keyRemovePrefix] = keyRemovePrefix;
@ -2167,7 +2170,8 @@ public:
        return Void();
    }

    ACTOR static Future<Void> abortBackup(DatabaseBackupAgent* backupAgent, Database cx, Key tagName, bool partial, bool abortOldBackup, bool dstOnly) {
    ACTOR static Future<Void> abortBackup(DatabaseBackupAgent* backupAgent, Database cx, Key tagName, bool partial,
                                          bool abortOldBackup, bool dstOnly, bool waitForDestUID) {
        state Reference<ReadYourWritesTransaction> tr(new ReadYourWritesTransaction(cx));
        state Key logUidValue, destUidValue;
        state UID logUid, destUid;

@ -2187,14 +2191,19 @@ public:
                state Future<UID> destUidFuture = backupAgent->getDestUid(tr, logUid);
                wait(success(statusFuture) && success(destUidFuture));

                UID destUid = destUidFuture.get();
                if (destUid.isValid()) {
                    destUidValue = BinaryWriter::toValue(destUid, Unversioned());
                }
                EBackupState status = statusFuture.get();
                if (!backupAgent->isRunnable(status)) {
                    throw backup_unneeded();
                }
                UID destUid = destUidFuture.get();
                if (destUid.isValid()) {
                    destUidValue = BinaryWriter::toValue(destUid, Unversioned());
                } else if (destUidValue.size() == 0 && waitForDestUID) {
                    // Give DR task a chance to update destUid to avoid the problem of
                    // leftover version key. If we got an commit_unknown_result before,
                    // reuse the previous destUidValue.
                    throw not_committed();
                }

                Optional<Value> _backupUid = wait(tr->get(backupAgent->states.get(logUidValue).pack(DatabaseBackupAgent::keyFolderId)));
                backupUid = _backupUid.get();

@ -2215,11 +2224,12 @@ public:
                break;
            }
            catch (Error &e) {
                TraceEvent("DBA_AbortError").error(e, true);
                wait(tr->onError(e));
            }
        }

        tr = Reference<ReadYourWritesTransaction>(new ReadYourWritesTransaction(cx));
        tr = makeReference<ReadYourWritesTransaction>(cx);
        loop {
            tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
            tr->setOption(FDBTransactionOptions::LOCK_AWARE);

@ -2327,7 +2337,7 @@ public:
            }
        }

        tr = Reference<ReadYourWritesTransaction>(new ReadYourWritesTransaction(cx));
        tr = makeReference<ReadYourWritesTransaction>(cx);
        loop {
            try {
                tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);

@ -2523,8 +2533,9 @@ Future<Void> DatabaseBackupAgent::discontinueBackup(Reference<ReadYourWritesTran
    return DatabaseBackupAgentImpl::discontinueBackup(this, tr, tagName);
}

Future<Void> DatabaseBackupAgent::abortBackup(Database cx, Key tagName, bool partial, bool abortOldBackup, bool dstOnly){
    return DatabaseBackupAgentImpl::abortBackup(this, cx, tagName, partial, abortOldBackup, dstOnly);
Future<Void> DatabaseBackupAgent::abortBackup(Database cx, Key tagName, bool partial, bool abortOldBackup, bool dstOnly,
                                              bool waitForDestUID) {
    return DatabaseBackupAgentImpl::abortBackup(this, cx, tagName, partial, abortOldBackup, dstOnly, waitForDestUID);
}

Future<std::string> DatabaseBackupAgent::getStatus(Database cx, int errorLimit, Key tagName) {
@ -193,6 +193,10 @@ std::string describe( Reference<T> const& item ) {
    return item->toString();
}

static std::string describe(UID const& item) {
    return item.shortString();
}

template <class T>
std::string describe( T const& item ) {
    return item.toString();

@ -997,7 +1001,9 @@ struct HealthMetrics {
};

    int64_t worstStorageQueue;
    int64_t limitingStorageQueue;
    int64_t worstStorageDurabilityLag;
    int64_t limitingStorageDurabilityLag;
    int64_t worstTLogQueue;
    double tpsLimit;
    bool batchLimited;

@ -1005,17 +1011,15 @@ struct HealthMetrics {
    std::map<UID, int64_t> tLogQueue;

    HealthMetrics()
        : worstStorageQueue(0)
        , worstStorageDurabilityLag(0)
        , worstTLogQueue(0)
        , tpsLimit(0.0)
        , batchLimited(false)
    {}
      : worstStorageQueue(0), limitingStorageQueue(0), worstStorageDurabilityLag(0), limitingStorageDurabilityLag(0),
        worstTLogQueue(0), tpsLimit(0.0), batchLimited(false) {}

    void update(const HealthMetrics& hm, bool detailedInput, bool detailedOutput)
    {
        worstStorageQueue = hm.worstStorageQueue;
        limitingStorageQueue = hm.limitingStorageQueue;
        worstStorageDurabilityLag = hm.worstStorageDurabilityLag;
        limitingStorageDurabilityLag = hm.limitingStorageDurabilityLag;
        worstTLogQueue = hm.worstTLogQueue;
        tpsLimit = hm.tpsLimit;
        batchLimited = hm.batchLimited;

@ -1030,19 +1034,16 @@ struct HealthMetrics {
    }

    bool operator==(HealthMetrics const& r) const {
        return (
            worstStorageQueue == r.worstStorageQueue &&
            worstStorageDurabilityLag == r.worstStorageDurabilityLag &&
            worstTLogQueue == r.worstTLogQueue &&
            storageStats == r.storageStats &&
            tLogQueue == r.tLogQueue &&
            batchLimited == r.batchLimited
        );
        return (worstStorageQueue == r.worstStorageQueue && limitingStorageQueue == r.limitingStorageQueue &&
                worstStorageDurabilityLag == r.worstStorageDurabilityLag &&
                limitingStorageDurabilityLag == r.limitingStorageDurabilityLag && worstTLogQueue == r.worstTLogQueue &&
                storageStats == r.storageStats && tLogQueue == r.tLogQueue && batchLimited == r.batchLimited);
    }

    template <class Ar>
    void serialize(Ar& ar) {
        serializer(ar, worstStorageQueue, worstStorageDurabilityLag, worstTLogQueue, tpsLimit, batchLimited, storageStats, tLogQueue);
        serializer(ar, worstStorageQueue, worstStorageDurabilityLag, worstTLogQueue, tpsLimit, batchLimited,
                   storageStats, tLogQueue, limitingStorageQueue, limitingStorageDurabilityLag);
    }
};
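Note how the serialize() change appends limitingStorageQueue and limitingStorageDurabilityLag after the existing fields instead of next to their worst* siblings. Assuming flow's serializer writes fields in argument order, this keeps the positions of the old fields stable on the wire; a sketch of the pattern:

    // Sketch: adding fields to an order-dependent serializer. V1 fields keep
    // their slots; V2 additions ride at the end of the stream.
    struct MetricsExample {
        int64_t worstQueue = 0;    // present since V1
        int64_t limitingQueue = 0; // added in V2
        template <class Ar>
        void serialize(Ar& ar) {
            serializer(ar, worstQueue, limitingQueue); // new field appended last
        }
    };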
@ -99,6 +99,11 @@ StringRef FileBackupAgent::restoreStateText(ERestoreState id) {
    }
}

Key FileBackupAgent::getPauseKey() {
    FileBackupAgent backupAgent;
    return backupAgent.taskBucket->getPauseKey();
}

template<> Tuple Codec<ERestoreState>::pack(ERestoreState const &val) { return Tuple().append(val); }
template<> ERestoreState Codec<ERestoreState>::unpack(Tuple const &val) { return (ERestoreState)val.getInt(0); }

@ -1398,8 +1403,8 @@ namespace fileBackup {
    else {
        ASSERT(snapshotBatchSize.present());
        // Batch future key exists in the config so create future from it
        snapshotBatchFuture = Reference<TaskFuture>(new TaskFuture(futureBucket, snapshotBatchFutureKey.get()));
        snapshotBatchFuture = makeReference<TaskFuture>(futureBucket, snapshotBatchFutureKey.get());
    }

    break;
} catch(Error &e) {
@ -1758,6 +1758,7 @@ ACTOR Future<std::set<NetworkAddress>> checkForExcludingServers(Database cx, vec

            wait( delayJittered( 1.0 ) ); // SOMEDAY: watches!
        } catch (Error& e) {
            TraceEvent("CheckForExcludingServersError").error(e);
            wait( tr.onError(e) );
        }
    }
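The tr.onError(e) call in this hunk is the standard FoundationDB retry idiom: onError delays with backoff and resets the transaction when the error is retryable, and rethrows otherwise. A minimal sketch of the full loop shape (hypothetical setValueExample, not the function from this diff):

    ACTOR Future<Void> setValueExample(Database cx, Key key, Value value) {
        state Transaction tr(cx);
        loop {
            try {
                tr.set(key, value);
                wait(tr.commit());
                return Void();
            } catch (Error& e) {
                // Backs off and retries retryable errors; rethrows fatal ones.
                wait(tr.onError(e));
            }
        }
    }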
@ -368,7 +368,7 @@ ClientCoordinators::ClientCoordinators( Key clusterKey, std::vector<NetworkAddre
    for (const auto& coord : coordinators) {
        clientLeaderServers.push_back( ClientLeaderRegInterface( coord ) );
    }
    ccf = Reference<ClusterConnectionFile>(new ClusterConnectionFile( ClusterConnectionString( coordinators, clusterKey ) ) );
    ccf = makeReference<ClusterConnectionFile>(ClusterConnectionString(coordinators, clusterKey));
}

ClientLeaderRegInterface::ClientLeaderRegInterface( NetworkAddress remote )

@ -473,7 +473,8 @@ ACTOR Future<MonitorLeaderInfo> monitorLeaderOneGeneration( Reference<ClusterCon
    if (leader.present()) {
        if( leader.get().first.forward ) {
            TraceEvent("MonitorLeaderForwarding").detail("NewConnStr", leader.get().first.serializedInfo.toString()).detail("OldConnStr", info.intermediateConnFile->getConnectionString().toString());
            info.intermediateConnFile = Reference<ClusterConnectionFile>(new ClusterConnectionFile(connFile->getFilename(), ClusterConnectionString(leader.get().first.serializedInfo.toString())));
            info.intermediateConnFile = makeReference<ClusterConnectionFile>(
                connFile->getFilename(), ClusterConnectionString(leader.get().first.serializedInfo.toString()));
            return info;
        }
        if(connFile != info.intermediateConnFile) {

@ -501,7 +502,7 @@ template <class LeaderInterface>
Future<Void> monitorLeaderRemotely(Reference<ClusterConnectionFile> const& connFile,
                                   Reference<AsyncVar<Optional<LeaderInterface>>> const& outKnownLeader) {
    LeaderDeserializer<LeaderInterface> deserializer;
    Reference<AsyncVar<Value>> serializedInfo( new AsyncVar<Value> );
    auto serializedInfo = makeReference<AsyncVar<Value>>();
    Future<Void> m = monitorLeaderRemotelyInternal( connFile, serializedInfo );
    return m || deserializer( serializedInfo, outKnownLeader );
}
@ -100,7 +100,7 @@ template <class LeaderInterface>
Future<Void> monitorLeader(Reference<ClusterConnectionFile> const& connFile,
                           Reference<AsyncVar<Optional<LeaderInterface>>> const& outKnownLeader) {
    LeaderDeserializer<LeaderInterface> deserializer;
    Reference<AsyncVar<Value>> serializedInfo( new AsyncVar<Value> );
    auto serializedInfo = makeReference<AsyncVar<Value>>();
    Future<Void> m = monitorLeaderInternal( connFile, serializedInfo );
    return m || deserializer( serializedInfo, outKnownLeader );
}
@ -447,7 +447,7 @@ Reference<IDatabase> DLApi::createDatabase609(const char *clusterFilePath) {
        }));
    });

    return Reference<DLDatabase>(new DLDatabase(api, dbFuture));
    return makeReference<DLDatabase>(api, dbFuture);
}

Reference<IDatabase> DLApi::createDatabase(const char *clusterFilePath) {

@ -916,7 +916,8 @@ void MultiVersionDatabase::DatabaseState::stateChanged() {

void MultiVersionDatabase::DatabaseState::addConnection(Reference<ClientInfo> client, std::string clusterFilePath) {
    clients.push_back(client);
    connectionAttempts.push_back(Reference<Connector>(new Connector(Reference<DatabaseState>::addRef(this), client, clusterFilePath)));
    connectionAttempts.push_back(
        makeReference<Connector>(Reference<DatabaseState>::addRef(this), client, clusterFilePath));
}

void MultiVersionDatabase::DatabaseState::startConnections() {

@ -983,7 +984,7 @@ Reference<ClientInfo> MultiVersionApi::getLocalClient() {

void MultiVersionApi::selectApiVersion(int apiVersion) {
    if(!localClient) {
        localClient = Reference<ClientInfo>(new ClientInfo(ThreadSafeApi::api));
        localClient = makeReference<ClientInfo>(ThreadSafeApi::api);
    }

    if(this->apiVersion != 0 && this->apiVersion != apiVersion) {

@ -1047,7 +1048,7 @@ void MultiVersionApi::addExternalLibrary(std::string path) {

    if(externalClients.count(filename) == 0) {
        TraceEvent("AddingExternalClient").detail("LibraryPath", filename);
        externalClients[filename] = Reference<ClientInfo>(new ClientInfo(new DLApi(path), path));
        externalClients[filename] = makeReference<ClientInfo>(new DLApi(path), path);
    }
}

@ -1064,7 +1065,7 @@ void MultiVersionApi::addExternalLibraryDirectory(std::string path) {
        std::string lib = abspath(joinPath(path, filename));
        if(externalClients.count(filename) == 0) {
            TraceEvent("AddingExternalClient").detail("LibraryPath", filename);
            externalClients[filename] = Reference<ClientInfo>(new ClientInfo(new DLApi(lib), lib));
            externalClients[filename] = makeReference<ClientInfo>(new DLApi(lib), lib);
        }
    }
}

@ -1767,7 +1768,7 @@ struct DLTest {
    static Reference<FdbCApi> getApi() {
        static Reference<FdbCApi> api;
        if(!api) {
            api = Reference<FdbCApi>(new FdbCApi());
            api = makeReference<FdbCApi>();

            // Functions needed for DLSingleAssignmentVar
            api->futureSetCallback = [](FdbCApi::FDBFuture *f, FdbCApi::FDBCallback callback, void *callbackParameter) {
@ -84,12 +84,6 @@ using std::pair;

namespace {

ACTOR template <class T, class Fun>
Future<T> runAfter(Future<T> in, Fun func) {
    T res = wait(in);
    return func(res);
}

template <class Interface, class Request>
Future<REPLY_TYPE(Request)> loadBalance(
    DatabaseContext* ctx, const Reference<LocationInfo> alternatives, RequestStream<Request> Interface::*channel,

@ -99,13 +93,14 @@ Future<REPLY_TYPE(Request)> loadBalance(
    if (alternatives->hasCaches) {
        return loadBalance(alternatives->locations(), channel, request, taskID, atMostOnce, model);
    }
    return runAfter(loadBalance(alternatives->locations(), channel, request, taskID, atMostOnce, model),
                    [ctx](auto res) {
                        if (res.cached) {
                            ctx->updateCache.trigger();
                        }
                        return res;
                    });
    return fmap(
        [ctx](auto const& res) {
            if (res.cached) {
                ctx->updateCache.trigger();
            }
            return res;
        },
        loadBalance(alternatives->locations(), channel, request, taskID, atMostOnce, model));
}
} // namespace
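The hand-rolled runAfter actor is retired in favor of flow's fmap combinator, which applies a function to a future's eventual value. A rough sketch of the combinator's shape under that assumption (argument order mirrors the call above; this is not flow's actual definition):

    // Hypothetical sketch: run 'fun' on the value of 'in' once it is ready.
    ACTOR template <class T, class Fun>
    Future<T> fmapSketch(Fun fun, Future<T> in) {
        T res = wait(in);
        return fun(res);
    }

Using the shared combinator avoids compiling a bespoke actor for every map-over-a-future call site.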
@ -521,9 +516,9 @@ void updateLocationCacheWithCaches(DatabaseContext* self, const std::map<UID, St
            }
        }
        for (const auto& p : added) {
            interfaces.emplace_back(Reference<ReferencedInterface<StorageServerInterface>>{new ReferencedInterface<StorageServerInterface>{p.second}});
            interfaces.push_back(makeReference<ReferencedInterface<StorageServerInterface>>(p.second));
        }
        iter->value() = Reference<LocationInfo>{ new LocationInfo(interfaces, true) };
        iter->value() = makeReference<LocationInfo>(interfaces, true);
        }
    }
}

@ -536,7 +531,7 @@ Reference<LocationInfo> addCaches(const Reference<LocationInfo>& loc,
        interfaces.emplace_back((*loc)[i]);
    }
    interfaces.insert(interfaces.end(), other.begin(), other.end());
    return Reference<LocationInfo>{ new LocationInfo{ interfaces, true } };
    return makeReference<LocationInfo>(interfaces, true);
}

ACTOR Future<Void> updateCachedRanges(DatabaseContext* self, std::map<UID, StorageServerInterface>* cacheServers) {

@ -556,8 +551,7 @@ ACTOR Future<Void> updateCachedRanges(DatabaseContext* self, std::map<UID, Stora
    std::vector<Reference<ReferencedInterface<StorageServerInterface>>> cacheInterfaces;
    cacheInterfaces.reserve(cacheServers->size());
    for (const auto& p : *cacheServers) {
        cacheInterfaces.emplace_back(Reference<ReferencedInterface<StorageServerInterface>>{
            new ReferencedInterface<StorageServerInterface>{ p.second } });
        cacheInterfaces.push_back(makeReference<ReferencedInterface<StorageServerInterface>>(p.second));
    }
    bool currCached = false;
    KeyRef begin, end;
@ -766,7 +760,9 @@ static Standalone<RangeResultRef> healthMetricsToKVPairs(const HealthMetrics& me
    statsObj["batch_limited"] = metrics.batchLimited;
    statsObj["tps_limit"] = metrics.tpsLimit;
    statsObj["worst_storage_durability_lag"] = metrics.worstStorageDurabilityLag;
    statsObj["limiting_storage_durability_lag"] = metrics.limitingStorageDurabilityLag;
    statsObj["worst_storage_queue"] = metrics.worstStorageQueue;
    statsObj["limiting_storage_queue"] = metrics.limitingStorageQueue;
    statsObj["worst_log_queue"] = metrics.worstTLogQueue;
    std::string statsString =
        json_spirit::write_string(json_spirit::mValue(statsObj), json_spirit::Output_options::raw_utf8);

@ -1109,7 +1105,7 @@ Reference<LocationInfo> DatabaseContext::setCachedLocation( const KeyRangeRef& k
    }

    int maxEvictionAttempts = 100, attempts = 0;
    Reference<LocationInfo> loc = Reference<LocationInfo>( new LocationInfo(serverRefs) );
    auto loc = makeReference<LocationInfo>(serverRefs);
    while( locationCache.size() > locationCacheSize && attempts < maxEvictionAttempts) {
        TEST( true ); // NativeAPI storage server locationCache entry evicted
        attempts++;

@ -1185,10 +1181,10 @@ void DatabaseContext::setOption( FDBDatabaseOptions::Option option, Optional<Str
        case FDBDatabaseOptions::MACHINE_ID:
            clientLocality = LocalityData( clientLocality.processId(), value.present() ? Standalone<StringRef>(value.get()) : Optional<Standalone<StringRef>>(), clientLocality.machineId(), clientLocality.dcId() );
            if (clientInfo->get().commitProxies.size())
                commitProxies = Reference<CommitProxyInfo>(new CommitProxyInfo(clientInfo->get().commitProxies, false));
                commitProxies = makeReference<CommitProxyInfo>(clientInfo->get().commitProxies, false);
            if( clientInfo->get().grvProxies.size() )
                grvProxies = Reference<GrvProxyInfo>( new GrvProxyInfo( clientInfo->get().grvProxies, true) );
                grvProxies = makeReference<GrvProxyInfo>(clientInfo->get().grvProxies, true);
            server_interf.clear();
            locationCache.insert( allKeys, Reference<LocationInfo>() );
            break;
        case FDBDatabaseOptions::MAX_WATCHES:

@ -1197,10 +1193,10 @@ void DatabaseContext::setOption( FDBDatabaseOptions::Option option, Optional<Str
        case FDBDatabaseOptions::DATACENTER_ID:
            clientLocality = LocalityData(clientLocality.processId(), clientLocality.zoneId(), clientLocality.machineId(), value.present() ? Standalone<StringRef>(value.get()) : Optional<Standalone<StringRef>>());
            if (clientInfo->get().commitProxies.size())
                commitProxies = Reference<CommitProxyInfo>( new CommitProxyInfo(clientInfo->get().commitProxies, false));
                commitProxies = makeReference<CommitProxyInfo>(clientInfo->get().commitProxies, false);
            if( clientInfo->get().grvProxies.size() )
                grvProxies = Reference<GrvProxyInfo>( new GrvProxyInfo( clientInfo->get().grvProxies, true));
                grvProxies = makeReference<GrvProxyInfo>(clientInfo->get().grvProxies, true);
            server_interf.clear();
            locationCache.insert( allKeys, Reference<LocationInfo>() );
            break;
        case FDBDatabaseOptions::SNAPSHOT_RYW_ENABLE:

@ -1341,8 +1337,8 @@ Database Database::createDatabase( Reference<ClusterConnectionFile> connFile, in

    g_network->initTLS();

    Reference<AsyncVar<ClientDBInfo>> clientInfo(new AsyncVar<ClientDBInfo>());
    Reference<AsyncVar<Reference<ClusterConnectionFile>>> connectionFile(new AsyncVar<Reference<ClusterConnectionFile>>());
    auto clientInfo = makeReference<AsyncVar<ClientDBInfo>>();
    auto connectionFile = makeReference<AsyncVar<Reference<ClusterConnectionFile>>>();
    connectionFile->set(connFile);
    Future<Void> clientInfoMonitor = monitorProxies(connectionFile, clientInfo, networkOptions.supportedVersions, StringRef(networkOptions.traceLogGroup));

@ -1585,11 +1581,11 @@ void DatabaseContext::updateProxies() {
    grvProxies.clear();
    bool commitProxyProvisional = false, grvProxyProvisional = false;
    if (clientInfo->get().commitProxies.size()) {
        commitProxies = Reference<CommitProxyInfo>(new CommitProxyInfo(clientInfo->get().commitProxies, false));
        commitProxies = makeReference<CommitProxyInfo>(clientInfo->get().commitProxies, false);
        commitProxyProvisional = clientInfo->get().commitProxies[0].provisional;
    }
    if (clientInfo->get().grvProxies.size()) {
        grvProxies = Reference<GrvProxyInfo>(new GrvProxyInfo(clientInfo->get().grvProxies, true));
        grvProxies = makeReference<GrvProxyInfo>(clientInfo->get().grvProxies, true);
        grvProxyProvisional = clientInfo->get().grvProxies[0].provisional;
    }
    if (clientInfo->get().commitProxies.size() && clientInfo->get().grvProxies.size()) {

@ -2279,7 +2275,7 @@ ACTOR Future<Standalone<RangeResultRef>> getExactRange( Database cx, Version ver
    }

    if (!more || locations[shard].first.empty()) {
        TEST(true);
        TEST(true); // getExactrange (!more || locations[shard].first.empty())
        if(shard == locations.size()-1) {
            const KeyRangeRef& range = locations[shard].first;
            KeyRef begin = reverse ? keys.begin : range.end;

@ -3779,8 +3775,8 @@ void Transaction::setOption( FDBTransactionOptions::Option option, Optional<Stri
        }
    }
    else {
        trLogInfo = Reference<TransactionLogInfo>(new TransactionLogInfo(value.get().printable(), TransactionLogInfo::DONT_LOG));
        trLogInfo = makeReference<TransactionLogInfo>(value.get().printable(), TransactionLogInfo::DONT_LOG);
        trLogInfo->maxFieldLength = options.maxTransactionLoggingFieldLength;
    }
    if (info.debugID.present()) {
        TraceEvent(SevInfo, "TransactionBeingTraced")

@ -4198,6 +4194,7 @@ ACTOR Future<ProtocolVersion> coordinatorProtocolsFetcher(Reference<ClusterConne

ACTOR Future<uint64_t> getCoordinatorProtocols(Reference<ClusterConnectionFile> f) {
    // TODO: let client know if server is present but before this feature is introduced
    std::cout << "MAKING GET PROTOCOL REQUEST" << std::endl;
    ProtocolVersion protocolVersion = wait(coordinatorProtocolsFetcher(f));
    return protocolVersion.version();
}

@ -4494,7 +4491,6 @@ Future< StorageMetrics > Transaction::getStorageMetrics( KeyRange const& keys, i

ACTOR Future<Standalone<VectorRef<DDMetricsRef>>> waitDataDistributionMetricsList(Database cx, KeyRange keys,
                                                                                  int shardLimit) {
    state Future<Void> clientTimeout = delay(5.0);
    loop {
        choose {
            when(wait(cx->onProxiesChanged())) {}

@ -4506,7 +4502,6 @@ ACTOR Future<Standalone<VectorRef<DDMetricsRef>>> waitDataDistributionMetricsLis
                }
                return rep.get().storageMetricsList;
            }
            when(wait(clientTimeout)) { throw timed_out(); }
        }
    }
}

@ -4636,7 +4631,7 @@ Reference<TransactionLogInfo> Transaction::createTrLogInfoProbabilistically(cons
    if(!cx->isError()) {
        double clientSamplingProbability = std::isinf(cx->clientInfo->get().clientTxnInfoSampleRate) ? CLIENT_KNOBS->CSI_SAMPLING_PROBABILITY : cx->clientInfo->get().clientTxnInfoSampleRate;
        if (((networkOptions.logClientInfo.present() && networkOptions.logClientInfo.get()) || BUGGIFY) && deterministicRandom()->random01() < clientSamplingProbability && (!g_network->isSimulated() || !g_simulator.speedUpSimulation)) {
            return Reference<TransactionLogInfo>(new TransactionLogInfo(TransactionLogInfo::DATABASE));
            return makeReference<TransactionLogInfo>(TransactionLogInfo::DATABASE);
        }
    }
@ -1228,7 +1228,7 @@ ACTOR Future<Standalone<RangeResultRef>> getWorkerInterfaces (Reference<ClusterC
}

Future< Optional<Value> > ReadYourWritesTransaction::get( const Key& key, bool snapshot ) {
    TEST(true);
    TEST(true); // ReadYourWritesTransaction::get

    if (getDatabase()->apiVersionAtLeast(630)) {
        if (specialKeys.contains(key)) {
@ -235,8 +235,8 @@ Reference<S3BlobStoreEndpoint> S3BlobStoreEndpoint::fromString(std::string const
        StringRef key = c.eat(":");
        StringRef secret = c.eat();

        return Reference<S3BlobStoreEndpoint>(new S3BlobStoreEndpoint(
            host.toString(), service.toString(), key.toString(), secret.toString(), knobs, extraHeaders));
        return makeReference<S3BlobStoreEndpoint>(host.toString(), service.toString(), key.toString(),
                                                  secret.toString(), knobs, extraHeaders);

    } catch (std::string& err) {
        if (error != nullptr) *error = err;

@ -991,7 +991,12 @@ Future<std::vector<std::string>> S3BlobStoreEndpoint::listBuckets() {
std::string S3BlobStoreEndpoint::hmac_sha1(std::string const& msg) {
    std::string key = secret;

    // First pad the key to 64 bytes.
    // Hash key to shorten it if it is longer than SHA1 block size
    if(key.size() > 64) {
        key = SHA1::from_string(key);
    }

    // Pad key up to SHA1 block size if needed
    key.append(64 - key.size(), '\0');

    std::string kipad = key;
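The key handling added here follows RFC 2104's HMAC key preparation: a key longer than the hash's 64-byte block size is first hashed down, and every key is then zero-padded to exactly one block. Sketched in isolation with a hypothetical sha1() helper:

    // RFC 2104 key preparation for HMAC-SHA1; sha1() is a stand-in here.
    std::string prepareHmacKey(std::string key) {
        if (key.size() > 64)
            key = sha1(key);               // shrink oversized keys to the 20-byte digest
        key.append(64 - key.size(), '\0'); // zero-pad up to the 64-byte block size
        return key;
    }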
@ -940,6 +940,8 @@ const KeyRef JSONSchemas::storageHealthSchema = LiteralStringRef(R"""(
const KeyRef JSONSchemas::aggregateHealthSchema = LiteralStringRef(R"""(
{
    "batch_limited": false,
    "limiting_storage_durability_lag": 5050809,
    "limiting_storage_queue": 2030,
    "tps_limit": 457082.8105811302,
    "worst_storage_durability_lag": 5050809,
    "worst_storage_queue": 2030,
@ -243,12 +243,12 @@ ACTOR Future<Standalone<RangeResultRef>> SpecialKeySpace::getRangeAggregationAct
    // Handle all corner cases like what RYW does
    // return if range inverted
    if (actualBeginOffset >= actualEndOffset && begin.getKey() >= end.getKey()) {
        TEST(true);
        TEST(true); // inverted range
        return RangeResultRef(false, false);
    }
    // If touches begin or end, return with readToBegin and readThroughEnd flags
    if (begin.getKey() == moduleBoundary.end || end.getKey() == moduleBoundary.begin) {
        TEST(true);
        TEST(true); // query touches begin or end
        return result;
    }
    state RangeMap<Key, SpecialKeyRangeReadImpl*, KeyRangeRef>::Ranges ranges =
@ -540,25 +540,34 @@ Future<Standalone<RangeResultRef>> ConflictingKeysImpl::getRange(ReadYourWritesT
}

ACTOR Future<Standalone<RangeResultRef>> ddMetricsGetRangeActor(ReadYourWritesTransaction* ryw, KeyRangeRef kr) {
    try {
        auto keys = kr.removePrefix(ddStatsRange.begin);
        Standalone<VectorRef<DDMetricsRef>> resultWithoutPrefix =
            wait(waitDataDistributionMetricsList(ryw->getDatabase(), keys, CLIENT_KNOBS->STORAGE_METRICS_SHARD_LIMIT));
        Standalone<RangeResultRef> result;
        for (const auto& ddMetricsRef : resultWithoutPrefix) {
            // each begin key is the previous end key, thus we only encode the begin key in the result
            KeyRef beginKey = ddMetricsRef.beginKey.withPrefix(ddStatsRange.begin, result.arena());
            // Use json string encoded in utf-8 to encode the values, easy for adding more fields in the future
            json_spirit::mObject statsObj;
            statsObj["shard_bytes"] = ddMetricsRef.shardBytes;
            std::string statsString =
                json_spirit::write_string(json_spirit::mValue(statsObj), json_spirit::Output_options::raw_utf8);
            ValueRef bytes(result.arena(), statsString);
            result.push_back(result.arena(), KeyValueRef(beginKey, bytes));
    loop {
        try {
            auto keys = kr.removePrefix(ddStatsRange.begin);
            Standalone<VectorRef<DDMetricsRef>> resultWithoutPrefix = wait(
                waitDataDistributionMetricsList(ryw->getDatabase(), keys, CLIENT_KNOBS->STORAGE_METRICS_SHARD_LIMIT));
            Standalone<RangeResultRef> result;
            for (const auto& ddMetricsRef : resultWithoutPrefix) {
                // each begin key is the previous end key, thus we only encode the begin key in the result
                KeyRef beginKey = ddMetricsRef.beginKey.withPrefix(ddStatsRange.begin, result.arena());
                // Use json string encoded in utf-8 to encode the values, easy for adding more fields in the future
                json_spirit::mObject statsObj;
                statsObj["shard_bytes"] = ddMetricsRef.shardBytes;
                std::string statsString =
                    json_spirit::write_string(json_spirit::mValue(statsObj), json_spirit::Output_options::raw_utf8);
                ValueRef bytes(result.arena(), statsString);
                result.push_back(result.arena(), KeyValueRef(beginKey, bytes));
            }
            return result;
        } catch (Error& e) {
            state Error err(e);
            if (e.code() == error_code_dd_not_found) {
                TraceEvent(SevWarnAlways, "DataDistributorNotPresent")
                    .detail("Operation", "DDMetricsReqestThroughSpecialKeys");
                wait(delayJittered(FLOW_KNOBS->PREVENT_FAST_SPIN_DELAY));
                continue;
            }
            throw err;
        }
        return result;
    } catch (Error& e) {
        throw;
    }
}
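The rewrite wraps the request in a retry loop so a temporarily absent data distributor is waited out rather than surfaced to the caller. The pattern, sketched with hypothetical names (only error_code_dd_not_found and the knob come from the diff):

    ACTOR template <class T>
    Future<T> retryWhileDDMissing(std::function<Future<T>()> request) {
        loop {
            try {
                T result = wait(request());
                return result;
            } catch (Error& e) {
                if (e.code() != error_code_dd_not_found) throw;
                // Jittered delay prevents tight spinning while DD is recruited.
                wait(delayJittered(FLOW_KNOBS->PREVENT_FAST_SPIN_DELAY));
            }
        }
    }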
@ -479,7 +479,7 @@ ACTOR Future<StatusObject> statusFetcherImpl( Reference<ClusterConnectionFile> f
    state int coordinatorsFaultTolerance = 0;

    try {
        state int64_t clientTime = time(0);
        state int64_t clientTime = g_network->timer();

        StatusObject _statusObjClient = wait(clientStatusFetcher(f, &clientMessages, &quorum_reachable, &coordinatorsFaultTolerance));
        statusObjClient = _statusObjClient;

@ -574,7 +574,7 @@ ACTOR Future<Void> timeoutMonitorLeader(Database db) {
Future<StatusObject> StatusClient::statusFetcher( Database db ) {
    db->lastStatusFetch = now();
    if(!db->statusClusterInterface) {
        db->statusClusterInterface = Reference<AsyncVar<Optional<ClusterInterface>>>(new AsyncVar<Optional<ClusterInterface>>);
        db->statusClusterInterface = makeReference<AsyncVar<Optional<ClusterInterface>>>();
        db->statusLeaderMon = timeoutMonitorLeader(db);
    }
|
@ -35,8 +35,10 @@ void TagSet::addTag(TransactionTagRef tag) {
|
|||
throw too_many_tags();
|
||||
}
|
||||
|
||||
auto result = tags.insert(TransactionTagRef(arena, tag));
|
||||
if(result.second) {
|
||||
TransactionTagRef tagRef(arena, tag);
|
||||
auto it = find(tags.begin(), tags.end(), tagRef);
|
||||
if (it == tags.end()) {
|
||||
tags.push_back(std::move(tagRef));
|
||||
bytes += tag.size();
|
||||
}
|
||||
}
|
||||
|
|
|
@ -40,7 +40,7 @@ typedef Standalone<TransactionTagRef> TransactionTag;
|
|||
|
||||
class TagSet {
|
||||
public:
|
||||
typedef std::set<TransactionTagRef>::const_iterator const_iterator;
|
||||
typedef std::vector<TransactionTagRef>::const_iterator const_iterator;
|
||||
|
||||
TagSet() : bytes(0) {}
|
||||
|
||||
|
@ -54,51 +54,35 @@ public:
|
|||
const_iterator end() const {
|
||||
return tags.end();
|
||||
}
|
||||
|
||||
void clear() {
|
||||
tags.clear();
|
||||
bytes = 0;
|
||||
}
|
||||
//private:
|
||||
Arena arena;
|
||||
std::set<TransactionTagRef> tags;
|
||||
size_t bytes;
|
||||
};
|
||||
|
||||
template <>
|
||||
struct dynamic_size_traits<TagSet> : std::true_type {
|
||||
// May be called multiple times during one serialization
|
||||
template <class Context>
|
||||
static size_t size(const TagSet& t, Context&) {
|
||||
return t.tags.size() + t.bytes;
|
||||
}
|
||||
|
||||
// Guaranteed to be called only once during serialization
|
||||
template <class Context>
|
||||
static void save(uint8_t* out, const TagSet& t, Context& c) {
|
||||
void save(uint8_t* out, Context& c) const {
|
||||
uint8_t *start = out;
|
||||
for (const auto& tag : t.tags) {
|
||||
for (const auto& tag : *this) {
|
||||
*(out++) = (uint8_t)tag.size();
|
||||
|
||||
std::copy(tag.begin(), tag.end(), out);
|
||||
out += tag.size();
|
||||
}
|
||||
|
||||
ASSERT((size_t)(out-start) == size(t, c));
|
||||
ASSERT((size_t)(out - start) == size() + bytes);
|
||||
}
|
||||
|
||||
// Context is an arbitrary type that is plumbed by reference throughout the
|
||||
// load call tree.
|
||||
template <class Context>
|
||||
static void load(const uint8_t* data, size_t size, TagSet& t, Context& context) {
|
||||
void load(const uint8_t* data, size_t size, Context& context) {
|
||||
//const uint8_t *start = data;
|
||||
const uint8_t *end = data + size;
|
||||
while(data < end) {
|
||||
uint8_t len = *(data++);
|
||||
TransactionTagRef tag(context.tryReadZeroCopy(data, len), len);
|
||||
// Tags are already deduplicated
|
||||
const auto& tag = tags.emplace_back(context.tryReadZeroCopy(data, len), len);
|
||||
data += len;
|
||||
|
||||
t.tags.insert(tag);
|
||||
t.bytes += tag.size();
|
||||
bytes += tag.size();
|
||||
}
|
||||
|
||||
ASSERT(data == end);
|
||||
|
@ -106,7 +90,41 @@ struct dynamic_size_traits<TagSet> : std::true_type {
|
|||
// Deserialized tag sets share the arena with the request that contained them
|
||||
// For this reason, persisting a TagSet that shares memory with other request
|
||||
// members should be done with caution.
|
||||
t.arena = context.arena();
|
||||
arena = context.arena();
|
||||
}
|
||||
|
||||
size_t getBytes() const { return bytes; }
|
||||
|
||||
const Arena& getArena() const { return arena; }
|
||||
|
||||
private:
|
||||
size_t bytes;
|
||||
Arena arena;
|
||||
// Currently there are never >= 256 tags, so
|
||||
// std::vector is faster than std::set. This may
|
||||
// change if we allow more tags in the future.
|
||||
std::vector<TransactionTagRef> tags;
|
||||
};
|
||||
|
||||
template <>
|
||||
struct dynamic_size_traits<TagSet> : std::true_type {
|
||||
// May be called multiple times during one serialization
|
||||
template <class Context>
|
||||
static size_t size(const TagSet& t, Context&) {
|
||||
return t.size() + t.getBytes();
|
||||
}
|
||||
|
||||
// Guaranteed to be called only once during serialization
|
||||
template <class Context>
|
||||
static void save(uint8_t* out, const TagSet& t, Context& c) {
|
||||
t.save(out, c);
|
||||
}
|
||||
|
||||
// Context is an arbitrary type that is plumbed by reference throughout the
|
||||
// load call tree.
|
||||
template <class Context>
|
||||
static void load(const uint8_t* data, size_t size, TagSet& t, Context& context) {
|
||||
t.load(data, size, context);
|
||||
}
|
||||
};
|
||||
|
||||
|
@ -208,4 +226,4 @@ using PrioritizedTransactionTagMap = std::map<TransactionPriority, TransactionTa
|
|||
|
||||
template <class Value>
|
||||
using UIDTransactionTagMap = std::unordered_map<UID, TransactionTagMap<Value>>;
|
||||
#endif
|
||||
#endif
|
||||
|
|
|
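TagSet's storage moves from std::set to std::vector with an explicit linear find, trading O(log n) lookups for cache-friendly scans, a win at the fewer-than-256 tags the new comment promises. The idiom in plain, runnable C++:

    // Small-set idiom: for tiny N, a linear scan over contiguous storage
    // beats a node-based std::set on both time and memory.
    #include <algorithm>
    #include <string>
    #include <vector>

    bool insertUnique(std::vector<std::string>& tags, const std::string& tag) {
        if (std::find(tags.begin(), tags.end(), tag) != tags.end())
            return false; // already present
        tags.push_back(tag);
        return true;
    }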
@ -513,7 +513,7 @@ public:
    }

    ACTOR static Future<Void> run(Database cx, Reference<TaskBucket> taskBucket, Reference<FutureBucket> futureBucket, double *pollDelay, int maxConcurrentTasks) {
        state Reference<AsyncVar<bool>> paused = Reference<AsyncVar<bool>>( new AsyncVar<bool>(true) );
        state Reference<AsyncVar<bool>> paused = makeReference<AsyncVar<bool>>(true);
        state Future<Void> watchPausedFuture = watchPaused(cx, taskBucket, paused);
        taskBucket->metricLogger = traceCounters("TaskBucketMetrics", taskBucket->dbgid, CLIENT_KNOBS->TASKBUCKET_LOGGING_DELAY, &taskBucket->cc);
        loop {

@ -528,7 +528,7 @@ public:
    static Future<Standalone<StringRef>> addIdle(Reference<ReadYourWritesTransaction> tr, Reference<TaskBucket> taskBucket) {
        taskBucket->setOptions(tr);

        Reference<Task> newTask(new Task(IdleTaskFunc::name, IdleTaskFunc::version));
        auto newTask = makeReference<Task>(IdleTaskFunc::name, IdleTaskFunc::version);
        return taskBucket->addTask(tr, newTask);
    }

@ -991,7 +991,7 @@ Future<Void> FutureBucket::clear(Reference<ReadYourWritesTransaction> tr){
Reference<TaskFuture> FutureBucket::future(Reference<ReadYourWritesTransaction> tr){
    setOptions(tr);

    Reference<TaskFuture> taskFuture(new TaskFuture(Reference<FutureBucket>::addRef(this)));
    auto taskFuture = makeReference<TaskFuture>(Reference<FutureBucket>::addRef(this));
    taskFuture->addBlock(tr, StringRef());

    return taskFuture;

@ -1002,7 +1002,7 @@ Future<bool> FutureBucket::isEmpty(Reference<ReadYourWritesTransaction> tr) {
}

Reference<TaskFuture> FutureBucket::unpack(Key key) {
    return Reference<TaskFuture>(new TaskFuture(Reference<FutureBucket>::addRef(this), key));
    return makeReference<TaskFuture>(Reference<FutureBucket>::addRef(this), key);
}

class TaskFutureImpl {

@ -1028,7 +1028,7 @@ public:
        for (int i = 0; i < vectorFuture.size(); ++i) {
            Key key = StringRef(deterministicRandom()->randomUniqueID().toString());
            taskFuture->addBlock(tr, key);
            Reference<Task> task(new Task());
            auto task = makeReference<Task>();
            task->params[Task::reservedTaskParamKeyType] = LiteralStringRef("UnblockFuture");
            task->params[Task::reservedTaskParamKeyFuture] = taskFuture->key;
            task->params[Task::reservedTaskParamKeyBlockID] = key;

@ -1111,7 +1111,7 @@ public:
            // If we see a new task ID and the old one isn't empty then process the task accumulated so far and make a new task
            if(taskID.size() != 0 && taskID != lastTaskID) {
                actions.push_back(performAction(tr, taskBucket, taskFuture, task));
                task = Reference<Task>(new Task());
                task = makeReference<Task>();
            }
            task->params[key] = s.value;
            lastTaskID = taskID;
@ -135,10 +135,10 @@ namespace PTreeImpl {
        // and should drop its reference count
        Reference<PTree<T>> r;
        if (which)
            r = Reference<PTree<T>>( new PTree<T>( node->priority, node->data, node->child(0, at), ptr, at ) );
        else
            r = Reference<PTree<T>>( new PTree<T>( node->priority, node->data, ptr, node->child(1, at), at ) );
        node->pointer[2].clear();
            r = makeReference<PTree<T>>(node->priority, node->data, node->child(0, at), ptr, at);
        else
            r = makeReference<PTree<T>>(node->priority, node->data, ptr, node->child(1, at), at);
        node->pointer[2].clear();
        return r;
    } else {
        if (node->updated)

@ -150,10 +150,10 @@ namespace PTreeImpl {
        }
        if ( node->updated ) {
            if (which)
                return Reference<PTree<T>>( new PTree<T>( node->priority, node->data, node->child(0, at), ptr, at ) );
            else
                return Reference<PTree<T>>( new PTree<T>( node->priority, node->data, ptr, node->child(1, at), at ) );
        } else {
                return makeReference<PTree<T>>(node->priority, node->data, node->child(0, at), ptr, at);
            else
                return makeReference<PTree<T>>(node->priority, node->data, ptr, node->child(1, at), at);
        } else {
            node->lastUpdateVersion = at;
            node->replacedPointer = which;
            node->pointer[2] = ptr;

@ -269,8 +269,8 @@ namespace PTreeImpl {
    template<class T>
    void insert(Reference<PTree<T>>& p, Version at, const T& x) {
        if (!p){
            p = Reference<PTree<T>>(new PTree<T>(x, at));
        } else {
            p = makeReference<PTree<T>>(x, at);
        } else {
            bool direction = !(x < p->data);
            Reference<PTree<T>> child = p->child(direction, at);
            insert(child, at, x);

@ -425,8 +425,8 @@ namespace PTreeImpl {
        if (!left) return right;
        if (!right) return left;

        Reference<PTree<T>> r = Reference<PTree<T>>(new PTree<T>(lastNode(left, at)->data, at));
        if (EXPENSIVE_VALIDATION) {
        Reference<PTree<T>> r = makeReference<PTree<T>>(lastNode(left, at)->data, at);
        if (EXPENSIVE_VALIDATION) {
            ASSERT( r->data < firstNode(right, at)->data);
        }
        Reference<PTree<T>> a = left;

@ -628,7 +628,7 @@ private:
        bool end_conflict = it.is_conflict_range();
        bool end_unreadable = it.is_unreadable();

        TEST( it.is_conflict_range() != lastConflicted );
        TEST( it.is_conflict_range() != lastConflicted ); // not last conflicted

        it.tree.clear();
@ -20,8 +20,10 @@ endif()
# Create a local sandbox for quick manual testing without simulator
file(MAKE_DIRECTORY ${CMAKE_BINARY_DIR}/sandbox/data)
file(MAKE_DIRECTORY ${CMAKE_BINARY_DIR}/sandbox/logs)
configure_file(${CMAKE_SOURCE_DIR}/cmake/Sandbox.conf.cmake
               ${CMAKE_BINARY_DIR}/sandbox/foundationdb.conf)
if(NOT EXISTS ${CMAKE_BINARY_DIR}/sandbox/foundationdb.conf)
  configure_file(${CMAKE_SOURCE_DIR}/cmake/Sandbox.conf.cmake
                 ${CMAKE_BINARY_DIR}/sandbox/foundationdb.conf)
endif()

# this is not portable on Windows - but fdbmonitor isn't built there anyways...
add_custom_target(clean_sandbox
@ -60,7 +60,8 @@ Future<Reference<IAsyncFile>> AsyncFileCached::open_impl( std::string filename,
    if(cacheItr == simulatorPageCaches.end()) {
        int64_t pageCacheSize4k = (BUGGIFY) ? FLOW_KNOBS->BUGGIFY_SIM_PAGE_CACHE_4K : FLOW_KNOBS->SIM_PAGE_CACHE_4K;
        int64_t pageCacheSize64k = (BUGGIFY) ? FLOW_KNOBS->BUGGIFY_SIM_PAGE_CACHE_64K : FLOW_KNOBS->SIM_PAGE_CACHE_64K;
        auto caches = std::make_pair(Reference<EvictablePageCache>(new EvictablePageCache(4096, pageCacheSize4k)), Reference<EvictablePageCache>(new EvictablePageCache(65536, pageCacheSize64k)));
        auto caches = std::make_pair(makeReference<EvictablePageCache>(4096, pageCacheSize4k),
                                     makeReference<EvictablePageCache>(65536, pageCacheSize64k));
        simulatorPageCaches[g_network->getLocalAddress()] = caches;
        pageCache = (flags & IAsyncFile::OPEN_LARGE_PAGES) ? caches.second : caches.first;
    }

@ -69,10 +70,10 @@ Future<Reference<IAsyncFile>> AsyncFileCached::open_impl( std::string filename,
    }
    else {
        if(flags & IAsyncFile::OPEN_LARGE_PAGES) {
            if(!pc64k.present()) pc64k = Reference<EvictablePageCache>(new EvictablePageCache(65536, FLOW_KNOBS->PAGE_CACHE_64K));
            if (!pc64k.present()) pc64k = makeReference<EvictablePageCache>(65536, FLOW_KNOBS->PAGE_CACHE_64K);
            pageCache = pc64k.get();
        } else {
            if(!pc4k.present()) pc4k = Reference<EvictablePageCache>(new EvictablePageCache(4096, FLOW_KNOBS->PAGE_CACHE_4K));
            if (!pc4k.present()) pc4k = makeReference<EvictablePageCache>(4096, FLOW_KNOBS->PAGE_CACHE_4K);
            pageCache = pc4k.get();
        }
    }
|
@ -172,11 +172,11 @@ public:
|
|||
|
||||
static Future<Void> async_fdatasync( int fd ) {
|
||||
// Used by AsyncFileKAIO, since kernel AIO doesn't really implement fdatasync yet
|
||||
return sync_impl( fd, Reference<ErrorInfo>(new ErrorInfo) );
|
||||
return sync_impl(fd, makeReference<ErrorInfo>());
|
||||
}
|
||||
static Future<Void> async_fsync( int fd ) {
|
||||
// Used by AsyncFileKAIO, since kernel AIO doesn't really implement fsync yet
|
||||
return sync_impl( fd, Reference<ErrorInfo>(new ErrorInfo), true );
|
||||
return sync_impl(fd, makeReference<ErrorInfo>(), true);
|
||||
}
|
||||
ACTOR static Future<Void> waitAndAtomicRename( Future<Void> fsync, std::string part_filename, std::string final_filename ) {
|
||||
// First wait for the data in the part file to be durable
|
||||
|
|
|
@ -74,9 +74,7 @@ public:
|
|||
when( wait(success( g_simulator.getCurrentProcess()->shutdownSignal.getFuture() )) ) {
|
||||
throw io_error().asInjectedFault();
|
||||
}
|
||||
when( Reference<IAsyncFile> f = wait( wrappedFile ) ) {
|
||||
return Reference<AsyncFileDetachable>( new AsyncFileDetachable(f) );
|
||||
}
|
||||
when(Reference<IAsyncFile> f = wait(wrappedFile)) { return makeReference<AsyncFileDetachable>(f); }
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -250,6 +250,7 @@ struct YieldMockNetwork final : INetwork, ReferenceCounted<YieldMockNetwork> {
|
|||
void setCurrentTask(TaskPriority taskID) override { baseNetwork->setCurrentTask(taskID); }
|
||||
double now() const override { return baseNetwork->now(); }
|
||||
double timer() override { return baseNetwork->timer(); }
|
||||
double timer_monotonic() override { return baseNetwork->timer_monotonic(); }
|
||||
void stop() override { return baseNetwork->stop(); }
|
||||
void addStopCallback(std::function<void()> fn) override {
|
||||
ASSERT(false);
|
||||
|
@ -686,7 +687,7 @@ TEST_CASE("/flow/flow/yieldedFuture/progress")
|
|||
// Check that if check_yield always returns true, the yieldedFuture will do nothing immediately but will
|
||||
// get one thing done per "tick" (per delay(0) returning).
|
||||
|
||||
Reference<YieldMockNetwork> yn( new YieldMockNetwork );
|
||||
auto yn = makeReference<YieldMockNetwork>();
|
||||
|
||||
yn->nextYield = 0;
|
||||
|
||||
|
@ -721,7 +722,7 @@ TEST_CASE("/flow/flow/yieldedFuture/random")
|
|||
{
|
||||
// Check expectations about exactly how yieldedFuture responds to check_yield results
|
||||
|
||||
Reference<YieldMockNetwork> yn( new YieldMockNetwork );
|
||||
auto yn = makeReference<YieldMockNetwork>();
|
||||
|
||||
for(int r=0; r<100; r++) {
|
||||
Promise<Void> p;
|
||||
|
@ -769,7 +770,7 @@ TEST_CASE("/flow/perf/yieldedFuture")
|
|||
double start;
|
||||
int N = 1000000;
|
||||
|
||||
Reference<YieldMockNetwork> yn( new YieldMockNetwork );
|
||||
auto yn = makeReference<YieldMockNetwork>();
|
||||
|
||||
yn->nextYield = 2*N + 100;
|
||||
|
||||
|
|
|
@ -263,18 +263,36 @@ ACTOR Future<Void> pingLatencyLogger(TransportData* self) {
            if(!peer) {
                TraceEvent(SevWarnAlways, "MissingNetworkAddress").suppressFor(10.0).detail("PeerAddr", lastAddress);
            }
            if (peer->lastLoggedTime <= 0.0) {
                peer->lastLoggedTime = peer->lastConnectTime;
            }

            if(peer && peer->pingLatencies.getPopulationSize() >= 10) {
                TraceEvent("PingLatency")
                    .detail("PeerAddr", lastAddress)
                    .detail("MinLatency", peer->pingLatencies.min())
                    .detail("MaxLatency", peer->pingLatencies.max())
                    .detail("MeanLatency", peer->pingLatencies.mean())
                    .detail("MedianLatency", peer->pingLatencies.median())
                    .detail("P90Latency", peer->pingLatencies.percentile(0.90))
                    .detail("Count", peer->pingLatencies.getPopulationSize())
                    .detail("BytesReceived", peer->bytesReceived - peer->lastLoggedBytesReceived)
                    .detail("BytesSent", peer->bytesSent - peer->lastLoggedBytesSent);
                    .detail("Elapsed", now() - peer->lastLoggedTime)
                    .detail("PeerAddr", lastAddress)
                    .detail("MinLatency", peer->pingLatencies.min())
                    .detail("MaxLatency", peer->pingLatencies.max())
                    .detail("MeanLatency", peer->pingLatencies.mean())
                    .detail("MedianLatency", peer->pingLatencies.median())
                    .detail("P90Latency", peer->pingLatencies.percentile(0.90))
                    .detail("Count", peer->pingLatencies.getPopulationSize())
                    .detail("BytesReceived", peer->bytesReceived - peer->lastLoggedBytesReceived)
                    .detail("BytesSent", peer->bytesSent - peer->lastLoggedBytesSent)
                    .detail("ConnectOutgoingCount", peer->connectOutgoingCount)
                    .detail("ConnectIncomingCount", peer->connectIncomingCount)
                    .detail("ConnectFailedCount", peer->connectFailedCount)
                    .detail("ConnectMinLatency", peer->connectLatencies.min())
                    .detail("ConnectMaxLatency", peer->connectLatencies.max())
                    .detail("ConnectMeanLatency", peer->connectLatencies.mean())
                    .detail("ConnectMedianLatency", peer->connectLatencies.median())
                    .detail("ConnectP90Latency", peer->connectLatencies.percentile(0.90));
                peer->lastLoggedTime = now();
                peer->connectOutgoingCount = 0;
                peer->connectIncomingCount = 0;
                peer->connectFailedCount = 0;
                peer->pingLatencies.clear();
                peer->connectLatencies.clear();
                peer->lastLoggedBytesReceived = peer->bytesReceived;
                peer->lastLoggedBytesSent = peer->bytesSent;
                wait(delay(FLOW_KNOBS->PING_LOGGING_INTERVAL));
@ -296,7 +314,7 @@ TransportData::TransportData(uint64_t transportId)
|
|||
transportId(transportId),
|
||||
numIncompatibleConnections(0)
|
||||
{
|
||||
degraded = Reference<AsyncVar<bool>>( new AsyncVar<bool>(false) );
|
||||
degraded = makeReference<AsyncVar<bool>>(false);
|
||||
pingLogger = pingLatencyLogger(this);
|
||||
}
|
||||
|
||||
|
@ -571,6 +589,7 @@ ACTOR Future<Void> connectionKeeper( Reference<Peer> self,
|
|||
.detail("FailureStatus", IFailureMonitor::failureMonitor().getState(self->destination).isAvailable()
|
||||
? "OK"
|
||||
: "FAILED");
|
||||
++self->connectOutgoingCount;
|
||||
|
||||
try {
|
||||
choose {
|
||||
|
@ -578,6 +597,10 @@ ACTOR Future<Void> connectionKeeper( Reference<Peer> self,
|
|||
wait(INetworkConnections::net()->connect(self->destination))) {
|
||||
conn = _conn;
|
||||
wait(conn->connectHandshake());
|
||||
self->connectLatencies.addSample(now() - self->lastConnectTime);
|
||||
if (FlowTransport::isClient()) {
|
||||
IFailureMonitor::failureMonitor().setStatus(self->destination, FailureStatus(false));
|
||||
}
|
||||
if (self->unsent.empty()) {
|
||||
delayedHealthUpdateF = delayedHealthUpdate(self->destination);
|
||||
choose {
|
||||
|
@ -601,8 +624,9 @@ ACTOR Future<Void> connectionKeeper( Reference<Peer> self,
|
|||
throw connection_failed();
|
||||
}
|
||||
}
|
||||
} catch (Error& e) {
|
||||
if (e.code() != error_code_connection_failed) {
|
||||
} catch(Error &e) {
|
||||
++self->connectFailedCount;
|
||||
if(e.code() != error_code_connection_failed) {
|
||||
throw;
|
||||
}
|
||||
TraceEvent("ConnectionTimedOut", conn ? conn->getDebugID() : UID())
|
||||
|
@ -732,7 +756,8 @@ Peer::Peer(TransportData* transport, NetworkAddress const& destination)
|
|||
reconnectionDelay(FLOW_KNOBS->INITIAL_RECONNECTION_TIME), compatible(true), outstandingReplies(0),
|
||||
incompatibleProtocolVersionNewer(false), peerReferences(-1), bytesReceived(0), lastDataPacketSentTime(now()),
|
||||
pingLatencies(destination.isPublic() ? FLOW_KNOBS->PING_SAMPLE_AMOUNT : 1), lastLoggedBytesReceived(0),
|
||||
bytesSent(0), lastLoggedBytesSent(0) {
|
||||
bytesSent(0), lastLoggedBytesSent(0), lastLoggedTime(0.0), connectOutgoingCount(0), connectIncomingCount(0),
|
||||
connectFailedCount(0), connectLatencies(destination.isPublic() ? FLOW_KNOBS->NETWORK_CONNECT_SAMPLE_AMOUNT : 1) {
|
||||
IFailureMonitor::failureMonitor().setStatus(destination, FailureStatus(false));
|
||||
}
|
||||
|
||||
|
@ -783,6 +808,7 @@ void Peer::discardUnreliablePackets() {
|
|||
void Peer::onIncomingConnection( Reference<Peer> self, Reference<IConnection> conn, Future<Void> reader ) {
|
||||
// In case two processes are trying to connect to each other simultaneously, the process with the larger canonical NetworkAddress
|
||||
// gets to keep its outgoing connection.
|
||||
++self->connectIncomingCount;
|
||||
if ( !destination.isPublic() && !outgoingConnectionIdle ) throw address_in_use();
|
||||
NetworkAddress compatibleAddr = transport->localAddresses.address;
|
||||
if(transport->localAddresses.secondaryAddress.present() && transport->localAddresses.secondaryAddress.get().isTLS() == destination.isTLS()) {
|
||||
|
@ -1217,7 +1243,7 @@ Reference<Peer> TransportData::getPeer( NetworkAddress const& address ) {
|
|||
Reference<Peer> TransportData::getOrOpenPeer( NetworkAddress const& address, bool startConnectionKeeper ) {
|
||||
auto peer = getPeer(address);
|
||||
if(!peer) {
|
||||
peer = Reference<Peer>( new Peer(this, address) );
|
||||
peer = makeReference<Peer>(this, address);
|
||||
if(startConnectionKeeper && !isLocalAddress(address)) {
|
||||
peer->connect = connectionKeeper(peer);
|
||||
}
|
||||
|
|
|
@@ -156,8 +156,14 @@ struct Peer : public ReferenceCounted<Peer> {
    double lastDataPacketSentTime;
    int outstandingReplies;
    ContinuousSample<double> pingLatencies;
    double lastLoggedTime;
    int64_t lastLoggedBytesReceived;
    int64_t lastLoggedBytesSent;
    // Cleared every time stats are logged for this peer.
    int connectOutgoingCount;
    int connectIncomingCount;
    int connectFailedCount;
    ContinuousSample<double> connectLatencies;

    explicit Peer(TransportData* transport, NetworkAddress const& destination);
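These new Peer fields implement a log-and-reset cycle: the ContinuousSample distributions and the connect counters are cleared after every PingLatency event, while the byte counters keep growing and only their last logged value is remembered. A condensed sketch of that cycle, simplified from pingLatencyLogger above (the sample size is illustrative):

// Simplified log-and-reset cycle for per-peer stats, mirroring pingLatencyLogger.
// Assumes a ContinuousSample-like type with addSample/mean/percentile/clear.
struct PeerStatsSketch {
    ContinuousSample<double> pingLatencies{ 100 }; // the real size comes from a knob
    int64_t bytesSent = 0, lastLoggedBytesSent = 0;

    void onPing(double latency) { pingLatencies.addSample(latency); }

    void logAndReset() {
        if (pingLatencies.getPopulationSize() >= 10) {
            TraceEvent("PingLatency")
                .detail("MeanLatency", pingLatencies.mean())
                .detail("P90Latency", pingLatencies.percentile(0.90))
                .detail("BytesSent", bytesSent - lastLoggedBytesSent);
        }
        pingLatencies.clear();           // distributions restart each interval
        lastLoggedBytesSent = bytesSent; // monotonic counters only record a baseline
    }
};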
@@ -184,7 +184,7 @@ public:
            localitySet = itKeyValue->_resultset;
        }
        else {
            localitySet = Reference<LocalitySet>(new LocalitySet(*_localitygroup));
            localitySet = makeReference<LocalitySet>(*_localitygroup);
            _cachemisses ++;
            // If the key is not within the current key set, skip it because no items within
            // the current entry array have the key

@@ -213,7 +213,7 @@ public:

    // This function is used to create a subset containing the specified entries
    Reference<LocalitySet> restrict(std::vector<LocalityEntry> const& entryArray) {
        Reference<LocalitySet> localitySet(new LocalitySet(*_localitygroup));
        auto localitySet = makeReference<LocalitySet>(*_localitygroup);
        for (auto& entry : entryArray) {
            localitySet->add(getRecordViaEntry(entry), *this);
        }

@@ -509,7 +509,7 @@ struct LocalityGroup : public LocalitySet {

    LocalityEntry const& add(LocalityData const& data) {
        // _recordArray.size() is the new entry index for the new data
        Reference<LocalityRecord> record(new LocalityRecord(convertToAttribMap(data), _recordArray.size()));
        auto record = makeReference<LocalityRecord>(convertToAttribMap(data), _recordArray.size());
        _recordArray.push_back(record);
        return LocalitySet::add(record, *this);
    }

@@ -552,7 +552,7 @@ struct LocalityGroup : public LocalitySet {

    // Convert locality data to sorted vector of int pairs
    Reference<KeyValueMap> convertToAttribMap(LocalityData const& data) {
        Reference<KeyValueMap> attribHashMap(new KeyValueMap);
        auto attribHashMap = makeReference<KeyValueMap>();
        for (auto& dataPair : data._data) {
            auto indexKey = keyIndex(dataPair.first);
            auto indexValue = valueIndex(dataPair.second);
@@ -45,7 +45,7 @@ double Counter::getRate() const {
}

double Counter::getRoughness() const {
    double elapsed = now() - roughness_interval_start;
    double elapsed = last_event - roughness_interval_start;
    if(elapsed == 0) {
        return -1;
    }

@@ -87,7 +87,9 @@ void CounterCollection::logToTraceEvent(TraceEvent &te) const {
    }
}

ACTOR Future<Void> traceCounters(std::string traceEventName, UID traceEventID, double interval, CounterCollection* counters, std::string trackLatestName) {
ACTOR Future<Void> traceCounters(std::string traceEventName, UID traceEventID, double interval,
                                 CounterCollection* counters, std::string trackLatestName,
                                 std::function<void(TraceEvent&)> decorator) {
    wait(delay(0)); // Give an opportunity for all members used in special counters to be initialized

    for (ICounter* c : counters->counters)

@@ -100,6 +102,7 @@ ACTOR Future<Void> traceCounters(std::string traceEventName, UID traceEventID, d
        te.detail("Elapsed", now() - last_interval);

        counters->logToTraceEvent(te);
        decorator(te);

        if (!trackLatestName.empty()) {
            te.trackLatest(trackLatestName);
@@ -146,7 +146,9 @@ struct SpecialCounter final : ICounter, FastAllocated<SpecialCounter<F>>, NonCop
template <class F>
static void specialCounter(CounterCollection& collection, std::string const& name, F && f) { new SpecialCounter<F>(collection, name, std::move(f)); }

Future<Void> traceCounters(std::string const& traceEventName, UID const& traceEventID, double const& interval, CounterCollection* const& counters, std::string const& trackLatestName = std::string());
Future<Void> traceCounters(std::string const& traceEventName, UID const& traceEventID, double const& interval,
                           CounterCollection* const& counters, std::string const& trackLatestName = std::string(),
                           std::function<void(TraceEvent&)> const& decorator = [](TraceEvent& te) {});

class LatencyBands {
public:
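Callers that need extra fields on the periodic counters event can now pass a decorator, which runs after the counters themselves are written (see the decorator(te) call above). A hypothetical call site; the event name, counter collection, and detail are made up for illustration:

// Hypothetical usage of the new decorator parameter: appends one extra detail
// to every emitted "ExampleMetrics" event.
CounterCollection myCounters("ExampleComponent");
Future<Void> logger =
    traceCounters("ExampleMetrics", deterministicRandom()->randomUniqueID(), 5.0, &myCounters,
                  "ExampleMetricsLatest", [](TraceEvent& te) { te.detail("Role", "example"); });

Defaulting the decorator to an empty lambda keeps every existing call site source-compatible.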
@@ -63,7 +63,7 @@ Future<REPLY_TYPE(Req)> retryBrokenPromise( RequestStream<Req> to, Req request,
            throw;
        resetReply( request );
        wait( delayJittered(FLOW_KNOBS->PREVENT_FAST_SPIN_DELAY, taskID) );
        TEST(true); // retryBrokenPromise
        TEST(true); // retryBrokenPromise with taskID
        }
    }
}
@@ -19,8 +19,13 @@
 */

#include <cinttypes>
#include <deque>
#include <memory>
#include <vector>

#include "fdbrpc/simulator.h"
#include "flow/ActorCollection.h"
#include "flow/IRandom.h"
#include "flow/IThreadPool.h"
#include "flow/ProtocolVersion.h"
#include "flow/Util.h"

@@ -30,6 +35,8 @@
#include "flow/crc32c.h"
#include "fdbrpc/TraceFileIO.h"
#include "flow/FaultInjection.h"
#include "flow/flow.h"
#include "flow/genericactors.actor.h"
#include "flow/network.h"
#include "flow/TLSConfig.actor.h"
#include "fdbrpc/Net2FileSystem.h"

@@ -86,8 +93,6 @@ void ISimulator::displayWorkers() const
    return;
}

ISimulator* g_pSimulator = 0;
thread_local ISimulator::ProcessInfo* ISimulator::currentProcess = 0;
int openCount = 0;

struct SimClogging {

@@ -425,8 +430,10 @@ public:

    static bool should_poll() { return false; }

    ACTOR static Future<Reference<IAsyncFile>> open( std::string filename, int flags, int mode,
            Reference<DiskParameters> diskParameters = Reference<DiskParameters>(new DiskParameters(25000, 150000000)), bool delayOnWrite = true ) {
    ACTOR static Future<Reference<IAsyncFile>> open(
        std::string filename, int flags, int mode,
        Reference<DiskParameters> diskParameters = makeReference<DiskParameters>(25000, 150000000),
        bool delayOnWrite = true) {
        state ISimulator::ProcessInfo* currentProcess = g_simulator.getCurrentProcess();
        state TaskPriority currentTaskID = g_network->getCurrentTask();

@@ -553,8 +560,8 @@ private:
        debugFileCheck("SimpleFileRead", self->filename, data, offset, length);

        INJECT_FAULT(io_timeout, "SimpleFile::read");
        INJECT_FAULT(io_error, "SimpleFile::read");
        INJECT_FAULT(io_timeout, "SimpleFile::read"); // SimpleFile::read io_timeout injected
        INJECT_FAULT(io_error, "SimpleFile::read"); // SimpleFile::read io_error injected

        return read_bytes;
    }

@@ -591,8 +598,8 @@ private:
        debugFileCheck("SimpleFileWrite", self->filename, (void*)data.begin(), offset, data.size());

        INJECT_FAULT(io_timeout, "SimpleFile::write");
        INJECT_FAULT(io_error, "SimpleFile::write");
        INJECT_FAULT(io_timeout, "SimpleFile::write"); // SimpleFile::write inject io_timeout
        INJECT_FAULT(io_error, "SimpleFile::write"); // SimpleFile::write inject io_error

        return Void();
    }

@@ -618,8 +625,8 @@ private:
        if (randLog)
            fprintf( randLog, "SFT2 %s %s %s\n", self->dbgId.shortString().c_str(), self->filename.c_str(), opId.shortString().c_str());

        INJECT_FAULT( io_timeout, "SimpleFile::truncate" );
        INJECT_FAULT( io_error, "SimpleFile::truncate" );
        INJECT_FAULT( io_timeout, "SimpleFile::truncate" ); // SimpleFile::truncate inject io_timeout
        INJECT_FAULT( io_error, "SimpleFile::truncate" ); // SimpleFile::truncate inject io_error

        return Void();
    }

@@ -651,8 +658,8 @@ private:
        if (randLog)
            fprintf( randLog, "SFC2 %s %s %s\n", self->dbgId.shortString().c_str(), self->filename.c_str(), opId.shortString().c_str());

        INJECT_FAULT( io_timeout, "SimpleFile::sync" );
        INJECT_FAULT( io_error, "SimpleFile::sync" );
        INJECT_FAULT( io_timeout, "SimpleFile::sync" ); // SimpleFile::sync inject io_timeout
        INJECT_FAULT( io_error, "SimpleFile::sync" ); // SimpleFile::sync inject io_error

        return Void();
    }

@@ -672,7 +679,7 @@ private:

        if (randLog)
            fprintf(randLog, "SFS2 %s %s %s %" PRId64 "\n", self->dbgId.shortString().c_str(), self->filename.c_str(), opId.shortString().c_str(), pos);
        INJECT_FAULT( io_error, "SimpleFile::size" );
        INJECT_FAULT( io_error, "SimpleFile::size" ); // SimpleFile::size inject io_error

        return pos;
    }
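The comments added to these INJECT_FAULT sites give each probe its own label. As a rough sketch of the idea behind such a macro, an assumption for illustration and not the actual flow/FaultInjection.h definition:

// Hypothetical shape of a simulation-only fault-injection probe: when fault
// injection is enabled and a (rare) random trial fires, throw the named error
// at this call site. faultInjectionEnabled/shouldInjectFault are made up here.
#define INJECT_FAULT(error_type, context)                      \
    if (faultInjectionEnabled() && shouldInjectFault(context)) \
        throw error_type();

Because the thrown error surfaces exactly like a real I/O failure, recovery paths get exercised in simulation without any special-case handling in the code under test.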
@@ -739,6 +746,8 @@ public:
        return timerTime;
    }

    double timer_monotonic() override { return timer(); }

    Future<class Void> delay(double seconds, TaskPriority taskID) override {
        ASSERT(taskID >= TaskPriority::Min && taskID <= TaskPriority::Max);
        return delay( seconds, taskID, currentProcess );

@@ -792,8 +801,8 @@ public:
            return waitForProcessAndConnect( toAddr, this );
        }
        auto peerp = getProcessByAddress(toAddr);
        Reference<Sim2Conn> myc( new Sim2Conn( getCurrentProcess() ) );
        Reference<Sim2Conn> peerc( new Sim2Conn( peerp ) );
        auto myc = makeReference<Sim2Conn>(getCurrentProcess());
        auto peerc = makeReference<Sim2Conn>(peerp);

        myc->connect(peerc, toAddr);
        IPAddress localIp;

@@ -810,7 +819,11 @@ public:
        ((Sim2Listener*)peerp->getListener(toAddr).getPtr())->incomingConnection( 0.5*deterministicRandom()->random01(), Reference<IConnection>(peerc) );
        return onConnect( ::delay(0.5*deterministicRandom()->random01()), myc );
    }
    Future<std::vector<NetworkAddress>> resolveTCPEndpoint(std::string host, std::string service) override {

    Future<Reference<IUDPSocket>> createUDPSocket(NetworkAddress toAddr) override;
    Future<Reference<IUDPSocket>> createUDPSocket(bool isV6 = false) override;

    Future<std::vector<NetworkAddress>> resolveTCPEndpoint(std::string host, std::string service) override {
        throw lookup_failed();
    }
    ACTOR static Future<Reference<IConnection>> onConnect( Future<Void> ready, Reference<Sim2Conn> conn ) {

@@ -1434,7 +1447,7 @@ public:

        // Check if any processes on machine are rebooting
        if ( processesOnMachine != processesPerMachine ) {
            TEST(true); //Attempted reboot, but the target did not have all of its processes running
            TEST(true); //Attempted reboot and kill, but the target did not have all of its processes running
            TraceEvent(SevWarn, "AbortedKill").detail("KillType", kt).detail("MachineId", machineId).detail("Reason", "Machine processes does not match number of processes per machine").detail("Processes", processesOnMachine).detail("ProcessesPerMachine", processesPerMachine).backtrace();
            if (ktFinal) *ktFinal = None;
            return false;

@@ -1545,12 +1558,12 @@ public:
            .detail("KilledDC", kt==ktMin);

        TEST(kt != ktMin); // DataCenter kill was rejected by killMachine
        TEST((kt==ktMin) && (kt == RebootAndDelete)); // Resulted in a reboot and delete
        TEST((kt==ktMin) && (kt == Reboot)); // Resulted in a reboot
        TEST((kt==ktMin) && (kt == KillInstantly)); // Resulted in an instant kill
        TEST((kt==ktMin) && (kt == InjectFaults)); // Resulted in a kill by injecting faults
        TEST((kt==ktMin) && (kt != ktOrig)); // Kill request was downgraded
        TEST((kt==ktMin) && (kt == ktOrig)); // Requested kill was done
        TEST((kt==ktMin) && (kt == RebootAndDelete)); // Datacenter kill Resulted in a reboot and delete
        TEST((kt==ktMin) && (kt == Reboot)); // Datacenter kill Resulted in a reboot
        TEST((kt==ktMin) && (kt == KillInstantly)); // Datacenter kill Resulted in an instant kill
        TEST((kt==ktMin) && (kt == InjectFaults)); // Datacenter kill Resulted in a kill by injecting faults
        TEST((kt==ktMin) && (kt != ktOrig)); // Datacenter Kill request was downgraded
        TEST((kt==ktMin) && (kt == ktOrig)); // Datacenter kill - Requested kill was done

        if (ktFinal) *ktFinal = ktMin;

@@ -1729,6 +1742,202 @@ public:
    int yield_limit; // how many more times yield may return false before next returning true
};

class UDPSimSocket : public IUDPSocket, ReferenceCounted<UDPSimSocket> {
    using Packet = std::shared_ptr<std::vector<uint8_t>>;
    UID id;
    ISimulator::ProcessInfo* process;
    Optional<NetworkAddress> peerAddress;
    Optional<ISimulator::ProcessInfo*> peerProcess;
    Optional<Reference<UDPSimSocket>> peerSocket;
    ActorCollection actors;
    Promise<Void> closed;
    std::deque<std::pair<NetworkAddress, Packet>> recvBuffer;
    AsyncVar<int64_t> writtenPackets;
    NetworkAddress _localAddress;
    bool randomDropPacket() {
        auto res = deterministicRandom()->random01() < .000001;
        TEST(res); // UDP packet drop
        return res;
    }

    bool isClosed() const { return closed.getFuture().isReady(); }
    Future<Void> onClosed() const { return closed.getFuture(); }

    ACTOR static Future<Void> cleanupPeerSocket(UDPSimSocket* self) {
        wait(self->peerSocket.get()->onClosed());
        self->peerSocket.reset();
        return Void();
    }

    ACTOR static Future<Void> send(UDPSimSocket* self, Reference<UDPSimSocket> peerSocket, uint8_t const* begin,
                                   uint8_t const* end) {
        state Packet packet(std::make_shared<std::vector<uint8_t>>());
        packet->resize(end - begin);
        std::copy(begin, end, packet->begin());
        wait( delay( .002 * deterministicRandom()->random01() ) );
        peerSocket->recvBuffer.emplace_back(self->_localAddress, std::move(packet));
        peerSocket->writtenPackets.set(peerSocket->writtenPackets.get() + 1);
        return Void();
    }

    ACTOR static Future<int> receiveFrom(UDPSimSocket* self, uint8_t* begin, uint8_t* end, NetworkAddress* sender) {
        state TaskPriority currentTaskID = g_sim2.getCurrentTask();
        wait(self->writtenPackets.onChange());
        wait(g_sim2.onProcess(self->process, currentTaskID));
        auto packet = self->recvBuffer.front().second;
        int sz = packet->size();
        ASSERT(sz <= end - begin);
        if (sender) {
            *sender = self->recvBuffer.front().first;
        }
        std::copy(packet->begin(), packet->end(), begin);
        self->recvBuffer.pop_front();
        return sz;
    }

public:
    UDPSimSocket(NetworkAddress const& localAddress, Optional<NetworkAddress> const& peerAddress)
      : id(deterministicRandom()->randomUniqueID()), process(g_simulator.getCurrentProcess()), peerAddress(peerAddress),
        actors(false), _localAddress(localAddress) {
        g_sim2.addressMap.emplace(_localAddress, process);
        process->boundUDPSockets.emplace(localAddress, this);
    }
    ~UDPSimSocket() {
        if (!closed.getFuture().isReady()) {
            close();
            closed.send(Void());
        }
        actors.clear(true);
    }
    void close() override {
        process->boundUDPSockets.erase(_localAddress);
        g_sim2.addressMap.erase(_localAddress);
    }
    UID getDebugID() const override { return id; }
    void addref() override { ReferenceCounted<UDPSimSocket>::addref(); }
    void delref() override { ReferenceCounted<UDPSimSocket>::delref(); }

    Future<int> send(uint8_t const* begin, uint8_t const* end) override {
        int sz = int(end - begin);
        auto res = fmap([sz](Void){ return sz; }, delay(0.0));
        ASSERT(sz <= IUDPSocket::MAX_PACKET_SIZE);
        ASSERT(peerAddress.present());
        if (!peerProcess.present()) {
            auto iter = g_sim2.addressMap.find(peerAddress.get());
            if (iter == g_sim2.addressMap.end()) {
                return res;
            }
            peerProcess = iter->second;
        }
        if (!peerSocket.present() || peerSocket.get()->isClosed()) {
            peerSocket.reset();
            auto iter = peerProcess.get()->boundUDPSockets.find(peerAddress.get());
            if (iter == peerProcess.get()->boundUDPSockets.end()) {
                return fmap([sz](Void){ return sz; }, delay(0.0));
            }
            peerSocket = iter->second.castTo<UDPSimSocket>();
            // the notion of leaking connections doesn't make much sense in the context of UDP,
            // so we simply handle those in the simulator
            actors.add(cleanupPeerSocket(this));
        }
        if (randomDropPacket()) {
            return res;
        }
        actors.add(send(this, peerSocket.get(), begin, end));
        return res;
    }
    Future<int> sendTo(uint8_t const* begin, uint8_t const* end, NetworkAddress const& peer) override {
        int sz = int(end - begin);
        auto res = fmap([sz](Void){ return sz; }, delay(0.0));
        ASSERT(sz <= MAX_PACKET_SIZE);
        ISimulator::ProcessInfo* peerProcess = nullptr;
        Reference<UDPSimSocket> peerSocket;
        {
            auto iter = g_sim2.addressMap.find(peer);
            if (iter == g_sim2.addressMap.end()) {
                return res;
            }
            peerProcess = iter->second;
        }
        {
            auto iter = peerProcess->boundUDPSockets.find(peer);
            if (iter == peerProcess->boundUDPSockets.end()) {
                return res;
            }
            peerSocket = iter->second.castTo<UDPSimSocket>();
        }
        actors.add(send(this, peerSocket, begin, end));
        return res;
    }
    Future<int> receive(uint8_t* begin, uint8_t* end) override {
        return receiveFrom(begin, end, nullptr);
    }
    Future<int> receiveFrom(uint8_t* begin, uint8_t* end, NetworkAddress* sender) override {
        if (!recvBuffer.empty()) {
            auto buf = recvBuffer.front().second;
            if (sender) {
                *sender = recvBuffer.front().first;
            }
            int sz = buf->size();
            ASSERT(sz <= end - begin);
            std::copy(buf->begin(), buf->end(), begin);
            auto res = fmap([sz](Void){ return sz; }, delay(0.0));
            recvBuffer.pop_front();
            return res;
        }
        return receiveFrom(this, begin, end, sender);
    }
    void bind(NetworkAddress const& addr) override {
        g_sim2.addressMap.erase(_localAddress);
        process->boundUDPSockets.erase(_localAddress);
        process->boundUDPSockets.emplace(addr, Reference<UDPSimSocket>::addRef(this));
        _localAddress = addr;
        g_sim2.addressMap.emplace(_localAddress, process);
    }

    NetworkAddress localAddress() const override {
        return _localAddress;
    }

};

Future<Reference<IUDPSocket>> Sim2::createUDPSocket(NetworkAddress toAddr) {
    NetworkAddress localAddress;
    auto process = g_simulator.getCurrentProcess();
    if (process->address.ip.isV6()) {
        IPAddress::IPAddressStore store = process->address.ip.toV6();
        uint16_t* ipParts = (uint16_t*)store.data();
        ipParts[7] += deterministicRandom()->randomInt(0, 256);
        localAddress.ip = IPAddress(store);
    } else {
        localAddress.ip = IPAddress(process->address.ip.toV4() + deterministicRandom()->randomInt(0, 256));
    }
    localAddress.port = deterministicRandom()->randomInt(40000, 60000);
    return Reference<IUDPSocket>(new UDPSimSocket(localAddress, toAddr));
}

Future<Reference<IUDPSocket>> Sim2::createUDPSocket(bool isV6) {
    NetworkAddress localAddress;
    auto process = g_simulator.getCurrentProcess();
    if (process->address.ip.isV6() == isV6) {
        localAddress = process->address;
    } else {
        ASSERT(process->addresses.secondaryAddress.present() &&
               process->addresses.secondaryAddress.get().isV6() == isV6);
        localAddress = process->addresses.secondaryAddress.get();
    }
    if (localAddress.ip.isV6()) {
        IPAddress::IPAddressStore store = localAddress.ip.toV6();
        uint16_t* ipParts = (uint16_t*)store.data();
        ipParts[7] += deterministicRandom()->randomInt(0, 256);
        localAddress.ip = IPAddress(store);
    } else {
        localAddress.ip = IPAddress(localAddress.ip.toV4() + deterministicRandom()->randomInt(0, 256));
    }
    localAddress.port = deterministicRandom()->randomInt(40000, 60000);
    return Reference<IUDPSocket>(new UDPSimSocket(localAddress, Optional<NetworkAddress>{}));
}

void startNewSimulator() {
    ASSERT( !g_network );
    g_network = g_pSimulator = new Sim2();
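Given the two createUDPSocket overloads above, simulated code can exercise UDP through the generic IUDPSocket interface. A sketch of a round trip under assumptions (that createUDPSocket is reachable through INetworkConnections::net(), and that serverAddr names a peer's bound socket):

// Illustrative ACTOR: send one datagram to serverAddr and wait for a reply.
ACTOR Future<Void> udpPingExample(NetworkAddress serverAddr) {
    state Reference<IUDPSocket> sock = wait(INetworkConnections::net()->createUDPSocket(serverAddr));
    uint8_t msg[4] = { 'p', 'i', 'n', 'g' };
    state int sent = wait(sock->send(msg, msg + sizeof(msg))); // may be silently dropped (~1e-6 in simulation)
    state uint8_t buf[IUDPSocket::MAX_PACKET_SIZE];
    int got = wait(sock->receive(buf, buf + sizeof(buf))); // completes when a simulated packet arrives
    TraceEvent("UdpPingExample").detail("Sent", sent).detail("Received", got);
    return Void();
}

Note that, matching real UDP, send futures report the byte count even when the packet is dropped, so callers cannot distinguish loss at the send site.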
@@ -1847,7 +2056,8 @@ Future< Reference<class IAsyncFile> > Sim2FileSystem::open( std::string filename
        }
        // Simulated disk parameters are shared by the AsyncFileNonDurable and the underlying SimpleFile.
        // This way, they can both keep up with the time to start the next operation
        Reference<DiskParameters> diskParameters(new DiskParameters(FLOW_KNOBS->SIM_DISK_IOPS, FLOW_KNOBS->SIM_DISK_BANDWIDTH));
        auto diskParameters =
            makeReference<DiskParameters>(FLOW_KNOBS->SIM_DISK_IOPS, FLOW_KNOBS->SIM_DISK_BANDWIDTH);
        machineCache[actualFilename] = AsyncFileNonDurable::open(filename, actualFilename, SimpleFile::open(filename, flags, mode, diskParameters, false), diskParameters);
    }
    Future<Reference<IAsyncFile>> f = AsyncFileDetachable::open( machineCache[actualFilename] );
@@ -24,6 +24,7 @@
#pragma once

#include "flow/flow.h"
#include "flow/Histogram.h"
#include "fdbrpc/FailureMonitor.h"
#include "fdbrpc/Locality.h"
#include "fdbrpc/IAsyncFile.h"

@@ -55,7 +56,9 @@ public:
    LocalityData locality;
    ProcessClass startingClass;
    TDMetricCollection tdmetrics;
    HistogramRegistry histograms;
    std::map<NetworkAddress, Reference<IListener>> listenerMap;
    std::map<NetworkAddress, Reference<IUDPSocket>> boundUDPSockets;
    bool failed;
    bool excluded;
    bool cleared;
@@ -32,7 +32,7 @@ Reference<StorageInfo> getStorageInfo(UID id, std::map<UID, Reference<StorageInf
    Reference<StorageInfo> storageInfo;
    auto cacheItr = storageCache->find(id);
    if(cacheItr == storageCache->end()) {
        storageInfo = Reference<StorageInfo>( new StorageInfo() );
        storageInfo = makeReference<StorageInfo>();
        storageInfo->tag = decodeServerTagValue( txnStateStore->readValue( serverTagKeyFor(id) ).get().get() );
        storageInfo->interf = decodeServerListValue( txnStateStore->readValue( serverListKeyFor(id) ).get().get() );
        (*storageCache)[id] = storageInfo;

@@ -55,12 +55,12 @@ void applyMetadataMutations(SpanID const& spanContext, UID const& dbgid, Arena&
                            std::map<Tag, Version>* tag_popped, bool initialCommit) {
    //std::map<keyRef, vector<uint16_t>> cacheRangeInfo;
    std::map<KeyRef, MutationRef> cachedRangeInfo;
    if (toCommit) {
        toCommit->addTransactionInfo(spanContext);
    }

    for (auto const& m : mutations) {
        //TraceEvent("MetadataMutation", dbgid).detail("M", m.toString());
        if (toCommit) {
            toCommit->addTransactionInfo(spanContext);
        }

        if (m.param1.size() && m.param1[0] == systemKeys.begin[0] && m.type == MutationRef::SetValue) {
            if(m.param1.startsWith(keyServersPrefix)) {

@@ -127,7 +127,7 @@ void applyMetadataMutations(SpanID const& spanContext, UID const& dbgid, Arena&
                    if(storageCache) {
                        auto cacheItr = storageCache->find(id);
                        if(cacheItr == storageCache->end()) {
                            Reference<StorageInfo> storageInfo = Reference<StorageInfo>( new StorageInfo() );
                            Reference<StorageInfo> storageInfo = makeReference<StorageInfo>();
                            storageInfo->tag = tag;
                            Optional<Key> interfKey = txnStateStore->readValue( serverListKeyFor(id) ).get();
                            if(interfKey.present()) {

@@ -198,7 +198,7 @@ void applyMetadataMutations(SpanID const& spanContext, UID const& dbgid, Arena&

                    auto cacheItr = storageCache->find(id);
                    if(cacheItr == storageCache->end()) {
                        Reference<StorageInfo> storageInfo = Reference<StorageInfo>( new StorageInfo() );
                        Reference<StorageInfo> storageInfo = makeReference<StorageInfo>();
                        storageInfo->interf = interf;
                        Optional<Key> tagKey = txnStateStore->readValue( serverTagKeyFor(id) ).get();
                        if(tagKey.present()) {

@@ -221,7 +221,7 @@ void applyMetadataMutations(SpanID const& spanContext, UID const& dbgid, Arena&
                auto &p = (*uid_applyMutationsData)[uid];
                p.endVersion = BinaryReader::fromStringRef<Version>(m.param2, Unversioned());
                if(p.keyVersion == Reference<KeyRangeMap<Version>>())
                    p.keyVersion = Reference<KeyRangeMap<Version>>( new KeyRangeMap<Version>() );
                    p.keyVersion = makeReference<KeyRangeMap<Version>>();
                if(!p.worker.isValid() || p.worker.isReady()) {
                    auto addPrefixValue = txnStateStore->readValue(uid.withPrefix(applyMutationsAddPrefixRange.begin)).get();
                    auto removePrefixValue = txnStateStore->readValue(uid.withPrefix(applyMutationsRemovePrefixRange.begin)).get();

@@ -241,7 +241,7 @@ void applyMetadataMutations(SpanID const& spanContext, UID const& dbgid, Arena&
                Key k = m.param1.substr(applyMutationsKeyVersionMapRange.begin.size() + sizeof(UID));
                auto &p = (*uid_applyMutationsData)[uid];
                if(p.keyVersion == Reference<KeyRangeMap<Version>>())
                    p.keyVersion = Reference<KeyRangeMap<Version>>( new KeyRangeMap<Version>() );
                    p.keyVersion = makeReference<KeyRangeMap<Version>>();
                p.keyVersion->rawInsert( k, BinaryReader::fromStringRef<Version>(m.param2, Unversioned()) );
            }
        }

@@ -416,7 +416,7 @@ void applyMetadataMutations(SpanID const& spanContext, UID const& dbgid, Arena&
            if(uid == uid2) {
                auto &p = (*uid_applyMutationsData)[uid];
                if(p.keyVersion == Reference<KeyRangeMap<Version>>())
                    p.keyVersion = Reference<KeyRangeMap<Version>>( new KeyRangeMap<Version>() );
                    p.keyVersion = makeReference<KeyRangeMap<Version>>();
                p.keyVersion->rawErase( KeyRangeRef( m.param1.substr(applyMutationsKeyVersionMapRange.begin.size() + sizeof(UID)), m.param2.substr(applyMutationsKeyVersionMapRange.begin.size() + sizeof(UID))) );
            }
        }
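The guard in these hunks detects an unset pointer by comparing against a default-constructed Reference. A minimal sketch of the same lazy-initialization idiom, assuming flow's Reference is boolean-testable (as its operator bool provides):

// Lazily allocate the per-UID key-version map the first time it is touched.
// '!p.keyVersion' is assumed equivalent to the explicit comparison above.
if (!p.keyVersion)
    p.keyVersion = makeReference<KeyRangeMap<Version>>();
p.keyVersion->rawInsert(k, BinaryReader::fromStringRef<Version>(m.param2, Unversioned()));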
@@ -25,6 +25,7 @@ set(FDBSERVER_SRCS
  FDBExecHelper.actor.h
  GrvProxyServer.actor.cpp
  IDiskQueue.h
  IKeyValueContainer.h
  IKeyValueStore.h
  IPager.h
  IVersionedStore.h

@@ -46,6 +47,8 @@ set(FDBSERVER_SRCS
  LogSystemDiskQueueAdapter.h
  LogSystemPeekCursor.actor.cpp
  MasterInterface.h
  MetricLogger.actor.cpp
  MetricLogger.h
  CommitProxyServer.actor.cpp
  masterserver.actor.cpp
  MutationTracking.h

@@ -63,6 +66,7 @@ set(FDBSERVER_SRCS
  pubsub.h
  QuietDatabase.actor.cpp
  QuietDatabase.h
  RadixTree.h
  Ratekeeper.actor.cpp
  RatekeeperInterface.h
  RecoveryState.h

@@ -215,6 +219,7 @@ set(FDBSERVER_SRCS
  workloads/UnitPerf.actor.cpp
  workloads/UnitTests.actor.cpp
  workloads/Unreadable.actor.cpp
  workloads/UDPWorkload.actor.cpp
  workloads/VersionStamp.actor.cpp
  workloads/WatchAndWait.actor.cpp
  workloads/Watches.actor.cpp

@@ -240,6 +245,10 @@ if (WITH_ROCKSDB_EXPERIMENTAL)
  set(PORTABLE_ROCKSDB 1)

  include(CompileRocksDB)
  # CompileRocksDB sets `lz4_LIBRARIES` to be the shared lib, we want to link
  # statically, so find the static library here.
  find_library(lz4_STATIC_LIBRARIES
    NAMES liblz4.a REQUIRED)
endif()

# Suppress warnings in sqlite since it's third party

@@ -260,7 +269,7 @@ target_include_directories(fdbserver PRIVATE
if (WITH_ROCKSDB_EXPERIMENTAL)
  add_dependencies(fdbserver rocksdb)
  target_include_directories(fdbserver PRIVATE ${ROCKSDB_INCLUDE_DIR})
  target_link_libraries(fdbserver PRIVATE fdbclient fdb_sqlite ${ROCKSDB_LIBRARIES})
  target_link_libraries(fdbserver PRIVATE fdbclient fdb_sqlite ${ROCKSDB_LIBRARIES} ${lz4_STATIC_LIBRARIES})
else()
  target_link_libraries(fdbserver PRIVATE fdbclient fdb_sqlite)
endif()
@@ -113,12 +113,13 @@ public:
    bool cachePopulated;
    std::map<NetworkAddress, std::pair<double, OpenDatabaseRequest>> clientStatus;

    DBInfo() : masterRegistrationCount(0), recoveryStalled(false), forceRecovery(false), unfinishedRecoveries(0), logGenerations(0), cachePopulated(false),
        clientInfo( new AsyncVar<ClientDBInfo>( ClientDBInfo() ) ), dbInfoCount(0),
        serverInfo( new AsyncVar<ServerDBInfo>( ServerDBInfo() ) ),
        db( DatabaseContext::create( clientInfo, Future<Void>(), LocalityData(), true, TaskPriority::DefaultEndpoint, true ) ) // SOMEDAY: Locality!
    {
    }
    DBInfo()
      : masterRegistrationCount(0), recoveryStalled(false), forceRecovery(false), unfinishedRecoveries(0),
        logGenerations(0), cachePopulated(false), clientInfo(new AsyncVar<ClientDBInfo>()), dbInfoCount(0),
        serverInfo(new AsyncVar<ServerDBInfo>()),
        db(DatabaseContext::create(clientInfo, Future<Void>(), LocalityData(), true, TaskPriority::DefaultEndpoint,
                                   true)) // SOMEDAY: Locality!
    {}

    void setDistributor(const DataDistributorInterface& interf) {
        auto newInfo = serverInfo->get();

@@ -1212,7 +1213,9 @@ public:
        for(auto& logSet : dbi.logSystemConfig.tLogs) {
            if(region.satelliteTLogPolicy.isValid() && logSet.isLocal && logSet.locality == tagLocalitySatellite) {
                oldSatelliteFallback = logSet.tLogPolicy->info() != region.satelliteTLogPolicy->info();
                ASSERT(!oldSatelliteFallback || logSet.tLogPolicy->info() == region.satelliteTLogPolicyFallback->info());
                ASSERT(!oldSatelliteFallback ||
                       (region.satelliteTLogPolicyFallback.isValid() &&
                        logSet.tLogPolicy->info() == region.satelliteTLogPolicyFallback->info()));
                break;
            }
        }

@@ -1934,7 +1937,7 @@ ACTOR Future<Void> clusterRecruitFromConfiguration( ClusterControllerData* self,

ACTOR Future<Void> clusterRecruitRemoteFromConfiguration( ClusterControllerData* self, RecruitRemoteFromConfigurationRequest req ) {
    // At the moment this doesn't really need to be an actor (it always completes immediately)
    TEST(true); //ClusterController RecruitTLogsRequest
    TEST(true); //ClusterController RecruitTLogsRequest Remote
    loop {
        try {
            RecruitRemoteFromConfigurationReply rep = self->findRemoteWorkersForConfiguration( req );

@@ -2186,7 +2189,7 @@ void registerWorker( RegisterWorkerRequest req, ClusterControllerData *self ) {
#define TIME_KEEPER_VERSION LiteralStringRef("1")

ACTOR Future<Void> timeKeeperSetVersion(ClusterControllerData *self) {
    state Reference<ReadYourWritesTransaction> tr = Reference<ReadYourWritesTransaction>(new ReadYourWritesTransaction(self->cx));
    state Reference<ReadYourWritesTransaction> tr = makeReference<ReadYourWritesTransaction>(self->cx);
    loop {
        try {
            tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);

@@ -2214,7 +2217,7 @@ ACTOR Future<Void> timeKeeper(ClusterControllerData *self) {
    wait(timeKeeperSetVersion(self));

    loop {
        state Reference<ReadYourWritesTransaction> tr = Reference<ReadYourWritesTransaction>(new ReadYourWritesTransaction(self->cx));
        state Reference<ReadYourWritesTransaction> tr = makeReference<ReadYourWritesTransaction>(self->cx);
        loop {
            try {
                if(!g_network->isSimulated()) {
@@ -547,7 +547,7 @@ ACTOR Future<Void> preresolutionProcessing(CommitBatchContext* self) {
    }

    // Pre-resolution the commits
    TEST(pProxyCommitData->latestLocalCommitBatchResolving.get() < localBatchNumber - 1);
    TEST(pProxyCommitData->latestLocalCommitBatchResolving.get() < localBatchNumber - 1); // Wait for local batch
    wait(pProxyCommitData->latestLocalCommitBatchResolving.whenAtLeast(localBatchNumber - 1));
    self->releaseDelay = delay(
        std::min(SERVER_KNOBS->MAX_PROXY_COMPUTE,
@@ -239,7 +239,7 @@ struct MovableCoordinatedStateImpl {
        }
        // SOMEDAY: If moveState.mode == MovingFrom, read (without locking) old state and assert that it corresponds with our state and is ReallyTo(coordinators)
        if (moveState.mode == MovableValue::MaybeTo) {
            TEST(true);
            TEST(true); // Maybe moveto state
            ASSERT( moveState.other.present() );
            wait( self->moveTo( self, &self->cs, ClusterConnectionString( moveState.other.get().toString() ), moveState.value ) );
        }
@@ -268,11 +268,12 @@ ACTOR Future<Void> leaderRegister(LeaderElectionRegInterface interf, Key key) {
    state Future<Void> notifyCheck = delay(SERVER_KNOBS->NOTIFICATION_FULL_CLEAR_TIME / SERVER_KNOBS->MIN_NOTIFICATIONS);
    state ClientData clientData;
    state int clientCount = 0;
    state Reference<AsyncVar<bool>> hasConnectedClients = Reference<AsyncVar<bool>>( new AsyncVar<bool>(false) );
    state Reference<AsyncVar<bool>> hasConnectedClients = makeReference<AsyncVar<bool>>(false);
    state ActorCollection actors(false);
    state Future<Void> leaderMon;
    state AsyncVar<Value> leaderInterface;
    state Reference<AsyncVar<Optional<LeaderInfo>>> currentElectedLeader = Reference<AsyncVar<Optional<LeaderInfo>>>( new AsyncVar<Optional<LeaderInfo>>() );
    state Reference<AsyncVar<Optional<LeaderInfo>>> currentElectedLeader =
        makeReference<AsyncVar<Optional<LeaderInfo>>>();

    loop choose {
        when ( OpenDatabaseCoordRequest req = waitNext( interf.openDatabase.getFuture() ) ) {
@@ -128,8 +128,10 @@ public:
    vector<Reference<TCMachineInfo>> machines;
    vector<Standalone<StringRef>> machineIDs;
    vector<Reference<TCTeamInfo>> serverTeams;
    UID id;

    explicit TCMachineTeamInfo(vector<Reference<TCMachineInfo>> const& machines) : machines(machines) {
    explicit TCMachineTeamInfo(vector<Reference<TCMachineInfo>> const& machines)
      : machines(machines), id(deterministicRandom()->randomUniqueID()) {
        machineIDs.reserve(machines.size());
        for (int i = 0; i < machines.size(); i++) {
            machineIDs.push_back(machines[i]->machineID);

@@ -164,13 +166,15 @@ class TCTeamInfo final : public ReferenceCounted<TCTeamInfo>, public IDataDistri
    bool healthy;
    bool wrongConfiguration; //True if any of the servers in the team have the wrong configuration
    int priority;
    UID id;

public:
    Reference<TCMachineTeamInfo> machineTeam;
    Future<Void> tracker;

    explicit TCTeamInfo(vector<Reference<TCServerInfo>> const& servers)
      : servers(servers), healthy(true), priority(SERVER_KNOBS->PRIORITY_TEAM_HEALTHY), wrongConfiguration(false) {
      : servers(servers), healthy(true), priority(SERVER_KNOBS->PRIORITY_TEAM_HEALTHY), wrongConfiguration(false),
        id(deterministicRandom()->randomUniqueID()) {
        if (servers.empty()) {
            TraceEvent(SevInfo, "ConstructTCTeamFromEmptyServers");
        }

@@ -180,6 +184,8 @@ public:
        }
    }

    std::string getTeamID() const override { return id.shortString(); }

    vector<StorageServerInterface> getLastKnownServerInterfaces() const override {
        vector<StorageServerInterface> v;
        v.reserve(servers.size());
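Both team classes now mint a UID at construction so later trace events (TeamID, ServerTeamRemoved, and friends below) can correlate a team across log lines. The pattern in isolation, as a schematic class rather than code from the commit:

// Schematic: tag an object with a unique ID at construction and expose a
// short printable form for trace events. In simulation the ID is still
// reproducible because it comes from deterministicRandom().
struct TaggedTeamExample {
    UID id;
    TaggedTeamExample() : id(deterministicRandom()->randomUniqueID()) {}
    std::string getTeamID() const { return id.shortString(); }
};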
@@ -365,7 +371,7 @@ ACTOR Future<Reference<InitialDataDistribution>> getInitialDataDistribution(Data
                                                                            MoveKeysLock moveKeysLock,
                                                                            std::vector<Optional<Key>> remoteDcIds,
                                                                            const DDEnabledState* ddEnabledState) {
    state Reference<InitialDataDistribution> result = Reference<InitialDataDistribution>(new InitialDataDistribution);
    state Reference<InitialDataDistribution> result = makeReference<InitialDataDistribution>();
    state Key beginKey = allKeys.begin;

    state bool succeeded;

@@ -622,6 +628,9 @@ struct DDTeamCollection : ReferenceCounted<DDTeamCollection> {
    int lowestUtilizationTeam;
    int highestUtilizationTeam;

    AsyncTrigger printDetailedTeamsInfo;
    PromiseStream<GetMetricsRequest> getShardMetrics;

    void resetLocalitySet() {
        storageServerSet = Reference<LocalitySet>(new LocalityMap<UID>());
        LocalityMap<UID>* storageServerMap = (LocalityMap<UID>*) storageServerSet.getPtr();

@@ -652,7 +661,7 @@ struct DDTeamCollection : ReferenceCounted<DDTeamCollection> {
                     DatabaseConfiguration configuration, std::vector<Optional<Key>> includedDCs,
                     Optional<std::vector<Optional<Key>>> otherTrackedDCs, Future<Void> readyToStart,
                     Reference<AsyncVar<bool>> zeroHealthyTeams, bool primary,
                     Reference<AsyncVar<bool>> processingUnhealthy)
                     Reference<AsyncVar<bool>> processingUnhealthy, PromiseStream<GetMetricsRequest> getShardMetrics)
      : cx(cx), distributorId(distributorId), lock(lock), output(output),
        shardsAffectedByTeamFailure(shardsAffectedByTeamFailure), doBuildTeams(true), lastBuildTeamsFailed(false),
        teamBuilder(Void()), badTeamRemover(Void()), checkInvalidLocalities(Void()), wrongStoreTypeRemover(Void()), configuration(configuration),

@@ -664,8 +673,10 @@ struct DDTeamCollection : ReferenceCounted<DDTeamCollection> {
        initializationDoneActor(logOnCompletion(readyToStart && initialFailureReactionDelay, this)),
        optimalTeamCount(0), recruitingStream(0), restartRecruiting(SERVER_KNOBS->DEBOUNCE_RECRUITING_DELAY),
        unhealthyServers(0), includedDCs(includedDCs), otherTrackedDCs(otherTrackedDCs),
        zeroHealthyTeams(zeroHealthyTeams), zeroOptimalTeams(true), primary(primary), medianAvailableSpace(SERVER_KNOBS->MIN_AVAILABLE_SPACE_RATIO),
        lastMedianAvailableSpaceUpdate(0), processingUnhealthy(processingUnhealthy), lowestUtilizationTeam(0), highestUtilizationTeam(0) {
        zeroHealthyTeams(zeroHealthyTeams), zeroOptimalTeams(true), primary(primary),
        medianAvailableSpace(SERVER_KNOBS->MIN_AVAILABLE_SPACE_RATIO), lastMedianAvailableSpaceUpdate(0),
        processingUnhealthy(processingUnhealthy), lowestUtilizationTeam(0), highestUtilizationTeam(0),
        getShardMetrics(getShardMetrics) {
        if(!primary || configuration.usableRegions == 1) {
            TraceEvent("DDTrackerStarting", distributorId)
                .detail( "State", "Inactive" )

@@ -801,6 +812,13 @@ struct DDTeamCollection : ReferenceCounted<DDTeamCollection> {
            } else {
                self->medianAvailableSpace = SERVER_KNOBS->MIN_AVAILABLE_SPACE_RATIO;
            }
            if (self->medianAvailableSpace < SERVER_KNOBS->TARGET_AVAILABLE_SPACE_RATIO) {
                TraceEvent(SevWarn, "DDTeamMedianAvailableSpaceTooSmall", self->distributorId)
                    .detail("MedianAvailableSpaceRatio", self->medianAvailableSpace)
                    .detail("TargetAvailableSpaceRatio", SERVER_KNOBS->TARGET_AVAILABLE_SPACE_RATIO)
                    .detail("Primary", self->primary);
                self->printDetailedTeamsInfo.trigger();
            }
        }

        bool foundSrc = false;

@@ -1252,7 +1270,7 @@ struct DDTeamCollection : ReferenceCounted<DDTeamCollection> {

    void addTeam(const vector<Reference<TCServerInfo>>& newTeamServers, bool isInitialTeam,
                 bool redundantTeam = false) {
        Reference<TCTeamInfo> teamInfo(new TCTeamInfo(newTeamServers));
        auto teamInfo = makeReference<TCTeamInfo>(newTeamServers);

        // Move satisfiesPolicy to the end for performance benefit
        bool badTeam = redundantTeam || teamInfo->size() != configuration.storageTeamSize

@@ -1309,7 +1327,7 @@ struct DDTeamCollection : ReferenceCounted<DDTeamCollection> {

    // Add a machine team specified by input machines
    Reference<TCMachineTeamInfo> addMachineTeam(vector<Reference<TCMachineInfo>> machines) {
        Reference<TCMachineTeamInfo> machineTeamInfo(new TCMachineTeamInfo(machines));
        auto machineTeamInfo = makeReference<TCMachineTeamInfo>(machines);
        machineTeams.push_back(machineTeamInfo);

        // Assign machine teams to machine

@@ -1394,7 +1412,8 @@ struct DDTeamCollection : ReferenceCounted<DDTeamCollection> {
                .detail("TeamIndex", i++)
                .detail("Healthy", team->isHealthy())
                .detail("TeamSize", team->size())
                .detail("MemberIDs", team->getServerIDsStr());
                .detail("MemberIDs", team->getServerIDsStr())
                .detail("TeamID", team->getTeamID());
        }
    }

@@ -2152,7 +2171,7 @@ struct DDTeamCollection : ReferenceCounted<DDTeamCollection> {
            .detail("Primary", primary)
            .detail("AddedTeams", 0)
            .detail("TeamsToBuild", 0)
            .detail("CurrentTeams", teams.size())
            .detail("CurrentServerTeams", teams.size())
            .detail("DesiredTeams", desiredServerTeams)
            .detail("MaxTeams", maxServerTeams)
            .detail("StorageTeamSize", configuration.storageTeamSize)

@@ -2201,11 +2220,11 @@ struct DDTeamCollection : ReferenceCounted<DDTeamCollection> {
            }
        }
        uniqueMachines = machines.size();
        TraceEvent("BuildTeams")
            .detail("ServerCount", self->server_info.size())
            .detail("UniqueMachines", uniqueMachines)
            .detail("Primary", self->primary)
            .detail("StorageTeamSize", self->configuration.storageTeamSize);
        TraceEvent("BuildTeams", self->distributorId)
            .detail("ServerCount", self->server_info.size())
            .detail("UniqueMachines", uniqueMachines)
            .detail("Primary", self->primary)
            .detail("StorageTeamSize", self->configuration.storageTeamSize);

        // If there are too few machines to even build teams or there are too few represented datacenters, build no new teams
        if( uniqueMachines >= self->configuration.storageTeamSize ) {

@@ -2232,11 +2251,11 @@ struct DDTeamCollection : ReferenceCounted<DDTeamCollection> {
            .detail("TeamsToBuild", teamsToBuild)
            .detail("DesiredTeams", desiredTeams)
            .detail("MaxTeams", maxTeams)
            .detail("BadTeams", self->badTeams.size())
            .detail("BadServerTeams", self->badTeams.size())
            .detail("UniqueMachines", uniqueMachines)
            .detail("TeamSize", self->configuration.storageTeamSize)
            .detail("Servers", serverCount)
            .detail("CurrentTrackedTeams", self->teams.size())
            .detail("CurrentTrackedServerTeams", self->teams.size())
            .detail("HealthyTeamCount", teamCount)
            .detail("TotalTeamCount", totalTeamCount)
            .detail("MachineTeamCount", self->machineTeams.size())

@@ -2253,9 +2272,9 @@ struct DDTeamCollection : ReferenceCounted<DDTeamCollection> {
            int addedTeams = self->addTeamsBestOf(teamsToBuild, desiredTeams, maxTeams);

            if (addedTeams <= 0 && self->teams.size() == 0) {
                TraceEvent(SevWarn, "NoTeamAfterBuildTeam")
                    .detail("TeamNum", self->teams.size())
                    .detail("Debug", "Check information below");
                TraceEvent(SevWarn, "NoTeamAfterBuildTeam", self->distributorId)
                    .detail("ServerTeamNum", self->teams.size())
                    .detail("Debug", "Check information below");
                // Debug: set true for traceAllInfo() to print out more information
                self->traceAllInfo();
            }

@@ -2273,7 +2292,7 @@ struct DDTeamCollection : ReferenceCounted<DDTeamCollection> {
            .detail("Primary", self->primary)
            .detail("AddedTeams", 0)
            .detail("TeamsToBuild", teamsToBuild)
            .detail("CurrentTeams", self->teams.size())
            .detail("CurrentServerTeams", self->teams.size())
            .detail("DesiredTeams", desiredTeams)
            .detail("MaxTeams", maxTeams)
            .detail("StorageTeamSize", self->configuration.storageTeamSize)

@@ -2314,9 +2333,9 @@ struct DDTeamCollection : ReferenceCounted<DDTeamCollection> {
        }

        TraceEvent(SevWarn, "NoHealthyTeams", distributorId)
            .detail("CurrentTeamCount", teams.size())
            .detail("ServerCount", server_info.size())
            .detail("NonFailedServerCount", desiredServerSet.size());
            .detail("CurrentServerTeamCount", teams.size())
            .detail("ServerCount", server_info.size())
            .detail("NonFailedServerCount", desiredServerSet.size());
    }

    bool shouldHandleServer(const StorageServerInterface &newServer) {

@@ -2334,7 +2353,11 @@ struct DDTeamCollection : ReferenceCounted<DDTeamCollection> {
        allServers.push_back( newServer.id() );

        TraceEvent("AddedStorageServer", distributorId).detail("ServerID", newServer.id()).detail("ProcessClass", processClass.toString()).detail("WaitFailureToken", newServer.waitFailure.getEndpoint().token).detail("Address", newServer.waitFailure.getEndpoint().getPrimaryAddress());
        auto &r = server_info[newServer.id()] = Reference<TCServerInfo>( new TCServerInfo( newServer, this, processClass, includedDCs.empty() || std::find(includedDCs.begin(), includedDCs.end(), newServer.locality.dcId()) != includedDCs.end(), storageServerSet ) );
        auto& r = server_info[newServer.id()] = makeReference<TCServerInfo>(
            newServer, this, processClass,
            includedDCs.empty() ||
                std::find(includedDCs.begin(), includedDCs.end(), newServer.locality.dcId()) != includedDCs.end(),
            storageServerSet);

        // Establish the relation between server and machine
        checkAndCreateMachine(r);

@@ -2345,7 +2368,7 @@ struct DDTeamCollection : ReferenceCounted<DDTeamCollection> {
    }

    bool removeTeam( Reference<TCTeamInfo> team ) {
        TraceEvent("RemovedTeam", distributorId).detail("Team", team->getDesc());
        TraceEvent("RemovedServerTeam", distributorId).detail("Team", team->getDesc());
        bool found = false;
        for(int t=0; t<teams.size(); t++) {
            if( teams[t] == team ) {

@@ -2397,10 +2420,10 @@ struct DDTeamCollection : ReferenceCounted<DDTeamCollection> {
        Reference<TCMachineInfo> machineInfo;
        if (machine_info.find(machine_id) == machine_info.end()) {
            // uid is the first storage server process on the machine
            TEST(true);
            TEST(true); // First storage server in process on the machine
            // For each machine, store the first server's localityEntry into machineInfo for later use.
            LocalityEntry localityEntry = machineLocalityMap.add(locality, &server->id);
            machineInfo = Reference<TCMachineInfo>(new TCMachineInfo(server, localityEntry));
            machineInfo = makeReference<TCMachineInfo>(server, localityEntry);
            machine_info.insert(std::make_pair(machine_id, machineInfo));
        } else {
            machineInfo = machine_info.find(machine_id)->second;

@@ -2539,9 +2562,10 @@ struct DDTeamCollection : ReferenceCounted<DDTeamCollection> {
        int removedCount = 0;
        for (int t = 0; t < teams.size(); t++) {
            if ( std::count( teams[t]->getServerIDs().begin(), teams[t]->getServerIDs().end(), removedServer ) ) {
                TraceEvent("TeamRemoved")
                TraceEvent("ServerTeamRemoved")
                    .detail("Primary", primary)
                    .detail("TeamServerIDs", teams[t]->getServerIDsStr());
                    .detail("TeamServerIDs", teams[t]->getServerIDsStr())
                    .detail("TeamID", teams[t]->getTeamID());
                // removeTeam also needs to remove the team from the machine team info.
                removeTeam(teams[t]);
                t--;

@@ -2614,8 +2638,8 @@ struct DDTeamCollection : ReferenceCounted<DDTeamCollection> {
        restartTeamBuilder.trigger();

        TraceEvent("DataDistributionTeamCollectionUpdate", distributorId)
            .detail("Teams", teams.size())
            .detail("BadTeams", badTeams.size())
            .detail("ServerTeams", teams.size())
            .detail("BadServerTeams", badTeams.size())
            .detail("Servers", allServers.size())
            .detail("Machines", machine_info.size())
            .detail("MachineTeams", machineTeams.size())
|
|||
}
|
||||
}
|
||||
|
||||
// Take a snapshot of necessary data structures from `DDTeamCollection` and print them out with yields to avoid slow
|
||||
// task on the run loop.
|
||||
ACTOR Future<Void> printSnapshotTeamsInfo(Reference<DDTeamCollection> self) {
|
||||
state DatabaseConfiguration configuration;
|
||||
state std::map<UID, Reference<TCServerInfo>> server_info;
|
||||
state std::map<UID, ServerStatus> server_status;
|
||||
state vector<Reference<TCTeamInfo>> teams;
|
||||
state std::map<Standalone<StringRef>, Reference<TCMachineInfo>> machine_info;
|
||||
state std::vector<Reference<TCMachineTeamInfo>> machineTeams;
|
||||
// state std::vector<std::string> internedLocalityRecordKeyNameStrings;
|
||||
// state int machineLocalityMapEntryArraySize;
|
||||
// state std::vector<Reference<LocalityRecord>> machineLocalityMapRecordArray;
|
||||
state int traceEventsPrinted = 0;
|
||||
state std::vector<const UID*> serverIDs;
|
||||
state double lastPrintTime = 0;
|
||||
loop {
|
||||
wait(self->printDetailedTeamsInfo.onTrigger());
|
||||
if (now() - lastPrintTime < SERVER_KNOBS->DD_TEAMS_INFO_PRINT_INTERVAL) {
|
||||
continue;
|
||||
}
|
||||
lastPrintTime = now();
|
||||
|
||||
traceEventsPrinted = 0;
|
||||
|
||||
double snapshotStart = now();
|
||||
|
||||
configuration = self->configuration;
|
||||
server_info = self->server_info;
|
||||
teams = self->teams;
|
||||
machine_info = self->machine_info;
|
||||
machineTeams = self->machineTeams;
|
||||
// internedLocalityRecordKeyNameStrings = self->machineLocalityMap._keymap->_lookuparray;
|
||||
// machineLocalityMapEntryArraySize = self->machineLocalityMap.size();
|
||||
// machineLocalityMapRecordArray = self->machineLocalityMap.getRecordArray();
|
||||
std::vector<const UID*> _uids = self->machineLocalityMap.getObjects();
|
||||
serverIDs = _uids;
|
||||
|
||||
auto const& keys = self->server_status.getKeys();
|
||||
for (auto const& key : keys) {
|
||||
server_status.emplace(key, self->server_status.get(key));
|
||||
}
|
||||
|
||||
TraceEvent("DDPrintSnapshotTeasmInfo", self->distributorId)
|
||||
.detail("SnapshotSpeed", now() - snapshotStart)
|
||||
.detail("Primary", self->primary);
|
||||
|
||||
// Print to TraceEvents
|
||||
TraceEvent("DDConfig", self->distributorId)
|
||||
.detail("StorageTeamSize", configuration.storageTeamSize)
|
||||
.detail("DesiredTeamsPerServer", SERVER_KNOBS->DESIRED_TEAMS_PER_SERVER)
|
||||
.detail("MaxTeamsPerServer", SERVER_KNOBS->MAX_TEAMS_PER_SERVER)
|
||||
.detail("Primary", self->primary);
|
||||
|
||||
TraceEvent("ServerInfo", self->distributorId)
|
||||
.detail("Size", server_info.size())
|
||||
.detail("Primary", self->primary);
|
||||
state int i;
|
||||
state std::map<UID, Reference<TCServerInfo>>::iterator server = server_info.begin();
|
||||
for (i = 0; i < server_info.size(); i++) {
|
||||
TraceEvent("ServerInfo", self->distributorId)
|
||||
.detail("ServerInfoIndex", i)
|
||||
.detail("ServerID", server->first.toString())
|
||||
.detail("ServerTeamOwned", server->second->teams.size())
|
||||
.detail("MachineID", server->second->machine->machineID.contents().toString())
|
||||
.detail("Primary", self->primary);
|
||||
server++;
|
||||
if (++traceEventsPrinted % SERVER_KNOBS->DD_TEAMS_INFO_PRINT_YIELD_COUNT == 0) {
|
||||
wait(yield());
|
||||
}
|
||||
}
|
||||
|
||||
server = server_info.begin();
|
||||
for (i = 0; i < server_info.size(); i++) {
|
||||
const UID& uid = server->first;
|
||||
TraceEvent("ServerStatus", self->distributorId)
|
||||
.detail("ServerUID", uid)
|
||||
.detail("Healthy", !server_status.at(uid).isUnhealthy())
|
||||
.detail("MachineIsValid", server_info[uid]->machine.isValid())
|
||||
.detail("MachineTeamSize",
|
||||
server_info[uid]->machine.isValid() ? server_info[uid]->machine->machineTeams.size() : -1)
|
||||
.detail("Primary", self->primary);
|
||||
server++;
|
||||
if (++traceEventsPrinted % SERVER_KNOBS->DD_TEAMS_INFO_PRINT_YIELD_COUNT == 0) {
|
||||
wait(yield());
|
||||
}
|
||||
}
|
||||
|
||||
TraceEvent("ServerTeamInfo", self->distributorId).detail("Size", teams.size()).detail("Primary", self->primary);
|
||||
for (i = 0; i < teams.size(); i++) {
|
||||
const auto& team = teams[i];
|
||||
TraceEvent("ServerTeamInfo", self->distributorId)
|
||||
.detail("TeamIndex", i)
|
||||
.detail("Healthy", team->isHealthy())
|
||||
.detail("TeamSize", team->size())
|
||||
.detail("MemberIDs", team->getServerIDsStr())
|
||||
.detail("Primary", self->primary);
|
||||
if (++traceEventsPrinted % SERVER_KNOBS->DD_TEAMS_INFO_PRINT_YIELD_COUNT == 0) {
|
||||
wait(yield());
|
||||
}
|
||||
}
|
||||
|
||||
TraceEvent("MachineInfo", self->distributorId)
|
||||
.detail("Size", machine_info.size())
|
||||
.detail("Primary", self->primary);
|
||||
state std::map<Standalone<StringRef>, Reference<TCMachineInfo>>::iterator machine = machine_info.begin();
|
||||
state bool isMachineHealthy = false;
|
||||
for (i = 0; i < machine_info.size(); i++) {
|
||||
Reference<TCMachineInfo> _machine = machine->second;
|
||||
if (!_machine.isValid() || machine_info.find(_machine->machineID) == machine_info.end() ||
|
||||
_machine->serversOnMachine.empty()) {
|
||||
isMachineHealthy = false;
|
||||
}
|
||||
|
||||
// Healthy machine has at least one healthy server
|
||||
for (auto& server : _machine->serversOnMachine) {
|
||||
if (!server_status.at(server->id).isUnhealthy()) {
|
||||
isMachineHealthy = true;
|
||||
}
|
||||
}
|
||||
|
||||
isMachineHealthy = false;
|
||||
TraceEvent("MachineInfo", self->distributorId)
|
||||
.detail("MachineInfoIndex", i)
|
||||
.detail("Healthy", isMachineHealthy)
|
||||
.detail("MachineID", machine->first.contents().toString())
|
||||
.detail("MachineTeamOwned", machine->second->machineTeams.size())
|
||||
.detail("ServerNumOnMachine", machine->second->serversOnMachine.size())
|
||||
.detail("ServersID", machine->second->getServersIDStr())
|
||||
.detail("Primary", self->primary);
|
||||
machine++;
|
||||
if (++traceEventsPrinted % SERVER_KNOBS->DD_TEAMS_INFO_PRINT_YIELD_COUNT == 0) {
|
||||
wait(yield());
|
||||
}
|
||||
}
|
||||
|
||||
TraceEvent("MachineTeamInfo", self->distributorId)
|
||||
.detail("Size", machineTeams.size())
|
||||
.detail("Primary", self->primary);
|
||||
for (i = 0; i < machineTeams.size(); i++) {
|
||||
const auto& team = machineTeams[i];
|
||||
TraceEvent("MachineTeamInfo", self->distributorId)
|
||||
.detail("TeamIndex", i)
|
||||
.detail("MachineIDs", team->getMachineIDsStr())
|
||||
.detail("ServerTeams", team->serverTeams.size())
|
||||
.detail("Primary", self->primary);
|
||||
if (++traceEventsPrinted % SERVER_KNOBS->DD_TEAMS_INFO_PRINT_YIELD_COUNT == 0) {
|
||||
wait(yield());
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: re-enable the following logging or remove them.
|
||||
// TraceEvent("LocalityRecordKeyName", self->distributorId)
|
||||
// .detail("Size", internedLocalityRecordKeyNameStrings.size())
|
||||
// .detail("Primary", self->primary);
|
||||
// for (i = 0; i < internedLocalityRecordKeyNameStrings.size(); i++) {
|
||||
// TraceEvent("LocalityRecordKeyIndexName", self->distributorId)
|
||||
// .detail("KeyIndex", i)
|
||||
// .detail("KeyName", internedLocalityRecordKeyNameStrings[i])
|
||||
// .detail("Primary", self->primary);
|
||||
// if (++traceEventsPrinted % SERVER_KNOBS->DD_TEAMS_INFO_PRINT_YIELD_COUNT == 0) {
|
||||
// wait(yield());
|
||||
// }
|
||||
// }
|
||||
|
||||
// TraceEvent("MachineLocalityMap", self->distributorId)
|
||||
// .detail("Size", machineLocalityMapEntryArraySize)
|
||||
// .detail("Primary", self->primary);
|
||||
// for (i = 0; i < serverIDs.size(); i++) {
|
||||
// const auto& serverID = serverIDs[i];
|
||||
// Reference<LocalityRecord> record = machineLocalityMapRecordArray[i];
|
||||
// if (record.isValid()) {
|
||||
// TraceEvent("MachineLocalityMap", self->distributorId)
|
||||
// .detail("LocalityIndex", i)
|
||||
// .detail("UID", serverID->toString())
|
||||
// .detail("LocalityRecord", record->toString())
|
||||
// .detail("Primary", self->primary);
|
||||
// } else {
|
||||
// TraceEvent("MachineLocalityMap", self->distributorId)
|
||||
// .detail("LocalityIndex", i)
|
||||
// .detail("UID", serverID->toString())
|
||||
// .detail("LocalityRecord", "[NotFound]")
|
||||
// .detail("Primary", self->primary);
|
||||
// }
|
||||
// if (++traceEventsPrinted % SERVER_KNOBS->DD_TEAMS_INFO_PRINT_YIELD_COUNT == 0) {
|
||||
// wait(yield());
|
||||
// }
|
||||
// }
|
||||
}
|
||||
}
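
The loop above batches its TraceEvents and yields every DD_TEAMS_INFO_PRINT_YIELD_COUNT events so a large snapshot dump cannot monopolize the run loop. A minimal sketch of the same pattern outside Flow, with a placeholder in place of wait(yield()):

#include <cstdio>
#include <string>
#include <vector>

constexpr int kYieldCount = 100; // stands in for DD_TEAMS_INFO_PRINT_YIELD_COUNT

void cooperativeYield() { /* placeholder for wait(yield()) in Flow */ }

void printAllEvents(const std::vector<std::string>& events) {
    int traceEventsPrinted = 0;
    for (const auto& e : events) {
        std::printf("%s\n", e.c_str());
        // Hand control back to the scheduler every kYieldCount events.
        if (++traceEventsPrinted % kYieldCount == 0) {
            cooperativeYield();
        }
    }
}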

ACTOR Future<Void> removeBadTeams(DDTeamCollection* self) {
    wait(self->initialFailureReactionDelay);
    wait(waitUntilHealthy(self));
    wait(self->addSubsetComplete.getFuture());
    TraceEvent("DDRemovingBadTeams", self->distributorId).detail("Primary", self->primary);
    TraceEvent("DDRemovingBadServerTeams", self->distributorId).detail("Primary", self->primary);
    for(auto it : self->badTeams) {
        it->tracker.cancel();
    }

@@ -2837,9 +3051,9 @@ ACTOR Future<Void> machineTeamRemover(DDTeamCollection* self) {
            // Check if a server will have 0 team after the team is removed
            for (auto& s : team->getServers()) {
                if (s->teams.size() == 0) {
                    TraceEvent(SevError, "TeamRemoverTooAggressive")
                    TraceEvent(SevError, "MachineTeamRemoverTooAggressive", self->distributorId)
                        .detail("Server", s->id)
                        .detail("Team", team->getServerIDsStr());
                        .detail("ServerTeam", team->getDesc());
                    self->traceAllInfo(true);
                }
            }
@@ -2851,7 +3065,7 @@ ACTOR Future<Void> machineTeamRemover(DDTeamCollection* self) {
            // in the serverTeams vector in the machine team.
            --teamIndex;
            self->addTeam(team->getServers(), true, true);
            TEST(true);
            TEST(true); // Removed machine team
        }

        self->doBuildTeams = true;
@@ -2862,6 +3076,7 @@ ACTOR Future<Void> machineTeamRemover(DDTeamCollection* self) {
        }

        TraceEvent("MachineTeamRemover", self->distributorId)
            .detail("MachineTeamIDToRemove", mt->id.shortString())
            .detail("MachineTeamToRemove", mt->getMachineIDsStr())
            .detail("NumProcessTeamsOnTheMachineTeam", minNumProcessTeams)
            .detail("CurrentMachineTeams", self->machineTeams.size())
@@ -2877,7 +3092,7 @@ ACTOR Future<Void> machineTeamRemover(DDTeamCollection* self) {
    } else {
        if (numMachineTeamRemoved > 0) {
            // Only trace the information when we remove a machine team
            TraceEvent("TeamRemoverDone")
            TraceEvent("MachineTeamRemoverDone", self->distributorId)
                .detail("HealthyMachines", healthyMachineCount)
                // .detail("CurrentHealthyMachineTeams", currentHealthyMTCount)
                .detail("CurrentMachineTeams", self->machineTeams.size())
@@ -2930,7 +3145,7 @@ ACTOR Future<Void> serverTeamRemover(DDTeamCollection* self) {
            bool foundTeam = self->removeTeam(st);
            ASSERT(foundTeam == true);
            self->addTeam(st->getServers(), true, true);
            TEST(true);
            TEST(true); // Marked team as a bad team

            self->doBuildTeams = true;

@@ -2941,6 +3156,7 @@ ACTOR Future<Void> serverTeamRemover(DDTeamCollection* self) {

            TraceEvent("ServerTeamRemover", self->distributorId)
                .detail("ServerTeamToRemove", st->getServerIDsStr())
                .detail("ServerTeamID", st->getTeamID())
                .detail("NumProcessTeamsOnTheServerTeam", maxNumProcessTeams)
                .detail("CurrentServerTeams", self->teams.size())
                .detail("DesiredServerTeams", desiredServerTeams);

@@ -2960,6 +3176,35 @@ ACTOR Future<Void> serverTeamRemover(DDTeamCollection* self) {
    }
}

ACTOR Future<Void> zeroServerLeftLogger_impl(DDTeamCollection* self, Reference<TCTeamInfo> team) {
    wait(delay(SERVER_KNOBS->DD_TEAM_ZERO_SERVER_LEFT_LOG_DELAY));
    state vector<KeyRange> shards = self->shardsAffectedByTeamFailure->getShardsFor(
        ShardsAffectedByTeamFailure::Team(team->getServerIDs(), self->primary));
    state std::vector<Future<StorageMetrics>> sizes;
    sizes.reserve(shards.size());

    for (auto const& shard : shards) {
        sizes.emplace_back(brokenPromiseToNever(self->getShardMetrics.getReply(GetMetricsRequest(shard))));
        TraceEvent(SevWarnAlways, "DDShardLost", self->distributorId)
            .detail("ServerTeamID", team->getTeamID())
            .detail("ShardBegin", shard.begin)
            .detail("ShardEnd", shard.end);
    }

    wait(waitForAll(sizes));

    int64_t bytesLost = 0;
    for (auto const& size : sizes) {
        bytesLost += size.get().bytes;
    }

    TraceEvent(SevWarnAlways, "DDZeroServerLeftInTeam", self->distributorId)
        .detail("Team", team->getDesc())
        .detail("TotalBytesLost", bytesLost);

    return Void();
}
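
zeroServerLeftLogger_impl waits out a grace period, fires one metrics request per shard held by the dead team, waits for all of them, and only then sums the bytes. A simplified stand-alone sketch of that fan-out/fan-in accounting, with std::async standing in for getShardMetrics.getReply:

#include <cstdint>
#include <future>
#include <vector>

struct StorageMetrics { int64_t bytes = 0; };

// Hypothetical stand-in for asking the metrics actor about one shard.
StorageMetrics fetchShardMetrics(int64_t shardBytes) { return StorageMetrics{ shardBytes }; }

int64_t totalBytesLost(const std::vector<int64_t>& shardSizes) {
    std::vector<std::future<StorageMetrics>> sizes;
    sizes.reserve(shardSizes.size());
    for (int64_t s : shardSizes) {              // fan out: one request per shard
        sizes.push_back(std::async(std::launch::async, fetchShardMetrics, s));
    }
    int64_t bytesLost = 0;
    for (auto& f : sizes) {                     // fan in: waitForAll, then sum
        bytesLost += f.get().bytes;
    }
    return bytesLost;
}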

bool teamContainsFailedServer(DDTeamCollection* self, Reference<TCTeamInfo> team) {
    auto ssis = team->getLastKnownServerInterfaces();
    for (const auto &ssi : ssis) {

@@ -2995,18 +3240,22 @@ ACTOR Future<Void> teamTracker(DDTeamCollection* self, Reference<TCTeamInfo> tea
    state bool lastZeroHealthy = self->zeroHealthyTeams->get();
    state bool firstCheck = true;

    state Future<Void> zeroServerLeftLogger;

    if(logTeamEvents) {
        TraceEvent("TeamTrackerStarting", self->distributorId).detail("Reason", "Initial wait complete (sc)").detail("Team", team->getDesc());
        TraceEvent("ServerTeamTrackerStarting", self->distributorId)
            .detail("Reason", "Initial wait complete (sc)")
            .detail("ServerTeam", team->getDesc());
    }
    self->priority_teams[team->getPriority()]++;

    try {
        loop {
            if(logTeamEvents) {
                TraceEvent("TeamHealthChangeDetected", self->distributorId)
                    .detail("Team", team->getDesc())
                    .detail("Primary", self->primary)
                    .detail("IsReady", self->initialFailureReactionDelay.isReady());
                TraceEvent("ServerTeamHealthChangeDetected", self->distributorId)
                    .detail("ServerTeam", team->getDesc())
                    .detail("Primary", self->primary)
                    .detail("IsReady", self->initialFailureReactionDelay.isReady());
                self->traceTeamCollectionInfo();
            }
            // Check if the number of degraded machines has changed
@@ -3082,10 +3331,13 @@ ACTOR Future<Void> teamTracker(DDTeamCollection* self, Reference<TCTeamInfo> tea
            if (serversLeft != lastServersLeft || anyUndesired != lastAnyUndesired ||
                anyWrongConfiguration != lastWrongConfiguration || recheck) { // NOTE: do not check wrongSize
                if(logTeamEvents) {
                    TraceEvent("TeamHealthChanged", self->distributorId)
                        .detail("Team", team->getDesc()).detail("ServersLeft", serversLeft)
                        .detail("LastServersLeft", lastServersLeft).detail("ContainsUndesiredServer", anyUndesired)
                        .detail("HealthyTeamsCount", self->healthyTeamCount).detail("IsWrongConfiguration", anyWrongConfiguration);
                    TraceEvent("ServerTeamHealthChanged", self->distributorId)
                        .detail("ServerTeam", team->getDesc())
                        .detail("ServersLeft", serversLeft)
                        .detail("LastServersLeft", lastServersLeft)
                        .detail("ContainsUndesiredServer", anyUndesired)
                        .detail("HealthyTeamsCount", self->healthyTeamCount)
                        .detail("IsWrongConfiguration", anyWrongConfiguration);
                }

                team->setWrongConfiguration( anyWrongConfiguration );
@@ -3107,18 +3359,18 @@ ACTOR Future<Void> teamTracker(DDTeamCollection* self, Reference<TCTeamInfo> tea
                    self->zeroHealthyTeams->set(self->healthyTeamCount == 0);

                    if( self->healthyTeamCount == 0 ) {
                        TraceEvent(SevWarn, "ZeroTeamsHealthySignalling", self->distributorId)
                            .detail("SignallingTeam", team->getDesc())
                            .detail("Primary", self->primary);
                        TraceEvent(SevWarn, "ZeroServerTeamsHealthySignalling", self->distributorId)
                            .detail("SignallingTeam", team->getDesc())
                            .detail("Primary", self->primary);
                    }

                    if(logTeamEvents) {
                        TraceEvent("TeamHealthDifference", self->distributorId)
                            .detail("Team", team->getDesc())
                            .detail("LastOptimal", lastOptimal)
                            .detail("LastHealthy", lastHealthy)
                            .detail("Optimal", optimal)
                            .detail("OptimalTeamCount", self->optimalTeamCount);
                        TraceEvent("ServerTeamHealthDifference", self->distributorId)
                            .detail("ServerTeam", team->getDesc())
                            .detail("LastOptimal", lastOptimal)
                            .detail("LastHealthy", lastHealthy)
                            .detail("Optimal", optimal)
                            .detail("OptimalTeamCount", self->optimalTeamCount);
                    }
                }

@@ -3155,12 +3407,24 @@ ACTOR Future<Void> teamTracker(DDTeamCollection* self, Reference<TCTeamInfo> tea
                if(lastPriority != team->getPriority()) {
                    self->priority_teams[lastPriority]--;
                    self->priority_teams[team->getPriority()]++;
                    if (lastPriority == SERVER_KNOBS->PRIORITY_TEAM_0_LEFT &&
                        team->getPriority() < SERVER_KNOBS->PRIORITY_TEAM_0_LEFT) {
                        zeroServerLeftLogger = Void();
                    }
                    if (logTeamEvents) {
                        int dataLoss = team->getPriority() == SERVER_KNOBS->PRIORITY_TEAM_0_LEFT;
                        Severity severity = dataLoss ? SevWarnAlways : SevInfo;
                        TraceEvent(severity, "ServerTeamPriorityChange", self->distributorId)
                            .detail("Priority", team->getPriority())
                            .detail("Info", team->getDesc())
                            .detail("ZeroHealthyServerTeams", self->zeroHealthyTeams->get());
                        if (team->getPriority() == SERVER_KNOBS->PRIORITY_TEAM_0_LEFT) {
                            // 0 servers left in this team, data might be lost.
                            zeroServerLeftLogger = zeroServerLeftLogger_impl(self, team);
                        }
                    }
                }

                if(logTeamEvents) {
                    TraceEvent("TeamPriorityChange", self->distributorId).detail("Priority", team->getPriority())
                        .detail("Info", team->getDesc()).detail("ZeroHealthyTeams", self->zeroHealthyTeams->get());
                }

                lastZeroHealthy = self->zeroHealthyTeams->get(); // set this again in case it changed from this team's health changing
                if ((self->initialFailureReactionDelay.isReady() && !self->zeroHealthyTeams->get()) || containsFailed) {
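
The hunk above keeps priority_teams consistent by moving a team out of its old priority bucket before counting it in the new one, and it arms the zero-server-left logger only on transitions into PRIORITY_TEAM_0_LEFT (cancelling it on recovery). A small sketch of that bucket bookkeeping; the priority constant is illustrative, not the real knob value:

#include <map>

constexpr int PRIORITY_TEAM_0_LEFT = 999; // illustrative stand-in

struct PriorityBuckets {
    std::map<int, int> priority_teams; // priority -> number of teams at that priority
    bool zeroServerLoggerArmed = false;

    void onPriorityChange(int lastPriority, int newPriority) {
        if (lastPriority == newPriority) return;
        priority_teams[lastPriority]--;    // leave the old bucket first
        priority_teams[newPriority]++;     // then join the new one
        if (lastPriority == PRIORITY_TEAM_0_LEFT && newPriority < PRIORITY_TEAM_0_LEFT)
            zeroServerLoggerArmed = false; // team recovered: cancel the pending logger
        if (newPriority == PRIORITY_TEAM_0_LEFT)
            zeroServerLoggerArmed = true;  // data might be lost: arm the delayed logger
    }
};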

@@ -3230,17 +3494,19 @@ ACTOR Future<Void> teamTracker(DDTeamCollection* self, Reference<TCTeamInfo> tea
                        self->output.send(rs);
                        TraceEvent("SendRelocateToDDQueue", self->distributorId)
                            .suppressFor(1.0)
                            .detail("Primary", self->primary)
                            .detail("Team", team->getDesc())
                            .detail("ServerPrimary", self->primary)
                            .detail("ServerTeam", team->getDesc())
                            .detail("KeyBegin", rs.keys.begin)
                            .detail("KeyEnd", rs.keys.end)
                            .detail("Priority", rs.priority)
                            .detail("TeamFailedMachines", team->size() - serversLeft)
                            .detail("TeamOKMachines", serversLeft);
                            .detail("ServerTeamFailedMachines", team->size() - serversLeft)
                            .detail("ServerTeamOKMachines", serversLeft);
                    }
                } else {
                    if(logTeamEvents) {
                        TraceEvent("TeamHealthNotReady", self->distributorId).detail("HealthyTeamCount", self->healthyTeamCount);
                        TraceEvent("ServerTeamHealthNotReady", self->distributorId)
                            .detail("HealthyServerTeamCount", self->healthyTeamCount)
                            .detail("ServerTeamID", team->getTeamID());
                    }
                }
            }
@@ -3252,7 +3518,7 @@ ACTOR Future<Void> teamTracker(DDTeamCollection* self, Reference<TCTeamInfo> tea
    } catch(Error& e) {
        if(logTeamEvents) {
            TraceEvent("TeamTrackerStopping", self->distributorId)
                .detail("Primary", self->primary)
                .detail("ServerPrimary", self->primary)
                .detail("Team", team->getDesc())
                .detail("Priority", team->getPriority());
        }
@@ -3263,8 +3529,8 @@ ACTOR Future<Void> teamTracker(DDTeamCollection* self, Reference<TCTeamInfo> tea

        if( self->healthyTeamCount == 0 ) {
            TraceEvent(SevWarn, "ZeroTeamsHealthySignalling", self->distributorId)
                .detail("Primary", self->primary)
                .detail("SignallingTeam", team->getDesc());
                .detail("ServerPrimary", self->primary)
                .detail("SignallingServerTeam", team->getDesc());
            self->zeroHealthyTeams->set(true);
        }
    }
@@ -4677,7 +4943,7 @@ ACTOR Future<Void> dataDistribution(Reference<DataDistributorData> self,
    Reference<AsyncVar<bool>> anyZeroHealthyTeams;
    vector<Reference<AsyncVar<bool>>> zeroHealthyTeams;
    tcis.push_back(TeamCollectionInterface());
    zeroHealthyTeams.push_back(Reference<AsyncVar<bool>>( new AsyncVar<bool>(true) ));
    zeroHealthyTeams.push_back(makeReference<AsyncVar<bool>>(true));
    int storageTeamSize = configuration.storageTeamSize;

    // Stored outside of data distribution tracker to avoid slow tasks
@@ -4689,8 +4955,8 @@ ACTOR Future<Void> dataDistribution(Reference<DataDistributorData> self,
        tcis.push_back(TeamCollectionInterface());
        storageTeamSize = 2*configuration.storageTeamSize;

        zeroHealthyTeams.push_back( Reference<AsyncVar<bool>>( new AsyncVar<bool>(true) ) );
        anyZeroHealthyTeams = Reference<AsyncVar<bool>>( new AsyncVar<bool>(true) );
        zeroHealthyTeams.push_back(makeReference<AsyncVar<bool>>(true));
        anyZeroHealthyTeams = makeReference<AsyncVar<bool>>(true);
        actors.push_back( anyTrue(zeroHealthyTeams, anyZeroHealthyTeams) );
    } else {
        anyZeroHealthyTeams = zeroHealthyTeams[0];
@@ -4709,22 +4975,31 @@ ACTOR Future<Void> dataDistribution(Reference<DataDistributorData> self,
        "DDQueue", self->ddId, &normalDDQueueErrors()));

    vector<DDTeamCollection*> teamCollectionsPtrs;
    primaryTeamCollection = Reference<DDTeamCollection>( new DDTeamCollection(cx, self->ddId, lock, output, shardsAffectedByTeamFailure, configuration, primaryDcId, configuration.usableRegions > 1 ? remoteDcIds : std::vector<Optional<Key>>(), readyToStart.getFuture(), zeroHealthyTeams[0], true, processingUnhealthy) );
    primaryTeamCollection = makeReference<DDTeamCollection>(
        cx, self->ddId, lock, output, shardsAffectedByTeamFailure, configuration, primaryDcId,
        configuration.usableRegions > 1 ? remoteDcIds : std::vector<Optional<Key>>(), readyToStart.getFuture(),
        zeroHealthyTeams[0], true, processingUnhealthy, getShardMetrics);
    teamCollectionsPtrs.push_back(primaryTeamCollection.getPtr());
    if (configuration.usableRegions > 1) {
        remoteTeamCollection = Reference<DDTeamCollection>( new DDTeamCollection(cx, self->ddId, lock, output, shardsAffectedByTeamFailure, configuration, remoteDcIds, Optional<std::vector<Optional<Key>>>(), readyToStart.getFuture() && remoteRecovered(self->dbInfo), zeroHealthyTeams[1], false, processingUnhealthy) );
        remoteTeamCollection = makeReference<DDTeamCollection>(
            cx, self->ddId, lock, output, shardsAffectedByTeamFailure, configuration, remoteDcIds,
            Optional<std::vector<Optional<Key>>>(), readyToStart.getFuture() && remoteRecovered(self->dbInfo),
            zeroHealthyTeams[1], false, processingUnhealthy, getShardMetrics);
        teamCollectionsPtrs.push_back(remoteTeamCollection.getPtr());
        remoteTeamCollection->teamCollections = teamCollectionsPtrs;
        actors.push_back(
            reportErrorsExcept(dataDistributionTeamCollection(remoteTeamCollection, initData, tcis[1],
                                                              self->dbInfo, ddEnabledState),
                               "DDTeamCollectionSecondary", self->ddId, &normalDDQueueErrors()));
        actors.push_back(printSnapshotTeamsInfo(remoteTeamCollection));
    }
    primaryTeamCollection->teamCollections = teamCollectionsPtrs;
    self->teamCollection = primaryTeamCollection.getPtr();
    actors.push_back(reportErrorsExcept(
        dataDistributionTeamCollection(primaryTeamCollection, initData, tcis[0], self->dbInfo, ddEnabledState),
        "DDTeamCollectionPrimary", self->ddId, &normalDDQueueErrors()));

    actors.push_back(printSnapshotTeamsInfo(primaryTeamCollection));
    actors.push_back(yieldPromiseStream(output.getFuture(), input));

    wait( waitForAll( actors ) );
@@ -4848,6 +5123,22 @@ ACTOR Future<Void> ddSnapCreateCore(DistributorSnapRequest snapReq, Reference<As
        TraceEvent("SnapDataDistributor_AfterSnapCoords")
            .detail("SnapPayload", snapReq.snapPayload)
            .detail("SnapUID", snapReq.snapUID);
        tr.reset();
        loop {
            try {
                tr.setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
                tr.setOption(FDBTransactionOptions::LOCK_AWARE);
                TraceEvent("SnapDataDistributor_ClearFlagAttempt")
                    .detail("SnapPayload", snapReq.snapPayload)
                    .detail("SnapUID", snapReq.snapUID);
                tr.clear(writeRecoveryKey);
                wait(tr.commit());
                break;
            } catch (Error& e) {
                TraceEvent("SnapDataDistributor_ClearFlagError").error(e);
                wait(tr.onError(e));
            }
        }
    } catch (Error& err) {
        state Error e = err;
        TraceEvent("SnapDataDistributor_SnapReqExit")

@@ -5108,31 +5399,18 @@ ACTOR Future<Void> dataDistributor(DataDistributorInterface di, Reference<AsyncV
}

DDTeamCollection* testTeamCollection(int teamSize, Reference<IReplicationPolicy> policy, int processCount) {
    Database database = DatabaseContext::create(
        Reference<AsyncVar<ClientDBInfo>>(new AsyncVar<ClientDBInfo>()),
        Never(),
        LocalityData(),
        false
    );
    Database database =
        DatabaseContext::create(makeReference<AsyncVar<ClientDBInfo>>(), Never(), LocalityData(), false);

    DatabaseConfiguration conf;
    conf.storageTeamSize = teamSize;
    conf.storagePolicy = policy;

    DDTeamCollection* collection = new DDTeamCollection(
        database,
        UID(0, 0),
        MoveKeysLock(),
        PromiseStream<RelocateShard>(),
        Reference<ShardsAffectedByTeamFailure>(new ShardsAffectedByTeamFailure()),
        conf,
        {},
        {},
        Future<Void>(Void()),
        Reference<AsyncVar<bool>>( new AsyncVar<bool>(true) ),
        true,
        Reference<AsyncVar<bool>>( new AsyncVar<bool>(false) )
    );
    DDTeamCollection* collection =
        new DDTeamCollection(database, UID(0, 0), MoveKeysLock(), PromiseStream<RelocateShard>(),
                             makeReference<ShardsAffectedByTeamFailure>(), conf, {}, {}, Future<Void>(Void()),
                             makeReference<AsyncVar<bool>>(true), true, makeReference<AsyncVar<bool>>(false),
                             PromiseStream<GetMetricsRequest>());

    for (int id = 1; id <= processCount; ++id) {
        UID uid(id, 0);
@@ -5141,7 +5419,8 @@ DDTeamCollection* testTeamCollection(int teamSize, Reference<IReplicationPolicy>
        interface.locality.set(LiteralStringRef("machineid"), Standalone<StringRef>(std::to_string(id)));
        interface.locality.set(LiteralStringRef("zoneid"), Standalone<StringRef>(std::to_string(id % 5)));
        interface.locality.set(LiteralStringRef("data_hall"), Standalone<StringRef>(std::to_string(id % 3)));
        collection->server_info[uid] = Reference<TCServerInfo>(new TCServerInfo(interface, collection, ProcessClass(), true, collection->storageServerSet));
        collection->server_info[uid] =
            makeReference<TCServerInfo>(interface, collection, ProcessClass(), true, collection->storageServerSet);
        collection->server_status.set(uid, ServerStatus(false, false, interface.locality));
        collection->checkAndCreateMachine(collection->server_info[uid]);
    }
@@ -5150,8 +5429,8 @@ DDTeamCollection* testTeamCollection(int teamSize, Reference<IReplicationPolicy>
}

DDTeamCollection* testMachineTeamCollection(int teamSize, Reference<IReplicationPolicy> policy, int processCount) {
    Database database = DatabaseContext::create(Reference<AsyncVar<ClientDBInfo>>(new AsyncVar<ClientDBInfo>()),
                                                Never(), LocalityData(), false);
    Database database =
        DatabaseContext::create(makeReference<AsyncVar<ClientDBInfo>>(), Never(), LocalityData(), false);

    DatabaseConfiguration conf;
    conf.storageTeamSize = teamSize;
@@ -5159,10 +5438,9 @@ DDTeamCollection* testMachineTeamCollection(int teamSize, Reference<IReplication

    DDTeamCollection* collection =
        new DDTeamCollection(database, UID(0, 0), MoveKeysLock(), PromiseStream<RelocateShard>(),
                             Reference<ShardsAffectedByTeamFailure>(new ShardsAffectedByTeamFailure()), conf, {}, {},
                             Future<Void>(Void()),
                             Reference<AsyncVar<bool>>(new AsyncVar<bool>(true)), true,
                             Reference<AsyncVar<bool>>(new AsyncVar<bool>(false)));
                             makeReference<ShardsAffectedByTeamFailure>(), conf, {}, {}, Future<Void>(Void()),
                             makeReference<AsyncVar<bool>>(true), true, makeReference<AsyncVar<bool>>(false),
                             PromiseStream<GetMetricsRequest>());

    for (int id = 1; id <= processCount; id++) {
        UID uid(id, 0);
@@ -5182,7 +5460,7 @@ DDTeamCollection* testMachineTeamCollection(int teamSize, Reference<IReplication
        interface.locality.set(LiteralStringRef("data_hall"), Standalone<StringRef>(std::to_string(data_hall_id)));
        interface.locality.set(LiteralStringRef("dcid"), Standalone<StringRef>(std::to_string(dc_id)));
        collection->server_info[uid] =
            Reference<TCServerInfo>(new TCServerInfo(interface, collection, ProcessClass(), true, collection->storageServerSet));
            makeReference<TCServerInfo>(interface, collection, ProcessClass(), true, collection->storageServerSet);

        collection->server_status.set(uid, ServerStatus(false, false, interface.locality));
    }

@@ -58,10 +58,12 @@ struct IDataDistributionTeam {
    virtual bool isWrongConfiguration() const = 0;
    virtual void setWrongConfiguration(bool) = 0;
    virtual void addServers(const vector<UID> &servers) = 0;
    virtual std::string getTeamID() const = 0;

    std::string getDesc() const {
        const auto& servers = getLastKnownServerInterfaces();
        std::string s = format("Size %d; ", servers.size());
        std::string s = format("TeamID:%s", getTeamID().c_str());
        s += format("Size %d; ", servers.size());
        for(int i=0; i<servers.size(); i++) {
            if (i) s += ", ";
            s += servers[i].address().toString() + " " + servers[i].id().shortString();

@@ -84,6 +84,7 @@ struct RelocateData {

class ParallelTCInfo final : public ReferenceCounted<ParallelTCInfo>, public IDataDistributionTeam {
    vector<Reference<IDataDistributionTeam>> teams;
    vector<UID> tempServerIDs;

    int64_t sum(std::function<int64_t(IDataDistributionTeam const&)> func) const {
        int64_t result = 0;
@@ -235,6 +236,15 @@ public:
        ASSERT(!teams.empty());
        teams[0]->addServers(servers);
    }

    std::string getTeamID() const override {
        std::string id;
        for (int i = 0; i < teams.size(); i++) {
            auto const& team = teams[i];
            id += (i == teams.size() - 1) ? team->getTeamID() : format("%s, ", team->getTeamID().c_str());
        }
        return id;
    }
};
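
ParallelTCInfo::getTeamID() above concatenates the member team IDs, appending ", " after every element except the last. The same join, reduced to a free function:

#include <string>
#include <vector>

std::string joinTeamIDs(const std::vector<std::string>& ids) {
    std::string out;
    for (size_t i = 0; i < ids.size(); i++) {
        out += ids[i];
        if (i + 1 < ids.size()) out += ", "; // separator everywhere but after the last ID
    }
    return out;
}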

struct Busyness {

@@ -1039,6 +1049,9 @@ ACTOR Future<Void> dataDistributionRelocator(DDQueueData* self, RelocateData rd,
    } else {
        TraceEvent(relocateShardInterval.severity, "RelocateShardHasDestination", distributorId)
            .detail("PairId", relocateShardInterval.pairID)
            .detail("KeyBegin", rd.keys.begin)
            .detail("KeyEnd", rd.keys.end)
            .detail("SourceServers", describe(rd.src))
            .detail("DestinationTeam", describe(destIds))
            .detail("ExtraIds", describe(extraIds));
    }

@@ -701,8 +701,10 @@ ACTOR Future<Void> shardTracker(
        wait( delay(0, TaskPriority::DataDistribution) );
        }
    } catch (Error& e) {
        if (e.code() != error_code_actor_cancelled)
            self->output.sendError(e); // Propagate failure to dataDistributionTracker
        // If e is broken_promise then self may have already been deleted
        if (e.code() != error_code_actor_cancelled && e.code() != error_code_broken_promise) {
            self->output.sendError(e); // Propagate failure to dataDistributionTracker
        }
        throw e;
    }
}
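
The shardTracker fix above adds broken_promise to the set of errors that must not be forwarded: cancellation is routine, and a broken promise means the tracker that owns the stream may already be gone. The guard, isolated (the codes are illustrative enumerators, not the real FDB error numbers):

enum class ErrCode { actor_cancelled, broken_promise, other }; // illustrative only

// Forward an error downstream only when it is neither a cancellation nor a
// sign that the receiver may already have been destroyed.
bool shouldPropagate(ErrCode code) {
    return code != ErrCode::actor_cancelled && code != ErrCode::broken_promise;
}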

@@ -717,7 +719,7 @@ void restartShardTrackers(DataDistributionTracker* self, KeyRangeRef keys, Optio
        continue;
    }

    Reference<AsyncVar<Optional<ShardMetrics>>> shardMetrics(new AsyncVar<Optional<ShardMetrics>>());
    auto shardMetrics = makeReference<AsyncVar<Optional<ShardMetrics>>>();

    // For the case where the new tracker will take over at the boundaries of current shard(s)
    // we can use the old size if it is available. This will be the case when merging shards.

@@ -241,7 +241,7 @@ public:

    void setFile(Reference<IAsyncFile> f) {
        this->f = f;
        this->syncQueue = Reference<SyncQueue>( new SyncQueue(1, f) );
        this->syncQueue = makeReference<SyncQueue>(1, f);
    }
};
File files[2]; // After readFirstAndLastPages(), files[0] is logically before files[1] (pushes are always into files[1])

@@ -21,7 +21,7 @@
#define IKEYVALUECONTAINER_H
#pragma once

#include "IndexedSet.h"
#include "flow/IndexedSet.h"

// Stored in the IndexedSets that hold the database.
// Each KeyValueMapPair is 32 bytes, excluding arena memory.
@@ -18,15 +18,15 @@
 * limitations under the License.
 */

#include "fdbserver/IKeyValueStore.h"
#include "fdbserver/IDiskQueue.h"
#include "flow/IKeyValueContainer.h"
#include "flow/RadixTree.h"
#include "flow/ActorCollection.h"
#include "fdbclient/Notified.h"
#include "fdbclient/SystemData.h"
#include "flow/actorcompiler.h" // This must be the last #include.
#include "fdbserver/DeltaTree.h"
#include "fdbserver/IDiskQueue.h"
#include "fdbserver/IKeyValueContainer.h"
#include "fdbserver/IKeyValueStore.h"
#include "fdbserver/RadixTree.h"
#include "flow/ActorCollection.h"
#include "flow/actorcompiler.h" // This must be the last #include.

#define OP_DISK_OVERHEAD (sizeof(OpHeader) + 1)

@@ -25,6 +25,7 @@
#include "fdbserver/CoroFlow.h"
#include "fdbserver/Knobs.h"
#include "flow/Hash3.h"
#include "flow/xxhash.h"

extern "C" {
#include "fdbserver/sqlite/sqliteInt.h"
@@ -94,28 +95,51 @@ struct PageChecksumCodec {
    SumType *pSumInPage = (SumType *)(pData + dataLen);

    if (write) {
        // Always write a CRC32 checksum for new pages
        pSumInPage->part1 = 0; // Indicates CRC32 is being used
        pSumInPage->part2 = crc32c_append(0xfdbeefdb, static_cast<uint8_t*>(data), dataLen);
        // Always write a xxHash3 checksum for new pages
        // First 8 bits are set to 0 so that with high probability,
        // checksums written with hashlittle2 don't require calculating
        // an xxHash3 checksum on read
        auto xxHash3 = XXH3_64bits(data, dataLen);
        pSumInPage->part1 = static_cast<uint32_t>((xxHash3 >> 32) & 0x00ffffff);
        pSumInPage->part2 = static_cast<uint32_t>(xxHash3 & 0xffffffff);
        return true;
    }

    SumType sum;
    SumType crc32Sum;
    if (pSumInPage->part1 == 0) {
        // part1 being 0 indicates with high probability that a CRC32 checksum
        // part1 being 0 indicates with very high probability that a CRC32 checksum
        // was used, so check that first. If this checksum fails, there is still
        // some chance the page was written with hashlittle2, so fall back to checking
        // hashlittle2
        sum.part1 = 0;
        sum.part2 = crc32c_append(0xfdbeefdb, static_cast<uint8_t*>(data), dataLen);
        if (sum == *pSumInPage) return true;
        // some chance the page was written with another checksum algorithm
        crc32Sum.part1 = 0;
        crc32Sum.part2 = crc32c_append(0xfdbeefdb, static_cast<uint8_t*>(data), dataLen);
        if (crc32Sum == *pSumInPage) {
            TEST(true); // Read CRC32 checksum
            return true;
        }
    }

    // Try xxhash3
    SumType xxHash3Sum;
    if ((pSumInPage->part1 >> 24) == 0) {
        // The first 8 bits of part1 being 0 indicates with high probability that an
        // xxHash3 checksum was used, so check that next. If this checksum fails, there is
        // still some chance the page was written with hashlittle2, so fall back to checking
        // hashlittle2
        auto xxHash3 = XXH3_64bits(data, dataLen);
        xxHash3Sum.part1 = static_cast<uint32_t>((xxHash3 >> 32) & 0x00ffffff);
        xxHash3Sum.part2 = static_cast<uint32_t>(xxHash3 & 0xffffffff);
        if (xxHash3Sum == *pSumInPage) return true;
    }

    // Try hashlittle2
    SumType hashLittle2Sum;
    hashLittle2Sum.part1 = pageNumber; // DO NOT CHANGE
    hashLittle2Sum.part2 = 0x5ca1ab1e;
    hashlittle2(pData, dataLen, &hashLittle2Sum.part1, &hashLittle2Sum.part2);
    if (hashLittle2Sum == *pSumInPage) return true;
    if (hashLittle2Sum == *pSumInPage) {
        TEST(true); // Read HashLittle2 checksum
        return true;
    }

    if (!silent) {
        TraceEvent trEvent(SevError, "SQLitePageChecksumFailure");
@@ -127,7 +151,12 @@ struct PageChecksumCodec {
            .detail("PageSize", pageLen)
            .detail("ChecksumInPage", pSumInPage->toString())
            .detail("ChecksumCalculatedHL2", hashLittle2Sum.toString());
        if (pSumInPage->part1 == 0) trEvent.detail("ChecksumCalculatedCRC", sum.toString());
        if (pSumInPage->part1 == 0) {
            trEvent.detail("ChecksumCalculatedCRC", crc32Sum.toString());
        }
        if (pSumInPage->part1 >> 24 == 0) {
            trEvent.detail("ChecksumCalculatedXXHash3", xxHash3Sum.toString());
        }
    }
    return false;
}
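
The read path above identifies the checksum format from the stored sum itself: CRC32 pages carry part1 == 0, xxHash3 pages keep only 56 bits of the hash so the top byte of part1 is 0, and everything else is treated as hashlittle2, whose part1 is the page number. A sketch of just the bit tests and the xxHash3 packing; the hash computations themselves are omitted:

#include <cstdint>

struct SumType { uint32_t part1 = 0, part2 = 0; };

enum class ChecksumFormat { CRC32, XXHash3, HashLittle2 };

// Candidate format to verify first; on a mismatch the real code falls through
// to the next candidate instead of failing immediately.
ChecksumFormat firstCandidate(const SumType& inPage) {
    if (inPage.part1 == 0) return ChecksumFormat::CRC32;           // checked before xxHash3
    if ((inPage.part1 >> 24) == 0) return ChecksumFormat::XXHash3; // top 8 bits clear
    return ChecksumFormat::HashLittle2;                            // part1 == page number
}

// How the write path packs a 64-bit xxHash3 value into the two 32-bit parts.
SumType packXXHash3(uint64_t h) {
    SumType s;
    s.part1 = static_cast<uint32_t>((h >> 32) & 0x00ffffff); // top byte deliberately dropped
    s.part2 = static_cast<uint32_t>(h & 0xffffffff);
    return s;
}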

@@ -1510,7 +1539,7 @@ private:
    Reference<ReadCursor> getCursor() {
        Reference<ReadCursor> cursor = *ppReadCursor;
        if (!cursor) {
            *ppReadCursor = cursor = Reference<ReadCursor>(new ReadCursor);
            *ppReadCursor = cursor = makeReference<ReadCursor>();
            cursor->init(conn);
        }
        return cursor;

@@ -243,6 +243,9 @@ void ServerKnobs::initialize(bool randomize, ClientKnobs* clientKnobs, bool isSi
    init( DD_SS_FAILURE_VERSIONLAG, 250000000 );
    init( DD_SS_ALLOWED_VERSIONLAG, 200000000 ); if( randomize && BUGGIFY ) { DD_SS_FAILURE_VERSIONLAG = deterministicRandom()->randomInt(15000000, 500000000); DD_SS_ALLOWED_VERSIONLAG = 0.75 * DD_SS_FAILURE_VERSIONLAG; }
    init( DD_SS_STUCK_TIME_LIMIT, 300.0 ); if( randomize && BUGGIFY ) { DD_SS_STUCK_TIME_LIMIT = 200.0 + deterministicRandom()->random01() * 100.0; }
    init( DD_TEAMS_INFO_PRINT_INTERVAL, 60 ); if( randomize && BUGGIFY ) DD_TEAMS_INFO_PRINT_INTERVAL = 10;
    init( DD_TEAMS_INFO_PRINT_YIELD_COUNT, 100 ); if( randomize && BUGGIFY ) DD_TEAMS_INFO_PRINT_YIELD_COUNT = deterministicRandom()->random01() * 1000 + 1;
    init( DD_TEAM_ZERO_SERVER_LEFT_LOG_DELAY, 120 ); if( randomize && BUGGIFY ) DD_TEAM_ZERO_SERVER_LEFT_LOG_DELAY = 5;

    // TeamRemover
    init( TR_FLAG_DISABLE_MACHINE_TEAM_REMOVER, false ); if( randomize && BUGGIFY ) TR_FLAG_DISABLE_MACHINE_TEAM_REMOVER = deterministicRandom()->random01() < 0.1 ? true : false; // false by default. disable the consistency check when it's true

@@ -191,6 +191,9 @@ public:
    int64_t DD_SS_FAILURE_VERSIONLAG; // Allowed SS version lag from the current read version before marking it as failed.
    int64_t DD_SS_ALLOWED_VERSIONLAG; // SS will be marked as healthy if its version lag goes below this value.
    double DD_SS_STUCK_TIME_LIMIT; // If a storage server is not getting new versions for this amount of time, then it becomes undesired.
    int DD_TEAMS_INFO_PRINT_INTERVAL;
    int DD_TEAMS_INFO_PRINT_YIELD_COUNT;
    int DD_TEAM_ZERO_SERVER_LEFT_LOG_DELAY;

    // TeamRemover to remove redundant teams
    bool TR_FLAG_DISABLE_MACHINE_TEAM_REMOVER; // disable the machineTeamRemover actor

@@ -59,7 +59,7 @@ ACTOR template <class T> Future<Void> buggifyDelayedAsyncVar( Reference<AsyncVar

template <class T>
Future<Void> buggifyDelayedAsyncVar( Reference<AsyncVar<T>> &var ) {
    Reference<AsyncVar<T>> in( new AsyncVar<T> );
    auto in = makeReference<AsyncVar<T>>();
    auto f = buggifyDelayedAsyncVar(in, var);
    var = in;
    return f;

@@ -60,7 +60,7 @@ Future<Void> tryBecomeLeader( ServerCoordinators const& coordinators,
                              bool hasConnected,
                              Reference<AsyncVar<ClusterControllerPriorityInfo>> const& asyncPriorityInfo)
{
    Reference<AsyncVar<Value>> serializedInfo(new AsyncVar<Value>);
    auto serializedInfo = makeReference<AsyncVar<Value>>();
    Future<Void> m = tryBecomeLeaderInternal(coordinators, ObjectWriter::toValue(proposedInterface, IncludeVersion()),
                                             serializedInfo, hasConnected, asyncPriorityInfo);
    return m || asyncDeserialize(serializedInfo, outKnownLeader);

@@ -30,6 +30,8 @@
#include "fdbserver/ApplyMetadataMutation.h"
#include "fdbserver/RecoveryState.h"
#include "fdbclient/Atomic.h"
#include "flow/Arena.h"
#include "flow/Histogram.h"
#include "flow/TDMetric.actor.h"
#include "flow/actorcompiler.h" // This must be the last #include.

@@ -77,6 +79,7 @@ struct LogRouterData {

    const UID dbgid;
    Reference<AsyncVar<Reference<ILogSystem>>> logSystem;
    Optional<UID> primaryPeekLocation;
    NotifiedVersion version;
    NotifiedVersion minPopped;
    const Version startVersion;
@@ -91,6 +94,8 @@ struct LogRouterData {
    double maxWaitForVersionTime = 0;
    double getMoreTime = 0;
    double maxGetMoreTime = 0;
    int64_t generation = -1;
    Reference<Histogram> peekLatencyDist;

    struct PeekTrackerData {
        std::map<int, Promise<std::pair<Version, bool>>> sequence_version;
@@ -116,14 +121,19 @@ struct LogRouterData {

    //only callable after getTagData returns a null reference
    Reference<TagData> createTagData(Tag tag, Version popped, Version knownCommittedVersion) {
        Reference<TagData> newTagData(new TagData(tag, popped, knownCommittedVersion));
        auto newTagData = makeReference<TagData>(tag, popped, knownCommittedVersion);
        tag_data[tag.id] = newTagData;
        return newTagData;
    }

    LogRouterData(UID dbgid, const InitializeLogRouterRequest& req) : dbgid(dbgid), routerTag(req.routerTag), logSystem(new AsyncVar<Reference<ILogSystem>>()),
        version(req.startVersion-1), minPopped(0), startVersion(req.startVersion), allowPops(false), minKnownCommittedVersion(0), poppedVersion(0), foundEpochEnd(false),
        cc("LogRouter", dbgid.toString()), getMoreCount("GetMoreCount", cc), getMoreBlockedCount("GetMoreBlockedCount", cc) {
    LogRouterData(UID dbgid, const InitializeLogRouterRequest& req)
      : dbgid(dbgid), routerTag(req.routerTag), logSystem(new AsyncVar<Reference<ILogSystem>>()),
        version(req.startVersion - 1), minPopped(0), generation(req.recoveryCount), startVersion(req.startVersion),
        allowPops(false), minKnownCommittedVersion(0), poppedVersion(0), foundEpochEnd(false),
        cc("LogRouter", dbgid.toString()), getMoreCount("GetMoreCount", cc),
        getMoreBlockedCount("GetMoreBlockedCount", cc),
        peekLatencyDist(Histogram::getHistogram(LiteralStringRef("LogRouter"), LiteralStringRef("PeekTLogLatency"),
                                                Histogram::Unit::microseconds)) {
        //setup just enough of a logSet to be able to call getPushLocations
        logSet.logServers.resize(req.tLogLocalities.size());
        logSet.tLogPolicy = req.tLogPolicy;
@@ -138,7 +148,7 @@ struct LogRouterData {
            }
        }

        eventCacheHolder = Reference<EventCacheHolder>( new EventCacheHolder(dbgid.shortString() + ".PeekLocation") );
        eventCacheHolder = makeReference<EventCacheHolder>(dbgid.shortString() + ".PeekLocation");

        specialCounter(cc, "Version", [this](){ return this->version.get(); });
        specialCounter(cc, "MinPopped", [this](){ return this->minPopped.get(); });
@@ -150,7 +160,12 @@ struct LogRouterData {
        specialCounter(cc, "WaitForVersionMaxMS", [this](){ double val = this->maxWaitForVersionTime; this->maxWaitForVersionTime = 0; return 1000*val; });
        specialCounter(cc, "GetMoreMS", [this](){ double val = this->getMoreTime; this->getMoreTime = 0; return 1000*val; });
        specialCounter(cc, "GetMoreMaxMS", [this](){ double val = this->maxGetMoreTime; this->maxGetMoreTime = 0; return 1000*val; });
        logger = traceCounters("LogRouterMetrics", dbgid, SERVER_KNOBS->WORKER_LOGGING_INTERVAL, &cc, "LogRouterMetrics");
        specialCounter(cc, "Generation", [this]() { return this->generation; });
        logger = traceCounters("LogRouterMetrics", dbgid, SERVER_KNOBS->WORKER_LOGGING_INTERVAL, &cc,
                               "LogRouterMetrics", [this](TraceEvent& te) {
                                   te.detail("PrimaryPeekLocation", this->primaryPeekLocation);
                                   te.detail("RouterTag", this->routerTag.toString());
                               });
    }
};
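
The constructor above wires a peek-latency histogram into LogRouterData; pullAsyncData (next hunk) then computes the elapsed peek time once and feeds the same value to the histogram and the running totals. The sampling step in isolation, with a trivial stand-in for flow's Histogram:

#include <algorithm>
#include <chrono>
#include <vector>

struct Histogram { // stand-in for flow's Histogram
    std::vector<double> samples;
    void sampleSeconds(double s) { samples.push_back(s); }
};

struct PeekStats {
    double getMoreTime = 0;
    double maxGetMoreTime = 0;
    Histogram peekLatencyDist;

    void onPeekDone(std::chrono::steady_clock::time_point start) {
        double peekTime =
            std::chrono::duration<double>(std::chrono::steady_clock::now() - start).count();
        peekLatencyDist.sampleSeconds(peekTime);              // distribution
        getMoreTime += peekTime;                              // running total
        maxGetMoreTime = std::max(maxGetMoreTime, peekTime);  // high-water mark
    }
};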

@@ -257,13 +272,16 @@ ACTOR Future<Void> pullAsyncData( LogRouterData *self ) {
            state double startTime = now();
            choose {
                when(wait( getMoreF ) ) {
                    self->getMoreTime += now() - startTime;
                    self->maxGetMoreTime = std::max(self->maxGetMoreTime, now() - startTime);
                    double peekTime = now() - startTime;
                    self->peekLatencyDist->sampleSeconds(peekTime);
                    self->getMoreTime += peekTime;
                    self->maxGetMoreTime = std::max(self->maxGetMoreTime, peekTime);
                    break;
                }
                when( wait( dbInfoChange ) ) { //FIXME: does this actually happen?
                    if( self->logSystem->get() ) {
                        r = self->logSystem->get()->peekLogRouter( self->dbgid, tagAt, self->routerTag );
                        self->primaryPeekLocation = r->getPrimaryPeekLocation();
                        TraceEvent("LogRouterPeekLocation", self->dbgid).detail("LogID", r->getPrimaryPeekLocation()).trackLatest(self->eventCacheHolder->trackingKey);
                    } else {
                        r = Reference<ILogSystem::IPeekCursor>();

@@ -877,7 +877,6 @@ struct LogPushData : NonCopyable {
    void addTransactionInfo(SpanID const& context) {
        TEST(!spanContext.isValid()); // addTransactionInfo with invalid SpanID
        spanContext = context;
        transactionSubseq = 0;
        writtenLocations.clear();
    }

@@ -919,21 +918,30 @@ struct LogPushData : NonCopyable {

    BinaryWriter bw(AssumeVersion(g_network->protocolVersion()));

    // Metadata messages should be written before span information. If this
    // isn't a metadata message, make sure all locations have had
    // transaction info written to them. Mutations may have different sets
    // of tags, so it is necessary to check all tag locations each time a
    // mutation is written.
    // Metadata messages (currently LogProtocolMessage is the only metadata
    // message) should be written before span information. If this isn't a
    // metadata message, make sure all locations have had transaction info
    // written to them. Mutations may have different sets of tags, so it
    // is necessary to check all tag locations each time a mutation is
    // written.
    if (!metadataMessage) {
        // If span information hasn't been written for this transaction yet,
        // generate a subsequence value for the message.
        if (!transactionSubseq) {
            transactionSubseq = this->subsequence++;
        }

        uint32_t subseq = this->subsequence++;
        bool updatedLocation = false;
        for (int loc : msg_locations) {
            writeTransactionInfo(loc);
            updatedLocation = writeTransactionInfo(loc, subseq) || updatedLocation;
        }
        // If this message doesn't write to any new locations, the
        // subsequence wasn't actually used and can be decremented.
        if (!updatedLocation) {
            this->subsequence--;
            TEST(true); // No new SpanContextMessage written to transaction logs
            ASSERT(this->subsequence > 0);
        }
    } else {
        // When writing a metadata message, make sure transaction state has
        // been reset. If you are running into this assertion, make sure
        // you are calling addTransactionInfo before each transaction.
        ASSERT(writtenLocations.size() == 0);
    }

    uint32_t subseq = this->subsequence++;
@@ -975,33 +983,31 @@ private:
    // field.
    std::unordered_set<int> writtenLocations;
    uint32_t subsequence;
    // Store transaction subsequence separately, as multiple mutations may need
    // to write transaction info. This can happen if later mutations in a
    // transaction need to write to a different location than earlier
    // mutations.
    uint32_t transactionSubseq;
    SpanID spanContext;

    // Writes transaction info to the message stream for the given location if
    // it has not already been written (for the current transaction).
    void writeTransactionInfo(int location) {
        if (!FLOW_KNOBS->WRITE_TRACING_ENABLED || logSystem->getTLogVersion() < TLogVersion::V6) {
            return;
    // Writes transaction info to the message stream at the given location if
    // it has not already been written (for the current transaction). Returns
    // true on a successful write, and false if the location has already been
    // written.
    bool writeTransactionInfo(int location, uint32_t subseq) {
        if (!FLOW_KNOBS->WRITE_TRACING_ENABLED || logSystem->getTLogVersion() < TLogVersion::V6 || writtenLocations.count(location) != 0) {
            return false;
        }
        if (writtenLocations.count(location) == 0) {
            writtenLocations.insert(location);

            BinaryWriter& wr = messagesWriter[location];
            SpanContextMessage contextMessage(spanContext);
        TEST(true); // Wrote SpanContextMessage to a transaction log
        writtenLocations.insert(location);

            int offset = wr.getLength();
            wr << uint32_t(0) << transactionSubseq << uint16_t(prev_tags.size());
            for(auto& tag : prev_tags)
                wr << tag;
            wr << contextMessage;
            int length = wr.getLength() - offset;
            *(uint32_t*)((uint8_t*)wr.getData() + offset) = length - sizeof(uint32_t);
        }
        BinaryWriter& wr = messagesWriter[location];
        SpanContextMessage contextMessage(spanContext);

        int offset = wr.getLength();
        wr << uint32_t(0) << subseq << uint16_t(prev_tags.size());
        for(auto& tag : prev_tags)
            wr << tag;
        wr << contextMessage;
        int length = wr.getLength() - offset;
        *(uint32_t*)((uint8_t*)wr.getData() + offset) = length - sizeof(uint32_t);
        return true;
    }
};
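
writeTransactionInfo frames each SpanContextMessage with a backfilled length prefix: reserve a 32-bit slot, serialize the subsequence, tags, and message behind it, then overwrite the slot with the byte count that followed. The framing alone, over a plain byte vector:

#include <cstdint>
#include <cstring>
#include <vector>

void appendFramed(std::vector<uint8_t>& buf, const uint8_t* payload, uint32_t n) {
    size_t offset = buf.size();
    buf.insert(buf.end(), sizeof(uint32_t), 0);  // placeholder for the length
    buf.insert(buf.end(), payload, payload + n); // subsequence, tags, message...
    uint32_t length = static_cast<uint32_t>(buf.size() - offset) - sizeof(uint32_t);
    std::memcpy(buf.data() + offset, &length, sizeof(length)); // backfill the slot
}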

@@ -48,7 +48,8 @@ ILogSystem::ServerPeekCursor::ServerPeekCursor( TLogPeekReply const& results, Lo
}

Reference<ILogSystem::IPeekCursor> ILogSystem::ServerPeekCursor::cloneNoMore() {
    return Reference<ILogSystem::ServerPeekCursor>( new ILogSystem::ServerPeekCursor( results, messageVersion, end, messageAndTags, hasMsg, poppedVersion, tag ) );
    return makeReference<ILogSystem::ServerPeekCursor>(results, messageVersion, end, messageAndTags, hasMsg,
                                                       poppedVersion, tag);
}

void ILogSystem::ServerPeekCursor::setProtocolVersion( ProtocolVersion version ) {
@@ -141,8 +142,20 @@ ACTOR Future<Void> resetChecker( ILogSystem::ServerPeekCursor* self, NetworkAddr
    self->unknownReplies = 0;
    self->fastReplies = 0;
    wait(delay(SERVER_KNOBS->PEEK_STATS_INTERVAL));
    TraceEvent("SlowPeekStats").detail("PeerAddress", addr).detail("SlowReplies", self->slowReplies).detail("FastReplies", self->fastReplies).detail("UnknownReplies", self->unknownReplies);
    if(self->slowReplies >= SERVER_KNOBS->PEEK_STATS_SLOW_AMOUNT && self->slowReplies/double(self->slowReplies+self->fastReplies) >= SERVER_KNOBS->PEEK_STATS_SLOW_RATIO) {
    TraceEvent("SlowPeekStats", self->randomID)
        .detail("PeerAddress", addr)
        .detail("SlowReplies", self->slowReplies)
        .detail("FastReplies", self->fastReplies)
        .detail("UnknownReplies", self->unknownReplies);

    if (self->slowReplies >= SERVER_KNOBS->PEEK_STATS_SLOW_AMOUNT &&
        self->slowReplies / double(self->slowReplies + self->fastReplies) >= SERVER_KNOBS->PEEK_STATS_SLOW_RATIO) {

        TraceEvent("ConnectionResetSlowPeek", self->randomID)
            .detail("PeerAddress", addr)
            .detail("SlowReplies", self->slowReplies)
            .detail("FastReplies", self->fastReplies)
            .detail("UnknownReplies", self->unknownReplies);
        FlowTransport::transport().resetConnection(addr);
        self->lastReset = now();
    }
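
The reset heuristic above requires both an absolute number of slow replies and a high slow fraction before tearing down the connection, so a handful of outliers on an otherwise healthy peer never triggers a reset. The predicate by itself; the thresholds are illustrative, not the PEEK_STATS knob values:

struct PeekReplyStats {
    int slowReplies = 0;
    int fastReplies = 0;

    bool shouldResetConnection(int slowAmount = 20, double slowRatio = 0.5) const {
        if (slowReplies < slowAmount) return false; // absolute floor first
        return slowReplies / double(slowReplies + fastReplies) >= slowRatio;
    }
};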
|
||||
|
@ -351,7 +364,7 @@ ILogSystem::MergedPeekCursor::MergedPeekCursor( std::vector<Reference<AsyncVar<O
|
|||
bool parallelGetMore, std::vector< LocalityData > const& tLogLocalities, Reference<IReplicationPolicy> const tLogPolicy, int tLogReplicationFactor )
|
||||
: bestServer(bestServer), readQuorum(readQuorum), tag(tag), currentCursor(0), hasNextMessage(false), messageVersion(begin), randomID(deterministicRandom()->randomUniqueID()), tLogReplicationFactor(tLogReplicationFactor) {
|
||||
if(tLogPolicy) {
|
||||
logSet = Reference<LogSet>( new LogSet() );
|
||||
logSet = makeReference<LogSet>();
|
||||
logSet->tLogPolicy = tLogPolicy;
|
||||
logSet->tLogLocalities = tLogLocalities;
|
||||
filterLocalityDataForPolicy(logSet->tLogPolicy, &logSet->tLogLocalities);
|
||||
|
@ -359,7 +372,8 @@ ILogSystem::MergedPeekCursor::MergedPeekCursor( std::vector<Reference<AsyncVar<O
|
|||
}
|
||||
|
||||
for( int i = 0; i < logServers.size(); i++ ) {
|
||||
Reference<ILogSystem::ServerPeekCursor> cursor( new ILogSystem::ServerPeekCursor( logServers[i], tag, begin, end, bestServer >= 0, parallelGetMore ) );
|
||||
auto cursor = makeReference<ILogSystem::ServerPeekCursor>(logServers[i], tag, begin, end, bestServer >= 0,
|
||||
parallelGetMore);
|
||||
//TraceEvent("MPC_Starting", randomID).detail("Cursor", cursor->randomID).detail("End", end);
|
||||
serverCursors.push_back( cursor );
|
||||
}
|
||||
|
@ -378,7 +392,8 @@ Reference<ILogSystem::IPeekCursor> ILogSystem::MergedPeekCursor::cloneNoMore() {
|
|||
for( auto it : serverCursors ) {
|
||||
cursors.push_back(it->cloneNoMore());
|
||||
}
|
||||
return Reference<ILogSystem::MergedPeekCursor>( new ILogSystem::MergedPeekCursor( cursors, messageVersion, bestServer, readQuorum, nextVersion, logSet, tLogReplicationFactor ) );
|
||||
return makeReference<ILogSystem::MergedPeekCursor>(cursors, messageVersion, bestServer, readQuorum, nextVersion,
|
||||
logSet, tLogReplicationFactor);
|
||||
}
|
||||
|
||||
void ILogSystem::MergedPeekCursor::setProtocolVersion( ProtocolVersion version ) {
|
||||
|
@ -589,7 +604,8 @@ ILogSystem::SetPeekCursor::SetPeekCursor( std::vector<Reference<LogSet>> const&
|
|||
int maxServers = 0;
|
||||
for( int i = 0; i < logSets.size(); i++ ) {
|
||||
for( int j = 0; j < logSets[i]->logServers.size(); j++) {
|
||||
Reference<ILogSystem::ServerPeekCursor> cursor( new ILogSystem::ServerPeekCursor( logSets[i]->logServers[j], tag, begin, end, true, parallelGetMore ) );
|
||||
auto cursor = makeReference<ILogSystem::ServerPeekCursor>(logSets[i]->logServers[j], tag, begin, end, true,
|
||||
parallelGetMore);
|
||||
serverCursors[i].push_back( cursor );
|
||||
}
|
||||
maxServers = std::max<int>(maxServers, serverCursors[i].size());
|
||||
|
@ -616,7 +632,8 @@ Reference<ILogSystem::IPeekCursor> ILogSystem::SetPeekCursor::cloneNoMore() {
|
|||
cursors[i].push_back( serverCursors[i][j]->cloneNoMore() );
|
||||
}
|
||||
}
|
||||
return Reference<ILogSystem::SetPeekCursor>( new ILogSystem::SetPeekCursor( logSets, cursors, messageVersion, bestSet, bestServer, nextVersion, useBestSet ) );
|
||||
return makeReference<ILogSystem::SetPeekCursor>(logSets, cursors, messageVersion, bestSet, bestServer, nextVersion,
|
||||
useBestSet);
|
||||
}
|
||||
|
||||
void ILogSystem::SetPeekCursor::setProtocolVersion( ProtocolVersion version ) {
|
||||
|
@ -723,7 +740,7 @@ void ILogSystem::SetPeekCursor::updateMessage(int logIdx, bool usePolicy) {
|
|||
c->advanceTo(messageVersion);
|
||||
if( start <= messageVersion && messageVersion < c->version() ) {
|
||||
advancedPast = true;
|
||||
TEST(true); //Merge peek cursor advanced past desired sequence
|
||||
TEST(true); //Merge peek cursor with logIdx advanced past desired sequence
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -999,7 +1016,8 @@ ILogSystem::BufferedCursor::BufferedCursor( std::vector<Reference<AsyncVar<Optio
|
|||
messages.reserve(SERVER_KNOBS->DESIRED_OUTSTANDING_MESSAGES);
|
||||
cursorMessages.resize(logServers.size());
|
||||
for( int i = 0; i < logServers.size(); i++ ) {
|
||||
Reference<ILogSystem::ServerPeekCursor> cursor( new ILogSystem::ServerPeekCursor( logServers[i], tag, begin, end, false, parallelGetMore ) );
|
||||
auto cursor =
|
||||
makeReference<ILogSystem::ServerPeekCursor>(logServers[i], tag, begin, end, false, parallelGetMore);
|
||||
cursors.push_back( cursor );
|
||||
}
|
||||
}
|
||||
|
|
File diff suppressed because it is too large
Load Diff
|
@ -24,6 +24,7 @@
|
|||
#include "fdbclient/DatabaseContext.h"
|
||||
#include "fdbclient/ReadYourWrites.h"
|
||||
#include "fdbclient/KeyBackedTypes.h"
|
||||
#include "fdbserver/MetricLogger.h"
|
||||
#include "flow/actorcompiler.h" // This must be the last #include.
|
||||
|
||||
struct MetricsRule {
|
|
@ -461,8 +461,8 @@ namespace oldTLog_4_6 {
|
|||
state Version stopVersion = logData->version.get();
|
||||
|
||||
TEST(true); // TLog stopped by recovering master
|
||||
TEST( logData->stopped );
|
||||
TEST( !logData->stopped );
|
||||
TEST( logData->stopped ); // LogData already stopped
|
||||
TEST( !logData->stopped ); // LogData not yet stopped
|
||||
|
||||
TraceEvent("TLogStop", logData->logId).detail("Ver", stopVersion).detail("IsStopped", logData->stopped).detail("QueueCommitted", logData->queueCommittedVersion.get());
|
||||
|
||||
|
@ -1005,7 +1005,7 @@ namespace oldTLog_4_6 {
auto& sequenceData = trackerData.sequence_version[sequence+1];
if(sequenceData.isSet()) {
if(sequenceData.getFuture().get() != reply.end) {
TEST(true); //tlog peek second attempt ended at a different version
TEST(true); //tlog peek second attempt ended at a different version (2)
req.reply.sendError(operation_obsolete());
return Void();
}

@ -589,8 +589,8 @@ ACTOR Future<Void> tLogLock( TLogData* self, ReplyPromise< TLogLockResult > repl
state Version stopVersion = logData->version.get();

TEST(true); // TLog stopped by recovering master
TEST( logData->stopped );
TEST( !logData->stopped );
TEST( logData->stopped ); // logData already stopped
TEST( !logData->stopped ); // logData not yet stopped

TraceEvent("TLogStop", logData->logId).detail("Ver", stopVersion).detail("IsStopped", logData->stopped).detail("QueueCommitted", logData->queueCommittedVersion.get());

@ -1295,7 +1295,7 @@ ACTOR Future<Void> tLogPeekMessages( TLogData* self, TLogPeekRequest req, Refere
if(sequenceData.isSet()) {
trackerData.duplicatePeeks++;
if(sequenceData.getFuture().get().first != reply.end) {
TEST(true); //tlog peek second attempt ended at a different version
TEST(true); //tlog peek second attempt ended at a different version (2)
req.reply.sendError(operation_obsolete());
return Void();
}

@ -680,8 +680,8 @@ ACTOR Future<Void> tLogLock( TLogData* self, ReplyPromise< TLogLockResult > repl
state Version stopVersion = logData->version.get();

TEST(true); // TLog stopped by recovering master
TEST( logData->stopped );
TEST( !logData->stopped );
TEST( logData->stopped ); // logData already stopped
TEST( !logData->stopped ); // logData not yet stopped

TraceEvent("TLogStop", logData->logId).detail("Ver", stopVersion).detail("IsStopped", logData->stopped).detail("QueueCommitted", logData->queueCommittedVersion.get());

@ -1689,7 +1689,7 @@ ACTOR Future<Void> tLogPeekMessages( TLogData* self, TLogPeekRequest req, Refere
if(sequenceData.isSet()) {
trackerData.duplicatePeeks++;
if(sequenceData.getFuture().get().first != reply.end) {
TEST(true); //tlog peek second attempt ended at a different version
TEST(true); //tlog peek second attempt ended at a different version (2)
req.reply.sendError(operation_obsolete());
return Void();
}

@ -31,7 +31,8 @@
#include <map>
#include <stdexcept>

#include "Arena.h"
#include "fdbserver/IKeyValueContainer.h"
#include "flow/Arena.h"

// forward declaration
const int LEAF_BYTE = -1;

@ -1118,7 +1118,9 @@ void updateRate(RatekeeperData* self, RatekeeperLimits* limits) {
}

self->healthMetrics.worstStorageQueue = worstStorageQueueStorageServer;
self->healthMetrics.limitingStorageQueue = limitingStorageQueueStorageServer;
self->healthMetrics.worstStorageDurabilityLag = worstDurabilityLag;
self->healthMetrics.limitingStorageDurabilityLag = limitingDurabilityLag;

double writeToReadLatencyLimit = 0;
Version worstVersionLag = 0;
@ -43,8 +43,7 @@ ACTOR static Future<Void> handleApplyToDBRequest(RestoreVersionBatchRequest req,
void handleUpdateRateRequest(RestoreUpdateRateRequest req, Reference<RestoreApplierData> self);

ACTOR Future<Void> restoreApplierCore(RestoreApplierInterface applierInterf, int nodeIndex, Database cx) {
state Reference<RestoreApplierData> self =
Reference<RestoreApplierData>(new RestoreApplierData(applierInterf.id(), nodeIndex));
state Reference<RestoreApplierData> self = makeReference<RestoreApplierData>(applierInterf.id(), nodeIndex);
state ActorCollection actors(false);
state Future<Void> exitRole = Never();

@ -774,4 +773,4 @@ Value applyAtomicOp(Optional<StringRef> existingValue, Value value, MutationRef:
ASSERT(false);
}
return Value();
}
}

@ -108,7 +108,7 @@ ACTOR Future<Void> startRestoreController(Reference<RestoreWorkerData> controlle
ASSERT(controllerWorker.isValid());
ASSERT(controllerWorker->controllerInterf.present());
state Reference<RestoreControllerData> self =
Reference<RestoreControllerData>(new RestoreControllerData(controllerWorker->controllerInterf.get().id()));
makeReference<RestoreControllerData>(controllerWorker->controllerInterf.get().id());
state Future<Void> error = actorCollection(self->addActor.getFuture());

try {

@ -373,8 +373,8 @@ ACTOR static Future<Version> processRestoreRequest(Reference<RestoreControllerDa
.detail("BatchSize", versionBatch->size)
.detail("RunningVersionBatches", self->runningVersionBatches.get())
.detail("VersionBatches", versionBatches.size());
self->batch[batchIndex] = Reference<ControllerBatchData>(new ControllerBatchData());
self->batchStatus[batchIndex] = Reference<ControllerBatchStatus>(new ControllerBatchStatus());
self->batch[batchIndex] = makeReference<ControllerBatchData>();
self->batchStatus[batchIndex] = makeReference<ControllerBatchStatus>();
fBatches.push_back(distributeWorkloadPerVersionBatch(self, batchIndex, cx, request, *versionBatch));
// Wait a bit to give the current version batch a head start from the next version batch
wait(delay(SERVER_KNOBS->FASTRESTORE_VB_LAUNCH_DELAY));

@ -1164,4 +1164,4 @@ ACTOR static Future<Void> checkRolesLiveness(Reference<RestoreControllerData> se
}
}
}
}
}
@ -218,8 +218,7 @@ ACTOR Future<Void> dispatchRequests(Reference<RestoreLoaderData> self) {

ACTOR Future<Void> restoreLoaderCore(RestoreLoaderInterface loaderInterf, int nodeIndex, Database cx,
RestoreControllerInterface ci) {
state Reference<RestoreLoaderData> self =
Reference<RestoreLoaderData>(new RestoreLoaderData(loaderInterf.id(), nodeIndex, ci));
state Reference<RestoreLoaderData> self = makeReference<RestoreLoaderData>(loaderInterf.id(), nodeIndex, ci);
state Future<Void> error = actorCollection(self->addActor.getFuture());
state ActorCollection actors(false); // actors whose errors can be ignored
state Future<Void> exitRole = Never();

@ -188,7 +188,7 @@ struct RestoreLoaderData : RestoreRoleData, public ReferenceCounted<RestoreLoade
nodeID = loaderInterfID;
nodeIndex = assignedIndex;
role = RestoreRole::Loader;
hasPendingRequests = Reference<AsyncVar<bool>>(new AsyncVar<bool>(false));
hasPendingRequests = makeReference<AsyncVar<bool>>(false);
}

~RestoreLoaderData() = default;

@ -216,8 +216,8 @@ struct RestoreLoaderData : RestoreRoleData, public ReferenceCounted<RestoreLoade

void initVersionBatch(int batchIndex) {
TraceEvent("FastRestoreLoaderInitVersionBatch", nodeID).detail("BatchIndex", batchIndex);
batch[batchIndex] = Reference<LoaderBatchData>(new LoaderBatchData(nodeID, batchIndex));
status[batchIndex] = Reference<LoaderBatchStatus>(new LoaderBatchStatus());
batch[batchIndex] = makeReference<LoaderBatchData>(nodeID, batchIndex);
status[batchIndex] = makeReference<LoaderBatchStatus>();
}

void resetPerRestoreRequest() {

@ -340,10 +340,9 @@ ACTOR Future<Void> monitorleader(Reference<AsyncVar<RestoreWorkerInterface>> lea
ACTOR Future<Void> _restoreWorker(Database cx, LocalityData locality) {
state ActorCollection actors(false);
state Future<Void> myWork = Never();
state Reference<AsyncVar<RestoreWorkerInterface>> leader =
Reference<AsyncVar<RestoreWorkerInterface>>(new AsyncVar<RestoreWorkerInterface>());
state Reference<AsyncVar<RestoreWorkerInterface>> leader = makeReference<AsyncVar<RestoreWorkerInterface>>();
state RestoreWorkerInterface myWorkerInterf;
state Reference<RestoreWorkerData> self = Reference<RestoreWorkerData>(new RestoreWorkerData());
state Reference<RestoreWorkerData> self = makeReference<RestoreWorkerData>();

myWorkerInterf.initEndpoints();
self->workerID = myWorkerInterf.id();
@ -96,7 +96,7 @@ ACTOR Future<Void> runDr( Reference<ClusterConnectionFile> connFile ) {
if (g_simulator.drAgents == ISimulator::BackupToDB) {
Database cx = Database::createDatabase(connFile, -1);

Reference<ClusterConnectionFile> extraFile(new ClusterConnectionFile(*g_simulator.extraDB));
auto extraFile = makeReference<ClusterConnectionFile>(*g_simulator.extraDB);
state Database extraDB = Database::createDatabase(extraFile, -1);

TraceEvent("StartingDrAgents").detail("ConnFile", connFile->getConnectionString().toString()).detail("ExtraString", extraFile->getConnectionString().toString());

@ -271,10 +271,11 @@ ACTOR Future<ISimulator::KillType> simulatedFDBDRebooter(Reference<ClusterConnec

if(!useSeedFile) {
writeFile(joinPath(*dataFolder, "fdb.cluster"), connStr.toString());
connFile = Reference<ClusterConnectionFile>( new ClusterConnectionFile( joinPath( *dataFolder, "fdb.cluster" )));
connFile = makeReference<ClusterConnectionFile>(joinPath(*dataFolder, "fdb.cluster"));
}
else {
connFile = Reference<ClusterConnectionFile>( new ClusterConnectionFile( joinPath( *dataFolder, "fdb.cluster" ), connStr.toString() ) );
connFile =
makeReference<ClusterConnectionFile>(joinPath(*dataFolder, "fdb.cluster"), connStr.toString());
}
}
else {

@ -652,8 +653,9 @@ ACTOR Future<Void> restartSimulatedSystem(vector<Future<Void>>* systemActors, st
// SOMEDAY: parse backup agent from test file
systemActors->push_back(reportErrors(
simulatedMachine(conn, ipAddrs, usingSSL, localities, processClass, baseFolder, true,
i == useSeedForMachine, enableExtraDB ? AgentAddition : AgentNone,
usingSSL && (listenersPerProcess == 1 || processClass == ProcessClass::TesterClass), whitelistBinPaths, protocolVersion),
i == useSeedForMachine, AgentAddition,
usingSSL && (listenersPerProcess == 1 || processClass == ProcessClass::TesterClass),
whitelistBinPaths, protocolVersion),
processClass == ProcessClass::TesterClass ? "SimulatedTesterMachine" : "SimulatedMachine"));
}

@ -764,7 +766,7 @@ void SimulationConfig::generateNormalConfig(int minimumReplication, int minimumR
break;
}
case 3: {
TEST(true); // Simulated cluster using radix-tree storage engine
TEST(true); // Simulated cluster using redwood storage engine
set_config("ssd-redwood-experimental");
break;
}

@ -865,7 +867,7 @@ void SimulationConfig::generateNormalConfig(int minimumReplication, int minimumR
int satellite_replication_type = deterministicRandom()->randomInt(0,3);
switch (satellite_replication_type) {
case 0: {
TEST( true ); // Simulated cluster using no satellite redundancy mode
TEST( true ); // Simulated cluster using no satellite redundancy mode (>4 datacenters)
break;
}
case 1: {

@ -892,7 +894,7 @@ void SimulationConfig::generateNormalConfig(int minimumReplication, int minimumR
break;
}
case 1: {
TEST( true ); // Simulated cluster using no satellite redundancy mode
TEST( true ); // Simulated cluster using no satellite redundancy mode (<4 datacenters)
break;
}
case 2: {

@ -1146,8 +1148,8 @@ void setupSimulatedSystem(vector<Future<Void>>* systemActors, std::string baseFo

// Use IPv6 25% of the time
bool useIPv6 = deterministicRandom()->random01() < 0.25;
TEST( useIPv6 );
TEST( !useIPv6 );
TEST( useIPv6 ); // Use IPv6
TEST( !useIPv6 ); // Use IPv4

vector<NetworkAddress> coordinatorAddresses;
if(minimumRegions > 1) {

@ -1452,10 +1454,9 @@ ACTOR void setupAndRun(std::string dataFolder, const char *testFile, bool reboot
std::string clusterFileDir = joinPath( dataFolder, deterministicRandom()->randomUniqueID().toString() );
platform::createDirectory( clusterFileDir );
writeFile(joinPath(clusterFileDir, "fdb.cluster"), connFile.get().toString());
wait(timeoutError(runTests(Reference<ClusterConnectionFile>(
new ClusterConnectionFile(joinPath(clusterFileDir, "fdb.cluster"))),
TEST_TYPE_FROM_FILE, TEST_ON_TESTERS, testerCount, testFile, startingConfiguration),
isBuggifyEnabled(BuggifyType::General) ? 36000.0 : 5400.0));
wait(timeoutError(runTests(makeReference<ClusterConnectionFile>(joinPath(clusterFileDir, "fdb.cluster")),
TEST_TYPE_FROM_FILE, TEST_ON_TESTERS, testerCount, testFile, startingConfiguration),
isBuggifyEnabled(BuggifyType::General) ? 36000.0 : 5400.0));
} catch (Error& e) {
TraceEvent(SevError, "SetupAndRunError").error(e);
}
@ -487,6 +487,7 @@ struct RolesInfo {

obj["data_lag"] = getLagObject(versionLag);
obj["durability_lag"] = getLagObject(version - durableVersion);
dataLagSeconds = versionLag / (double)SERVER_KNOBS->VERSIONS_PER_SECOND;

TraceEventFields const& busiestReadTag = metrics.at("BusiestReadTag");
if(busiestReadTag.size()) {

@ -1118,7 +1119,7 @@ ACTOR static Future<JsonBuilderObject> recoveryStateStatusFetcher(Database cx, W
}

ACTOR static Future<double> doGrvProbe(Transaction *tr, Optional<FDBTransactionOptions::Option> priority = Optional<FDBTransactionOptions::Option>()) {
state double start = timer_monotonic();
state double start = g_network->timer_monotonic();

loop {
try {
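The status-probe hunks from here on replace the free function timer_monotonic() with g_network->timer_monotonic(), and a later hunk swaps time(0) for g_network->timer() in clusterGetStatus. Routing time lookups through the network interface lets the simulator substitute its own deterministic clock. A sketch of the idea, where the interface shape is a simplified assumption rather than the actual INetwork declaration:

    // Hypothetical shape of a virtualized clock: a real deployment returns
    // the OS clock, while the simulator returns simulated time, so probe
    // latencies are deterministic and reproducible under simulation.
    struct INetwork {
        virtual double timer_monotonic() = 0;  // monotonic seconds
        virtual double timer() = 0;            // wall-clock seconds since epoch
        virtual ~INetwork() = default;
    };
    extern INetwork* g_network;

    double timeOperation() {
        double start = g_network->timer_monotonic();
        // ... perform the probed operation ...
        return g_network->timer_monotonic() - start;
    }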
@ -1128,7 +1129,7 @@ ACTOR static Future<double> doGrvProbe(Transaction *tr, Optional<FDBTransactionO
}

wait(success(tr->getReadVersion()));
return timer_monotonic() - start;
return g_network->timer_monotonic() - start;
}
catch(Error &e) {
wait(tr->onError(e));

@ -1142,13 +1143,13 @@ ACTOR static Future<double> doReadProbe(Future<double> grvProbe, Transaction *tr
throw grv.getError();
}

state double start = timer_monotonic();
state double start = g_network->timer_monotonic();

loop {
tr->setOption(FDBTransactionOptions::LOCK_AWARE);
try {
Optional<Standalone<StringRef> > _ = wait(tr->get(LiteralStringRef("\xff/StatusJsonTestKey62793")));
return timer_monotonic() - start;
return g_network->timer_monotonic() - start;
}
catch(Error &e) {
wait(tr->onError(e));

@ -1166,7 +1167,7 @@ ACTOR static Future<double> doCommitProbe(Future<double> grvProbe, Transaction *
ASSERT(sourceTr->getReadVersion().isReady());
tr->setVersion(sourceTr->getReadVersion().get());

state double start = timer_monotonic();
state double start = g_network->timer_monotonic();

loop {
try {

@ -1174,7 +1175,7 @@ ACTOR static Future<double> doCommitProbe(Future<double> grvProbe, Transaction *
tr->setOption(FDBTransactionOptions::PRIORITY_SYSTEM_IMMEDIATE);
tr->makeSelfConflicting();
wait(tr->commit());
return timer_monotonic() - start;
return g_network->timer_monotonic() - start;
}
catch(Error &e) {
wait(tr->onError(e));

@ -2738,7 +2739,7 @@ ACTOR Future<StatusReply> clusterGetStatus(

statusObj["messages"] = messages;

int64_t clusterTime = time(0);
int64_t clusterTime = g_network->timer();
if (clusterTime != -1){
statusObj["cluster_controller_timestamp"] = clusterTime;
}
@ -1196,7 +1196,7 @@ ACTOR Future<Void> fetchKeys( StorageCacheData *data, AddingCacheRange* cacheRan
lastAvailable = std::max(lastAvailable, r->value());

if (lastAvailable != invalidVersion && lastAvailable >= data->oldestVersion.get()) {
TEST(true);
TEST(true); // wait for oldest version
wait( data->oldestVersion.whenAtLeast(lastAvailable+1) );
}

@ -1388,9 +1388,9 @@ ACTOR Future<Void> fetchKeys( StorageCacheData *data, AddingCacheRange* cacheRan
//++data->counters.fetchExecutingCount;
//data->counters.fetchExecutingMS += 1000*(now() - executeStart);

TraceEvent(SevDebug, interval.end(), data->thisServerID);
// TraceEvent(SevDebug, interval.end(), data->thisServerID);
} catch (Error &e){
TraceEvent(SevDebug, interval.end(), data->thisServerID).error(e, true).detail("Version", data->version.get());
// TraceEvent(SevDebug, interval.end(), data->thisServerID).error(e, true).detail("Version", data->version.get());

// TODO define the shuttingDown state of cache server
if (e.code() == error_code_actor_cancelled && /* !data->shuttingDown &&*/ cacheRange->phase >= AddingCacheRange::Fetching) {

@ -1951,7 +1951,7 @@ ACTOR Future<Void> storageCacheStartUpWarmup(StorageCacheData* self) {
state Transaction tr(self->cx);
state Value trueValue = storageCacheValue(std::vector<uint16_t>{ 0 });
state Value falseValue = storageCacheValue(std::vector<uint16_t>{});
state MutationRef privatized;
state Standalone<MutationRef> privatized;
privatized.type = MutationRef::SetValue;
state Version readVersion;
try {

@ -1969,7 +1969,7 @@ ACTOR Future<Void> storageCacheStartUpWarmup(StorageCacheData* self) {
ASSERT(currCached == (kv.value == falseValue));
if (kv.value == trueValue) {
begin = kv.key;
privatized.param1 = begin.withPrefix(systemKeys.begin);
privatized.param1 = begin.withPrefix(systemKeys.begin, privatized.arena());
privatized.param2 = serverKeysTrue;
//TraceEvent(SevDebug, "SCStartupFetch", self->thisServerID).
// detail("BeginKey", begin.substr(storageCacheKeys.begin.size())).

@ -1979,7 +1979,7 @@ ACTOR Future<Void> storageCacheStartUpWarmup(StorageCacheData* self) {
} else {
currCached = false;
end = kv.key;
privatized.param1 = begin.withPrefix(systemKeys.begin);
privatized.param1 = begin.withPrefix(systemKeys.begin, privatized.arena());
privatized.param2 = serverKeysFalse;
//TraceEvent(SevDebug, "SCStartupFetch", self->thisServerID).detail("EndKey", end.substr(storageCacheKeys.begin.size())).
// detail("ReadVersion", readVersion).detail("DataVersion", self->version.get());
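The storageCacheStartUpWarmup hunks above change privatized from MutationRef to Standalone<MutationRef> and pass privatized.arena() into withPrefix. The point is lifetime: a MutationRef's params are non-owning views, so the freshly built prefixed key must live in memory that outlives the mutation, and Standalone bundles a value with exactly such an arena. The view-plus-arena pattern in miniature, using simplified stand-in types rather than the real flow ones:

    #include <cstddef>
    #include <deque>
    #include <string>

    struct Arena {  // owns backing memory
        const char* dup(const std::string& s) {
            blocks.push_back(s);
            return blocks.back().data();
        }
    private:
        std::deque<std::string> blocks;  // deque growth never relocates elements
    };

    struct StringRef {  // non-owning view, like MutationRef::param1/param2
        const char* data = nullptr;
        std::size_t size = 0;
    };

    // Build prefix+key in storage owned by `arena`; the returned view stays
    // valid exactly as long as the arena does.
    StringRef withPrefix(StringRef key, StringRef prefix, Arena& arena) {
        std::string joined(prefix.data, prefix.size);
        joined.append(key.data, key.size);
        return StringRef{ arena.dup(joined), joined.size() };
    }

Standalone<T> is essentially T plus an Arena, so allocating param1 in privatized.arena() makes the mutation self-contained.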
@ -212,9 +212,9 @@ struct StorageServerMetrics {
void notify( KeyRef key, StorageMetrics& metrics ) {
ASSERT (metrics.bytes == 0); // ShardNotifyMetrics
if (g_network->isSimulated()) {
TEST(metrics.bytesPerKSecond != 0); // ShardNotifyMetrics
TEST(metrics.iosPerKSecond != 0); // ShardNotifyMetrics
TEST(metrics.bytesReadPerKSecond != 0); // ShardNotifyMetrics
TEST(metrics.bytesPerKSecond != 0); // ShardNotifyMetrics bytes
TEST(metrics.iosPerKSecond != 0); // ShardNotifyMetrics ios
TEST(metrics.bytesReadPerKSecond != 0); // ShardNotifyMetrics bytesRead
}

double expire = now() + SERVER_KNOBS->STORAGE_METRICS_AVERAGE_INTERVAL;

@ -232,7 +232,7 @@ struct StorageServerMetrics {
auto& v = waitMetricsMap[key];
for(int i=0; i<v.size(); i++) {
if (g_network->isSimulated()) {
TEST(true);
TEST(true); // shard notify metrics
}
// ShardNotifyMetrics
v[i].send( notifyMetrics );

@ -482,7 +482,7 @@ struct LogData : NonCopyable, public ReferenceCounted<LogData> {
if(tag.locality != tagLocalityLogRouter && tag.locality != tagLocalityTxs && tag != txsTag && allTags.size() && !allTags.count(tag) && popped <= recoveredAt) {
popped = recoveredAt + 1;
}
Reference<TagData> newTagData = Reference<TagData>( new TagData(tag, popped, 0, nothingPersistent, poppedRecently, unpoppedRecovered) );
auto newTagData = makeReference<TagData>(tag, popped, 0, nothingPersistent, poppedRecently, unpoppedRecovered);
tag_data[tag.toTagDataIndex()][tag.id] = newTagData;
return newTagData;
}

@ -600,6 +600,7 @@ struct LogData : NonCopyable, public ReferenceCounted<LogData> {
specialCounter(cc, "QueueDiskBytesTotal", [tLogData](){ return tLogData->rawPersistentQueue->getStorageBytes().total; });
specialCounter(cc, "PeekMemoryReserved", [tLogData]() { return tLogData->peekMemoryLimiter.activePermits(); });
specialCounter(cc, "PeekMemoryRequestsStalled", [tLogData]() { return tLogData->peekMemoryLimiter.waiters(); });
specialCounter(cc, "Geneartion", [this]() { return this->recoveryCount; });
}

~LogData() {
@ -704,8 +705,8 @@ ACTOR Future<Void> tLogLock( TLogData* self, ReplyPromise< TLogLockResult > repl
state Version stopVersion = logData->version.get();

TEST(true); // TLog stopped by recovering master
TEST( logData->stopped );
TEST( !logData->stopped );
TEST( logData->stopped ); // logData already stopped
TEST( !logData->stopped ); // logData not yet stopped

TraceEvent("TLogStop", logData->logId).detail("Ver", stopVersion).detail("IsStopped", logData->stopped).detail("QueueCommitted", logData->queueCommittedVersion.get());

@ -1728,7 +1729,7 @@ ACTOR Future<Void> tLogPeekMessages( TLogData* self, TLogPeekRequest req, Refere
if(sequenceData.isSet()) {
trackerData.duplicatePeeks++;
if(sequenceData.getFuture().get().first != reply.end) {
TEST(true); //tlog peek second attempt ended at a different version
TEST(true); //tlog peek second attempt ended at a different version (2)
req.reply.sendError(operation_obsolete());
return Void();
}
@ -2593,7 +2594,8 @@ ACTOR Future<Void> restorePersistentState( TLogData* self, LocalityData locality
TLogSpillType logSpillType = BinaryReader::fromStringRef<TLogSpillType>( fTLogSpillTypes.get()[idx].value, AssumeVersion(protocolVersion) );

//We do not need the remoteTag, because we will not be loading any additional data
logData = Reference<LogData>( new LogData(self, recruited, Tag(), true, id_logRouterTags[id1], id_txsTags[id1], UID(), protocolVersion, logSpillType, std::vector<Tag>(), "Restored") );
logData = makeReference<LogData>(self, recruited, Tag(), true, id_logRouterTags[id1], id_txsTags[id1], UID(),
protocolVersion, logSpillType, std::vector<Tag>(), "Restored");
logData->locality = id_locality[id1];
logData->stopped = true;
self->id_data[id1] = logData;

@ -2805,7 +2807,9 @@ ACTOR Future<Void> tLogStart( TLogData* self, InitializeTLogRequest req, Localit
stopAllTLogs(self, recruited.id());

bool recovering = (req.recoverFrom.logSystemType == LogSystemType::tagPartitioned);
state Reference<LogData> logData = Reference<LogData>( new LogData(self, recruited, req.remoteTag, req.isPrimary, req.logRouterTags, req.txsTags, req.recruitmentID, g_network->protocolVersion(), req.spillType, req.allTags, recovering ? "Recovered" : "Recruited") );
state Reference<LogData> logData = makeReference<LogData>(
self, recruited, req.remoteTag, req.isPrimary, req.logRouterTags, req.txsTags, req.recruitmentID,
g_network->protocolVersion(), req.spillType, req.allTags, recovering ? "Recovered" : "Recruited");
self->id_data[recruited.id()] = logData;
logData->locality = req.locality;
logData->recoveryCount = req.epoch;
@ -60,7 +60,7 @@ struct OldLogData {
pseudoLocalities(conf.pseudoLocalities), epoch(conf.epoch) {
tLogs.resize(conf.tLogs.size());
for (int j = 0; j < conf.tLogs.size(); j++) {
Reference<LogSet> logSet(new LogSet(conf.tLogs[j]));
auto logSet = makeReference<LogSet>(conf.tLogs[j]);
tLogs[j] = logSet;
}
}

@ -84,13 +84,13 @@ LogSet::LogSet(const TLogSet& tLogSet) :
satelliteTagLocations(tLogSet.satelliteTagLocations)
{
for (const auto& log : tLogSet.tLogs) {
logServers.emplace_back(new AsyncVar<OptionalInterface<TLogInterface>>(log));
logServers.push_back(makeReference<AsyncVar<OptionalInterface<TLogInterface>>>(log));
}
for (const auto& log : tLogSet.logRouters) {
logRouters.emplace_back(new AsyncVar<OptionalInterface<TLogInterface>>(log));
logRouters.push_back(makeReference<AsyncVar<OptionalInterface<TLogInterface>>>(log));
}
for (const auto& log : tLogSet.backupWorkers) {
backupWorkers.emplace_back(new AsyncVar<OptionalInterface<BackupInterface>>(log));
backupWorkers.push_back(makeReference<AsyncVar<OptionalInterface<BackupInterface>>>(log));
}
filterLocalityDataForPolicy(tLogPolicy, &tLogLocalities);
updateLocalitySet(tLogLocalities);

@ -105,7 +105,8 @@ LogSet::LogSet(const CoreTLogSet& coreSet) :
satelliteTagLocations(coreSet.satelliteTagLocations)
{
for (const auto& log : coreSet.tLogs) {
logServers.emplace_back(new AsyncVar<OptionalInterface<TLogInterface>>(OptionalInterface<TLogInterface>(log)));
logServers.push_back(
makeReference<AsyncVar<OptionalInterface<TLogInterface>>>(OptionalInterface<TLogInterface>(log)));
}
// Do NOT recover coreSet.backupWorkers, because master will recruit new ones.
filterLocalityDataForPolicy(tLogPolicy, &tLogLocalities);

@ -285,8 +286,7 @@ struct TagPartitionedLogSystem : ILogSystem, ReferenceCounted<TagPartitionedLogS
static Reference<ILogSystem> fromLogSystemConfig( UID const& dbgid, LocalityData const& locality, LogSystemConfig const& lsConf, bool excludeRemote, bool useRecoveredAt, Optional<PromiseStream<Future<Void>>> addActor ) {
ASSERT(lsConf.logSystemType == LogSystemType::tagPartitioned || (lsConf.logSystemType == LogSystemType::empty && !lsConf.tLogs.size()));
//ASSERT(lsConf.epoch == epoch); //< FIXME
Reference<TagPartitionedLogSystem> logSystem(
new TagPartitionedLogSystem(dbgid, locality, lsConf.epoch, addActor));
auto logSystem = makeReference<TagPartitionedLogSystem>(dbgid, locality, lsConf.epoch, addActor);

logSystem->tLogs.reserve(lsConf.tLogs.size());
logSystem->expectedLogSets = lsConf.expectedLogSets;

@ -300,7 +300,7 @@ struct TagPartitionedLogSystem : ILogSystem, ReferenceCounted<TagPartitionedLogS
logSystem->pseudoLocalities = lsConf.pseudoLocalities;
for (const TLogSet& tLogSet : lsConf.tLogs) {
if (!excludeRemote || tLogSet.isLocal) {
logSystem->tLogs.emplace_back(new LogSet(tLogSet));
logSystem->tLogs.push_back(makeReference<LogSet>(tLogSet));
}
}
@ -320,11 +320,11 @@ struct TagPartitionedLogSystem : ILogSystem, ReferenceCounted<TagPartitionedLogS
ASSERT( lsConf.logSystemType == LogSystemType::tagPartitioned || (lsConf.logSystemType == LogSystemType::empty && !lsConf.tLogs.size()) );
//ASSERT(lsConf.epoch == epoch); //< FIXME
const LogEpoch e = lsConf.oldTLogs.size() > 0 ? lsConf.oldTLogs[0].epoch : 0;
Reference<TagPartitionedLogSystem> logSystem(new TagPartitionedLogSystem(dbgid, locality, e));
auto logSystem = makeReference<TagPartitionedLogSystem>(dbgid, locality, e);

if (lsConf.oldTLogs.size()) {
for (const TLogSet& tLogSet : lsConf.oldTLogs[0].tLogs) {
logSystem->tLogs.emplace_back(new LogSet(tLogSet));
logSystem->tLogs.push_back(makeReference<LogSet>(tLogSet));
}
logSystem->logRouterTags = lsConf.oldTLogs[0].logRouterTags;
logSystem->txsTags = lsConf.oldTLogs[0].txsTags;

@ -538,7 +538,7 @@ struct TagPartitionedLogSystem : ILogSystem, ReferenceCounted<TagPartitionedLogS
if(it->isLocal && it->logServers.size()) {
if(it->connectionResetTrackers.size() == 0) {
for(int i = 0; i < it->logServers.size(); i++) {
it->connectionResetTrackers.push_back(Reference<ConnectionResetInfo>( new ConnectionResetInfo() ));
it->connectionResetTrackers.push_back(makeReference<ConnectionResetInfo>());
}
}
vector<Future<Void>> tLogCommitResults;

@ -582,14 +582,17 @@ struct TagPartitionedLogSystem : ILogSystem, ReferenceCounted<TagPartitionedLogS

if(begin >= lastBegin && localSets.size()) {
TraceEvent("TLogPeekAllCurrentOnly", dbgid).detail("Tag", tag.toString()).detail("Begin", begin).detail("End", end).detail("BestLogs", localSets[bestSet]->logServerString());
return Reference<ILogSystem::SetPeekCursor>( new ILogSystem::SetPeekCursor( localSets, bestSet, localSets[bestSet]->bestLocationFor( tag ), tag, begin, end, parallelGetMore ) );
return makeReference<ILogSystem::SetPeekCursor>(
localSets, bestSet, localSets[bestSet]->bestLocationFor(tag), tag, begin, end, parallelGetMore);
} else {
std::vector< Reference<ILogSystem::IPeekCursor> > cursors;
std::vector< LogMessageVersion > epochEnds;

if(lastBegin < end && localSets.size()) {
TraceEvent("TLogPeekAllAddingCurrent", dbgid).detail("Tag", tag.toString()).detail("Begin", begin).detail("End", end).detail("BestLogs", localSets[bestSet]->logServerString());
cursors.emplace_back(new ILogSystem::SetPeekCursor( localSets, bestSet, localSets[bestSet]->bestLocationFor( tag ), tag, lastBegin, end, parallelGetMore));
cursors.push_back(makeReference<ILogSystem::SetPeekCursor>(localSets, bestSet,
localSets[bestSet]->bestLocationFor(tag),
tag, lastBegin, end, parallelGetMore));
}
for (int i = 0; begin < lastBegin; i++) {
if(i == oldLogData.size()) {

@ -597,7 +600,9 @@ struct TagPartitionedLogSystem : ILogSystem, ReferenceCounted<TagPartitionedLogS
break;
}
TraceEvent("TLogPeekAllDead", dbgid).detail("Tag", tag.toString()).detail("Begin", begin).detail("End", end).detail("LastBegin", lastBegin).detail("OldLogDataSize", oldLogData.size());
return Reference<ILogSystem::ServerPeekCursor>( new ILogSystem::ServerPeekCursor( Reference<AsyncVar<OptionalInterface<TLogInterface>>>(), tag, begin, getPeekEnd(), false, false ) );
return makeReference<ILogSystem::ServerPeekCursor>(
Reference<AsyncVar<OptionalInterface<TLogInterface>>>(), tag, begin, getPeekEnd(), false,
false);
}

int bestOldSet = 0;

@ -623,7 +628,9 @@ struct TagPartitionedLogSystem : ILogSystem, ReferenceCounted<TagPartitionedLogS
if(!cursors.size() && !foundSpecial) {
continue;
}
return Reference<ILogSystem::ServerPeekCursor>( new ILogSystem::ServerPeekCursor( Reference<AsyncVar<OptionalInterface<TLogInterface>>>(), tag, begin, getPeekEnd(), false, false ) );
return makeReference<ILogSystem::ServerPeekCursor>(
Reference<AsyncVar<OptionalInterface<TLogInterface>>>(), tag, begin, getPeekEnd(), false,
false);
}
if(thisSpecial) {
foundSpecial = true;
@ -632,14 +639,16 @@ struct TagPartitionedLogSystem : ILogSystem, ReferenceCounted<TagPartitionedLogS
if(thisBegin < lastBegin) {
if(thisBegin < end) {
TraceEvent("TLogPeekAllAddingOld", dbgid).detail("Tag", tag.toString()).detail("Begin", begin).detail("End", end).detail("BestLogs", localOldSets[bestOldSet]->logServerString()).detail("LastBegin", lastBegin).detail("ThisBegin", thisBegin);
cursors.emplace_back(new ILogSystem::SetPeekCursor(localOldSets, bestOldSet, localOldSets[bestOldSet]->bestLocationFor( tag ), tag, thisBegin, std::min(lastBegin, end), parallelGetMore));
cursors.push_back(makeReference<ILogSystem::SetPeekCursor>(
localOldSets, bestOldSet, localOldSets[bestOldSet]->bestLocationFor(tag), tag, thisBegin,
std::min(lastBegin, end), parallelGetMore));
epochEnds.push_back(LogMessageVersion(std::min(lastBegin, end)));
}
lastBegin = thisBegin;
}
}

return Reference<ILogSystem::MultiCursor>( new ILogSystem::MultiCursor(cursors, epochEnds) );
return makeReference<ILogSystem::MultiCursor>(cursors, epochEnds);
}
}
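The peek logic in the surrounding hunks follows one shape: cover the requested version range with one cursor per log generation, newest first, recording in epochEnds where each generation hands off to the next, then splice everything together in a MultiCursor. Reduced to that shape with plain stand-in types (a sketch, not the actual FDB implementation):

    #include <algorithm>
    #include <vector>

    struct CursorRange { long from, to; };  // stand-in for one peek cursor

    // Cover [begin, end) with one range per generation. genStarts holds each
    // generation's start version, newest first; older generations are consulted
    // only for versions below the newer generation's start. (Where this sketch
    // simply stops, the real code returns an empty cursor when begin predates
    // the oldest known generation.)
    std::vector<CursorRange> planEpochCursors(long begin, long end,
                                              const std::vector<long>& genStarts) {
        std::vector<CursorRange> cursors;
        long lastBegin = end;  // everything at or above this (up to end) is covered
        for (long thisBegin : genStarts) {
            if (begin >= lastBegin) break;  // requested range fully covered
            long from = std::max(thisBegin, begin);
            long to = std::min(lastBegin, end);
            if (from < to) cursors.push_back({ from, to });
            lastBegin = thisBegin;
        }
        return cursors;
    }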
@ -658,21 +667,27 @@ struct TagPartitionedLogSystem : ILogSystem, ReferenceCounted<TagPartitionedLogS
}
if(bestSet == -1) {
TraceEvent("TLogPeekRemoteNoBestSet", dbgid).detail("Tag", tag.toString()).detail("Begin", begin).detail("End", end.present() ? end.get() : getPeekEnd());
return Reference<ILogSystem::ServerPeekCursor>( new ILogSystem::ServerPeekCursor( Reference<AsyncVar<OptionalInterface<TLogInterface>>>(), tag, begin, getPeekEnd(), false, parallelGetMore ) );
return makeReference<ILogSystem::ServerPeekCursor>(Reference<AsyncVar<OptionalInterface<TLogInterface>>>(),
tag, begin, getPeekEnd(), false, parallelGetMore);
}
if(begin >= lastBegin) {
TraceEvent("TLogPeekRemoteBestOnly", dbgid).detail("Tag", tag.toString()).detail("Begin", begin).detail("End", end.present() ? end.get() : getPeekEnd()).detail("BestSet", bestSet).detail("BestSetStart", lastBegin).detail("LogRouterIds", tLogs[bestSet]->logRouterString());
return Reference<ILogSystem::BufferedCursor>( new ILogSystem::BufferedCursor( tLogs[bestSet]->logRouters, tag, begin, end.present() ? end.get() + 1 : getPeekEnd(), parallelGetMore ) );
return makeReference<ILogSystem::BufferedCursor>(
tLogs[bestSet]->logRouters, tag, begin, end.present() ? end.get() + 1 : getPeekEnd(), parallelGetMore);
} else {
std::vector< Reference<ILogSystem::IPeekCursor> > cursors;
std::vector< LogMessageVersion > epochEnds;
TraceEvent("TLogPeekRemoteAddingBest", dbgid).detail("Tag", tag.toString()).detail("Begin", begin).detail("End", end.present() ? end.get() : getPeekEnd()).detail("BestSet", bestSet).detail("BestSetStart", lastBegin).detail("LogRouterIds", tLogs[bestSet]->logRouterString());
cursors.emplace_back(new ILogSystem::BufferedCursor( tLogs[bestSet]->logRouters, tag, lastBegin, end.present() ? end.get() + 1 : getPeekEnd(), parallelGetMore ) );
cursors.push_back(makeReference<ILogSystem::BufferedCursor>(tLogs[bestSet]->logRouters, tag, lastBegin,
end.present() ? end.get() + 1 : getPeekEnd(),
parallelGetMore));
int i = 0;
while(begin < lastBegin) {
if(i == oldLogData.size()) {
TraceEvent("TLogPeekRemoteDead", dbgid).detail("Tag", tag.toString()).detail("Begin", begin).detail("End", end.present() ? end.get() : getPeekEnd()).detail("LastBegin", lastBegin).detail("OldLogDataSize", oldLogData.size());
return Reference<ILogSystem::ServerPeekCursor>( new ILogSystem::ServerPeekCursor( Reference<AsyncVar<OptionalInterface<TLogInterface>>>(), tag, begin, getPeekEnd(), false, parallelGetMore ) );
return makeReference<ILogSystem::ServerPeekCursor>(
Reference<AsyncVar<OptionalInterface<TLogInterface>>>(), tag, begin, getPeekEnd(), false,
parallelGetMore);
}

int bestOldSet = -1;

@ -689,27 +704,31 @@ struct TagPartitionedLogSystem : ILogSystem, ReferenceCounted<TagPartitionedLogS
}
if(bestOldSet == -1) {
TraceEvent("TLogPeekRemoteNoOldBestSet", dbgid).detail("Tag", tag.toString()).detail("Begin", begin).detail("End", end.present() ? end.get() : getPeekEnd());
return Reference<ILogSystem::ServerPeekCursor>( new ILogSystem::ServerPeekCursor( Reference<AsyncVar<OptionalInterface<TLogInterface>>>(), tag, begin, getPeekEnd(), false, parallelGetMore ) );
return makeReference<ILogSystem::ServerPeekCursor>(
Reference<AsyncVar<OptionalInterface<TLogInterface>>>(), tag, begin, getPeekEnd(), false,
parallelGetMore);
}

if(thisBegin < lastBegin) {
TraceEvent("TLogPeekRemoteAddingOldBest", dbgid).detail("Tag", tag.toString()).detail("Begin", begin).detail("End", end.present() ? end.get() : getPeekEnd()).detail("BestOldSet", bestOldSet).detail("LogRouterIds", oldLogData[i].tLogs[bestOldSet]->logRouterString())
.detail("LastBegin", lastBegin).detail("ThisBegin", thisBegin).detail("BestStartVer", oldLogData[i].tLogs[bestOldSet]->startVersion);
cursors.emplace_back(new ILogSystem::BufferedCursor(oldLogData[i].tLogs[bestOldSet]->logRouters, tag, thisBegin, lastBegin, parallelGetMore));
cursors.push_back(makeReference<ILogSystem::BufferedCursor>(
oldLogData[i].tLogs[bestOldSet]->logRouters, tag, thisBegin, lastBegin, parallelGetMore));
epochEnds.emplace_back(lastBegin);
lastBegin = thisBegin;
}
i++;
}

return Reference<ILogSystem::MultiCursor>( new ILogSystem::MultiCursor(cursors, epochEnds) );
return makeReference<ILogSystem::MultiCursor>(cursors, epochEnds);
}
}

Reference<IPeekCursor> peek( UID dbgid, Version begin, Optional<Version> end, Tag tag, bool parallelGetMore ) final {
if(!tLogs.size()) {
TraceEvent("TLogPeekNoLogSets", dbgid).detail("Tag", tag.toString()).detail("Begin", begin);
return Reference<ILogSystem::ServerPeekCursor>( new ILogSystem::ServerPeekCursor( Reference<AsyncVar<OptionalInterface<TLogInterface>>>(), tag, begin, getPeekEnd(), false, false ) );
return makeReference<ILogSystem::ServerPeekCursor>(Reference<AsyncVar<OptionalInterface<TLogInterface>>>(),
tag, begin, getPeekEnd(), false, false);
}

if(tag.locality == tagLocalityRemoteLog) {
@ -722,7 +741,8 @@ struct TagPartitionedLogSystem : ILogSystem, ReferenceCounted<TagPartitionedLogS
Reference<IPeekCursor> peek(UID dbgid, Version begin, Optional<Version> end, std::vector<Tag> tags, bool parallelGetMore) final {
if(tags.empty()) {
TraceEvent("TLogPeekNoTags", dbgid).detail("Begin", begin);
return Reference<ILogSystem::ServerPeekCursor>( new ILogSystem::ServerPeekCursor( Reference<AsyncVar<OptionalInterface<TLogInterface>>>(), invalidTag, begin, getPeekEnd(), false, false ) );
return makeReference<ILogSystem::ServerPeekCursor>(Reference<AsyncVar<OptionalInterface<TLogInterface>>>(),
invalidTag, begin, getPeekEnd(), false, false);
}

if(tags.size() == 1) {

@ -733,7 +753,8 @@ struct TagPartitionedLogSystem : ILogSystem, ReferenceCounted<TagPartitionedLogS
for(auto tag : tags) {
cursors.push_back(peek(dbgid, begin, end, tag, parallelGetMore));
}
return Reference<ILogSystem::BufferedCursor>( new ILogSystem::BufferedCursor(cursors, begin, end.present() ? end.get() + 1 : getPeekEnd(), true, tLogs[0]->locality == tagLocalityUpgraded, false) );
return makeReference<ILogSystem::BufferedCursor>(cursors, begin, end.present() ? end.get() + 1 : getPeekEnd(),
true, tLogs[0]->locality == tagLocalityUpgraded, false);
}

Reference<IPeekCursor> peekLocal( UID dbgid, Tag tag, Version begin, Version end, bool useMergePeekCursors, int8_t peekLocality = tagLocalityInvalid ) {

@ -762,17 +783,22 @@ struct TagPartitionedLogSystem : ILogSystem, ReferenceCounted<TagPartitionedLogS
if(useMergePeekCursors || logCount > 1) {
throw worker_removed();
} else {
return Reference<ILogSystem::ServerPeekCursor>( new ILogSystem::ServerPeekCursor( Reference<AsyncVar<OptionalInterface<TLogInterface>>>(), tag, begin, getPeekEnd(), false, false ) );
return makeReference<ILogSystem::ServerPeekCursor>(
Reference<AsyncVar<OptionalInterface<TLogInterface>>>(), tag, begin, getPeekEnd(), false, false);
}
}

if(begin >= tLogs[bestSet]->startVersion) {
TraceEvent("TLogPeekLocalBestOnly", dbgid).detail("Tag", tag.toString()).detail("Begin", begin).detail("End", end).detail("BestSet", bestSet).detail("BestSetStart", tLogs[bestSet]->startVersion).detail("LogId", tLogs[bestSet]->logServers[tLogs[bestSet]->bestLocationFor( tag )]->get().id());
if(useMergePeekCursors) {
return Reference<ILogSystem::MergedPeekCursor>( new ILogSystem::MergedPeekCursor( tLogs[bestSet]->logServers, tLogs[bestSet]->bestLocationFor( tag ), tLogs[bestSet]->logServers.size() + 1 - tLogs[bestSet]->tLogReplicationFactor, tag,
begin, end, true, tLogs[bestSet]->tLogLocalities, tLogs[bestSet]->tLogPolicy, tLogs[bestSet]->tLogReplicationFactor) );
return makeReference<ILogSystem::MergedPeekCursor>(
tLogs[bestSet]->logServers, tLogs[bestSet]->bestLocationFor(tag),
tLogs[bestSet]->logServers.size() + 1 - tLogs[bestSet]->tLogReplicationFactor, tag, begin, end,
true, tLogs[bestSet]->tLogLocalities, tLogs[bestSet]->tLogPolicy,
tLogs[bestSet]->tLogReplicationFactor);
} else {
return Reference<ILogSystem::ServerPeekCursor>( new ILogSystem::ServerPeekCursor( tLogs[bestSet]->logServers[tLogs[bestSet]->bestLocationFor( tag )], tag, begin, end, false, false ) );
return makeReference<ILogSystem::ServerPeekCursor>(
tLogs[bestSet]->logServers[tLogs[bestSet]->bestLocationFor(tag)], tag, begin, end, false, false);
}
} else {
std::vector< Reference<ILogSystem::IPeekCursor> > cursors;
@ -781,10 +807,15 @@ struct TagPartitionedLogSystem : ILogSystem, ReferenceCounted<TagPartitionedLogS
if(tLogs[bestSet]->startVersion < end) {
TraceEvent("TLogPeekLocalAddingBest", dbgid).detail("Tag", tag.toString()).detail("Begin", begin).detail("End", end).detail("BestSet", bestSet).detail("BestSetStart", tLogs[bestSet]->startVersion).detail("LogId", tLogs[bestSet]->logServers[tLogs[bestSet]->bestLocationFor( tag )]->get().id());
if(useMergePeekCursors) {
cursors.emplace_back(new ILogSystem::MergedPeekCursor(tLogs[bestSet]->logServers, tLogs[bestSet]->bestLocationFor( tag ), tLogs[bestSet]->logServers.size() + 1 - tLogs[bestSet]->tLogReplicationFactor, tag,
tLogs[bestSet]->startVersion, end, true, tLogs[bestSet]->tLogLocalities, tLogs[bestSet]->tLogPolicy, tLogs[bestSet]->tLogReplicationFactor));
cursors.push_back(makeReference<ILogSystem::MergedPeekCursor>(
tLogs[bestSet]->logServers, tLogs[bestSet]->bestLocationFor(tag),
tLogs[bestSet]->logServers.size() + 1 - tLogs[bestSet]->tLogReplicationFactor, tag,
tLogs[bestSet]->startVersion, end, true, tLogs[bestSet]->tLogLocalities,
tLogs[bestSet]->tLogPolicy, tLogs[bestSet]->tLogReplicationFactor));
} else {
cursors.emplace_back(new ILogSystem::ServerPeekCursor( tLogs[bestSet]->logServers[tLogs[bestSet]->bestLocationFor( tag )], tag, tLogs[bestSet]->startVersion, end, false, false));
cursors.push_back(makeReference<ILogSystem::ServerPeekCursor>(
tLogs[bestSet]->logServers[tLogs[bestSet]->bestLocationFor(tag)], tag,
tLogs[bestSet]->startVersion, end, false, false));
}
}
Version lastBegin = tLogs[bestSet]->startVersion;

@ -834,15 +865,22 @@ struct TagPartitionedLogSystem : ILogSystem, ReferenceCounted<TagPartitionedLogS
TraceEvent("TLogPeekLocalAddingOldBest", dbgid).detail("Tag", tag.toString()).detail("Begin", begin).detail("End", end).detail("BestOldSet", bestOldSet)
.detail("LogServers", oldLogData[i].tLogs[bestOldSet]->logServerString()).detail("ThisBegin", thisBegin).detail("LastBegin", lastBegin);
//detail("LogId", oldLogData[i].tLogs[bestOldSet]->logServers[tLogs[bestOldSet]->bestLocationFor( tag )]->get().id());
cursors.emplace_back(new ILogSystem::MergedPeekCursor( oldLogData[i].tLogs[bestOldSet]->logServers, oldLogData[i].tLogs[bestOldSet]->bestLocationFor( tag ), oldLogData[i].tLogs[bestOldSet]->logServers.size() + 1 - oldLogData[i].tLogs[bestOldSet]->tLogReplicationFactor, tag,
thisBegin, std::min(lastBegin, end), useMergePeekCursors, oldLogData[i].tLogs[bestOldSet]->tLogLocalities, oldLogData[i].tLogs[bestOldSet]->tLogPolicy, oldLogData[i].tLogs[bestOldSet]->tLogReplicationFactor));
cursors.push_back(makeReference<ILogSystem::MergedPeekCursor>(
oldLogData[i].tLogs[bestOldSet]->logServers,
oldLogData[i].tLogs[bestOldSet]->bestLocationFor(tag),
oldLogData[i].tLogs[bestOldSet]->logServers.size() + 1 -
oldLogData[i].tLogs[bestOldSet]->tLogReplicationFactor,
tag, thisBegin, std::min(lastBegin, end), useMergePeekCursors,
oldLogData[i].tLogs[bestOldSet]->tLogLocalities,
oldLogData[i].tLogs[bestOldSet]->tLogPolicy,
oldLogData[i].tLogs[bestOldSet]->tLogReplicationFactor));
epochEnds.emplace_back(std::min(lastBegin, end));
}
lastBegin = thisBegin;
}
}

return Reference<ILogSystem::MultiCursor>( new ILogSystem::MultiCursor(cursors, epochEnds) );
return makeReference<ILogSystem::MultiCursor>(cursors, epochEnds);
}
}

@ -850,7 +888,8 @@ struct TagPartitionedLogSystem : ILogSystem, ReferenceCounted<TagPartitionedLogS
Version end = getEnd();
if(!tLogs.size()) {
TraceEvent("TLogPeekTxsNoLogs", dbgid);
return Reference<ILogSystem::ServerPeekCursor>( new ILogSystem::ServerPeekCursor( Reference<AsyncVar<OptionalInterface<TLogInterface>>>(), txsTag, begin, end, false, false ) );
return makeReference<ILogSystem::ServerPeekCursor>(Reference<AsyncVar<OptionalInterface<TLogInterface>>>(),
txsTag, begin, end, false, false);
}
TraceEvent("TLogPeekTxs", dbgid).detail("Begin", begin).detail("End", end).detail("LocalEnd", localEnd).detail("PeekLocality", peekLocality).detail("CanDiscardPopped", canDiscardPopped);
@ -872,7 +911,7 @@ struct TagPartitionedLogSystem : ILogSystem, ReferenceCounted<TagPartitionedLogS
cursors.push_back(peekAll(dbgid, begin, end, txsTag, true));
}

return Reference<ILogSystem::BufferedCursor>( new ILogSystem::BufferedCursor(cursors, begin, end, false, false, canDiscardPopped) );
return makeReference<ILogSystem::BufferedCursor>(cursors, begin, end, false, false, canDiscardPopped);
}

try {

@ -886,7 +925,7 @@ struct TagPartitionedLogSystem : ILogSystem, ReferenceCounted<TagPartitionedLogS
cursors.push_back(peekLocal(dbgid, txsTag, begin, end, true, peekLocality));
}

return Reference<ILogSystem::BufferedCursor>( new ILogSystem::BufferedCursor(cursors, begin, end, false, false, canDiscardPopped) );
return makeReference<ILogSystem::BufferedCursor>(cursors, begin, end, false, false, canDiscardPopped);
}

std::vector< Reference<ILogSystem::IPeekCursor> > cursors;

@ -906,11 +945,12 @@ struct TagPartitionedLogSystem : ILogSystem, ReferenceCounted<TagPartitionedLogS
allCursors.push_back(peekAll(dbgid, localEnd, end, txsTag, true));
}

cursors[1] = Reference<ILogSystem::BufferedCursor>( new ILogSystem::BufferedCursor(localCursors, begin, localEnd, false, false, canDiscardPopped) );
cursors[0] = Reference<ILogSystem::BufferedCursor>( new ILogSystem::BufferedCursor(allCursors, localEnd, end, false, false, false) );
cursors[1] = makeReference<ILogSystem::BufferedCursor>(localCursors, begin, localEnd, false, false,
canDiscardPopped);
cursors[0] = makeReference<ILogSystem::BufferedCursor>(allCursors, localEnd, end, false, false, false);
epochEnds.emplace_back(localEnd);

return Reference<ILogSystem::MultiCursor>( new ILogSystem::MultiCursor(cursors, epochEnds) );
return makeReference<ILogSystem::MultiCursor>(cursors, epochEnds);
} catch( Error& e ) {
if(e.code() == error_code_worker_removed) {
std::vector< Reference<ILogSystem::IPeekCursor> > cursors;

@ -922,7 +962,7 @@ struct TagPartitionedLogSystem : ILogSystem, ReferenceCounted<TagPartitionedLogS
cursors.push_back(peekAll(dbgid, begin, end, txsTag, true));
}

return Reference<ILogSystem::BufferedCursor>( new ILogSystem::BufferedCursor(cursors, begin, end, false, false, canDiscardPopped) );
return makeReference<ILogSystem::BufferedCursor>(cursors, begin, end, false, false, canDiscardPopped);
}
throw;
}

@ -949,7 +989,7 @@ struct TagPartitionedLogSystem : ILogSystem, ReferenceCounted<TagPartitionedLogS
epochEnds.emplace_back(history[i].first);
}

return Reference<ILogSystem::MultiCursor>( new ILogSystem::MultiCursor(cursors, epochEnds) );
return makeReference<ILogSystem::MultiCursor>(cursors, epochEnds);
}
}
@ -986,7 +1026,8 @@ struct TagPartitionedLogSystem : ILogSystem, ReferenceCounted<TagPartitionedLogS

TraceEvent("TLogPeekLogRouterSets", dbgid).detail("Tag", tag.toString()).detail("Begin", begin);
//FIXME: do this merge on one of the logs in the other data center to avoid sending multiple copies across the WAN
return Reference<ILogSystem::SetPeekCursor>( new ILogSystem::SetPeekCursor( localSets, bestSet, localSets[bestSet]->bestLocationFor( tag ), tag, begin, getPeekEnd(), true ) );
return makeReference<ILogSystem::SetPeekCursor>(
localSets, bestSet, localSets[bestSet]->bestLocationFor(tag), tag, begin, getPeekEnd(), true);
} else {
int bestPrimarySet = -1;
int bestSatelliteSet = -1;

@ -1009,7 +1050,8 @@ struct TagPartitionedLogSystem : ILogSystem, ReferenceCounted<TagPartitionedLogS
}
const auto& log = tLogs[bestSet];
TraceEvent("TLogPeekLogRouterBestOnly", dbgid).detail("Tag", tag.toString()).detail("Begin", begin).detail("LogId", log->logServers[log->bestLocationFor( tag )]->get().id());
return Reference<ILogSystem::ServerPeekCursor>( new ILogSystem::ServerPeekCursor( log->logServers[log->bestLocationFor( tag )], tag, begin, getPeekEnd(), false, true ) );
return makeReference<ILogSystem::ServerPeekCursor>(log->logServers[log->bestLocationFor(tag)], tag,
begin, getPeekEnd(), false, true);
}
}
bool firstOld = true;

@ -1045,11 +1087,14 @@ struct TagPartitionedLogSystem : ILogSystem, ReferenceCounted<TagPartitionedLogS

TraceEvent("TLogPeekLogRouterOldSets", dbgid).detail("Tag", tag.toString()).detail("Begin", begin).detail("OldEpoch", old.epochEnd).detail("RecoveredAt", recoveredAt.present() ? recoveredAt.get() : -1).detail("FirstOld", firstOld);
//FIXME: do this merge on one of the logs in the other data center to avoid sending multiple copies across the WAN
return Reference<ILogSystem::SetPeekCursor>( new ILogSystem::SetPeekCursor( localSets, bestSet, localSets[bestSet]->bestLocationFor( tag ), tag, begin, firstOld && recoveredAt.present() ? recoveredAt.get() + 1 : old.epochEnd, true ) );
return makeReference<ILogSystem::SetPeekCursor>(
localSets, bestSet, localSets[bestSet]->bestLocationFor(tag), tag, begin,
firstOld && recoveredAt.present() ? recoveredAt.get() + 1 : old.epochEnd, true);
}
firstOld = false;
}
return Reference<ILogSystem::ServerPeekCursor>( new ILogSystem::ServerPeekCursor( Reference<AsyncVar<OptionalInterface<TLogInterface>>>(), tag, begin, getPeekEnd(), false, false ) );
return makeReference<ILogSystem::ServerPeekCursor>(Reference<AsyncVar<OptionalInterface<TLogInterface>>>(), tag,
begin, getPeekEnd(), false, false);
}

Version getKnownCommittedVersion() final {
@ -1453,7 +1498,8 @@ struct TagPartitionedLogSystem : ILogSystem, ReferenceCounted<TagPartitionedLogS
removedBackupWorkers.erase(reply.interf.id());
continue;
}
Reference<AsyncVar<OptionalInterface<BackupInterface>>> worker(new AsyncVar<OptionalInterface<BackupInterface>>(OptionalInterface<BackupInterface>(reply.interf)));
auto worker = makeReference<AsyncVar<OptionalInterface<BackupInterface>>>(
OptionalInterface<BackupInterface>(reply.interf));
if (reply.backupEpoch != logsetEpoch) {
// find the logset from oldLogData
logsetEpoch = reply.backupEpoch;

@ -1618,7 +1664,7 @@ struct TagPartitionedLogSystem : ILogSystem, ReferenceCounted<TagPartitionedLogS

if (!prevState.tLogs.size()) {
// This is a brand new database
Reference<TagPartitionedLogSystem> logSystem( new TagPartitionedLogSystem(dbgid, locality, 0) );
auto logSystem = makeReference<TagPartitionedLogSystem>(dbgid, locality, 0);
logSystem->logSystemType = prevState.logSystemType;
logSystem->recoverAt = 0;
logSystem->knownCommittedVersion = 0;

@ -1733,12 +1779,12 @@ struct TagPartitionedLogSystem : ILogSystem, ReferenceCounted<TagPartitionedLogS
state std::vector<Future<Void>> failureTrackers;

for (const CoreTLogSet& coreSet : prevState.tLogs) {
logServers.emplace_back(new LogSet(coreSet));
logServers.push_back(makeReference<LogSet>(coreSet));
std::vector<Reference<AsyncVar<bool>>> failed;

for (const auto& logVar : logServers.back()->logServers) {
allLogServers.push_back(std::make_pair(logVar,coreSet.tLogPolicy));
failed.emplace_back(new AsyncVar<bool>());
failed.push_back(makeReference<AsyncVar<bool>>());
failureTrackers.push_back(monitorLog(logVar, failed.back()));
}
logFailed.push_back(failed);

@ -1833,7 +1879,7 @@ struct TagPartitionedLogSystem : ILogSystem, ReferenceCounted<TagPartitionedLogS

std::vector<Reference<AsyncVar<bool>>> failed;
for(auto& log : logServers[0]->logServers) {
failed.emplace_back(new AsyncVar<bool>());
failed.push_back(makeReference<AsyncVar<bool>>());
failureTrackers.push_back( monitorLog(log, failed.back() ) );
}
ASSERT(logFailed.size() == 1);
@ -1864,8 +1910,7 @@ struct TagPartitionedLogSystem : ILogSystem, ReferenceCounted<TagPartitionedLogS
if(maxEnd > 0 && (!lastEnd.present() || maxEnd < lastEnd.get())) {
TEST( lastEnd.present() ); // Restarting recovery at an earlier point

Reference<TagPartitionedLogSystem> logSystem(
new TagPartitionedLogSystem(dbgid, locality, prevState.recoveryCount));
auto logSystem = makeReference<TagPartitionedLogSystem>(dbgid, locality, prevState.recoveryCount);

lastEnd = minEnd;
logSystem->tLogs = logServers;

@ -1922,7 +1967,7 @@ struct TagPartitionedLogSystem : ILogSystem, ReferenceCounted<TagPartitionedLogS

if(!found) {
TraceEvent("RecruitingOldLogRoutersAddingLocality").detail("Locality", locality).detail("LastStart", lastStart);
Reference<LogSet> newLogSet( new LogSet() );
auto newLogSet = makeReference<LogSet>();
newLogSet->locality = locality;
newLogSet->startVersion = lastStart;
newLogSet->isLocal = false;

@ -1967,7 +2012,7 @@ struct TagPartitionedLogSystem : ILogSystem, ReferenceCounted<TagPartitionedLogS

if(!found) {
TraceEvent("RecruitingOldLogRoutersAddingLocality").detail("Locality", locality).detail("LastStart", lastStart);
Reference<LogSet> newLogSet( new LogSet() );
auto newLogSet = makeReference<LogSet>();
newLogSet->locality = locality;
newLogSet->startVersion = lastStart;
old.tLogs.push_back(newLogSet);

@ -2007,7 +2052,8 @@ struct TagPartitionedLogSystem : ILogSystem, ReferenceCounted<TagPartitionedLogS
for(auto& tLogs : self->tLogs) {
if(tLogs->locality == locality) {
for( int i = 0; i < logRouterInitializationReplies[nextReplies].size(); i++ ) {
tLogs->logRouters.emplace_back(new AsyncVar<OptionalInterface<TLogInterface>>(OptionalInterface<TLogInterface>(logRouterInitializationReplies[nextReplies][i].get())));
tLogs->logRouters.push_back(makeReference<AsyncVar<OptionalInterface<TLogInterface>>>(
OptionalInterface<TLogInterface>(logRouterInitializationReplies[nextReplies][i].get())));
failed.push_back(waitFailureClient(
logRouterInitializationReplies[nextReplies][i].get().waitFailure,
SERVER_KNOBS->TLOG_TIMEOUT,
@ -2028,7 +2074,8 @@ struct TagPartitionedLogSystem : ILogSystem, ReferenceCounted<TagPartitionedLogS
for(auto& tLogs : old.tLogs) {
if(tLogs->locality == locality) {
for( int i = 0; i < logRouterInitializationReplies[nextReplies].size(); i++ ) {
tLogs->logRouters.emplace_back(new AsyncVar<OptionalInterface<TLogInterface>>(OptionalInterface<TLogInterface>(logRouterInitializationReplies[nextReplies][i].get())));
tLogs->logRouters.push_back(makeReference<AsyncVar<OptionalInterface<TLogInterface>>>(
OptionalInterface<TLogInterface>(logRouterInitializationReplies[nextReplies][i].get())));
if(!forRemote) {
failed.push_back(waitFailureClient(
logRouterInitializationReplies[nextReplies][i].get().waitFailure,

@ -2199,11 +2246,13 @@ struct TagPartitionedLogSystem : ILogSystem, ReferenceCounted<TagPartitionedLogS
wait( waitForAll(remoteTLogInitializationReplies) && waitForAll(logRouterInitializationReplies) && oldRouterRecruitment );

for( int i = 0; i < logRouterInitializationReplies.size(); i++ ) {
logSet->logRouters.emplace_back(new AsyncVar<OptionalInterface<TLogInterface>>(OptionalInterface<TLogInterface>(logRouterInitializationReplies[i].get())));
logSet->logRouters.push_back(makeReference<AsyncVar<OptionalInterface<TLogInterface>>>(
OptionalInterface<TLogInterface>(logRouterInitializationReplies[i].get())));
}

for( int i = 0; i < remoteTLogInitializationReplies.size(); i++ ) {
logSet->logServers[i] = Reference<AsyncVar<OptionalInterface<TLogInterface>>>( new AsyncVar<OptionalInterface<TLogInterface>>( OptionalInterface<TLogInterface>(remoteTLogInitializationReplies[i].get()) ) );
logSet->logServers[i] = makeReference<AsyncVar<OptionalInterface<TLogInterface>>>(
OptionalInterface<TLogInterface>(remoteTLogInitializationReplies[i].get()));
logSet->tLogLocalities[i] = remoteWorkers.remoteTLogs[i].locality;
}
filterLocalityDataForPolicy(logSet->tLogPolicy, &logSet->tLogLocalities);

@ -2248,7 +2297,7 @@ struct TagPartitionedLogSystem : ILogSystem, ReferenceCounted<TagPartitionedLogS
TraceEvent("AddPseudoLocality", logSystem->getDebugID()).detail("Locality", "Backup");
}

logSystem->tLogs.emplace_back(new LogSet());
logSystem->tLogs.push_back(makeReference<LogSet>());
logSystem->tLogs[0]->tLogVersion = configuration.tLogVersion;
logSystem->tLogs[0]->tLogWriteAntiQuorum = configuration.tLogWriteAntiQuorum;
logSystem->tLogs[0]->tLogReplicationFactor = configuration.tLogReplicationFactor;
@@ -2266,7 +2315,7 @@ struct TagPartitionedLogSystem : ILogSystem, ReferenceCounted<TagPartitionedLogS
}

if(region.satelliteTLogReplicationFactor > 0 && configuration.usableRegions > 1) {
logSystem->tLogs.emplace_back(new LogSet());
logSystem->tLogs.push_back(makeReference<LogSet>());
if(recr.satelliteFallback) {
logSystem->tLogs[1]->tLogWriteAntiQuorum = region.satelliteTLogWriteAntiQuorumFallback;
logSystem->tLogs[1]->tLogReplicationFactor = region.satelliteTLogReplicationFactorFallback;

@@ -2479,7 +2528,8 @@ struct TagPartitionedLogSystem : ILogSystem, ReferenceCounted<TagPartitionedLogS
wait( waitForAll( satelliteInitializationReplies ) || oldRouterRecruitment );

for( int i = 0; i < satelliteInitializationReplies.size(); i++ ) {
logSystem->tLogs[1]->logServers[i] = Reference<AsyncVar<OptionalInterface<TLogInterface>>>( new AsyncVar<OptionalInterface<TLogInterface>>( OptionalInterface<TLogInterface>(satelliteInitializationReplies[i].get()) ) );
logSystem->tLogs[1]->logServers[i] = makeReference<AsyncVar<OptionalInterface<TLogInterface>>>(
OptionalInterface<TLogInterface>(satelliteInitializationReplies[i].get()));
}

for( int i = 0; i < logSystem->tLogs[1]->logServers.size(); i++)

@@ -2489,7 +2539,8 @@ struct TagPartitionedLogSystem : ILogSystem, ReferenceCounted<TagPartitionedLogS
wait( waitForAll( initializationReplies ) || oldRouterRecruitment );

for( int i = 0; i < initializationReplies.size(); i++ ) {
logSystem->tLogs[0]->logServers[i] = Reference<AsyncVar<OptionalInterface<TLogInterface>>>( new AsyncVar<OptionalInterface<TLogInterface>>( OptionalInterface<TLogInterface>(initializationReplies[i].get()) ) );
logSystem->tLogs[0]->logServers[i] = makeReference<AsyncVar<OptionalInterface<TLogInterface>>>(
OptionalInterface<TLogInterface>(initializationReplies[i].get()));
logSystem->tLogs[0]->tLogLocalities[i] = recr.tLogs[i].locality;
}
filterLocalityDataForPolicy(logSystem->tLogs[0]->tLogPolicy, &logSystem->tLogs[0]->tLogLocalities);

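The recurring change in the hunks above swaps explicit Reference<T>(new T(...)) construction, and emplace_back(new T(...)), for a makeReference<T>(...) factory. As a minimal sketch of what such a helper typically looks like for flow's intrusively counted Reference (the real definition lives in flow and may differ in detail):

#include <utility> // std::forward

// Sketch only: construct a T in place and hand ownership straight to a
// Reference<T>, so no raw owning pointer is ever visible at the call site.
template <class T, class... Args>
Reference<T> makeReference(Args&&... args) {
    return Reference<T>(new T(std::forward<Args>(args)...));
}

Beyond brevity, push_back(makeReference<T>(...)) is also safer than emplace_back(new T(...)): if growing the vector throws, the temporary Reference still owns and releases the object, whereas the result of a raw new expression would leak.
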
@@ -2224,10 +2224,10 @@ Reference<IPagerSnapshot> DWALPager::getReadSnapshot(Version v) {
void DWALPager::addLatestSnapshot() {
Promise<Void> expired;
snapshots.push_back({ pLastCommittedHeader->committedVersion, expired,
Reference<DWALPagerSnapshot>(new DWALPagerSnapshot(this, pLastCommittedHeader->getMetaKey(),
pLastCommittedHeader->committedVersion,
expired.getFuture())) });
snapshots.push_back(
{ pLastCommittedHeader->committedVersion, expired,
makeReference<DWALPagerSnapshot>(this, pLastCommittedHeader->getMetaKey(),
pLastCommittedHeader->committedVersion, expired.getFuture()) });
}

// TODO: Move this to a flow header once it is mature.

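The braced initializer passed to snapshots.push_back above bundles a version, an expiry promise, and the snapshot reference. The bookkeeping entry presumably looks roughly like the following; the struct and container names here are assumptions for illustration, not taken from this diff:

// Assumed shape of the pager's snapshot bookkeeping entry.
struct SnapshotEntry {
    Version version;                       // committed version the snapshot reads at
    Promise<Void> expired;                 // signalled once the snapshot is invalidated
    Reference<DWALPagerSnapshot> snapshot; // the read snapshot itself
};
std::deque<SnapshotEntry> snapshots;       // older snapshots expire from the front
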
@@ -4996,7 +4996,7 @@ public:
: parent(toCopy.parent), pageID(toCopy.pageID), page(toCopy.page), cursor(toCopy.cursor) {}

// Convenience method for copying a PageCursor
Reference<PageCursor> copy() const { return Reference<PageCursor>(new PageCursor(*this)); }
Reference<PageCursor> copy() const { return makeReference<PageCursor>(*this); }

const BTreePage* btPage() const { return (const BTreePage*)page->begin(); }

@@ -5026,7 +5026,7 @@ public:
}

return map(child, [=](Reference<const IPage> page) {
return Reference<PageCursor>(new PageCursor(id, page, Reference<PageCursor>::addRef(this)));
return makeReference<PageCursor>(id, page, Reference<PageCursor>::addRef(this));
});
}

@@ -5102,7 +5102,7 @@ public:
// Otherwise read the root page
Future<Reference<const IPage>> root = readPage(pager, rootPageID, &dbBegin, &dbEnd);
return map(root, [=](Reference<const IPage> p) {
pageCursor = Reference<PageCursor>(new PageCursor(rootPageID, p));
pageCursor = makeReference<PageCursor>(rootPageID, p);
return Void();
});
}

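Two distinct Reference constructions appear in the cursor hunks above: makeReference<PageCursor>(...) allocates a brand-new object with a fresh count, while Reference<PageCursor>::addRef(this) wraps an object that is already alive by bumping its intrusive count. A small self-contained sketch of the distinction, assuming flow's usual ReferenceCounted interface:

#include "flow/FastRef.h" // Reference, ReferenceCounted, makeReference (path assumed)

struct Node : ReferenceCounted<Node> {
    Reference<Node> parent;
    explicit Node(Reference<Node> p = Reference<Node>()) : parent(p) {}

    Reference<Node> makeChild() {
        // addRef(this) shares *this without re-allocating;
        // makeReference allocates the child itself.
        return makeReference<Node>(Reference<Node>::addRef(this));
    }
};
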
@@ -436,8 +436,10 @@ ACTOR Future<Void> dumpDatabase( Database cx, std::string outputFilename, KeyRan
void memoryTest();
void skipListTest();

Future<Void> startSystemMonitor(std::string dataFolder, Optional<Standalone<StringRef>> zoneId, Optional<Standalone<StringRef>> machineId) {
initializeSystemMonitorMachineState(SystemMonitorMachineState(dataFolder, zoneId, machineId, g_network->getLocalAddress().ip));
Future<Void> startSystemMonitor(std::string dataFolder, Optional<Standalone<StringRef>> dcId,
Optional<Standalone<StringRef>> zoneId, Optional<Standalone<StringRef>> machineId) {
initializeSystemMonitorMachineState(
SystemMonitorMachineState(dataFolder, dcId, zoneId, machineId, g_network->getLocalAddress().ip));

systemMonitor();
return recurring( &systemMonitor, 5.0, TaskPriority::FlushTrace );

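Since the signature now takes the data-center ID in second position, every caller must be updated in the same commit; the call sites later in this diff follow the pattern below (note that they pass opts.zoneId for the machine-ID slot as well):

auto m = startSystemMonitor(opts.dataFolder, opts.dcId, opts.zoneId, opts.zoneId);
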
@@ -1445,13 +1447,12 @@ private:
fprintf(stderr, "%s\n", ClusterConnectionString::getErrorString(connectionString, e).c_str());
throw;
}
connectionFile = Reference<ClusterConnectionFile>(new ClusterConnectionFile(connFile, ccs));
auto connectionFile = makeReference<ClusterConnectionFile>(connFile, ccs);
} else {
std::pair<std::string, bool> resolvedClusterFile;
try {
resolvedClusterFile = ClusterConnectionFile::lookupClusterFileName(connFile);
connectionFile =
Reference<ClusterConnectionFile>(new ClusterConnectionFile(resolvedClusterFile.first));
connectionFile = makeReference<ClusterConnectionFile>(resolvedClusterFile.first);
} catch (Error& e) {
fprintf(stderr, "%s\n", ClusterConnectionFile::getErrorString(resolvedClusterFile, e).c_str());
throw;

@@ -1637,6 +1638,7 @@ int main(int argc, char* argv[]) {
//startOldSimulator();
startNewSimulator();
openTraceFile(NetworkAddress(), opts.rollsize, opts.maxLogsSize, opts.logFolder, "trace", opts.logGroup);
openTracer(TracerType(deterministicRandom()->randomInt(static_cast<int>(TracerType::DISABLED), static_cast<int>(TracerType::END))));
} else {
g_network = newNet2(opts.tlsConfig, opts.useThreadPool, true);
g_network->addStopCallback( Net2FileSystem::stop );

@@ -1908,14 +1910,14 @@ int main(int argc, char* argv[]) {
g_network->run();
} else if (role == Test) {
setupRunLoopProfiler();
auto m = startSystemMonitor(opts.dataFolder, opts.zoneId, opts.zoneId);
auto m = startSystemMonitor(opts.dataFolder, opts.dcId, opts.zoneId, opts.zoneId);
f = stopAfter(runTests(opts.connectionFile, TEST_TYPE_FROM_FILE, TEST_HERE, 1, opts.testFile, StringRef(),
opts.localities));
g_network->run();
} else if (role == ConsistencyCheck) {
setupRunLoopProfiler();

auto m = startSystemMonitor(opts.dataFolder, opts.zoneId, opts.zoneId);
auto m = startSystemMonitor(opts.dataFolder, opts.dcId, opts.zoneId, opts.zoneId);
f = stopAfter(runTests(opts.connectionFile, TEST_TYPE_CONSISTENCY_CHECK, TEST_HERE, 1, opts.testFile,
StringRef(), opts.localities));
g_network->run();

Some files were not shown because too many files have changed in this diff.