Merge remote-tracking branch 'origin' into env
commit 4a25f8b692

@@ -206,7 +206,7 @@ endif()
 if (CMAKE_EXPORT_COMPILE_COMMANDS AND WITH_PYTHON)
   add_custom_command(
     OUTPUT ${CMAKE_CURRENT_SOURCE_DIR}/compile_commands.json
-    COMMAND $<TARGET_FILE:Python::Interpreter> ${CMAKE_CURRENT_SOURCE_DIR}/contrib/gen_compile_db.py
+    COMMAND $<TARGET_FILE:Python3::Interpreter> ${CMAKE_CURRENT_SOURCE_DIR}/contrib/gen_compile_db.py
     ARGS -b ${CMAKE_CURRENT_BINARY_DIR} -s ${CMAKE_CURRENT_SOURCE_DIR} -o ${CMAKE_CURRENT_SOURCE_DIR}/compile_commands.json ${CMAKE_CURRENT_BINARY_DIR}/compile_commands.json
     DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/contrib/gen_compile_db.py ${CMAKE_CURRENT_BINARY_DIR}/compile_commands.json
     COMMENT "Build compile commands for IDE"

@@ -29,7 +29,7 @@ if(APPLE AND CMAKE_SYSTEM_PROCESSOR STREQUAL "arm64")
 endif()

 add_custom_command(OUTPUT ${asm_file} ${CMAKE_CURRENT_BINARY_DIR}/fdb_c_function_pointers.g.h
-  COMMAND $<TARGET_FILE:Python::Interpreter> ${CMAKE_CURRENT_SOURCE_DIR}/generate_asm.py ${os} ${cpu}
+  COMMAND $<TARGET_FILE:Python3::Interpreter> ${CMAKE_CURRENT_SOURCE_DIR}/generate_asm.py ${os} ${cpu}
     ${CMAKE_CURRENT_SOURCE_DIR}/fdb_c.cpp
     ${asm_file}
     ${CMAKE_CURRENT_BINARY_DIR}/fdb_c_function_pointers.g.h

@@ -65,7 +65,7 @@ endif()
 if(APPLE)
   set(symbols ${CMAKE_CURRENT_BINARY_DIR}/fdb_c.symbols)
   add_custom_command(OUTPUT ${symbols}
-    COMMAND $<TARGET_FILE:Python::Interpreter> ${CMAKE_CURRENT_SOURCE_DIR}/symbolify.py
+    COMMAND $<TARGET_FILE:Python3::Interpreter> ${CMAKE_CURRENT_SOURCE_DIR}/symbolify.py
       ${CMAKE_CURRENT_SOURCE_DIR}/foundationdb/fdb_c.h
       ${CMAKE_CURRENT_SOURCE_DIR}/foundationdb/fdb_c_internal.h
       ${symbols}

@@ -428,10 +428,18 @@ endif()
 # Generate shim library in Linux builds
 if (OPEN_FOR_IDE)

-  add_library(fdb_c_shim OBJECT fdb_c_shim.cpp)
+  add_library(fdb_c_shim OBJECT foundationdb/fdb_c_shim.h fdb_c_shim.cpp)
   target_link_libraries(fdb_c_shim PUBLIC dl)
+  target_include_directories(fdb_c_shim PUBLIC
+    $<BUILD_INTERFACE:${CMAKE_CURRENT_BINARY_DIR}>
+    $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}>
+    $<BUILD_INTERFACE:${CMAKE_CURRENT_BINARY_DIR}/foundationdb>)

-elseif(NOT WIN32 AND NOT APPLE AND NOT OPEN_FOR_IDE AND NOT USE_UBSAN) # Linux, non-ubsan only
+  add_library(fdb_c_shim_lib_tester OBJECT test/shim_lib_tester.cpp)
+  target_link_libraries(fdb_c_shim_lib_tester PRIVATE fdb_c_shim SimpleOpt fdb_cpp Threads::Threads)
+  target_include_directories(fdb_c_shim_lib_tester PUBLIC ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}/foundationdb/ ${CMAKE_SOURCE_DIR}/flow/include)
+
+elseif(NOT WIN32 AND NOT APPLE AND NOT USE_UBSAN) # Linux Only, non-ubsan only

   set(SHIM_LIB_OUTPUT_DIR ${CMAKE_CURRENT_BINARY_DIR})

@@ -439,16 +447,31 @@ elseif(NOT WIN32 AND NOT APPLE AND NOT OPEN_FOR_IDE AND NOT USE_UBSAN) # Linux,
     ${SHIM_LIB_OUTPUT_DIR}/libfdb_c.so.init.c
     ${SHIM_LIB_OUTPUT_DIR}/libfdb_c.so.tramp.S)

+  set(IMPLIBSO_SRC_DIR ${CMAKE_SOURCE_DIR}/contrib/Implib.so)
+  set(IMPLIBSO_SRC
+    ${IMPLIBSO_SRC_DIR}/implib-gen.py
+    ${IMPLIBSO_SRC_DIR}/arch/common/init.c.tpl
+    ${IMPLIBSO_SRC_DIR}/arch/${CMAKE_SYSTEM_PROCESSOR}/config.ini
+    ${IMPLIBSO_SRC_DIR}/arch/${CMAKE_SYSTEM_PROCESSOR}/table.S.tpl
+    ${IMPLIBSO_SRC_DIR}/arch/${CMAKE_SYSTEM_PROCESSOR}/trampoline.S.tpl
+  )
+
   add_custom_command(OUTPUT ${SHIM_LIB_GEN_SRC}
-    COMMAND $<TARGET_FILE:Python::Interpreter> ${CMAKE_SOURCE_DIR}/contrib/Implib.so/implib-gen.py
+    COMMAND $<TARGET_FILE:Python3::Interpreter> ${IMPLIBSO_SRC_DIR}/implib-gen.py
       --target ${CMAKE_SYSTEM_PROCESSOR}
       --outdir ${SHIM_LIB_OUTPUT_DIR}
       --dlopen-callback=fdb_shim_dlopen_callback
-      $<TARGET_FILE:fdb_c>)
+      $<TARGET_FILE:fdb_c>
+    DEPENDS ${IMPLIBSO_SRC}
+    COMMENT "Generating source code for C shim library")

-  add_library(fdb_c_shim SHARED ${SHIM_LIB_GEN_SRC} fdb_c_shim.cpp)
+  add_library(fdb_c_shim SHARED ${SHIM_LIB_GEN_SRC} foundationdb/fdb_c_shim.h fdb_c_shim.cpp)
   target_link_options(fdb_c_shim PRIVATE "LINKER:--version-script=${CMAKE_CURRENT_SOURCE_DIR}/fdb_c.map,-z,nodelete,-z,noexecstack")
   target_link_libraries(fdb_c_shim PUBLIC dl)
+  target_include_directories(fdb_c_shim PUBLIC
+    $<BUILD_INTERFACE:${CMAKE_CURRENT_BINARY_DIR}>
+    $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}>
+    $<BUILD_INTERFACE:${CMAKE_CURRENT_BINARY_DIR}/foundationdb>)

   add_executable(fdb_c_shim_unit_tests)
   target_link_libraries(fdb_c_shim_unit_tests PRIVATE fdb_c_shim fdb_c_unit_tests_impl)

@@ -456,15 +479,20 @@ elseif(NOT WIN32 AND NOT APPLE AND NOT OPEN_FOR_IDE AND NOT USE_UBSAN) # Linux,
   add_executable(fdb_c_shim_api_tester)
   target_link_libraries(fdb_c_shim_api_tester PRIVATE fdb_c_shim fdb_c_api_tester_impl)

+  add_executable(fdb_c_shim_lib_tester test/shim_lib_tester.cpp)
+  target_link_libraries(fdb_c_shim_lib_tester PRIVATE fdb_c_shim SimpleOpt fdb_cpp Threads::Threads)
+  target_include_directories(fdb_c_shim_lib_tester PUBLIC ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}/foundationdb/ ${CMAKE_SOURCE_DIR}/flow/include)
+
   add_test(NAME fdb_c_shim_library_tests
-    COMMAND $<TARGET_FILE:Python::Interpreter> ${CMAKE_CURRENT_SOURCE_DIR}/test/fdb_c_shim_tests.py
+    COMMAND $<TARGET_FILE:Python3::Interpreter> ${CMAKE_CURRENT_SOURCE_DIR}/test/fdb_c_shim_tests.py
            --build-dir ${CMAKE_BINARY_DIR}
            --unit-tests-bin $<TARGET_FILE:fdb_c_shim_unit_tests>
            --api-tester-bin $<TARGET_FILE:fdb_c_shim_api_tester>
+           --shim-lib-tester-bin $<TARGET_FILE:fdb_c_shim_lib_tester>
            --api-test-dir ${CMAKE_SOURCE_DIR}/bindings/c/test/apitester/tests
            )

-endif() # End Linux, non-ubsan only
+endif() # End Linux only, non-ubsan only

 # TODO: re-enable once the old vcxproj-based build system is removed.
 #generate_export_header(fdb_c EXPORT_MACRO_NAME "DLLEXPORT"

@@ -508,3 +536,19 @@ fdb_install(
   DESTINATION lib
   DESTINATION_SUFFIX "/cmake/${targets_export_name}"
   COMPONENT clients)
+
+if(NOT WIN32 AND NOT APPLE AND NOT USE_UBSAN) # Linux Only, non-ubsan only
+
+  fdb_install(
+    FILES foundationdb/fdb_c_shim.h
+    DESTINATION include
+    DESTINATION_SUFFIX /foundationdb
+    COMPONENT clients)
+
+  fdb_install(
+    TARGETS fdb_c_shim
+    EXPORT ${targets_export_name}
+    DESTINATION lib
+    COMPONENT clients)
+
+endif() # End Linux only, non-ubsan only

@@ -20,25 +20,42 @@

 #if (defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__))

+#define DLLEXPORT __attribute__((visibility("default")))
+
+#include "foundationdb/fdb_c_shim.h"
+
 #include <dlfcn.h>
 #include <stdio.h>
 #include <stdlib.h>
 #include <string>

-static const char* FDB_C_CLIENT_LIBRARY_PATH = "FDB_C_CLIENT_LIBRARY_PATH";
+namespace {

-// Callback that tries different library names
+const char* FDB_LOCAL_CLIENT_LIBRARY_PATH_ENVVAR = "FDB_LOCAL_CLIENT_LIBRARY_PATH";
+std::string g_fdbLocalClientLibraryPath;
+
+} // namespace
+
+extern "C" DLLEXPORT void fdb_shim_set_local_client_library_path(const char* filePath) {
+    g_fdbLocalClientLibraryPath = filePath;
+}
+
+/* The callback of the fdb_c_shim layer that determines the path
+   of the fdb_c library to be dynamically loaded
+*/
 extern "C" void* fdb_shim_dlopen_callback(const char* libName) {
     std::string libPath;
-    char* val = getenv(FDB_C_CLIENT_LIBRARY_PATH);
-    if (val) {
-        libPath = val;
+    if (!g_fdbLocalClientLibraryPath.empty()) {
+        libPath = g_fdbLocalClientLibraryPath;
     } else {
-        libPath = libName;
+        char* val = getenv(FDB_LOCAL_CLIENT_LIBRARY_PATH_ENVVAR);
+        if (val) {
+            libPath = val;
+        } else {
+            libPath = libName;
+        }
     }
     return dlopen(libPath.c_str(), RTLD_LAZY | RTLD_GLOBAL);
 }

 #else
 #error Port me!
 #endif
 #endif

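Note: for readers unfamiliar with Implib.so-style shims, here is an illustrative C++ sketch (not the generated code; the real trampolines come from implib-gen.py and the templates under contrib/Implib.so) of the mechanism the callback above plugs into — every exported fdb_c symbol becomes a small stub that lazily loads the real library through the registered dlopen callback:

    #include <dlfcn.h>
    #include <cstdio>
    #include <cstdlib>

    extern "C" void* fdb_shim_dlopen_callback(const char* libName); // defined above

    static void* resolveSymbol(const char* symbolName) {
        // First call loads the library; subsequent calls reuse the handle.
        static void* handle = fdb_shim_dlopen_callback("libfdb_c.so");
        if (handle == nullptr) {
            fprintf(stderr, "fdb_c_shim: failed to load client library\n");
            abort(); // mirrors the CHECK macro in the generated code
        }
        return dlsym(handle, symbolName);
    }

    extern "C" int fdb_get_max_api_version(void) {
        using Fn = int (*)(void);
        static Fn real = reinterpret_cast<Fn>(resolveSymbol("fdb_get_max_api_version"));
        return real();
    }
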
@@ -70,6 +70,15 @@ DLLEXPORT fdb_bool_t fdb_error_predicate(int predicate_test, fdb_error_t code);

 #define /* fdb_error_t */ fdb_select_api_version(v) fdb_select_api_version_impl(v, FDB_API_VERSION)

+/*
+ * A variant of fdb_select_api_version that caps the header API version by the maximum API version
+ * supported by the client library. It is intended mainly for use in combination with the shim
+ * layer, which loads the client library dynamically.
+ */
+#define /* fdb_error_t */ fdb_select_api_version_capped(v) \
+    fdb_select_api_version_impl( \
+        v, FDB_API_VERSION < fdb_get_max_api_version() ? FDB_API_VERSION : fdb_get_max_api_version())
+
 DLLEXPORT WARN_UNUSED_RESULT fdb_error_t fdb_network_set_option(FDBNetworkOption option,
                                                                 uint8_t const* value,
                                                                 int value_length);

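Note: a minimal usage sketch of the new macro (assuming an application built against a header at API version 720): if the dynamically loaded library only supports, say, 710, fdb_select_api_version_capped(720) selects 710 instead of failing the way plain fdb_select_api_version would.

    #define FDB_API_VERSION 720 // assumed header version for this sketch
    #include <foundationdb/fdb_c.h>
    #include <cstdio>

    int main() {
        // Caps the requested version at fdb_get_max_api_version() of the
        // loaded client library.
        fdb_error_t err = fdb_select_api_version_capped(FDB_API_VERSION);
        if (err) {
            fprintf(stderr, "API selection failed: %s\n", fdb_get_error(err));
            return 1;
        }
        printf("running at API version <= %d\n", fdb_get_max_api_version());
        return 0;
    }
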
@@ -0,0 +1,47 @@
+/*
+ * fdb_shim_c.h
+ *
+ * This source file is part of the FoundationDB open source project
+ *
+ * Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FDB_SHIM_C_H
+#define FDB_SHIM_C_H
+#pragma once
+
+#ifndef DLLEXPORT
+#define DLLEXPORT
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * Specify the path of the local libfdb_c.so library to be dynamically loaded by the shim layer
+ *
+ * This enables running the same application code with different client library versions,
+ * e.g. using the latest development build for testing new features, but still using the latest
+ * stable release in production deployments.
+ *
+ * The given path overrides the environment variable FDB_LOCAL_CLIENT_LIBRARY_PATH
+ */
+DLLEXPORT void fdb_shim_set_local_client_library_path(const char* filePath);
+
+#ifdef __cplusplus
+}
+#endif
+#endif

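Note: a minimal sketch of the intended call order (assumes an executable linked against the shim library rather than libfdb_c.so directly):

    #define FDB_API_VERSION 720 // assumed header version for this sketch
    #include <foundationdb/fdb_c.h>
    #include <foundationdb/fdb_c_shim.h>

    int main(int argc, char** argv) {
        if (argc > 1) {
            // Overrides FDB_LOCAL_CLIENT_LIBRARY_PATH; must happen before the
            // first fdb_* call, which triggers the lazy dlopen in the shim.
            fdb_shim_set_local_client_library_path(argv[1]);
        }
        return fdb_select_api_version_capped(FDB_API_VERSION) != 0;
    }
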
@@ -278,7 +278,7 @@ void fdb_check(fdb::Error e) {
 }

 void applyNetworkOptions(TesterOptions& options) {
-    if (!options.tmpDir.empty()) {
+    if (!options.tmpDir.empty() && options.apiVersion >= 720) {
         fdb::network::setOption(FDBNetworkOption::FDB_NET_OPTION_CLIENT_TMP_DIR, options.tmpDir);
     }
     if (!options.externalClientLibrary.empty()) {

@@ -419,7 +419,7 @@ int main(int argc, char** argv) {
     }
     randomizeOptions(options);

-    fdb::selectApiVersion(options.apiVersion);
+    fdb::selectApiVersionCapped(options.apiVersion);
     applyNetworkOptions(options);
     fdb::network::setup();

@@ -114,7 +114,7 @@ public:

     explicit Error(CodeType err) noexcept : err(err) {}

-    char const* what() noexcept { return native::fdb_get_error(err); }
+    char const* what() const noexcept { return native::fdb_get_error(err); }

     explicit operator bool() const noexcept { return err != 0; }

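Note: the added const qualifier matters because error objects are typically caught by const reference; a small illustration (include path assumed to match the tester build):

    #include "test/fdb_api.hpp" // bindings/c/test header; include path is assumed
    #include <cstdio>

    void logError(const fdb::Error& err) {
        // Compiles only because Error::what() is now const-qualified.
        fprintf(stderr, "FDB error %d: %s\n", err.code(), err.what());
    }
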
@@ -722,6 +722,20 @@ inline void selectApiVersion(int version) {
     }
 }

+inline Error selectApiVersionCappedNothrow(int version) {
+    if (version < 720) {
+        Tenant::tenantManagementMapPrefix = "\xff\xff/management/tenant_map/";
+    }
+    return Error(
+        native::fdb_select_api_version_impl(version, std::min(native::fdb_get_max_api_version(), FDB_API_VERSION)));
+}
+
+inline void selectApiVersionCapped(int version) {
+    if (auto err = selectApiVersionCappedNothrow(version)) {
+        throwError(fmt::format("ERROR: fdb_select_api_version_capped({}): ", version), err);
+    }
+}
+
 } // namespace fdb

 template <>

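Note: a short usage sketch of the wrapper added above (include path assumed to match the tester build):

    #include "test/fdb_api.hpp" // bindings/c/test header; include path is assumed

    void initClient(int requestedVersion) {
        // Throws (via throwError) if even the capped version cannot be
        // selected, e.g. when requestedVersion is below the minimum supported.
        fdb::selectApiVersionCapped(requestedVersion);
        fdb::network::setup();
    }
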
@@ -7,13 +7,18 @@ import subprocess
 import sys
 import os

-sys.path[:0] = [os.path.join(os.path.dirname(__file__), '..', '..', '..', 'tests', 'TestRunner')]
+sys.path[:0] = [os.path.join(os.path.dirname(
+    __file__), '..', '..', '..', 'tests', 'TestRunner')]

+# fmt: off
 from binary_download import FdbBinaryDownloader, CURRENT_VERSION
 from local_cluster import LocalCluster, random_secret_string
+# fmt: on

 LAST_RELEASE_VERSION = "7.1.5"
 TESTER_STATS_INTERVAL_SEC = 5
 DEFAULT_TEST_FILE = "CApiCorrectnessMultiThr.toml"
+IMPLIBSO_ERROR_CODE = -6  # SIGABORT


 def version_from_str(ver_str):

@@ -55,7 +60,8 @@ class TestEnv(LocalCluster):
         self.set_env_var("LD_LIBRARY_PATH", self.downloader.lib_dir(version))
         client_lib = self.downloader.lib_path(version)
         assert client_lib.exists(), "{} does not exist".format(client_lib)
-        self.client_lib_external = self.tmp_dir.joinpath("libfdb_c_external.so")
+        self.client_lib_external = self.tmp_dir.joinpath(
+            "libfdb_c_external.so")
         shutil.copyfile(client_lib, self.client_lib_external)

     def __enter__(self):

@@ -91,6 +97,9 @@ class FdbCShimTests:
         assert self.unit_tests_bin.exists(), "{} does not exist".format(self.unit_tests_bin)
         self.api_tester_bin = Path(args.api_tester_bin).resolve()
         assert self.api_tester_bin.exists(), "{} does not exist".format(self.api_tests_bin)
+        self.shim_lib_tester_bin = Path(args.shim_lib_tester_bin).resolve()
+        assert self.shim_lib_tester_bin.exists(
+        ), "{} does not exist".format(self.shim_lib_tester_bin)
         self.api_test_dir = Path(args.api_test_dir).resolve()
         assert self.api_test_dir.exists(), "{} does not exist".format(self.api_test_dir)
         self.downloader = FdbBinaryDownloader(args.build_dir)

@@ -98,6 +107,7 @@ class FdbCShimTests:
         self.platform = platform.machine()
         if (self.platform == "x86_64"):
             self.downloader.download_old_binaries(LAST_RELEASE_VERSION)
+            self.downloader.download_old_binaries("7.0.0")

     def build_c_api_tester_args(self, test_env, test_file):
         test_file_path = self.api_test_dir.joinpath(test_file)

@@ -128,7 +138,8 @@ class FdbCShimTests:
         with TestEnv(self.build_dir, self.downloader, version) as test_env:
             cmd_args = self.build_c_api_tester_args(test_env, test_file)
             env_vars = os.environ.copy()
-            env_vars["LD_LIBRARY_PATH"] = self.downloader.lib_dir(version)
+            env_vars["FDB_LOCAL_CLIENT_LIBRARY_PATH"] = self.downloader.lib_path(
+                version)
             test_env.exec_client_command(cmd_args, env_vars)

     def run_c_unit_tests(self, version):

@@ -143,38 +154,118 @@ class FdbCShimTests:
                 test_env.client_lib_external
             ]
             env_vars = os.environ.copy()
-            env_vars["LD_LIBRARY_PATH"] = self.downloader.lib_dir(version)
+            env_vars["FDB_LOCAL_CLIENT_LIBRARY_PATH"] = self.downloader.lib_path(
+                version)
             test_env.exec_client_command(cmd_args, env_vars)

-    def test_invalid_c_client_lib_env_var(self, version):
+    def run_c_shim_lib_tester(
+            self,
+            version,
+            test_env,
+            api_version=None,
+            invalid_lib_path=False,
+            call_set_path=False,
+            set_env_path=False,
+            set_ld_lib_path=False,
+            use_external_lib=True,
+            expected_ret_code=0
+    ):
         print('-' * 80)
-        print("Test invalid FDB_C_CLIENT_LIBRARY_PATH value")
+        if api_version is None:
+            api_version = api_version_from_str(version)
+        test_flags = []
+        if invalid_lib_path:
+            test_flags.append("invalid_lib_path")
+        if call_set_path:
+            test_flags.append("call_set_path")
+        if set_ld_lib_path:
+            test_flags.append("set_ld_lib_path")
+        if use_external_lib:
+            test_flags.append("use_external_lib")
+        else:
+            test_flags.append("use_local_lib")
+        print("C Shim Tests - version: {}, API version: {}, {}".format(version,
+                                                                       api_version, ", ".join(test_flags)))
         print('-' * 80)
-        with TestEnv(self.build_dir, self.downloader, version) as test_env:
-            cmd_args = self.build_c_api_tester_args(test_env, DEFAULT_TEST_FILE)
-            env_vars = os.environ.copy()
-            env_vars["FDB_C_CLIENT_LIBRARY_PATH"] = "dummy"
-            test_env.exec_client_command(cmd_args, env_vars, 1)
-
-    def test_valid_c_client_lib_env_var(self, version):
-        print('-' * 80)
-        print("Test valid FDB_C_CLIENT_LIBRARY_PATH value")
-        print('-' * 80)
-        with TestEnv(self.build_dir, self.downloader, version) as test_env:
-            cmd_args = self.build_c_api_tester_args(test_env, DEFAULT_TEST_FILE)
-            env_vars = os.environ.copy()
-            env_vars["FDB_C_CLIENT_LIBRARY_PATH"] = self.downloader.lib_path(version)
-            test_env.exec_client_command(cmd_args, env_vars)
+        cmd_args = [
+            self.shim_lib_tester_bin,
+            "--cluster-file",
+            test_env.cluster_file,
+            "--api-version",
+            str(api_version),
+        ]
+        if call_set_path:
+            cmd_args = cmd_args + [
+                "--local-client-library",
+                ("dummy" if invalid_lib_path else self.downloader.lib_path(version))
+            ]
+        if use_external_lib:
+            cmd_args = cmd_args + [
+                "--disable-local-client",
+                "--external-client-library",
+                test_env.client_lib_external
+            ]
+        env_vars = os.environ.copy()
+        env_vars["LD_LIBRARY_PATH"] = (
+            self.downloader.lib_dir(version) if set_ld_lib_path else "")
+        if set_env_path:
+            env_vars["FDB_LOCAL_CLIENT_LIBRARY_PATH"] = (
+                "dummy" if invalid_lib_path else self.downloader.lib_path(version))
+        test_env.exec_client_command(cmd_args, env_vars, expected_ret_code)

     def run_tests(self):
+        # Test the API workload with the dev version
         self.run_c_api_test(CURRENT_VERSION, DEFAULT_TEST_FILE)
+
+        # Run unit tests with the dev version
         self.run_c_unit_tests(CURRENT_VERSION)
+
+        with TestEnv(self.build_dir, self.downloader, CURRENT_VERSION) as test_env:
+            # Test lookup of the client library over LD_LIBRARY_PATH
+            self.run_c_shim_lib_tester(
+                CURRENT_VERSION, test_env, set_ld_lib_path=True)
+
+            # Test setting the client library path over an API call
+            self.run_c_shim_lib_tester(
+                CURRENT_VERSION, test_env, call_set_path=True)
+
+            # Test setting the client library path over an environment variable
+            self.run_c_shim_lib_tester(
+                CURRENT_VERSION, test_env, set_env_path=True)
+
+            # Test using the loaded client library as the local client
+            self.run_c_shim_lib_tester(
+                CURRENT_VERSION, test_env, call_set_path=True, use_external_lib=False)
+
+            # Test setting an invalid client library path over an API call
+            self.run_c_shim_lib_tester(
+                CURRENT_VERSION, test_env, call_set_path=True, invalid_lib_path=True, expected_ret_code=IMPLIBSO_ERROR_CODE)
+
+            # Test setting an invalid client library path over an environment variable
+            self.run_c_shim_lib_tester(
+                CURRENT_VERSION, test_env, set_env_path=True, invalid_lib_path=True, expected_ret_code=IMPLIBSO_ERROR_CODE)
+
+            # Test calling a function that exists in the loaded library, but not for the selected API version
+            self.run_c_shim_lib_tester(
+                CURRENT_VERSION, test_env, call_set_path=True, api_version=700)
+
         # binary downloads are currently available only for x86_64
-        if (self.platform == "x86_64"):
+        if self.platform == "x86_64":
+            # Test the API workload with the release version
             self.run_c_api_test(LAST_RELEASE_VERSION, DEFAULT_TEST_FILE)
-        self.run_c_api_test(CURRENT_VERSION, DEFAULT_TEST_FILE)
-        self.run_c_unit_tests(CURRENT_VERSION)
-        self.test_invalid_c_client_lib_env_var(CURRENT_VERSION)
-        self.test_valid_c_client_lib_env_var(CURRENT_VERSION)
+
+            with TestEnv(self.build_dir, self.downloader, LAST_RELEASE_VERSION) as test_env:
+                # Test using the loaded client library as the local client
+                self.run_c_shim_lib_tester(
+                    LAST_RELEASE_VERSION, test_env, call_set_path=True, use_external_lib=False)
+
+                # Test the client library of the release version in combination with the dev API version
+                self.run_c_shim_lib_tester(
+                    LAST_RELEASE_VERSION, test_env, call_set_path=True, api_version=api_version_from_str(CURRENT_VERSION), expected_ret_code=1)
+
+                # Test calling a function that does not exist in the loaded library
+                self.run_c_shim_lib_tester(
+                    "7.0.0", test_env, call_set_path=True, api_version=700, expected_ret_code=IMPLIBSO_ERROR_CODE)


 if __name__ == "__main__":

@@ -194,12 +285,26 @@ if __name__ == "__main__":
         help="FDB build directory",
         required=True,
     )
-    parser.add_argument('--unit-tests-bin', type=str,
-                        help='Path to the fdb_c_shim_unit_tests executable.')
-    parser.add_argument('--api-tester-bin', type=str,
-                        help='Path to the fdb_c_shim_api_tester executable.')
-    parser.add_argument('--api-test-dir', type=str,
-                        help='Path to a directory with api test definitions.')
+    parser.add_argument(
+        '--unit-tests-bin',
+        type=str,
+        help='Path to the fdb_c_shim_unit_tests executable.',
+        required=True)
+    parser.add_argument(
+        '--api-tester-bin',
+        type=str,
+        help='Path to the fdb_c_shim_api_tester executable.',
+        required=True)
+    parser.add_argument(
+        '--shim-lib-tester-bin',
+        type=str,
+        help='Path to the fdb_c_shim_lib_tester executable.',
+        required=True)
+    parser.add_argument(
+        '--api-test-dir',
+        type=str,
+        help='Path to a directory with api test definitions.',
+        required=True)
     args = parser.parse_args()
     test = FdbCShimTests(args)
     test.run_tests()

@@ -0,0 +1,253 @@
+/*
+ * shim_lib_tester.cpp
+ *
+ * This source file is part of the FoundationDB open source project
+ *
+ * Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * A utility for testing shim library usage with various valid and invalid configurations
+ */
+
+#include "fmt/core.h"
+#include "test/fdb_api.hpp"
+#include "SimpleOpt/SimpleOpt.h"
+#include <thread>
+#include <string_view>
+#include "foundationdb/fdb_c_shim.h"
+
+#undef ERROR
+#define ERROR(name, number, description) enum { error_code_##name = number };
+
+#include "flow/error_definitions.h"
+
+using namespace std::string_view_literals;
+
+namespace {
+
+enum TesterOptionId {
+    OPT_HELP,
+    OPT_CONNFILE,
+    OPT_LOCAL_CLIENT_LIBRARY,
+    OPT_EXTERNAL_CLIENT_LIBRARY,
+    OPT_EXTERNAL_CLIENT_DIRECTORY,
+    OPT_DISABLE_LOCAL_CLIENT,
+    OPT_API_VERSION
+};
+
+const int MIN_TESTABLE_API_VERSION = 400;
+
+CSimpleOpt::SOption TesterOptionDefs[] = //
+    { { OPT_HELP, "-h", SO_NONE },
+      { OPT_HELP, "--help", SO_NONE },
+      { OPT_CONNFILE, "-C", SO_REQ_SEP },
+      { OPT_CONNFILE, "--cluster-file", SO_REQ_SEP },
+      { OPT_LOCAL_CLIENT_LIBRARY, "--local-client-library", SO_REQ_SEP },
+      { OPT_EXTERNAL_CLIENT_LIBRARY, "--external-client-library", SO_REQ_SEP },
+      { OPT_EXTERNAL_CLIENT_DIRECTORY, "--external-client-dir", SO_REQ_SEP },
+      { OPT_DISABLE_LOCAL_CLIENT, "--disable-local-client", SO_NONE },
+      { OPT_API_VERSION, "--api-version", SO_REQ_SEP },
+      SO_END_OF_OPTIONS };
+
+class TesterOptions {
+public:
+    // FDB API version, using the latest version by default
+    int apiVersion = FDB_API_VERSION;
+    std::string clusterFile;
+    std::string localClientLibrary;
+    std::string externalClientLibrary;
+    std::string externalClientDir;
+    bool disableLocalClient = false;
+};
+
+void printProgramUsage(const char* execName) {
+    printf("usage: %s [OPTIONS]\n"
+           "\n",
+           execName);
+    printf("  -C, --cluster-file FILE\n"
+           "                 The path of a file containing the connection string for the\n"
+           "                 FoundationDB cluster. The default is `fdb.cluster'\n"
+           "  --local-client-library FILE\n"
+           "                 Path to the local client library.\n"
+           "  --external-client-library FILE\n"
+           "                 Path to the external client library.\n"
+           "  --external-client-dir DIR\n"
+           "                 Directory containing external client libraries.\n"
+           "  --disable-local-client DIR\n"
+           "                 Disable the local client, i.e. use only external client libraries.\n"
+           "  --api-version VERSION\n"
+           "                 Required FDB API version (default %d).\n"
+           "  -h, --help     Display this help and exit.\n",
+           FDB_API_VERSION);
+}
+
+bool processIntOption(const std::string& optionName, const std::string& value, int minValue, int maxValue, int& res) {
+    char* endptr;
+    res = strtol(value.c_str(), &endptr, 10);
+    if (*endptr != '\0') {
+        fmt::print(stderr, "Invalid value {} for {}", value, optionName);
+        return false;
+    }
+    if (res < minValue || res > maxValue) {
+        fmt::print(stderr, "Value for {} must be between {} and {}", optionName, minValue, maxValue);
+        return false;
+    }
+    return true;
+}
+
+bool processArg(TesterOptions& options, const CSimpleOpt& args) {
+    switch (args.OptionId()) {
+    case OPT_CONNFILE:
+        options.clusterFile = args.OptionArg();
+        break;
+    case OPT_LOCAL_CLIENT_LIBRARY:
+        options.localClientLibrary = args.OptionArg();
+        break;
+    case OPT_EXTERNAL_CLIENT_LIBRARY:
+        options.externalClientLibrary = args.OptionArg();
+        break;
+    case OPT_EXTERNAL_CLIENT_DIRECTORY:
+        options.externalClientDir = args.OptionArg();
+        break;
+    case OPT_DISABLE_LOCAL_CLIENT:
+        options.disableLocalClient = true;
+        break;
+    case OPT_API_VERSION:
+        if (!processIntOption(
+                args.OptionText(), args.OptionArg(), MIN_TESTABLE_API_VERSION, FDB_API_VERSION, options.apiVersion)) {
+            return false;
+        }
+        break;
+    }
+    return true;
+}
+
+bool parseArgs(TesterOptions& options, int argc, char** argv) {
+    // declare our options parser, pass in the arguments from main
+    // as well as our array of valid options.
+    CSimpleOpt args(argc, argv, TesterOptionDefs);
+
+    // while there are arguments left to process
+    while (args.Next()) {
+        if (args.LastError() == SO_SUCCESS) {
+            if (args.OptionId() == OPT_HELP) {
+                printProgramUsage(argv[0]);
+                return false;
+            }
+            if (!processArg(options, args)) {
+                return false;
+            }
+        } else {
+            fmt::print(stderr, "ERROR: Invalid argument: {}\n", args.OptionText());
+            printProgramUsage(argv[0]);
+            return false;
+        }
+    }
+    return true;
+}
+
+void fdb_check(fdb::Error e, std::string_view msg, fdb::Error::CodeType expectedError = error_code_success) {
+    if (e.code()) {
+        fmt::print(stderr, "{}, Error: {}({})\n", msg, e.code(), e.what());
+        std::abort();
+    }
+}
+
+void applyNetworkOptions(TesterOptions& options) {
+    if (!options.externalClientLibrary.empty()) {
+        fdb::network::setOption(FDBNetworkOption::FDB_NET_OPTION_DISABLE_LOCAL_CLIENT);
+        fdb::network::setOption(FDBNetworkOption::FDB_NET_OPTION_EXTERNAL_CLIENT_LIBRARY,
+                                options.externalClientLibrary);
+    } else if (!options.externalClientDir.empty()) {
+        if (options.disableLocalClient) {
+            fdb::network::setOption(FDBNetworkOption::FDB_NET_OPTION_DISABLE_LOCAL_CLIENT);
+        }
+        fdb::network::setOption(FDBNetworkOption::FDB_NET_OPTION_EXTERNAL_CLIENT_DIRECTORY, options.externalClientDir);
+    } else {
+        if (options.disableLocalClient) {
+            fmt::print(stderr, "Invalid options: Cannot disable local client if no external library is provided");
+            exit(1);
+        }
+    }
+}
+
+void testBasicApi(const TesterOptions& options) {
+    fdb::Database db(options.clusterFile);
+    fdb::Transaction tx = db.createTransaction();
+    while (true) {
+        try {
+            // Set a time out to avoid long delays when testing invalid configurations
+            tx.setOption(FDB_TR_OPTION_TIMEOUT, 1000);
+            tx.set(fdb::toBytesRef("key1"sv), fdb::toBytesRef("val1"sv));
+            fdb_check(tx.commit().blockUntilReady(), "Wait on commit failed");
+            break;
+        } catch (const fdb::Error& err) {
+            if (err.code() == error_code_timed_out) {
+                exit(1);
+            }
+            auto onErrorFuture = tx.onError(err);
+            fdb_check(onErrorFuture.blockUntilReady(), "Wait on onError failed");
+            fdb_check(onErrorFuture.error(), "onError failed");
+        }
+    }
+}
+
+void test710Api(const TesterOptions& options) {
+    fdb::Database db(options.clusterFile);
+    try {
+        db.openTenant(fdb::toBytesRef("not_existing_tenant"sv));
+    } catch (const fdb::Error& err) {
+        fdb_check(err, "Tenant not found expected", error_code_tenant_not_found);
+    }
+}
+
+} // namespace
+
+int main(int argc, char** argv) {
+    int retCode = 0;
+    try {
+        TesterOptions options;
+        if (!parseArgs(options, argc, argv)) {
+            return 1;
+        }
+
+        if (!options.localClientLibrary.empty()) {
+            // Must be called before the first FDB API call
+            fdb_shim_set_local_client_library_path(options.localClientLibrary.c_str());
+        }
+
+        fdb::selectApiVersionCapped(options.apiVersion);
+        applyNetworkOptions(options);
+        fdb::network::setup();
+
+        std::thread network_thread{ &fdb::network::run };
+
+        // Try calling some basic functionality that is available
+        // in all recent API versions
+        testBasicApi(options);
+
+        // Try calling 710-specific API. This enables testing what
+        // happens if a library is missing a function
+        test710Api(options);
+
+        fdb_check(fdb::network::stop(), "Stop network failed");
+        network_thread.join();
+    } catch (const std::runtime_error& err) {
+        fmt::print(stderr, "runtime error caught: {}\n", err.what());
+        retCode = 1;
+    }
+    return retCode;
+}

@@ -68,7 +68,7 @@ endif()
 set(setup_file_name foundationdb-${FDB_VERSION}.tar.gz)
 set(package_file ${CMAKE_BINARY_DIR}/packages/foundationdb-${FDB_VERSION}${not_fdb_release_string}.tar.gz)
 add_custom_command(OUTPUT ${package_file}
-  COMMAND $<TARGET_FILE:Python::Interpreter> setup.py sdist --formats=gztar &&
+  COMMAND $<TARGET_FILE:Python3::Interpreter> setup.py sdist --formats=gztar &&
           ${CMAKE_COMMAND} -E copy dist/${setup_file_name} ${package_file}
   WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
   COMMENT "Create Python sdist package")

@@ -628,8 +628,9 @@ def tenants(logger):
    assert(len(json_output) == 2)
    assert('tenant' in json_output)
    assert(json_output['type'] == 'success')
-   assert(len(json_output['tenant']) == 3)
+   assert(len(json_output['tenant']) == 4)
    assert('id' in json_output['tenant'])
+   assert('encrypted' in json_output['tenant'])
    assert('prefix' in json_output['tenant'])
    assert(len(json_output['tenant']['prefix']) == 2)
    assert('base64' in json_output['tenant']['prefix'])

@@ -649,8 +650,9 @@ def tenants(logger):
    assert(len(json_output) == 2)
    assert('tenant' in json_output)
    assert(json_output['type'] == 'success')
-   assert(len(json_output['tenant']) == 4)
+   assert(len(json_output['tenant']) == 5)
    assert('id' in json_output['tenant'])
+   assert('encrypted' in json_output['tenant'])
    assert('prefix' in json_output['tenant'])
    assert(json_output['tenant']['tenant_state'] == 'ready')
    assert('tenant_group' in json_output['tenant'])

@@ -125,7 +125,7 @@ function(add_fdb_test)
   list(TRANSFORM ADD_FDB_TEST_TEST_FILES PREPEND "${CMAKE_CURRENT_SOURCE_DIR}/")
   if (ENABLE_SIMULATION_TESTS)
     add_test(NAME ${test_name}
-      COMMAND $<TARGET_FILE:Python::Interpreter> ${TestRunner}
+      COMMAND $<TARGET_FILE:Python3::Interpreter> ${TestRunner}
       -n ${test_name}
       -b ${PROJECT_BINARY_DIR}
       -t ${test_type}

@@ -440,7 +440,7 @@ function(add_fdbclient_test)
   message(STATUS "Adding Client test ${T_NAME}")
   add_test(NAME "${T_NAME}"
     WORKING_DIRECTORY ${T_WORKING_DIRECTORY}
-    COMMAND ${Python_EXECUTABLE} ${TMP_CLUSTER_CMD}
+    COMMAND ${Python3_EXECUTABLE} ${TMP_CLUSTER_CMD}
     --
     ${T_COMMAND})
   if (T_TEST_TIMEOUT)

@@ -473,7 +473,7 @@ function(add_unavailable_fdbclient_test)
   endif()
   message(STATUS "Adding unavailable client test ${T_NAME}")
   add_test(NAME "${T_NAME}"
-    COMMAND ${Python_EXECUTABLE} ${CMAKE_SOURCE_DIR}/tests/TestRunner/fake_cluster.py
+    COMMAND ${Python3_EXECUTABLE} ${CMAKE_SOURCE_DIR}/tests/TestRunner/fake_cluster.py
     --output-dir ${CMAKE_BINARY_DIR}
     --
     ${T_COMMAND})

@@ -508,7 +508,7 @@ function(add_multi_fdbclient_test)
   endif()
   message(STATUS "Adding Client test ${T_NAME}")
   add_test(NAME "${T_NAME}"
-    COMMAND ${Python_EXECUTABLE} ${CMAKE_SOURCE_DIR}/tests/TestRunner/tmp_multi_cluster.py
+    COMMAND ${Python3_EXECUTABLE} ${CMAKE_SOURCE_DIR}/tests/TestRunner/tmp_multi_cluster.py
     --build-dir ${CMAKE_BINARY_DIR}
     --clusters 3
     --

@@ -56,8 +56,8 @@ endif()
 # Python Bindings
 ################################################################################

-find_package(Python COMPONENTS Interpreter)
-if(Python_Interpreter_FOUND)
+find_package(Python3 COMPONENTS Interpreter)
+if(Python3_Interpreter_FOUND)
   set(WITH_PYTHON ON)
 else()
   message(WARNING "Could not found a suitable python interpreter")

@@ -26,8 +26,7 @@ extern "C" {
 #define CHECK(cond, fmt, ...) do { \
     if(!(cond)) { \
       fprintf(stderr, "implib-gen: $load_name: " fmt "\n", ##__VA_ARGS__); \
-      assert(0 && "Assertion in generated code"); \
-      exit(1); \
+      abort(); \
     } \
   } while(0)

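Note: the switch from assert/exit(1) to abort() is what the Python harness above keys on — IMPLIBSO_ERROR_CODE = -6 is termination by SIGABRT. A minimal POSIX sketch (assumption: Linux/macOS) of that observable behavior:

    #include <cassert>
    #include <csignal>
    #include <cstdlib>
    #include <sys/wait.h>
    #include <unistd.h>

    int main() {
        pid_t pid = fork();
        if (pid == 0) {
            abort(); // what the generated CHECK macro now does on failure
        }
        int status = 0;
        waitpid(pid, &status, 0);
        // Python's subprocess reports this child as returncode == -6
        assert(WIFSIGNALED(status) && WTERMSIG(status) == SIGABRT);
        return 0;
    }
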
@@ -789,7 +789,7 @@ namespace SummarizeTest
                 int stderrSeverity = (int)Magnesium.Severity.SevError;

                 Dictionary<KeyValuePair<string, Magnesium.Severity>, Magnesium.Severity> severityMap = new Dictionary<KeyValuePair<string, Magnesium.Severity>, Magnesium.Severity>();
-                Dictionary<Tuple<string, string>, bool> codeCoverage = new Dictionary<Tuple<string, string>, bool>();
+                var codeCoverage = new Dictionary<Tuple<string, string, string>, bool>();

                 foreach (var traceFileName in traceFiles)
                 {

@@ -902,12 +902,17 @@ namespace SummarizeTest
                         if (ev.Type == "CodeCoverage" && !willRestart)
                         {
                             bool covered = true;
-                            if(ev.DDetails.ContainsKey("Covered"))
+                            if (ev.DDetails.ContainsKey("Covered"))
                             {
                                 covered = int.Parse(ev.Details.Covered) != 0;
                             }

-                            var key = new Tuple<string, string>(ev.Details.File, ev.Details.Line);
+                            var comment = "";
+                            if (ev.DDetails.ContainsKey("Comment"))
+                            {
+                                comment = ev.Details.Comment;
+                            }
+                            var key = new Tuple<string, string, string>(ev.Details.File, ev.Details.Line, comment);
                             if (covered || !codeCoverage.ContainsKey(key))
                             {
                                 codeCoverage[key] = covered;

@@ -961,6 +966,9 @@ namespace SummarizeTest
                     {
                         element.Add(new XAttribute("Covered", "0"));
                     }
+                    if (kv.Key.Item3.Length > 0) {
+                        element.Add(new XAttribute("Comment", kv.Key.Item3));
+                    }

                     xout.Add(element);
                 }

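Note: an illustrative C++ parallel (not project code) of the data-structure change in the three hunks above — coverage hits are now keyed by (file, line, comment) instead of (file, line), so two CodeCoverage events on the same source line with different comments are tracked separately:

    #include <map>
    #include <string>
    #include <tuple>

    using CoverageKey = std::tuple<std::string, std::string, std::string>;

    void record(std::map<CoverageKey, bool>& coverage,
                const std::string& file, const std::string& line,
                const std::string& comment, bool covered) {
        CoverageKey key{ file, line, comment };
        // A covered hit always wins; an uncovered hit only registers once.
        if (covered || !coverage.count(key))
            coverage[key] = covered;
    }
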
@@ -58,6 +58,69 @@ ACTOR Future<Void> setBlobRange(Database db, Key startKey, Key endKey, Value val
     }
 }

+ACTOR Future<Version> getLatestReadVersion(Database db) {
+    state Transaction tr(db);
+    loop {
+        try {
+            Version rv = wait(tr.getReadVersion());
+            fmt::print("Resolved latest read version as {0}\n", rv);
+            return rv;
+        } catch (Error& e) {
+            wait(tr.onError(e));
+        }
+    }
+}
+
+// print after delay if not cancelled
+ACTOR Future<Void> printAfterDelay(double delaySeconds, std::string message) {
+    wait(delay(delaySeconds));
+    fmt::print("{}\n", message);
+    return Void();
+}
+
+ACTOR Future<Void> doBlobPurge(Database db, Key startKey, Key endKey, Optional<Version> version) {
+    state Version purgeVersion;
+    if (version.present()) {
+        purgeVersion = version.get();
+    } else {
+        wait(store(purgeVersion, getLatestReadVersion(db)));
+    }
+
+    state Key purgeKey = wait(db->purgeBlobGranules(KeyRange(KeyRangeRef(startKey, endKey)), purgeVersion, {}));
+
+    fmt::print("Blob purge registered for [{0} - {1}) @ {2}\n", startKey.printable(), endKey.printable(), purgeVersion);
+
+    state Future<Void> printWarningActor = printAfterDelay(
+        5.0, "Waiting for purge to complete. (interrupting this wait with CTRL+C will not cancel the purge)");
+    wait(db->waitPurgeGranulesComplete(purgeKey));
+
+    fmt::print("Blob purge complete for [{0} - {1}) @ {2}\n", startKey.printable(), endKey.printable(), purgeVersion);
+
+    return Void();
+}
+
+ACTOR Future<Void> doBlobCheck(Database db, Key startKey, Key endKey, Optional<Version> version) {
+    state Transaction tr(db);
+    state Version readVersionOut = invalidVersion;
+    state double elapsed = -timer_monotonic();
+    loop {
+        try {
+            wait(success(tr.readBlobGranules(KeyRange(KeyRangeRef(startKey, endKey)), 0, version, &readVersionOut)));
+            elapsed += timer_monotonic();
+            break;
+        } catch (Error& e) {
+            wait(tr.onError(e));
+        }
+    }
+
+    fmt::print("Blob check complete for [{0} - {1}) @ {2} in {3:.6f} seconds\n",
+               startKey.printable(),
+               endKey.printable(),
+               readVersionOut,
+               elapsed);
+    return Void();
+}
+
 } // namespace

 namespace fdb_cli {

@@ -66,7 +129,7 @@ ACTOR Future<bool> blobRangeCommandActor(Database localDb,
                                          Optional<TenantMapEntry> tenantEntry,
                                          std::vector<StringRef> tokens) {
     // enables blob writing for the given range
-    if (tokens.size() != 4) {
+    if (tokens.size() != 4 && tokens.size() != 5) {
         printUsage(tokens[0]);
         return false;
     }

@@ -84,29 +147,60 @@ ACTOR Future<bool> blobRangeCommandActor(Database localDb,

     if (end > LiteralStringRef("\xff")) {
         // TODO is this something we want?
-        printf("Cannot blobbify system keyspace! Problematic End Key: %s\n", tokens[3].printable().c_str());
+        fmt::print("Cannot blobbify system keyspace! Problematic End Key: {0}\n", tokens[3].printable());
         return false;
     } else if (tokens[2] >= tokens[3]) {
-        printf("Invalid blob range [%s - %s)\n", tokens[2].printable().c_str(), tokens[3].printable().c_str());
+        fmt::print("Invalid blob range [{0} - {1})\n", tokens[2].printable(), tokens[3].printable());
     } else {
-        if (tokencmp(tokens[1], "start")) {
-            printf("Starting blobbify range for [%s - %s)\n",
-                   tokens[2].printable().c_str(),
-                   tokens[3].printable().c_str());
-            wait(setBlobRange(localDb, begin, end, LiteralStringRef("1")));
-        } else if (tokencmp(tokens[1], "stop")) {
-            printf("Stopping blobbify range for [%s - %s)\n",
-                   tokens[2].printable().c_str(),
-                   tokens[3].printable().c_str());
-            wait(setBlobRange(localDb, begin, end, StringRef()));
+        if (tokencmp(tokens[1], "start") || tokencmp(tokens[1], "stop")) {
+            bool starting = tokencmp(tokens[1], "start");
+            if (tokens.size() > 4) {
+                printUsage(tokens[0]);
+                return false;
+            }
+            fmt::print("{0} blobbify range for [{1} - {2})\n",
+                       starting ? "Starting" : "Stopping",
+                       tokens[2].printable().c_str(),
+                       tokens[3].printable().c_str());
+            wait(setBlobRange(localDb, begin, end, starting ? LiteralStringRef("1") : StringRef()));
+        } else if (tokencmp(tokens[1], "purge") || tokencmp(tokens[1], "check")) {
+            bool purge = tokencmp(tokens[1], "purge");
+
+            Optional<Version> version;
+            if (tokens.size() > 4) {
+                Version v;
+                int n = 0;
+                if (sscanf(tokens[4].toString().c_str(), "%" PRId64 "%n", &v, &n) != 1 || n != tokens[4].size()) {
+                    printUsage(tokens[0]);
+                    return false;
+                }
+                version = v;
+            }
+
+            fmt::print("{0} blob range [{1} - {2})",
+                       purge ? "Purging" : "Checking",
+                       tokens[2].printable(),
+                       tokens[3].printable());
+            if (version.present()) {
+                fmt::print(" @ {0}", version.get());
+            }
+            fmt::print("\n");
+
+            if (purge) {
+                wait(doBlobPurge(localDb, begin, end, version));
+            } else {
+                wait(doBlobCheck(localDb, begin, end, version));
+            }
         } else {
             printUsage(tokens[0]);
-            printf("Usage: blobrange <start|stop> <startkey> <endkey>");
             return false;
         }
     }
     return true;
 }

-CommandFactory blobRangeFactory("blobrange", CommandHelp("blobrange <start|stop> <startkey> <endkey>", "", ""));
+CommandFactory blobRangeFactory("blobrange",
+                                CommandHelp("blobrange <start|stop|purge|check> <startkey> <endkey> [version]",
+                                            "",
+                                            ""));
 } // namespace fdb_cli

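Note: the optional [version] argument is validated with the strict sscanf idiom above; an equivalent standalone sketch (illustrative only):

    #include <cinttypes>
    #include <cstdio>
    #include <string>

    // Returns true only if the whole string is a decimal int64: %n records
    // how many characters were consumed, so trailing garbage such as
    // "123abc" is rejected.
    bool parseVersion(const std::string& s, int64_t& v) {
        int n = 0;
        return sscanf(s.c_str(), "%" PRId64 "%n", &v, &n) == 1 && n == (int)s.size();
    }
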
(File diff suppressed because it is too large)

@@ -82,7 +82,7 @@ void ClientKnobs::initialize(Randomize randomize) {
 	init( METADATA_VERSION_CACHE_SIZE, 1000 );
 	init( CHANGE_FEED_LOCATION_LIMIT, 10000 );
 	init( CHANGE_FEED_CACHE_SIZE, 100000 ); if( randomize && BUGGIFY ) CHANGE_FEED_CACHE_SIZE = 1;
-	init( CHANGE_FEED_POP_TIMEOUT, 5.0 );
+	init( CHANGE_FEED_POP_TIMEOUT, 10.0 );
 	init( CHANGE_FEED_STREAM_MIN_BYTES, 1e4 ); if( randomize && BUGGIFY ) CHANGE_FEED_STREAM_MIN_BYTES = 1;

 	init( MAX_BATCH_SIZE, 1000 ); if( randomize && BUGGIFY ) MAX_BATCH_SIZE = 1;

@@ -804,6 +804,8 @@ ACTOR Future<Optional<ClusterConnectionString>> getConnectionString(Database cx)
     }
 }

+static std::vector<std::string> connectionStrings;
+
 namespace {

 ACTOR Future<Optional<ClusterConnectionString>> getClusterConnectionStringFromStorageServer(Transaction* tr) {

@@ -821,6 +823,19 @@ ACTOR Future<Optional<ClusterConnectionString>> getClusterConnectionStringFromSt

     Version readVersion = wait(tr->getReadVersion());
     state Optional<Value> currentKey = wait(tr->get(coordinatorsKey));
+    if (g_network->isSimulated() && currentKey.present()) {
+        // If the change coordinators request succeeded, the coordinators
+        // should have changed to the connection string of the most
+        // recently issued request. If instead the connection string is
+        // equal to one of the previously issued requests, there is a bug
+        // and we are breaking the promises we make with
+        // commit_unknown_result (the transaction must no longer be in
+        // progress when receiving this error).
+        int n = connectionStrings.size() > 0 ? connectionStrings.size() - 1 : 0; // avoid underflow
+        for (int i = 0; i < n; ++i) {
+            ASSERT(currentKey.get() != connectionStrings.at(i));
+        }
+    }

     if (!currentKey.present()) {
         // Someone deleted this key entirely?

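Note: the comment block explains the invariant being checked; an isolated sketch (illustrative, not FDB code) of the same check — after a successful coordinator change, the stored connection string may only equal the most recently issued request, never an earlier one:

    #include <cassert>
    #include <string>
    #include <vector>

    void checkCoordinatorInvariant(const std::string& current,
                                   const std::vector<std::string>& issued) {
        int n = issued.size() > 0 ? issued.size() - 1 : 0; // avoid underflow
        for (int i = 0; i < n; ++i) {
            // An earlier connection string taking effect would violate the
            // commit_unknown_result contract described above.
            assert(current != issued[i]);
        }
    }
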
@@ -879,10 +894,12 @@ ACTOR Future<Optional<CoordinatorsResult>> changeQuorumChecker(Transaction* tr,
     std::sort(old.hostnames.begin(), old.hostnames.end());
     std::sort(old.coords.begin(), old.coords.end());
     if (conn->hostnames == old.hostnames && conn->coords == old.coords && old.clusterKeyName() == newName) {
+        connectionStrings.clear();
         return CoordinatorsResult::SAME_NETWORK_ADDRESSES;
     }

     conn->parseKey(newName + ':' + deterministicRandom()->randomAlphaNumeric(32));
+    connectionStrings.push_back(conn->toString());

     if (g_network->isSimulated()) {
         int i = 0;

@@ -242,7 +242,7 @@ void DatabaseContext::getLatestCommitVersions(const Reference<LocationInfo>& loc
         return;
     }

-    if (ssVersionVectorCache.getMaxVersion() != invalidVersion && readVersion > ssVersionVectorCache.getMaxVersion()) {
+    if (readVersion > ssVersionVectorCache.getMaxVersion()) {
         if (!CLIENT_KNOBS->FORCE_GRV_CACHE_OFF && !info->options.skipGrvCache && info->options.useGrvCache) {
             return;
         } else {

@@ -255,16 +255,32 @@ void DatabaseContext::getLatestCommitVersions(const Reference<LocationInfo>& loc

     std::map<Version, std::set<Tag>> versionMap; // order the versions to be returned
     for (int i = 0; i < locationInfo->locations()->size(); i++) {
-        UID uid = locationInfo->locations()->getId(i);
-        if (ssidTagMapping.find(uid) != ssidTagMapping.end()) {
-            Tag tag = ssidTagMapping[uid];
+        bool updatedVersionMap = false;
+        Version commitVersion = invalidVersion;
+        Tag tag = invalidTag;
+        auto iter = ssidTagMapping.find(locationInfo->locations()->getId(i));
+        if (iter != ssidTagMapping.end()) {
+            tag = iter->second;
             if (ssVersionVectorCache.hasVersion(tag)) {
-                Version commitVersion = ssVersionVectorCache.getVersion(tag); // latest commit version
+                commitVersion = ssVersionVectorCache.getVersion(tag); // latest commit version
                 if (commitVersion < readVersion) {
+                    updatedVersionMap = true;
                     versionMap[commitVersion].insert(tag);
                 }
             }
         }
+        // commitVersion == readVersion is common, do not log.
+        if (!updatedVersionMap && commitVersion != readVersion) {
+            TraceEvent(SevDebug, "CommitVersionNotFoundForSS")
+                .detail("InSSIDMap", iter != ssidTagMapping.end() ? 1 : 0)
+                .detail("Tag", tag)
+                .detail("CommitVersion", commitVersion)
+                .detail("ReadVersion", readVersion)
+                .detail("VersionVector", ssVersionVectorCache.toString())
+                .setMaxEventLength(11000)
+                .setMaxFieldLength(10000);
+            ++transactionCommitVersionNotFoundForSS;
+        }
     }

     // insert the commit versions in the version vector.

@@ -710,6 +726,7 @@ ACTOR static Future<Void> delExcessClntTxnEntriesActor(Transaction* tr, int64_t
             tr->clear(KeyRangeRef(txEntries[0].key, strinc(endKey)));
             TraceEvent(SevInfo, "DeletingExcessCntTxnEntries").detail("BytesToBeDeleted", numBytesToDel);
             int64_t bytesDel = -numBytesToDel;
+
             tr->atomicOp(clientLatencyAtomicCtr, StringRef((uint8_t*)&bytesDel, 8), MutationRef::AddValue);
             wait(tr->commit());
         }

@@ -1466,13 +1483,13 @@ DatabaseContext::DatabaseContext(Reference<AsyncVar<Reference<IClusterConnection
     transactionsProcessBehind("ProcessBehind", cc), transactionsThrottled("Throttled", cc),
     transactionsExpensiveClearCostEstCount("ExpensiveClearCostEstCount", cc),
     transactionGrvFullBatches("NumGrvFullBatches", cc), transactionGrvTimedOutBatches("NumGrvTimedOutBatches", cc),
-    latencies(1000), readLatencies(1000), commitLatencies(1000), GRVLatencies(1000), mutationsPerCommit(1000),
-    bytesPerCommit(1000), bgLatencies(1000), bgGranulesPerRequest(1000), outstandingWatches(0), sharedStatePtr(nullptr),
-    lastGrvTime(0.0), cachedReadVersion(0), lastRkBatchThrottleTime(0.0), lastRkDefaultThrottleTime(0.0),
-    lastProxyRequestTime(0.0), transactionTracingSample(false), taskID(taskID), clientInfo(clientInfo),
-    clientInfoMonitor(clientInfoMonitor), coordinator(coordinator), apiVersion(apiVersion), mvCacheInsertLocation(0),
-    healthMetricsLastUpdated(0), detailedHealthMetricsLastUpdated(0),
-    smoothMidShardSize(CLIENT_KNOBS->SHARD_STAT_SMOOTH_AMOUNT),
+    transactionCommitVersionNotFoundForSS("CommitVersionNotFoundForSS", cc), latencies(1000), readLatencies(1000),
+    commitLatencies(1000), GRVLatencies(1000), mutationsPerCommit(1000), bytesPerCommit(1000), bgLatencies(1000),
+    bgGranulesPerRequest(1000), outstandingWatches(0), sharedStatePtr(nullptr), lastGrvTime(0.0), cachedReadVersion(0),
+    lastRkBatchThrottleTime(0.0), lastRkDefaultThrottleTime(0.0), lastProxyRequestTime(0.0),
+    transactionTracingSample(false), taskID(taskID), clientInfo(clientInfo), clientInfoMonitor(clientInfoMonitor),
+    coordinator(coordinator), apiVersion(apiVersion), mvCacheInsertLocation(0), healthMetricsLastUpdated(0),
+    detailedHealthMetricsLastUpdated(0), smoothMidShardSize(CLIENT_KNOBS->SHARD_STAT_SMOOTH_AMOUNT),
     specialKeySpace(std::make_unique<SpecialKeySpace>(specialKeys.begin, specialKeys.end, /* test */ false)),
     connectToDatabaseEventCacheHolder(format("ConnectToDatabase/%s", dbId.toString().c_str())) {

@@ -1765,8 +1782,9 @@ DatabaseContext::DatabaseContext(const Error& err)
     transactionsProcessBehind("ProcessBehind", cc), transactionsThrottled("Throttled", cc),
     transactionsExpensiveClearCostEstCount("ExpensiveClearCostEstCount", cc),
     transactionGrvFullBatches("NumGrvFullBatches", cc), transactionGrvTimedOutBatches("NumGrvTimedOutBatches", cc),
-    latencies(1000), readLatencies(1000), commitLatencies(1000), GRVLatencies(1000), mutationsPerCommit(1000),
-    bytesPerCommit(1000), bgLatencies(1000), bgGranulesPerRequest(1000), transactionTracingSample(false),
+    transactionCommitVersionNotFoundForSS("CommitVersionNotFoundForSS", cc), latencies(1000), readLatencies(1000),
+    commitLatencies(1000), GRVLatencies(1000), mutationsPerCommit(1000), bytesPerCommit(1000), bgLatencies(1000),
+    bgGranulesPerRequest(1000), transactionTracingSample(false),
     smoothMidShardSize(CLIENT_KNOBS->SHARD_STAT_SMOOTH_AMOUNT),
     connectToDatabaseEventCacheHolder(format("ConnectToDatabase/%s", dbId.toString().c_str())) {}

@@ -1812,7 +1830,7 @@ DatabaseContext::~DatabaseContext() {
     TraceEvent("DatabaseContextDestructed", dbId).backtrace();
 }

-Optional<KeyRangeLocationInfo> DatabaseContext::getCachedLocation(const Optional<TenantName>& tenantName,
+Optional<KeyRangeLocationInfo> DatabaseContext::getCachedLocation(const Optional<TenantNameRef>& tenantName,
                                                                   const KeyRef& key,
                                                                   Reverse isBackward) {
     TenantMapEntry tenantEntry;

@@ -1838,7 +1856,7 @@ Optional<KeyRangeLocationInfo> DatabaseContext::getCachedLocation(const Optional
     return Optional<KeyRangeLocationInfo>();
 }

-bool DatabaseContext::getCachedLocations(const Optional<TenantName>& tenantName,
+bool DatabaseContext::getCachedLocations(const Optional<TenantNameRef>& tenantName,
                                          const KeyRangeRef& range,
                                          std::vector<KeyRangeLocationInfo>& result,
                                          int limit,

@@ -1895,7 +1913,7 @@ void DatabaseContext::cacheTenant(const TenantName& tenant, const TenantMapEntry
     }
 }

-Reference<LocationInfo> DatabaseContext::setCachedLocation(const Optional<TenantName>& tenant,
+Reference<LocationInfo> DatabaseContext::setCachedLocation(const Optional<TenantNameRef>& tenant,
                                                            const TenantMapEntry& tenantEntry,
                                                            const KeyRangeRef& absoluteKeys,
                                                            const std::vector<StorageServerInterface>& servers) {

@@ -2836,7 +2854,7 @@ void updateTagMappings(Database cx, const GetKeyServerLocationsReply& reply) {
 // If isBackward == true, returns the shard containing the key before 'key' (an infinitely long, inexpressible key).
 // Otherwise returns the shard containing key
 ACTOR Future<KeyRangeLocationInfo> getKeyLocation_internal(Database cx,
-                                                           Optional<TenantName> tenant,
+                                                           TenantInfo tenant,
                                                            Key key,
                                                            SpanContext spanContext,
                                                            Optional<UID> debugID,

@@ -2859,26 +2877,20 @@ ACTOR Future<KeyRangeLocationInfo> getKeyLocation_internal(Database cx,
             ++cx->transactionKeyServerLocationRequests;
             choose {
                 when(wait(cx->onProxiesChanged())) {}
-                when(GetKeyServerLocationsReply rep =
-                         wait(basicLoadBalance(cx->getCommitProxies(useProvisionalProxies),
-                                               &CommitProxyInterface::getKeyServersLocations,
-                                               GetKeyServerLocationsRequest(span.context,
-                                                                            tenant.castTo<TenantNameRef>(),
-                                                                            key,
-                                                                            Optional<KeyRef>(),
-                                                                            100,
-                                                                            isBackward,
-                                                                            version,
-                                                                            key.arena()),
-                                               TaskPriority::DefaultPromiseEndpoint))) {
+                when(GetKeyServerLocationsReply rep = wait(basicLoadBalance(
+                         cx->getCommitProxies(useProvisionalProxies),
+                         &CommitProxyInterface::getKeyServersLocations,
+                         GetKeyServerLocationsRequest(
+                             span.context, tenant, key, Optional<KeyRef>(), 100, isBackward, version, key.arena()),
+                         TaskPriority::DefaultPromiseEndpoint))) {
                     ++cx->transactionKeyServerLocationRequestsCompleted;
                     if (debugID.present())
                         g_traceBatch.addEvent(
                             "TransactionDebug", debugID.get().first(), "NativeAPI.getKeyLocation.After");
                     ASSERT(rep.results.size() == 1);

-                    auto locationInfo =
-                        cx->setCachedLocation(tenant, rep.tenantEntry, rep.results[0].first, rep.results[0].second);
+                    auto locationInfo = cx->setCachedLocation(
+                        tenant.name, rep.tenantEntry, rep.results[0].first, rep.results[0].second);
                     updateTssMappings(cx, rep);
                     updateTagMappings(cx, rep);

@@ -2891,8 +2903,8 @@ ACTOR Future<KeyRangeLocationInfo> getKeyLocation_internal(Database cx,
         }
     } catch (Error& e) {
         if (e.code() == error_code_tenant_not_found) {
-            ASSERT(tenant.present());
-            cx->invalidateCachedTenant(tenant.get());
+            ASSERT(tenant.name.present());
+            cx->invalidateCachedTenant(tenant.name.get());
         }

         throw;

@@ -2930,7 +2942,7 @@ bool checkOnlyEndpointFailed(const Database& cx, const Endpoint& endpoint) {

 template <class F>
 Future<KeyRangeLocationInfo> getKeyLocation(Database const& cx,
-                                            Optional<TenantName> const& tenant,
+                                            TenantInfo const& tenant,
                                             Key const& key,
                                             F StorageServerInterface::*member,
                                             SpanContext spanContext,

@@ -2939,7 +2951,7 @@ Future<KeyRangeLocationInfo> getKeyLocation(Database const& cx,
                                             Reverse isBackward,
                                             Version version) {
     // we first check whether this range is cached
-    Optional<KeyRangeLocationInfo> locationInfo = cx->getCachedLocation(tenant, key, isBackward);
+    Optional<KeyRangeLocationInfo> locationInfo = cx->getCachedLocation(tenant.name, key, isBackward);
     if (!locationInfo.present()) {
         return getKeyLocation_internal(
             cx, tenant, key, spanContext, debugID, useProvisionalProxies, isBackward, version);

@ -2971,7 +2983,7 @@ Future<KeyRangeLocationInfo> getKeyLocation(Reference<TransactionState> trState,
|
|||
UseTenant useTenant,
|
||||
Version version) {
|
||||
auto f = getKeyLocation(trState->cx,
|
||||
useTenant ? trState->tenant() : Optional<TenantName>(),
|
||||
useTenant ? trState->getTenantInfo(AllowInvalidTenantID::True) : TenantInfo(),
|
||||
key,
|
||||
member,
|
||||
trState->spanContext,
|
||||
|
@ -2980,9 +2992,11 @@ Future<KeyRangeLocationInfo> getKeyLocation(Reference<TransactionState> trState,
|
|||
isBackward,
|
||||
version);
|
||||
|
||||
if (trState->tenant().present() && useTenant) {
|
||||
if (trState->tenant().present() && useTenant && trState->tenantId == TenantInfo::INVALID_TENANT) {
|
||||
return map(f, [trState](const KeyRangeLocationInfo& locationInfo) {
|
||||
trState->tenantId = locationInfo.tenantEntry.id;
|
||||
if (trState->tenantId == TenantInfo::INVALID_TENANT) {
|
||||
trState->tenantId = locationInfo.tenantEntry.id;
|
||||
}
|
||||
return locationInfo;
|
||||
});
|
||||
} else {
|
||||
|
@ -2992,7 +3006,7 @@ Future<KeyRangeLocationInfo> getKeyLocation(Reference<TransactionState> trState,
|
|||
|
||||
ACTOR Future<std::vector<KeyRangeLocationInfo>> getKeyRangeLocations_internal(
|
||||
Database cx,
|
||||
Optional<TenantName> tenant,
|
||||
TenantInfo tenant,
|
||||
KeyRange keys,
|
||||
int limit,
|
||||
Reverse reverse,
|
||||
|
@ -3009,18 +3023,12 @@ ACTOR Future<std::vector<KeyRangeLocationInfo>> getKeyRangeLocations_internal(
|
|||
++cx->transactionKeyServerLocationRequests;
|
||||
choose {
|
||||
when(wait(cx->onProxiesChanged())) {}
|
||||
when(GetKeyServerLocationsReply _rep =
|
||||
wait(basicLoadBalance(cx->getCommitProxies(useProvisionalProxies),
|
||||
&CommitProxyInterface::getKeyServersLocations,
|
||||
GetKeyServerLocationsRequest(span.context,
|
||||
tenant.castTo<TenantNameRef>(),
|
||||
keys.begin,
|
||||
keys.end,
|
||||
limit,
|
||||
reverse,
|
||||
version,
|
||||
keys.arena()),
|
||||
TaskPriority::DefaultPromiseEndpoint))) {
|
||||
when(GetKeyServerLocationsReply _rep = wait(basicLoadBalance(
|
||||
cx->getCommitProxies(useProvisionalProxies),
|
||||
&CommitProxyInterface::getKeyServersLocations,
|
||||
GetKeyServerLocationsRequest(
|
||||
span.context, tenant, keys.begin, keys.end, limit, reverse, version, keys.arena()),
|
||||
TaskPriority::DefaultPromiseEndpoint))) {
|
||||
++cx->transactionKeyServerLocationRequestsCompleted;
|
||||
state GetKeyServerLocationsReply rep = _rep;
|
||||
if (debugID.present())
|
||||
|
@ -3037,7 +3045,7 @@ ACTOR Future<std::vector<KeyRangeLocationInfo>> getKeyRangeLocations_internal(
|
|||
rep.tenantEntry,
|
||||
(toRelativeRange(rep.results[shard].first, rep.tenantEntry.prefix) & keys),
|
||||
cx->setCachedLocation(
|
||||
tenant, rep.tenantEntry, rep.results[shard].first, rep.results[shard].second));
|
||||
tenant.name, rep.tenantEntry, rep.results[shard].first, rep.results[shard].second));
|
||||
wait(yield());
|
||||
}
|
||||
updateTssMappings(cx, rep);
|
||||
|
@ -3049,8 +3057,8 @@ ACTOR Future<std::vector<KeyRangeLocationInfo>> getKeyRangeLocations_internal(
|
|||
}
|
||||
} catch (Error& e) {
|
||||
if (e.code() == error_code_tenant_not_found) {
|
||||
ASSERT(tenant.present());
|
||||
cx->invalidateCachedTenant(tenant.get());
|
||||
ASSERT(tenant.name.present());
|
||||
cx->invalidateCachedTenant(tenant.name.get());
|
||||
}
|
||||
|
||||
throw;
|
||||
|
@ -3065,7 +3073,7 @@ ACTOR Future<std::vector<KeyRangeLocationInfo>> getKeyRangeLocations_internal(
|
|||
// [([a, b1), locationInfo), ([b1, c), locationInfo), ([c, d1), locationInfo)].
|
||||
template <class F>
|
||||
Future<std::vector<KeyRangeLocationInfo>> getKeyRangeLocations(Database const& cx,
|
||||
Optional<TenantName> tenant,
|
||||
TenantInfo const& tenant,
|
||||
KeyRange const& keys,
|
||||
int limit,
|
||||
Reverse reverse,
|
||||
|
@ -3078,7 +3086,7 @@ Future<std::vector<KeyRangeLocationInfo>> getKeyRangeLocations(Database const& c
|
|||
ASSERT(!keys.empty());
|
||||
|
||||
std::vector<KeyRangeLocationInfo> locations;
|
||||
if (!cx->getCachedLocations(tenant, keys, locations, limit, reverse)) {
|
||||
if (!cx->getCachedLocations(tenant.name, keys, locations, limit, reverse)) {
|
||||
return getKeyRangeLocations_internal(
|
||||
cx, tenant, keys, limit, reverse, spanContext, debugID, useProvisionalProxies, version);
|
||||
}
|
||||
|
@ -3116,7 +3124,7 @@ Future<std::vector<KeyRangeLocationInfo>> getKeyRangeLocations(Reference<Transac
|
|||
UseTenant useTenant,
|
||||
Version version) {
|
||||
auto f = getKeyRangeLocations(trState->cx,
|
||||
useTenant ? trState->tenant() : Optional<TenantName>(),
|
||||
useTenant ? trState->getTenantInfo(AllowInvalidTenantID::True) : TenantInfo(),
|
||||
keys,
|
||||
limit,
|
||||
reverse,
|
||||
|
@ -3126,10 +3134,12 @@ Future<std::vector<KeyRangeLocationInfo>> getKeyRangeLocations(Reference<Transac
|
|||
trState->useProvisionalProxies,
|
||||
version);
|
||||
|
||||
if (trState->tenant().present() && useTenant) {
|
||||
if (trState->tenant().present() && useTenant && trState->tenantId == TenantInfo::INVALID_TENANT) {
|
||||
return map(f, [trState](const std::vector<KeyRangeLocationInfo>& locationInfo) {
|
||||
ASSERT(!locationInfo.empty());
|
||||
trState->tenantId = locationInfo[0].tenantEntry.id;
|
||||
if (trState->tenantId == TenantInfo::INVALID_TENANT) {
|
||||
trState->tenantId = locationInfo[0].tenantEntry.id;
|
||||
}
|
||||
return locationInfo;
|
||||
});
|
||||
} else {
|
||||
|
@ -3146,7 +3156,7 @@ ACTOR Future<Void> warmRange_impl(Reference<TransactionState> trState, KeyRange
|
|||
loop {
|
||||
std::vector<KeyRangeLocationInfo> locations =
|
||||
wait(getKeyRangeLocations_internal(trState->cx,
|
||||
trState->tenant(),
|
||||
trState->getTenantInfo(),
|
||||
keys,
|
||||
CLIENT_KNOBS->WARM_RANGE_SHARD_LIMIT,
|
||||
Reverse::False,
|
||||
|
@ -3196,6 +3206,8 @@ SpanContext generateSpanID(bool transactionTracingSample, SpanContext parentCont
|
|||
deterministicRandom()->randomUniqueID(), deterministicRandom()->randomUInt64(), TraceFlags::unsampled);
|
||||
}
|
||||
|
||||
FDB_DEFINE_BOOLEAN_PARAM(AllowInvalidTenantID);
|
||||
|
||||
TransactionState::TransactionState(Database cx,
|
||||
Optional<TenantName> tenant,
|
||||
TaskPriority taskID,
|
||||
|
@ -3219,12 +3231,13 @@ Reference<TransactionState> TransactionState::cloneAndReset(Reference<Transactio
|
|||
newState->startTime = startTime;
|
||||
newState->committedVersion = committedVersion;
|
||||
newState->conflictingKeys = conflictingKeys;
|
||||
newState->authToken = authToken;
|
||||
newState->tenantSet = tenantSet;
|
||||
|
||||
return newState;
|
||||
}
|
||||
|
||||
TenantInfo TransactionState::getTenantInfo() {
|
||||
TenantInfo TransactionState::getTenantInfo(AllowInvalidTenantID allowInvalidId /* = false */) {
|
||||
Optional<TenantName> const& t = tenant();
|
||||
|
||||
if (options.rawAccess) {
|
||||
|
@ -3244,8 +3257,8 @@ TenantInfo TransactionState::getTenantInfo() {
|
|||
}
|
||||
}
|
||||
|
||||
ASSERT(tenantId != TenantInfo::INVALID_TENANT);
|
||||
return TenantInfo(t.get(), tenantId);
|
||||
ASSERT(allowInvalidId || tenantId != TenantInfo::INVALID_TENANT);
|
||||
return TenantInfo(t, authToken, tenantId);
|
||||
}
|
||||
|
||||
// Returns the tenant used in this transaction. If the tenant is unset and raw access isn't specified, then the default
|
||||
|
@ -3588,7 +3601,7 @@ ACTOR Future<Version> watchValue(Database cx, Reference<const WatchParameters> p
|
|||
|
||||
loop {
|
||||
state KeyRangeLocationInfo locationInfo = wait(getKeyLocation(cx,
|
||||
parameters->tenant.name,
|
||||
parameters->tenant,
|
||||
parameters->key,
|
||||
&StorageServerInterface::watchValue,
|
||||
parameters->spanContext,
|
||||
|
@ -3719,7 +3732,7 @@ ACTOR Future<Void> watchStorageServerResp(int64_t tenantId, Key key, Database cx
|
|||
}
|
||||
|
||||
ACTOR Future<Void> sameVersionDiffValue(Database cx, Reference<WatchParameters> parameters) {
|
||||
state ReadYourWritesTransaction tr(cx, parameters->tenant.name);
|
||||
state ReadYourWritesTransaction tr(cx, parameters->tenant.name.castTo<TenantName>());
|
||||
loop {
|
||||
try {
|
||||
if (!parameters->tenant.name.present()) {
|
||||
|
@ -5948,8 +5961,12 @@ ACTOR void checkWrites(Reference<TransactionState> trState,
|
|||
}
|
||||
}
|
||||
|
||||
ACTOR static Future<Void> commitDummyTransaction(Reference<TransactionState> trState, KeyRange range) {
|
||||
state Transaction tr(trState->cx);
|
||||
FDB_BOOLEAN_PARAM(TenantPrefixPrepended);
|
||||
|
||||
ACTOR static Future<Void> commitDummyTransaction(Reference<TransactionState> trState,
|
||||
KeyRange range,
|
||||
TenantPrefixPrepended tenantPrefixPrepended) {
|
||||
state Transaction tr(trState->cx, trState->tenant());
|
||||
state int retries = 0;
|
||||
state Span span("NAPI:dummyTransaction"_loc, trState->spanContext);
|
||||
tr.span.setParent(span.context);
|
||||
|
@ -5958,7 +5975,14 @@ ACTOR static Future<Void> commitDummyTransaction(Reference<TransactionState> trS
|
|||
TraceEvent("CommitDummyTransaction").detail("Key", range.begin).detail("Retries", retries);
|
||||
tr.trState->options = trState->options;
|
||||
tr.trState->taskID = trState->taskID;
|
||||
tr.setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
|
||||
tr.trState->authToken = trState->authToken;
|
||||
tr.trState->tenantId = trState->tenantId;
|
||||
if (!trState->hasTenant()) {
|
||||
tr.setOption(FDBTransactionOptions::RAW_ACCESS);
|
||||
} else {
|
||||
tr.trState->skipApplyTenantPrefix = tenantPrefixPrepended;
|
||||
CODE_PROBE(true, "Commit of a dummy transaction in tenant keyspace");
|
||||
}
|
||||
tr.setOption(FDBTransactionOptions::CAUSAL_WRITE_RISKY);
|
||||
tr.setOption(FDBTransactionOptions::LOCK_AWARE);
|
||||
tr.addReadConflictRange(range);
|
||||
|
@ -5966,6 +5990,10 @@ ACTOR static Future<Void> commitDummyTransaction(Reference<TransactionState> trS
|
|||
wait(tr.commit());
|
||||
return Void();
|
||||
} catch (Error& e) {
|
||||
// If the tenant is gone, then our original transaction won't be able to commit
|
||||
if (e.code() == error_code_unknown_tenant) {
|
||||
return Void();
|
||||
}
|
||||
TraceEvent("CommitDummyTransactionError")
|
||||
.errorUnsuppressed(e)
|
||||
.detail("Key", range.begin)
|
||||
|
@ -6120,6 +6148,7 @@ ACTOR static Future<Void> tryCommit(Reference<TransactionState> trState,
|
|||
state double startTime = now();
|
||||
state Span span("NAPI:tryCommit"_loc, trState->spanContext);
|
||||
state Optional<UID> debugID = trState->debugID;
|
||||
state TenantPrefixPrepended tenantPrefixPrepended = TenantPrefixPrepended::False;
|
||||
if (debugID.present()) {
|
||||
TraceEvent(interval.begin()).detail("Parent", debugID.get());
|
||||
}
|
||||
|
@ -6137,7 +6166,9 @@ ACTOR static Future<Void> tryCommit(Reference<TransactionState> trState,
|
|||
}
|
||||
|
||||
state Key tenantPrefix;
|
||||
if (trState->tenant().present()) {
|
||||
// skipApplyTenantPrefix is set only in the context of a commitDummyTransaction()
|
||||
// (see member declaration)
|
||||
if (trState->tenant().present() && !trState->skipApplyTenantPrefix) {
|
||||
KeyRangeLocationInfo locationInfo = wait(getKeyLocation(trState,
|
||||
""_sr,
|
||||
&StorageServerInterface::getValue,
|
||||
|
@ -6145,11 +6176,11 @@ ACTOR static Future<Void> tryCommit(Reference<TransactionState> trState,
|
|||
UseTenant::True,
|
||||
req.transaction.read_snapshot));
|
||||
applyTenantPrefix(req, locationInfo.tenantEntry.prefix);
|
||||
tenantPrefixPrepended = TenantPrefixPrepended::True;
|
||||
tenantPrefix = locationInfo.tenantEntry.prefix;
|
||||
}
|
||||
|
||||
CODE_PROBE(trState->skipApplyTenantPrefix, "Tenant prefix prepend skipped for dummy transaction");
|
||||
req.tenantInfo = trState->getTenantInfo();
|
||||
|
||||
startTime = now();
|
||||
state Optional<UID> commitID = Optional<UID>();
|
||||
|
||||
|
@ -6275,7 +6306,8 @@ ACTOR static Future<Void> tryCommit(Reference<TransactionState> trState,
|
|||
|
||||
CODE_PROBE(true, "Waiting for dummy transaction to report commit_unknown_result");
|
||||
|
||||
wait(commitDummyTransaction(trState, singleKeyRange(selfConflictingRange.begin)));
|
||||
wait(
|
||||
commitDummyTransaction(trState, singleKeyRange(selfConflictingRange.begin), tenantPrefixPrepended));
|
||||
}
|
||||
|
||||
// The user needs to be informed that we aren't sure whether the commit happened. Standard retry loops
|
||||
|
@ -6655,6 +6687,13 @@ void Transaction::setOption(FDBTransactionOptions::Option option, Optional<Strin
|
|||
trState->options.rawAccess = true;
|
||||
break;
|
||||
|
||||
case FDBTransactionOptions::AUTHORIZATION_TOKEN:
|
||||
if (value.present())
|
||||
trState->authToken = Standalone<StringRef>(value.get());
|
||||
else
|
||||
trState->authToken.reset();
|
||||
break;
|
||||
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
@ -7234,7 +7273,7 @@ ACTOR Future<StorageMetrics> doGetStorageMetrics(Database cx, KeyRange keys, Ref
|
|||
ACTOR Future<StorageMetrics> getStorageMetricsLargeKeyRange(Database cx, KeyRange keys) {
|
||||
state Span span("NAPI:GetStorageMetricsLargeKeyRange"_loc);
|
||||
std::vector<KeyRangeLocationInfo> locations = wait(getKeyRangeLocations(cx,
|
||||
Optional<TenantName>(),
|
||||
TenantInfo(),
|
||||
keys,
|
||||
std::numeric_limits<int>::max(),
|
||||
Reverse::False,
|
||||
|
@ -7336,7 +7375,7 @@ ACTOR Future<Standalone<VectorRef<ReadHotRangeWithMetrics>>> getReadHotRanges(Da
|
|||
// to find the read-hot sub ranges within a read-hot shard.
|
||||
std::vector<KeyRangeLocationInfo> locations =
|
||||
wait(getKeyRangeLocations(cx,
|
||||
Optional<TenantName>(),
|
||||
TenantInfo(),
|
||||
keys,
|
||||
shardLimit,
|
||||
Reverse::False,
|
||||
|
@ -7407,7 +7446,7 @@ ACTOR Future<std::pair<Optional<StorageMetrics>, int>> waitStorageMetrics(Databa
|
|||
state Span span("NAPI:WaitStorageMetrics"_loc, generateSpanID(cx->transactionTracingSample));
|
||||
loop {
|
||||
std::vector<KeyRangeLocationInfo> locations = wait(getKeyRangeLocations(cx,
|
||||
Optional<TenantName>(),
|
||||
TenantInfo(),
|
||||
keys,
|
||||
shardLimit,
|
||||
Reverse::False,
|
||||
|
@ -7582,17 +7621,21 @@ ACTOR Future<TenantMapEntry> blobGranuleGetTenantEntry(Transaction* self, Key ra
|
|||
self->trState->cx->getCachedLocation(self->getTenant().get(), rangeStartKey, Reverse::False);
|
||||
if (!cachedLocationInfo.present()) {
|
||||
KeyRangeLocationInfo l = wait(getKeyLocation_internal(self->trState->cx,
|
||||
self->getTenant().get(),
|
||||
self->trState->getTenantInfo(AllowInvalidTenantID::True),
|
||||
rangeStartKey,
|
||||
self->trState->spanContext,
|
||||
self->trState->debugID,
|
||||
self->trState->useProvisionalProxies,
|
||||
Reverse::False,
|
||||
latestVersion));
|
||||
self->trState->tenantId = l.tenantEntry.id;
|
||||
if (self->trState->tenantId == TenantInfo::INVALID_TENANT) {
|
||||
self->trState->tenantId = l.tenantEntry.id;
|
||||
}
|
||||
return l.tenantEntry;
|
||||
} else {
|
||||
self->trState->tenantId = cachedLocationInfo.get().tenantEntry.id;
|
||||
if (self->trState->tenantId == TenantInfo::INVALID_TENANT) {
|
||||
self->trState->tenantId = cachedLocationInfo.get().tenantEntry.id;
|
||||
}
|
||||
return cachedLocationInfo.get().tenantEntry;
|
||||
}
|
||||
}
|
||||
|
@ -8012,7 +8055,7 @@ ACTOR Future<Void> splitStorageMetricsStream(PromiseStream<Key> resultStream,
|
|||
loop {
|
||||
state std::vector<KeyRangeLocationInfo> locations =
|
||||
wait(getKeyRangeLocations(cx,
|
||||
Optional<TenantName>(),
|
||||
TenantInfo(),
|
||||
KeyRangeRef(beginKey, keys.end),
|
||||
CLIENT_KNOBS->STORAGE_METRICS_SHARD_LIMIT,
|
||||
Reverse::False,
|
||||
|
@ -8112,7 +8155,7 @@ ACTOR Future<Standalone<VectorRef<KeyRef>>> splitStorageMetrics(Database cx,
|
|||
loop {
|
||||
state std::vector<KeyRangeLocationInfo> locations =
|
||||
wait(getKeyRangeLocations(cx,
|
||||
Optional<TenantName>(),
|
||||
TenantInfo(),
|
||||
keys,
|
||||
CLIENT_KNOBS->STORAGE_METRICS_SHARD_LIMIT,
|
||||
Reverse::False,
|
||||
|
@ -8357,7 +8400,7 @@ ACTOR Future<std::vector<CheckpointMetaData>> getCheckpointMetaData(Database cx,
|
|||
try {
|
||||
state std::vector<KeyRangeLocationInfo> locations =
|
||||
wait(getKeyRangeLocations(cx,
|
||||
Optional<TenantName>(),
|
||||
TenantInfo(),
|
||||
keys,
|
||||
CLIENT_KNOBS->TOO_MANY,
|
||||
Reverse::False,
|
||||
|
@ -9272,7 +9315,7 @@ ACTOR Future<Void> getChangeFeedStreamActor(Reference<DatabaseContext> db,
|
|||
keys = fullRange & range;
|
||||
state std::vector<KeyRangeLocationInfo> locations =
|
||||
wait(getKeyRangeLocations(cx,
|
||||
Optional<TenantName>(),
|
||||
TenantInfo(),
|
||||
keys,
|
||||
CLIENT_KNOBS->CHANGE_FEED_LOCATION_LIMIT,
|
||||
Reverse::False,
|
||||
|
@ -9451,7 +9494,7 @@ ACTOR Future<OverlappingChangeFeedsInfo> getOverlappingChangeFeedsActor(Referenc
|
|||
try {
|
||||
state std::vector<KeyRangeLocationInfo> locations =
|
||||
wait(getKeyRangeLocations(cx,
|
||||
Optional<TenantName>(),
|
||||
TenantInfo(),
|
||||
range,
|
||||
CLIENT_KNOBS->CHANGE_FEED_LOCATION_LIMIT,
|
||||
Reverse::False,
|
||||
|
@ -9553,7 +9596,7 @@ ACTOR Future<Void> popChangeFeedMutationsActor(Reference<DatabaseContext> db, Ke
|
|||
|
||||
state std::vector<KeyRangeLocationInfo> locations =
|
||||
wait(getKeyRangeLocations(cx,
|
||||
Optional<TenantName>(),
|
||||
TenantInfo(),
|
||||
keys,
|
||||
3,
|
||||
Reverse::False,
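The hunks above repeatedly replace an unconditional `trState->tenantId = ...` assignment with a guarded one, so a tenant id obtained from the first location reply is never overwritten by later replies. A minimal standalone sketch of that resolve-once pattern (hypothetical simplified types, not the FDB API):

    #include <cstdint>

    constexpr int64_t INVALID_TENANT = -1; // stand-in for TenantInfo::INVALID_TENANT

    struct TxnState {
        int64_t tenantId = INVALID_TENANT;
    };

    // Record the id from a location reply only if it has not been resolved yet.
    void noteTenantId(TxnState& st, int64_t idFromReply) {
        if (st.tenantId == INVALID_TENANT) {
            st.tenantId = idFromReply;
        }
    }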
@ -914,7 +914,7 @@ void ServerKnobs::initialize(Randomize randomize, ClientKnobs* clientKnobs, IsSi
init( BG_SNAPSHOT_FILE_TARGET_CHUNK_BYTES, 64*1024 ); if ( randomize && BUGGIFY ) BG_SNAPSHOT_FILE_TARGET_CHUNK_BYTES = BG_SNAPSHOT_FILE_TARGET_BYTES / (1 << deterministicRandom()->randomInt(0, 8));
init( BG_DELTA_BYTES_BEFORE_COMPACT, BG_SNAPSHOT_FILE_TARGET_BYTES/2 );
init( BG_DELTA_FILE_TARGET_BYTES, BG_DELTA_BYTES_BEFORE_COMPACT/10 );
init( BG_DELTA_FILE_TARGET_CHUNK_BYTES, 64*1024 ); if ( randomize && BUGGIFY ) BG_DELTA_FILE_TARGET_CHUNK_BYTES = BG_DELTA_FILE_TARGET_BYTES / (1 << deterministicRandom()->randomInt(0, 7));
init( BG_DELTA_FILE_TARGET_CHUNK_BYTES, 32*1024 ); if ( randomize && BUGGIFY ) BG_DELTA_FILE_TARGET_CHUNK_BYTES = BG_DELTA_FILE_TARGET_BYTES / (1 << deterministicRandom()->randomInt(0, 7));
init( BG_MAX_SPLIT_FANOUT, 10 ); if( randomize && BUGGIFY ) BG_MAX_SPLIT_FANOUT = deterministicRandom()->randomInt(5, 15);
init( BG_MAX_MERGE_FANIN, 10 ); if( randomize && BUGGIFY ) BG_MAX_MERGE_FANIN = deterministicRandom()->randomInt(2, 15);
init( BG_HOT_SNAPSHOT_VERSIONS, 5000000 );
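The `randomize && BUGGIFY` clauses above shrink file-chunk knobs under simulation by dividing the target size by a random power of two, so compaction paths get exercised with many chunk geometries. A standalone sketch of the value range this produces (targetBytes is a hypothetical stand-in for BG_DELTA_FILE_TARGET_BYTES):

    #include <cstdio>

    int main() {
        const long targetBytes = 256 * 1024; // hypothetical stand-in value
        // randomInt(0, 7) picks a shift in [0, 7), so chunk sizes span target/1 .. target/64.
        for (int shift = 0; shift < 7; ++shift) {
            std::printf("chunk size candidate: %ld\n", targetBytes / (1L << shift));
        }
        return 0;
    }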
@ -129,7 +129,7 @@ const char* TSS_mismatchTraceName(const GetKeyValuesRequest& req) {
static void traceKeyValuesSummary(TraceEvent& event,
const KeySelectorRef& begin,
const KeySelectorRef& end,
Optional<TenantName> tenant,
Optional<TenantNameRef> tenant,
Version version,
int limit,
int limitBytes,

@ -152,7 +152,7 @@ static void traceKeyValuesSummary(TraceEvent& event,
static void traceKeyValuesDiff(TraceEvent& event,
const KeySelectorRef& begin,
const KeySelectorRef& end,
Optional<TenantName> tenant,
Optional<TenantNameRef> tenant,
Version version,
int limit,
int limitBytes,
@ -71,11 +71,15 @@ TenantState TenantMapEntry::stringToTenantState(std::string stateStr) {
}

TenantMapEntry::TenantMapEntry() {}
TenantMapEntry::TenantMapEntry(int64_t id, TenantState tenantState) : tenantState(tenantState) {
TenantMapEntry::TenantMapEntry(int64_t id, TenantState tenantState, bool encrypted)
: tenantState(tenantState), encrypted(encrypted) {
setId(id);
}
TenantMapEntry::TenantMapEntry(int64_t id, TenantState tenantState, Optional<TenantGroupName> tenantGroup)
: tenantState(tenantState), tenantGroup(tenantGroup) {
TenantMapEntry::TenantMapEntry(int64_t id,
TenantState tenantState,
Optional<TenantGroupName> tenantGroup,
bool encrypted)
: tenantState(tenantState), tenantGroup(tenantGroup), encrypted(encrypted) {
setId(id);
}

@ -88,6 +92,7 @@ void TenantMapEntry::setId(int64_t id) {
std::string TenantMapEntry::toJson(int apiVersion) const {
json_spirit::mObject tenantEntry;
tenantEntry["id"] = id;
tenantEntry["encrypted"] = encrypted;

if (apiVersion >= 720 || apiVersion == Database::API_VERSION_LATEST) {
json_spirit::mObject prefixObject;

@ -133,12 +138,12 @@ void TenantMapEntry::configure(Standalone<StringRef> parameter, Optional<Value>
}

TEST_CASE("/fdbclient/TenantMapEntry/Serialization") {
TenantMapEntry entry1(1, TenantState::READY);
TenantMapEntry entry1(1, TenantState::READY, false);
ASSERT(entry1.prefix == "\x00\x00\x00\x00\x00\x00\x00\x01"_sr);
TenantMapEntry entry2 = TenantMapEntry::decode(entry1.encode());
ASSERT(entry1.id == entry2.id && entry1.prefix == entry2.prefix);

TenantMapEntry entry3(std::numeric_limits<int64_t>::max(), TenantState::READY);
TenantMapEntry entry3(std::numeric_limits<int64_t>::max(), TenantState::READY, false);
ASSERT(entry3.prefix == "\x7f\xff\xff\xff\xff\xff\xff\xff"_sr);
TenantMapEntry entry4 = TenantMapEntry::decode(entry3.encode());
ASSERT(entry3.id == entry4.id && entry3.prefix == entry4.prefix);

@ -149,7 +154,7 @@ TEST_CASE("/fdbclient/TenantMapEntry/Serialization") {
int64_t maxPlusOne = std::min<uint64_t>(UINT64_C(1) << bits, std::numeric_limits<int64_t>::max());
int64_t id = deterministicRandom()->randomInt64(min, maxPlusOne);

TenantMapEntry entry(id, TenantState::READY);
TenantMapEntry entry(id, TenantState::READY, false);
int64_t bigEndianId = bigEndian64(id);
ASSERT(entry.id == id && entry.prefix == StringRef(reinterpret_cast<uint8_t*>(&bigEndianId), 8));
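The serialization test above asserts that a tenant's key prefix is the 8-byte big-endian encoding of its id (id 1 becomes "\x00...\x01", INT64_MAX becomes "\x7f\xff...\xff"). A minimal standalone sketch of that encoding, assuming nothing beyond the assertions shown:

    #include <array>
    #include <cstdint>

    std::array<uint8_t, 8> idToPrefix(int64_t id) {
        std::array<uint8_t, 8> prefix;
        for (int i = 0; i < 8; ++i) {
            // Most significant byte first, matching the asserted prefixes.
            prefix[i] = static_cast<uint8_t>((static_cast<uint64_t>(id) >> (8 * (7 - i))) & 0xff);
        }
        return prefix;
    }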
@ -143,16 +143,11 @@ struct BlobGranuleCipherKeysMetaRef {
StringRef ivRef;

BlobGranuleCipherKeysMetaRef() {}
BlobGranuleCipherKeysMetaRef(Arena& to,
const EncryptCipherDomainId tDomainId,
const EncryptCipherBaseKeyId tBaseCipherId,
const EncryptCipherRandomSalt tSalt,
const EncryptCipherDomainId hDomainId,
const EncryptCipherBaseKeyId hBaseCipherId,
const EncryptCipherRandomSalt hSalt,
const std::string& ivStr)
: textDomainId(tDomainId), textBaseCipherId(tBaseCipherId), textSalt(tSalt), headerDomainId(hDomainId),
headerBaseCipherId(hBaseCipherId), headerSalt(hSalt), ivRef(StringRef(to, ivStr)) {}
BlobGranuleCipherKeysMetaRef(Arena& to, BlobGranuleCipherKeysMeta cipherKeysMeta)
: textDomainId(cipherKeysMeta.textDomainId), textBaseCipherId(cipherKeysMeta.textBaseCipherId),
textSalt(cipherKeysMeta.textSalt), headerDomainId(cipherKeysMeta.headerDomainId),
headerBaseCipherId(cipherKeysMeta.headerBaseCipherId), headerSalt(cipherKeysMeta.headerSalt),
ivRef(StringRef(to, cipherKeysMeta.ivStr)) {}

template <class Ar>
void serialize(Ar& ar) {

@ -162,16 +157,31 @@ struct BlobGranuleCipherKeysMetaRef {

struct BlobFilePointerRef {
constexpr static FileIdentifier file_identifier = 5253554;
// Serializable fields
StringRef filename;
int64_t offset;
int64_t length;
int64_t fullFileLength;
Optional<BlobGranuleCipherKeysMetaRef> cipherKeysMetaRef;
Optional<BlobGranuleCipherKeysCtx> cipherKeysCtx;

// Non-serializable fields
Optional<BlobGranuleCipherKeysMetaRef>
cipherKeysMetaRef; // Placeholder to cache information sufficient to lookup encryption ciphers

BlobFilePointerRef() {}

BlobFilePointerRef(Arena& to, const std::string& filename, int64_t offset, int64_t length, int64_t fullFileLength)
: filename(to, filename), offset(offset), length(length), fullFileLength(fullFileLength) {}

BlobFilePointerRef(Arena& to,
const std::string& filename,
int64_t offset,
int64_t length,
int64_t fullFileLength,
Optional<BlobGranuleCipherKeysCtx> ciphKeysCtx)
: filename(to, filename), offset(offset), length(length), fullFileLength(fullFileLength),
cipherKeysCtx(ciphKeysCtx) {}

BlobFilePointerRef(Arena& to,
const std::string& filename,
int64_t offset,

@ -180,30 +190,23 @@ struct BlobFilePointerRef {
Optional<BlobGranuleCipherKeysMeta> ciphKeysMeta)
: filename(to, filename), offset(offset), length(length), fullFileLength(fullFileLength) {
if (ciphKeysMeta.present()) {
cipherKeysMetaRef = BlobGranuleCipherKeysMetaRef(to,
ciphKeysMeta.get().textDomainId,
ciphKeysMeta.get().textBaseCipherId,
ciphKeysMeta.get().textSalt,
ciphKeysMeta.get().headerDomainId,
ciphKeysMeta.get().headerBaseCipherId,
ciphKeysMeta.get().headerSalt,
ciphKeysMeta.get().ivStr);
cipherKeysMetaRef = BlobGranuleCipherKeysMetaRef(to, ciphKeysMeta.get());
}
}

template <class Ar>
void serialize(Ar& ar) {
serializer(ar, filename, offset, length, fullFileLength, cipherKeysMetaRef);
serializer(ar, filename, offset, length, fullFileLength, cipherKeysCtx);
}

std::string toString() const {
std::stringstream ss;
ss << filename.toString() << ":" << offset << ":" << length << ":" << fullFileLength;
if (cipherKeysMetaRef.present()) {
ss << ":CipherKeysMeta:TextCipher:" << cipherKeysMetaRef.get().textDomainId << ":"
<< cipherKeysMetaRef.get().textBaseCipherId << ":" << cipherKeysMetaRef.get().textSalt
<< ":HeaderCipher:" << cipherKeysMetaRef.get().headerDomainId << ":"
<< cipherKeysMetaRef.get().headerBaseCipherId << ":" << cipherKeysMetaRef.get().headerSalt;
if (cipherKeysCtx.present()) {
ss << ":CipherKeysCtx:TextCipher:" << cipherKeysCtx.get().textCipherKey.encryptDomainId << ":"
<< cipherKeysCtx.get().textCipherKey.baseCipherId << ":" << cipherKeysCtx.get().textCipherKey.salt
<< ":HeaderCipher:" << cipherKeysCtx.get().headerCipherKey.encryptDomainId << ":"
<< cipherKeysCtx.get().headerCipherKey.baseCipherId << ":" << cipherKeysCtx.get().headerCipherKey.salt;
}
return std::move(ss).str();
}

@ -224,19 +227,10 @@ struct BlobGranuleChunkRef {
VectorRef<BlobFilePointerRef> deltaFiles;
GranuleDeltas newDeltas;
Optional<KeyRef> tenantPrefix;
Optional<BlobGranuleCipherKeysCtx> cipherKeysCtx;

template <class Ar>
void serialize(Ar& ar) {
serializer(ar,
keyRange,
includedVersion,
snapshotVersion,
snapshotFile,
deltaFiles,
newDeltas,
tenantPrefix,
cipherKeysCtx);
serializer(ar, keyRange, includedVersion, snapshotVersion, snapshotFile, deltaFiles, newDeltas, tenantPrefix);
}
};
@ -26,12 +26,14 @@
#include "fdbclient/BlobGranuleCommon.h"
#include "flow/CompressionUtils.h"

Value serializeChunkedSnapshot(Standalone<GranuleSnapshot> snapshot,
Value serializeChunkedSnapshot(const Standalone<StringRef>& fileNameRef,
const Standalone<GranuleSnapshot>& snapshot,
int chunkSize,
Optional<CompressionFilter> compressFilter,
Optional<BlobGranuleCipherKeysCtx> cipherKeysCtx = {});

Value serializeChunkedDeltaFile(Standalone<GranuleDeltas> deltas,
Value serializeChunkedDeltaFile(const Standalone<StringRef>& fileNameRef,
const Standalone<GranuleDeltas>& deltas,
const KeyRangeRef& fileRange,
int chunkSize,
Optional<CompressionFilter> compressFilter,
@ -59,7 +59,7 @@ struct BlobWorkerStats {
bytesReadFromFDBForInitialSnapshot("BytesReadFromFDBForInitialSnapshot", cc),
bytesReadFromS3ForCompaction("BytesReadFromS3ForCompaction", cc),
rangeAssignmentRequests("RangeAssignmentRequests", cc), readRequests("ReadRequests", cc),
wrongShardServer("WrongShardServer", cc), changeFeedInputBytes("RangeFeedInputBytes", cc),
wrongShardServer("WrongShardServer", cc), changeFeedInputBytes("ChangeFeedInputBytes", cc),
readReqTotalFilesReturned("ReadReqTotalFilesReturned", cc),
readReqDeltaBytesReturned("ReadReqDeltaBytesReturned", cc), commitVersionChecks("CommitVersionChecks", cc),
granuleUpdateErrors("GranuleUpdateErrors", cc), granuleRequestTimeouts("GranuleRequestTimeouts", cc),
@ -117,6 +117,7 @@ struct ClientDBInfo {
Optional<Value> forward;
std::vector<VersionHistory> history;
UID clusterId;
bool isEncryptionEnabled = false;

TenantMode tenantMode;

@ -130,7 +131,7 @@ struct ClientDBInfo {
if constexpr (!is_fb_function<Archive>) {
ASSERT(ar.protocolVersion().isValid());
}
serializer(ar, grvProxies, commitProxies, id, forward, history, tenantMode, clusterId);
serializer(ar, grvProxies, commitProxies, id, forward, history, tenantMode, clusterId, isEncryptionEnabled);
}
};

@ -176,6 +177,8 @@ struct CommitTransactionRequest : TimedRequest {
CommitTransactionRequest() : CommitTransactionRequest(SpanContext()) {}
CommitTransactionRequest(SpanContext const& context) : spanContext(context), flags(0) {}

bool verify() const { return tenantInfo.isAuthorized(); }

template <class Ar>
void serialize(Ar& ar) {
serializer(

@ -281,6 +284,8 @@ struct GetReadVersionRequest : TimedRequest {
}
}

bool verify() const { return true; }

bool operator<(GetReadVersionRequest const& rhs) const { return priority < rhs.priority; }

template <class Ar>

@ -327,7 +332,7 @@ struct GetKeyServerLocationsRequest {
constexpr static FileIdentifier file_identifier = 9144680;
Arena arena;
SpanContext spanContext;
Optional<TenantNameRef> tenant;
TenantInfo tenant;
KeyRef begin;
Optional<KeyRef> end;
int limit;

@ -342,7 +347,7 @@ struct GetKeyServerLocationsRequest {

GetKeyServerLocationsRequest() : limit(0), reverse(false), minTenantVersion(latestVersion) {}
GetKeyServerLocationsRequest(SpanContext spanContext,
Optional<TenantNameRef> const& tenant,
TenantInfo const& tenant,
KeyRef const& begin,
Optional<KeyRef> const& end,
int limit,

@ -352,6 +357,8 @@ struct GetKeyServerLocationsRequest {
: arena(arena), spanContext(spanContext), tenant(tenant), begin(begin), end(end), limit(limit), reverse(reverse),
minTenantVersion(minTenantVersion) {}

bool verify() const { return tenant.isAuthorized(); }

template <class Ar>
void serialize(Ar& ar) {
serializer(ar, begin, end, limit, reverse, reply, spanContext, tenant, minTenantVersion, arena);
@ -242,6 +242,8 @@ struct GetLeaderRequest {
GetLeaderRequest() {}
explicit GetLeaderRequest(Key key, UID kl) : key(key), knownLeader(kl) {}

bool verify() const { return true; }

template <class Ar>
void serialize(Ar& ar) {
serializer(ar, key, knownLeader, reply);

@ -262,6 +264,8 @@ struct OpenDatabaseCoordRequest {
std::vector<NetworkAddress> coordinators;
ReplyPromise<CachedSerialization<struct ClientDBInfo>> reply;

bool verify() const { return true; }

template <class Ar>
void serialize(Ar& ar) {
serializer(ar,
@ -255,16 +255,16 @@ public:
return cx;
}

Optional<KeyRangeLocationInfo> getCachedLocation(const Optional<TenantName>& tenant,
Optional<KeyRangeLocationInfo> getCachedLocation(const Optional<TenantNameRef>& tenant,
const KeyRef&,
Reverse isBackward = Reverse::False);
bool getCachedLocations(const Optional<TenantName>& tenant,
bool getCachedLocations(const Optional<TenantNameRef>& tenant,
const KeyRangeRef&,
std::vector<KeyRangeLocationInfo>&,
int limit,
Reverse reverse);
void cacheTenant(const TenantName& tenant, const TenantMapEntry& tenantEntry);
Reference<LocationInfo> setCachedLocation(const Optional<TenantName>& tenant,
Reference<LocationInfo> setCachedLocation(const Optional<TenantNameRef>& tenant,
const TenantMapEntry& tenantEntry,
const KeyRangeRef&,
const std::vector<struct StorageServerInterface>&);

@ -527,6 +527,7 @@ public:
Counter transactionsExpensiveClearCostEstCount;
Counter transactionGrvFullBatches;
Counter transactionGrvTimedOutBatches;
Counter transactionCommitVersionNotFoundForSS;

ContinuousSample<double> latencies, readLatencies, commitLatencies, GRVLatencies, mutationsPerCommit,
bytesPerCommit, bgLatencies, bgGranulesPerRequest;
@ -235,9 +235,12 @@ struct Watch : public ReferenceCounted<Watch>, NonCopyable {
void setWatch(Future<Void> watchFuture);
};

FDB_DECLARE_BOOLEAN_PARAM(AllowInvalidTenantID);

struct TransactionState : ReferenceCounted<TransactionState> {
Database cx;
int64_t tenantId = TenantInfo::INVALID_TENANT;
Optional<Standalone<StringRef>> authToken;
Reference<TransactionLogInfo> trLogInfo;
TransactionOptions options;

@ -247,6 +250,13 @@ struct TransactionState : ReferenceCounted<TransactionState> {
UseProvisionalProxies useProvisionalProxies = UseProvisionalProxies::False;
bool readVersionObtainedFromGrvProxy;

// Special flag to skip prepending tenant prefix to mutations and conflict ranges
// when a dummy, internal transaction gets committed. The sole purpose of commitDummyTransaction() is to
// resolve the state of an earlier transaction that returned commit_unknown_result or request_maybe_delivered.
// Therefore, the dummy transaction can simply reuse one conflict range of the earlier commit, if it already has
// been prefixed.
bool skipApplyTenantPrefix = false;

int numErrors = 0;
double startTime = 0;
Promise<Standalone<StringRef>> versionstampPromise;

@ -270,7 +280,7 @@ struct TransactionState : ReferenceCounted<TransactionState> {
Reference<TransactionLogInfo> trLogInfo);

Reference<TransactionState> cloneAndReset(Reference<TransactionLogInfo> newTrLogInfo, bool generateNewSpan) const;
TenantInfo getTenantInfo();
TenantInfo getTenantInfo(AllowInvalidTenantID allowInvalidId = AllowInvalidTenantID::False);

Optional<TenantName> const& tenant();
bool hasTenant() const;
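AllowInvalidTenantID above is declared through FDB's boolean-parameter macros, which generate a named wrapper type so call sites read as getTenantInfo(AllowInvalidTenantID::True) rather than a bare `true`. A generic standalone sketch of the idiom (a hypothetical expansion, not the actual macro output):

    #include <cstdio>

    // Hypothetical stand-in for what FDB_DECLARE_BOOLEAN_PARAM might expand to.
    struct AllowInvalidTenantID {
        constexpr explicit AllowInvalidTenantID(bool v) : value(v) {}
        constexpr operator bool() const { return value; }
        bool value;
    };

    void getTenantInfo(AllowInvalidTenantID allowInvalidId) {
        std::printf("allow invalid id: %d\n", static_cast<bool>(allowInvalidId));
    }

    int main() {
        getTenantInfo(AllowInvalidTenantID{ true }); // intent is explicit at the call site
        // getTenantInfo(true); // would not compile: the constructor is explicit
        return 0;
    }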
@ -32,6 +32,7 @@
#include "fdbrpc/LoadBalance.actor.h"
#include "fdbrpc/Stats.h"
#include "fdbrpc/TimedRequest.h"
#include "fdbrpc/TenantInfo.h"
#include "fdbrpc/TSSComparison.h"
#include "fdbclient/CommitTransaction.h"
#include "fdbclient/TagThrottle.actor.h"

@ -85,13 +86,13 @@ struct StorageServerInterface {
RequestStream<struct ReadHotSubRangeRequest> getReadHotRanges;
RequestStream<struct SplitRangeRequest> getRangeSplitPoints;
PublicRequestStream<struct GetKeyValuesStreamRequest> getKeyValuesStream;
PublicRequestStream<struct ChangeFeedStreamRequest> changeFeedStream;
PublicRequestStream<struct OverlappingChangeFeedsRequest> overlappingChangeFeeds;
PublicRequestStream<struct ChangeFeedPopRequest> changeFeedPop;
PublicRequestStream<struct ChangeFeedVersionUpdateRequest> changeFeedVersionUpdate;
PublicRequestStream<struct GetCheckpointRequest> checkpoint;
PublicRequestStream<struct FetchCheckpointRequest> fetchCheckpoint;
PublicRequestStream<struct FetchCheckpointKeyValuesRequest> fetchCheckpointKeyValues;
RequestStream<struct ChangeFeedStreamRequest> changeFeedStream;
RequestStream<struct OverlappingChangeFeedsRequest> overlappingChangeFeeds;
RequestStream<struct ChangeFeedPopRequest> changeFeedPop;
RequestStream<struct ChangeFeedVersionUpdateRequest> changeFeedVersionUpdate;
RequestStream<struct GetCheckpointRequest> checkpoint;
RequestStream<struct FetchCheckpointRequest> fetchCheckpoint;
RequestStream<struct FetchCheckpointKeyValuesRequest> fetchCheckpointKeyValues;

private:
bool acceptingRequests;

@ -150,18 +151,17 @@ public:
getMappedKeyValues = PublicRequestStream<struct GetMappedKeyValuesRequest>(
getValue.getEndpoint().getAdjustedEndpoint(14));
changeFeedStream =
PublicRequestStream<struct ChangeFeedStreamRequest>(getValue.getEndpoint().getAdjustedEndpoint(15));
overlappingChangeFeeds = PublicRequestStream<struct OverlappingChangeFeedsRequest>(
getValue.getEndpoint().getAdjustedEndpoint(16));
RequestStream<struct ChangeFeedStreamRequest>(getValue.getEndpoint().getAdjustedEndpoint(15));
overlappingChangeFeeds =
RequestStream<struct OverlappingChangeFeedsRequest>(getValue.getEndpoint().getAdjustedEndpoint(16));
changeFeedPop =
PublicRequestStream<struct ChangeFeedPopRequest>(getValue.getEndpoint().getAdjustedEndpoint(17));
changeFeedVersionUpdate = PublicRequestStream<struct ChangeFeedVersionUpdateRequest>(
RequestStream<struct ChangeFeedPopRequest>(getValue.getEndpoint().getAdjustedEndpoint(17));
changeFeedVersionUpdate = RequestStream<struct ChangeFeedVersionUpdateRequest>(
getValue.getEndpoint().getAdjustedEndpoint(18));
checkpoint =
PublicRequestStream<struct GetCheckpointRequest>(getValue.getEndpoint().getAdjustedEndpoint(19));
checkpoint = RequestStream<struct GetCheckpointRequest>(getValue.getEndpoint().getAdjustedEndpoint(19));
fetchCheckpoint =
PublicRequestStream<struct FetchCheckpointRequest>(getValue.getEndpoint().getAdjustedEndpoint(20));
fetchCheckpointKeyValues = PublicRequestStream<struct FetchCheckpointKeyValuesRequest>(
RequestStream<struct FetchCheckpointRequest>(getValue.getEndpoint().getAdjustedEndpoint(20));
fetchCheckpointKeyValues = RequestStream<struct FetchCheckpointKeyValuesRequest>(
getValue.getEndpoint().getAdjustedEndpoint(21));
}
} else {

@ -242,21 +242,6 @@ struct ServerCacheInfo {
}
};

struct TenantInfo {
static const int64_t INVALID_TENANT = -1;

Optional<TenantName> name;
int64_t tenantId;

TenantInfo() : tenantId(INVALID_TENANT) {}
TenantInfo(TenantName name, int64_t tenantId) : name(name), tenantId(tenantId) {}

template <class Ar>
void serialize(Ar& ar) {
serializer(ar, name, tenantId);
}
};

struct GetValueReply : public LoadBalancedReply {
constexpr static FileIdentifier file_identifier = 1378929;
Optional<Value> value;

@ -284,6 +269,8 @@ struct GetValueRequest : TimedRequest {
// to this client, of all storage replicas that
// serve the given key

bool verify() const { return tenantInfo.isAuthorized(); }

GetValueRequest() {}
GetValueRequest(SpanContext spanContext,
const TenantInfo& tenantInfo,

@ -338,6 +325,8 @@ struct WatchValueRequest {
: spanContext(spanContext), tenantInfo(tenantInfo), key(key), value(value), version(ver), tags(tags),
debugID(debugID) {}

bool verify() const { return tenantInfo.isAuthorized(); }

template <class Ar>
void serialize(Ar& ar) {
serializer(ar, key, value, version, tags, debugID, reply, spanContext, tenantInfo);

@ -381,6 +370,8 @@ struct GetKeyValuesRequest : TimedRequest {

GetKeyValuesRequest() : isFetchKeys(false) {}

bool verify() const { return tenantInfo.isAuthorized(); }

template <class Ar>
void serialize(Ar& ar) {
serializer(ar,

@ -437,6 +428,9 @@ struct GetMappedKeyValuesRequest : TimedRequest {
// serve the given key range

GetMappedKeyValuesRequest() : isFetchKeys(false) {}

bool verify() const { return tenantInfo.isAuthorized(); }

template <class Ar>
void serialize(Ar& ar) {
serializer(ar,

@ -503,6 +497,8 @@ struct GetKeyValuesStreamRequest {

GetKeyValuesStreamRequest() : isFetchKeys(false) {}

bool verify() const { return tenantInfo.isAuthorized(); }

template <class Ar>
void serialize(Ar& ar) {
serializer(ar,

@ -550,6 +546,8 @@ struct GetKeyRequest : TimedRequest {
// to this client, of all storage replicas that
// serve the given key

bool verify() const { return tenantInfo.isAuthorized(); }

GetKeyRequest() {}

GetKeyRequest(SpanContext spanContext,
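Each request struct above gains a verify() method that delegates to tenantInfo.isAuthorized(), giving the transport a uniform hook to reject unauthorized requests before any work runs. A standalone model of that gate (hypothetical simplified types; the real check validates a signed token and also trusts allow-listed peers):

    #include <cstdio>
    #include <optional>
    #include <string>

    struct TenantInfoModel {
        std::optional<std::string> name;
        bool tokenVerified = false;
        // Requests without a tenant need no token; tenant requests need a verified one.
        bool isAuthorized() const { return !name.has_value() || tokenVerified; }
    };

    struct GetValueRequestModel {
        TenantInfoModel tenantInfo;
        bool verify() const { return tenantInfo.isAuthorized(); }
    };

    int main() {
        GetValueRequestModel req;
        req.tenantInfo.name = "tenant1";
        std::printf("accepted: %d\n", req.verify()); // 0: tenant set but token unverified
        return 0;
    }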
@ -25,6 +25,7 @@
#include "fdbclient/FDBTypes.h"
#include "fdbclient/KeyBackedTypes.h"
#include "fdbclient/VersionedMap.h"
#include "fdbrpc/TenantInfo.h"
#include "flow/flat_buffers.h"

typedef StringRef TenantNameRef;

@ -47,13 +48,14 @@ struct TenantMapEntry {
Key prefix;
TenantState tenantState = TenantState::READY;
Optional<TenantGroupName> tenantGroup;
bool encrypted = false;

constexpr static int PREFIX_SIZE = sizeof(id);

public:
TenantMapEntry();
TenantMapEntry(int64_t id, TenantState tenantState);
TenantMapEntry(int64_t id, TenantState tenantState, Optional<TenantGroupName> tenantGroup);
TenantMapEntry(int64_t id, TenantState tenantState, bool encrypted);
TenantMapEntry(int64_t id, TenantState tenantState, Optional<TenantGroupName> tenantGroup, bool encrypted);

void setId(int64_t id);
std::string toJson(int apiVersion) const;

@ -68,7 +70,7 @@ public:

template <class Ar>
void serialize(Ar& ar) {
serializer(ar, id, tenantState, tenantGroup);
serializer(ar, id, tenantState, tenantGroup, encrypted);
if constexpr (Ar::isDeserializing) {
if (id >= 0) {
prefix = idToPrefix(id);

@ -127,4 +129,4 @@ public:
typedef VersionedMap<TenantName, TenantMapEntry> TenantMap;
typedef VersionedMap<Key, TenantName> TenantPrefixIndex;

#endif
#endif
@ -116,6 +116,7 @@ private:
std::map<TenantGroupName, int>* tenantGroupNetTenantDelta) {
state TenantMapEntry tenantEntry;
tenantEntry.setId(tenantId);
tenantEntry.encrypted = ryw->getTransactionState()->cx->clientInfo->get().isEncryptionEnabled;

for (auto const& [name, value] : configMutations) {
tenantEntry.configure(name, value);
@ -302,6 +302,10 @@ description is not currently required but encouraged.
<Option name="skip_grv_cache" code="1102"
description="Specifically instruct this transaction to NOT use cached GRV. Primarily used for the read version cache's background updater to avoid attempting to read a cached entry in specific situations."
hidden="true"/>
<Option name="authorization_token" code="2000"
description="Add a given authorization token to the network thread so that future requests are authorized"
paramType="String" paramDescription="A signed token serialized using flatbuffers"
hidden="true" />
</Scope>

<!-- The enumeration values matter - do not change them without
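The authorization_token option above (code 2000) pairs with the AUTHORIZATION_TOKEN case added to Transaction::setOption earlier in this diff, which stores the value in trState->authToken. A hedged usage sketch, assuming this client build's native Transaction API and a signed flatbuffers token obtained out of band:

    #include "fdbclient/NativeAPI.actor.h" // assumed build context

    // Attach a serialized, signed token; later storage requests carry it via
    // TenantInfo (per the getTenantInfo() change above). Sketch only.
    void attachAuthToken(Transaction& tr, StringRef signedTokenBytes) {
        tr.setOption(FDBTransactionOptions::AUTHORIZATION_TOKEN, signedTokenBytes);
    }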
@ -258,7 +258,7 @@ Optional<Standalone<StringRef>> AsyncFileEncrypted::RandomCache::get(uint32_t bl
TEST_CASE("fdbrpc/AsyncFileEncrypted") {
state const int bytes = FLOW_KNOBS->ENCRYPTION_BLOCK_SIZE * deterministicRandom()->randomInt(0, 1000);
state std::vector<unsigned char> writeBuffer(bytes, 0);
generateRandomData(&writeBuffer.front(), bytes);
deterministicRandom()->randomBytes(&writeBuffer.front(), bytes);
state std::vector<unsigned char> readBuffer(bytes, 0);
ASSERT(g_network->isSimulated());
StreamCipherKey::initializeGlobalRandomTestKey();
@ -117,6 +117,9 @@ Optional<StringRef> decode(Arena& arena, StringRef base64UrlStr) {
}
auto out = new (arena) uint8_t[decodedLen];
auto actualLen = decode(base64UrlStr.begin(), base64UrlStr.size(), out);
if (actualLen == -1) {
return {};
}
ASSERT_EQ(decodedLen, actualLen);
return StringRef(out, decodedLen);
}
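The early return added above propagates the decoder's -1 failure as an empty Optional instead of tripping the assertion. For context, unpadded base64url maps every 4 input characters to 3 bytes, and a 2- or 3-character tail adds 1 or 2 bytes; a tail of 1 is impossible. A standalone sketch of that length bookkeeping (assumed behavior, not the function above):

    #include <cstdint>

    // Returns the decoded byte count for an unpadded base64url string, or -1
    // if the length cannot correspond to valid input.
    int64_t base64UrlDecodedLength(int64_t encodedLen) {
        const int64_t tail = encodedLen % 4;
        if (tail == 1)
            return -1;
        return (encodedLen / 4) * 3 + (tail == 0 ? 0 : tail - 1);
    }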
@ -28,12 +28,15 @@
#include <memcheck.h>
#endif

#include "fdbrpc/TenantInfo.h"
#include <boost/unordered_map.hpp>

#include "fdbrpc/TokenSign.h"
#include "fdbrpc/fdbrpc.h"
#include "fdbrpc/FailureMonitor.h"
#include "fdbrpc/HealthMonitor.h"
#include "fdbrpc/genericactors.actor.h"
#include "fdbrpc/IPAllowList.h"
#include "fdbrpc/TokenCache.h"
#include "fdbrpc/simulator.h"
#include "flow/ActorCollection.h"
#include "flow/Error.h"

@ -47,8 +50,13 @@
#include "flow/xxhash.h"
#include "flow/actorcompiler.h" // This must be the last #include.

static NetworkAddressList g_currentDeliveryPeerAddress = NetworkAddressList();
static Future<Void> g_currentDeliveryPeerDisconnect;
namespace {

NetworkAddressList g_currentDeliveryPeerAddress = NetworkAddressList();
bool g_currentDeliverPeerAddressTrusted = false;
Future<Void> g_currentDeliveryPeerDisconnect;

} // namespace

constexpr int PACKET_LEN_WIDTH = sizeof(uint32_t);
const uint64_t TOKEN_STREAM_FLAG = 1;

@ -239,31 +247,6 @@ struct PingReceiver final : NetworkMessageReceiver {
bool isPublic() const override { return true; }
};

struct TenantAuthorizer final : NetworkMessageReceiver {
TenantAuthorizer(EndpointMap& endpoints) {
endpoints.insertWellKnown(this, Endpoint::wellKnownToken(WLTOKEN_AUTH_TENANT), TaskPriority::ReadSocket);
}
void receive(ArenaObjectReader& reader) override {
AuthorizationRequest req;
try {
reader.deserialize(req);
// TODO: verify that token is valid
AuthorizedTenants& auth = reader.variable<AuthorizedTenants>("AuthorizedTenants");
for (auto const& t : req.tenants) {
auth.authorizedTenants.insert(TenantInfoRef(auth.arena, t));
}
req.reply.send(Void());
} catch (Error& e) {
if (e.code() == error_code_permission_denied) {
req.reply.sendError(e);
} else {
throw;
}
}
}
bool isPublic() const override { return true; }
};

struct UnauthorizedEndpointReceiver final : NetworkMessageReceiver {
UnauthorizedEndpointReceiver(EndpointMap& endpoints) {
endpoints.insertWellKnown(

@ -339,7 +322,6 @@ public:
EndpointMap endpoints;
EndpointNotFoundReceiver endpointNotFoundReceiver{ endpoints };
PingReceiver pingReceiver{ endpoints };
TenantAuthorizer tenantReceiver{ endpoints };
UnauthorizedEndpointReceiver unauthorizedEndpointReceiver{ endpoints };

Int64MetricHandle bytesSent;

@ -356,10 +338,11 @@ public:
double lastIncompatibleMessage;
uint64_t transportId;
IPAllowList allowList;
std::shared_ptr<ContextVariableMap> localCVM = std::make_shared<ContextVariableMap>(); // for local delivery

Future<Void> multiVersionCleanup;
Future<Void> pingLogger;

std::unordered_map<Standalone<StringRef>, PublicKey> publicKeys;
};

ACTOR Future<Void> pingLatencyLogger(TransportData* self) {

@ -926,10 +909,20 @@ void Peer::prependConnectPacket() {
pkt.protocolVersion.addObjectSerializerFlag();
pkt.connectionId = transport->transportId;

PacketBuffer* pb_first = PacketBuffer::create();
PacketBuffer *pb_first = PacketBuffer::create(), *pb_end = nullptr;
PacketWriter wr(pb_first, nullptr, Unversioned());
pkt.serialize(wr);
unsent.prependWriteBuffer(pb_first, wr.finish());
pb_end = wr.finish();
#if VALGRIND
SendBuffer* checkbuf = pb_first;
while (checkbuf) {
int size = checkbuf->bytes_written;
const uint8_t* data = checkbuf->data();
VALGRIND_CHECK_MEM_IS_DEFINED(data, size);
checkbuf = checkbuf->next;
}
#endif
unsent.prependWriteBuffer(pb_first, pb_end);
}

void Peer::discardUnreliablePackets() {

@ -1013,8 +1006,7 @@ ACTOR static void deliver(TransportData* self,
TaskPriority priority,
ArenaReader reader,
NetworkAddress peerAddress,
Reference<AuthorizedTenants> authorizedTenants,
std::shared_ptr<ContextVariableMap> cvm,
bool isTrustedPeer,
InReadSocket inReadSocket,
Future<Void> disconnect) {
// We want to run the task at the right priority. If the priority is higher than the current priority (which is

@ -1029,22 +1021,26 @@ ACTOR static void deliver(TransportData* self,
}

auto receiver = self->endpoints.get(destination.token);
if (receiver && (authorizedTenants->trusted || receiver->isPublic())) {
if (receiver && (isTrustedPeer || receiver->isPublic())) {
if (!checkCompatible(receiver->peerCompatibilityPolicy(), reader.protocolVersion())) {
return;
}
try {
ASSERT(g_currentDeliveryPeerAddress == NetworkAddressList());
ASSERT(!g_currentDeliverPeerAddressTrusted);
g_currentDeliveryPeerAddress = destination.addresses;
g_currentDeliverPeerAddressTrusted = isTrustedPeer;
g_currentDeliveryPeerDisconnect = disconnect;
StringRef data = reader.arenaReadAll();
ASSERT(data.size() > 8);
ArenaObjectReader objReader(reader.arena(), reader.arenaReadAll(), AssumeVersion(reader.protocolVersion()));
objReader.setContextVariableMap(cvm);
receiver->receive(objReader);
g_currentDeliveryPeerAddress = { NetworkAddress() };
g_currentDeliveryPeerAddress = NetworkAddressList();
g_currentDeliverPeerAddressTrusted = false;
g_currentDeliveryPeerDisconnect = Future<Void>();
} catch (Error& e) {
g_currentDeliveryPeerAddress = { NetworkAddress() };
g_currentDeliveryPeerAddress = NetworkAddressList();
g_currentDeliverPeerAddressTrusted = false;
g_currentDeliveryPeerDisconnect = Future<Void>();
TraceEvent(SevError, "ReceiverError")
.error(e)

@ -1092,8 +1088,7 @@ static void scanPackets(TransportData* transport,
const uint8_t* e,
Arena& arena,
NetworkAddress const& peerAddress,
Reference<AuthorizedTenants> const& authorizedTenants,
std::shared_ptr<ContextVariableMap> cvm,
bool isTrustedPeer,
ProtocolVersion peerProtocolVersion,
Future<Void> disconnect,
IsStableConnection isStableConnection) {

@ -1215,8 +1210,7 @@ static void scanPackets(TransportData* transport,
priority,
std::move(reader),
peerAddress,
authorizedTenants,
cvm,
isTrustedPeer,
InReadSocket::True,
disconnect);
}

@ -1263,14 +1257,9 @@ ACTOR static Future<Void> connectionReader(TransportData* transport,
state bool incompatiblePeerCounted = false;
state NetworkAddress peerAddress;
state ProtocolVersion peerProtocolVersion;
state Reference<AuthorizedTenants> authorizedTenants = makeReference<AuthorizedTenants>();
state std::shared_ptr<ContextVariableMap> cvm = std::make_shared<ContextVariableMap>();
state bool trusted = transport->allowList(conn->getPeerAddress().ip);
peerAddress = conn->getPeerAddress();
authorizedTenants->trusted = transport->allowList(conn->getPeerAddress().ip);
(*cvm)["AuthorizedTenants"] = &authorizedTenants;
(*cvm)["PeerAddress"] = &peerAddress;

authorizedTenants->trusted = transport->allowList(peerAddress.ip);
if (!peer) {
ASSERT(!peerAddress.isPublic());
}

@ -1420,8 +1409,7 @@ ACTOR static Future<Void> connectionReader(TransportData* transport,
unprocessed_end,
arena,
peerAddress,
authorizedTenants,
cvm,
trusted,
peerProtocolVersion,
peer->disconnect.getFuture(),
IsStableConnection(g_network->isSimulated() && conn->isStableConnection()));

@ -1572,6 +1560,11 @@ ACTOR static Future<Void> multiVersionCleanupWorker(TransportData* self) {
FlowTransport::FlowTransport(uint64_t transportId, int maxWellKnownEndpoints, IPAllowList const* allowList)
: self(new TransportData(transportId, maxWellKnownEndpoints, allowList)) {
self->multiVersionCleanup = multiVersionCleanupWorker(self);
if (g_network->isSimulated()) {
for (auto const& p : g_simulator.authKeys) {
self->publicKeys.emplace(p.first, p.second.toPublic());
}
}
}

FlowTransport::~FlowTransport() {

@ -1717,15 +1710,12 @@ static void sendLocal(TransportData* self, ISerializeSource const& what, const E
ASSERT(copy.size() > 0);
TaskPriority priority = self->endpoints.getPriority(destination.token);
if (priority != TaskPriority::UnknownEndpoint || (destination.token.first() & TOKEN_STREAM_FLAG) != 0) {
Reference<AuthorizedTenants> authorizedTenants = makeReference<AuthorizedTenants>();
authorizedTenants->trusted = true;
deliver(self,
destination,
priority,
ArenaReader(copy.arena(), copy, AssumeVersion(currentProtocolVersion)),
NetworkAddress(),
authorizedTenants,
self->localCVM,
true,
InReadSocket::False,
Never());
}

@ -1936,6 +1926,7 @@ void FlowTransport::createInstance(bool isClient,
uint64_t transportId,
int maxWellKnownEndpoints,
IPAllowList const* allowList) {
TokenCache::createInstance();
g_network->setGlobal(INetwork::enFlowTransport,
(flowGlobalType) new FlowTransport(transportId, maxWellKnownEndpoints, allowList));
g_network->setGlobal(INetwork::enNetworkAddressFunc, (flowGlobalType)&FlowTransport::getGlobalLocalAddress);

@ -1947,3 +1938,31 @@ void FlowTransport::createInstance(bool isClient,
HealthMonitor* FlowTransport::healthMonitor() {
return &self->healthMonitor;
}

Optional<PublicKey> FlowTransport::getPublicKeyByName(StringRef name) const {
auto iter = self->publicKeys.find(name);
if (iter != self->publicKeys.end()) {
return iter->second;
}
return {};
}

NetworkAddress FlowTransport::currentDeliveryPeerAddress() const {
return g_currentDeliveryPeerAddress.address;
}

bool FlowTransport::currentDeliveryPeerIsTrusted() const {
return g_currentDeliverPeerAddressTrusted;
}

void FlowTransport::addPublicKey(StringRef name, PublicKey key) {
self->publicKeys[name] = key;
}

void FlowTransport::removePublicKey(StringRef name) {
self->publicKeys.erase(name);
}

void FlowTransport::removeAllPublicKeys() {
self->publicKeys.clear();
}
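The reworked deliver() above replaces the shared AuthorizedTenants context with a plain isTrustedPeer argument: a message reaches its receiver only when the peer is trusted (allow-listed or local) or the endpoint is explicitly public. A standalone model of that check (hypothetical types):

    struct ReceiverModel {
        bool publicEndpoint = false;
        bool isPublic() const { return publicEndpoint; }
    };

    // Mirrors `if (receiver && (isTrustedPeer || receiver->isPublic()))` above.
    bool shouldDeliver(const ReceiverModel* receiver, bool isTrustedPeer) {
        return receiver != nullptr && (isTrustedPeer || receiver->isPublic());
    }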
@@ -0,0 +1,358 @@
#include "fdbrpc/FlowTransport.h"
#include "fdbrpc/TokenCache.h"
#include "fdbrpc/TokenSign.h"
#include "fdbrpc/TenantInfo.h"
#include "flow/MkCert.h"
#include "flow/ScopeExit.h"
#include "flow/UnitTest.h"
#include "flow/network.h"

#include <boost/unordered_map.hpp>

#include <fmt/format.h>
#include <list>
#include <deque>

#include "flow/actorcompiler.h" // has to be last include

template <class Key, class Value>
class LRUCache {
public:
    using key_type = Key;
    using list_type = std::list<key_type>;
    using mapped_type = Value;
    using map_type = boost::unordered_map<key_type, std::pair<mapped_type, typename list_type::iterator>>;
    using size_type = unsigned;

    explicit LRUCache(size_type capacity) : _capacity(capacity) { _map.reserve(capacity); }

    size_type size() const { return _map.size(); }
    size_type capacity() const { return _capacity; }
    bool empty() const { return _map.empty(); }

    Optional<mapped_type*> get(key_type const& key) {
        auto i = _map.find(key);
        if (i == _map.end()) {
            return Optional<mapped_type*>();
        }
        auto j = i->second.second;
        if (j != _list.begin()) {
            _list.erase(j);
            _list.push_front(i->first);
            i->second.second = _list.begin();
        }
        return &i->second.first;
    }

    template <class K, class V>
    mapped_type* insert(K&& key, V&& value) {
        auto iter = _map.find(key);
        if (iter != _map.end()) {
            return &iter->second.first;
        }
        if (size() == capacity()) {
            auto i = --_list.end();
            _map.erase(*i);
            _list.erase(i);
        }
        _list.push_front(std::forward<K>(key));
        std::tie(iter, std::ignore) =
            _map.insert(std::make_pair(*_list.begin(), std::make_pair(std::forward<V>(value), _list.begin())));
        return &iter->second.first;
    }

private:
    const size_type _capacity;
    map_type _map;
    list_type _list;
};
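
Aside: the class above pins each cached value to a node in a recency list; get() promotes a hit to the front and insert() evicts from the back once the cache is full. A minimal standalone sketch of the same discipline with std:: containers only — hypothetical names, not part of this change:

#include <list>
#include <optional>
#include <unordered_map>

// Sketch: the recency discipline of the LRUCache above, reduced to std:: types.
template <class K, class V>
class MiniLru {
    std::list<K> order; // front = most recently used
    std::unordered_map<K, std::pair<V, typename std::list<K>::iterator>> map;
    size_t cap;

public:
    explicit MiniLru(size_t capacity) : cap(capacity) {}
    std::optional<V> get(K const& k) {
        auto it = map.find(k);
        if (it == map.end())
            return std::nullopt;
        order.splice(order.begin(), order, it->second.second); // promote hit to front
        return it->second.first;
    }
    void insert(K k, V v) {
        if (map.count(k))
            return; // first insert wins, as above
        if (map.size() == cap) { // evict least recently used
            map.erase(order.back());
            order.pop_back();
        }
        order.push_front(k);
        map.emplace(order.front(), std::make_pair(std::move(v), order.begin()));
    }
};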
TEST_CASE("/fdbrpc/authz/LRUCache") {
|
||||
auto& rng = *deterministicRandom();
|
||||
{
|
||||
// test very small LRU cache
|
||||
LRUCache<int, StringRef> cache(rng.randomInt(2, 10));
|
||||
for (int i = 0; i < 200; ++i) {
|
||||
cache.insert(i, "val"_sr);
|
||||
if (i >= cache.capacity()) {
|
||||
for (auto j = 0; j <= i - cache.capacity(); j++)
|
||||
ASSERT(!cache.get(j).present());
|
||||
// ordering is important so as not to disrupt the LRU order
|
||||
for (auto j = i - cache.capacity() + 1; j <= i; j++)
|
||||
ASSERT(cache.get(j).present());
|
||||
}
|
||||
}
|
||||
}
|
||||
{
|
||||
// Test larger cache
|
||||
LRUCache<int, StringRef> cache(1000);
|
||||
for (auto i = 0; i < 1000; ++i) {
|
||||
cache.insert(i, "value"_sr);
|
||||
}
|
||||
cache.insert(1000, "value"_sr); // should evict 0
|
||||
ASSERT(!cache.get(0).present());
|
||||
}
|
||||
{
|
||||
// memory test -- this is what the boost implementation didn't do correctly
|
||||
LRUCache<StringRef, Standalone<StringRef>> cache(10);
|
||||
std::deque<std::string> cachedStrings;
|
||||
std::deque<std::string> evictedStrings;
|
||||
for (int i = 0; i < 10; ++i) {
|
||||
auto str = rng.randomAlphaNumeric(rng.randomInt(100, 1024));
|
||||
Standalone<StringRef> sref(str);
|
||||
cache.insert(sref, sref);
|
||||
cachedStrings.push_back(str);
|
||||
}
|
||||
for (int i = 0; i < 10; ++i) {
|
||||
Standalone<StringRef> existingStr(cachedStrings.back());
|
||||
auto cachedStr = cache.get(existingStr);
|
||||
ASSERT(cachedStr.present());
|
||||
ASSERT(*cachedStr.get() == existingStr);
|
||||
if (!evictedStrings.empty()) {
|
||||
Standalone<StringRef> nonexisting(evictedStrings.at(rng.randomInt(0, evictedStrings.size())));
|
||||
ASSERT(!cache.get(nonexisting).present());
|
||||
}
|
||||
auto str = rng.randomAlphaNumeric(rng.randomInt(100, 1024));
|
||||
Standalone<StringRef> sref(str);
|
||||
evictedStrings.push_back(cachedStrings.front());
|
||||
cachedStrings.pop_front();
|
||||
cachedStrings.push_back(str);
|
||||
cache.insert(sref, sref);
|
||||
}
|
||||
}
|
||||
return Void();
|
||||
}

struct TokenCacheImpl {
    struct CacheEntry {
        Arena arena;
        VectorRef<TenantNameRef> tenants;
        double expirationTime = 0.0;
    };

    LRUCache<StringRef, CacheEntry> cache;
    TokenCacheImpl() : cache(FLOW_KNOBS->TOKEN_CACHE_SIZE) {}

    bool validate(TenantNameRef tenant, StringRef token);
    bool validateAndAdd(double currentTime, StringRef token, NetworkAddress const& peer);
};

TokenCache::TokenCache() : impl(new TokenCacheImpl()) {}
TokenCache::~TokenCache() {
    delete impl;
}

void TokenCache::createInstance() {
    g_network->setGlobal(INetwork::enTokenCache, new TokenCache());
}

TokenCache& TokenCache::instance() {
    return *reinterpret_cast<TokenCache*>(g_network->global(INetwork::enTokenCache));
}

bool TokenCache::validate(TenantNameRef name, StringRef token) {
    return impl->validate(name, token);
}

#define TRACE_INVALID_PARSED_TOKEN(reason, token) \
    TraceEvent(SevWarn, "InvalidToken") \
        .detail("From", peer) \
        .detail("Reason", reason) \
        .detail("CurrentTime", currentTime) \
        .detail("Token", token.toStringRef(arena).toStringView())

bool TokenCacheImpl::validateAndAdd(double currentTime, StringRef token, NetworkAddress const& peer) {
    Arena arena;
    authz::jwt::TokenRef t;
    if (!authz::jwt::parseToken(arena, t, token)) {
        CODE_PROBE(true, "Token can't be parsed");
        TraceEvent(SevWarn, "InvalidToken")
            .detail("From", peer)
            .detail("Reason", "ParseError")
            .detail("Token", token.toString());
        return false;
    }
    auto key = FlowTransport::transport().getPublicKeyByName(t.keyId);
    if (!key.present()) {
        CODE_PROBE(true, "Token referencing non-existing key");
        TRACE_INVALID_PARSED_TOKEN("UnknownKey", t);
        return false;
    } else if (!t.expiresAtUnixTime.present()) {
        CODE_PROBE(true, "Token has no expiration time");
        TRACE_INVALID_PARSED_TOKEN("NoExpirationTime", t);
        return false;
    } else if (double(t.expiresAtUnixTime.get()) <= currentTime) {
        CODE_PROBE(true, "Expired token");
        TRACE_INVALID_PARSED_TOKEN("Expired", t);
        return false;
    } else if (!t.notBeforeUnixTime.present()) {
        CODE_PROBE(true, "Token has no not-before field");
        TRACE_INVALID_PARSED_TOKEN("NoNotBefore", t);
        return false;
    } else if (double(t.notBeforeUnixTime.get()) > currentTime) {
        CODE_PROBE(true, "Token's not-before is in the future");
        TRACE_INVALID_PARSED_TOKEN("TokenNotYetValid", t);
        return false;
    } else if (!t.tenants.present()) {
        CODE_PROBE(true, "Token with no tenants");
        TRACE_INVALID_PARSED_TOKEN("NoTenants", t);
        return false;
    } else if (!authz::jwt::verifyToken(token, key.get())) {
        CODE_PROBE(true, "Token with invalid signature");
        TRACE_INVALID_PARSED_TOKEN("InvalidSignature", t);
        return false;
    } else {
        CacheEntry c;
        c.expirationTime = double(t.expiresAtUnixTime.get());
        c.tenants.reserve(c.arena, t.tenants.get().size());
        for (auto tenant : t.tenants.get()) {
            c.tenants.push_back_deep(c.arena, tenant);
        }
        cache.insert(StringRef(c.arena, token), c);
        return true;
    }
}

bool TokenCacheImpl::validate(TenantNameRef name, StringRef token) {
    NetworkAddress peer = FlowTransport::transport().currentDeliveryPeerAddress();
    auto cachedEntry = cache.get(token);
    double currentTime = g_network->timer();

    if (!cachedEntry.present()) {
        if (validateAndAdd(currentTime, token, peer)) {
            cachedEntry = cache.get(token);
        } else {
            return false;
        }
    }

    ASSERT(cachedEntry.present());

    auto& entry = cachedEntry.get();
    if (entry->expirationTime < currentTime) {
        CODE_PROBE(true, "Found expired token in cache");
        TraceEvent(SevWarn, "InvalidToken").detail("From", peer).detail("Reason", "ExpiredInCache");
        return false;
    }
    bool tenantFound = false;
    for (auto const& t : entry->tenants) {
        if (t == name) {
            tenantFound = true;
            break;
        }
    }
    if (!tenantFound) {
        CODE_PROBE(true, "Valid token doesn't reference tenant");
        TraceEvent(SevWarn, "TenantTokenMismatch").detail("From", peer).detail("Tenant", name.toString());
        return false;
    }
    return true;
}
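
Aside: the function above is cache-then-verify — a cache hit skips parsing and signature checks entirely, while expiry and tenant membership are still re-checked on every call. A standalone sketch of that flow with toy types (illustrative only, not part of the change itself):

#include <string>
#include <unordered_map>

struct Entry { double expiration = 0.0; };
using Cache = std::unordered_map<std::string, Entry>;

// Stand-in for validateAndAdd(): pretend every non-empty token verifies for ten minutes.
bool verifyAndInsert(Cache& c, std::string const& t, double now) {
    if (t.empty())
        return false;
    c[t] = Entry{ now + 600 };
    return true;
}

bool validateCached(Cache& cache, std::string const& token, double now) {
    auto it = cache.find(token); // fast path: token verified earlier
    if (it == cache.end()) {
        if (!verifyAndInsert(cache, token, now)) // slow path: parse + signature check
            return false;
        it = cache.find(token);
    }
    return it->second.expiration > now; // expiry is re-checked on every hit
}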

namespace authz::jwt {
extern TokenRef makeRandomTokenSpec(Arena&, IRandom&, authz::Algorithm);
}

TEST_CASE("/fdbrpc/authz/TokenCache/BadTokens") {
    std::pair<void (*)(Arena&, IRandom&, authz::jwt::TokenRef&), char const*> badMutations[]{
        {
            [](Arena&, IRandom&, authz::jwt::TokenRef&) { FlowTransport::transport().removeAllPublicKeys(); },
            "NoKeyWithSuchName",
        },
        {
            [](Arena&, IRandom&, authz::jwt::TokenRef& token) { token.expiresAtUnixTime.reset(); },
            "NoExpirationTime",
        },
        {
            [](Arena&, IRandom& rng, authz::jwt::TokenRef& token) {
                token.expiresAtUnixTime = uint64_t(std::max<double>(g_network->timer() - 10 - rng.random01() * 50, 0));
            },
            "ExpiredToken",
        },
        {
            [](Arena&, IRandom&, authz::jwt::TokenRef& token) { token.notBeforeUnixTime.reset(); },
            "NoNotBefore",
        },
        {
            [](Arena&, IRandom& rng, authz::jwt::TokenRef& token) {
                token.notBeforeUnixTime = uint64_t(g_network->timer() + 10 + rng.random01() * 50);
            },
            "TokenNotYetValid",
        },
        {
            [](Arena& arena, IRandom&, authz::jwt::TokenRef& token) { token.tenants.reset(); },
            "NoTenants",
        },
    };
    auto const pubKeyName = "somePublicKey"_sr;
    auto privateKey = mkcert::makeEcP256();
    auto const numBadMutations = sizeof(badMutations) / sizeof(badMutations[0]);
    for (auto repeat = 0; repeat < 50; repeat++) {
        auto arena = Arena();
        auto& rng = *deterministicRandom();
        auto validTokenSpec = authz::jwt::makeRandomTokenSpec(arena, rng, authz::Algorithm::ES256);
        validTokenSpec.keyId = pubKeyName;
        for (auto i = 0; i < numBadMutations; i++) {
            FlowTransport::transport().addPublicKey(pubKeyName, privateKey.toPublic());
            auto publicKeyClearGuard =
                ScopeExit([pubKeyName]() { FlowTransport::transport().removePublicKey(pubKeyName); });
            auto [mutationFn, mutationDesc] = badMutations[i];
            auto tmpArena = Arena();
            auto mutatedTokenSpec = validTokenSpec;
            mutationFn(tmpArena, rng, mutatedTokenSpec);
            auto signedToken = authz::jwt::signToken(tmpArena, mutatedTokenSpec, privateKey);
            if (TokenCache::instance().validate(validTokenSpec.tenants.get()[0], signedToken)) {
                fmt::print("Unexpected successful validation at mutation {}, token spec: {}\n",
                           mutationDesc,
                           mutatedTokenSpec.toStringRef(tmpArena).toStringView());
                ASSERT(false);
            }
        }
    }
    if (TokenCache::instance().validate("TenantNameDontMatterHere"_sr, StringRef())) {
        fmt::print("Unexpected successful validation of ill-formed token (no signature part)\n");
        ASSERT(false);
    }
    if (TokenCache::instance().validate("TenantNameDontMatterHere"_sr, "1111.22"_sr)) {
        fmt::print("Unexpected successful validation of ill-formed token (no signature part)\n");
        ASSERT(false);
    }
    if (TokenCache::instance().validate("TenantNameDontMatterHere2"_sr, "////.////.////"_sr)) {
        fmt::print("Unexpected successful validation of unparseable token\n");
        ASSERT(false);
    }
    fmt::print("TEST OK\n");
    return Void();
}

TEST_CASE("/fdbrpc/authz/TokenCache/GoodTokens") {
    // Don't repeat because token expiry is at seconds granularity and sleeps are costly in unit tests
    state Arena arena;
    state PrivateKey privateKey = mkcert::makeEcP256();
    state StringRef pubKeyName = "somePublicKey"_sr;
    state ScopeExit<std::function<void()>> publicKeyClearGuard(
        [pubKeyName = pubKeyName]() { FlowTransport::transport().removePublicKey(pubKeyName); });
    state authz::jwt::TokenRef tokenSpec =
        authz::jwt::makeRandomTokenSpec(arena, *deterministicRandom(), authz::Algorithm::ES256);
    state StringRef signedToken;
    FlowTransport::transport().addPublicKey(pubKeyName, privateKey.toPublic());
    tokenSpec.expiresAtUnixTime = static_cast<uint64_t>(g_network->timer() + 2.0);
    tokenSpec.keyId = pubKeyName;
    signedToken = authz::jwt::signToken(arena, tokenSpec, privateKey);
    if (!TokenCache::instance().validate(tokenSpec.tenants.get()[0], signedToken)) {
        fmt::print("Unexpected failed token validation, token spec: {}, now: {}\n",
                   tokenSpec.toStringRef(arena).toStringView(),
                   g_network->timer());
        ASSERT(false);
    }
    wait(delay(3.5));
    if (TokenCache::instance().validate(tokenSpec.tenants.get()[0], signedToken)) {
        fmt::print(
            "Unexpected successful token validation after supposedly expiring in cache, token spec: {}, now: {}\n",
            tokenSpec.toStringRef(arena).toStringView(),
            g_network->timer());
        ASSERT(false);
    }
    fmt::print("TEST OK\n");
    return Void();
}
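
Aside: both tests lean on ScopeExit to guarantee the test key is removed however the test exits. The guard idiom itself is small; a generic standalone sketch (hypothetical ScopeGuard, not FDB's ScopeExit):

#include <cstdio>

// Sketch: run a callable on scope exit, as ScopeExit does in the tests above.
template <class F>
struct ScopeGuard {
    F fn;
    ~ScopeGuard() { fn(); }
};
template <class F>
ScopeGuard(F) -> ScopeGuard<F>; // C++17 deduction guide

int main() {
    auto guard = ScopeGuard{ [] { std::puts("cleanup runs even on early return"); } };
    return 0;
}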


@@ -30,6 +30,7 @@
#include "flow/Trace.h"
#include "flow/UnitTest.h"
#include <fmt/format.h>
#include <iterator>
#include <string_view>
#include <type_traits>
#include <utility>

@@ -161,6 +162,43 @@ TokenRef makeRandomTokenSpec(Arena& arena, IRandom& rng) {

namespace authz::jwt {

template <class FieldType, size_t NameLen>
void appendField(fmt::memory_buffer& b, char const (&name)[NameLen], Optional<FieldType> const& field) {
    if (!field.present())
        return;
    auto const& f = field.get();
    auto bi = std::back_inserter(b);
    if constexpr (std::is_same_v<FieldType, VectorRef<StringRef>>) {
        fmt::format_to(bi, " {}=[", name);
        for (auto i = 0; i < f.size(); i++) {
            if (i)
                fmt::format_to(bi, ",");
            fmt::format_to(bi, f[i].toStringView());
        }
        fmt::format_to(bi, "]");
    } else if constexpr (std::is_same_v<FieldType, StringRef>) {
        fmt::format_to(bi, " {}={}", name, f.toStringView());
    } else {
        fmt::format_to(bi, " {}={}", name, f);
    }
}

StringRef TokenRef::toStringRef(Arena& arena) {
    auto buf = fmt::memory_buffer();
    fmt::format_to(std::back_inserter(buf), "alg={} kid={}", getAlgorithmName(algorithm), keyId.toStringView());
    appendField(buf, "iss", issuer);
    appendField(buf, "sub", subject);
    appendField(buf, "aud", audience);
    appendField(buf, "iat", issuedAtUnixTime);
    appendField(buf, "exp", expiresAtUnixTime);
    appendField(buf, "nbf", notBeforeUnixTime);
    appendField(buf, "jti", tokenId);
    appendField(buf, "tenants", tenants);
    auto str = new (arena) uint8_t[buf.size()];
    memcpy(str, buf.data(), buf.size());
    return StringRef(str, buf.size());
}
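
Aside: toStringRef() grows a fmt::memory_buffer through a back-insert iterator and copies the bytes out only once at the end. The same pattern with a plain std::string result (a sketch, not part of the change):

#include <fmt/format.h>
#include <iterator>
#include <string>
#include <string_view>

std::string describeToken(std::string_view alg, std::string_view kid) {
    auto buf = fmt::memory_buffer();
    auto bi = std::back_inserter(buf);
    fmt::format_to(bi, "alg={} kid={}", alg, kid); // append without intermediate strings
    return fmt::to_string(buf);                    // single copy out of the buffer
}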

template <class FieldType, class Writer>
void putField(Optional<FieldType> const& field, Writer& wr, const char* fieldName) {
    if (!field.present())

@@ -192,9 +230,12 @@ StringRef makeTokenPart(Arena& arena, TokenRef tokenSpec) {
    header.StartObject();
    header.Key("typ");
    header.String("JWT");
    header.Key("alg");
    auto algo = getAlgorithmName(tokenSpec.algorithm);
    header.Key("alg");
    header.String(algo.data(), algo.size());
    auto kid = tokenSpec.keyId.toStringView();
    header.Key("kid");
    header.String(kid.data(), kid.size());
    header.EndObject();
    payload.StartObject();
    putField(tokenSpec.issuer, payload, "iss");

@@ -203,7 +244,6 @@ StringRef makeTokenPart(Arena& arena, TokenRef tokenSpec) {
    putField(tokenSpec.issuedAtUnixTime, payload, "iat");
    putField(tokenSpec.expiresAtUnixTime, payload, "exp");
    putField(tokenSpec.notBeforeUnixTime, payload, "nbf");
    putField(tokenSpec.keyId, payload, "kid");
    putField(tokenSpec.tokenId, payload, "jti");
    putField(tokenSpec.tenants, payload, "tenants");
    payload.EndObject();
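
Aside: the header writer above streams {"typ":"JWT","alg":...,"kid":...} through a rapidjson Writer. A self-contained sketch of that header-building step with concrete rapidjson types (illustrative only):

#include <rapidjson/stringbuffer.h>
#include <rapidjson/writer.h>
#include <string>

std::string makeJwtHeader(std::string const& alg, std::string const& kid) {
    rapidjson::StringBuffer buf;
    rapidjson::Writer<rapidjson::StringBuffer> w(buf);
    w.StartObject();
    w.Key("typ");
    w.String("JWT");
    w.Key("alg");
    w.String(alg.data(), static_cast<rapidjson::SizeType>(alg.size()));
    w.Key("kid");
    w.String(kid.data(), static_cast<rapidjson::SizeType>(kid.size()));
    w.EndObject(); // buf now holds the compact JSON header, ready for base64url encoding
    return { buf.GetString(), buf.GetSize() };
}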

@@ -240,7 +280,7 @@ StringRef signToken(Arena& arena, TokenRef tokenSpec, PrivateKey privateKey) {
    return StringRef(out, totalLen);
}

bool parseHeaderPart(TokenRef& token, StringRef b64urlHeader) {
bool parseHeaderPart(Arena& arena, TokenRef& token, StringRef b64urlHeader) {
    auto tmpArena = Arena();
    auto optHeader = base64url::decode(tmpArena, b64urlHeader);
    if (!optHeader.present())

@@ -256,24 +296,30 @@ bool parseHeaderPart(TokenRef& token, StringRef b64urlHeader) {
            .detail("Offset", d.GetErrorOffset());
        return false;
    }
    auto algItr = d.FindMember("alg");
    if (!d.IsObject())
        return false;
    auto typItr = d.FindMember("typ");
    if (d.IsObject() && algItr != d.MemberEnd() && typItr != d.MemberEnd()) {
        auto const& alg = algItr->value;
        auto const& typ = typItr->value;
        if (alg.IsString() && typ.IsString()) {
            auto algValue = StringRef(reinterpret_cast<const uint8_t*>(alg.GetString()), alg.GetStringLength());
            auto algType = algorithmFromString(algValue);
            if (algType == Algorithm::UNKNOWN)
                return false;
            token.algorithm = algType;
            auto typValue = StringRef(reinterpret_cast<const uint8_t*>(typ.GetString()), typ.GetStringLength());
            if (typValue != "JWT"_sr)
                return false;
            return true;
        }
    }
    return false;
    if (typItr == d.MemberEnd() || !typItr->value.IsString())
        return false;
    auto algItr = d.FindMember("alg");
    if (algItr == d.MemberEnd() || !algItr->value.IsString())
        return false;
    auto kidItr = d.FindMember("kid");
    if (kidItr == d.MemberEnd() || !kidItr->value.IsString())
        return false;
    auto const& typ = typItr->value;
    auto const& alg = algItr->value;
    auto const& kid = kidItr->value;
    auto typValue = StringRef(reinterpret_cast<const uint8_t*>(typ.GetString()), typ.GetStringLength());
    if (typValue != "JWT"_sr)
        return false;
    auto algValue = StringRef(reinterpret_cast<const uint8_t*>(alg.GetString()), alg.GetStringLength());
    auto algType = algorithmFromString(algValue);
    if (algType == Algorithm::UNKNOWN)
        return false;
    token.algorithm = algType;
    token.keyId = StringRef(arena, reinterpret_cast<const uint8_t*>(kid.GetString()), kid.GetStringLength());
    return true;
}

template <class FieldType>

@@ -343,8 +389,6 @@ bool parsePayloadPart(Arena& arena, TokenRef& token, StringRef b64urlPayload) {
        return false;
    if (!parseField(arena, token.notBeforeUnixTime, d, "nbf"))
        return false;
    if (!parseField(arena, token.keyId, d, "kid"))
        return false;
    if (!parseField(arena, token.tenants, d, "tenants"))
        return false;
    return true;

@@ -358,13 +402,19 @@ bool parseSignaturePart(Arena& arena, TokenRef& token, StringRef b64urlSignature
    return true;
}

StringRef signaturePart(StringRef token) {
    token.eat("."_sr);
    token.eat("."_sr);
    return token;
}
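
Aside: signaturePart() and parseToken() below both rely on the header.payload.signature shape of a compact JWT. A standalone sketch of that three-way split, using std::string_view instead of StringRef::eat (illustrative only):

#include <array>
#include <optional>
#include <string_view>

std::optional<std::array<std::string_view, 3>> splitJwt(std::string_view token) {
    auto p1 = token.find('.');
    if (p1 == std::string_view::npos)
        return std::nullopt;
    auto p2 = token.find('.', p1 + 1);
    if (p2 == std::string_view::npos)
        return std::nullopt;
    std::array<std::string_view, 3> parts{ token.substr(0, p1),                // b64url header
                                           token.substr(p1 + 1, p2 - p1 - 1), // b64url payload
                                           token.substr(p2 + 1) };            // b64url signature
    if (parts[0].empty() || parts[1].empty() || parts[2].empty())
        return std::nullopt; // same well-formedness check parseToken() performs
    return parts;
}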

bool parseToken(Arena& arena, TokenRef& token, StringRef signedToken) {
    auto b64urlHeader = signedToken.eat("."_sr);
    auto b64urlPayload = signedToken.eat("."_sr);
    auto b64urlSignature = signedToken;
    if (b64urlHeader.empty() || b64urlPayload.empty() || b64urlSignature.empty())
        return false;
    if (!parseHeaderPart(token, b64urlHeader))
    if (!parseHeaderPart(arena, token, b64urlHeader))
        return false;
    if (!parsePayloadPart(arena, token, b64urlPayload))
        return false;

@@ -387,7 +437,7 @@ bool verifyToken(StringRef signedToken, PublicKey publicKey) {
        return false;
    auto sig = optSig.get();
    auto parsedToken = TokenRef();
    if (!parseHeaderPart(parsedToken, b64urlHeader))
    if (!parseHeaderPart(arena, parsedToken, b64urlHeader))
        return false;
    auto [verifyAlgo, digest] = getMethod(parsedToken.algorithm);
    if (!checkVerifyAlgorithm(verifyAlgo, publicKey))

@@ -401,6 +451,7 @@ TokenRef makeRandomTokenSpec(Arena& arena, IRandom& rng, Algorithm alg) {
    }
    auto ret = TokenRef{};
    ret.algorithm = alg;
    ret.keyId = genRandomAlphanumStringRef(arena, rng, MaxKeyNameLenPlus1);
    ret.issuer = genRandomAlphanumStringRef(arena, rng, MaxIssuerNameLenPlus1);
    ret.subject = genRandomAlphanumStringRef(arena, rng, MaxIssuerNameLenPlus1);
    ret.tokenId = genRandomAlphanumStringRef(arena, rng, 31);

@@ -409,10 +460,9 @@ TokenRef makeRandomTokenSpec(Arena& arena, IRandom& rng, Algorithm alg) {
    for (auto i = 0; i < numAudience; i++)
        aud[i] = genRandomAlphanumStringRef(arena, rng, MaxTenantNameLenPlus1);
    ret.audience = VectorRef<StringRef>(aud, numAudience);
    ret.issuedAtUnixTime = timer_int() / 1'000'000'000ul;
    ret.notBeforeUnixTime = timer_int() / 1'000'000'000ul;
    ret.issuedAtUnixTime = uint64_t(std::floor(g_network->timer()));
    ret.notBeforeUnixTime = ret.issuedAtUnixTime.get();
    ret.expiresAtUnixTime = ret.issuedAtUnixTime.get() + rng.randomInt(360, 1080 + 1);
    ret.keyId = genRandomAlphanumStringRef(arena, rng, MaxKeyNameLenPlus1);
    auto numTenants = rng.randomInt(1, 3);
    auto tenants = new (arena) StringRef[numTenants];
    for (auto i = 0; i < numTenants; i++)

@@ -491,6 +541,33 @@ TEST_CASE("/fdbrpc/TokenSign/JWT") {
    return Void();
}

TEST_CASE("/fdbrpc/TokenSign/JWT/ToStringRef") {
    auto t = authz::jwt::TokenRef();
    t.algorithm = authz::Algorithm::ES256;
    t.issuer = "issuer"_sr;
    t.subject = "subject"_sr;
    StringRef aud[3]{ "aud1"_sr, "aud2"_sr, "aud3"_sr };
    t.audience = VectorRef<StringRef>(aud, 3);
    t.issuedAtUnixTime = 123ul;
    t.expiresAtUnixTime = 456ul;
    t.notBeforeUnixTime = 789ul;
    t.keyId = "keyId"_sr;
    t.tokenId = "tokenId"_sr;
    StringRef tenants[2]{ "tenant1"_sr, "tenant2"_sr };
    t.tenants = VectorRef<StringRef>(tenants, 2);
    auto arena = Arena();
    auto tokenStr = t.toStringRef(arena);
    auto tokenStrExpected =
        "alg=ES256 kid=keyId iss=issuer sub=subject aud=[aud1,aud2,aud3] iat=123 exp=456 nbf=789 jti=tokenId tenants=[tenant1,tenant2]"_sr;
    if (tokenStr != tokenStrExpected) {
        fmt::print("Expected: {}\nGot : {}\n", tokenStrExpected.toStringView(), tokenStr.toStringView());
        ASSERT(false);
    } else {
        fmt::print("TEST OK\n");
    }
    return Void();
}

TEST_CASE("/fdbrpc/TokenSign/bench") {
    constexpr auto repeat = 5;
    constexpr auto numSamples = 10000;


@@ -20,25 +20,21 @@

#ifndef FLOW_TRANSPORT_H
#define FLOW_TRANSPORT_H
#include "flow/Arena.h"
#pragma once

#include <algorithm>

#include "fdbrpc/ContinuousSample.h"
#include "fdbrpc/HealthMonitor.h"
#include "flow/genericactors.actor.h"
#include "flow/network.h"
#include "flow/FileIdentifier.h"
#include "flow/ProtocolVersion.h"
#include "flow/Net2Packet.h"
#include "fdbrpc/ContinuousSample.h"
#include "flow/Arena.h"
#include "flow/PKey.h"

enum {
    WLTOKEN_ENDPOINT_NOT_FOUND = 0,
    WLTOKEN_PING_PACKET,
    WLTOKEN_AUTH_TENANT,
    WLTOKEN_UNAUTHORIZED_ENDPOINT,
    WLTOKEN_FIRST_AVAILABLE
};
enum { WLTOKEN_ENDPOINT_NOT_FOUND = 0, WLTOKEN_PING_PACKET, WLTOKEN_UNAUTHORIZED_ENDPOINT, WLTOKEN_FIRST_AVAILABLE };

#pragma pack(push, 4)
class Endpoint {

@@ -191,7 +187,7 @@ struct Peer : public ReferenceCounted<Peer> {

class IPAllowList;

class FlowTransport {
class FlowTransport : NonCopyable {
public:
    FlowTransport(uint64_t transportId, int maxWellKnownEndpoints, IPAllowList const* allowList);
    ~FlowTransport();

@@ -293,6 +289,15 @@ public:

    HealthMonitor* healthMonitor();

    bool currentDeliveryPeerIsTrusted() const;
    NetworkAddress currentDeliveryPeerAddress() const;

    Optional<PublicKey> getPublicKeyByName(StringRef name) const;
    // Adds or replaces a public key
    void addPublicKey(StringRef name, PublicKey key);
    void removePublicKey(StringRef name);
    void removeAllPublicKeys();

private:
    class TransportData* self;
};

@@ -21,52 +21,61 @@
#pragma once
#ifndef FDBRPC_TENANTINFO_H_
#define FDBRPC_TENANTINFO_H_
#include "fdbrpc/TenantName.h"
#include "fdbrpc/TokenSign.h"
#include "fdbrpc/TokenCache.h"
#include "fdbrpc/FlowTransport.h"
#include "flow/Arena.h"
#include "fdbrpc/fdbrpc.h"
#include <set>

struct TenantInfoRef {
    TenantInfoRef() {}
    TenantInfoRef(Arena& p, StringRef toCopy) : tenantName(StringRef(p, toCopy)) {}
    TenantInfoRef(Arena& p, TenantInfoRef toCopy)
      : tenantName(toCopy.tenantName.present() ? Optional<StringRef>(StringRef(p, toCopy.tenantName.get()))
                                               : Optional<StringRef>()) {}
    // Empty tenant name means that the peer is trusted
    Optional<StringRef> tenantName;
struct TenantInfo {
    static constexpr const int64_t INVALID_TENANT = -1;

    bool operator<(TenantInfoRef const& other) const {
        if (!other.tenantName.present()) {
            return false;
        }
        if (!tenantName.present()) {
            return true;
        }
        return tenantName.get() < other.tenantName.get();
    }

    template <class Ar>
    void serialize(Ar& ar) {
        serializer(ar, tenantName);
    }
};

struct AuthorizedTenants : ReferenceCounted<AuthorizedTenants> {
    Arena arena;
    std::set<TenantInfoRef> authorizedTenants;
    Optional<TenantNameRef> name;
    Optional<StringRef> token;
    int64_t tenantId;
    // This field is not serialized; it is instead set by FlowTransport during
    // deserialization and indicates whether the client is trusted.
    // Untrusted clients are generally expected to set a TenantName.
    bool trusted = false;
    // Set during deserialization. It will be true if the tenant
    // name is set and the client is authorized to use this tenant.
    bool tenantAuthorized = false;

    // Helper function for most endpoints that read/write data. This returns true iff
    // the client is either a) a trusted peer or b) is accessing keyspace belonging to a tenant
    // for which it has a valid authorization token.
    // NOTE: In a cluster where TenantMode is OPTIONAL or DISABLED, the tenant name may be unset.
    // In that case, a request carrying such a TenantInfo is valid iff the requesting peer is trusted.
    bool isAuthorized() const { return trusted || tenantAuthorized; }

    TenantInfo() : tenantId(INVALID_TENANT) {}
    TenantInfo(Optional<TenantName> const& tenantName, Optional<Standalone<StringRef>> const& token, int64_t tenantId)
      : tenantId(tenantId) {
        if (tenantName.present()) {
            arena.dependsOn(tenantName.get().arena());
            name = tenantName.get();
        }
        if (token.present()) {
            arena.dependsOn(token.get().arena());
            this->token = token.get();
        }
    }
};

// TODO: receive and validate token instead
struct AuthorizationRequest {
    constexpr static FileIdentifier file_identifier = 11499331;

    Arena arena;
    VectorRef<TenantInfoRef> tenants;
    ReplyPromise<Void> reply;

    template <class Ar>
    void serialize(Ar& ar) {
        serializer(ar, tenants, reply, arena);
template <>
struct serializable_traits<TenantInfo> : std::true_type {
    template <class Archiver>
    static void serialize(Archiver& ar, TenantInfo& v) {
        serializer(ar, v.name, v.tenantId, v.token, v.arena);
        if constexpr (Archiver::isDeserializing) {
            bool tenantAuthorized = false;
            if (v.name.present() && v.token.present()) {
                tenantAuthorized = TokenCache::instance().validate(v.name.get(), v.token.get());
            }
            v.trusted = FlowTransport::transport().currentDeliveryPeerIsTrusted();
            v.tenantAuthorized = tenantAuthorized;
        }
    }
};

@@ -0,0 +1,27 @@
/*
 * TenantName.h
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once
#ifndef FDBRPC_TENANTNAME_H
#define FDBRPC_TENANTNAME_H
#include "flow/Arena.h"
typedef StringRef TenantNameRef;
typedef Standalone<TenantNameRef> TenantName;
#endif // FDBRPC_TENANTNAME_H

@@ -0,0 +1,37 @@
/*
 * TokenCache.h
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef TOKENCACHE_H_
#define TOKENCACHE_H_
#include "fdbrpc/TenantName.h"
#include "flow/Arena.h"

class TokenCache : NonCopyable {
    struct TokenCacheImpl* impl;
    TokenCache();

public:
    ~TokenCache();
    static void createInstance();
    static TokenCache& instance();
    bool validate(TenantNameRef tenant, StringRef token);
};

#endif // TOKENCACHE_H_

@@ -26,6 +26,7 @@
#include "flow/Arena.h"
#include "flow/FastRef.h"
#include "flow/FileIdentifier.h"
#include "fdbrpc/TenantInfo.h"
#include "flow/PKey.h"

namespace authz {

@@ -63,6 +64,8 @@ struct SignedTokenRef {
    void serialize(Ar& ar) {
        serializer(ar, token, keyName, signature);
    }

    int expectedSize() const { return token.size() + keyName.size() + signature.size(); }
};

SignedTokenRef signToken(Arena& arena, TokenRef token, StringRef keyName, PrivateKey privateKey);

@@ -82,6 +85,7 @@ namespace authz::jwt {
struct TokenRef {
    // header part ("typ": "JWT" implicitly enforced)
    Algorithm algorithm; // alg
    StringRef keyId; // kid
    // payload part
    Optional<StringRef> issuer; // iss
    Optional<StringRef> subject; // sub

@@ -89,11 +93,13 @@ struct TokenRef {
    Optional<uint64_t> issuedAtUnixTime; // iat
    Optional<uint64_t> expiresAtUnixTime; // exp
    Optional<uint64_t> notBeforeUnixTime; // nbf
    Optional<StringRef> keyId; // kid
    Optional<StringRef> tokenId; // jti
    Optional<VectorRef<StringRef>> tenants; // tenants
    // signature part
    StringRef signature;

    // print each non-signature field in non-JSON, human-readable format e.g. for trace
    StringRef toStringRef(Arena& arena);
};

// Make plain JSON token string with fields (except signature) from passed spec

@@ -107,7 +113,7 @@ StringRef signToken(Arena& arena, TokenRef tokenSpec, PrivateKey privateKey);

// Parse passed b64url-encoded header part and materialize its contents into tokenOut,
// using memory allocated from arena
bool parseHeaderPart(TokenRef& tokenOut, StringRef b64urlHeaderIn);
bool parseHeaderPart(Arena& arena, TokenRef& tokenOut, StringRef b64urlHeaderIn);

// Parse passed b64url-encoded payload part and materialize its contents into tokenOut,
// using memory allocated from arena

@@ -117,6 +123,9 @@ bool parsePayloadPart(Arena& arena, TokenRef& tokenOut, StringRef b64urlPayloadI
// using memory allocated from arena
bool parseSignaturePart(Arena& arena, TokenRef& tokenOut, StringRef b64urlSignatureIn);

// Returns the base64 encoded signature of the token
StringRef signaturePart(StringRef token);

// Parse passed token string and materialize its contents into tokenOut,
// using memory allocated from arena
// Return whether the signed token string is well-formed

@@ -28,28 +28,28 @@
 * All well-known endpoints of FDB must be listed here to guarantee their uniqueness
 */
enum WellKnownEndpoints {
    WLTOKEN_CLIENTLEADERREG_GETLEADER = WLTOKEN_FIRST_AVAILABLE, // 4
    WLTOKEN_CLIENTLEADERREG_OPENDATABASE, // 5
    WLTOKEN_LEADERELECTIONREG_CANDIDACY, // 6
    WLTOKEN_LEADERELECTIONREG_ELECTIONRESULT, // 7
    WLTOKEN_LEADERELECTIONREG_LEADERHEARTBEAT, // 8
    WLTOKEN_LEADERELECTIONREG_FORWARD, // 9
    WLTOKEN_CLIENTLEADERREG_GETLEADER = WLTOKEN_FIRST_AVAILABLE, // 3
    WLTOKEN_CLIENTLEADERREG_OPENDATABASE, // 4
    WLTOKEN_LEADERELECTIONREG_CANDIDACY, // 5
    WLTOKEN_LEADERELECTIONREG_ELECTIONRESULT, // 6
    WLTOKEN_LEADERELECTIONREG_LEADERHEARTBEAT, // 7
    WLTOKEN_LEADERELECTIONREG_FORWARD, // 8
    WLTOKEN_GENERATIONREG_READ, // 9
    WLTOKEN_PROTOCOL_INFO, // 10 : the value of this endpoint should be stable and not change.
    WLTOKEN_GENERATIONREG_READ, // 11
    WLTOKEN_GENERATIONREG_WRITE, // 12
    WLTOKEN_CLIENTLEADERREG_DESCRIPTOR_MUTABLE, // 13
    WLTOKEN_CONFIGTXN_GETGENERATION, // 14
    WLTOKEN_CONFIGTXN_GET, // 15
    WLTOKEN_CONFIGTXN_GETCLASSES, // 16
    WLTOKEN_CONFIGTXN_GETKNOBS, // 17
    WLTOKEN_CONFIGTXN_COMMIT, // 18
    WLTOKEN_CONFIGFOLLOWER_GETSNAPSHOTANDCHANGES, // 19
    WLTOKEN_CONFIGFOLLOWER_GETCHANGES, // 20
    WLTOKEN_CONFIGFOLLOWER_COMPACT, // 21
    WLTOKEN_CONFIGFOLLOWER_ROLLFORWARD, // 22
    WLTOKEN_CONFIGFOLLOWER_GETCOMMITTEDVERSION, // 23
    WLTOKEN_PROCESS, // 24
    WLTOKEN_RESERVED_COUNT // 25
    WLTOKEN_GENERATIONREG_WRITE, // 11
    WLTOKEN_CLIENTLEADERREG_DESCRIPTOR_MUTABLE, // 12
    WLTOKEN_CONFIGTXN_GETGENERATION, // 13
    WLTOKEN_CONFIGTXN_GET, // 14
    WLTOKEN_CONFIGTXN_GETCLASSES, // 15
    WLTOKEN_CONFIGTXN_GETKNOBS, // 16
    WLTOKEN_CONFIGTXN_COMMIT, // 17
    WLTOKEN_CONFIGFOLLOWER_GETSNAPSHOTANDCHANGES, // 18
    WLTOKEN_CONFIGFOLLOWER_GETCHANGES, // 19
    WLTOKEN_CONFIGFOLLOWER_COMPACT, // 20
    WLTOKEN_CONFIGFOLLOWER_ROLLFORWARD, // 21
    WLTOKEN_CONFIGFOLLOWER_GETCOMMITTEDVERSION, // 22
    WLTOKEN_PROCESS, // 23
    WLTOKEN_RESERVED_COUNT // 24
};

static_assert(WLTOKEN_PROTOCOL_INFO ==

@@ -648,10 +648,29 @@ struct serializable_traits<ReplyPromiseStream<T>> : std::true_type {
    }
};

template <class T, class = int>
struct HasReply_t : std::false_type {};

template <class T>
struct HasReply_t<T, decltype((void)T::reply, 0)> : std::true_type {};

template <class T>
constexpr bool HasReply = HasReply_t<T>::value;

template <class T, class = int>
struct HasVerify_t : std::false_type {};

template <class T>
struct HasVerify_t<T, decltype(void(std::declval<T>().verify()), 0)> : std::true_type {};

template <class T>
constexpr bool HasVerify = HasVerify_t<T>::value;
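
Aside: HasReply_t/HasVerify_t use the pre-C++20 detection idiom — a partial specialization whose second template argument SFINAEs away when the member is missing. The same idiom on a toy member, as a standalone sketch:

#include <type_traits>
#include <utility>

template <class T, class = int>
struct HasSize : std::false_type {};

template <class T>
struct HasSize<T, decltype(void(std::declval<T>().size()), 0)> : std::true_type {};

struct WithSize { int size() const { return 0; } };
struct WithoutSize {};

static_assert(HasSize<WithSize>::value, "size() detected");
static_assert(!HasSize<WithoutSize>::value, "no size() member");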

template <class T, bool IsPublic>
struct NetNotifiedQueue final : NotifiedQueue<T>, FlowReceiver, FastAllocated<NetNotifiedQueue<T, IsPublic>> {
    using FastAllocated<NetNotifiedQueue<T, IsPublic>>::operator new;
    using FastAllocated<NetNotifiedQueue<T, IsPublic>>::operator delete;
    static_assert(!IsPublic || HasVerify<T>, "Public request stream objects need to implement bool T::verify()");

    NetNotifiedQueue(int futures, int promises) : NotifiedQueue<T>(futures, promises) {}
    NetNotifiedQueue(int futures, int promises, const Endpoint& remoteEndpoint)

@@ -662,7 +681,17 @@ struct NetNotifiedQueue final : NotifiedQueue<T>, FlowReceiver, FastAllocated<Ne
        this->addPromiseRef();
        T message;
        reader.deserialize(message);
        this->send(std::move(message));
        if constexpr (IsPublic) {
            if (!message.verify()) {
                if constexpr (HasReply<T>) {
                    message.reply.sendError(permission_denied());
                }
            } else {
                this->send(std::move(message));
            }
        } else {
            this->send(std::move(message));
        }
        this->delPromiseRef();
    }
    bool isStream() const override { return true; }

@@ -20,20 +20,23 @@

#ifndef FLOW_SIMULATOR_H
#define FLOW_SIMULATOR_H
#include "flow/ProtocolVersion.h"
#pragma once
#include <algorithm>
#include <string>
#include <random>
#include <limits>
#pragma once

#include "flow/flow.h"
#include "flow/Histogram.h"
#include "flow/ProtocolVersion.h"
#include "fdbrpc/FailureMonitor.h"
#include "fdbrpc/Locality.h"
#include "flow/IAsyncFile.h"
#include "flow/TDMetric.actor.h"
#include <random>
#include "fdbrpc/FailureMonitor.h"
#include "fdbrpc/Locality.h"
#include "fdbrpc/ReplicationPolicy.h"
#include "fdbrpc/TokenSign.h"

enum ClogMode { ClogDefault, ClogAll, ClogSend, ClogReceive };

@@ -473,6 +476,8 @@ public:
    double injectTargetedSSRestartTime = std::numeric_limits<double>::max();
    double injectSSDelayTime = std::numeric_limits<double>::max();

    std::unordered_map<Standalone<StringRef>, PrivateKey> authKeys;

    flowGlobalType global(int id) const final { return getCurrentProcess()->global(id); };
    void setGlobal(size_t id, flowGlobalType v) final { getCurrentProcess()->setGlobal(id, v); };

@@ -22,6 +22,7 @@
#include <memory>
#include <string>

#include "flow/MkCert.h"
#include "fmt/format.h"
#include "fdbrpc/simulator.h"
#include "flow/Arena.h"

@@ -2178,6 +2179,9 @@ public:
                       this,
                       "",
                       "");
    // create a key pair for AuthZ testing
    auto key = mkcert::makeEcP256();
    authKeys.insert(std::make_pair(Standalone<StringRef>("DefaultKey"_sr), key));
    g_network = net2 = newNet2(TLSConfig(), false, true);
    g_network->addStopCallback(Net2FileSystem::stop);
    Net2FileSystem::newFileSystem();

@@ -128,6 +128,10 @@ ACTOR Future<GranuleFiles> loadHistoryFiles(Database cx, UID granuleID) {
// key range, the granule may have a snapshot file at version X, where beginVersion < X <= readVersion. In this case, if
// the number of bytes in delta files between beginVersion and X is larger than the snapshot file at version X, it is
// strictly more efficient (in terms of files and bytes read) to just use the snapshot file at version X instead.
//
// To assist BlobGranule file (snapshot and/or delta) encryption, the routine constructs the
// BlobFilePointerRef->cipherKeysMeta field while populating snapshot and/or delta files. This approach avoids having
// to define the method as an ACTOR, since fetching the desired EncryptionKey may involve reaching out to
// EncryptKeyProxy or an external KMS.
void GranuleFiles::getFiles(Version beginVersion,
                            Version readVersion,
                            bool canCollapse,

@@ -195,8 +199,12 @@ void GranuleFiles::getFiles(Version beginVersion,
    }

    while (deltaF != deltaFiles.end() && deltaF->version < readVersion) {
        chunk.deltaFiles.emplace_back_deep(
            replyArena, deltaF->filename, deltaF->offset, deltaF->length, deltaF->fullFileLength);
        chunk.deltaFiles.emplace_back_deep(replyArena,
                                           deltaF->filename,
                                           deltaF->offset,
                                           deltaF->length,
                                           deltaF->fullFileLength,
                                           deltaF->cipherKeysMeta);
        deltaBytesCounter += deltaF->length;
        ASSERT(lastIncluded < deltaF->version);
        lastIncluded = deltaF->version;

@@ -204,8 +212,12 @@ void GranuleFiles::getFiles(Version beginVersion,
    }
    // include last delta file that passes readVersion, if it exists
    if (deltaF != deltaFiles.end() && lastIncluded < readVersion) {
        chunk.deltaFiles.emplace_back_deep(
            replyArena, deltaF->filename, deltaF->offset, deltaF->length, deltaF->fullFileLength);
        chunk.deltaFiles.emplace_back_deep(replyArena,
                                           deltaF->filename,
                                           deltaF->offset,
                                           deltaF->length,
                                           deltaF->fullFileLength,
                                           deltaF->cipherKeysMeta);
        deltaBytesCounter += deltaF->length;
        lastIncluded = deltaF->version;
    }

@@ -314,7 +314,7 @@ ACTOR Future<BlobGranuleCipherKeysCtx> getLatestGranuleCipherKeys(Reference<Blob
    cipherKeysCtx.headerCipherKey = BlobGranuleCipherKey::fromBlobCipherKey(systemCipherKeys.cipherHeaderKey, *arena);

    cipherKeysCtx.ivRef = makeString(AES_256_IV_LENGTH, *arena);
    generateRandomData(mutateString(cipherKeysCtx.ivRef), AES_256_IV_LENGTH);
    deterministicRandom()->randomBytes(mutateString(cipherKeysCtx.ivRef), AES_256_IV_LENGTH);

    if (BG_ENCRYPT_COMPRESS_DEBUG) {
        TraceEvent(SevDebug, "GetLatestGranuleCipherKey")

@@ -352,29 +352,27 @@ ACTOR Future<BlobGranuleCipherKey> lookupCipherKey(Reference<BlobWorkerData> bwD
    return BlobGranuleCipherKey::fromBlobCipherKey(cipherKeyMapItr->second, *arena);
}

ACTOR Future<BlobGranuleCipherKeysCtx> getGranuleCipherKeys(Reference<BlobWorkerData> bwData,
                                                            BlobGranuleCipherKeysMetaRef cipherKeysMetaRef,
                                                            Arena* arena) {
ACTOR Future<BlobGranuleCipherKeysCtx> getGranuleCipherKeysImpl(Reference<BlobWorkerData> bwData,
                                                                BlobCipherDetails textCipherDetails,
                                                                BlobCipherDetails headerCipherDetails,
                                                                StringRef ivRef,
                                                                Arena* arena) {
    state BlobGranuleCipherKeysCtx cipherKeysCtx;

    // Fetch 'textCipher' key
    state BlobCipherDetails textCipherDetails(
        cipherKeysMetaRef.textDomainId, cipherKeysMetaRef.textBaseCipherId, cipherKeysMetaRef.textSalt);
    BlobGranuleCipherKey textCipherKey = wait(lookupCipherKey(bwData, textCipherDetails, arena));
    cipherKeysCtx.textCipherKey = textCipherKey;

    // Fetch 'headerCipher' key
    state BlobCipherDetails headerCipherDetails(
        cipherKeysMetaRef.headerDomainId, cipherKeysMetaRef.headerBaseCipherId, cipherKeysMetaRef.headerSalt);
    BlobGranuleCipherKey headerCipherKey = wait(lookupCipherKey(bwData, headerCipherDetails, arena));
    cipherKeysCtx.headerCipherKey = headerCipherKey;

    // Populate 'Initialization Vector'
    ASSERT_EQ(cipherKeysMetaRef.ivRef.size(), AES_256_IV_LENGTH);
    cipherKeysCtx.ivRef = StringRef(*arena, cipherKeysMetaRef.ivRef);
    ASSERT_EQ(ivRef.size(), AES_256_IV_LENGTH);
    cipherKeysCtx.ivRef = StringRef(*arena, ivRef);

    if (BG_ENCRYPT_COMPRESS_DEBUG) {
        TraceEvent("GetGranuleCipherKey")
        TraceEvent(SevDebug, "GetGranuleCipherKey")
            .detail("TextDomainId", cipherKeysCtx.textCipherKey.encryptDomainId)
            .detail("TextBaseCipherId", cipherKeysCtx.textCipherKey.baseCipherId)
            .detail("TextSalt", cipherKeysCtx.textCipherKey.salt)

@@ -387,6 +385,32 @@ ACTOR Future<BlobGranuleCipherKeysCtx> getGranuleCipherKeys(Reference<BlobWorker
    return cipherKeysCtx;
}

Future<BlobGranuleCipherKeysCtx> getGranuleCipherKeysFromKeysMeta(Reference<BlobWorkerData> bwData,
                                                                  BlobGranuleCipherKeysMeta cipherKeysMeta,
                                                                  Arena* arena) {
    BlobCipherDetails textCipherDetails(
        cipherKeysMeta.textDomainId, cipherKeysMeta.textBaseCipherId, cipherKeysMeta.textSalt);

    BlobCipherDetails headerCipherDetails(
        cipherKeysMeta.headerDomainId, cipherKeysMeta.headerBaseCipherId, cipherKeysMeta.headerSalt);

    StringRef ivRef = StringRef(*arena, cipherKeysMeta.ivStr);

    return getGranuleCipherKeysImpl(bwData, textCipherDetails, headerCipherDetails, ivRef, arena);
}

Future<BlobGranuleCipherKeysCtx> getGranuleCipherKeysFromKeysMetaRef(Reference<BlobWorkerData> bwData,
                                                                     BlobGranuleCipherKeysMetaRef cipherKeysMetaRef,
                                                                     Arena* arena) {
    BlobCipherDetails textCipherDetails(
        cipherKeysMetaRef.textDomainId, cipherKeysMetaRef.textBaseCipherId, cipherKeysMetaRef.textSalt);

    BlobCipherDetails headerCipherDetails(
        cipherKeysMetaRef.headerDomainId, cipherKeysMetaRef.headerBaseCipherId, cipherKeysMetaRef.headerSalt);

    return getGranuleCipherKeysImpl(bwData, textCipherDetails, headerCipherDetails, cipherKeysMetaRef.ivRef, arena);
}

ACTOR Future<Void> readAndCheckGranuleLock(Reference<ReadYourWritesTransaction> tr,
                                           KeyRange granuleRange,
                                           int64_t epoch,
|
|||
state Optional<BlobGranuleCipherKeysCtx> cipherKeysCtx;
|
||||
state Optional<BlobGranuleCipherKeysMeta> cipherKeysMeta;
|
||||
state Arena arena;
|
||||
// TODO support encryption, figure out proper state stuff
|
||||
/*if (isBlobFileEncryptionSupported()) {
|
||||
BlobGranuleCipherKeysCtx ciphKeysCtx = wait(getLatestGranuleCipherKeys(bwData, keyRange, &arena));
|
||||
cipherKeysCtx = ciphKeysCtx;
|
||||
cipherKeysMeta = BlobGranuleCipherKeysCtx::toCipherKeysMeta(cipherKeysCtx.get());
|
||||
}*/
|
||||
|
||||
Optional<CompressionFilter> compressFilter = getBlobFileCompressFilter();
|
||||
if (isBlobFileEncryptionSupported()) {
|
||||
BlobGranuleCipherKeysCtx ciphKeysCtx = wait(getLatestGranuleCipherKeys(bwData, keyRange, &arena));
|
||||
cipherKeysCtx = std::move(ciphKeysCtx);
|
||||
cipherKeysMeta = BlobGranuleCipherKeysCtx::toCipherKeysMeta(cipherKeysCtx.get());
|
||||
}
|
||||
|
||||
state Value serialized = serializeChunkedDeltaFile(
|
||||
deltasToWrite, keyRange, SERVER_KNOBS->BG_DELTA_FILE_TARGET_CHUNK_BYTES, compressFilter, cipherKeysCtx);
|
||||
state Optional<CompressionFilter> compressFilter = getBlobFileCompressFilter();
|
||||
state Value serialized = serializeChunkedDeltaFile(StringRef(fileName),
|
||||
deltasToWrite,
|
||||
keyRange,
|
||||
SERVER_KNOBS->BG_DELTA_FILE_TARGET_CHUNK_BYTES,
|
||||
compressFilter,
|
||||
cipherKeysCtx);
|
||||
state size_t serializedSize = serialized.size();
|
||||
|
||||
// Free up deltasToWrite here to reduce memory
|
||||
|
@ -680,6 +707,14 @@ ACTOR Future<BlobFileIndex> writeDeltaFile(Reference<BlobWorkerData> bwData,
|
|||
if (BUGGIFY_WITH_PROB(0.01)) {
|
||||
wait(delay(deterministicRandom()->random01()));
|
||||
}
|
||||
|
||||
if (BW_DEBUG) {
|
||||
TraceEvent(SevDebug, "DeltaFileWritten")
|
||||
.detail("FileName", fname)
|
||||
.detail("Encrypted", cipherKeysCtx.present())
|
||||
.detail("Compressed", compressFilter.present());
|
||||
}
|
||||
|
||||
// FIXME: change when we implement multiplexing
|
||||
return BlobFileIndex(currentDeltaVersion, fname, 0, serializedSize, serializedSize, cipherKeysMeta);
|
||||
} catch (Error& e) {
|
||||
|
@ -759,15 +794,19 @@ ACTOR Future<BlobFileIndex> writeSnapshot(Reference<BlobWorkerData> bwData,
|
|||
state Optional<BlobGranuleCipherKeysCtx> cipherKeysCtx;
|
||||
state Optional<BlobGranuleCipherKeysMeta> cipherKeysMeta;
|
||||
state Arena arena;
|
||||
|
||||
if (isBlobFileEncryptionSupported()) {
|
||||
BlobGranuleCipherKeysCtx ciphKeysCtx = wait(getLatestGranuleCipherKeys(bwData, keyRange, &arena));
|
||||
cipherKeysCtx = ciphKeysCtx;
|
||||
cipherKeysCtx = std::move(ciphKeysCtx);
|
||||
cipherKeysMeta = BlobGranuleCipherKeysCtx::toCipherKeysMeta(cipherKeysCtx.get());
|
||||
}
|
||||
|
||||
Optional<CompressionFilter> compressFilter = getBlobFileCompressFilter();
|
||||
state Value serialized = serializeChunkedSnapshot(
|
||||
snapshot, SERVER_KNOBS->BG_SNAPSHOT_FILE_TARGET_CHUNK_BYTES, compressFilter, cipherKeysCtx);
|
||||
state Optional<CompressionFilter> compressFilter = getBlobFileCompressFilter();
|
||||
state Value serialized = serializeChunkedSnapshot(StringRef(fileName),
|
||||
snapshot,
|
||||
SERVER_KNOBS->BG_SNAPSHOT_FILE_TARGET_CHUNK_BYTES,
|
||||
compressFilter,
|
||||
cipherKeysCtx);
|
||||
state size_t serializedSize = serialized.size();
|
||||
|
||||
// free snapshot to reduce memory
|
||||
|
@ -848,6 +887,13 @@ ACTOR Future<BlobFileIndex> writeSnapshot(Reference<BlobWorkerData> bwData,
|
|||
wait(delay(deterministicRandom()->random01()));
|
||||
}
|
||||
|
||||
if (BW_DEBUG) {
|
||||
TraceEvent(SevDebug, "SnapshotFileWritten")
|
||||
.detail("FileName", fileName)
|
||||
.detail("Encrypted", cipherKeysCtx.present())
|
||||
.detail("Compressed", compressFilter.present());
|
||||
}
|
||||
|
||||
// FIXME: change when we implement multiplexing
|
||||
return BlobFileIndex(version, fname, 0, serializedSize, serializedSize, cipherKeysMeta);
|
||||
}
|
||||
|
@ -975,33 +1021,49 @@ ACTOR Future<BlobFileIndex> compactFromBlob(Reference<BlobWorkerData> bwData,
|
|||
}
|
||||
ASSERT(snapshotVersion < version);
|
||||
|
||||
state Optional<BlobGranuleCipherKeysCtx> snapCipherKeysCtx;
|
||||
if (snapshotF.cipherKeysMeta.present()) {
|
||||
ASSERT(isBlobFileEncryptionSupported());
|
||||
|
||||
BlobGranuleCipherKeysCtx keysCtx =
|
||||
wait(getGranuleCipherKeysFromKeysMeta(bwData, snapshotF.cipherKeysMeta.get(), &filenameArena));
|
||||
snapCipherKeysCtx = std::move(keysCtx);
|
||||
}
|
||||
|
||||
chunk.snapshotFile = BlobFilePointerRef(filenameArena,
|
||||
snapshotF.filename,
|
||||
snapshotF.offset,
|
||||
snapshotF.length,
|
||||
snapshotF.fullFileLength,
|
||||
snapshotF.cipherKeysMeta);
|
||||
|
||||
// TODO: optimization - batch 'encryption-key' lookup given the GranuleFile set is known
|
||||
// FIXME: get cipher keys for delta as well!
|
||||
if (chunk.snapshotFile.get().cipherKeysMetaRef.present()) {
|
||||
ASSERT(isBlobFileEncryptionSupported());
|
||||
BlobGranuleCipherKeysCtx cipherKeysCtx =
|
||||
wait(getGranuleCipherKeys(bwData, chunk.snapshotFile.get().cipherKeysMetaRef.get(), &filenameArena));
|
||||
chunk.cipherKeysCtx = cipherKeysCtx;
|
||||
}
|
||||
snapCipherKeysCtx);
|
||||
|
||||
compactBytesRead += snapshotF.length;
|
||||
int deltaIdx = files.deltaFiles.size() - 1;
|
||||
state int deltaIdx = files.deltaFiles.size() - 1;
|
||||
while (deltaIdx >= 0 && files.deltaFiles[deltaIdx].version > snapshotVersion) {
|
||||
deltaIdx--;
|
||||
}
|
||||
deltaIdx++;
|
||||
Version lastDeltaVersion = snapshotVersion;
|
||||
state Version lastDeltaVersion = snapshotVersion;
|
||||
state BlobFileIndex deltaF;
|
||||
while (deltaIdx < files.deltaFiles.size() && lastDeltaVersion < version) {
|
||||
BlobFileIndex deltaF = files.deltaFiles[deltaIdx];
|
||||
chunk.deltaFiles.emplace_back_deep(
|
||||
filenameArena, deltaF.filename, deltaF.offset, deltaF.length, deltaF.fullFileLength);
|
||||
state Optional<BlobGranuleCipherKeysCtx> deltaCipherKeysCtx;
|
||||
|
||||
deltaF = files.deltaFiles[deltaIdx];
|
||||
|
||||
if (deltaF.cipherKeysMeta.present()) {
|
||||
ASSERT(isBlobFileEncryptionSupported());
|
||||
|
||||
BlobGranuleCipherKeysCtx keysCtx =
|
||||
wait(getGranuleCipherKeysFromKeysMeta(bwData, deltaF.cipherKeysMeta.get(), &filenameArena));
|
||||
deltaCipherKeysCtx = std::move(keysCtx);
|
||||
}
|
||||
|
||||
chunk.deltaFiles.emplace_back_deep(filenameArena,
|
||||
deltaF.filename,
|
||||
deltaF.offset,
|
||||
deltaF.length,
|
||||
deltaF.fullFileLength,
|
||||
deltaCipherKeysCtx);
|
||||
compactBytesRead += deltaF.length;
|
||||
lastDeltaVersion = files.deltaFiles[deltaIdx].version;
|
||||
deltaIdx++;
|
||||
|

@@ -3193,12 +3255,44 @@ ACTOR Future<Void> doBlobGranuleFileRequest(Reference<BlobWorkerData> bwData, Bl
        didCollapse = true;
    }

    // TODO: optimization - batch 'encryption-key' lookup given the GranuleFile set is known
    state Future<BlobGranuleCipherKeysCtx> cipherKeysCtx;
    if (chunk.snapshotFile.present() && chunk.snapshotFile.get().cipherKeysMetaRef.present()) {
        ASSERT(isBlobFileEncryptionSupported());
        cipherKeysCtx =
            getGranuleCipherKeys(bwData, chunk.snapshotFile.get().cipherKeysMetaRef.get(), &rep.arena);
    // Invoke calls to populate 'EncryptionKeysCtx' for snapshot and/or deltaFiles asynchronously
    state Optional<Future<BlobGranuleCipherKeysCtx>> snapCipherKeysCtx;
    if (chunk.snapshotFile.present()) {
        const bool encrypted = chunk.snapshotFile.get().cipherKeysMetaRef.present();

        if (BW_DEBUG) {
            TraceEvent("DoBlobGranuleFileRequestDelta_KeysCtxPrepare")
                .detail("FileName", chunk.snapshotFile.get().filename.toString())
                .detail("Encrypted", encrypted);
        }

        if (encrypted) {
            ASSERT(isBlobFileEncryptionSupported());
            ASSERT(!chunk.snapshotFile.get().cipherKeysCtx.present());

            snapCipherKeysCtx = getGranuleCipherKeysFromKeysMetaRef(
                bwData, chunk.snapshotFile.get().cipherKeysMetaRef.get(), &rep.arena);
        }
    }
    state std::unordered_map<int, Future<BlobGranuleCipherKeysCtx>> deltaCipherKeysCtxs;
    for (int deltaIdx = 0; deltaIdx < chunk.deltaFiles.size(); deltaIdx++) {
        const bool encrypted = chunk.deltaFiles[deltaIdx].cipherKeysMetaRef.present();

        if (BW_DEBUG) {
            TraceEvent("DoBlobGranuleFileRequestDelta_KeysCtxPrepare")
                .detail("FileName", chunk.deltaFiles[deltaIdx].filename.toString())
                .detail("Encrypted", encrypted);
        }

        if (encrypted) {
            ASSERT(isBlobFileEncryptionSupported());
            ASSERT(!chunk.deltaFiles[deltaIdx].cipherKeysCtx.present());

            deltaCipherKeysCtxs.emplace(
                deltaIdx,
                getGranuleCipherKeysFromKeysMetaRef(
                    bwData, chunk.deltaFiles[deltaIdx].cipherKeysMetaRef.get(), &rep.arena));
        }
    }

    // FIXME: get cipher keys for delta files too!

@@ -3245,9 +3339,37 @@ ACTOR Future<Void> doBlobGranuleFileRequest(Reference<BlobWorkerData> bwData, Bl
        }
    }

    if (chunk.snapshotFile.present() && chunk.snapshotFile.get().cipherKeysMetaRef.present()) {
        BlobGranuleCipherKeysCtx ctx = wait(cipherKeysCtx);
        chunk.cipherKeysCtx = std::move(ctx);
    // Update EncryptionKeysCtx information for the chunk->snapshotFile
    if (chunk.snapshotFile.present() && snapCipherKeysCtx.present()) {
        ASSERT(chunk.snapshotFile.get().cipherKeysMetaRef.present());

        BlobGranuleCipherKeysCtx keysCtx = wait(snapCipherKeysCtx.get());
        chunk.snapshotFile.get().cipherKeysCtx = std::move(keysCtx);
        // reclaim memory from non-serializable field
        chunk.snapshotFile.get().cipherKeysMetaRef.reset();

        if (BW_DEBUG) {
            TraceEvent("DoBlobGranuleFileRequestSnap_KeysCtxDone")
                .detail("FileName", chunk.snapshotFile.get().filename.toString());
        }
    }

    // Update EncryptionKeysCtx information for the chunk->deltaFiles
    if (!deltaCipherKeysCtxs.empty()) {
        ASSERT(!chunk.deltaFiles.empty());

        state std::unordered_map<int, Future<BlobGranuleCipherKeysCtx>>::const_iterator itr;
        for (itr = deltaCipherKeysCtxs.begin(); itr != deltaCipherKeysCtxs.end(); itr++) {
            BlobGranuleCipherKeysCtx keysCtx = wait(itr->second);
            chunk.deltaFiles[itr->first].cipherKeysCtx = std::move(keysCtx);
            // reclaim memory from non-serializable field
            chunk.deltaFiles[itr->first].cipherKeysMetaRef.reset();

            if (BW_DEBUG) {
                TraceEvent("DoBlobGranuleFileRequestDelta_KeysCtxDone")
                    .detail("FileName", chunk.deltaFiles[itr->first].filename.toString());
            }
        }
    }

    rep.chunks.push_back(rep.arena, chunk);
|
||||
|
|
|
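Note: the change above kicks off every cipher-key lookup first and only waits on the futures later, so the KMS round-trips overlap the granule file reads. A minimal sketch of that fan-out/join shape, with a hypothetical fetchCipherKey() helper standing in for the real blob-worker API:

ACTOR Future<Void> loadEncryptedFiles(std::vector<int> fileIdxs) {
	state std::unordered_map<int, Future<BlobGranuleCipherKeysCtx>> keyFutures;
	state int idx;
	for (idx = 0; idx < fileIdxs.size(); idx++) {
		// start every lookup up front; do not wait yet
		keyFutures.emplace(fileIdxs[idx], fetchCipherKey(fileIdxs[idx]));
	}
	for (idx = 0; idx < fileIdxs.size(); idx++) {
		// join only when the key is actually needed
		BlobGranuleCipherKeysCtx ctx = wait(keyFutures[fileIdxs[idx]]);
		// ... decrypt / serve the corresponding file using ctx ...
	}
	return Void();
}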
@@ -248,6 +248,7 @@ ACTOR Future<Void> clusterWatchDatabase(ClusterControllerData* cluster,
dbInfo.latencyBandConfig = db->serverInfo->get().latencyBandConfig;
dbInfo.myLocality = db->serverInfo->get().myLocality;
dbInfo.client = ClientDBInfo();
dbInfo.client.isEncryptionEnabled = SERVER_KNOBS->ENABLE_ENCRYPTION;
dbInfo.client.tenantMode = db->config.tenantMode;
dbInfo.client.clusterId = db->serverInfo->get().client.clusterId;

@@ -1011,7 +1012,8 @@ void clusterRegisterMaster(ClusterControllerData* self, RegisterMasterRequest co
// Construct the client information
if (db->clientInfo->get().commitProxies != req.commitProxies ||
db->clientInfo->get().grvProxies != req.grvProxies ||
db->clientInfo->get().tenantMode != db->config.tenantMode || db->clientInfo->get().clusterId != req.clusterId) {
db->clientInfo->get().tenantMode != db->config.tenantMode || db->clientInfo->get().clusterId != req.clusterId ||
db->clientInfo->get().isEncryptionEnabled != SERVER_KNOBS->ENABLE_ENCRYPTION) {
TraceEvent("PublishNewClientInfo", self->id)
.detail("Master", dbInfo.master.id())
.detail("GrvProxies", db->clientInfo->get().grvProxies)

@@ -1021,11 +1023,13 @@ void clusterRegisterMaster(ClusterControllerData* self, RegisterMasterRequest co
.detail("TenantMode", db->clientInfo->get().tenantMode.toString())
.detail("ReqTenantMode", db->config.tenantMode.toString())
.detail("ClusterId", db->clientInfo->get().clusterId)
.detail("EncryptionEnabled", SERVER_KNOBS->ENABLE_ENCRYPTION)
.detail("ReqClusterId", req.clusterId);
isChanged = true;
// TODO why construct a new one and not just copy the old one and change proxies + id?
ClientDBInfo clientInfo;
clientInfo.id = deterministicRandom()->randomUniqueID();
clientInfo.isEncryptionEnabled = SERVER_KNOBS->ENABLE_ENCRYPTION;
clientInfo.commitProxies = req.commitProxies;
clientInfo.grvProxies = req.grvProxies;
clientInfo.tenantMode = db->config.tenantMode;
@@ -521,6 +521,19 @@ ACTOR Future<Void> changeCoordinators(Reference<ClusterRecoveryData> self) {
TraceEvent("ChangeCoordinators", self->dbgid).log();
++self->changeCoordinatorsRequests;
state ChangeCoordinatorsRequest changeCoordinatorsRequest = req;
if (self->masterInterface.id() != changeCoordinatorsRequest.masterId) {
// Make sure the request is coming from a proxy from the same
// generation. If not, throw coordinators_changed - this is OK
// because the client will still receive commit_unknown_result, and
// will retry the request. This check is necessary because
// otherwise in rare circumstances where a recovery occurs between
// the change coordinators request from the client and the cstate
// actually being moved, the client may think the change
// coordinators command failed when it is still in progress. So we
// preempt the issue here and force failure if the generations
// don't match.
throw coordinators_changed();
}

// Kill cluster controller to facilitate coordinator registration update
if (self->controllerData->shouldCommitSuicide) {
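Note: the comment above describes a generation fence: a request minted against an older master is rejected outright rather than half-applied, and the requester retries against the new generation. The same idea in a compact sketch, with hypothetical Server/Request types rather than the actual FoundationDB interfaces:

#include <stdexcept>

struct Request {
	int generationId; // captured by the requester when it was created
};

struct Server {
	int generationId; // bumped on every recovery

	void handle(const Request& req) {
		if (req.generationId != generationId) {
			// Stale requester: fail fast so the client retries against the
			// new generation instead of observing a half-done change.
			throw std::runtime_error("coordinators_changed");
		}
		// ... safe to apply the coordinator change ...
	}
};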
@@ -283,16 +283,20 @@ bool verifyTenantPrefix(ProxyCommitData* const commitData, const CommitTransacti
if (!m.param1.startsWith(tenantPrefix)) {
TraceEvent(SevWarnAlways, "TenantPrefixMismatch")
.suppressFor(60)
.detail("Prefix", tenantPrefix.toHexString())
.detail("Key", m.param1.toHexString());
.detail("Tenant", req.tenantInfo.name)
.detail("TenantID", req.tenantInfo.tenantId)
.detail("Prefix", tenantPrefix)
.detail("Key", m.param1);
return false;
}

if (m.type == MutationRef::ClearRange && !m.param2.startsWith(tenantPrefix)) {
TraceEvent(SevWarnAlways, "TenantClearRangePrefixMismatch")
.suppressFor(60)
.detail("Prefix", tenantPrefix.toHexString())
.detail("Key", m.param2.toHexString());
.detail("Tenant", req.tenantInfo.name)
.detail("TenantID", req.tenantInfo.tenantId)
.detail("Prefix", tenantPrefix)
.detail("Key", m.param2);
return false;
} else if (m.type == MutationRef::SetVersionstampedKey) {
ASSERT(m.param1.size() >= 4);

@@ -301,8 +305,10 @@ bool verifyTenantPrefix(ProxyCommitData* const commitData, const CommitTransacti
if (*offset < tenantPrefix.size()) {
TraceEvent(SevWarnAlways, "TenantVersionstampInvalidOffset")
.suppressFor(60)
.detail("Prefix", tenantPrefix.toHexString())
.detail("Key", m.param1.toHexString())
.detail("Tenant", req.tenantInfo.name)
.detail("TenantID", req.tenantInfo.tenantId)
.detail("Prefix", tenantPrefix)
.detail("Key", m.param1)
.detail("Offset", *offset);
return false;
}

@@ -315,9 +321,11 @@ bool verifyTenantPrefix(ProxyCommitData* const commitData, const CommitTransacti
(!rc.begin.startsWith(tenantPrefix) || !rc.end.startsWith(tenantPrefix))) {
TraceEvent(SevWarnAlways, "TenantReadConflictPrefixMismatch")
.suppressFor(60)
.detail("Prefix", tenantPrefix.toHexString())
.detail("BeginKey", rc.begin.toHexString())
.detail("EndKey", rc.end.toHexString());
.detail("Tenant", req.tenantInfo.name)
.detail("TenantID", req.tenantInfo.tenantId)
.detail("Prefix", tenantPrefix)
.detail("BeginKey", rc.begin)
.detail("EndKey", rc.end);
return false;
}
}

@@ -327,9 +335,11 @@ bool verifyTenantPrefix(ProxyCommitData* const commitData, const CommitTransacti
(!wc.begin.startsWith(tenantPrefix) || !wc.end.startsWith(tenantPrefix))) {
TraceEvent(SevWarnAlways, "TenantWriteConflictPrefixMismatch")
.suppressFor(60)
.detail("Prefix", tenantPrefix.toHexString())
.detail("BeginKey", wc.begin.toHexString())
.detail("EndKey", wc.end.toHexString());
.detail("Tenant", req.tenantInfo.name)
.detail("TenantID", req.tenantInfo.tenantId)
.detail("Prefix", tenantPrefix)
.detail("BeginKey", wc.begin)
.detail("EndKey", wc.end);
return false;
}
}

@@ -913,8 +923,9 @@ ACTOR Future<Void> getResolution(CommitBatchContext* self) {
};
std::unordered_map<EncryptCipherDomainId, EncryptCipherDomainName> encryptDomains = defaultDomains;
for (int t = 0; t < trs.size(); t++) {
int64_t tenantId = trs[t].tenantInfo.tenantId;
Optional<TenantName> tenantName = trs[t].tenantInfo.name;
TenantInfo const& tenantInfo = trs[t].tenantInfo;
int64_t tenantId = tenantInfo.tenantId;
Optional<TenantNameRef> const& tenantName = tenantInfo.name;
// TODO(yiwu): In raw access mode, use tenant prefix to figure out tenant id for user data
if (tenantId != TenantInfo::INVALID_TENANT) {
ASSERT(tenantName.present());

@@ -1138,7 +1149,8 @@ ACTOR Future<Void> applyMetadataToCommittedTransactions(CommitBatchContext* self
if (!self->isMyFirstBatch &&
pProxyCommitData->txnStateStore->readValue(coordinatorsKey).get().get() != self->oldCoordinators.get()) {
wait(brokenPromiseToNever(pProxyCommitData->db->get().clusterInterface.changeCoordinators.getReply(
ChangeCoordinatorsRequest(pProxyCommitData->txnStateStore->readValue(coordinatorsKey).get().get()))));
ChangeCoordinatorsRequest(pProxyCommitData->txnStateStore->readValue(coordinatorsKey).get().get(),
self->pProxyCommitData->master.id()))));
ASSERT(false); // ChangeCoordinatorsRequest should always throw
}

@@ -1845,7 +1857,7 @@ ACTOR static Future<Void> doKeyServerLocationRequest(GetKeyServerLocationsReques
while (tenantEntry.isError()) {
bool finalQuery = commitData->version.get() >= minTenantVersion;
ErrorOr<Optional<TenantMapEntry>> _tenantEntry =
getTenantEntry(commitData, req.tenant, Optional<int64_t>(), finalQuery);
getTenantEntry(commitData, req.tenant.name, Optional<int64_t>(), finalQuery);
tenantEntry = _tenantEntry;

if (tenantEntry.isError()) {
@@ -393,6 +393,7 @@ ACTOR Future<Void> leaderRegister(LeaderElectionRegInterface interf, Key key) {
notify[i].send(newInfo);
notify.clear();
ClientDBInfo outInfo;
outInfo.isEncryptionEnabled = SERVER_KNOBS->ENABLE_ENCRYPTION;
outInfo.id = deterministicRandom()->randomUniqueID();
outInfo.forward = req.conn.toString();
clientData.clientInfo->set(CachedSerialization<ClientDBInfo>(outInfo));

@@ -632,6 +633,7 @@ ACTOR Future<Void> leaderServer(LeaderElectionRegInterface interf,
Optional<LeaderInfo> forward = regs.getForward(req.clusterKey);
if (forward.present()) {
ClientDBInfo info;
info.isEncryptionEnabled = SERVER_KNOBS->ENABLE_ENCRYPTION;
info.id = deterministicRandom()->randomUniqueID();
info.forward = forward.get().serializedInfo;
req.reply.send(CachedSerialization<ClientDBInfo>(info));
@@ -1964,33 +1964,19 @@ ACTOR Future<Void> BgDDLoadRebalance(DDQueueData* self, int teamCollectionIndex,
traceEvent.detail("QueuedRelocations", self->priority_relocations[ddPriority]);

if (self->priority_relocations[ddPriority] < SERVER_KNOBS->DD_REBALANCE_PARALLELISM) {
if (isDataMovementForMountainChopper(reason)) {
srcReq = GetTeamRequest(WantNewServers::True,
WantTrueBest::True,
PreferLowerDiskUtil::False,
TeamMustHaveShards::True,
ForReadBalance(readRebalance),
PreferLowerReadUtil::False);
destReq = GetTeamRequest(WantNewServers::True,
WantTrueBest::False,
PreferLowerDiskUtil::True,
TeamMustHaveShards::False,
ForReadBalance(readRebalance),
PreferLowerReadUtil::True);
} else {
srcReq = GetTeamRequest(WantNewServers::True,
WantTrueBest::False,
PreferLowerDiskUtil::False,
TeamMustHaveShards::True,
ForReadBalance(readRebalance),
PreferLowerReadUtil::False);
destReq = GetTeamRequest(WantNewServers::True,
WantTrueBest::True,
PreferLowerDiskUtil::True,
TeamMustHaveShards::False,
ForReadBalance(readRebalance),
PreferLowerReadUtil::True);
}
bool mcMove = isDataMovementForMountainChopper(reason);
srcReq = GetTeamRequest(WantNewServers::True,
WantTrueBest(mcMove),
PreferLowerDiskUtil::False,
TeamMustHaveShards::True,
ForReadBalance(readRebalance),
PreferLowerReadUtil::False);
destReq = GetTeamRequest(WantNewServers::True,
WantTrueBest(!mcMove),
PreferLowerDiskUtil::True,
TeamMustHaveShards::False,
ForReadBalance(readRebalance),
PreferLowerReadUtil::True);
state Future<SrcDestTeamPair> getTeamFuture =
getSrcDestTeams(self, teamCollectionIndex, srcReq, destReq, ddPriority, &traceEvent);
wait(ready(getTeamFuture));
@@ -1055,173 +1055,3 @@ ACTOR Future<Void> dataDistributionTracker(Reference<InitialDataDistribution> in
throw e;
}
}

std::vector<KeyRange> ShardsAffectedByTeamFailure::getShardsFor(Team team) const {
std::vector<KeyRange> r;
for (auto it = team_shards.lower_bound(std::pair<Team, KeyRange>(team, KeyRangeRef()));
it != team_shards.end() && it->first == team;
++it)
r.push_back(it->second);
return r;
}

bool ShardsAffectedByTeamFailure::hasShards(Team team) const {
auto it = team_shards.lower_bound(std::pair<Team, KeyRange>(team, KeyRangeRef()));
return it != team_shards.end() && it->first == team;
}

int ShardsAffectedByTeamFailure::getNumberOfShards(UID ssID) const {
auto it = storageServerShards.find(ssID);
return it == storageServerShards.end() ? 0 : it->second;
}

std::pair<std::vector<ShardsAffectedByTeamFailure::Team>, std::vector<ShardsAffectedByTeamFailure::Team>>
ShardsAffectedByTeamFailure::getTeamsFor(KeyRangeRef keys) {
return shard_teams[keys.begin];
}

void ShardsAffectedByTeamFailure::erase(Team team, KeyRange const& range) {
if (team_shards.erase(std::pair<Team, KeyRange>(team, range)) > 0) {
for (auto uid = team.servers.begin(); uid != team.servers.end(); ++uid) {
// Safeguard against going negative after eraseServer() sets value to 0
if (storageServerShards[*uid] > 0) {
storageServerShards[*uid]--;
}
}
}
}

void ShardsAffectedByTeamFailure::insert(Team team, KeyRange const& range) {
if (team_shards.insert(std::pair<Team, KeyRange>(team, range)).second) {
for (auto uid = team.servers.begin(); uid != team.servers.end(); ++uid)
storageServerShards[*uid]++;
}
}

void ShardsAffectedByTeamFailure::defineShard(KeyRangeRef keys) {
std::vector<Team> teams;
std::vector<Team> prevTeams;
auto rs = shard_teams.intersectingRanges(keys);
for (auto it = rs.begin(); it != rs.end(); ++it) {
for (auto t = it->value().first.begin(); t != it->value().first.end(); ++t) {
teams.push_back(*t);
erase(*t, it->range());
}
for (auto t = it->value().second.begin(); t != it->value().second.end(); ++t) {
prevTeams.push_back(*t);
}
}
uniquify(teams);
uniquify(prevTeams);

/*TraceEvent("ShardsAffectedByTeamFailureDefine")
.detail("KeyBegin", keys.begin)
.detail("KeyEnd", keys.end)
.detail("TeamCount", teams.size());*/

auto affectedRanges = shard_teams.getAffectedRangesAfterInsertion(keys);
shard_teams.insert(keys, std::make_pair(teams, prevTeams));

for (auto r = affectedRanges.begin(); r != affectedRanges.end(); ++r) {
auto& t = shard_teams[r->begin];
for (auto it = t.first.begin(); it != t.first.end(); ++it) {
insert(*it, *r);
}
}
check();
}

// Move keys to destinationTeams by updating shard_teams
void ShardsAffectedByTeamFailure::moveShard(KeyRangeRef keys, std::vector<Team> destinationTeams) {
/*TraceEvent("ShardsAffectedByTeamFailureMove")
.detail("KeyBegin", keys.begin)
.detail("KeyEnd", keys.end)
.detail("NewTeamSize", destinationTeam.size())
.detail("NewTeam", describe(destinationTeam));*/

auto ranges = shard_teams.intersectingRanges(keys);
std::vector<std::pair<std::pair<std::vector<Team>, std::vector<Team>>, KeyRange>> modifiedShards;
for (auto it = ranges.begin(); it != ranges.end(); ++it) {
if (keys.contains(it->range())) {
// erase the many teams that were associated with this one shard
for (auto t = it->value().first.begin(); t != it->value().first.end(); ++t) {
erase(*t, it->range());
}

// save this modification for later insertion
std::vector<Team> prevTeams = it->value().second;
prevTeams.insert(prevTeams.end(), it->value().first.begin(), it->value().first.end());
uniquify(prevTeams);

modifiedShards.push_back(std::pair<std::pair<std::vector<Team>, std::vector<Team>>, KeyRange>(
std::make_pair(destinationTeams, prevTeams), it->range()));
} else {
// for each range that touches this move, add our team as affecting this range
for (auto& team : destinationTeams) {
insert(team, it->range());
}

// if we are not in the list of teams associated with this shard, add us in
auto& teams = it->value();
teams.second.insert(teams.second.end(), teams.first.begin(), teams.first.end());
uniquify(teams.second);

teams.first.insert(teams.first.end(), destinationTeams.begin(), destinationTeams.end());
uniquify(teams.first);
}
}

// we cannot modify the KeyRangeMap while iterating through it, so add saved modifications now
for (int i = 0; i < modifiedShards.size(); i++) {
for (auto& t : modifiedShards[i].first.first) {
insert(t, modifiedShards[i].second);
}
shard_teams.insert(modifiedShards[i].second, modifiedShards[i].first);
}

check();
}

void ShardsAffectedByTeamFailure::finishMove(KeyRangeRef keys) {
auto ranges = shard_teams.containedRanges(keys);
for (auto it = ranges.begin(); it != ranges.end(); ++it) {
it.value().second.clear();
}
}

void ShardsAffectedByTeamFailure::setCheckMode(CheckMode mode) {
checkMode = mode;
}

void ShardsAffectedByTeamFailure::check() const {
if (checkMode == CheckMode::ForceNoCheck)
return;
if (EXPENSIVE_VALIDATION || checkMode == CheckMode::ForceCheck) {
for (auto t = team_shards.begin(); t != team_shards.end(); ++t) {
auto i = shard_teams.rangeContaining(t->second.begin);
if (i->range() != t->second || !std::count(i->value().first.begin(), i->value().first.end(), t->first)) {
ASSERT(false);
}
}
auto rs = shard_teams.ranges();
for (auto i = rs.begin(); i != rs.end(); ++i) {
for (auto t = i->value().first.begin(); t != i->value().first.end(); ++t) {
if (!team_shards.count(std::make_pair(*t, i->range()))) {
std::string teamDesc, shards;
for (int k = 0; k < t->servers.size(); k++)
teamDesc += format("%llx ", t->servers[k].first());
for (auto x = team_shards.lower_bound(std::make_pair(*t, KeyRangeRef()));
x != team_shards.end() && x->first == *t;
++x)
shards += printable(x->second.begin) + "-" + printable(x->second.end) + ",";
TraceEvent(SevError, "SATFInvariantError2")
.detail("KB", i->begin())
.detail("KE", i->end())
.detail("Team", teamDesc)
.detail("Shards", shards);
ASSERT(false);
}
}
}
}
}
@@ -303,6 +303,9 @@ rocksdb::Options getOptions() {

// TODO: enable rocksdb metrics.
options.db_log_dir = SERVER_KNOBS->LOG_DIRECTORY;
if (g_network->isSimulated()) {
options.OptimizeForSmallDb();
}
return options;
}

@@ -1712,15 +1712,19 @@ ACTOR static Future<Void> finishMoveShards(Database occ,
Void(),
TaskPriority::MoveKeys));

int count = 0;
std::vector<UID> readyServers;
for (int s = 0; s < serverReady.size(); ++s) {
count += serverReady[s].isReady() && !serverReady[s].isError();
if (serverReady[s].isReady() && !serverReady[s].isError()) {
readyServers.push_back(storageServerInterfaces[s].uniqueID);
}
}

TraceEvent(SevVerbose, "FinishMoveShardsWaitedServers", relocationIntervalId)
.detail("ReadyServers", count);
.detail("DataMoveID", dataMoveId)
.detail("ReadyServers", describe(readyServers));

if (readyServers.size() == newDestinations.size()) {

if (count == newDestinations.size()) {
std::vector<Future<Void>> actors;
actors.push_back(krmSetRangeCoalescing(
&tr, keyServersPrefix, range, allKeys, keyServersValue(destServers, {}, dataMoveId, UID())));
@@ -880,7 +880,7 @@ std::shared_ptr<platform::TmpFile> prepareTokenFile(const uint8_t* buff, const i

std::shared_ptr<platform::TmpFile> prepareTokenFile(const int tokenLen) {
Standalone<StringRef> buff = makeString(tokenLen);
generateRandomData(mutateString(buff), tokenLen);
deterministicRandom()->randomBytes(mutateString(buff), tokenLen);

return prepareTokenFile(buff.begin(), tokenLen);
}

@@ -941,7 +941,7 @@ ACTOR Future<Void> testValidationFileTokenPayloadTooLarge(Reference<RESTKmsConne
SERVER_KNOBS->REST_KMS_CONNECTOR_VALIDATION_TOKEN_MAX_SIZE +
2;
Standalone<StringRef> buff = makeString(tokenLen);
generateRandomData(mutateString(buff), tokenLen);
deterministicRandom()->randomBytes(mutateString(buff), tokenLen);

std::string details;
state std::vector<std::shared_ptr<platform::TmpFile>> tokenfiles;

@@ -972,7 +972,7 @@ ACTOR Future<Void> testMultiValidationFileTokenFiles(Reference<RESTKmsConnectorC
state std::unordered_map<std::string, std::string> tokenNameValueMap;
state std::string tokenDetailsStr;

generateRandomData(mutateString(buff), tokenLen);
deterministicRandom()->randomBytes(mutateString(buff), tokenLen);

for (int i = 1; i <= numFiles; i++) {
std::string tokenName = std::to_string(i);

@@ -1350,7 +1350,7 @@ TEST_CASE("/KmsConnector/REST/ParseKmsDiscoveryUrls") {
state Arena arena;

// initialize cipher key used for testing
generateRandomData(&BASE_CIPHER_KEY_TEST[0], 32);
deterministicRandom()->randomBytes(&BASE_CIPHER_KEY_TEST[0], 32);

wait(testParseDiscoverKmsUrlFileNotFound(ctx));
wait(testParseDiscoverKmsUrlFile(ctx));

@@ -1363,7 +1363,7 @@ TEST_CASE("/KmsConnector/REST/ParseValidationTokenFile") {
state Arena arena;

// initialize cipher key used for testing
generateRandomData(&BASE_CIPHER_KEY_TEST[0], 32);
deterministicRandom()->randomBytes(&BASE_CIPHER_KEY_TEST[0], 32);

wait(testEmptyValidationFileDetails(ctx));
wait(testMalformedFileValidationTokenDetails(ctx));

@@ -1380,7 +1380,7 @@ TEST_CASE("/KmsConnector/REST/ParseKmsResponse") {
state Arena arena;

// initialize cipher key used for testing
generateRandomData(&BASE_CIPHER_KEY_TEST[0], 32);
deterministicRandom()->randomBytes(&BASE_CIPHER_KEY_TEST[0], 32);

testMissingCipherDetailsTag(ctx);
testMalformedCipherDetails(ctx);

@@ -1394,7 +1394,7 @@ TEST_CASE("/KmsConnector/REST/GetEncryptionKeyOps") {
state Arena arena;

// initialize cipher key used for testing
generateRandomData(&BASE_CIPHER_KEY_TEST[0], 32);
deterministicRandom()->randomBytes(&BASE_CIPHER_KEY_TEST[0], 32);

// Prepare KmsConnector context details
wait(testParseDiscoverKmsUrlFile(ctx));
@@ -0,0 +1,197 @@
/*
 * ShardsAffectedByTeamFailure.cpp
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "fdbserver/ShardsAffectedByTeamFailure.h"

std::vector<KeyRange> ShardsAffectedByTeamFailure::getShardsFor(Team team) const {
std::vector<KeyRange> r;
for (auto it = team_shards.lower_bound(std::pair<Team, KeyRange>(team, KeyRangeRef()));
it != team_shards.end() && it->first == team;
++it)
r.push_back(it->second);
return r;
}

bool ShardsAffectedByTeamFailure::hasShards(Team team) const {
auto it = team_shards.lower_bound(std::pair<Team, KeyRange>(team, KeyRangeRef()));
return it != team_shards.end() && it->first == team;
}

int ShardsAffectedByTeamFailure::getNumberOfShards(UID ssID) const {
auto it = storageServerShards.find(ssID);
return it == storageServerShards.end() ? 0 : it->second;
}

std::pair<std::vector<ShardsAffectedByTeamFailure::Team>, std::vector<ShardsAffectedByTeamFailure::Team>>
ShardsAffectedByTeamFailure::getTeamsFor(KeyRangeRef keys) {
return shard_teams[keys.begin];
}

void ShardsAffectedByTeamFailure::erase(Team team, KeyRange const& range) {
DisabledTraceEvent(SevDebug, "ShardsAffectedByTeamFailureErase")
.detail("Range", range)
.detail("Team", team.toString());
if (team_shards.erase(std::pair<Team, KeyRange>(team, range)) > 0) {
for (auto uid = team.servers.begin(); uid != team.servers.end(); ++uid) {
// Safeguard against going negative after eraseServer() sets value to 0
if (storageServerShards[*uid] > 0) {
storageServerShards[*uid]--;
}
}
}
}

void ShardsAffectedByTeamFailure::insert(Team team, KeyRange const& range) {
DisabledTraceEvent(SevDebug, "ShardsAffectedByTeamFailureInsert")
.detail("Range", range)
.detail("Team", team.toString());
if (team_shards.insert(std::pair<Team, KeyRange>(team, range)).second) {
for (auto uid = team.servers.begin(); uid != team.servers.end(); ++uid)
storageServerShards[*uid]++;
}
}

void ShardsAffectedByTeamFailure::defineShard(KeyRangeRef keys) {
std::vector<Team> teams;
std::vector<Team> prevTeams;
auto rs = shard_teams.intersectingRanges(keys);
for (auto it = rs.begin(); it != rs.end(); ++it) {
for (auto t = it->value().first.begin(); t != it->value().first.end(); ++t) {
teams.push_back(*t);
erase(*t, it->range());
}
for (auto t = it->value().second.begin(); t != it->value().second.end(); ++t) {
prevTeams.push_back(*t);
}
}
uniquify(teams);
uniquify(prevTeams);

/*TraceEvent("ShardsAffectedByTeamFailureDefine")
.detail("KeyBegin", keys.begin)
.detail("KeyEnd", keys.end)
.detail("TeamCount", teams.size());*/

auto affectedRanges = shard_teams.getAffectedRangesAfterInsertion(keys);
shard_teams.insert(keys, std::make_pair(teams, prevTeams));

for (auto r = affectedRanges.begin(); r != affectedRanges.end(); ++r) {
auto& t = shard_teams[r->begin];
for (auto it = t.first.begin(); it != t.first.end(); ++it) {
insert(*it, *r);
}
}
check();
}

// Move keys to destinationTeams by updating shard_teams
void ShardsAffectedByTeamFailure::moveShard(KeyRangeRef keys, std::vector<Team> destinationTeams) {
/*TraceEvent("ShardsAffectedByTeamFailureMove")
.detail("KeyBegin", keys.begin)
.detail("KeyEnd", keys.end)
.detail("NewTeamSize", destinationTeam.size())
.detail("NewTeam", describe(destinationTeam));*/

auto ranges = shard_teams.intersectingRanges(keys);
std::vector<std::pair<std::pair<std::vector<Team>, std::vector<Team>>, KeyRange>> modifiedShards;
for (auto it = ranges.begin(); it != ranges.end(); ++it) {
if (keys.contains(it->range())) {
// erase the many teams that were associated with this one shard
for (auto t = it->value().first.begin(); t != it->value().first.end(); ++t) {
erase(*t, it->range());
}

// save this modification for later insertion
std::vector<Team> prevTeams = it->value().second;
prevTeams.insert(prevTeams.end(), it->value().first.begin(), it->value().first.end());
uniquify(prevTeams);

modifiedShards.push_back(std::pair<std::pair<std::vector<Team>, std::vector<Team>>, KeyRange>(
std::make_pair(destinationTeams, prevTeams), it->range()));
} else {
// for each range that touches this move, add our team as affecting this range
for (auto& team : destinationTeams) {
insert(team, it->range());
}

// if we are not in the list of teams associated with this shard, add us in
auto& teams = it->value();
teams.second.insert(teams.second.end(), teams.first.begin(), teams.first.end());
uniquify(teams.second);

teams.first.insert(teams.first.end(), destinationTeams.begin(), destinationTeams.end());
uniquify(teams.first);
}
}

// we cannot modify the KeyRangeMap while iterating through it, so add saved modifications now
for (int i = 0; i < modifiedShards.size(); i++) {
for (auto& t : modifiedShards[i].first.first) {
insert(t, modifiedShards[i].second);
}
shard_teams.insert(modifiedShards[i].second, modifiedShards[i].first);
}

check();
}

void ShardsAffectedByTeamFailure::finishMove(KeyRangeRef keys) {
auto ranges = shard_teams.containedRanges(keys);
for (auto it = ranges.begin(); it != ranges.end(); ++it) {
it.value().second.clear();
}
}

void ShardsAffectedByTeamFailure::setCheckMode(CheckMode mode) {
checkMode = mode;
}

void ShardsAffectedByTeamFailure::check() const {
if (checkMode == CheckMode::ForceNoCheck)
return;
if (EXPENSIVE_VALIDATION || checkMode == CheckMode::ForceCheck) {
for (auto t = team_shards.begin(); t != team_shards.end(); ++t) {
auto i = shard_teams.rangeContaining(t->second.begin);
if (i->range() != t->second || !std::count(i->value().first.begin(), i->value().first.end(), t->first)) {
ASSERT(false);
}
}
auto rs = shard_teams.ranges();
for (auto i = rs.begin(); i != rs.end(); ++i) {
for (auto t = i->value().first.begin(); t != i->value().first.end(); ++t) {
if (!team_shards.count(std::make_pair(*t, i->range()))) {
std::string teamDesc, shards;
for (int k = 0; k < t->servers.size(); k++)
teamDesc += format("%llx ", t->servers[k].first());
for (auto x = team_shards.lower_bound(std::make_pair(*t, KeyRangeRef()));
x != team_shards.end() && x->first == *t;
++x)
shards += printable(x->second.begin) + "-" + printable(x->second.end) + ",";
TraceEvent(SevError, "SATFInvariantError2")
.detail("KB", i->begin())
.detail("KE", i->end())
.detail("Team", teamDesc)
.detail("Shards", shards);
ASSERT(false);
}
}
}
}
}
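Note: moveShard() above buffers its changes in modifiedShards and applies them only after the iteration, because inserting into the KeyRangeMap can split or merge ranges and would invalidate the traversal in progress. The same buffer-then-apply shape in miniature, over a plain std::map purely for illustration (not the KeyRangeMap API):

#include <map>
#include <string>
#include <vector>

void retagMatching(std::map<int, std::string>& m, const std::string& from, const std::string& to) {
	std::vector<int> toChange;
	for (const auto& [k, v] : m) { // read-only pass: no mutation while iterating
		if (v == from)
			toChange.push_back(k);
	}
	for (int k : toChange) // apply the saved modifications after the loop
		m[k] = to;
}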
@@ -41,6 +41,7 @@
#include "fdbclient/NativeAPI.actor.h"
#include "fdbclient/BackupAgent.actor.h"
#include "fdbclient/versions.h"
#include "flow/MkCert.h"
#include "fdbrpc/WellKnownEndpoints.h"
#include "flow/ProtocolVersion.h"
#include "flow/network.h"

@@ -598,6 +599,9 @@ ACTOR Future<ISimulator::KillType> simulatedFDBDRebooter(Reference<IClusterConne
1,
WLTOKEN_RESERVED_COUNT,
&allowList);
for (const auto& p : g_simulator.authKeys) {
FlowTransport::transport().addPublicKey(p.first, p.second.toPublic());
}
Sim2FileSystem::newFileSystem();

std::vector<Future<Void>> futures;

@@ -1446,8 +1450,6 @@ void SimulationConfig::setStorageEngine(const TestConfig& testConfig) {
TraceEvent(SevWarnAlways, "RocksDBNonDeterminism")
.detail("Explanation", "The Sharded RocksDB storage engine is threaded and non-deterministic");
noUnseed = true;
auto& g_knobs = IKnobCollection::getMutableGlobalKnobCollection();
g_knobs.setKnob("shard_encode_location_metadata", KnobValueRef::create(bool{ true }));
break;
}
default:

@@ -2392,6 +2394,10 @@ ACTOR void setupAndRun(std::string dataFolder,
state bool allowDisablingTenants = testConfig.allowDisablingTenants;
state bool allowCreatingTenants = true;

if (!CLIENT_KNOBS->SHARD_ENCODE_LOCATION_METADATA) {
testConfig.storageEngineExcludeTypes.push_back(5);
}

// The RocksDB storage engine does not support the restarting tests because you cannot consistently get a clean
// snapshot of the storage engine without a snapshotting file system.
// https://github.com/apple/foundationdb/issues/5155
@@ -117,7 +117,7 @@ void TenantCache::insert(TenantName& tenantName, TenantMapEntry& tenant) {
KeyRef tenantPrefix(tenant.prefix.begin(), tenant.prefix.size());
ASSERT(tenantCache.find(tenantPrefix) == tenantCache.end());

TenantInfo tenantInfo(tenantName, tenant.id);
TenantInfo tenantInfo(tenantName, Optional<Standalone<StringRef>>(), tenant.id);
tenantCache[tenantPrefix] = makeReference<TCTenantInfo>(tenantInfo, tenant.prefix);
tenantCache[tenantPrefix]->updateCacheGeneration(generation);
}

@@ -217,7 +217,7 @@ public:

for (uint16_t i = 0; i < tenantCount; i++) {
TenantName tenantName(format("%s_%08d", "ddtc_test_tenant", tenantNumber + i));
TenantMapEntry tenant(tenantNumber + i, TenantState::READY);
TenantMapEntry tenant(tenantNumber + i, TenantState::READY, SERVER_KNOBS->ENABLE_ENCRYPTION);

tenantCache.insert(tenantName, tenant);
}

@@ -245,7 +245,7 @@ public:

for (uint16_t i = 0; i < tenantCount; i++) {
TenantName tenantName(format("%s_%08d", "ddtc_test_tenant", tenantNumber + i));
TenantMapEntry tenant(tenantNumber + i, TenantState::READY);
TenantMapEntry tenant(tenantNumber + i, TenantState::READY, SERVER_KNOBS->ENABLE_ENCRYPTION);

tenantCache.insert(tenantName, tenant);
}

@@ -259,7 +259,7 @@ public:

if (tenantOrdinal % staleTenantFraction != 0) {
TenantName tenantName(format("%s_%08d", "ddtc_test_tenant", tenantOrdinal));
TenantMapEntry tenant(tenantOrdinal, TenantState::READY);
TenantMapEntry tenant(tenantOrdinal, TenantState::READY, SERVER_KNOBS->ENABLE_ENCRYPTION);
bool newTenant = tenantCache.update(tenantName, tenant);
ASSERT(!newTenant);
keepCount++;
@@ -10273,7 +10273,7 @@ TEST_CASE(":/redwood/performance/extentQueue") {

state int v;
state ExtentQueueEntry<16> e;
generateRandomData(e.entry, 16);
deterministicRandom()->randomBytes(e.entry, 16);
state int sinceYield = 0;
for (v = 1; v <= numEntries; ++v) {
// Sometimes do a commit
@@ -1875,8 +1875,10 @@ int main(int argc, char* argv[]) {
auto opts = CLIOptions::parseArgs(argc, argv);
const auto role = opts.role;

if (role == ServerRole::Simulation)
if (role == ServerRole::Simulation) {
printf("Random seed is %u...\n", opts.randomSeed);
bindDeterministicRandomToOpenssl();
}

if (opts.zoneId.present())
printf("ZoneId set to %s, dcId to %s\n", printable(opts.zoneId).c_str(), printable(opts.dcId).c_str());
@@ -31,10 +31,13 @@
#include "fdbclient/CommitTransaction.h"
#include "fdbclient/FDBTypes.h"
#include "fdbclient/Tenant.h"

#include "fdbserver/ServerDBInfo.h"
#include "flow/actorcompiler.h" // has to be last include

#include "flow/flow.h"

#include "flow/actorcompiler.h" // has to be last include

struct GranuleHistory {
KeyRange range;
Version version;
@@ -30,6 +30,7 @@
#include "fdbserver/Knobs.h"
#include "fdbserver/LogSystem.h"
#include "fdbserver/MoveKeys.actor.h"
#include "fdbserver/ShardsAffectedByTeamFailure.h"
#include <boost/heap/policies.hpp>
#include <boost/heap/skew_heap.hpp>

@@ -290,86 +291,6 @@ struct TeamCollectionInterface {
PromiseStream<GetTeamRequest> getTeam;
};

class ShardsAffectedByTeamFailure : public ReferenceCounted<ShardsAffectedByTeamFailure> {
public:
ShardsAffectedByTeamFailure() {}

enum class CheckMode { Normal = 0, ForceCheck, ForceNoCheck };
struct Team {
std::vector<UID> servers; // sorted
bool primary;

Team() : primary(true) {}
Team(std::vector<UID> const& servers, bool primary) : servers(servers), primary(primary) {}

bool operator<(const Team& r) const {
if (servers == r.servers)
return primary < r.primary;
return servers < r.servers;
}
bool operator>(const Team& r) const { return r < *this; }
bool operator<=(const Team& r) const { return !(*this > r); }
bool operator>=(const Team& r) const { return !(*this < r); }
bool operator==(const Team& r) const { return servers == r.servers && primary == r.primary; }
bool operator!=(const Team& r) const { return !(*this == r); }

std::string toString() const { return describe(servers); };
};

// This tracks the data distribution on the data distribution server so that teamTrackers can
// relocate the right shards when a team is degraded.

// The following are important to make sure that failure responses don't revert splits or merges:
// - The shards boundaries in the two data structures reflect "queued" RelocateShard requests
// (i.e. reflects the desired set of shards being tracked by dataDistributionTracker,
// rather than the status quo). These boundaries are modified in defineShard and the content
// of what servers correspond to each shard is a copy or union of the shards already there
// - The teams associated with each shard reflect either the sources for non-moving shards
// or the destination team for in-flight shards (the change is atomic with respect to team selection).
// moveShard() changes the servers associated with a shard and will never adjust the shard
// boundaries. If a move is received for a shard that has been redefined (the exact shard is
// no longer in the map), the servers will be set for all contained shards and added to all
// intersecting shards.

int getNumberOfShards(UID ssID) const;
std::vector<KeyRange> getShardsFor(Team team) const;
bool hasShards(Team team) const;

// The first element of the pair is either the source for non-moving shards or the destination team for in-flight
// shards The second element of the pair is all previous sources for in-flight shards
std::pair<std::vector<Team>, std::vector<Team>> getTeamsFor(KeyRangeRef keys);

void defineShard(KeyRangeRef keys);
void moveShard(KeyRangeRef keys, std::vector<Team> destinationTeam);
void finishMove(KeyRangeRef keys);
void check() const;

void setCheckMode(CheckMode);

PromiseStream<KeyRange> restartShardTracker;

private:
struct OrderByTeamKey {
bool operator()(const std::pair<Team, KeyRange>& lhs, const std::pair<Team, KeyRange>& rhs) const {
if (lhs.first < rhs.first)
return true;
if (lhs.first > rhs.first)
return false;
return lhs.second.begin < rhs.second.begin;
}
};

CheckMode checkMode = CheckMode::Normal;
KeyRangeMap<std::pair<std::vector<Team>, std::vector<Team>>>
shard_teams; // A shard can be affected by the failure of multiple teams if it is a queued merge, or when
// usable_regions > 1
std::set<std::pair<Team, KeyRange>, OrderByTeamKey> team_shards;
std::map<UID, int> storageServerShards;

void erase(Team team, KeyRange const& range);
void insert(Team team, KeyRange const& range);
};

// DDShardInfo is so named to avoid link-time name collision with ShardInfo within the StorageServer
struct DDShardInfo {
Key key;
@@ -34,40 +34,6 @@
#define deltatree_printf(...)
#endif

typedef uint64_t Word;
// Get the number of prefix bytes that are the same between a and b, up to their common length of cl
static inline int commonPrefixLength(uint8_t const* ap, uint8_t const* bp, int cl) {
int i = 0;
const int wordEnd = cl - sizeof(Word) + 1;

for (; i < wordEnd; i += sizeof(Word)) {
Word a = *(Word*)ap;
Word b = *(Word*)bp;
if (a != b) {
return i + ctzll(a ^ b) / 8;
}
ap += sizeof(Word);
bp += sizeof(Word);
}

for (; i < cl; i++) {
if (*ap != *bp) {
return i;
}
++ap;
++bp;
}
return cl;
}

static inline int commonPrefixLength(const StringRef& a, const StringRef& b) {
return commonPrefixLength(a.begin(), b.begin(), std::min(a.size(), b.size()));
}

static inline int commonPrefixLength(const StringRef& a, const StringRef& b, int skipLen) {
return commonPrefixLength(a.begin() + skipLen, b.begin() + skipLen, std::min(a.size(), b.size()) - skipLen);
}

// This appears to be the fastest version
static int lessOrEqualPowerOfTwo(int n) {
int p;
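Note: the commonPrefixLength() being removed above compares eight bytes at a time and uses count-trailing-zeros on the XOR of the first mismatching words to locate the differing byte; on a little-endian target the lowest-order differing byte is the first differing byte in memory. A standalone sketch of the same idea (__builtin_ctzll assumes GCC/Clang; memcpy sidesteps the unaligned-load UB of the original casts):

#include <cassert>
#include <cstdint>
#include <cstring>

static int commonPrefixBytes(const uint8_t* a, const uint8_t* b, int len) {
	int i = 0;
	for (; i + 8 <= len; i += 8) {
		uint64_t wa, wb;
		std::memcpy(&wa, a + i, 8);
		std::memcpy(&wb, b + i, 8);
		if (wa != wb)
			return i + __builtin_ctzll(wa ^ wb) / 8; // first differing byte (little-endian)
	}
	for (; i < len; i++)
		if (a[i] != b[i])
			return i;
	return len;
}

int main() {
	const uint8_t x[] = "foobar_baz", y[] = "foobar_qux";
	assert(commonPrefixBytes(x, y, 10) == 7); // "foobar_" is shared
}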
@@ -66,7 +66,7 @@ struct EncryptedMutationMessage {
ASSERT(textCipherItr != cipherKeys.end() && textCipherItr->second.isValid());
ASSERT(headerCipherItr != cipherKeys.end() && headerCipherItr->second.isValid());
uint8_t iv[AES_256_IV_LENGTH];
generateRandomData(iv, AES_256_IV_LENGTH);
deterministicRandom()->randomBytes(iv, AES_256_IV_LENGTH);
BinaryWriter bw(AssumeVersion(g_network->protocolVersion()));
bw << mutation;
EncryptedMutationMessage encrypted_mutation;

@@ -116,4 +116,4 @@ struct EncryptedMutationMessage {
return mutation;
}
};
#endif
#endif
@@ -83,13 +83,15 @@ struct ChangeCoordinatorsRequest {
constexpr static FileIdentifier file_identifier = 13605416;
Standalone<StringRef> newConnectionString;
ReplyPromise<Void> reply; // normally throws even on success!
UID masterId;

ChangeCoordinatorsRequest() {}
ChangeCoordinatorsRequest(Standalone<StringRef> newConnectionString) : newConnectionString(newConnectionString) {}
ChangeCoordinatorsRequest(Standalone<StringRef> newConnectionString, UID masterId)
: newConnectionString(newConnectionString), masterId(masterId) {}

template <class Ar>
void serialize(Ar& ar) {
serializer(ar, newConnectionString, reply);
serializer(ar, newConnectionString, reply, masterId);
}
};
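Note: in the serializer change above, the new masterId member is appended after the existing fields rather than inserted between them, which is the usual way to keep serializer() calls of this style readable across revisions. A generic sketch of the convention (the struct and identifier are hypothetical, not part of this commit):

struct ExampleRequest {
	constexpr static FileIdentifier file_identifier = 1; // hypothetical id
	Key oldField;
	UID addedField; // new in this revision

	template <class Ar>
	void serialize(Ar& ar) {
		// Append new members at the end; reordering the existing ones would
		// change how previously written fields are interpreted.
		serializer(ar, oldField, addedField);
	}
};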
@@ -0,0 +1,108 @@
/*
 * ShardsAffectedByTeamFailure.h
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef FOUNDATIONDB_SHARDSAFFECTEDBYTEAMFAILURE_H
#define FOUNDATIONDB_SHARDSAFFECTEDBYTEAMFAILURE_H

#include "flow/FastRef.h"
#include "flow/IRandom.h"
#include "fdbclient/FDBTypes.h"
#include "fdbclient/KeyRangeMap.h"

class ShardsAffectedByTeamFailure : public ReferenceCounted<ShardsAffectedByTeamFailure> {
public:
ShardsAffectedByTeamFailure() {}

enum class CheckMode { Normal = 0, ForceCheck, ForceNoCheck };
struct Team {
std::vector<UID> servers; // sorted
bool primary;

Team() : primary(true) {}
Team(std::vector<UID> const& servers, bool primary) : servers(servers), primary(primary) {}

bool operator<(const Team& r) const {
if (servers == r.servers)
return primary < r.primary;
return servers < r.servers;
}
bool operator>(const Team& r) const { return r < *this; }
bool operator<=(const Team& r) const { return !(*this > r); }
bool operator>=(const Team& r) const { return !(*this < r); }
bool operator==(const Team& r) const { return servers == r.servers && primary == r.primary; }
bool operator!=(const Team& r) const { return !(*this == r); }

std::string toString() const { return describe(servers); };
};

// This tracks the data distribution on the data distribution server so that teamTrackers can
// relocate the right shards when a team is degraded.

// The following are important to make sure that failure responses don't revert splits or merges:
// - The shards boundaries in the two data structures reflect "queued" RelocateShard requests
// (i.e. reflects the desired set of shards being tracked by dataDistributionTracker,
// rather than the status quo). These boundaries are modified in defineShard and the content
// of what servers correspond to each shard is a copy or union of the shards already there
// - The teams associated with each shard reflect either the sources for non-moving shards
// or the destination team for in-flight shards (the change is atomic with respect to team selection).
// moveShard() changes the servers associated with a shard and will never adjust the shard
// boundaries. If a move is received for a shard that has been redefined (the exact shard is
// no longer in the map), the servers will be set for all contained shards and added to all
// intersecting shards.

int getNumberOfShards(UID ssID) const;
std::vector<KeyRange> getShardsFor(Team team) const;
bool hasShards(Team team) const;

// The first element of the pair is either the source for non-moving shards or the destination team for in-flight
// shards The second element of the pair is all previous sources for in-flight shards
std::pair<std::vector<Team>, std::vector<Team>> getTeamsFor(KeyRangeRef keys);

void defineShard(KeyRangeRef keys);
void moveShard(KeyRangeRef keys, std::vector<Team> destinationTeam);
void finishMove(KeyRangeRef keys);
void check() const;

void setCheckMode(CheckMode);

PromiseStream<KeyRange> restartShardTracker;

private:
struct OrderByTeamKey {
bool operator()(const std::pair<Team, KeyRange>& lhs, const std::pair<Team, KeyRange>& rhs) const {
if (lhs.first < rhs.first)
return true;
if (lhs.first > rhs.first)
return false;
return lhs.second.begin < rhs.second.begin;
}
};

CheckMode checkMode = CheckMode::Normal;
KeyRangeMap<std::pair<std::vector<Team>, std::vector<Team>>>
shard_teams; // A shard can be affected by the failure of multiple teams if it is a queued merge, or when
// usable_regions > 1
std::set<std::pair<Team, KeyRange>, OrderByTeamKey> team_shards;
std::map<UID, int> storageServerShards;

void erase(Team team, KeyRange const& range);
void insert(Team team, KeyRange const& range);
};

#endif // FOUNDATIONDB_SHARDSAFFECTEDBYTEAMFAILURE_H
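Note: the class comment above implies a lifecycle for each tracked shard: defineShard() (re)declares the boundary, moveShard() atomically swaps in the destination team while remembering the previous sources, and finishMove() drops those sources once the data is durable on the destination. A minimal sketch of that call order (team contents and keys are made up for illustration):

auto satf = makeReference<ShardsAffectedByTeamFailure>();
ShardsAffectedByTeamFailure::Team dst({ UID(3, 0), UID(4, 0) }, /*primary=*/true);

KeyRange shard = KeyRangeRef("a"_sr, "b"_sr);
satf->defineShard(shard); // declare the boundary being tracked
satf->moveShard(shard, { dst }); // dst becomes current; old teams are kept as previous sources
satf->finishMove(shard); // data is durable on dst; forget the old sources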
@@ -39,10 +39,29 @@
#include "fdbrpc/simulator.h"
#include "flow/actorcompiler.h" // This must be the last #include.

template <class T>
struct sfinae_true : std::true_type {};

template <class T>
auto testAuthToken(int) -> sfinae_true<decltype(std::declval<T>().getAuthToken())>;
template <class>
auto testAuthToken(long) -> std::false_type;

template <class T>
struct hasAuthToken : decltype(testAuthToken<T>(0)) {};

template <class T>
void setAuthToken(T const& self, Transaction& tr) {
if constexpr (hasAuthToken<T>::value) {
tr.setOption(FDBTransactionOptions::AUTHORIZATION_TOKEN, self.getAuthToken());
}
}

ACTOR template <class T>
Future<bool> checkRangeSimpleValueSize(Database cx, T* workload, uint64_t begin, uint64_t end) {
loop {
state Transaction tr(cx);
setAuthToken(*workload, tr);
try {
state Standalone<KeyValueRef> firstKV = (*workload)(begin);
state Standalone<KeyValueRef> lastKV = (*workload)(end - 1);

@@ -63,6 +82,7 @@ Future<uint64_t> setupRange(Database cx, T* workload, uint64_t begin, uint64_t e
state uint64_t bytesInserted = 0;
loop {
state Transaction tr(cx);
setAuthToken(*workload, tr);
try {
// if( deterministicRandom()->random01() < 0.001 )
// tr.debugTransaction( deterministicRandom()->randomUniqueID() );

@@ -128,6 +148,7 @@ Future<uint64_t> setupRangeWorker(Database cx,

if (keysLoaded - lastStoredKeysLoaded >= keySaveIncrement || jobs->size() == 0) {
state Transaction tr(cx);
setAuthToken(*workload, tr);
try {
std::string countKey = format("keycount|%d|%d", workload->clientId, actorId);
std::string bytesKey = format("bytesstored|%d|%d", workload->clientId, actorId);
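Note: the testAuthToken overload pair added above is classic expression-SFINAE: the int overload is preferred for the literal 0, but it only survives substitution when T has a getAuthToken() member; otherwise resolution falls back to the long overload. A stripped-down demonstration of the same detector outside the workload headers (foo() is an arbitrary stand-in member):

#include <type_traits>
#include <utility>

template <class T>
struct sfinae_true : std::true_type {};

template <class T>
auto testFoo(int) -> sfinae_true<decltype(std::declval<T>().foo())>;
template <class>
auto testFoo(long) -> std::false_type;

template <class T>
struct hasFoo : decltype(testFoo<T>(0)) {};

struct WithFoo { int foo(); };
struct WithoutFoo {};

static_assert(hasFoo<WithFoo>::value, "detected via the int overload");
static_assert(!hasFoo<WithoutFoo>::value, "falls back to the long overload");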
@@ -3222,7 +3222,7 @@ ACTOR Future<GetValueReqAndResultRef> quickGetValue(StorageServer* data,

++data->counters.quickGetValueMiss;
if (SERVER_KNOBS->QUICK_GET_VALUE_FALLBACK) {
state Transaction tr(data->cx, pOriginalReq->tenantInfo.name);
state Transaction tr(data->cx, pOriginalReq->tenantInfo.name.castTo<TenantName>());
tr.setVersion(version);
// TODO: is DefaultPromiseEndpoint the best priority for this?
tr.trState->taskID = TaskPriority::DefaultPromiseEndpoint;

@@ -3818,6 +3818,9 @@ ACTOR Future<GetRangeReqAndResultRef> quickGetKeyValues(
state double getValuesStart = g_network->timer();
getRange.begin = firstGreaterOrEqual(KeyRef(*a, prefix));
getRange.end = firstGreaterOrEqual(strinc(prefix, *a));
if (pOriginalReq->debugID.present())
g_traceBatch.addEvent(
"TransactionDebug", pOriginalReq->debugID.get().first(), "storageserver.quickGetKeyValues.Before");
try {
// TODO: Use a lower level API may be better? Or tweak priorities?
GetKeyValuesRequest req;

@@ -3848,6 +3851,10 @@ ACTOR Future<GetRangeReqAndResultRef> quickGetKeyValues(
getRange.result = RangeResultRef(reply.data, reply.more);
const double duration = g_network->timer() - getValuesStart;
data->counters.mappedRangeLocalSample.addMeasurement(duration);
if (pOriginalReq->debugID.present())
g_traceBatch.addEvent("TransactionDebug",
pOriginalReq->debugID.get().first(),
"storageserver.quickGetKeyValues.AfterLocalFetch");
return getRange;
}
// Otherwise fallback.

@@ -3857,8 +3864,11 @@ ACTOR Future<GetRangeReqAndResultRef> quickGetKeyValues(

++data->counters.quickGetKeyValuesMiss;
if (SERVER_KNOBS->QUICK_GET_KEY_VALUES_FALLBACK) {
state Transaction tr(data->cx, pOriginalReq->tenantInfo.name);
state Transaction tr(data->cx, pOriginalReq->tenantInfo.name.castTo<TenantName>());
tr.setVersion(version);
if (pOriginalReq->debugID.present()) {
tr.debugTransaction(pOriginalReq->debugID.get());
}
// TODO: is DefaultPromiseEndpoint the best priority for this?
tr.trState->taskID = TaskPriority::DefaultPromiseEndpoint;
Future<RangeResult> rangeResultFuture =

@@ -3869,6 +3879,10 @@ ACTOR Future<GetRangeReqAndResultRef> quickGetKeyValues(
getRange.result = rangeResult;
const double duration = g_network->timer() - getValuesStart;
data->counters.mappedRangeRemoteSample.addMeasurement(duration);
if (pOriginalReq->debugID.present())
g_traceBatch.addEvent("TransactionDebug",
pOriginalReq->debugID.get().first(),
"storageserver.quickGetKeyValues.AfterRemoteFetch");
return getRange;
} else {
throw quick_get_key_values_miss();

@@ -4156,7 +4170,9 @@ ACTOR Future<GetMappedKeyValuesReply> mapKeyValues(StorageServer* data,
result.arena.dependsOn(input.arena);

result.data.reserve(result.arena, input.data.size());

if (pOriginalReq->debugID.present())
g_traceBatch.addEvent(
"TransactionDebug", pOriginalReq->debugID.get().first(), "storageserver.mapKeyValues.Start");
state Tuple mappedKeyFormatTuple;
state Tuple mappedKeyTuple;

@@ -4175,6 +4191,9 @@ ACTOR Future<GetMappedKeyValuesReply> mapKeyValues(StorageServer* data,
state std::vector<MappedKeyValueRef> kvms(k);
state std::vector<Future<Void>> subqueries;
state int offset = 0;
if (pOriginalReq->debugID.present())
g_traceBatch.addEvent(
"TransactionDebug", pOriginalReq->debugID.get().first(), "storageserver.mapKeyValues.BeforeLoop");
for (; offset < sz; offset += SERVER_KNOBS->MAX_PARALLEL_QUICK_GET_VALUE) {
// Divide into batches of MAX_PARALLEL_QUICK_GET_VALUE subqueries
for (int i = 0; i + offset < sz && i < SERVER_KNOBS->MAX_PARALLEL_QUICK_GET_VALUE; i++) {

@@ -4210,11 +4229,17 @@ ACTOR Future<GetMappedKeyValuesReply> mapKeyValues(StorageServer* data,
mappedKey));
}
wait(waitForAll(subqueries));
if (pOriginalReq->debugID.present())
g_traceBatch.addEvent(
"TransactionDebug", pOriginalReq->debugID.get().first(), "storageserver.mapKeyValues.AfterBatch");
subqueries.clear();
for (int i = 0; i + offset < sz && i < SERVER_KNOBS->MAX_PARALLEL_QUICK_GET_VALUE; i++) {
result.data.push_back(result.arena, kvms[i]);
}
}
if (pOriginalReq->debugID.present())
g_traceBatch.addEvent(
"TransactionDebug", pOriginalReq->debugID.get().first(), "storageserver.mapKeyValues.AfterAll");
return result;
}

@@ -4240,11 +4265,11 @@ bool rangeIntersectsAnyTenant(TenantPrefixIndex& prefixIndex, KeyRangeRef range,

TEST_CASE("/fdbserver/storageserver/rangeIntersectsAnyTenant") {
std::map<TenantName, TenantMapEntry> entries = {
std::make_pair("tenant0"_sr, TenantMapEntry(0, TenantState::READY)),
std::make_pair("tenant2"_sr, TenantMapEntry(2, TenantState::READY)),
std::make_pair("tenant3"_sr, TenantMapEntry(3, TenantState::READY)),
std::make_pair("tenant4"_sr, TenantMapEntry(4, TenantState::READY)),
std::make_pair("tenant6"_sr, TenantMapEntry(6, TenantState::READY))
std::make_pair("tenant0"_sr, TenantMapEntry(0, TenantState::READY, SERVER_KNOBS->ENABLE_ENCRYPTION)),
std::make_pair("tenant2"_sr, TenantMapEntry(2, TenantState::READY, SERVER_KNOBS->ENABLE_ENCRYPTION)),
std::make_pair("tenant3"_sr, TenantMapEntry(3, TenantState::READY, SERVER_KNOBS->ENABLE_ENCRYPTION)),
std::make_pair("tenant4"_sr, TenantMapEntry(4, TenantState::READY, SERVER_KNOBS->ENABLE_ENCRYPTION)),
std::make_pair("tenant6"_sr, TenantMapEntry(6, TenantState::READY, SERVER_KNOBS->ENABLE_ENCRYPTION))
};
TenantPrefixIndex index;
index.createNewVersion(1);
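Note: mapKeyValues above caps its fan-out by issuing at most MAX_PARALLEL_QUICK_GET_VALUE subqueries, waiting on the whole batch, then reusing the vector for the next batch. The same bounded-batch shape in a generic flow-style sketch (doSubquery() is a hypothetical per-item actor):

ACTOR Future<Void> processInBatches(std::vector<int> items, int maxParallel) {
	state int sz = items.size();
	state std::vector<Future<Void>> batch;
	state int offset = 0;
	for (; offset < sz; offset += maxParallel) {
		batch.clear();
		for (int i = 0; i + offset < sz && i < maxParallel; i++) {
			batch.push_back(doSubquery(items[i + offset])); // hypothetical helper
		}
		wait(waitForAll(batch)); // bound concurrency and memory per batch
	}
	return Void();
}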
@ -7084,6 +7109,10 @@ void changeServerKeysWithPhysicalShards(StorageServer* data,
|
|||
for (int i = 0; i < ranges.size(); i++) {
|
||||
const Reference<ShardInfo> currentShard = ranges[i].value;
|
||||
const KeyRangeRef currentRange = static_cast<KeyRangeRef>(ranges[i]);
|
||||
if (currentShard.isValid()) {
|
||||
TraceEvent(SevVerbose, "OverlappingPhysicalShard", data->thisServerID)
|
||||
.detail("PhysicalShard", currentShard->toStorageServerShard().toString());
|
||||
}
|
||||
if (!currentShard.isValid()) {
|
||||
ASSERT(currentRange == keys); // there shouldn't be any nulls except for the range being inserted
|
||||
} else if (currentShard->notAssigned()) {
|
||||
|
@ -7105,7 +7134,7 @@ void changeServerKeysWithPhysicalShards(StorageServer* data,
|
|||
.detail("NowAssigned", nowAssigned)
|
||||
.detail("Version", cVer)
|
||||
.detail("ResultingShard", newShard.toString());
|
||||
} else if (ranges[i].value->adding) {
|
||||
} else if (currentShard->adding) {
|
||||
ASSERT(!nowAssigned);
|
||||
StorageServerShard newShard = currentShard->toStorageServerShard();
|
||||
newShard.range = currentRange;
|
||||
|
|
|
@ -710,6 +710,7 @@ ACTOR Future<Void> testerServerWorkload(WorkloadRequest work,

endRole(Role::TESTER, workIface.id(), "Complete");
} catch (Error& e) {
TraceEvent(SevDebug, "TesterWorkloadFailed").errorUnsuppressed(e);
if (!replied) {
if (e.code() == error_code_test_specification_invalid)
work.reply.sendError(e);

@ -1639,6 +1640,7 @@ ACTOR Future<Void> runTests(Reference<AsyncVar<Optional<struct ClusterController
if (deterministicRandom()->coinflip()) {
entry.tenantGroup = "TestTenantGroup"_sr;
}
entry.encrypted = SERVER_KNOBS->ENABLE_ENCRYPTION;
TraceEvent("CreatingTenant").detail("Tenant", tenant).detail("TenantGroup", entry.tenantGroup);
tenantFutures.push_back(success(TenantAPI::createTenant(cx.getReference(), tenant, entry)));
}

@ -377,7 +377,7 @@ struct AsyncFileCorrectnessWorkload : public AsyncFileWorkload {
}
} else if (info.operation == WRITE) {
info.data = self->allocateBuffer(info.length);
generateRandomData(reinterpret_cast<uint8_t*>(info.data->buffer), info.length);
deterministicRandom()->randomBytes(reinterpret_cast<uint8_t*>(info.data->buffer), info.length);
memcpy(&self->memoryFile->buffer[info.offset], info.data->buffer, info.length);
memset(&self->fileValidityMask[info.offset], 0xFF, info.length);

@ -248,7 +248,7 @@ ACTOR Future<Void> liveReader(Database cx, Reference<FeedTestData> data, Version
buffered.pop_front();
}
if (buffered.empty()) {
if (data->poppingVersion < data->pendingCheck.front().first) {
if (data->poppingVersion < data->pendingCheck.front().first && !data->destroying) {
fmt::print("DBG) {0} Buffered empty after ready for check, and data not popped! popped "
"{1}, popping {2}, check {3}\n",
data->key.printable(),

@ -256,7 +256,7 @@ ACTOR Future<Void> liveReader(Database cx, Reference<FeedTestData> data, Version
data->poppingVersion,
data->pendingCheck.front().first);
}
ASSERT(data->poppingVersion >= data->pendingCheck.front().first);
ASSERT(data->poppingVersion >= data->pendingCheck.front().first || data->destroying);
data->pendingCheck.pop_front();
} else {
Version v = buffered.front().version;

@ -694,6 +694,14 @@ struct ChangeFeedOperationsWorkload : TestWorkload {
state Transaction tr(cx);
state Optional<Value> updateValue;

// FIXME: right now there is technically a bug in the change feed contract (mutations can appear in the stream
// at a higher version than the stop version) But because stopping a feed is sort of just an optimization, and
// no current user of change feeds currently relies on the stop version for correctness, it's fine to not test
// this for now
if (feedData->stopVersion.present()) {
return Void();
}

// if value is already not set, don't do a clear, otherwise pick either
if (feedData->lastCleared || deterministicRandom()->random01() > self->clearFrequency) {
updateValue = feedData->nextValue();

@ -119,6 +119,7 @@ public:

struct WorkloadProcess {
WorkloadProcessState* processState;
WorkloadContext childWorkloadContext;
UID id = deterministicRandom()->randomUniqueID();
Database cx;
Future<Void> databaseOpened;

@ -166,36 +167,56 @@ struct WorkloadProcess {
WorkloadProcess(ClientWorkload::CreateWorkload const& childCreator, WorkloadContext const& wcx)
: processState(WorkloadProcessState::instance(wcx.clientId)) {
TraceEvent("StartingClinetWorkload", id).detail("OnClientProcess", processState->id);
databaseOpened = openDatabase(this, childCreator, wcx);
childWorkloadContext.clientCount = wcx.clientCount;
childWorkloadContext.clientId = wcx.clientId;
childWorkloadContext.ccr = wcx.ccr;
childWorkloadContext.options = wcx.options;
childWorkloadContext.sharedRandomNumber = wcx.sharedRandomNumber;
databaseOpened = openDatabase(this, childCreator, childWorkloadContext);
}

ACTOR static void destroy(WorkloadProcess* self) {
state ISimulator::ProcessInfo* parent = g_simulator.getCurrentProcess();
wait(g_simulator.onProcess(self->childProcess(), TaskPriority::DefaultYield));
TraceEvent("DeleteWorkloadProcess").backtrace();
delete self;
wait(g_simulator.onProcess(parent, TaskPriority::DefaultYield));
}

std::string description() { return desc; }

// This actor will keep a reference to a future alive, switch to another process and then return. If the future
// count of `f` is 1, this will cause the future to be destroyed in the process `process`
ACTOR template <class T>
static void cancelChild(ISimulator::ProcessInfo* process, Future<T> f) {
wait(g_simulator.onProcess(process, TaskPriority::DefaultYield));
}

ACTOR template <class Ret, class Fun>
Future<Ret> runActor(WorkloadProcess* self, Optional<TenantName> defaultTenant, Fun f) {
state Optional<Error> err;
state Ret res;
state Future<Ret> fut;
state ISimulator::ProcessInfo* parent = g_simulator.getCurrentProcess();
wait(self->databaseOpened);
wait(g_simulator.onProcess(self->childProcess(), TaskPriority::DefaultYield));
self->cx->defaultTenant = defaultTenant;
try {
Ret r = wait(f(self->cx));
fut = f(self->cx);
Ret r = wait(fut);
res = r;
} catch (Error& e) {
// if we're getting cancelled, we could run in the scope of the parent process, but we're not allowed to
// cancel `fut` in any other process than the child process. So we're going to pass the future to an
// uncancellable actor (it has to be uncancellable because if we got cancelled here we can't wait on
// anything) which will then destroy the future on the child process.
cancelChild(self->childProcess(), fut);
if (e.code() == error_code_actor_cancelled) {
ASSERT(g_simulator.getCurrentProcess() == parent);
throw;
throw e;
}
err = e;
}
fut = Future<Ret>();
wait(g_simulator.onProcess(parent, TaskPriority::DefaultYield));
if (err.present()) {
throw err.get();

@ -208,6 +229,7 @@ ClientWorkload::ClientWorkload(CreateWorkload const& childCreator, WorkloadConte
: TestWorkload(wcx), impl(new WorkloadProcess(childCreator, wcx)) {}

ClientWorkload::~ClientWorkload() {
TraceEvent(SevDebug, "DestroyClientWorkload").backtrace();
WorkloadProcess::destroy(impl);
}

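The reworked runActor above shows a recurring flow idiom: catch the error, finish the context-restoring cleanup (here, hopping back to the parent simulator process), and only rethrow afterwards. A plain-C++ sketch of that plumbing under stated assumptions: std::exception_ptr stands in for flow's Error and Optional, and the simulator hop is reduced to a comment. This is not FDB code, just the same shape.

#include <cstdio>
#include <optional>
#include <stdexcept>

int riskyWork() {
	throw std::runtime_error("boom");
}

int runWithRestore() {
	std::optional<std::exception_ptr> err;
	int res = 0;
	try {
		res = riskyWork();
	} catch (...) {
		err = std::current_exception(); // remember the error, don't rethrow yet
	}
	// ... restore the original context here (the process hop in runActor) ...
	if (err)
		std::rethrow_exception(*err);
	return res;
}

int main() {
	try {
		runWithRestore();
	} catch (const std::exception& e) {
		std::printf("caught after restore: %s\n", e.what());
	}
}
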
@ -885,16 +885,11 @@ struct ConsistencyCheckWorkload : TestWorkload {
for (int i = 0; i < commitProxyInfo->size(); i++)
keyServerLocationFutures.push_back(
commitProxyInfo->get(i, &CommitProxyInterface::getKeyServersLocations)
.getReplyUnlessFailedFor(GetKeyServerLocationsRequest(span.context,
Optional<TenantNameRef>(),
begin,
end,
limitKeyServers,
false,
latestVersion,
Arena()),
2,
0));
.getReplyUnlessFailedFor(
GetKeyServerLocationsRequest(
span.context, TenantInfo(), begin, end, limitKeyServers, false, latestVersion, Arena()),
2,
0));

state bool keyServersInsertedForThisIteration = false;
choose {

@ -0,0 +1,62 @@
/*
 * CreateTenant.actor.cpp
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <cstdint>

#include "fdbclient/TenantManagement.actor.h"
#include "fdbserver/workloads/workloads.actor.h"

#include "flow/actorcompiler.h" // This must be the last #include.

struct CreateTenantWorkload : TestWorkload {
TenantName tenant;

CreateTenantWorkload(WorkloadContext const& wcx) : TestWorkload(wcx) {
tenant = getOption(options, "name"_sr, "DefaultTenant"_sr);
}

std::string description() const override { return "CreateTenant"; }
Future<Void> setup(Database const& cx) override {
if (clientId == 0) {
return _setup(this, cx);
}
return Void();
}

Future<Void> start(Database const& cx) override { return Void(); }
Future<bool> check(Database const& cx) override { return true; }
virtual void getMetrics(std::vector<PerfMetric>& m) override {}

ACTOR static Future<Void> _setup(CreateTenantWorkload* self, Database db) {
try {
Optional<TenantMapEntry> entry = wait(TenantAPI::createTenant(db.getReference(), self->tenant));
ASSERT(entry.present());
} catch (Error& e) {
TraceEvent(SevError, "TenantCreationFailed").error(e);
if (e.code() == error_code_actor_cancelled) {
throw;
}
ASSERT(false);
}
return Void();
}
};

WorkloadFactory<CreateTenantWorkload> CreateTenantWorkload("CreateTenant");

@ -20,19 +20,33 @@

#include <cstring>

#include "flow/Arena.h"
#include "flow/IRandom.h"
#include "flow/Trace.h"
#include "flow/serialize.h"
#include "fdbrpc/simulator.h"
#include "fdbrpc/TokenSign.h"
#include "fdbclient/FDBOptions.g.h"
#include "fdbclient/NativeAPI.actor.h"
#include "fdbserver/TesterInterface.actor.h"
#include "fdbserver/workloads/workloads.actor.h"
#include "fdbserver/workloads/BulkSetup.actor.h"
#include "flow/Arena.h"
#include "flow/IRandom.h"
#include "flow/Trace.h"
#include "flow/serialize.h"

#include "flow/actorcompiler.h" // This must be the last #include.

struct CycleWorkload : TestWorkload {
template <bool MultiTenancy>
struct CycleMembers {};

template <>
struct CycleMembers<true> {
Arena arena;
TenantName tenant;
authz::jwt::TokenRef token;
StringRef signedToken;
};

template <bool MultiTenancy>
struct CycleWorkload : TestWorkload, CycleMembers<MultiTenancy> {
int actorCount, nodeCount;
double testDuration, transactionsPerSecond, minExpectedTransactionsPerSecond, traceParentProbability;
Key keyPrefix;

@ -51,17 +65,58 @@ struct CycleWorkload : TestWorkload {
keyPrefix = unprintable(getOption(options, "keyPrefix"_sr, LiteralStringRef("")).toString());
traceParentProbability = getOption(options, "traceParentProbability"_sr, 0.01);
minExpectedTransactionsPerSecond = transactionsPerSecond * getOption(options, "expectedRate"_sr, 0.7);
if constexpr (MultiTenancy) {
ASSERT(g_network->isSimulated());
auto k = g_simulator.authKeys.begin();
this->tenant = getOption(options, "tenant"_sr, "CycleTenant"_sr);
// make it comfortably longer than the timeout of the workload
auto currentTime = uint64_t(lround(g_network->timer()));
this->token.algorithm = authz::Algorithm::ES256;
this->token.issuedAtUnixTime = currentTime;
this->token.expiresAtUnixTime =
currentTime + uint64_t(std::lround(getCheckTimeout())) + uint64_t(std::lround(testDuration)) + 100;
this->token.keyId = k->first;
this->token.notBeforeUnixTime = currentTime - 10;
VectorRef<StringRef> tenants;
tenants.push_back_deep(this->arena, this->tenant);
this->token.tenants = tenants;
// we currently don't support this workload to be run outside of simulation
this->signedToken = authz::jwt::signToken(this->arena, this->token, k->second);
}
}

std::string description() const override { return "CycleWorkload"; }
Future<Void> setup(Database const& cx) override { return bulkSetup(cx, this, nodeCount, Promise<double>()); }
template <bool MT = MultiTenancy>
std::enable_if_t<MT, StringRef> getAuthToken() const {
return this->signedToken;
}

std::string description() const override {
if constexpr (MultiTenancy) {
return "TenantCycleWorkload";
} else {
return "CycleWorkload";
}
}

Future<Void> setup(Database const& cx) override {
if constexpr (MultiTenancy) {
cx->defaultTenant = this->tenant;
}
return bulkSetup(cx, this, nodeCount, Promise<double>());
}
Future<Void> start(Database const& cx) override {
if constexpr (MultiTenancy) {
cx->defaultTenant = this->tenant;
}
for (int c = 0; c < actorCount; c++)
clients.push_back(
timeout(cycleClient(cx->clone(), this, actorCount / transactionsPerSecond), testDuration, Void()));
return delay(testDuration);
}
Future<bool> check(Database const& cx) override {
if constexpr (MultiTenancy) {
cx->defaultTenant = this->tenant;
}
int errors = 0;
for (int c = 0; c < clients.size(); c++)
errors += clients[c].isError();

@ -95,6 +150,14 @@ struct CycleWorkload : TestWorkload {
.detailf("From", "%016llx", debug_lastLoadBalanceResultEndpointToken);
}

template <bool B = MultiTenancy>
std::enable_if_t<B> setAuthToken(Transaction& tr) {
tr.setOption(FDBTransactionOptions::AUTHORIZATION_TOKEN, this->signedToken);
}

template <bool B = MultiTenancy>
std::enable_if_t<!B> setAuthToken(Transaction& tr) {}

ACTOR Future<Void> cycleClient(Database cx, CycleWorkload* self, double delay) {
state double lastTime = now();
try {

@ -104,6 +167,7 @@ struct CycleWorkload : TestWorkload {
state double tstart = now();
state int r = deterministicRandom()->randomInt(0, self->nodeCount);
state Transaction tr(cx);
self->setAuthToken(tr);
if (deterministicRandom()->random01() <= self->traceParentProbability) {
state Span span("CycleClient"_loc);
TraceEvent("CycleTracingTransaction", span.context.traceID).log();

@ -231,6 +295,7 @@ struct CycleWorkload : TestWorkload {
}
return true;
}

ACTOR Future<bool> cycleCheck(Database cx, CycleWorkload* self, bool ok) {
if (self->transactions.getMetric().value() < self->testDuration * self->minExpectedTransactionsPerSecond) {
TraceEvent(SevWarnAlways, "TestFailure")

@ -249,6 +314,7 @@ struct CycleWorkload : TestWorkload {
// One client checks the validity of the cycle
state Transaction tr(cx);
state int retryCount = 0;
self->setAuthToken(tr);
loop {
try {
state Version v = wait(tr.getReadVersion());

@ -273,4 +339,5 @@ struct CycleWorkload : TestWorkload {
}
};

WorkloadFactory<CycleWorkload> CycleWorkloadFactory("Cycle", true);
WorkloadFactory<CycleWorkload<false>> CycleWorkloadFactory("Cycle", false);
WorkloadFactory<CycleWorkload<true>> TenantCycleWorkloadFactory("TenantCycle", true);

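The tenant-aware rewrite above rides on a standard conditional-members pattern: an empty base struct for the plain case, a specialization that adds the tenant fields, plus enable_if-gated helpers and if constexpr branches so tenant-only code exists only when the flag is set. A minimal sketch of that pattern with made-up names, not the workload itself:

#include <iostream>
#include <string>
#include <type_traits>

// Empty in the default case; the true specialization carries the extra state.
template <bool MultiTenancy>
struct ExtraMembers {};

template <>
struct ExtraMembers<true> {
	std::string tenant = "CycleTenant";
};

template <bool MultiTenancy>
struct Workload : ExtraMembers<MultiTenancy> {
	// Only instantiable when MultiTenancy == true; otherwise SFINAE removes it.
	template <bool MT = MultiTenancy>
	std::enable_if_t<MT, std::string> tenantName() const {
		return this->tenant; // this-> is required: the member lives in a dependent base
	}

	std::string description() const {
		if constexpr (MultiTenancy)
			return "TenantCycleWorkload";
		else
			return "CycleWorkload";
	}
};

int main() {
	Workload<false> plain;
	Workload<true> tenanted;
	std::cout << plain.description() << " / " << tenanted.tenantName() << "\n";
}
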
@ -159,7 +159,7 @@ struct EncryptionOpsWorkload : TestWorkload {
void generateRandomBaseCipher(const int maxLen, uint8_t* buff, int* retLen) {
memset(buff, 0, maxLen);
*retLen = deterministicRandom()->randomInt(maxLen / 2, maxLen);
generateRandomData(buff, *retLen);
deterministicRandom()->randomBytes(buff, *retLen);
}

void setupCipherEssentials() {

@ -247,7 +247,7 @@ struct EncryptionOpsWorkload : TestWorkload {
const EncryptAuthTokenMode authMode,
BlobCipherEncryptHeader* header) {
uint8_t iv[AES_256_IV_LENGTH];
generateRandomData(&iv[0], AES_256_IV_LENGTH);
deterministicRandom()->randomBytes(&iv[0], AES_256_IV_LENGTH);
EncryptBlobCipherAes265Ctr encryptor(textCipherKey, headerCipherKey, &iv[0], AES_256_IV_LENGTH, authMode);

auto start = std::chrono::high_resolution_clock::now();

@ -341,7 +341,7 @@ struct EncryptionOpsWorkload : TestWorkload {
}

int dataLen = isFixedSizePayload() ? pageSize : deterministicRandom()->randomInt(100, maxBufSize);
generateRandomData(buff.get(), dataLen);
deterministicRandom()->randomBytes(buff.get(), dataLen);

// Encrypt the payload - generates BlobCipherEncryptHeader to assist decryption later
BlobCipherEncryptHeader header;

@ -24,6 +24,7 @@
#include <sstream>

#include "fdbclient/FDBOptions.g.h"
#include "fdbserver/Knobs.h"
#include "fdbserver/TesterInterface.actor.h"
#include "fdbclient/GenericManagementAPI.actor.h"
#include "fdbclient/TenantManagement.actor.h"

@ -240,6 +241,7 @@ struct FuzzApiCorrectnessWorkload : TestWorkload {
if (i < self->numTenants) {
TenantMapEntry entry;
entry.tenantGroup = self->getTenantGroup(i);
entry.encrypted = SERVER_KNOBS->ENABLE_ENCRYPTION;
tenantFutures.push_back(::success(TenantAPI::createTenant(cx.getReference(), tenantName, entry)));
self->createdTenants.insert(tenantName);
}

@ -75,4 +75,4 @@ struct StorageQuotaWorkload : TestWorkload {
}
};

WorkloadFactory<StorageQuotaWorkload> StorageQuotaWorkloadFactory("StorageQuota", true);
WorkloadFactory<StorageQuotaWorkload> StorageQuotaWorkloadFactory("StorageQuota");

@ -40,10 +40,11 @@ struct TenantManagementWorkload : TestWorkload {
int64_t id;
Optional<TenantGroupName> tenantGroup;
bool empty;
bool encrypted;

TenantData() : id(-1), empty(true) {}
TenantData(int64_t id, Optional<TenantGroupName> tenantGroup, bool empty)
: id(id), tenantGroup(tenantGroup), empty(empty) {}
TenantData(int64_t id, Optional<TenantGroupName> tenantGroup, bool empty, bool encrypted)
: id(id), tenantGroup(tenantGroup), empty(empty), encrypted(encrypted) {}
};

struct TenantGroupData {

@ -209,6 +210,11 @@ struct TenantManagementWorkload : TestWorkload {

TenantMapEntry entry;
entry.tenantGroup = self->chooseTenantGroup(true);
if (operationType == OperationType::SPECIAL_KEYS) {
entry.encrypted = SERVER_KNOBS->ENABLE_ENCRYPTION;
} else {
entry.encrypted = deterministicRandom()->coinflip();
}

if (self->createdTenants.count(tenant)) {
alreadyExists = true;

@ -266,7 +272,7 @@ struct TenantManagementWorkload : TestWorkload {
// Update our local tenant state to include the newly created one
self->maxId = entry.get().id;
self->createdTenants[tenantItr->first] =
TenantData(entry.get().id, tenantItr->second.tenantGroup, true);
TenantData(entry.get().id, tenantItr->second.tenantGroup, true, tenantItr->second.encrypted);

// If this tenant has a tenant group, create or update the entry for it
if (tenantItr->second.tenantGroup.present()) {

@ -582,10 +588,12 @@ struct TenantManagementWorkload : TestWorkload {
std::string tenantStateStr;
std::string base64TenantGroup;
std::string printableTenantGroup;
bool encrypted;

jsonDoc.get("id", id);
jsonDoc.get("prefix.base64", base64Prefix);
jsonDoc.get("prefix.printable", printablePrefix);
jsonDoc.get("prefix.encrypted", encrypted);

prefix = base64::decoder::from_string(base64Prefix);
ASSERT(prefix == unprintable(printablePrefix));

@ -600,7 +608,7 @@ struct TenantManagementWorkload : TestWorkload {
tenantGroup = TenantGroupNameRef(tenantGroupStr);
}

TenantMapEntry entry(id, TenantState::READY, tenantGroup);
TenantMapEntry entry(id, TenantState::READY, tenantGroup, encrypted);
ASSERT(entry.prefix == prefix);
return entry;
}

@ -1127,6 +1135,7 @@ struct TenantManagementWorkload : TestWorkload {
ASSERT(localItr != self->createdTenants.end());
ASSERT(dataItr->first == localItr->first);
ASSERT(dataItr->second.tenantGroup == localItr->second.tenantGroup);
ASSERT(dataItr->second.encrypted == localItr->second.encrypted);

checkTenants.push_back(checkTenantContents(cx, self, dataItr->first, localItr->second));
lastTenant = dataItr->first;

@ -389,7 +389,7 @@ EncryptBlobCipherAes265Ctr::EncryptBlobCipherAes265Ctr(Reference<BlobCipherKey>
const EncryptAuthTokenMode mode)
: ctx(EVP_CIPHER_CTX_new()), textCipherKey(tCipherKey), headerCipherKey(hCipherKey), authTokenMode(mode) {
ASSERT(isEncryptHeaderAuthTokenModeValid(mode));
generateRandomData(iv, AES_256_IV_LENGTH);
deterministicRandom()->randomBytes(iv, AES_256_IV_LENGTH);
init();
}

@ -796,7 +796,7 @@ TEST_CASE("flow/BlobCipher") {
BaseCipher(const EncryptCipherDomainId& dId, const EncryptCipherBaseKeyId& kId)
: domainId(dId), len(deterministicRandom()->randomInt(AES_256_KEY_LENGTH / 2, AES_256_KEY_LENGTH + 1)),
keyId(kId), key(std::make_unique<uint8_t[]>(len)) {
generateRandomData(key.get(), len);
deterministicRandom()->randomBytes(key.get(), len);
}
};

@ -899,11 +899,11 @@ TEST_CASE("flow/BlobCipher") {
Reference<BlobCipherKey> headerCipherKey = cipherKeyCache->getLatestCipherKey(ENCRYPT_HEADER_DOMAIN_ID);
const int bufLen = deterministicRandom()->randomInt(786, 2127) + 512;
uint8_t orgData[bufLen];
generateRandomData(&orgData[0], bufLen);
deterministicRandom()->randomBytes(&orgData[0], bufLen);

Arena arena;
uint8_t iv[AES_256_IV_LENGTH];
generateRandomData(&iv[0], AES_256_IV_LENGTH);
deterministicRandom()->randomBytes(&iv[0], AES_256_IV_LENGTH);

BlobCipherEncryptHeader headerCopy;
// validate basic encrypt followed by decrypt operation for AUTH_MODE_NONE

@ -98,7 +98,7 @@ TEST_CASE("/CompressionUtils/noCompression") {
Arena arena;
const int size = deterministicRandom()->randomInt(512, 1024);
Standalone<StringRef> uncompressed = makeString(size);
generateRandomData(mutateString(uncompressed), size);
deterministicRandom()->randomBytes(mutateString(uncompressed), size);

Standalone<StringRef> compressed = CompressionUtils::compress(CompressionFilter::NONE, uncompressed, arena);
ASSERT_EQ(compressed.compare(uncompressed), 0);

@ -116,7 +116,7 @@ TEST_CASE("/CompressionUtils/gzipCompression") {
Arena arena;
const int size = deterministicRandom()->randomInt(512, 1024);
Standalone<StringRef> uncompressed = makeString(size);
generateRandomData(mutateString(uncompressed), size);
deterministicRandom()->randomBytes(mutateString(uncompressed), size);

Standalone<StringRef> compressed = CompressionUtils::compress(CompressionFilter::GZIP, uncompressed, arena);
ASSERT_NE(compressed.compare(uncompressed), 0);

@ -128,4 +128,25 @@ TEST_CASE("/CompressionUtils/gzipCompression") {

return Void();
}

TEST_CASE("/CompressionUtils/gzipCompression2") {
Arena arena;
const int size = deterministicRandom()->randomInt(512, 1024);
std::string s(size, 'x');
Standalone<StringRef> uncompressed = Standalone<StringRef>(StringRef(s));
printf("Size before: %d\n", (int)uncompressed.size());

Standalone<StringRef> compressed = CompressionUtils::compress(CompressionFilter::GZIP, uncompressed, arena);
ASSERT_NE(compressed.compare(uncompressed), 0);
printf("Size after: %d\n", (int)compressed.size());
// Assert compressed size is less than half.
ASSERT(compressed.size() * 2 < uncompressed.size());

StringRef verify = CompressionUtils::decompress(CompressionFilter::GZIP, compressed, arena);
ASSERT_EQ(verify.compare(uncompressed), 0);

TraceEvent("GzipCompression_Done").log();

return Void();
}
#endif

@ -19,6 +19,7 @@
*/

#include "fmt/format.h"
#include "flow/Arena.h"
#include "flow/DeterministicRandom.h"

#include <cstring>

@ -124,6 +125,23 @@ std::string DeterministicRandom::randomAlphaNumeric(int length) {
return s;
}

void DeterministicRandom::randomBytes(uint8_t* buf, int length) {
constexpr const int unitLen = sizeof(decltype(gen64()));
for (int i = 0; i < length; i += unitLen) {
auto val = gen64();
memcpy(buf + i, &val, std::min(unitLen, length - i));
}
if (randLog && useRandLog) {
constexpr const int cutOff = 32;
bool tooLong = length > cutOff;
fmt::print(randLog,
"Rbytes[{}] {}{}\n",
length,
StringRef(buf, std::min(cutOff, length)).printable(),
tooLong ? "..." : "");
}
}

uint64_t DeterministicRandom::peek() const {
return next;
}

@ -134,10 +152,3 @@ void DeterministicRandom::addref() {
void DeterministicRandom::delref() {
ReferenceCounted<DeterministicRandom>::delref();
}

void generateRandomData(uint8_t* buffer, int length) {
for (int i = 0; i < length; i += sizeof(uint32_t)) {
uint32_t val = deterministicRandom()->randomUInt32();
memcpy(&buffer[i], &val, std::min(length - i, (int)sizeof(uint32_t)));
}
}

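The new randomBytes fills the buffer one 64-bit word per iteration and truncates the final copy, replacing the removed 32-bit generateRandomData loop. A standalone sketch of the same word-at-a-time fill, with std::mt19937_64 standing in for the generator's gen64() (an assumption for illustration; this is not the FDB class):

#include <algorithm>
#include <cstdint>
#include <cstring>
#include <random>
#include <vector>

// Fill buf with `length` random bytes, drawing one 64-bit word per iteration.
void fillRandomBytes(std::mt19937_64& gen, uint8_t* buf, int length) {
	constexpr int unitLen = sizeof(uint64_t);
	for (int i = 0; i < length; i += unitLen) {
		uint64_t val = gen();
		// The last copy may be shorter than a full word.
		std::memcpy(buf + i, &val, std::min(unitLen, length - i));
	}
}

int main() {
	std::mt19937_64 gen(42); // fixed seed: the byte stream is reproducible
	std::vector<uint8_t> buf(37);
	fillRandomBytes(gen, buf.data(), static_cast<int>(buf.size()));
}
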
@ -43,7 +43,7 @@ ACTOR Future<Optional<NetworkAddress>> resolveImpl(Hostname* self) {
try {
std::vector<NetworkAddress> addresses =
wait(INetworkConnections::net()->resolveTCPEndpointWithDNSCache(self->host, self->service));
NetworkAddress address = addresses[deterministicRandom()->randomInt(0, addresses.size())];
NetworkAddress address = INetworkConnections::pickOneAddress(addresses);
address.flags = 0; // Reset the parsed address to public
address.fromHostname = NetworkAddressFromHostname::True;
if (self->isTLS) {

@ -84,7 +84,7 @@ Optional<NetworkAddress> Hostname::resolveBlocking() {
try {
std::vector<NetworkAddress> addresses =
INetworkConnections::net()->resolveTCPEndpointBlockingWithDNSCache(host, service);
NetworkAddress address = addresses[deterministicRandom()->randomInt(0, addresses.size())];
NetworkAddress address = INetworkConnections::pickOneAddress(addresses);
address.flags = 0; // Reset the parsed address to public
address.fromHostname = NetworkAddressFromHostname::True;
if (isTLS) {

@ -126,6 +126,7 @@ void FlowKnobs::initialize(Randomize randomize, IsSimulated isSimulated) {
init( NETWORK_TEST_REQUEST_COUNT, 0 ); // 0 -> run forever
init( NETWORK_TEST_REQUEST_SIZE, 1 );
init( NETWORK_TEST_SCRIPT_MODE, false );
init( MAX_CACHED_EXPIRED_TOKENS, 1024 );

//AsyncFileCached
init( PAGE_CACHE_4K, 2LL<<30 );

@ -286,6 +287,7 @@ void FlowKnobs::initialize(Randomize randomize, IsSimulated isSimulated) {
if ( randomize && BUGGIFY) { ENCRYPT_CIPHER_KEY_CACHE_TTL = deterministicRandom()->randomInt(50, 100); }
init( ENCRYPT_KEY_REFRESH_INTERVAL, isSimulated ? 60 : 8 * 60 );
if ( randomize && BUGGIFY) { ENCRYPT_KEY_REFRESH_INTERVAL = deterministicRandom()->randomInt(2, 10); }
init( TOKEN_CACHE_SIZE, 100 );

// REST Client
init( RESTCLIENT_MAX_CONNECTIONPOOL_SIZE, 10 );

@ -199,11 +199,11 @@ TEST_CASE("flow/StreamCipher") {
StreamCipherKey const* key = StreamCipherKey::getGlobalCipherKey();

StreamCipher::IV iv;
generateRandomData(iv.data(), iv.size());
deterministicRandom()->randomBytes(iv.data(), iv.size());

Arena arena;
std::vector<unsigned char> plaintext(deterministicRandom()->randomInt(0, 10001));
generateRandomData(&plaintext.front(), plaintext.size());
deterministicRandom()->randomBytes(&plaintext.front(), plaintext.size());
std::vector<unsigned char> ciphertext(plaintext.size() + AES_BLOCK_SIZE);
std::vector<unsigned char> decryptedtext(plaintext.size() + AES_BLOCK_SIZE);

@ -20,6 +20,7 @@

#include "flow/flow.h"
#include "flow/DeterministicRandom.h"
#include "flow/Error.h"
#include "flow/UnitTest.h"
#include "flow/rte_memcpy.h"
#ifdef WITH_FOLLY_MEMCPY

@ -27,6 +28,8 @@
#endif
#include <stdarg.h>
#include <cinttypes>
#include <openssl/err.h>
#include <openssl/rand.h>

std::atomic<bool> startSampling = false;
LineageReference rootLineage;

@ -374,6 +377,77 @@ void enableBuggify(bool enabled, BuggifyType type) {
buggifyActivated[int(type)] = enabled;
}

// Make OpenSSL use DeterministicRandom as RNG source such that simulation runs stay deterministic w/ e.g. signature ops
void bindDeterministicRandomToOpenssl() {
// TODO: implement ifdef branch for 3.x using provider API
#ifndef OPENSSL_IS_BORINGSSL
static const RAND_METHOD method = {
// replacement for RAND_seed(), which reseeds OpenSSL RNG
[](const void*, int) -> int { return 1; },
// replacement for RAND_bytes(), which fills given buffer with random byte sequence
[](unsigned char* buf, int length) -> int {
if (g_network)
ASSERT_ABORT(g_network->isSimulated());
deterministicRandom()->randomBytes(buf, length);
return 1;
},
// replacement for RAND_cleanup(), a no-op for simulation
[]() -> void {},
// replacement for RAND_add(), which reseeds OpenSSL RNG with randomness hint
[](const void*, int, double) -> int { return 1; },
// replacement for default pseudobytes getter (same as RAND_bytes by default)
[](unsigned char* buf, int length) -> int {
if (g_network)
ASSERT_ABORT(g_network->isSimulated());
deterministicRandom()->randomBytes(buf, length);
return 1;
},
// status function for PRNG readiness check
[]() -> int { return 1; },
};

if (1 != ::RAND_set_rand_method(&method)) {
auto ec = ::ERR_get_error();
char msg[256]{
0,
};
if (ec) {
::ERR_error_string_n(ec, msg, sizeof(msg));
}
fprintf(stderr,
"ERROR: Failed to bind DeterministicRandom to OpenSSL RNG\n"
"  OpenSSL error message: '%s'\n",
msg);
throw internal_error();
} else {
printf("DeterministicRandom successfully bound to OpenSSL RNG\n");
}
#else // OPENSSL_IS_BORINGSSL
static const RAND_METHOD method = {
[](const void*, int) -> void {},
[](unsigned char* buf, unsigned long length) -> int {
if (g_network)
ASSERT_ABORT(g_network->isSimulated());
ASSERT(length <= std::numeric_limits<int>::max());
deterministicRandom()->randomBytes(buf, length);
return 1;
},
[]() -> void {},
[](const void*, int, double) -> void {},
[](unsigned char* buf, unsigned long length) -> int {
if (g_network)
ASSERT_ABORT(g_network->isSimulated());
ASSERT(length <= std::numeric_limits<int>::max());
deterministicRandom()->randomBytes(buf, length);
return 1;
},
[]() -> int { return 1; },
};
::RAND_set_rand_method(&method);
printf("DeterministicRandom successfully bound to OpenSSL RNG\n");
#endif // OPENSSL_IS_BORINGSSL
}

namespace {
// Simple message for flatbuffers unittests
struct Int {

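The same RAND_METHOD swap can be exercised outside FDB. A minimal sketch against the OpenSSL 1.1 RAND_METHOD layout, with a fixed-seed std::mt19937_64 standing in for DeterministicRandom and error handling trimmed; once the method is installed, RAND_bytes returns the identical sequence on every run:

#include <openssl/rand.h>
#include <cstdio>
#include <random>

static std::mt19937_64 g_gen(7); // fixed seed stands in for DeterministicRandom

static int detBytes(unsigned char* buf, int num) {
	for (int i = 0; i < num; i++)
		buf[i] = (unsigned char)(g_gen() & 0xff);
	return 1;
}

// Field order matches OpenSSL 1.1's rand_meth_st: seed, bytes, cleanup, add, pseudorand, status.
static const RAND_METHOD detMethod = {
	[](const void*, int) -> int { return 1; }, // seed: nothing to reseed
	detBytes,                                  // bytes: deterministic fill
	[]() {},                                   // cleanup: no-op
	[](const void*, int, double) -> int { return 1; }, // add: ignore entropy hints
	detBytes,                                  // pseudorand: same as bytes
	[]() -> int { return 1; },                 // status: always ready
};

int main() {
	RAND_set_rand_method(&detMethod);
	unsigned char out[8];
	RAND_bytes(out, sizeof(out));
	for (unsigned char c : out)
		printf("%02x", c); // same hex string on every run
	printf("\n");
}
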
@ -34,6 +34,7 @@
#include <algorithm>
#include <boost/functional/hash.hpp>
#include <stdint.h>
#include <string_view>
#include <string>
#include <cstring>
#include <limits>

@ -532,7 +533,9 @@ public:
return substr(0, size() - s.size());
}

std::string toString() const { return std::string((const char*)data, length); }
std::string toString() const { return std::string(reinterpret_cast<const char*>(data), length); }

std::string_view toStringView() const { return std::string_view(reinterpret_cast<const char*>(data), length); }

static bool isPrintable(char c) { return c > 32 && c < 127; }
inline std::string printable() const;

@ -850,6 +853,40 @@ inline bool operator>=(const StringRef& lhs, const StringRef& rhs) {
return !(lhs < rhs);
}

typedef uint64_t Word;
// Get the number of prefix bytes that are the same between a and b, up to their common length of cl
static inline int commonPrefixLength(uint8_t const* ap, uint8_t const* bp, int cl) {
int i = 0;
const int wordEnd = cl - sizeof(Word) + 1;

for (; i < wordEnd; i += sizeof(Word)) {
Word a = *(Word*)ap;
Word b = *(Word*)bp;
if (a != b) {
return i + ctzll(a ^ b) / 8;
}
ap += sizeof(Word);
bp += sizeof(Word);
}

for (; i < cl; i++) {
if (*ap != *bp) {
return i;
}
++ap;
++bp;
}
return cl;
}

static inline int commonPrefixLength(const StringRef& a, const StringRef& b) {
return commonPrefixLength(a.begin(), b.begin(), std::min(a.size(), b.size()));
}

static inline int commonPrefixLength(const StringRef& a, const StringRef& b, int skipLen) {
return commonPrefixLength(a.begin() + skipLen, b.begin() + skipLen, std::min(a.size(), b.size()) - skipLen);
}

// This trait is used by VectorRef to determine if deep copy constructor should recursively
// call deep copies of each element.
//

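The word-wise scan above locates the first differing byte by XOR-ing eight bytes at a time and counting trailing zero bits. A self-contained check of the same idea, assuming a little-endian target as the kernel above does, using C++20 std::countr_zero in place of flow's ctzll and memcpy loads to sidestep the unaligned dereference:

#include <bit>
#include <cassert>
#include <cstdint>
#include <cstring>

static int commonPrefix(const uint8_t* ap, const uint8_t* bp, int cl) {
	using Word = uint64_t;
	int i = 0;
	for (; i + (int)sizeof(Word) <= cl; i += sizeof(Word)) {
		Word a, b;
		std::memcpy(&a, ap + i, sizeof(Word)); // memcpy avoids unaligned loads
		std::memcpy(&b, bp + i, sizeof(Word));
		if (a != b)
			return i + std::countr_zero(a ^ b) / 8; // first differing byte, little-endian
	}
	for (; i < cl; i++)
		if (ap[i] != bp[i])
			return i;
	return cl;
}

int main() {
	const uint8_t a[] = "tenant42";
	const uint8_t b[] = "tenant7";
	assert(commonPrefix(a, b, 7) == 6); // "tenant" is the shared prefix
}
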
@ -228,7 +228,7 @@ struct CodeProbeImpl : ICodeProbe {
evt.detail("File", filename())
.detail("Line", Line)
.detail("Condition", Condition::value())
.detail("ProbeHit", condition)
.detail("Covered", condition)
.detail("Comment", Comment::value());
annotations.trace(this, evt, condition);
}

@ -49,6 +49,7 @@ public:
UID randomUniqueID() override;
char randomAlphaNumeric() override;
std::string randomAlphaNumeric(int length) override;
void randomBytes(uint8_t* buf, int length) override;
uint64_t peek() const override;
void addref() override;
void delref() override;

@ -24,6 +24,7 @@

#include <atomic>
#include <cstdint>
#include <utility>

// The thread safety this class provides is that it's safe to call addref and
// delref on the same object concurrently in different threads. Subclass does

@ -143,6 +143,7 @@ public:
virtual UID randomUniqueID() = 0;
virtual char randomAlphaNumeric() = 0;
virtual std::string randomAlphaNumeric(int length) = 0;
virtual void randomBytes(uint8_t* buf, int length) = 0;
virtual uint32_t randomSkewedUInt32(uint32_t min, uint32_t maxPlusOne) = 0;
virtual uint64_t peek() const = 0; // returns something that is probably different for different random states.
// Deterministic (and idempotent) for a deterministic generator.

@ -209,7 +210,4 @@ Reference<IRandom> nondeterministicRandom();
// WARNING: This is not thread safe and must not be called from any other thread than the network thread!
Reference<IRandom> debugRandom();

// Populates a buffer with a random sequence of bytes
void generateRandomData(uint8_t* buffer, int length);

#endif

@ -195,6 +195,8 @@ public:
int NETWORK_TEST_REQUEST_SIZE;
bool NETWORK_TEST_SCRIPT_MODE;

int MAX_CACHED_EXPIRED_TOKENS;

// AsyncFileCached
int64_t PAGE_CACHE_4K;
int64_t PAGE_CACHE_64K;

@ -354,6 +356,9 @@ public:
int64_t ENCRYPT_CIPHER_KEY_CACHE_TTL;
int64_t ENCRYPT_KEY_REFRESH_INTERVAL;

// Authorization
int TOKEN_CACHE_SIZE;

// RESTClient
int RESTCLIENT_MAX_CONNECTIONPOOL_SIZE;
int RESTCLIENT_CONNECT_TRIES;

@ -25,8 +25,15 @@
#include "flow/ProtocolVersion.h"

#include <unordered_map>
#include <any>

using ContextVariableMap = std::unordered_map<std::string_view, void*>;
using ContextVariableMap = std::unordered_map<std::string_view, std::any>;

template <class T>
struct HasVariableMap_t : std::false_type {};

template <class T>
constexpr bool HasVariableMap = HasVariableMap_t<T>::value;

template <class Ar>
struct LoadContext {

@ -53,6 +60,11 @@ struct LoadContext {
void addArena(Arena& arena) { arena = ar->arena(); }

LoadContext& context() { return *this; }

template <class Archiver = Ar>
std::enable_if_t<HasVariableMap<Archiver>, std::any&> variable(std::string_view name) {
return ar->variable(name);
}
};

template <class Ar, class Allocator>

@ -110,23 +122,9 @@ public:
deserialize(FileIdentifierFor<Item>::value, item);
}

template <class T>
bool variable(std::string_view name, T* val) {
auto p = variables->insert(std::make_pair(name, val));
return p.second;
}
std::any& variable(std::string_view name) { return variables->at(name); }

template <class T>
T& variable(std::string_view name) {
auto res = variables->at(name);
return *reinterpret_cast<T*>(res);
}

template <class T>
T const& variable(std::string_view name) const {
auto res = variables->at(name);
return *reinterpret_cast<T*>(res);
}
std::any const& variable(std::string_view name) const { return variables->at(name); }
};

class ObjectReader : public _ObjectReader<ObjectReader> {

@ -267,6 +265,11 @@ private:
int size = 0;
};

template <>
struct HasVariableMap_t<ObjectReader> : std::true_type {};
template <>
struct HasVariableMap_t<ArenaObjectReader> : std::true_type {};

// this special case is needed - the code expects
// Standalone<T> and T to be equivalent for serialization
namespace detail {

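Moving the variable map's mapped type from void* to std::any trades the blind reinterpret_casts for checked retrieval: std::any_cast throws std::bad_any_cast on a type mismatch instead of silently misreading memory. A tiny sketch of the difference (the alias mirrors the one above; the key name is made up):

#include <any>
#include <cassert>
#include <cstdint>
#include <string_view>
#include <unordered_map>

using ContextVariableMap = std::unordered_map<std::string_view, std::any>;

int main() {
	ContextVariableMap vars;
	vars["protocolVersion"] = uint64_t(0x0FDB00B072000000ULL);
	// Checked retrieval: asking for the wrong type here would throw
	// std::bad_any_cast, where the old void* map reinterpret_cast'ed blindly.
	auto v = std::any_cast<uint64_t>(vars.at("protocolVersion"));
	assert(v != 0);
}
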
@ -282,7 +282,7 @@ void getLocalTime(const time_t* timep, struct tm* result);
// get GMT time string from an epoch seconds double
std::string epochsToGMTString(double epochs);

#define ENVIRONMENT_KNOB_OPTION_PREFIX "FDB_KNOB_"
#define ENVIRONMENT_KNOB_OPTION_PREFIX "JOSHUA_FDB_KNOB_"
// returns list of environment variables with prefix ENVIRONMENT_KNOB_OPTION_PREFIX
std::vector<std::string> getEnvironmentKnobOptions();

@ -53,7 +53,7 @@ public:
int size() const { return keySize; }
uint8_t* data() const { return arr.get(); }
void initializeKey(uint8_t* data, int len);
void initializeRandomTestKey() { generateRandomData(arr.get(), keySize); }
void initializeRandomTestKey() { deterministicRandom()->randomBytes(arr.get(), keySize); }
void reset() { memset(arr.get(), 0, keySize); }

static bool isGlobalKeyPresent();

@ -1365,5 +1365,7 @@ inline bool check_yield(TaskPriority taskID = TaskPriority::DefaultYield) {
return g_network->check_yield(taskID);
}

void bindDeterministicRandomToOpenssl();

#include "flow/genericactors.actor.h"
#endif

Some files were not shown because too many files have changed in this diff.