update histogram constructor

This commit is contained in: commit 782a47f45d
@@ -79,6 +79,7 @@ if(NOT WIN32)
      test/unit/fdb_api.hpp)

  set(UNIT_TEST_VERSION_510_SRCS test/unit/unit_tests_version_510.cpp)
+ set(TRACE_PARTIAL_FILE_SUFFIX_TEST_SRCS test/unit/trace_partial_file_suffix_test.cpp)

  if(OPEN_FOR_IDE)
    add_library(fdb_c_performance_test OBJECT test/performance_test.c test/test.h)
@@ -88,6 +89,7 @@ if(NOT WIN32)
    add_library(fdb_c_setup_tests OBJECT test/unit/setup_tests.cpp)
    add_library(fdb_c_unit_tests OBJECT ${UNIT_TEST_SRCS})
    add_library(fdb_c_unit_tests_version_510 OBJECT ${UNIT_TEST_VERSION_510_SRCS})
+   add_library(trace_partial_file_suffix_test OBJECT ${TRACE_PARTIAL_FILE_SUFFIX_TEST_SRCS})
  else()
    add_executable(fdb_c_performance_test test/performance_test.c test/test.h)
    add_executable(fdb_c_ryw_benchmark test/ryw_benchmark.c test/test.h)
@@ -96,6 +98,7 @@ if(NOT WIN32)
    add_executable(fdb_c_setup_tests test/unit/setup_tests.cpp)
    add_executable(fdb_c_unit_tests ${UNIT_TEST_SRCS})
    add_executable(fdb_c_unit_tests_version_510 ${UNIT_TEST_VERSION_510_SRCS})
+   add_executable(trace_partial_file_suffix_test ${TRACE_PARTIAL_FILE_SUFFIX_TEST_SRCS})
    strip_debug_symbols(fdb_c_performance_test)
    strip_debug_symbols(fdb_c_ryw_benchmark)
    strip_debug_symbols(fdb_c_txn_size_test)
@@ -106,12 +109,14 @@ if(NOT WIN32)

  add_dependencies(fdb_c_setup_tests doctest)
  add_dependencies(fdb_c_unit_tests doctest)
  add_dependencies(fdb_c_unit_tests_version_510 doctest)
  target_include_directories(fdb_c_setup_tests PUBLIC ${DOCTEST_INCLUDE_DIR})
  target_include_directories(fdb_c_unit_tests PUBLIC ${DOCTEST_INCLUDE_DIR})
  target_include_directories(fdb_c_unit_tests_version_510 PUBLIC ${DOCTEST_INCLUDE_DIR})
  target_link_libraries(fdb_c_setup_tests PRIVATE fdb_c Threads::Threads)
  target_link_libraries(fdb_c_unit_tests PRIVATE fdb_c Threads::Threads)
  target_link_libraries(fdb_c_unit_tests_version_510 PRIVATE fdb_c Threads::Threads)
+ target_link_libraries(trace_partial_file_suffix_test PRIVATE fdb_c Threads::Threads)

  # do not set RPATH for mako
  set_property(TARGET mako PROPERTY SKIP_BUILD_RPATH TRUE)
@@ -146,6 +151,11 @@ if(NOT WIN32)
      COMMAND $<TARGET_FILE:fdb_c_unit_tests_version_510>
              @CLUSTER_FILE@
              fdb)
+   add_fdbclient_test(
+     NAME trace_partial_file_suffix_test
+     COMMAND $<TARGET_FILE:trace_partial_file_suffix_test>
+             @CLUSTER_FILE@
+             fdb)
    add_fdbclient_test(
      NAME fdb_c_external_client_unit_tests
      COMMAND $<TARGET_FILE:fdb_c_unit_tests>
@@ -0,0 +1,111 @@
/*
 * trace_partial_file_suffix_test.cpp
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2021 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <fstream>
#include <iostream>
#include <random>
#include <string>
#include <thread>

#include "flow/Platform.h"

#define FDB_API_VERSION 710
#include "foundationdb/fdb_c.h"

#undef NDEBUG
#include <cassert>

void fdb_check(fdb_error_t e) {
    if (e) {
        std::cerr << fdb_get_error(e) << std::endl;
        std::abort();
    }
}

void set_net_opt(FDBNetworkOption option, const std::string& value) {
    fdb_check(fdb_network_set_option(option, reinterpret_cast<const uint8_t*>(value.c_str()), value.size()));
}

bool file_exists(const char* path) {
    FILE* f = fopen(path, "r");
    if (f) {
        fclose(f);
        return true;
    }
    return false;
}

int main(int argc, char** argv) {
    fdb_check(fdb_select_api_version(710));

    std::string file_identifier = "trace_partial_file_suffix_test" + std::to_string(std::random_device{}());
    std::string trace_partial_file_suffix = ".tmp";
    std::string simulated_stray_partial_file =
        "trace.127.0.0.1." + file_identifier + ".simulated.xml" + trace_partial_file_suffix;

    // Simulate this process crashing previously by creating a ".tmp" file
    { std::ofstream file{ simulated_stray_partial_file }; }

    set_net_opt(FDBNetworkOption::FDB_NET_OPTION_TRACE_ENABLE, "");
    set_net_opt(FDBNetworkOption::FDB_NET_OPTION_TRACE_FILE_IDENTIFIER, file_identifier);
    set_net_opt(FDBNetworkOption::FDB_NET_OPTION_TRACE_PARTIAL_FILE_SUFFIX, trace_partial_file_suffix);

    fdb_check(fdb_setup_network());
    std::thread network_thread{ &fdb_run_network };

    // Apparently you need to open a database to initialize logging
    FDBDatabase* out;
    fdb_check(fdb_create_database(nullptr, &out));
    fdb_database_destroy(out);

    // Eventually there's a new trace file for this test ending in .tmp
    std::string name;
    for (;;) {
        for (const auto& path : platform::listFiles(".")) {
            if (path.find(file_identifier) != std::string::npos && path.find(".simulated.") == std::string::npos) {
                assert(path.substr(path.size() - trace_partial_file_suffix.size()) == trace_partial_file_suffix);
                name = path;
                break;
            }
        }
        if (!name.empty()) {
            break;
        }
    }

    fdb_check(fdb_stop_network());
    network_thread.join();

    // After shutting down, the suffix is removed for both the simulated stray file and our new file
    if (!trace_partial_file_suffix.empty()) {
        assert(!file_exists(name.c_str()));
        assert(!file_exists(simulated_stray_partial_file.c_str()));
    }

    auto new_name = name.substr(0, name.size() - trace_partial_file_suffix.size());
    auto new_stray_name =
        simulated_stray_partial_file.substr(0, simulated_stray_partial_file.size() - trace_partial_file_suffix.size());
    assert(file_exists(new_name.c_str()));
    assert(file_exists(new_stray_name.c_str()));
    remove(new_name.c_str());
    remove(new_stray_name.c_str());
    assert(!file_exists(new_name.c_str()));
    assert(!file_exists(new_stray_name.c_str()));
}
@@ -36,8 +36,8 @@ const Subspace DirectoryLayer::DEFAULT_CONTENT_SUBSPACE = Subspace();
  const StringRef DirectoryLayer::PARTITION_LAYER = LiteralStringRef("partition");

  DirectoryLayer::DirectoryLayer(Subspace nodeSubspace, Subspace contentSubspace, bool allowManualPrefixes)
-   : nodeSubspace(nodeSubspace), contentSubspace(contentSubspace), allowManualPrefixes(allowManualPrefixes),
-     rootNode(nodeSubspace.get(nodeSubspace.key())), allocator(rootNode.get(HIGH_CONTENTION_KEY)) {}
+   : rootNode(nodeSubspace.get(nodeSubspace.key())), nodeSubspace(nodeSubspace), contentSubspace(contentSubspace),
+     allocator(rootNode.get(HIGH_CONTENTION_KEY)), allowManualPrefixes(allowManualPrefixes) {}

  Subspace DirectoryLayer::nodeWithPrefix(StringRef const& prefix) const {
      return nodeSubspace.get(prefix);
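The DirectoryLayer change above, and the DatabaseBackupAgent, BackupFile, GetReadVersionRequest, and GetKeyServerLocationsRequest changes further down, are the same mechanical fix: C++ always initializes members in declaration order, regardless of the order written in the mem-initializer list, so clang's -Wreorder-ctor fires when the two disagree. A minimal sketch of the pattern (Widget is illustrative, not from the FoundationDB sources):

    struct Widget {
        int first;  // declared first: initialized first, no matter the list order
        int second; // declared second: initialized second

        // Would warn under -Wreorder-ctor, and would be misleading if second's
        // initializer read first:
        //   Widget(int f, int s) : second(s), first(f) {}

        // Fixed form, with the list matching declaration order, as done
        // throughout this commit:
        Widget(int f, int s) : first(f), second(s) {}
    };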
@@ -167,9 +167,9 @@ struct RangeResultRef : VectorRef<KeyValueRef> {

  RangeResultRef() : more(false), readToBegin(false), readThroughEnd(false) {}
  RangeResultRef(Arena& p, const RangeResultRef& toCopy)
-   : more(toCopy.more), readToBegin(toCopy.readToBegin), readThroughEnd(toCopy.readThroughEnd),
+   : VectorRef<KeyValueRef>(p, toCopy), more(toCopy.more),
      readThrough(toCopy.readThrough.present() ? KeyRef(p, toCopy.readThrough.get()) : Optional<KeyRef>()),
-     VectorRef<KeyValueRef>(p, toCopy) {}
+     readToBegin(toCopy.readToBegin), readThroughEnd(toCopy.readThroughEnd) {}
  RangeResultRef(const VectorRef<KeyValueRef>& value, bool more, Optional<KeyRef> readThrough = Optional<KeyRef>())
    : VectorRef<KeyValueRef>(value), more(more), readThrough(readThrough), readToBegin(false), readThroughEnd(false) {
  }
@@ -449,7 +449,10 @@ if __name__ == '__main__':
            throttle()
        else:
            assert process_number > 1, "Process number should be positive"
-           coordinators()
-           exclude()
+           # the kill command which used to list processes seems to not work as expected sometime
+           # which makes the test flaky.
+           # We need to figure out the reason and then re-enable these tests
+           #coordinators()
+           #exclude()

@@ -36,8 +36,8 @@ if (RocksDB_FOUND)
                     ${BINARY_DIR}/librocksdb.a)
  else()
    ExternalProject_Add(rocksdb
-     URL https://github.com/facebook/rocksdb/archive/v6.10.1.tar.gz
-     URL_HASH SHA256=d573d2f15cdda883714f7e0bc87b814a8d4a53a82edde558f08f940e905541ee
+     URL https://github.com/facebook/rocksdb/archive/v6.22.1.tar.gz
+     URL_HASH SHA256=2df8f34a44eda182e22cf84dee7a14f17f55d305ff79c06fb3cd1e5f8831e00d
      CMAKE_ARGS -DUSE_RTTI=1 -DPORTABLE=${PORTABLE_ROCKSDB}
                 -DCMAKE_CXX_STANDARD=${CMAKE_CXX_STANDARD}
                 -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}
@@ -287,13 +287,10 @@ else()
    -Wshift-sign-overflow
    # Here's the current set of warnings we need to explicitly disable to compile warning-free with clang 11
    -Wno-comment
    -Wno-dangling-else
    -Wno-delete-non-virtual-dtor
    -Wno-format
    -Wno-mismatched-tags
    -Wno-missing-field-initializers
    -Wno-reorder
    -Wno-reorder-ctor
    -Wno-sign-compare
    -Wno-tautological-pointer-compare
    -Wno-undefined-var-template
@@ -144,7 +144,9 @@ namespace SummarizeTest
      string oldBinaryFolder = (args.Length > 1) ? args[1] : Path.Combine("/opt", "joshua", "global_data", "oldBinaries");
      bool useValgrind = args.Length > 2 && args[2].ToLower() == "true";
      int maxTries = (args.Length > 3) ? int.Parse(args[3]) : 3;
-     return Run(Path.Combine("bin", BINARY), "", "tests", "summary.xml", "error.xml", "tmp", oldBinaryFolder, useValgrind, maxTries, true, Path.Combine("/app", "deploy", "runtime", ".tls_5_1", PLUGIN));
+     bool buggifyEnabled = (args.Length > 4) ? bool.Parse(args[4]) : true;
+     bool faultInjectionEnabled = (args.Length > 5) ? bool.Parse(args[5]) : true;
+     return Run(Path.Combine("bin", BINARY), "", "tests", "summary.xml", "error.xml", "tmp", oldBinaryFolder, useValgrind, maxTries, true, Path.Combine("/app", "deploy", "runtime", ".tls_5_1", PLUGIN), buggifyEnabled, faultInjectionEnabled);
  }
  catch(Exception e)
  {
@@ -240,10 +242,10 @@ namespace SummarizeTest
          }
      }

-     static int Run(string fdbserverName, string tlsPluginFile, string testFolder, string summaryFileName, string errorFileName, string runDir, string oldBinaryFolder, bool useValgrind, int maxTries, bool traceToStdout = false, string tlsPluginFile_5_1 = "")
+     static int Run(string fdbserverName, string tlsPluginFile, string testFolder, string summaryFileName, string errorFileName, string runDir, string oldBinaryFolder, bool useValgrind, int maxTries, bool traceToStdout = false, string tlsPluginFile_5_1 = "", bool buggifyEnabled = true, bool faultInjectionEnabled = true)
      {
          int seed = random.Next(1000000000);
-         bool buggify = random.NextDouble() < buggifyOnRatio;
+         bool buggify = buggifyEnabled ? (random.NextDouble() < buggifyOnRatio) : false;
          string testFile = null;
          string testDir = "";
          string oldServerName = "";
@@ -353,11 +355,11 @@ namespace SummarizeTest
      bool useNewPlugin = (oldServerName == fdbserverName) || versionGreaterThanOrEqual(oldServerName.Split('-').Last(), "5.2.0");
      bool useToml = File.Exists(testFile + "-1.toml");
      string testFile1 = useToml ? testFile + "-1.toml" : testFile + "-1.txt";
-     result = RunTest(firstServerName, useNewPlugin ? tlsPluginFile : tlsPluginFile_5_1, summaryFileName, errorFileName, seed, buggify, testFile1, runDir, uid, expectedUnseed, out unseed, out retryableError, logOnRetryableError, useValgrind, false, true, oldServerName, traceToStdout, noSim);
+     result = RunTest(firstServerName, useNewPlugin ? tlsPluginFile : tlsPluginFile_5_1, summaryFileName, errorFileName, seed, buggify, testFile1, runDir, uid, expectedUnseed, out unseed, out retryableError, logOnRetryableError, useValgrind, false, true, oldServerName, traceToStdout, noSim, faultInjectionEnabled);
      if (result == 0)
      {
          string testFile2 = useToml ? testFile + "-2.toml" : testFile + "-2.txt";
-         result = RunTest(secondServerName, tlsPluginFile, summaryFileName, errorFileName, seed+1, buggify, testFile2, runDir, uid, expectedUnseed, out unseed, out retryableError, logOnRetryableError, useValgrind, true, false, oldServerName, traceToStdout, noSim);
+         result = RunTest(secondServerName, tlsPluginFile, summaryFileName, errorFileName, seed+1, buggify, testFile2, runDir, uid, expectedUnseed, out unseed, out retryableError, logOnRetryableError, useValgrind, true, false, oldServerName, traceToStdout, noSim, faultInjectionEnabled);
      }
  }
  else
@@ -365,13 +367,13 @@ namespace SummarizeTest
      int expectedUnseed = -1;
      if (!useValgrind && unseedCheck)
      {
-         result = RunTest(fdbserverName, tlsPluginFile, null, null, seed, buggify, testFile, runDir, Guid.NewGuid().ToString(), -1, out expectedUnseed, out retryableError, logOnRetryableError, false, false, false, "", traceToStdout, noSim);
+         result = RunTest(fdbserverName, tlsPluginFile, null, null, seed, buggify, testFile, runDir, Guid.NewGuid().ToString(), -1, out expectedUnseed, out retryableError, logOnRetryableError, false, false, false, "", traceToStdout, noSim, faultInjectionEnabled);
      }

      if (!retryableError)
      {
          int unseed;
-         result = RunTest(fdbserverName, tlsPluginFile, summaryFileName, errorFileName, seed, buggify, testFile, runDir, Guid.NewGuid().ToString(), expectedUnseed, out unseed, out retryableError, logOnRetryableError, useValgrind, false, false, "", traceToStdout, noSim);
+         result = RunTest(fdbserverName, tlsPluginFile, summaryFileName, errorFileName, seed, buggify, testFile, runDir, Guid.NewGuid().ToString(), expectedUnseed, out unseed, out retryableError, logOnRetryableError, useValgrind, false, false, "", traceToStdout, noSim, faultInjectionEnabled);
      }
  }

@@ -386,7 +388,7 @@ namespace SummarizeTest

  private static int RunTest(string fdbserverName, string tlsPluginFile, string summaryFileName, string errorFileName, int seed,
          bool buggify, string testFile, string runDir, string uid, int expectedUnseed, out int unseed, out bool retryableError, bool logOnRetryableError, bool useValgrind, bool restarting = false,
-         bool willRestart = false, string oldBinaryName = "", bool traceToStdout = false, bool noSim = false)
+         bool willRestart = false, string oldBinaryName = "", bool traceToStdout = false, bool noSim = false, bool faultInjectionEnabled = true)
  {
      unseed = -1;

@@ -407,7 +409,7 @@ namespace SummarizeTest
      Directory.CreateDirectory(tempPath);
      Directory.SetCurrentDirectory(tempPath);

-     if (!restarting) LogTestPlan(summaryFileName, testFile, seed, buggify, expectedUnseed != -1, uid, oldBinaryName);
+     if (!restarting) LogTestPlan(summaryFileName, testFile, seed, buggify, expectedUnseed != -1, uid, faultInjectionEnabled, oldBinaryName);

      string valgrindOutputFile = null;
      using (var process = new System.Diagnostics.Process())
@@ -422,15 +424,16 @@ namespace SummarizeTest
      process.StartInfo.RedirectStandardOutput = true;
      string role = (noSim) ? "test" : "simulation";
      var args = "";
+     string faultInjectionArg = string.IsNullOrEmpty(oldBinaryName) ? string.Format("-fi {0}", faultInjectionEnabled ? "on" : "off") : "";
      if (willRestart && oldBinaryName.EndsWith("alpha6"))
      {
-         args = string.Format("-Rs 1000000000 -r {0} {1} -s {2} -f \"{3}\" -b {4} {5} --crash",
-             role, IsRunningOnMono() ? "" : "-q", seed, testFile, buggify ? "on" : "off", tlsPluginArg);
+         args = string.Format("-Rs 1000000000 -r {0} {1} -s {2} -f \"{3}\" -b {4} {5} {6} --crash",
+             role, IsRunningOnMono() ? "" : "-q", seed, testFile, buggify ? "on" : "off", faultInjectionArg, tlsPluginArg);
      }
      else
      {
-         args = string.Format("-Rs 1GB -r {0} {1} -s {2} -f \"{3}\" -b {4} {5} --crash",
-             role, IsRunningOnMono() ? "" : "-q", seed, testFile, buggify ? "on" : "off", tlsPluginArg);
+         args = string.Format("-Rs 1GB -r {0} {1} -s {2} -f \"{3}\" -b {4} {5} {6} --crash",
+             role, IsRunningOnMono() ? "" : "-q", seed, testFile, buggify ? "on" : "off", faultInjectionArg, tlsPluginArg);
      }
      if (restarting) args = args + " --restarting";
      if (useValgrind && !willRestart)
@@ -524,7 +527,7 @@ namespace SummarizeTest
      var xout = new XElement("UnableToKillProcess",
          new XAttribute("Severity", (int)Magnesium.Severity.SevWarnAlways));

-     AppendXmlMessageToSummary(summaryFileName, xout, traceToStdout, testFile, seed, buggify, expectedUnseed != -1, oldBinaryName);
+     AppendXmlMessageToSummary(summaryFileName, xout, traceToStdout, testFile, seed, buggify, expectedUnseed != -1, oldBinaryName, faultInjectionEnabled);
      return 104;
      }
  }
@@ -549,7 +552,7 @@ namespace SummarizeTest
          new XAttribute("Plugin", tlsPluginFile),
          new XAttribute("MachineName", System.Environment.MachineName));

-     AppendXmlMessageToSummary(summaryFileName, xout, traceToStdout, testFile, seed, buggify, expectedUnseed != -1, oldBinaryName);
+     AppendXmlMessageToSummary(summaryFileName, xout, traceToStdout, testFile, seed, buggify, expectedUnseed != -1, oldBinaryName, faultInjectionEnabled);
      ok = useValgrind ? 0 : 103;
  }
  else
@@ -588,7 +591,7 @@ namespace SummarizeTest
          new XAttribute("Severity", (int)Magnesium.Severity.SevError),
          new XAttribute("ErrorMessage", e.Message));

-     AppendXmlMessageToSummary(summaryFileName, xout, traceToStdout, testFile, seed, buggify, expectedUnseed != -1, oldBinaryName);
+     AppendXmlMessageToSummary(summaryFileName, xout, traceToStdout, testFile, seed, buggify, expectedUnseed != -1, oldBinaryName, faultInjectionEnabled);
      return 101;
  }
  finally
@@ -704,13 +707,14 @@ namespace SummarizeTest
          }
      }

-     static void LogTestPlan(string summaryFileName, string testFileName, int randomSeed, bool buggify, bool testDeterminism, string uid, string oldBinary="")
+     static void LogTestPlan(string summaryFileName, string testFileName, int randomSeed, bool buggify, bool testDeterminism, string uid, bool faultInjectionEnabled, string oldBinary="")
      {
          var xout = new XElement("TestPlan",
              new XAttribute("TestUID", uid),
              new XAttribute("RandomSeed", randomSeed),
              new XAttribute("TestFile", testFileName),
              new XAttribute("BuggifyEnabled", buggify ? "1" : "0"),
+             new XAttribute("FaultInjectionEnabled", faultInjectionEnabled ? "1" : "0"),
              new XAttribute("DeterminismCheck", testDeterminism ? "1" : "0"),
              new XAttribute("OldBinary", Path.GetFileName(oldBinary)));
          AppendToSummary(summaryFileName, xout);
@@ -800,6 +804,8 @@ namespace SummarizeTest
          new XAttribute("DeterminismCheck", expectedUnseed != -1 ? "1" : "0"),
          new XAttribute("OldBinary", Path.GetFileName(oldBinaryName)));
      testBeginFound = true;
+     if (ev.DDetails.ContainsKey("FaultInjectionEnabled"))
+         xout.Add(new XAttribute("FaultInjectionEnabled", ev.Details.FaultInjectionEnabled));
  }
  if (ev.Type == "Simulation")
  {
@@ -1235,7 +1241,7 @@ namespace SummarizeTest
  }

  private static void AppendXmlMessageToSummary(string summaryFileName, XElement xout, bool traceToStdout = false, string testFile = null,
-         int? seed = null, bool? buggify = null, bool? determinismCheck = null, string oldBinaryName = null)
+         int? seed = null, bool? buggify = null, bool? determinismCheck = null, string oldBinaryName = null, bool? faultInjectionEnabled = null)
  {
      var test = new XElement("Test", xout);
      if(testFile != null)
@@ -1244,6 +1250,8 @@ namespace SummarizeTest
          test.Add(new XAttribute("RandomSeed", seed));
      if(buggify != null)
          test.Add(new XAttribute("BuggifyEnabled", buggify.Value ? "1" : "0"));
+     if(faultInjectionEnabled != null)
+         test.Add(new XAttribute("FaultInjectionEnabled", faultInjectionEnabled.Value ? "1" : "0"));
      if(determinismCheck != null)
          test.Add(new XAttribute("DeterminismCheck", determinismCheck.Value ? "1" : "0"));
      if(oldBinaryName != null)
@@ -41,6 +41,11 @@ enum {
      OPT_TRACE_LOG_GROUP,
      OPT_INPUT_FILE,
      OPT_BUILD_FLAGS,
+     OPT_LIST_ONLY,
+     OPT_KEY_PREFIX,
+     OPT_HEX_KEY_PREFIX,
+     OPT_BEGIN_VERSION_FILTER,
+     OPT_END_VERSION_FILTER,
      OPT_HELP
  };

@@ -62,6 +67,11 @@ CSimpleOpt::SOption gConverterOptions[] = { { OPT_CONTAINER, "-r", SO_REQ_SEP },
  TLS_OPTION_FLAGS
  #endif
      { OPT_BUILD_FLAGS, "--build_flags", SO_NONE },
+     { OPT_LIST_ONLY, "--list_only", SO_NONE },
+     { OPT_KEY_PREFIX, "-k", SO_REQ_SEP },
+     { OPT_HEX_KEY_PREFIX, "--hex_prefix", SO_REQ_SEP },
+     { OPT_BEGIN_VERSION_FILTER, "--begin_version_filter", SO_REQ_SEP },
+     { OPT_END_VERSION_FILTER, "--end_version_filter", SO_REQ_SEP },
      { OPT_HELP, "-?", SO_NONE },
      { OPT_HELP, "-h", SO_NONE },
      { OPT_HELP, "--help", SO_NONE },
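The enum above and this option table are the two halves of the CSimpleOpt pattern used by the fdbbackup tools: each OPT_* id is bound to a flag spelling and an argument policy (SO_NONE for bare flags, SO_REQ_SEP for flags taking a separate value), and the parse loop dispatches on OptionId(). A condensed sketch of that dispatch, mirroring the calls used in parseDecodeCommandLine further down (not a complete program):

    // Hypothetical condensed loop; the OPT_* values and param fields are the ones added above.
    while (args->Next()) {
        if (args->LastError() != SO_SUCCESS) {
            printDecodeUsage();
            return FDB_EXIT_ERROR;
        }
        switch (args->OptionId()) {
        case OPT_LIST_ONLY: // SO_NONE: the flag itself is the whole option
            param->list_only = true;
            break;
        case OPT_BEGIN_VERSION_FILTER: // SO_REQ_SEP: the value arrives via OptionArg()
            param->beginVersionFilter = std::atoll(args->OptionArg());
            break;
        }
    }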
@@ -19,14 +19,20 @@
  */

  #include <algorithm>
  #include <cstdlib>
  #include <iostream>
  #include <limits>
  #include <string>
  #include <vector>

  #include "fdbbackup/BackupTLSConfig.h"
  #include "fdbclient/BackupAgent.actor.h"
  #include "fdbclient/BackupContainer.h"
  #include "fdbbackup/FileConverter.h"
  #include "fdbclient/CommitTransaction.h"
  #include "fdbclient/FDBTypes.h"
  #include "fdbclient/MutationList.h"
  #include "flow/IRandom.h"
  #include "flow/Trace.h"
  #include "flow/flow.h"
  #include "flow/serialize.h"
@@ -65,6 +71,14 @@ void printDecodeUsage() {
  TLS_HELP
  #endif
         "  --build_flags  Print build information and exit.\n"
+        "  --list_only    Print file list and exit.\n"
+        "  -k KEY_PREFIX  Use the prefix for filtering mutations\n"
+        "  --hex_prefix HEX_PREFIX\n"
+        "                 The prefix specified in HEX format, e.g., \\x05\\x01.\n"
+        "  --begin_version_filter BEGIN_VERSION\n"
+        "                 The version range's begin version (inclusive) for filtering.\n"
+        "  --end_version_filter END_VERSION\n"
+        "                 The version range's end version (exclusive) for filtering.\n"
         "\n";
  return;
  }
@@ -76,9 +90,19 @@ void printBuildInformation() {
  struct DecodeParams {
      std::string container_url;
      std::string fileFilter; // only files match the filter will be decoded
-     bool log_enabled = false;
+     bool log_enabled = true;
      std::string log_dir, trace_format, trace_log_group;
      BackupTLSConfig tlsConfig;
+     bool list_only = false;
+     std::string prefix; // Key prefix for filtering
+     Version beginVersionFilter = 0;
+     Version endVersionFilter = std::numeric_limits<Version>::max();
+
+     // Returns if [begin, end) overlap with the filter range
+     bool overlap(Version begin, Version end) const {
+         // Filter [100, 200), [50,75) [200, 300)
+         return !(begin >= endVersionFilter || end <= beginVersionFilter);
+     }

      std::string toString() {
          std::string s;
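overlap() is the standard half-open interval test: [begin, end) intersects [beginVersionFilter, endVersionFilter) exactly when neither interval starts at or beyond the other's end. A self-contained check of the cases from the comment (a sketch, with the filter range passed explicitly):

    #include <cassert>
    #include <cstdint>

    using Version = int64_t;

    // Same predicate as DecodeParams::overlap(), parameterized on the filter range.
    bool overlap(Version begin, Version end, Version filterBegin, Version filterEnd) {
        return !(begin >= filterEnd || end <= filterBegin);
    }

    int main() {
        // Filter [100, 200): [50, 75) and [200, 300) miss; [150, 250) hits.
        assert(!overlap(50, 75, 100, 200));
        assert(!overlap(200, 300, 100, 200));
        assert(overlap(150, 250, 100, 200));
    }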
@@ -97,12 +121,69 @@ struct DecodeParams {
              s.append(" LogGroup:").append(trace_log_group);
          }
      }
+     s.append(", list_only: ").append(list_only ? "true" : "false");
+     if (beginVersionFilter != 0) {
+         s.append(", beginVersionFilter: ").append(std::to_string(beginVersionFilter));
+     }
+     if (endVersionFilter < std::numeric_limits<Version>::max()) {
+         s.append(", endVersionFilter: ").append(std::to_string(endVersionFilter));
+     }
+     if (!prefix.empty()) {
+         s.append(", KeyPrefix: ").append(printable(KeyRef(prefix)));
+     }
      return s;
  }

  };

+ // Decode an ASCII string, e.g., "\x15\x1b\x19\x04\xaf\x0c\x28\x0a",
+ // into the binary string.
+ std::string decode_hex_string(std::string line) {
+     size_t i = 0;
+     std::string ret;
+
+     while (i <= line.length()) {
+         switch (line[i]) {
+         case '\\':
+             if (i + 2 > line.length()) {
+                 std::cerr << "Invalid hex string at: " << i << "\n";
+                 return ret;
+             }
+             switch (line[i + 1]) {
+                 char ent, save;
+             case '"':
+             case '\\':
+             case ' ':
+             case ';':
+                 line.erase(i, 1);
+                 break;
+             case 'x':
+                 if (i + 4 > line.length()) {
+                     std::cerr << "Invalid hex string at: " << i << "\n";
+                     return ret;
+                 }
+                 char* pEnd;
+                 save = line[i + 4];
+                 line[i + 4] = 0;
+                 ent = char(strtoul(line.data() + i + 2, &pEnd, 16));
+                 if (*pEnd) {
+                     std::cerr << "Invalid hex string at: " << i << "\n";
+                     return ret;
+                 }
+                 line[i + 4] = save;
+                 line.replace(i, 4, 1, ent);
+                 break;
+             default:
+                 std::cerr << "Invalid hex string at: " << i << "\n";
+                 return ret;
+             }
+         default:
+             i++;
+         }
+     }
+
+     return line.substr(0, i);
+ }
+
  int parseDecodeCommandLine(DecodeParams* param, CSimpleOpt* args) {
      while (args->Next()) {
          auto lastError = args->LastError();
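decode_hex_string() accepts the escaped form the usage text documents for --hex_prefix: each \xNN pair becomes one raw byte, and escaped quotes, backslashes, spaces, and semicolons are unescaped in place. A usage sketch (the expected output is inferred from the implementation above):

    // "\x05\x01" (written with literal backslashes) decodes to the two raw
    // bytes 0x05 0x01, matching the --hex_prefix example in printDecodeUsage().
    std::string prefix = decode_hex_string("\\x05\\x01");
    assert(prefix.size() == 2 && prefix[0] == '\x05' && prefix[1] == '\x01');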
@@ -124,6 +205,26 @@ int parseDecodeCommandLine(DecodeParams* param, CSimpleOpt* args) {
          param->container_url = args->OptionArg();
          break;

+     case OPT_LIST_ONLY:
+         param->list_only = true;
+         break;
+
+     case OPT_KEY_PREFIX:
+         param->prefix = args->OptionArg();
+         break;
+
+     case OPT_HEX_KEY_PREFIX:
+         param->prefix = decode_hex_string(args->OptionArg());
+         break;
+
+     case OPT_BEGIN_VERSION_FILTER:
+         param->beginVersionFilter = std::atoll(args->OptionArg());
+         break;
+
+     case OPT_END_VERSION_FILTER:
+         param->endVersionFilter = std::atoll(args->OptionArg());
+         break;
+
      case OPT_CRASHONERROR:
          g_crashOnError = true;
          break;
@@ -141,7 +242,7 @@ int parseDecodeCommandLine(DecodeParams* param, CSimpleOpt* args) {
          break;

      case OPT_TRACE_FORMAT:
-         if (!validateTraceFormat(args->OptionArg())) {
+         if (!selectTraceFormatter(args->OptionArg())) {
              std::cerr << "ERROR: Unrecognized trace format " << args->OptionArg() << "\n";
              return FDB_EXIT_ERROR;
          }
@@ -202,78 +303,18 @@ void printLogFiles(std::string msg, const std::vector<LogFile>& files) {
  std::vector<LogFile> getRelevantLogFiles(const std::vector<LogFile>& files, const DecodeParams& params) {
      std::vector<LogFile> filtered;
      for (const auto& file : files) {
-         if (file.fileName.find(params.fileFilter) != std::string::npos) {
+         if (file.fileName.find(params.fileFilter) != std::string::npos &&
+             params.overlap(file.beginVersion, file.endVersion + 1)) {
              filtered.push_back(file);
          }
      }
      return filtered;
  }

- std::pair<Version, int32_t> decode_key(const StringRef& key) {
-     ASSERT(key.size() == sizeof(uint8_t) + sizeof(Version) + sizeof(int32_t));
-
-     uint8_t hash;
-     Version version;
-     int32_t part;
-     BinaryReader rd(key, Unversioned());
-     rd >> hash >> version >> part;
-     version = bigEndian64(version);
-     part = bigEndian32(part);
-
-     int32_t v = version / CLIENT_KNOBS->LOG_RANGE_BLOCK_SIZE;
-     ASSERT(((uint8_t)hashlittle(&v, sizeof(v), 0)) == hash);
-
-     return std::make_pair(version, part);
- }
-
- // Decodes an encoded list of mutations in the format of:
- // [includeVersion:uint64_t][val_length:uint32_t][mutation_1][mutation_2]...[mutation_k],
- // where a mutation is encoded as:
- // [type:uint32_t][keyLength:uint32_t][valueLength:uint32_t][key][value]
- std::vector<MutationRef> decode_value(const StringRef& value) {
-     StringRefReader reader(value, restore_corrupted_data());
-
-     reader.consume<uint64_t>(); // Consume the includeVersion
-     uint32_t val_length = reader.consume<uint32_t>();
-     if (val_length != value.size() - sizeof(uint64_t) - sizeof(uint32_t)) {
-         TraceEvent(SevError, "ValueError")
-             .detail("ValueLen", val_length)
-             .detail("ValueSize", value.size())
-             .detail("Value", printable(value));
-     }
-
-     std::vector<MutationRef> mutations;
-     while (1) {
-         if (reader.eof())
-             break;
-
-         // Deserialization of a MutationRef, which was packed by MutationListRef::push_back_deep()
-         uint32_t type, p1len, p2len;
-         type = reader.consume<uint32_t>();
-         p1len = reader.consume<uint32_t>();
-         p2len = reader.consume<uint32_t>();
-
-         const uint8_t* key = reader.consume(p1len);
-         const uint8_t* val = reader.consume(p2len);
-
-         mutations.emplace_back((MutationRef::Type)type, StringRef(key, p1len), StringRef(val, p2len));
-     }
-     return mutations;
- }
-
  struct VersionedMutations {
      Version version;
      std::vector<MutationRef> mutations;
-     Arena arena; // The arena that contains the mutations.
- };
-
- struct VersionedKVPart {
-     Arena arena;
-     Version version;
-     int32_t part;
-     StringRef kv;
-     VersionedKVPart(Arena arena, Version version, int32_t part, StringRef kv)
-       : arena(arena), version(version), part(part), kv(kv) {}
+     std::string serializedMutations; // buffer that contains mutations
  };

  /*
@@ -293,174 +334,66 @@ struct VersionedKVPart {
   * at any time this object might have two blocks of data in memory.
   */
  class DecodeProgress {
-     std::vector<VersionedKVPart> keyValues;
+     std::vector<Standalone<VectorRef<KeyValueRef>>> blocks;
+     std::unordered_map<Version, fileBackup::AccumulatedMutations> mutationBlocksByVersion;

  public:
      DecodeProgress() = default;
-     template <class U>
-     DecodeProgress(const LogFile& file, U&& values) : file(file), keyValues(std::forward<U>(values)) {}
+     DecodeProgress(const LogFile& file) : file(file) {}

-     // If there are no more mutations to pull from the file.
-     // However, we could have unfinished version in the buffer when EOF is true,
-     // which means we should look for data in the next file. The caller
-     // should call getUnfinishedBuffer() to get these left data.
-     bool finished() const { return (eof && keyValues.empty()) || (leftover && !keyValues.empty()); }
-
-     std::vector<VersionedKVPart>&& getUnfinishedBuffer() && { return std::move(keyValues); }
-
-     // Returns all mutations of the next version in a batch.
-     Future<VersionedMutations> getNextBatch() { return getNextBatchImpl(this); }
+     bool finished() const { return done; }

      // Open and loads file into memory
      Future<Void> openFile(Reference<IBackupContainer> container) { return openFileImpl(this, container); }

-     // The following are private APIs:
-
-     // Returns true if value contains complete data.
-     static bool isValueComplete(StringRef value) {
-         StringRefReader reader(value, restore_corrupted_data());
-
-         reader.consume<uint64_t>(); // Consume the includeVersion
-         uint32_t val_length = reader.consume<uint32_t>();
-         return val_length == value.size() - sizeof(uint64_t) - sizeof(uint32_t);
-     }
-
-     // PRECONDITION: finished() must return false before calling this function.
      // Returns the next batch of mutations along with the arena backing it.
      // Note the returned batch can be empty when the file has unfinished
      // version batch data that are in the next file.
-     ACTOR static Future<VersionedMutations> getNextBatchImpl(DecodeProgress* self) {
-         ASSERT(!self->finished());
+     VersionedMutations getNextBatch() {
+         ASSERT(!finished());

-         loop {
-             if (self->keyValues.size() <= 1) {
-                 // Try to decode another block when less than one left
-                 wait(readAndDecodeFile(self));
-             }
-
-             const auto& kv = self->keyValues[0];
-             ASSERT(kv.part == 0);
-
-             // decode next versions, check if they are continuous parts
-             int idx = 1; // next kv pair in "keyValues"
-             int bufSize = kv.kv.size();
-             for (int lastPart = 0; idx < self->keyValues.size(); idx++, lastPart++) {
-                 if (idx == self->keyValues.size())
-                     break;
-
-                 const auto& nextKV = self->keyValues[idx];
-                 if (kv.version != nextKV.version) {
-                     break;
-                 }
-
-                 if (lastPart + 1 != nextKV.part) {
-                     TraceEvent("DecodeError").detail("Part1", lastPart).detail("Part2", nextKV.part);
-                     throw restore_corrupted_data();
-                 }
-                 bufSize += nextKV.kv.size();
-             }
-
-             VersionedMutations m;
-             m.version = kv.version;
-             TraceEvent("Decode").detail("Version", m.version).detail("Idx", idx).detail("Q", self->keyValues.size());
-             StringRef value = kv.kv;
-             if (idx > 1) {
-                 // Stitch parts into one and then decode one by one
-                 Standalone<StringRef> buf = self->combineValues(idx, bufSize);
-                 value = buf;
-                 m.arena = buf.arena();
-             }
-             if (isValueComplete(value)) {
-                 m.mutations = decode_value(value);
-                 if (m.arena.getSize() == 0) {
-                     m.arena = kv.arena;
-                 }
-                 self->keyValues.erase(self->keyValues.begin(), self->keyValues.begin() + idx);
-                 return m;
-             } else if (!self->eof) {
-                 // Read one more block, hopefully the missing part of the value can be found.
-                 wait(readAndDecodeFile(self));
-             } else {
-                 TraceEvent(SevWarn, "MissingValue").detail("Version", m.version);
-                 self->leftover = true;
-                 return m; // Empty mutations
+         VersionedMutations vms;
+         for (auto& [version, m] : mutationBlocksByVersion) {
+             if (m.isComplete()) {
+                 vms.version = version;
+                 std::vector<MutationRef> mutations = fileBackup::decodeMutationLogValue(m.serializedMutations);
+                 TraceEvent("Decode").detail("Version", vms.version).detail("N", mutations.size());
+                 vms.mutations.insert(vms.mutations.end(), mutations.begin(), mutations.end());
+                 vms.serializedMutations = m.serializedMutations;
+                 mutationBlocksByVersion.erase(version);
+                 return vms;
              }
         }
      }

-     // Returns a buffer which stitches first "idx" values into one.
-     // "len" MUST equal the summation of these values.
-     Standalone<StringRef> combineValues(const int idx, const int len) {
-         ASSERT(idx <= keyValues.size() && idx > 1);
-
-         Standalone<StringRef> buf = makeString(len);
-         int n = 0;
-         for (int i = 0; i < idx; i++) {
-             const auto& value = keyValues[i].kv;
-             memcpy(mutateString(buf) + n, value.begin(), value.size());
-             n += value.size();
-         }
-
-         ASSERT(n == len);
-         return buf;
-     }
-
-     // Decodes a block into KeyValueRef stored in "keyValues".
-     void decode_block(const Standalone<StringRef>& buf, int len) {
-         StringRef block(buf.begin(), len);
-         StringRefReader reader(block, restore_corrupted_data());
-
-         try {
-             // Read header, currently only decoding version BACKUP_AGENT_MLOG_VERSION
-             if (reader.consume<int32_t>() != BACKUP_AGENT_MLOG_VERSION)
-                 throw restore_unsupported_file_version();
-
-             // Read k/v pairs. Block ends either at end of last value exactly or with 0xFF as first key len byte.
-             while (1) {
-                 // If eof reached or first key len bytes is 0xFF then end of block was reached.
-                 if (reader.eof() || *reader.rptr == 0xFF)
-                     break;
-
-                 // Read key and value. If anything throws then there is a problem.
-                 uint32_t kLen = reader.consumeNetworkUInt32();
-                 const uint8_t* k = reader.consume(kLen);
-                 std::pair<Version, int32_t> version_part = decode_key(StringRef(k, kLen));
-                 uint32_t vLen = reader.consumeNetworkUInt32();
-                 const uint8_t* v = reader.consume(vLen);
-                 TraceEvent(SevDecodeInfo, "Block")
-                     .detail("KeySize", kLen)
-                     .detail("valueSize", vLen)
-                     .detail("Offset", reader.rptr - buf.begin())
-                     .detail("Version", version_part.first)
-                     .detail("Part", version_part.second);
-                 keyValues.emplace_back(buf.arena(), version_part.first, version_part.second, StringRef(v, vLen));
-             }
-
-             // Make sure any remaining bytes in the block are 0xFF
-             for (auto b : reader.remainder()) {
-                 if (b != 0xFF)
-                     throw restore_corrupted_data_padding();
-             }
-
-             // The (version, part) in a block can be out of order, i.e., (3, 0)
-             // can be followed by (4, 0), and then (3, 1). So we need to sort them
-             // first by version, and then by part number.
-             std::sort(keyValues.begin(), keyValues.end(), [](const VersionedKVPart& a, const VersionedKVPart& b) {
-                 return a.version == b.version ? a.part < b.part : a.version < b.version;
-             });
-             return;
-         } catch (Error& e) {
-             TraceEvent(SevWarn, "CorruptBlock").error(e).detail("Offset", reader.rptr - buf.begin());
-             throw;
+         // No complete versions
+         if (!mutationBlocksByVersion.empty()) {
+             TraceEvent(SevWarn, "UnfishedBlocks").detail("NumberOfVersions", mutationBlocksByVersion.size());
          }
+         done = true;
+         return vms;
      }

      ACTOR static Future<Void> openFileImpl(DecodeProgress* self, Reference<IBackupContainer> container) {
          Reference<IAsyncFile> fd = wait(container->readFile(self->file.fileName));
          self->fd = fd;
-         wait(readAndDecodeFile(self));
+         while (!self->eof) {
+             wait(readAndDecodeFile(self));
+         }
          return Void();
      }

+     // Add chunks to mutationBlocksByVersion
+     void addBlockKVPairs(VectorRef<KeyValueRef> chunks) {
+         for (auto& kv : chunks) {
+             auto versionAndChunkNumber = fileBackup::decodeMutationLogKey(kv.key);
+             mutationBlocksByVersion[versionAndChunkNumber.first].addChunk(versionAndChunkNumber.second, kv);
+         }
+     }

      // Reads a file block, decodes it into key/value pairs, and stores these pairs.
      ACTOR static Future<Void> readAndDecodeFile(DecodeProgress* self) {
          try {
@@ -470,17 +403,18 @@ public:
              return Void();
          }

-         state Standalone<StringRef> buf = makeString(len);
-         state int rLen = wait(self->fd->read(mutateString(buf), len, self->offset));
+         // Decode a file block into log_key and log_value chunks
+         Standalone<VectorRef<KeyValueRef>> chunks =
+             wait(fileBackup::decodeMutationLogFileBlock(self->fd, self->offset, len));
+         self->blocks.push_back(chunks);

          TraceEvent("ReadFile")
              .detail("Name", self->file.fileName)
-             .detail("Len", rLen)
+             .detail("Len", len)
              .detail("Offset", self->offset);
-         if (rLen != len) {
-             throw restore_corrupted_data();
-         }
-         self->decode_block(buf, rLen);
-         self->offset += rLen;
+         self->addBlockKVPairs(chunks);
+         self->offset += len;

          return Void();
      } catch (Error& e) {
          TraceEvent(SevWarn, "CorruptLogFileBlock")
@@ -496,12 +430,55 @@ public:
      Reference<IAsyncFile> fd;
      int64_t offset = 0;
      bool eof = false;
-     bool leftover = false; // Done but has unfinished version batch data left
+     bool done = false;
  };

+ ACTOR Future<Void> process_file(Reference<IBackupContainer> container, LogFile file, UID uid, DecodeParams params) {
+     if (file.fileSize == 0) {
+         TraceEvent("SkipEmptyFile", uid).detail("Name", file.fileName);
+         return Void();
+     }
+
+     state DecodeProgress progress(file);
+     wait(progress.openFile(container));
+     while (!progress.finished()) {
+         VersionedMutations vms = progress.getNextBatch();
+         if (vms.version < params.beginVersionFilter || vms.version >= params.endVersionFilter) {
+             TraceEvent("SkipVersion").detail("Version", vms.version);
+             continue;
+         }
+
+         int sub = 0;
+         for (const auto& m : vms.mutations) {
+             sub++; // sub sequence number starts at 1
+             bool print = params.prefix.empty(); // no filtering
+
+             if (!print) {
+                 if (isSingleKeyMutation((MutationRef::Type)m.type)) {
+                     print = m.param1.startsWith(StringRef(params.prefix));
+                 } else if (m.type == MutationRef::ClearRange) {
+                     KeyRange range(KeyRangeRef(m.param1, m.param2));
+                     print = range.contains(StringRef(params.prefix));
+                 } else {
+                     ASSERT(false);
+                 }
+             }
+             if (print) {
+                 TraceEvent(format("Mutation_%llu_%d", vms.version, sub).c_str(), uid)
+                     .detail("Version", vms.version)
+                     .setMaxFieldLength(10000)
+                     .detail("M", m.toString());
+                 std::cout << vms.version << " " << m.toString() << "\n";
+             }
+         }
+     }
+     TraceEvent("ProcessFileDone", uid).detail("File", file.fileName);
+     return Void();
+ }
+
  ACTOR Future<Void> decode_logs(DecodeParams params) {
      state Reference<IBackupContainer> container = IBackupContainer::openContainer(params.container_url);

+     state UID uid = deterministicRandom()->randomUniqueID();
      state BackupFileList listing = wait(container->dumpFileList());
      // remove partitioned logs
      listing.logs.erase(std::remove_if(listing.logs.begin(),
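process_file() above applies the -k/--hex_prefix filter per mutation class: a point mutation matches when its key (param1) starts with the prefix, while a ClearRange matches when its [param1, param2) range contains the prefix key. A minimal sketch of that decision, with std::string standing in for StringRef/KeyRange (illustrative only, not the FoundationDB types):

    bool shouldPrint(const std::string& prefix, bool isPointMutation,
                     const std::string& param1, const std::string& param2) {
        if (prefix.empty())
            return true; // no filtering requested
        if (isPointMutation)
            return param1.rfind(prefix, 0) == 0; // param1 starts with prefix
        // ClearRange: lexicographic containment of the prefix key in [param1, param2)
        return prefix >= param1 && prefix < param2;
    }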
@@ -512,7 +489,8 @@ ACTOR Future<Void> decode_logs(DecodeParams params) {
                                       }),
                        listing.logs.end());
      std::sort(listing.logs.begin(), listing.logs.end());
-     TraceEvent("Container").detail("URL", params.container_url).detail("Logs", listing.logs.size());
+     TraceEvent("Container", uid).detail("URL", params.container_url).detail("Logs", listing.logs.size());
+     TraceEvent("DecodeParam", uid).setMaxFieldLength(100000).detail("Value", params.toString());

      BackupDescription desc = wait(container->describeBackup());
      std::cout << "\n" << desc.toString() << "\n";
@@ -520,26 +498,15 @@ ACTOR Future<Void> decode_logs(DecodeParams params) {
      state std::vector<LogFile> logs = getRelevantLogFiles(listing.logs, params);
      printLogFiles("Relevant files are: ", logs);

-     state int i = 0;
-     // Previous file's unfinished version data
-     state std::vector<VersionedKVPart> left;
-     for (; i < logs.size(); i++) {
-         if (logs[i].fileSize == 0)
-             continue;
+     if (params.list_only) return Void();

-         state DecodeProgress progress(logs[i], std::move(left));
-         wait(progress.openFile(container));
-         while (!progress.finished()) {
-             VersionedMutations vms = wait(progress.getNextBatch());
-             for (const auto& m : vms.mutations) {
-                 std::cout << vms.version << " " << m.toString() << "\n";
-             }
-         }
-         left = std::move(progress).getUnfinishedBuffer();
-         if (!left.empty()) {
-             TraceEvent("UnfinishedFile").detail("File", logs[i].fileName).detail("Q", left.size());
-         }
+     state int idx = 0;
+     while (idx < logs.size()) {
+         TraceEvent("ProcessFile").detail("Name", logs[idx].fileName).detail("I", idx);
+         wait(process_file(container, logs[idx], uid, params));
+         idx++;
      }
+     TraceEvent("DecodeDone", uid);
      return Void();
  }

@@ -564,6 +531,8 @@ int main(int argc, char** argv) {
      }
      if (!param.trace_format.empty()) {
          setNetworkOption(FDBNetworkOptions::TRACE_FORMAT, StringRef(param.trace_format));
+     } else {
+         setNetworkOption(FDBNetworkOptions::TRACE_FORMAT, "json"_sr);
      }
      if (!param.trace_log_group.empty()) {
          setNetworkOption(FDBNetworkOptions::TRACE_LOG_GROUP, StringRef(param.trace_log_group));
@@ -582,12 +551,17 @@ int main(int argc, char** argv) {
      setupNetwork(0, UseMetrics::True);

      TraceEvent::setNetworkThread();
-     openTraceFile(NetworkAddress(), 10 << 20, 10 << 20, param.log_dir, "decode", param.trace_log_group);
+     openTraceFile(NetworkAddress(), 10 << 20, 500 << 20, param.log_dir, "decode", param.trace_log_group);
+     param.tlsConfig.setupBlobCredentials();

      auto f = stopAfter(decode_logs(param));

      runNetwork();

+     flushTraceFileVoid();
+     fflush(stdout);
+     closeTraceFile();
+
      return status;
  } catch (Error& e) {
      std::cerr << "ERROR: " << e.what() << "\n";
@@ -368,8 +368,8 @@ public:
  DatabaseBackupAgent(DatabaseBackupAgent&& r) noexcept
    : subspace(std::move(r.subspace)), states(std::move(r.states)), config(std::move(r.config)),
      errors(std::move(r.errors)), ranges(std::move(r.ranges)), tagNames(std::move(r.tagNames)),
-     taskBucket(std::move(r.taskBucket)), futureBucket(std::move(r.futureBucket)),
-     sourceStates(std::move(r.sourceStates)), sourceTagNames(std::move(r.sourceTagNames)) {}
+     sourceStates(std::move(r.sourceStates)), sourceTagNames(std::move(r.sourceTagNames)),
+     taskBucket(std::move(r.taskBucket)), futureBucket(std::move(r.futureBucket)) {}

  void operator=(DatabaseBackupAgent&& r) noexcept {
      subspace = std::move(r.subspace);
@@ -970,6 +970,11 @@ ACTOR Future<Standalone<VectorRef<KeyValueRef>>> decodeRangeFileBlock(Reference<
                                                                       int64_t offset,
                                                                       int len);

+ // Reads a mutation log block from file and parses into batch mutation blocks for further parsing.
+ ACTOR Future<Standalone<VectorRef<KeyValueRef>>> decodeMutationLogFileBlock(Reference<IAsyncFile> file,
+                                                                             int64_t offset,
+                                                                             int len);
+
  // Return a block of contiguous padding bytes "\0xff" for backup files, growing if needed.
  Value makePadding(int size);
  } // namespace fileBackup
@@ -306,4 +306,43 @@ private:
      std::string URL;
  };

+ namespace fileBackup {
+ // Accumulates mutation log value chunks, as both a vector of chunks and as a combined chunk,
+ // in chunk order, and can check the chunk set for completion or intersection with a set
+ // of ranges.
+ struct AccumulatedMutations {
+     AccumulatedMutations() : lastChunkNumber(-1) {}
+
+     // Add a KV pair for this mutation chunk set
+     // It will be accumulated onto serializedMutations if the chunk number is
+     // the next expected value.
+     void addChunk(int chunkNumber, const KeyValueRef& kv);
+
+     // Returns true if both
+     //   - 1 or more chunks were added to this set
+     //   - The header of the first chunk contains a valid protocol version and a length
+     //     that matches the bytes after the header in the combined value in serializedMutations
+     bool isComplete() const;
+
+     // Returns true if a complete chunk contains any MutationRefs which intersect with any
+     // range in ranges.
+     // It is undefined behavior to run this if isComplete() does not return true.
+     bool matchesAnyRange(const std::vector<KeyRange>& ranges) const;
+
+     std::vector<KeyValueRef> kvs;
+     std::string serializedMutations;
+     int lastChunkNumber;
+ };
+
+ // Decodes a mutation log key, which contains (hash, commitVersion, chunkNumber) and
+ // returns (commitVersion, chunkNumber)
+ std::pair<Version, int32_t> decodeMutationLogKey(const StringRef& key);
+
+ // Decodes an encoded list of mutations in the format of:
+ // [includeVersion:uint64_t][val_length:uint32_t][mutation_1][mutation_2]...[mutation_k],
+ // where a mutation is encoded as:
+ // [type:uint32_t][keyLength:uint32_t][valueLength:uint32_t][param1][param2]
+ std::vector<MutationRef> decodeMutationLogValue(const StringRef& value);
+ } // namespace fileBackup
+
  #endif
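The decodeMutationLogValue() layout documented above can be walked with plain pointer arithmetic: an 8-byte includeVersion, a 4-byte length that must cover exactly the remaining bytes, then repeated (type, p1len, p2len, param1, param2) records. A hedged standalone sketch of that walk (little-endian assumed; the real implementation uses StringRefReader and FDB's error types instead):

    #include <cstdint>
    #include <cstring>
    #include <string>
    #include <vector>

    struct RawMutation {
        uint32_t type;
        std::string param1, param2;
    };

    // Returns the decoded records, or an empty vector on malformed input.
    std::vector<RawMutation> decodeValueSketch(const uint8_t* p, size_t n) {
        std::vector<RawMutation> out;
        if (n < 12)
            return out;
        uint32_t valLen;
        std::memcpy(&valLen, p + 8, 4); // the 8 bytes before it are includeVersion
        if (valLen != n - 12)
            return out; // length must match the bytes after the header exactly
        size_t i = 12;
        while (i + 12 <= n) {
            uint32_t type, p1len, p2len;
            std::memcpy(&type, p + i, 4);
            std::memcpy(&p1len, p + i + 4, 4);
            std::memcpy(&p2len, p + i + 8, 4);
            i += 12;
            if ((size_t)p1len + p2len > n - i)
                return out; // truncated record
            out.push_back({ type, std::string((const char*)p + i, p1len),
                            std::string((const char*)p + i + p1len, p2len) });
            i += p1len + p2len;
        }
        return out;
    }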
@@ -163,7 +163,6 @@ public:
      state Version maxVer = 0;
      state RangeFile rf;
      state json_spirit::mArray fileArray;
      state int i;

      // Validate each filename, update version range
      for (const auto& f : fileNames) {
@@ -1488,7 +1487,7 @@ void BackupContainerFileSystem::setEncryptionKey(Optional<std::string> const& en
  #endif
  }
  }
- Future<Void> BackupContainerFileSystem::createTestEncryptionKeyFile(std::string const &filename) {
+ Future<Void> BackupContainerFileSystem::createTestEncryptionKeyFile(std::string const& filename) {
  #if ENCRYPTION_ENABLED
      return BackupContainerFileSystemImpl::createTestEncryptionKeyFile(filename);
  #else
@@ -1507,13 +1506,16 @@ int chooseFileSize(std::vector<int>& sizes) {
      return deterministicRandom()->randomInt(0, 2e6);
  }

- ACTOR Future<Void> writeAndVerifyFile(Reference<IBackupContainer> c, Reference<IBackupFile> f, int size, FlowLock* lock) {
+ ACTOR Future<Void> writeAndVerifyFile(Reference<IBackupContainer> c,
+                                       Reference<IBackupFile> f,
+                                       int size,
+                                       FlowLock* lock) {
      state Standalone<VectorRef<uint8_t>> content;

      wait(lock->take(TaskPriority::DefaultYield, size));
      state FlowLock::Releaser releaser(*lock, size);

      printf("writeAndVerify size=%d file=%s\n", size, f->getFileName().c_str());
      content.resize(content.arena(), size);
      for (int i = 0; i < content.size(); ++i) {
          content[i] = (uint8_t)deterministicRandom()->randomInt(0, 256);
@@ -1601,9 +1603,9 @@ ACTOR Future<Void> testBackupContainer(std::string url, Optional<std::string> en
      // List of sizes to use to test edge cases on underlying file implementations
      state std::vector<int> fileSizes = { 0 };
      if (StringRef(url).startsWith(LiteralStringRef("blob"))) {
          fileSizes.push_back(CLIENT_KNOBS->BLOBSTORE_MULTIPART_MIN_PART_SIZE);
          fileSizes.push_back(CLIENT_KNOBS->BLOBSTORE_MULTIPART_MIN_PART_SIZE + 10);
      }

      loop {
          state Version logStart = v;
@@ -31,7 +31,8 @@ namespace {
  class BackupFile : public IBackupFile, ReferenceCounted<BackupFile> {
  public:
      BackupFile(const std::string& fileName, Reference<IAsyncFile> file, const std::string& finalFullPath)
-       : IBackupFile(fileName), m_file(file), m_finalFullPath(finalFullPath), m_writeOffset(0), m_blockSize(CLIENT_KNOBS->BACKUP_LOCAL_FILE_WRITE_BLOCK) {
+       : IBackupFile(fileName), m_file(file), m_writeOffset(0), m_finalFullPath(finalFullPath),
+         m_blockSize(CLIENT_KNOBS->BACKUP_LOCAL_FILE_WRITE_BLOCK) {
          if (BUGGIFY) {
              m_blockSize = deterministicRandom()->randomInt(100, 20000);
          }
@@ -244,7 +244,7 @@ struct GetReadVersionRequest : TimedRequest {
                        uint32_t flags = 0,
                        TransactionTagMap<uint32_t> tags = TransactionTagMap<uint32_t>(),
                        Optional<UID> debugID = Optional<UID>())
-   : spanContext(spanContext), transactionCount(transactionCount), priority(priority), flags(flags), tags(tags),
+   : spanContext(spanContext), transactionCount(transactionCount), flags(flags), priority(priority), tags(tags),
      debugID(debugID) {
      flags = flags & ~FLAG_PRIORITY_MASK;
      switch (priority) {
@@ -313,7 +313,7 @@ struct GetKeyServerLocationsRequest {
                               int limit,
                               bool reverse,
                               Arena const& arena)
-   : spanContext(spanContext), begin(begin), end(end), limit(limit), reverse(reverse), arena(arena) {}
+   : arena(arena), spanContext(spanContext), begin(begin), end(end), limit(limit), reverse(reverse) {}

  template <class Ar>
  void serialize(Ar& ar) {
|
|||
|
||||
#include "fdbclient/ConfigTransactionInterface.h"
|
||||
#include "fdbclient/CoordinationInterface.h"
|
||||
#include "fdbclient/SystemData.h"
|
||||
#include "flow/IRandom.h"
|
||||
|
||||
ConfigTransactionInterface::ConfigTransactionInterface() : _id(deterministicRandom()->randomUniqueID()) {}
|
||||
|
||||
void ConfigTransactionInterface::setupWellKnownEndpoints() {
|
||||
getVersion.makeWellKnownEndpoint(WLTOKEN_CONFIGTXN_GETVERSION, TaskPriority::Coordination);
|
||||
getGeneration.makeWellKnownEndpoint(WLTOKEN_CONFIGTXN_GETGENERATION, TaskPriority::Coordination);
|
||||
get.makeWellKnownEndpoint(WLTOKEN_CONFIGTXN_GET, TaskPriority::Coordination);
|
||||
getClasses.makeWellKnownEndpoint(WLTOKEN_CONFIGTXN_GETCLASSES, TaskPriority::Coordination);
|
||||
getKnobs.makeWellKnownEndpoint(WLTOKEN_CONFIGTXN_GETKNOBS, TaskPriority::Coordination);
|
||||
|
@@ -33,8 +34,8 @@ void ConfigTransactionInterface::setupWellKnownEndpoints() {
  }

  ConfigTransactionInterface::ConfigTransactionInterface(NetworkAddress const& remote)
-   : getVersion(Endpoint({ remote }, WLTOKEN_CONFIGTXN_GETVERSION)), get(Endpoint({ remote }, WLTOKEN_CONFIGTXN_GET)),
-     getClasses(Endpoint({ remote }, WLTOKEN_CONFIGTXN_GETCLASSES)),
+   : getGeneration(Endpoint({ remote }, WLTOKEN_CONFIGTXN_GETGENERATION)),
+     get(Endpoint({ remote }, WLTOKEN_CONFIGTXN_GET)), getClasses(Endpoint({ remote }, WLTOKEN_CONFIGTXN_GETCLASSES)),
      getKnobs(Endpoint({ remote }, WLTOKEN_CONFIGTXN_GETKNOBS)), commit(Endpoint({ remote }, WLTOKEN_CONFIGTXN_COMMIT)) {
  }

@@ -45,3 +46,30 @@ bool ConfigTransactionInterface::operator==(ConfigTransactionInterface const& rh
  bool ConfigTransactionInterface::operator!=(ConfigTransactionInterface const& rhs) const {
      return !(*this == rhs);
  }
+
+ bool ConfigGeneration::operator==(ConfigGeneration const& rhs) const {
+     return liveVersion == rhs.liveVersion && committedVersion == rhs.committedVersion;
+ }
+
+ bool ConfigGeneration::operator!=(ConfigGeneration const& rhs) const {
+     return !(*this == rhs);
+ }
+
+ void ConfigTransactionCommitRequest::set(KeyRef key, ValueRef value) {
+     if (key == configTransactionDescriptionKey) {
+         annotation.description = KeyRef(arena, value);
+     } else {
+         ConfigKey configKey = ConfigKeyRef::decodeKey(key);
+         auto knobValue = IKnobCollection::parseKnobValue(
+             configKey.knobName.toString(), value.toString(), IKnobCollection::Type::TEST);
+         mutations.emplace_back_deep(arena, configKey, knobValue.contents());
+     }
+ }
+
+ void ConfigTransactionCommitRequest::clear(KeyRef key) {
+     if (key == configTransactionDescriptionKey) {
+         annotation.description = ""_sr;
+     } else {
+         mutations.emplace_back_deep(arena, ConfigKeyRef::decodeKey(key), Optional<KnobValueRef>{});
+     }
+ }
@@ -27,22 +27,38 @@
  #include "fdbrpc/fdbrpc.h"
  #include "flow/flow.h"

- struct ConfigTransactionGetVersionReply {
-     static constexpr FileIdentifier file_identifier = 2934851;
-     ConfigTransactionGetVersionReply() = default;
-     explicit ConfigTransactionGetVersionReply(Version version) : version(version) {}
-     Version version;
+ struct ConfigGeneration {
+     // The live version of each node is monotonically increasing
+     Version liveVersion{ 0 };
+     // The committedVersion of each node is the version of the last commit made durable.
+     // Each committedVersion was previously given to clients as a liveVersion, prior to commit.
+     Version committedVersion{ 0 };
+
+     bool operator==(ConfigGeneration const&) const;
+     bool operator!=(ConfigGeneration const&) const;

      template <class Ar>
      void serialize(Ar& ar) {
-         serializer(ar, version);
+         serializer(ar, liveVersion, committedVersion);
      }
  };

- struct ConfigTransactionGetVersionRequest {
+ struct ConfigTransactionGetGenerationReply {
+     static constexpr FileIdentifier file_identifier = 2934851;
+     ConfigTransactionGetGenerationReply() = default;
+     explicit ConfigTransactionGetGenerationReply(ConfigGeneration generation) : generation(generation) {}
+     ConfigGeneration generation;
+
+     template <class Ar>
+     void serialize(Ar& ar) {
+         serializer(ar, generation);
+     }
+ };
+
+ struct ConfigTransactionGetGenerationRequest {
      static constexpr FileIdentifier file_identifier = 138941;
-     ReplyPromise<ConfigTransactionGetVersionReply> reply;
-     ConfigTransactionGetVersionRequest() = default;
+     ReplyPromise<ConfigTransactionGetGenerationReply> reply;
+     ConfigTransactionGetGenerationRequest() = default;

      template <class Ar>
      void serialize(Ar& ar) {
@ -64,45 +80,36 @@ struct ConfigTransactionGetReply {
|
|||
|
||||
struct ConfigTransactionGetRequest {
|
||||
static constexpr FileIdentifier file_identifier = 923040;
|
||||
Version version;
|
||||
ConfigGeneration generation;
|
||||
ConfigKey key;
|
||||
ReplyPromise<ConfigTransactionGetReply> reply;
|
||||
|
||||
ConfigTransactionGetRequest() = default;
|
||||
explicit ConfigTransactionGetRequest(Version version, ConfigKey key) : version(version), key(key) {}
|
||||
explicit ConfigTransactionGetRequest(ConfigGeneration generation, ConfigKey key)
|
||||
: generation(generation), key(key) {}
|
||||
|
||||
template <class Ar>
|
||||
void serialize(Ar& ar) {
|
||||
serializer(ar, version, key, reply);
|
||||
serializer(ar, generation, key, reply);
|
||||
}
|
||||
};
|
||||
|
||||
struct ConfigTransactionCommitRequest {
|
||||
static constexpr FileIdentifier file_identifier = 103841;
|
||||
Arena arena;
|
||||
Version version{ ::invalidVersion };
|
||||
ConfigGeneration generation{ ::invalidVersion, ::invalidVersion };
|
||||
VectorRef<ConfigMutationRef> mutations;
|
||||
ConfigCommitAnnotationRef annotation;
|
||||
ReplyPromise<Void> reply;
|
||||
|
||||
size_t expectedSize() const { return mutations.expectedSize() + annotation.expectedSize(); }
|
||||
|
||||
template <class Ar>
|
||||
void serialize(Ar& ar) {
|
||||
serializer(ar, arena, version, mutations, annotation, reply);
|
||||
}
|
||||
};
|
||||
|
||||
struct ConfigTransactionGetRangeReply {
|
||||
static constexpr FileIdentifier file_identifier = 430263;
|
||||
Standalone<RangeResultRef> range;
|
||||
|
||||
ConfigTransactionGetRangeReply() = default;
|
||||
explicit ConfigTransactionGetRangeReply(Standalone<RangeResultRef> range) : range(range) {}
|
||||
void set(KeyRef key, ValueRef value);
|
||||
void clear(KeyRef key);
|
||||
|
||||
template <class Ar>
|
||||
void serialize(Ar& ar) {
|
||||
serializer(ar, range);
|
||||
serializer(ar, arena, generation, mutations, annotation, reply);
|
||||
}
|
||||
};
|
||||
|
||||
|
@ -122,15 +129,15 @@ struct ConfigTransactionGetConfigClassesReply {
|
|||
|
||||
struct ConfigTransactionGetConfigClassesRequest {
|
||||
static constexpr FileIdentifier file_identifier = 7163400;
|
||||
Version version;
|
||||
ConfigGeneration generation;
|
||||
ReplyPromise<ConfigTransactionGetConfigClassesReply> reply;
|
||||
|
||||
ConfigTransactionGetConfigClassesRequest() = default;
|
||||
explicit ConfigTransactionGetConfigClassesRequest(Version version) : version(version) {}
|
||||
explicit ConfigTransactionGetConfigClassesRequest(ConfigGeneration generation) : generation(generation) {}
|
||||
|
||||
template <class Ar>
|
||||
void serialize(Ar& ar) {
|
||||
serializer(ar, version);
|
||||
serializer(ar, generation);
|
||||
}
|
||||
};
|
||||
|
||||
|
@ -149,17 +156,17 @@ struct ConfigTransactionGetKnobsReply {
|
|||
|
||||
struct ConfigTransactionGetKnobsRequest {
|
||||
static constexpr FileIdentifier file_identifier = 987410;
|
||||
Version version;
|
||||
ConfigGeneration generation;
|
||||
Optional<Key> configClass;
|
||||
ReplyPromise<ConfigTransactionGetKnobsReply> reply;
|
||||
|
||||
ConfigTransactionGetKnobsRequest() = default;
|
||||
explicit ConfigTransactionGetKnobsRequest(Version version, Optional<Key> configClass)
|
||||
: version(version), configClass(configClass) {}
|
||||
explicit ConfigTransactionGetKnobsRequest(ConfigGeneration generation, Optional<Key> configClass)
|
||||
: generation(generation), configClass(configClass) {}
|
||||
|
||||
template <class Ar>
|
||||
void serialize(Ar& ar) {
|
||||
serializer(ar, version, configClass, reply);
|
||||
serializer(ar, generation, configClass, reply);
|
||||
}
|
||||
};
|
||||
|
||||
|
@ -172,7 +179,7 @@ struct ConfigTransactionInterface {
|
|||
|
||||
public:
|
||||
static constexpr FileIdentifier file_identifier = 982485;
|
||||
struct RequestStream<ConfigTransactionGetVersionRequest> getVersion;
|
||||
struct RequestStream<ConfigTransactionGetGenerationRequest> getGeneration;
|
||||
struct RequestStream<ConfigTransactionGetRequest> get;
|
||||
struct RequestStream<ConfigTransactionGetConfigClassesRequest> getClasses;
|
||||
struct RequestStream<ConfigTransactionGetKnobsRequest> getKnobs;
|
||||
|
@ -188,6 +195,6 @@ public:
|
|||
|
||||
template <class Ar>
|
||||
void serialize(Ar& ar) {
|
||||
serializer(ar, getVersion, get, getClasses, getKnobs, commit);
|
||||
serializer(ar, getGeneration, get, getClasses, getKnobs, commit);
|
||||
}
|
||||
};
|
||||
|
|
|
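A worked illustration of the invariant the ConfigGeneration comments describe (values are made up; the assertion is an assumption drawn from those comments, not code in this commit): reads snapshot at liveVersion, while the durable committedVersion is what getCachedReadVersion reports.

    ConfigGeneration gen;
    gen.liveVersion = 7;      // monotonically increasing per node
    gen.committedVersion = 5; // last commit made durable; was handed out earlier as a liveVersion
    ASSERT(gen.committedVersion <= gen.liveVersion);
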
@@ -38,7 +38,7 @@ constexpr UID WLTOKEN_CLIENTLEADERREG_OPENDATABASE(-1, 3);
 constexpr UID WLTOKEN_PROTOCOL_INFO(-1, 10);
 constexpr UID WLTOKEN_CLIENTLEADERREG_DESCRIPTOR_MUTABLE(-1, 11);

-constexpr UID WLTOKEN_CONFIGTXN_GETVERSION(-1, 12);
+constexpr UID WLTOKEN_CONFIGTXN_GETGENERATION(-1, 12);
 constexpr UID WLTOKEN_CONFIGTXN_GET(-1, 13);
 constexpr UID WLTOKEN_CONFIGTXN_GETCLASSES(-1, 14);
 constexpr UID WLTOKEN_CONFIGTXN_GETKNOBS(-1, 15);

@@ -44,28 +44,29 @@ const Key DatabaseBackupAgent::keyDatabasesInSync = LiteralStringRef("databases_
 const int DatabaseBackupAgent::LATEST_DR_VERSION = 1;

 DatabaseBackupAgent::DatabaseBackupAgent()
-  : subspace(Subspace(databaseBackupPrefixRange.begin)), tagNames(subspace.get(BackupAgentBase::keyTagName)),
-    states(subspace.get(BackupAgentBase::keyStates)), config(subspace.get(BackupAgentBase::keyConfig)),
-    errors(subspace.get(BackupAgentBase::keyErrors)), ranges(subspace.get(BackupAgentBase::keyRanges)),
+  : subspace(Subspace(databaseBackupPrefixRange.begin)), states(subspace.get(BackupAgentBase::keyStates)),
+    config(subspace.get(BackupAgentBase::keyConfig)), errors(subspace.get(BackupAgentBase::keyErrors)),
+    ranges(subspace.get(BackupAgentBase::keyRanges)), tagNames(subspace.get(BackupAgentBase::keyTagName)),
+    sourceStates(subspace.get(BackupAgentBase::keySourceStates)),
+    sourceTagNames(subspace.get(BackupAgentBase::keyTagName)),
     taskBucket(new TaskBucket(subspace.get(BackupAgentBase::keyTasks),
                               AccessSystemKeys::True,
                               PriorityBatch::False,
                               LockAware::True)),
-    futureBucket(new FutureBucket(subspace.get(BackupAgentBase::keyFutures), AccessSystemKeys::True, LockAware::True)),
-    sourceStates(subspace.get(BackupAgentBase::keySourceStates)),
-    sourceTagNames(subspace.get(BackupAgentBase::keyTagName)) {}
+    futureBucket(new FutureBucket(subspace.get(BackupAgentBase::keyFutures), AccessSystemKeys::True, LockAware::True)) {
+}

 DatabaseBackupAgent::DatabaseBackupAgent(Database src)
-  : subspace(Subspace(databaseBackupPrefixRange.begin)), tagNames(subspace.get(BackupAgentBase::keyTagName)),
-    states(subspace.get(BackupAgentBase::keyStates)), config(subspace.get(BackupAgentBase::keyConfig)),
-    errors(subspace.get(BackupAgentBase::keyErrors)), ranges(subspace.get(BackupAgentBase::keyRanges)),
+  : subspace(Subspace(databaseBackupPrefixRange.begin)), states(subspace.get(BackupAgentBase::keyStates)),
+    config(subspace.get(BackupAgentBase::keyConfig)), errors(subspace.get(BackupAgentBase::keyErrors)),
+    ranges(subspace.get(BackupAgentBase::keyRanges)), tagNames(subspace.get(BackupAgentBase::keyTagName)),
+    sourceStates(subspace.get(BackupAgentBase::keySourceStates)),
+    sourceTagNames(subspace.get(BackupAgentBase::keyTagName)),
     taskBucket(new TaskBucket(subspace.get(BackupAgentBase::keyTasks),
                               AccessSystemKeys::True,
                               PriorityBatch::False,
                               LockAware::True)),
-    futureBucket(new FutureBucket(subspace.get(BackupAgentBase::keyFutures), AccessSystemKeys::True, LockAware::True)),
-    sourceStates(subspace.get(BackupAgentBase::keySourceStates)),
-    sourceTagNames(subspace.get(BackupAgentBase::keyTagName)) {
+    futureBucket(new FutureBucket(subspace.get(BackupAgentBase::keyFutures), AccessSystemKeys::True, LockAware::True)) {
     taskBucket->src = src;
 }

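A sketch of the Subspace composition these constructors lean on (assumed usage based on the calls visible above, not code from this commit): each agent keyspace is derived once from the root backup prefix.

    Subspace root(databaseBackupPrefixRange.begin);
    Subspace states = root.get(BackupAgentBase::keyStates); // nested under the root prefix
    Key prefix = states.key();                              // raw key prefix of the nested subspace
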
@@ -51,7 +51,7 @@ public:
 private:
     DatabaseContext* cx;
     StorageServerInfo(DatabaseContext* cx, StorageServerInterface const& interf, LocalityData const& locality)
-      : cx(cx), ReferencedInterface<StorageServerInterface>(interf, locality) {}
+      : ReferencedInterface<StorageServerInterface>(interf, locality), cx(cx) {}
 };

 struct LocationInfo : MultiInterface<ReferencedInterface<StorageServerInterface>>, FastAllocated<LocationInfo> {

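Why this commit reorders so many mem-initializer lists (a minimal standalone sketch, not from the diff): C++ always initializes bases first and members in declaration order, regardless of the order written, and compilers warn (-Wreorder) when the written order disagrees, as it did here.

    struct Base { Base(int) {} };
    struct Derived : Base {
        int x;
        // Base first, then members in declaration order: no -Wreorder warning.
        Derived(int v) : Base(v), x(v) {}
    };
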
@@ -655,9 +655,9 @@ struct RangeResultRef : VectorRef<KeyValueRef> {
     RangeResultRef() : more(false), readToBegin(false), readThroughEnd(false) {}
     RangeResultRef(Arena& p, const RangeResultRef& toCopy)
-      : more(toCopy.more), readToBegin(toCopy.readToBegin), readThroughEnd(toCopy.readThroughEnd),
+      : VectorRef<KeyValueRef>(p, toCopy), more(toCopy.more),
         readThrough(toCopy.readThrough.present() ? KeyRef(p, toCopy.readThrough.get()) : Optional<KeyRef>()),
-        VectorRef<KeyValueRef>(p, toCopy) {}
+        readToBegin(toCopy.readToBegin), readThroughEnd(toCopy.readThroughEnd) {}
     RangeResultRef(const VectorRef<KeyValueRef>& value, bool more, Optional<KeyRef> readThrough = Optional<KeyRef>())
       : VectorRef<KeyValueRef>(value), more(more), readThrough(readThrough), readToBegin(false), readThroughEnd(false) {
     }

@@ -691,9 +691,9 @@ private:
     int64_t blockEnd;
 };

-ACTOR Future<Standalone<VectorRef<KeyValueRef>>> decodeLogFileBlock(Reference<IAsyncFile> file,
-                                                                    int64_t offset,
-                                                                    int len) {
+ACTOR Future<Standalone<VectorRef<KeyValueRef>>> decodeMutationLogFileBlock(Reference<IAsyncFile> file,
+                                                                            int64_t offset,
+                                                                            int len) {
     state Standalone<StringRef> buf = makeString(len);
     int rLen = wait(file->read(mutateString(buf), len, offset));
     if (rLen != len)

@@ -3244,7 +3244,7 @@ REGISTER_TASKFUNC(RestoreRangeTaskFunc);
 // Decodes a mutation log key, which contains (hash, commitVersion, chunkNumber) and
 // returns (commitVersion, chunkNumber)
-std::pair<Version, int32_t> decodeLogKey(const StringRef& key) {
+std::pair<Version, int32_t> decodeMutationLogKey(const StringRef& key) {
     ASSERT(key.size() == sizeof(uint8_t) + sizeof(Version) + sizeof(int32_t));

     uint8_t hash;

@@ -3265,7 +3265,7 @@ std::pair<Version, int32_t> decodeLogKey(const StringRef& key) {
 // [includeVersion:uint64_t][val_length:uint32_t][mutation_1][mutation_2]...[mutation_k],
 // where a mutation is encoded as:
 // [type:uint32_t][keyLength:uint32_t][valueLength:uint32_t][param1][param2]
-std::vector<MutationRef> decodeLogValue(const StringRef& value) {
+std::vector<MutationRef> decodeMutationLogValue(const StringRef& value) {
     StringRefReader reader(value, restore_corrupted_data());

     Version protocolVersion = reader.consume<uint64_t>();

@@ -3300,72 +3300,54 @@ std::vector<MutationRef> decodeLogValue(const StringRef& value) {
     return mutations;
 }

-// Accumulates mutation log value chunks, as both a vector of chunks and as a combined chunk,
-// in chunk order, and can check the chunk set for completion or intersection with a set
-// of ranges.
-struct AccumulatedMutations {
-    AccumulatedMutations() : lastChunkNumber(-1) {}
-
-    // Add a KV pair for this mutation chunk set
-    // It will be accumulated onto serializedMutations if the chunk number is
-    // the next expected value.
-    void addChunk(int chunkNumber, const KeyValueRef& kv) {
-        if (chunkNumber == lastChunkNumber + 1) {
-            lastChunkNumber = chunkNumber;
-            serializedMutations += kv.value.toString();
-        } else {
-            lastChunkNumber = -2;
-            serializedMutations.clear();
-        }
-        kvs.push_back(kv);
-    }
-
-    // Returns true if both
-    // - 1 or more chunks were added to this set
-    // - The header of the first chunk contains a valid protocol version and a length
-    //   that matches the bytes after the header in the combined value in serializedMutations
-    bool isComplete() const {
-        if (lastChunkNumber >= 0) {
-            StringRefReader reader(serializedMutations, restore_corrupted_data());
-
-            Version protocolVersion = reader.consume<uint64_t>();
-            if (protocolVersion <= 0x0FDB00A200090001) {
-                throw incompatible_protocol_version();
-            }
-
-            uint32_t vLen = reader.consume<uint32_t>();
-            return vLen == reader.remainder().size();
-        }
-
-        return false;
-    }
-
-    // Returns true if a complete chunk contains any MutationRefs which intersect with any
-    // range in ranges.
-    // It is undefined behavior to run this if isComplete() does not return true.
-    bool matchesAnyRange(const std::vector<KeyRange>& ranges) const {
-        std::vector<MutationRef> mutations = decodeLogValue(serializedMutations);
-        for (auto& m : mutations) {
-            for (auto& r : ranges) {
-                if (m.type == MutationRef::ClearRange) {
-                    if (r.intersects(KeyRangeRef(m.param1, m.param2))) {
-                        return true;
-                    }
-                } else {
-                    if (r.contains(m.param1)) {
-                        return true;
-                    }
-                }
-            }
-        }
-
-        return false;
-    }
-
-    std::vector<KeyValueRef> kvs;
-    std::string serializedMutations;
-    int lastChunkNumber;
-};
+void AccumulatedMutations::addChunk(int chunkNumber, const KeyValueRef& kv) {
+    if (chunkNumber == lastChunkNumber + 1) {
+        lastChunkNumber = chunkNumber;
+        serializedMutations += kv.value.toString();
+    } else {
+        lastChunkNumber = -2;
+        serializedMutations.clear();
+    }
+    kvs.push_back(kv);
+}
+
+bool AccumulatedMutations::isComplete() const {
+    if (lastChunkNumber >= 0) {
+        StringRefReader reader(serializedMutations, restore_corrupted_data());
+
+        Version protocolVersion = reader.consume<uint64_t>();
+        if (protocolVersion <= 0x0FDB00A200090001) {
+            throw incompatible_protocol_version();
+        }
+
+        uint32_t vLen = reader.consume<uint32_t>();
+        return vLen == reader.remainder().size();
+    }
+
+    return false;
+}
+
+// Returns true if a complete chunk contains any MutationRefs which intersect with any
+// range in ranges.
+// It is undefined behavior to run this if isComplete() does not return true.
+bool AccumulatedMutations::matchesAnyRange(const std::vector<KeyRange>& ranges) const {
+    std::vector<MutationRef> mutations = decodeMutationLogValue(serializedMutations);
+    for (auto& m : mutations) {
+        for (auto& r : ranges) {
+            if (m.type == MutationRef::ClearRange) {
+                if (r.intersects(KeyRangeRef(m.param1, m.param2))) {
+                    return true;
+                }
+            } else {
+                if (r.contains(m.param1)) {
+                    return true;
+                }
+            }
+        }
+    }
+
+    return false;
+}

 // Returns a vector of filtered KV refs from data which are either part of incomplete mutation groups OR complete
 // and have data relevant to one of the KV ranges in ranges

@@ -3373,7 +3355,7 @@ std::vector<KeyValueRef> filterLogMutationKVPairs(VectorRef<KeyValueRef> data, c
     std::unordered_map<Version, AccumulatedMutations> mutationBlocksByVersion;

     for (auto& kv : data) {
-        auto versionAndChunkNumber = decodeLogKey(kv.key);
+        auto versionAndChunkNumber = decodeMutationLogKey(kv.key);
         mutationBlocksByVersion[versionAndChunkNumber.first].addChunk(versionAndChunkNumber.second, kv);
     }

@@ -3444,7 +3426,7 @@ struct RestoreLogDataTaskFunc : RestoreFileTaskFuncBase {
     state Key mutationLogPrefix = restore.mutationLogPrefix();
     state Reference<IAsyncFile> inFile = wait(bc->readFile(logFile.fileName));
-    state Standalone<VectorRef<KeyValueRef>> dataOriginal = wait(decodeLogFileBlock(inFile, readOffset, readLen));
+    state Standalone<VectorRef<KeyValueRef>> dataOriginal = wait(decodeMutationLogFileBlock(inFile, readOffset, readLen));

     // Filter the KV pairs extracted from the log file block to remove any records known to not be needed for this
     // restore based on the restore range set.

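A usage sketch of the chunk-accumulation pipeline above (assumed driver code mirroring what filterLogMutationKVPairs does internally; data and restoreRanges stand in for the caller's inputs): group log KV pairs by commit version, then keep only complete chunk sets that touch the restore ranges.

    std::unordered_map<Version, AccumulatedMutations> byVersion;
    for (auto& kv : data) {
        auto [version, chunk] = decodeMutationLogKey(kv.key);
        byVersion[version].addChunk(chunk, kv); // chunks must arrive in order to stay "complete"
    }
    for (auto& [version, acc] : byVersion) {
        if (acc.isComplete() && acc.matchesAnyRange(restoreRanges)) {
            // re-apply acc's mutations for this version
        }
    }
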
@@ -18,6 +18,8 @@
  * limitations under the License.
  */

+#include <vector>
+
 #include "fdbclient/IConfigTransaction.h"
 #include "fdbclient/SimpleConfigTransaction.h"
 #include "fdbclient/PaxosConfigTransaction.h"

@@ -25,3 +27,7 @@
 Reference<IConfigTransaction> IConfigTransaction::createTestSimple(ConfigTransactionInterface const& cti) {
     return makeReference<SimpleConfigTransaction>(cti);
 }
+
+Reference<IConfigTransaction> IConfigTransaction::createTestPaxos(std::vector<ConfigTransactionInterface> const& ctis) {
+    return makeReference<PaxosConfigTransaction>(ctis);
+}

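How a test might obtain each flavor (assumed usage; coordinatorAddress is a hypothetical NetworkAddress, and the per-address constructor is the one shown earlier in this diff):

    std::vector<ConfigTransactionInterface> ctis = { ConfigTransactionInterface(coordinatorAddress) };
    Reference<IConfigTransaction> simple = IConfigTransaction::createTestSimple(ctis[0]); // single node
    Reference<IConfigTransaction> paxos = IConfigTransaction::createTestPaxos(ctis);      // quorum-based path
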
@@ -40,6 +40,7 @@ public:
     virtual ~IConfigTransaction() = default;

     static Reference<IConfigTransaction> createTestSimple(ConfigTransactionInterface const&);
+    static Reference<IConfigTransaction> createTestPaxos(std::vector<ConfigTransactionInterface> const&);

     // Not implemented:
     void setVersion(Version) override { throw client_invalid_operation(); }

@@ -52,16 +52,16 @@ public:
     virtual Optional<Version> getCachedReadVersion() const = 0;
     virtual Future<Optional<Value>> get(const Key& key, Snapshot = Snapshot::False) = 0;
     virtual Future<Key> getKey(const KeySelector& key, Snapshot = Snapshot::False) = 0;
-    virtual Future<Standalone<RangeResultRef>> getRange(const KeySelector& begin,
-                                                        const KeySelector& end,
-                                                        int limit,
-                                                        Snapshot = Snapshot::False,
-                                                        Reverse = Reverse::False) = 0;
-    virtual Future<Standalone<RangeResultRef>> getRange(KeySelector begin,
-                                                        KeySelector end,
-                                                        GetRangeLimits limits,
-                                                        Snapshot = Snapshot::False,
-                                                        Reverse = Reverse::False) = 0;
+    virtual Future<RangeResult> getRange(const KeySelector& begin,
+                                         const KeySelector& end,
+                                         int limit,
+                                         Snapshot = Snapshot::False,
+                                         Reverse = Reverse::False) = 0;
+    virtual Future<RangeResult> getRange(KeySelector begin,
+                                         KeySelector end,
+                                         GetRangeLimits limits,
+                                         Snapshot = Snapshot::False,
+                                         Reverse = Reverse::False) = 0;
     virtual Future<Standalone<VectorRef<const char*>>> getAddressesForKey(Key const& key) = 0;
     virtual Future<Standalone<VectorRef<KeyRef>>> getRangeSplitPoints(KeyRange const& range, int64_t chunkSize) = 0;
     virtual Future<int64_t> getEstimatedRangeSizeBytes(KeyRange const& keys) = 0;

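The RangeResult spelling adopted throughout these signatures appears to be shorthand for the old return type rather than a new class; the declaration below is an assumption worth verifying against fdbclient/FDBTypes.h, but it explains why the overrides elsewhere in this diff remain drop-in compatible.

    using RangeResult = Standalone<RangeResultRef>;
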
@@ -58,7 +58,7 @@ struct MonitorLeaderInfo {
     MonitorLeaderInfo() : hasConnected(false) {}
     explicit MonitorLeaderInfo(Reference<ClusterConnectionFile> intermediateConnFile)
-      : intermediateConnFile(intermediateConnFile), hasConnected(false) {}
+      : hasConnected(false), intermediateConnFile(intermediateConnFile) {}
 };

 // Monitors the given coordination group's leader election process and provides a best current guess

@@ -281,7 +281,7 @@ template <class S, class T>
 class FlatMapSingleAssignmentVar final : public ThreadSingleAssignmentVar<T>, ThreadCallback {
 public:
     FlatMapSingleAssignmentVar(ThreadFuture<S> source, std::function<ErrorOr<ThreadFuture<T>>(ErrorOr<S>)> mapValue)
-      : source(source), mapValue(mapValue), cancelled(false), released(false) {
+      : source(source), cancelled(false), released(false), mapValue(mapValue) {
         ThreadSingleAssignmentVar<T>::addref();

         int userParam;

@@ -396,7 +396,7 @@ void loadClientFunction(T* fp, void* lib, std::string libPath, const char* funct
 }

 DLApi::DLApi(std::string fdbCPath, bool unlinkOnLoad)
-  : api(new FdbCApi()), fdbCPath(fdbCPath), unlinkOnLoad(unlinkOnLoad), networkSetup(false) {}
+  : fdbCPath(fdbCPath), api(new FdbCApi()), unlinkOnLoad(unlinkOnLoad), networkSetup(false) {}

 // Loads client API functions (definitions are in FdbCApi struct)
 void DLApi::init() {

@@ -1014,8 +1014,8 @@ ThreadFuture<ProtocolVersion> MultiVersionDatabase::getServerProtocol(Optional<P
 }

 MultiVersionDatabase::DatabaseState::DatabaseState(std::string clusterFilePath, Reference<IDatabase> versionMonitorDb)
-  : clusterFilePath(clusterFilePath), versionMonitorDb(versionMonitorDb),
-    dbVar(new ThreadSafeAsyncVar<Reference<IDatabase>>(Reference<IDatabase>(nullptr))), closed(false) {}
+  : dbVar(new ThreadSafeAsyncVar<Reference<IDatabase>>(Reference<IDatabase>(nullptr))),
+    clusterFilePath(clusterFilePath), versionMonitorDb(versionMonitorDb), closed(false) {}

 // Adds a client (local or externally loaded) that can be used to connect to the cluster
 void MultiVersionDatabase::DatabaseState::addClient(Reference<ClientInfo> client) {

@@ -1912,8 +1912,8 @@ void MultiVersionApi::loadEnvironmentVariableNetworkOptions() {
 }

 MultiVersionApi::MultiVersionApi()
-  : bypassMultiClientApi(false), networkStartSetup(false), networkSetup(false), callbackOnMainThread(true),
-    externalClient(false), localClientDisabled(false), apiVersion(0), envOptionsLoaded(false), threadCount(0) {}
+  : callbackOnMainThread(true), localClientDisabled(false), networkStartSetup(false), networkSetup(false),
+    bypassMultiClientApi(false), externalClient(false), apiVersion(0), threadCount(0), envOptionsLoaded(false) {}

 MultiVersionApi* MultiVersionApi::api = new MultiVersionApi();

@@ -116,10 +116,10 @@ TLSConfig tlsConfig(TLSEndpointType::CLIENT);
 // The default values, TRACE_DEFAULT_ROLL_SIZE and TRACE_DEFAULT_MAX_LOGS_SIZE are located in Trace.h.
 NetworkOptions::NetworkOptions()
-  : localAddress(""), clusterFile(""), traceDirectory(Optional<std::string>()), traceRollSize(TRACE_DEFAULT_ROLL_SIZE),
-    traceMaxLogsSize(TRACE_DEFAULT_MAX_LOGS_SIZE), traceLogGroup("default"), traceFormat("xml"),
-    traceClockSource("now"), runLoopProfilingEnabled(false),
-    supportedVersions(new ReferencedObject<Standalone<VectorRef<ClientVersionRef>>>()) {}
+  : traceRollSize(TRACE_DEFAULT_ROLL_SIZE), traceMaxLogsSize(TRACE_DEFAULT_MAX_LOGS_SIZE), traceLogGroup("default"),
+    traceFormat("xml"), traceClockSource("now"),
+    supportedVersions(new ReferencedObject<Standalone<VectorRef<ClientVersionRef>>>()), runLoopProfilingEnabled(false) {
+}

 static const Key CLIENT_LATENCY_INFO_PREFIX = LiteralStringRef("client_latency/");
 static const Key CLIENT_LATENCY_INFO_CTR_PREFIX = LiteralStringRef("client_latency_counter/");

@@ -1094,11 +1094,10 @@ DatabaseContext::DatabaseContext(Reference<AsyncVar<Reference<ClusterConnectionF
                                  IsInternal internal,
                                  int apiVersion,
                                  IsSwitchable switchable)
-  : connectionFile(connectionFile), clientInfo(clientInfo), coordinator(coordinator),
-    clientInfoMonitor(clientInfoMonitor), taskID(taskID), clientLocality(clientLocality),
-    enableLocalityLoadBalance(enableLocalityLoadBalance), lockAware(lockAware), apiVersion(apiVersion),
-    switchable(switchable), proxyProvisional(false), cc("TransactionMetrics"),
-    transactionReadVersions("ReadVersions", cc), transactionReadVersionsThrottled("ReadVersionsThrottled", cc),
+  : lockAware(lockAware), switchable(switchable), connectionFile(connectionFile), proxyProvisional(false),
+    clientLocality(clientLocality), enableLocalityLoadBalance(enableLocalityLoadBalance), internal(internal),
+    cc("TransactionMetrics"), transactionReadVersions("ReadVersions", cc),
+    transactionReadVersionsThrottled("ReadVersionsThrottled", cc),
     transactionReadVersionsCompleted("ReadVersionsCompleted", cc),
     transactionReadVersionBatches("ReadVersionBatches", cc),
     transactionBatchReadVersions("BatchPriorityReadVersions", cc),

@@ -1123,11 +1122,12 @@ DatabaseContext::DatabaseContext(Reference<AsyncVar<Reference<ClusterConnectionF
     transactionStatusRequests("StatusRequests", cc), transactionsTooOld("TooOld", cc),
     transactionsFutureVersions("FutureVersions", cc), transactionsNotCommitted("NotCommitted", cc),
     transactionsMaybeCommitted("MaybeCommitted", cc), transactionsResourceConstrained("ResourceConstrained", cc),
-    transactionsThrottled("Throttled", cc), transactionsProcessBehind("ProcessBehind", cc), outstandingWatches(0),
-    latencies(1000), readLatencies(1000), commitLatencies(1000), GRVLatencies(1000), mutationsPerCommit(1000),
-    bytesPerCommit(1000), mvCacheInsertLocation(0), healthMetricsLastUpdated(0), detailedHealthMetricsLastUpdated(0),
-    internal(internal), transactionTracingEnabled(true), smoothMidShardSize(CLIENT_KNOBS->SHARD_STAT_SMOOTH_AMOUNT),
-    transactionsExpensiveClearCostEstCount("ExpensiveClearCostEstCount", cc),
+    transactionsProcessBehind("ProcessBehind", cc), transactionsThrottled("Throttled", cc),
+    transactionsExpensiveClearCostEstCount("ExpensiveClearCostEstCount", cc), latencies(1000), readLatencies(1000),
+    commitLatencies(1000), GRVLatencies(1000), mutationsPerCommit(1000), bytesPerCommit(1000), outstandingWatches(0),
+    transactionTracingEnabled(true), taskID(taskID), clientInfo(clientInfo), clientInfoMonitor(clientInfoMonitor),
+    coordinator(coordinator), apiVersion(apiVersion), mvCacheInsertLocation(0), healthMetricsLastUpdated(0),
+    detailedHealthMetricsLastUpdated(0), smoothMidShardSize(CLIENT_KNOBS->SHARD_STAT_SMOOTH_AMOUNT),
     specialKeySpace(std::make_unique<SpecialKeySpace>(specialKeys.begin, specialKeys.end, /* test */ false)) {
     dbId = deterministicRandom()->randomUniqueID();
     connected = (clientInfo->get().commitProxies.size() && clientInfo->get().grvProxies.size())

@@ -1340,8 +1340,8 @@ DatabaseContext::DatabaseContext(Reference<AsyncVar<Reference<ClusterConnectionF
 }

 DatabaseContext::DatabaseContext(const Error& err)
-  : deferredError(err), cc("TransactionMetrics"), transactionReadVersions("ReadVersions", cc),
-    transactionReadVersionsThrottled("ReadVersionsThrottled", cc),
+  : deferredError(err), internal(IsInternal::False), cc("TransactionMetrics"),
+    transactionReadVersions("ReadVersions", cc), transactionReadVersionsThrottled("ReadVersionsThrottled", cc),
     transactionReadVersionsCompleted("ReadVersionsCompleted", cc),
     transactionReadVersionBatches("ReadVersionBatches", cc),
     transactionBatchReadVersions("BatchPriorityReadVersions", cc),

@@ -1366,11 +1366,10 @@ DatabaseContext::DatabaseContext(const Error& err)
     transactionStatusRequests("StatusRequests", cc), transactionsTooOld("TooOld", cc),
     transactionsFutureVersions("FutureVersions", cc), transactionsNotCommitted("NotCommitted", cc),
     transactionsMaybeCommitted("MaybeCommitted", cc), transactionsResourceConstrained("ResourceConstrained", cc),
-    transactionsThrottled("Throttled", cc), transactionsProcessBehind("ProcessBehind", cc), latencies(1000),
-    readLatencies(1000), commitLatencies(1000), GRVLatencies(1000), mutationsPerCommit(1000), bytesPerCommit(1000),
-    smoothMidShardSize(CLIENT_KNOBS->SHARD_STAT_SMOOTH_AMOUNT),
-    transactionsExpensiveClearCostEstCount("ExpensiveClearCostEstCount", cc), internal(IsInternal::False),
-    transactionTracingEnabled(true) {}
+    transactionsProcessBehind("ProcessBehind", cc), transactionsThrottled("Throttled", cc),
+    transactionsExpensiveClearCostEstCount("ExpensiveClearCostEstCount", cc), latencies(1000), readLatencies(1000),
+    commitLatencies(1000), GRVLatencies(1000), mutationsPerCommit(1000), bytesPerCommit(1000),
+    transactionTracingEnabled(true), smoothMidShardSize(CLIENT_KNOBS->SHARD_STAT_SMOOTH_AMOUNT) {}

 // Static constructor used by server processes to create a DatabaseContext
 // For internal (fdbserver) use only

@@ -1699,7 +1698,8 @@ Database Database::createDatabase(Reference<ClusterConnectionFile> connFile,
                          networkOptions.traceDirectory.get(),
                          "trace",
                          networkOptions.traceLogGroup,
-                         networkOptions.traceFileIdentifier);
+                         networkOptions.traceFileIdentifier,
+                         networkOptions.tracePartialFileSuffix);

         TraceEvent("ClientStart")
             .detail("SourceVersion", getSourceVersion())

@@ -1857,6 +1857,10 @@ void setNetworkOption(FDBNetworkOptions::Option option, Optional<StringRef> valu
             throw invalid_option_value();
         }
         break;
+    case FDBNetworkOptions::TRACE_PARTIAL_FILE_SUFFIX:
+        validateOptionValuePresent(value);
+        networkOptions.tracePartialFileSuffix = value.get().toString();
+        break;
     case FDBNetworkOptions::KNOB: {
         validateOptionValuePresent(value);

@@ -4093,9 +4097,9 @@ Transaction::Transaction()
   : info(TaskPriority::DefaultEndpoint, generateSpanID(true)), span(info.spanID, "Transaction"_loc) {}

 Transaction::Transaction(Database const& cx)
-  : cx(cx), info(cx->taskID, generateSpanID(cx->transactionTracingEnabled)), backoff(CLIENT_KNOBS->DEFAULT_BACKOFF),
-    committedVersion(invalidVersion), versionstampPromise(Promise<Standalone<StringRef>>()), options(cx), numErrors(0),
-    trLogInfo(createTrLogInfoProbabilistically(cx)), tr(info.spanID), span(info.spanID, "Transaction"_loc) {
+  : info(cx->taskID, generateSpanID(cx->transactionTracingEnabled)), numErrors(0), options(cx),
+    span(info.spanID, "Transaction"_loc), trLogInfo(createTrLogInfoProbabilistically(cx)), cx(cx),
+    backoff(CLIENT_KNOBS->DEFAULT_BACKOFF), committedVersion(invalidVersion), tr(info.spanID) {
     if (DatabaseContext::debugUseTags) {
         debugAddTags(this);
     }

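A client-side sketch of the new network option (option names come from this diff; the trace directory and suffix values are illustrative assumptions): files still being written get the suffix, so log shippers can skip partial trace files.

    setNetworkOption(FDBNetworkOptions::TRACE_ENABLE, "/var/log/fdb"_sr);         // assumed trace directory
    setNetworkOption(FDBNetworkOptions::TRACE_PARTIAL_FILE_SUFFIX, ".part"_sr);   // suffix removed once the file is closed
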
@@ -68,6 +68,7 @@ struct NetworkOptions {
     std::string traceFormat;
     std::string traceClockSource;
     std::string traceFileIdentifier;
+    std::string tracePartialFileSuffix;
     Optional<bool> logClientInfo;
     Reference<ReferencedObject<Standalone<VectorRef<ClientVersionRef>>>> supportedVersions;
     bool runLoopProfilingEnabled;

@@ -189,7 +190,7 @@ struct TransactionLogInfo : public ReferenceCounted<TransactionLogInfo>, NonCopy
     TransactionLogInfo() : logLocation(DONT_LOG), maxFieldLength(0) {}
     TransactionLogInfo(LoggingLocation location) : logLocation(location), maxFieldLength(0) {}
     TransactionLogInfo(std::string id, LoggingLocation location)
-      : logLocation(location), identifier(id), maxFieldLength(0) {}
+      : logLocation(location), maxFieldLength(0), identifier(id) {}

     void setIdentifier(std::string id) { identifier = id; }
     void logTo(LoggingLocation loc) { logLocation = logLocation | loc; }

@@ -231,10 +232,10 @@ struct Watch : public ReferenceCounted<Watch>, NonCopyable {
     Promise<Void> onSetWatchTrigger;
     Future<Void> watchFuture;

-    Watch() : watchFuture(Never()), valuePresent(false), setPresent(false) {}
-    Watch(Key key) : key(key), watchFuture(Never()), valuePresent(false), setPresent(false) {}
+    Watch() : valuePresent(false), setPresent(false), watchFuture(Never()) {}
+    Watch(Key key) : key(key), valuePresent(false), setPresent(false), watchFuture(Never()) {}
     Watch(Key key, Optional<Value> val)
-      : key(key), value(val), watchFuture(Never()), valuePresent(true), setPresent(false) {}
+      : key(key), value(val), valuePresent(true), setPresent(false), watchFuture(Never()) {}

     void setWatch(Future<Void> watchFuture);
 };

@@ -18,118 +18,269 @@
  * limitations under the License.
  */

+#include "fdbclient/DatabaseContext.h"
 #include "fdbclient/PaxosConfigTransaction.h"
 #include "flow/actorcompiler.h" // must be last include

-class PaxosConfigTransactionImpl {};
+class PaxosConfigTransactionImpl {
+    ConfigTransactionCommitRequest toCommit;
+    Future<ConfigGeneration> getGenerationFuture;
+    std::vector<ConfigTransactionInterface> ctis;
+    int numRetries{ 0 };
+    bool committed{ false };
+    Optional<UID> dID;
+    Database cx;
+
+    ACTOR static Future<ConfigGeneration> getGeneration(PaxosConfigTransactionImpl* self) {
+        state std::vector<Future<ConfigTransactionGetGenerationReply>> getGenerationFutures;
+        getGenerationFutures.reserve(self->ctis.size());
+        for (auto const& cti : self->ctis) {
+            getGenerationFutures.push_back(cti.getGeneration.getReply(ConfigTransactionGetGenerationRequest{}));
+        }
+        // FIXME: Must tolerate failures and disagreement
+        wait(waitForAll(getGenerationFutures));
+        return getGenerationFutures[0].get().generation;
+    }
+
+    ACTOR static Future<Optional<Value>> get(PaxosConfigTransactionImpl* self, Key key) {
+        if (!self->getGenerationFuture.isValid()) {
+            self->getGenerationFuture = getGeneration(self);
+        }
+        state ConfigKey configKey = ConfigKey::decodeKey(key);
+        ConfigGeneration generation = wait(self->getGenerationFuture);
+        // TODO: Load balance
+        ConfigTransactionGetReply reply =
+            wait(self->ctis[0].get.getReply(ConfigTransactionGetRequest{ generation, configKey }));
+        if (reply.value.present()) {
+            return reply.value.get().toValue();
+        } else {
+            return Optional<Value>{};
+        }
+    }
+
+    ACTOR static Future<RangeResult> getConfigClasses(PaxosConfigTransactionImpl* self) {
+        if (!self->getGenerationFuture.isValid()) {
+            self->getGenerationFuture = getGeneration(self);
+        }
+        ConfigGeneration generation = wait(self->getGenerationFuture);
+        // TODO: Load balance
+        ConfigTransactionGetConfigClassesReply reply =
+            wait(self->ctis[0].getClasses.getReply(ConfigTransactionGetConfigClassesRequest{ generation }));
+        RangeResult result;
+        result.reserve(result.arena(), reply.configClasses.size());
+        for (const auto& configClass : reply.configClasses) {
+            result.push_back_deep(result.arena(), KeyValueRef(configClass, ""_sr));
+        }
+        return result;
+    }
+
+    ACTOR static Future<RangeResult> getKnobs(PaxosConfigTransactionImpl* self, Optional<Key> configClass) {
+        if (!self->getGenerationFuture.isValid()) {
+            self->getGenerationFuture = getGeneration(self);
+        }
+        ConfigGeneration generation = wait(self->getGenerationFuture);
+        // TODO: Load balance
+        ConfigTransactionGetKnobsReply reply =
+            wait(self->ctis[0].getKnobs.getReply(ConfigTransactionGetKnobsRequest{ generation, configClass }));
+        RangeResult result;
+        result.reserve(result.arena(), reply.knobNames.size());
+        for (const auto& knobName : reply.knobNames) {
+            result.push_back_deep(result.arena(), KeyValueRef(knobName, ""_sr));
+        }
+        return result;
+    }
+
+    ACTOR static Future<Void> commit(PaxosConfigTransactionImpl* self) {
+        if (!self->getGenerationFuture.isValid()) {
+            self->getGenerationFuture = getGeneration(self);
+        }
+        wait(store(self->toCommit.generation, self->getGenerationFuture));
+        self->toCommit.annotation.timestamp = now();
+        std::vector<Future<Void>> commitFutures;
+        commitFutures.reserve(self->ctis.size());
+        for (const auto& cti : self->ctis) {
+            commitFutures.push_back(cti.commit.getReply(self->toCommit));
+        }
+        // FIXME: Must tolerate failures and disagreement
+        wait(quorum(commitFutures, commitFutures.size() / 2 + 1));
+        self->committed = true;
+        return Void();
+    }
+
+public:
+    Future<Version> getReadVersion() {
+        if (!getGenerationFuture.isValid()) {
+            getGenerationFuture = getGeneration(this);
+        }
+        return map(getGenerationFuture, [](auto const& gen) { return gen.committedVersion; });
+    }
+
+    Optional<Version> getCachedReadVersion() const {
+        if (getGenerationFuture.isValid() && getGenerationFuture.isReady() && !getGenerationFuture.isError()) {
+            return getGenerationFuture.get().committedVersion;
+        } else {
+            return {};
+        }
+    }
+
+    Version getCommittedVersion() const { return committed ? getGenerationFuture.get().liveVersion : ::invalidVersion; }
+
+    int64_t getApproximateSize() const { return toCommit.expectedSize(); }
+
+    void set(KeyRef key, ValueRef value) { toCommit.set(key, value); }
+
+    void clear(KeyRef key) { toCommit.clear(key); }
+
+    Future<Optional<Value>> get(Key const& key) { return get(this, key); }
+
+    Future<RangeResult> getRange(KeyRangeRef keys) {
+        if (keys == configClassKeys) {
+            return getConfigClasses(this);
+        } else if (keys == globalConfigKnobKeys) {
+            return getKnobs(this, {});
+        } else if (configKnobKeys.contains(keys) && keys.singleKeyRange()) {
+            const auto configClass = keys.begin.removePrefix(configKnobKeys.begin);
+            return getKnobs(this, configClass);
+        } else {
+            throw invalid_config_db_range_read();
+        }
+    }
+
+    Future<Void> onError(Error const& e) {
+        // TODO: Improve this:
+        if (e.code() == error_code_transaction_too_old) {
+            reset();
+            return delay((1 << numRetries++) * 0.01 * deterministicRandom()->random01());
+        }
+        throw e;
+    }
+
+    void debugTransaction(UID dID) { this->dID = dID; }
+
+    void reset() {
+        getGenerationFuture = Future<ConfigGeneration>{};
+        toCommit = {};
+        committed = false;
+    }
+
+    void fullReset() {
+        numRetries = 0;
+        dID = {};
+        reset();
+    }
+
+    void checkDeferredError(Error const& deferredError) const {
+        if (deferredError.code() != invalid_error_code) {
+            throw deferredError;
+        }
+        if (cx.getPtr()) {
+            cx->checkDeferredError();
+        }
+    }
+
+    Future<Void> commit() { return commit(this); }
+
+    PaxosConfigTransactionImpl(Database const& cx) : cx(cx) {
+        auto coordinators = cx->getConnectionFile()->getConnectionString().coordinators();
+        ctis.reserve(coordinators.size());
+        for (const auto& coordinator : coordinators) {
+            ctis.emplace_back(coordinator);
+        }
+    }
+
+    PaxosConfigTransactionImpl(std::vector<ConfigTransactionInterface> const& ctis) : ctis(ctis) {}
+};

 Future<Version> PaxosConfigTransaction::getReadVersion() {
-    // TODO: Implement
-    return ::invalidVersion;
+    return impl().getReadVersion();
 }

 Optional<Version> PaxosConfigTransaction::getCachedReadVersion() const {
-    // TODO: Implement
-    return ::invalidVersion;
+    return impl().getCachedReadVersion();
 }

-Future<Optional<Value>> PaxosConfigTransaction::get(Key const& key, Snapshot snapshot) {
-    // TODO: Implement
-    return Optional<Value>{};
+Future<Optional<Value>> PaxosConfigTransaction::get(Key const& key, Snapshot) {
+    return impl().get(key);
 }

-Future<Standalone<RangeResultRef>> PaxosConfigTransaction::getRange(KeySelector const& begin,
-                                                                    KeySelector const& end,
-                                                                    int limit,
-                                                                    Snapshot snapshot,
-                                                                    Reverse reverse) {
-    // TODO: Implement
-    ASSERT(false);
-    return Standalone<RangeResultRef>{};
+Future<RangeResult> PaxosConfigTransaction::getRange(KeySelector const& begin,
+                                                     KeySelector const& end,
+                                                     int limit,
+                                                     Snapshot snapshot,
+                                                     Reverse reverse) {
+    if (reverse) {
+        throw client_invalid_operation();
+    }
+    return impl().getRange(KeyRangeRef(begin.getKey(), end.getKey()));
 }

-Future<Standalone<RangeResultRef>> PaxosConfigTransaction::getRange(KeySelector begin,
-                                                                    KeySelector end,
-                                                                    GetRangeLimits limits,
-                                                                    Snapshot snapshot,
-                                                                    Reverse reverse) {
-    // TODO: Implement
-    ASSERT(false);
-    return Standalone<RangeResultRef>{};
+Future<RangeResult> PaxosConfigTransaction::getRange(KeySelector begin,
+                                                     KeySelector end,
+                                                     GetRangeLimits limits,
+                                                     Snapshot snapshot,
+                                                     Reverse reverse) {
+    if (reverse) {
+        throw client_invalid_operation();
+    }
+    return impl().getRange(KeyRangeRef(begin.getKey(), end.getKey()));
 }

 void PaxosConfigTransaction::set(KeyRef const& key, ValueRef const& value) {
-    // TODO: Implememnt
-    ASSERT(false);
+    return impl().set(key, value);
 }

 void PaxosConfigTransaction::clear(KeyRef const& key) {
-    // TODO: Implememnt
-    ASSERT(false);
+    return impl().clear(key);
 }

 Future<Void> PaxosConfigTransaction::commit() {
-    // TODO: Implememnt
-    ASSERT(false);
-    return Void();
+    return impl().commit();
 }

 Version PaxosConfigTransaction::getCommittedVersion() const {
-    // TODO: Implement
-    ASSERT(false);
-    return ::invalidVersion;
+    return impl().getCommittedVersion();
 }

 int64_t PaxosConfigTransaction::getApproximateSize() const {
-    // TODO: Implement
-    ASSERT(false);
-    return 0;
+    return impl().getApproximateSize();
 }

 void PaxosConfigTransaction::setOption(FDBTransactionOptions::Option option, Optional<StringRef> value) {
-    // TODO: Implement
-    ASSERT(false);
+    // TODO: Support using this option to determine atomicity
 }

 Future<Void> PaxosConfigTransaction::onError(Error const& e) {
-    // TODO: Implement
-    ASSERT(false);
-    return Void();
+    return impl().onError(e);
 }

 void PaxosConfigTransaction::cancel() {
-    // TODO: Implement
-    ASSERT(false);
+    // TODO: Implement someday
+    throw client_invalid_operation();
 }

 void PaxosConfigTransaction::reset() {
-    // TODO: Implement
-    ASSERT(false);
+    impl().reset();
 }

 void PaxosConfigTransaction::fullReset() {
-    // TODO: Implement
-    ASSERT(false);
+    impl().fullReset();
 }

 void PaxosConfigTransaction::debugTransaction(UID dID) {
-    // TODO: Implement
-    ASSERT(false);
+    impl().debugTransaction(dID);
 }

 void PaxosConfigTransaction::checkDeferredError() const {
-    // TODO: Implement
-    ASSERT(false);
+    impl().checkDeferredError(deferredError);
 }

-PaxosConfigTransaction::PaxosConfigTransaction() {
-    // TODO: Implement
-    ASSERT(false);
-}
+PaxosConfigTransaction::PaxosConfigTransaction(std::vector<ConfigTransactionInterface> const& ctis)
+  : _impl(std::make_unique<PaxosConfigTransactionImpl>(ctis)) {}
+
+PaxosConfigTransaction::PaxosConfigTransaction() = default;

 PaxosConfigTransaction::~PaxosConfigTransaction() = default;

 void PaxosConfigTransaction::setDatabase(Database const& cx) {
-    // TODO: Implement
-    ASSERT(false);
+    _impl = std::make_unique<PaxosConfigTransactionImpl>(cx);
 }

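The quorum arithmetic used by the commit actor above, as a worked check (the snippet is illustrative, not code from this commit): waiting on n/2 + 1 replies guarantees any two successful commit quorums intersect in at least one coordinator.

    int n = 5;             // coordinators
    int q = n / 2 + 1;     // quorum size: 3
    ASSERT(q + q > n);     // 3 + 3 > 5, so two quorums always share a member
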
@@ -33,6 +33,7 @@ class PaxosConfigTransaction final : public IConfigTransaction, public FastAlloc
     PaxosConfigTransactionImpl& impl() { return *_impl; }

 public:
+    PaxosConfigTransaction(std::vector<ConfigTransactionInterface> const&);
     PaxosConfigTransaction();
     ~PaxosConfigTransaction();
     void setDatabase(Database const&) override;

@@ -40,16 +41,16 @@ public:
     Optional<Version> getCachedReadVersion() const override;

     Future<Optional<Value>> get(Key const& key, Snapshot = Snapshot::False) override;
-    Future<Standalone<RangeResultRef>> getRange(KeySelector const& begin,
-                                                KeySelector const& end,
-                                                int limit,
-                                                Snapshot = Snapshot::False,
-                                                Reverse = Reverse::False) override;
-    Future<Standalone<RangeResultRef>> getRange(KeySelector begin,
-                                                KeySelector end,
-                                                GetRangeLimits limits,
-                                                Snapshot = Snapshot::False,
-                                                Reverse = Reverse::False) override;
+    Future<RangeResult> getRange(KeySelector const& begin,
+                                 KeySelector const& end,
+                                 int limit,
+                                 Snapshot = Snapshot::False,
+                                 Reverse = Reverse::False) override;
+    Future<RangeResult> getRange(KeySelector begin,
+                                 KeySelector end,
+                                 GetRangeLimits limits,
+                                 Snapshot = Snapshot::False,
+                                 Reverse = Reverse::False) override;
     void set(KeyRef const& key, ValueRef const& value) override;
     void clear(KeyRangeRef const&) override { throw client_invalid_operation(); }
     void clear(KeyRef const&) override;

@@ -28,7 +28,7 @@
 class RYWIterator {
 public:
     RYWIterator(SnapshotCache* snapshotCache, WriteMap* writeMap)
-      : cache(snapshotCache), writes(writeMap), begin_key_cmp(0), end_key_cmp(0), bypassUnreadable(false) {}
+      : begin_key_cmp(0), end_key_cmp(0), cache(snapshotCache), writes(writeMap), bypassUnreadable(false) {}

     enum SEGMENT_TYPE { UNKNOWN_RANGE, EMPTY_RANGE, KV };
     static const SEGMENT_TYPE typeMap[12];

@@ -1285,9 +1285,9 @@ public:
 };

 ReadYourWritesTransaction::ReadYourWritesTransaction(Database const& cx)
-  : ISingleThreadTransaction(cx->deferredError), cache(&arena), writes(&arena), tr(cx), retries(0), approximateSize(0),
-    creationTime(now()), commitStarted(false), options(tr), versionStampFuture(tr.getVersionstamp()),
-    specialKeySpaceWriteMap(std::make_pair(false, Optional<Value>()), specialKeys.end) {
+  : ISingleThreadTransaction(cx->deferredError), tr(cx), cache(&arena), writes(&arena), retries(0), approximateSize(0),
+    creationTime(now()), commitStarted(false), versionStampFuture(tr.getVersionstamp()),
+    specialKeySpaceWriteMap(std::make_pair(false, Optional<Value>()), specialKeys.end), options(tr) {
     std::copy(
         cx.getTransactionDefaults().begin(), cx.getTransactionDefaults().end(), std::back_inserter(persistentOptions));
     applyPersistentOptions();

@@ -2284,10 +2284,11 @@ void ReadYourWritesTransaction::operator=(ReadYourWritesTransaction&& r) noexcep
 }

 ReadYourWritesTransaction::ReadYourWritesTransaction(ReadYourWritesTransaction&& r) noexcept
-  : ISingleThreadTransaction(std::move(r.deferredError)), cache(std::move(r.cache)), writes(std::move(r.writes)),
-    arena(std::move(r.arena)), reading(std::move(r.reading)), retries(r.retries), approximateSize(r.approximateSize),
-    creationTime(r.creationTime), timeoutActor(std::move(r.timeoutActor)), resetPromise(std::move(r.resetPromise)),
-    commitStarted(r.commitStarted), options(r.options), transactionDebugInfo(r.transactionDebugInfo) {
+  : ISingleThreadTransaction(std::move(r.deferredError)), arena(std::move(r.arena)), cache(std::move(r.cache)),
+    writes(std::move(r.writes)), resetPromise(std::move(r.resetPromise)), reading(std::move(r.reading)),
+    retries(r.retries), approximateSize(r.approximateSize), timeoutActor(std::move(r.timeoutActor)),
+    creationTime(r.creationTime), commitStarted(r.commitStarted), transactionDebugInfo(r.transactionDebugInfo),
+    options(r.options) {
     cache.arena = &arena;
     writes.arena = &arena;
     tr = std::move(r.tr);

@@ -74,20 +74,20 @@ public:
     Optional<Version> getCachedReadVersion() const override { return tr.getCachedReadVersion(); }
     Future<Optional<Value>> get(const Key& key, Snapshot = Snapshot::False) override;
     Future<Key> getKey(const KeySelector& key, Snapshot = Snapshot::False) override;
-    Future<Standalone<RangeResultRef>> getRange(const KeySelector& begin,
-                                                const KeySelector& end,
-                                                int limit,
-                                                Snapshot = Snapshot::False,
-                                                Reverse = Reverse::False) override;
-    Future<Standalone<RangeResultRef>> getRange(KeySelector begin,
-                                                KeySelector end,
-                                                GetRangeLimits limits,
-                                                Snapshot = Snapshot::False,
-                                                Reverse = Reverse::False) override;
-    Future<Standalone<RangeResultRef>> getRange(const KeyRange& keys,
-                                                int limit,
-                                                Snapshot snapshot = Snapshot::False,
-                                                Reverse reverse = Reverse::False) {
+    Future<RangeResult> getRange(const KeySelector& begin,
+                                 const KeySelector& end,
+                                 int limit,
+                                 Snapshot = Snapshot::False,
+                                 Reverse = Reverse::False) override;
+    Future<RangeResult> getRange(KeySelector begin,
+                                 KeySelector end,
+                                 GetRangeLimits limits,
+                                 Snapshot = Snapshot::False,
+                                 Reverse = Reverse::False) override;
+    Future<RangeResult> getRange(const KeyRange& keys,
+                                 int limit,
+                                 Snapshot snapshot = Snapshot::False,
+                                 Reverse reverse = Reverse::False) {
         return getRange(KeySelector(firstGreaterOrEqual(keys.begin), keys.arena()),
                         KeySelector(firstGreaterOrEqual(keys.end), keys.arena()),
                         limit,

@@ -342,6 +342,7 @@ void ServerKnobs::initialize(Randomize randomize, ClientKnobs* clientKnobs, IsSi
     init( ROCKSDB_PERIODIC_COMPACTION_SECONDS, 0 );
     init( ROCKSDB_PREFIX_LEN, 0 );
     init( ROCKSDB_BLOCK_CACHE_SIZE, 0 );
+    init( ROCKSDB_METRICS_DELAY, 60.0 );

     // Leader election
     bool longLeaderElection = randomize && BUGGIFY;

@@ -274,6 +274,7 @@ public:
     int64_t ROCKSDB_PERIODIC_COMPACTION_SECONDS;
     int ROCKSDB_PREFIX_LEN;
     int64_t ROCKSDB_BLOCK_CACHE_SIZE;
+    double ROCKSDB_METRICS_DELAY;

     // Leader election
     int MAX_NOTIFICATIONS;

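A minimal sketch of how a knob like ROCKSDB_METRICS_DELAY is typically consumed (the polling actor below is an assumption for illustration, not code from this commit):

    ACTOR Future<Void> rocksDBMetricLogger() {
        loop {
            wait(delay(SERVER_KNOBS->ROCKSDB_METRICS_DELAY)); // 60s by default, per the init above
            // emit RocksDB statistics as TraceEvents here
        }
    }
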
@ -30,39 +30,40 @@
|
|||
|
||||
class SimpleConfigTransactionImpl {
|
||||
ConfigTransactionCommitRequest toCommit;
|
||||
Future<Version> getVersionFuture;
|
||||
Future<ConfigGeneration> getGenerationFuture;
|
||||
ConfigTransactionInterface cti;
|
||||
int numRetries{ 0 };
|
||||
bool committed{ false };
|
||||
Optional<UID> dID;
|
||||
Database cx;
|
||||
|
||||
ACTOR static Future<Version> getReadVersion(SimpleConfigTransactionImpl* self) {
|
||||
ACTOR static Future<ConfigGeneration> getGeneration(SimpleConfigTransactionImpl* self) {
|
||||
if (self->dID.present()) {
|
||||
TraceEvent("SimpleConfigTransactionGettingReadVersion", self->dID.get());
|
||||
}
|
||||
ConfigTransactionGetVersionRequest req;
|
||||
ConfigTransactionGetVersionReply reply =
|
||||
wait(self->cti.getVersion.getReply(ConfigTransactionGetVersionRequest{}));
|
||||
ConfigTransactionGetGenerationRequest req;
|
||||
ConfigTransactionGetGenerationReply reply =
|
||||
wait(self->cti.getGeneration.getReply(ConfigTransactionGetGenerationRequest{}));
|
||||
if (self->dID.present()) {
|
||||
TraceEvent("SimpleConfigTransactionGotReadVersion", self->dID.get()).detail("Version", reply.version);
|
||||
TraceEvent("SimpleConfigTransactionGotReadVersion", self->dID.get())
|
||||
.detail("Version", reply.generation.liveVersion);
|
||||
}
|
||||
return reply.version;
|
||||
return reply.generation;
|
||||
}
|
||||
|
||||
ACTOR static Future<Optional<Value>> get(SimpleConfigTransactionImpl* self, KeyRef key) {
|
||||
if (!self->getVersionFuture.isValid()) {
|
||||
self->getVersionFuture = getReadVersion(self);
|
||||
if (!self->getGenerationFuture.isValid()) {
|
||||
self->getGenerationFuture = getGeneration(self);
|
||||
}
|
||||
state ConfigKey configKey = ConfigKey::decodeKey(key);
|
||||
Version version = wait(self->getVersionFuture);
|
||||
ConfigGeneration generation = wait(self->getGenerationFuture);
|
||||
if (self->dID.present()) {
|
||||
TraceEvent("SimpleConfigTransactionGettingValue", self->dID.get())
|
||||
.detail("ConfigClass", configKey.configClass)
|
||||
.detail("KnobName", configKey.knobName);
|
||||
}
|
||||
ConfigTransactionGetReply reply =
|
||||
wait(self->cti.get.getReply(ConfigTransactionGetRequest{ version, configKey }));
|
||||
wait(self->cti.get.getReply(ConfigTransactionGetRequest{ generation, configKey }));
|
||||
if (self->dID.present()) {
|
||||
TraceEvent("SimpleConfigTransactionGotValue", self->dID.get())
|
||||
.detail("Value", reply.value.get().toString());
|
||||
|
@ -74,29 +75,28 @@ class SimpleConfigTransactionImpl {
|
|||
}
|
||||
}
|
||||
|
||||
    ACTOR static Future<Standalone<RangeResultRef>> getConfigClasses(SimpleConfigTransactionImpl* self) {
        if (!self->getVersionFuture.isValid()) {
            self->getVersionFuture = getReadVersion(self);
    ACTOR static Future<RangeResult> getConfigClasses(SimpleConfigTransactionImpl* self) {
        if (!self->getGenerationFuture.isValid()) {
            self->getGenerationFuture = getGeneration(self);
        }
        Version version = wait(self->getVersionFuture);
        ConfigGeneration generation = wait(self->getGenerationFuture);
        ConfigTransactionGetConfigClassesReply reply =
            wait(self->cti.getClasses.getReply(ConfigTransactionGetConfigClassesRequest{ version }));
        Standalone<RangeResultRef> result;
            wait(self->cti.getClasses.getReply(ConfigTransactionGetConfigClassesRequest{ generation }));
        RangeResult result;
        for (const auto& configClass : reply.configClasses) {
            result.push_back_deep(result.arena(), KeyValueRef(configClass, ""_sr));
        }
        return result;
    }

    ACTOR static Future<Standalone<RangeResultRef>> getKnobs(SimpleConfigTransactionImpl* self,
                                                             Optional<Key> configClass) {
        if (!self->getVersionFuture.isValid()) {
            self->getVersionFuture = getReadVersion(self);
    ACTOR static Future<RangeResult> getKnobs(SimpleConfigTransactionImpl* self, Optional<Key> configClass) {
        if (!self->getGenerationFuture.isValid()) {
            self->getGenerationFuture = getGeneration(self);
        }
        Version version = wait(self->getVersionFuture);
        ConfigGeneration generation = wait(self->getGenerationFuture);
        ConfigTransactionGetKnobsReply reply =
            wait(self->cti.getKnobs.getReply(ConfigTransactionGetKnobsRequest{ version, configClass }));
        Standalone<RangeResultRef> result;
            wait(self->cti.getKnobs.getReply(ConfigTransactionGetKnobsRequest{ generation, configClass }));
        RangeResult result;
        for (const auto& knobName : reply.knobNames) {
            result.push_back_deep(result.arena(), KeyValueRef(knobName, ""_sr));
        }

@@ -104,10 +104,10 @@ class SimpleConfigTransactionImpl {
    }

    ACTOR static Future<Void> commit(SimpleConfigTransactionImpl* self) {
        if (!self->getVersionFuture.isValid()) {
            self->getVersionFuture = getReadVersion(self);
        if (!self->getGenerationFuture.isValid()) {
            self->getGenerationFuture = getGeneration(self);
        }
        wait(store(self->toCommit.version, self->getVersionFuture));
        wait(store(self->toCommit.generation, self->getGenerationFuture));
        self->toCommit.annotation.timestamp = now();
        wait(self->cti.commit.getReply(self->toCommit));
        self->committed = true;

@@ -123,29 +123,13 @@ public:
    SimpleConfigTransactionImpl(ConfigTransactionInterface const& cti) : cti(cti) {}

    void set(KeyRef key, ValueRef value) {
        if (key == configTransactionDescriptionKey) {
            toCommit.annotation.description = KeyRef(toCommit.arena, value);
        } else {
            ConfigKey configKey = ConfigKeyRef::decodeKey(key);
            auto knobValue = IKnobCollection::parseKnobValue(
                configKey.knobName.toString(), value.toString(), IKnobCollection::Type::TEST);
            toCommit.mutations.emplace_back_deep(toCommit.arena, configKey, knobValue.contents());
        }
    }
    void set(KeyRef key, ValueRef value) { toCommit.set(key, value); }

    void clear(KeyRef key) {
        if (key == configTransactionDescriptionKey) {
            toCommit.annotation.description = ""_sr;
        } else {
            toCommit.mutations.emplace_back_deep(
                toCommit.arena, ConfigKeyRef::decodeKey(key), Optional<KnobValueRef>{});
        }
    }
    void clear(KeyRef key) { toCommit.clear(key); }
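Note: set() and clear() now forward to the commit request object. A hedged sketch of where the removed logic could land (the method placement on the request type is an assumption; the parsing calls are taken verbatim from the removed lines):

    // Hypothetical sketch: the description-key special case and knob parsing
    // move from SimpleConfigTransactionImpl into the commit request itself.
    void ConfigTransactionCommitRequest::set(KeyRef key, ValueRef value) {
        if (key == configTransactionDescriptionKey) {
            annotation.description = KeyRef(arena, value);
        } else {
            ConfigKey configKey = ConfigKeyRef::decodeKey(key);
            auto knobValue = IKnobCollection::parseKnobValue(
                configKey.knobName.toString(), value.toString(), IKnobCollection::Type::TEST);
            mutations.emplace_back_deep(arena, configKey, knobValue.contents());
        }
    }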
    Future<Optional<Value>> get(KeyRef key) { return get(this, key); }

    Future<Standalone<RangeResultRef>> getRange(KeyRangeRef keys) {
    Future<RangeResult> getRange(KeyRangeRef keys) {
        if (keys == configClassKeys) {
            return getConfigClasses(this);
        } else if (keys == globalConfigKnobKeys) {

@@ -170,23 +154,23 @@ public:
    }

    Future<Version> getReadVersion() {
        if (!getVersionFuture.isValid())
            getVersionFuture = getReadVersion(this);
        return getVersionFuture;
        if (!getGenerationFuture.isValid())
            getGenerationFuture = getGeneration(this);
        return map(getGenerationFuture, [](auto const& gen) { return gen.committedVersion; });
    }

    Optional<Version> getCachedReadVersion() const {
        if (getVersionFuture.isValid() && getVersionFuture.isReady() && !getVersionFuture.isError()) {
            return getVersionFuture.get();
        if (getGenerationFuture.isValid() && getGenerationFuture.isReady() && !getGenerationFuture.isError()) {
            return getGenerationFuture.get().committedVersion;
        } else {
            return {};
        }
    }

    Version getCommittedVersion() const { return committed ? getVersionFuture.get() : ::invalidVersion; }
    Version getCommittedVersion() const { return committed ? getGenerationFuture.get().liveVersion : ::invalidVersion; }

    void reset() {
        getVersionFuture = Future<Version>{};
        getGenerationFuture = Future<ConfigGeneration>{};
        toCommit = {};
        committed = false;
    }
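Note: getReadVersion() no longer returns the stored future directly; it projects the generation through flow's map() combinator. The same pattern in isolation, using the types from this diff:

    // Derive a Future<Version> view from a Future<ConfigGeneration> without
    // blocking: map() applies the lambda once the underlying future is ready.
    Future<Version> committedVersionOf(Future<ConfigGeneration> gen) {
        return map(gen, [](ConfigGeneration const& g) { return g.committedVersion; });
    }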
@@ -225,19 +209,25 @@ Future<Optional<Value>> SimpleConfigTransaction::get(Key const& key, Snapshot sn
    return impl().get(key);
}

Future<Standalone<RangeResultRef>> SimpleConfigTransaction::getRange(KeySelector const& begin,
                                                                     KeySelector const& end,
                                                                     int limit,
                                                                     Snapshot snapshot,
                                                                     Reverse reverse) {
Future<RangeResult> SimpleConfigTransaction::getRange(KeySelector const& begin,
                                                      KeySelector const& end,
                                                      int limit,
                                                      Snapshot snapshot,
                                                      Reverse reverse) {
    if (reverse) {
        throw client_invalid_operation();
    }
    return impl().getRange(KeyRangeRef(begin.getKey(), end.getKey()));
}

Future<Standalone<RangeResultRef>> SimpleConfigTransaction::getRange(KeySelector begin,
                                                                     KeySelector end,
                                                                     GetRangeLimits limits,
                                                                     Snapshot snapshot,
                                                                     Reverse reverse) {
Future<RangeResult> SimpleConfigTransaction::getRange(KeySelector begin,
                                                      KeySelector end,
                                                      GetRangeLimits limits,
                                                      Snapshot snapshot,
                                                      Reverse reverse) {
    if (reverse) {
        throw client_invalid_operation();
    }
    return impl().getRange(KeyRangeRef(begin.getKey(), end.getKey()));
}

@@ -50,16 +50,16 @@ public:
    Optional<Version> getCachedReadVersion() const override;

    Future<Optional<Value>> get(Key const& key, Snapshot = Snapshot::False) override;
    Future<Standalone<RangeResultRef>> getRange(KeySelector const& begin,
                                                KeySelector const& end,
                                                int limit,
                                                Snapshot = Snapshot::False,
                                                Reverse = Reverse::False) override;
    Future<Standalone<RangeResultRef>> getRange(KeySelector begin,
                                                KeySelector end,
                                                GetRangeLimits limits,
                                                Snapshot = Snapshot::False,
                                                Reverse = Reverse::False) override;
    Future<RangeResult> getRange(KeySelector const& begin,
                                 KeySelector const& end,
                                 int limit,
                                 Snapshot = Snapshot::False,
                                 Reverse = Reverse::False) override;
    Future<RangeResult> getRange(KeySelector begin,
                                 KeySelector end,
                                 GetRangeLimits limits,
                                 Snapshot = Snapshot::False,
                                 Reverse = Reverse::False) override;
    Future<Void> commit() override;
    Version getCommittedVersion() const override;
    void setOption(FDBTransactionOptions::Option option, Optional<StringRef> value = Optional<StringRef>()) override;

@@ -311,7 +311,7 @@ public:
        entries.insert(Entry(allKeys.end, afterAllKeys, VectorRef<KeyValueRef>()), NoMetric(), true);
    }
    // Visual Studio refuses to generate these, apparently despite the standard
    SnapshotCache(SnapshotCache&& r) noexcept : entries(std::move(r.entries)), arena(r.arena) {}
    SnapshotCache(SnapshotCache&& r) noexcept : arena(r.arena), entries(std::move(r.entries)) {}
    SnapshotCache& operator=(SnapshotCache&& r) noexcept {
        entries = std::move(r.entries);
        arena = r.arena;
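Note: this and the many constructor hunks that follow all make the same mechanical change: reordering member-initializer lists to match the members' declaration order, which is the order C++ actually initializes them in (and what -Wreorder checks). A minimal self-contained illustration:

    struct Example {
        int a;
        int b; // declared after a

        // Wrong: b is listed first, but a is still initialized first, so
        // a(b + 1) would read an uninitialized b; -Wreorder flags the list.
        // Example(int x) : b(x), a(b + 1) {}

        // Correct: the initializer list matches the declaration order.
        Example(int x) : a(x + 1), b(x) {}
    };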
@@ -248,8 +248,9 @@ ACTOR Future<Void> normalizeKeySelectorActor(SpecialKeySpace* sks,
}

SpecialKeySpace::SpecialKeySpace(KeyRef spaceStartKey, KeyRef spaceEndKey, bool testOnly)
  : range(KeyRangeRef(spaceStartKey, spaceEndKey)), readImpls(nullptr, spaceEndKey), writeImpls(nullptr, spaceEndKey),
    modules(testOnly ? SpecialKeySpace::MODULE::TESTONLY : SpecialKeySpace::MODULE::UNKNOWN, spaceEndKey) {
  : readImpls(nullptr, spaceEndKey),
    modules(testOnly ? SpecialKeySpace::MODULE::TESTONLY : SpecialKeySpace::MODULE::UNKNOWN, spaceEndKey),
    writeImpls(nullptr, spaceEndKey), range(KeyRangeRef(spaceStartKey, spaceEndKey)) {
    // Default begin of KeyRangeMap is Key(), insert the range to update start key
    readImpls.insert(range, nullptr);
    writeImpls.insert(range, nullptr);

@@ -873,13 +873,13 @@ TaskBucket::TaskBucket(const Subspace& subspace,
                       AccessSystemKeys sysAccess,
                       PriorityBatch priorityBatch,
                       LockAware lockAware)
  : prefix(subspace), active(prefix.get(LiteralStringRef("ac"))), available(prefix.get(LiteralStringRef("av"))),
    available_prioritized(prefix.get(LiteralStringRef("avp"))), timeouts(prefix.get(LiteralStringRef("to"))),
    pauseKey(prefix.pack(LiteralStringRef("pause"))), timeout(CLIENT_KNOBS->TASKBUCKET_TIMEOUT_VERSIONS),
    system_access(sysAccess), priority_batch(priorityBatch), lockAware(lockAware), cc("TaskBucket"),
    dbgid(deterministicRandom()->randomUniqueID()), dispatchSlotChecksStarted("DispatchSlotChecksStarted", cc),
    dispatchErrors("DispatchErrors", cc), dispatchDoTasks("DispatchDoTasks", cc),
    dispatchEmptyTasks("DispatchEmptyTasks", cc), dispatchSlotChecksComplete("DispatchSlotChecksComplete", cc) {}
  : cc("TaskBucket"), dispatchSlotChecksStarted("DispatchSlotChecksStarted", cc), dispatchErrors("DispatchErrors", cc),
    dispatchDoTasks("DispatchDoTasks", cc), dispatchEmptyTasks("DispatchEmptyTasks", cc),
    dispatchSlotChecksComplete("DispatchSlotChecksComplete", cc), dbgid(deterministicRandom()->randomUniqueID()),
    prefix(subspace), active(prefix.get(LiteralStringRef("ac"))), pauseKey(prefix.pack(LiteralStringRef("pause"))),
    available(prefix.get(LiteralStringRef("av"))), available_prioritized(prefix.get(LiteralStringRef("avp"))),
    timeouts(prefix.get(LiteralStringRef("to"))), timeout(CLIENT_KNOBS->TASKBUCKET_TIMEOUT_VERSIONS),
    system_access(sysAccess), priority_batch(priorityBatch), lockAware(lockAware) {}

TaskBucket::~TaskBucket() {}

@@ -58,11 +58,11 @@ struct PTree : public ReferenceCounted<PTree<T>>, FastAllocated<PTree<T>>, NonCo
    Reference<PTree> left(Version at) const { return child(false, at); }
    Reference<PTree> right(Version at) const { return child(true, at); }

    PTree(const T& data, Version ver) : data(data), lastUpdateVersion(ver), updated(false) {
    PTree(const T& data, Version ver) : lastUpdateVersion(ver), updated(false), data(data) {
        priority = deterministicRandom()->randomUInt32();
    }
    PTree(uint32_t pri, T const& data, Reference<PTree> const& left, Reference<PTree> const& right, Version ver)
      : priority(pri), data(data), lastUpdateVersion(ver), updated(false) {
      : priority(pri), lastUpdateVersion(ver), updated(false), data(data) {
        pointer[0] = left;
        pointer[1] = right;
    }

@@ -168,7 +168,7 @@ private:
    typedef Reference<PTreeT> Tree;

public:
    explicit WriteMap(Arena* arena) : arena(arena), ver(-1), scratch_iterator(this), writeMapEmpty(true) {
    explicit WriteMap(Arena* arena) : arena(arena), writeMapEmpty(true), ver(-1), scratch_iterator(this) {
        PTreeImpl::insert(
            writes, ver, WriteMapEntry(allKeys.begin, OperationStack(), false, false, false, false, false));
        PTreeImpl::insert(writes, ver, WriteMapEntry(allKeys.end, OperationStack(), false, false, false, false, false));

@@ -177,8 +177,8 @@ public:
    }

    WriteMap(WriteMap&& r) noexcept
      : writeMapEmpty(r.writeMapEmpty), writes(std::move(r.writes)), ver(r.ver),
        scratch_iterator(std::move(r.scratch_iterator)), arena(r.arena) {}
      : arena(r.arena), writeMapEmpty(r.writeMapEmpty), writes(std::move(r.writes)), ver(r.ver),
        scratch_iterator(std::move(r.scratch_iterator)) {}
    WriteMap& operator=(WriteMap&& r) noexcept {
        writeMapEmpty = r.writeMapEmpty;
        writes = std::move(r.writes);

@@ -57,6 +57,9 @@ description is not currently required but encouraged.
    <Option name="trace_file_identifier" code="36"
            paramType="String" paramDescription="The identifier that will be part of all trace file names"
            description="Once provided, this string will be used to replace the port/PID in the log file names." />
    <Option name="trace_partial_file_suffix" code="39"
            paramType="String" paramDescription="Append this suffix to partially written log files. When a log file is complete, it is renamed to remove the suffix. No separator is added between the file and the suffix. If you want to add a file extension, you should include the separator - e.g. '.tmp' instead of 'tmp' to add the 'tmp' extension."
            description="" />
    <Option name="knob" code="40"
            paramType="String" paramDescription="knob_name=knob_value"
            description="Set internal tuning or debugging knobs"/>
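Note: a hedged sketch of how a client could use the new trace_partial_file_suffix option through the C API. The FDB_NET_OPTION_TRACE_PARTIAL_FILE_SUFFIX constant name assumes the usual code generation from fdb.options and is not shown in this diff:

    // Sketch: write trace files with a ".part" suffix until they are complete.
    // Option code 39 is generated from fdb.options; the enum name is an assumption.
    fdb_error_t err = fdb_network_set_option(
        FDB_NET_OPTION_TRACE_PARTIAL_FILE_SUFFIX, (const uint8_t*)".part", 5);
    if (err) {
        fprintf(stderr, "set_option failed: %s\n", fdb_get_error(err));
    }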
@@ -393,7 +393,7 @@ public:

    Command() : argv(nullptr) {}
    Command(const CSimpleIni& ini, std::string _section, ProcessID id, fdb_fd_set fds, int* maxfd)
      : section(_section), argv(nullptr), fork_retry_time(-1), quiet(false), delete_envvars(nullptr), fds(fds),
      : fds(fds), argv(nullptr), section(_section), fork_retry_time(-1), quiet(false), delete_envvars(nullptr),
        deconfigured(false), kill_on_configuration_change(true) {
        char _ssection[strlen(section.c_str()) + 22];
        snprintf(_ssection, strlen(section.c_str()) + 22, "%s", id.c_str());

@@ -298,7 +298,7 @@ private:
                   const std::string& filename,
                   int64_t length,
                   Reference<EvictablePageCache> pageCache)
      : uncached(uncached), filename(filename), length(length), prevLength(length), pageCache(pageCache),
      : filename(filename), uncached(uncached), length(length), prevLength(length), pageCache(pageCache),
        currentTruncate(Void()), currentTruncateSize(0), rateControl(nullptr) {
        if (!g_network->isSimulated()) {
            countFileCacheWrites.init(LiteralStringRef("AsyncFile.CountFileCacheWrites"), filename);

@@ -610,8 +610,8 @@ struct AFCPage : public EvictablePage, public FastAllocated<AFCPage> {
    }

    AFCPage(AsyncFileCached* owner, int64_t offset)
      : EvictablePage(owner->pageCache), owner(owner), pageOffset(offset), dirty(false), valid(false), truncated(false),
        notReading(Void()), notFlushing(Void()), zeroCopyRefCount(0), flushableIndex(-1), writeThroughCount(0) {
      : EvictablePage(owner->pageCache), owner(owner), pageOffset(offset), notReading(Void()), notFlushing(Void()),
        dirty(false), valid(false), truncated(false), writeThroughCount(0), flushableIndex(-1), zeroCopyRefCount(0) {
        pageCache->allocate(this);
    }

@@ -277,7 +277,7 @@ private:
    mutable Int64MetricHandle countLogicalReads;

    AsyncFileEIO(int fd, int flags, std::string const& filename)
      : fd(fd), flags(flags), filename(filename), err(new ErrorInfo) {
      : fd(fd), flags(flags), err(new ErrorInfo), filename(filename) {
        if (!g_network->isSimulated()) {
            countFileLogicalWrites.init(LiteralStringRef("AsyncFile.CountFileLogicalWrites"), filename);
            countFileLogicalReads.init(LiteralStringRef("AsyncFile.CountFileLogicalReads"), filename);

@@ -34,16 +34,18 @@ public:
        auto pos = salt.find('.');
        salt = salt.substr(0, pos);
        auto hash = XXH3_128bits(salt.c_str(), salt.size());
        auto high = reinterpret_cast<unsigned char*>(&hash.high64);
        auto low = reinterpret_cast<unsigned char*>(&hash.low64);
        std::copy(high, high + 8, &iv[0]);
        std::copy(low, low + 6, &iv[8]);
        iv[14] = iv[15] = 0; // last 16 bits identify block
        auto pHigh = reinterpret_cast<unsigned char*>(&hash.high64);
        auto pLow = reinterpret_cast<unsigned char*>(&hash.low64);
        std::copy(pHigh, pHigh + 8, &iv[0]);
        std::copy(pLow, pLow + 4, &iv[8]);
        uint32_t blockZero = 0;
        auto pBlock = reinterpret_cast<unsigned char*>(&blockZero);
        std::copy(pBlock, pBlock + 4, &iv[12]);
        return iv;
    }
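Note: the new getFirstBlockIV packs the 16-byte IV as 8 bytes of the filename hash's high word, 4 bytes of its low word, and a 4-byte block counter (widened from the old 2 bytes). The same layout in a standalone sketch, assuming a 16-byte IV array:

    #include <array>
    #include <cstdint>
    #include <cstring>

    using IV = std::array<unsigned char, 16>;

    // Bytes [0,8): hash.high64, [8,12): hash.low64, [12,16): 32-bit block index.
    // memcpy copies the host-endian in-memory representation, matching the
    // reinterpret_cast + std::copy in the diff.
    IV makeIV(uint64_t high64, uint64_t low64, uint32_t block) {
        IV iv;
        std::memcpy(&iv[0], &high64, 8);
        std::memcpy(&iv[8], &low64, 4);
        std::memcpy(&iv[12], &block, 4);
        return iv;
    }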
    // Read a single block of size ENCRYPTION_BLOCK_SIZE bytes, and decrypt.
    ACTOR static Future<Standalone<StringRef>> readBlock(AsyncFileEncrypted* self, uint16_t block) {
    ACTOR static Future<Standalone<StringRef>> readBlock(AsyncFileEncrypted* self, uint32_t block) {
        state Arena arena;
        state unsigned char* encrypted = new (arena) unsigned char[FLOW_KNOBS->ENCRYPTION_BLOCK_SIZE];
        int bytes = wait(

@@ -53,23 +55,22 @@ public:
        return Standalone<StringRef>(decrypted, arena);
    }

    ACTOR static Future<int> read(AsyncFileEncrypted* self, void* data, int length, int offset) {
        state const uint16_t firstBlock = offset / FLOW_KNOBS->ENCRYPTION_BLOCK_SIZE;
        state const uint16_t lastBlock = (offset + length - 1) / FLOW_KNOBS->ENCRYPTION_BLOCK_SIZE;
        state uint16_t block;
    ACTOR static Future<int> read(AsyncFileEncrypted* self, void* data, int length, int64_t offset) {
        state const uint32_t firstBlock = offset / FLOW_KNOBS->ENCRYPTION_BLOCK_SIZE;
        state const uint32_t lastBlock = (offset + length - 1) / FLOW_KNOBS->ENCRYPTION_BLOCK_SIZE;
        state uint32_t block;
        state unsigned char* output = reinterpret_cast<unsigned char*>(data);
        state int bytesRead = 0;
        ASSERT(self->mode == AsyncFileEncrypted::Mode::READ_ONLY);
        for (block = firstBlock; block <= lastBlock; ++block) {
            state StringRef plaintext;
            state Standalone<StringRef> plaintext;

            auto cachedBlock = self->readBuffers.get(block);
            if (cachedBlock.present()) {
                plaintext = cachedBlock.get();
            } else {
                Standalone<StringRef> _plaintext = wait(readBlock(self, block));
                self->readBuffers.insert(block, _plaintext);
                plaintext = _plaintext;
                wait(store(plaintext, readBlock(self, block)));
                self->readBuffers.insert(block, plaintext);
            }
            auto start = (block == firstBlock) ? plaintext.begin() + (offset % FLOW_KNOBS->ENCRYPTION_BLOCK_SIZE)
                                               : plaintext.begin();

@@ -79,6 +80,14 @@ public:
            if ((offset + length) % FLOW_KNOBS->ENCRYPTION_BLOCK_SIZE == 0) {
                end = plaintext.end();
            }

            // The block could be short if it includes or is after the end of the file.
            end = std::min(end, plaintext.end());
            // If the start position is at or after the end of the block, the read is complete.
            if (start == end || start >= plaintext.end()) {
                break;
            }

            std::copy(start, end, output);
            output += (end - start);
            bytesRead += (end - start);
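Note: the read path above is standard block-aligned I/O: map the byte range to block indices, then clamp the copy window inside each decrypted block, including the short block at end of file. The same arithmetic in isolation (blockSize stands in for ENCRYPTION_BLOCK_SIZE; this is a sketch, not the FDB code):

    #include <algorithm>
    #include <cstdint>
    #include <utility>

    // For a byte range [offset, offset + length) over fixed-size blocks, compute
    // the window within block b that a read should copy. Returns {start, end}
    // offsets relative to the block; end is clamped to the block's actual size
    // (blockBytes may be short at end of file).
    std::pair<int64_t, int64_t> windowInBlock(int64_t offset, int length, int64_t blockSize,
                                              uint32_t b, int64_t blockBytes) {
        int64_t begin = std::max<int64_t>(offset, int64_t(b) * blockSize);
        int64_t end = std::min<int64_t>(offset + length, (int64_t(b) + 1) * blockSize);
        int64_t start = begin - int64_t(b) * blockSize;
        int64_t stop = std::min(end - int64_t(b) * blockSize, blockBytes);
        return { start, std::max(start, stop) }; // empty window if past EOF
    }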
@@ -103,7 +112,7 @@ public:
        if (self->offsetInBlock == FLOW_KNOBS->ENCRYPTION_BLOCK_SIZE) {
            wait(self->writeLastBlockToFile());
            self->offsetInBlock = 0;
            ASSERT_LT(self->currentBlock, std::numeric_limits<uint16_t>::max());
            ASSERT_LT(self->currentBlock, std::numeric_limits<uint32_t>::max());
            ++self->currentBlock;
            self->encryptor = std::make_unique<EncryptionStreamCipher>(StreamCipher::Key::getKey(),
                                                                       self->getIV(self->currentBlock));

@@ -131,7 +140,7 @@ public:
};

AsyncFileEncrypted::AsyncFileEncrypted(Reference<IAsyncFile> file, Mode mode)
  : file(file), mode(mode), currentBlock(0), readBuffers(FLOW_KNOBS->MAX_DECRYPTED_BLOCKS) {
  : file(file), mode(mode), readBuffers(FLOW_KNOBS->MAX_DECRYPTED_BLOCKS), currentBlock(0) {
    firstBlockIV = AsyncFileEncryptedImpl::getFirstBlockIV(file->getFilename());
    if (mode == Mode::APPEND_ONLY) {
        encryptor = std::make_unique<EncryptionStreamCipher>(StreamCipher::Key::getKey(), getIV(currentBlock));

@@ -196,10 +205,12 @@ int64_t AsyncFileEncrypted::debugFD() const {
    return file->debugFD();
}

StreamCipher::IV AsyncFileEncrypted::getIV(uint16_t block) const {
StreamCipher::IV AsyncFileEncrypted::getIV(uint32_t block) const {
    auto iv = firstBlockIV;
    iv[14] = block / 256;
    iv[15] = block % 256;

    auto pBlock = reinterpret_cast<unsigned char*>(&block);
    std::copy(pBlock, pBlock + 4, &iv[12]);

    return iv;
}

@@ -218,7 +229,7 @@ AsyncFileEncrypted::RandomCache::RandomCache(size_t maxSize) : maxSize(maxSize)
    vec.reserve(maxSize);
}

void AsyncFileEncrypted::RandomCache::insert(uint16_t block, const Standalone<StringRef>& value) {
void AsyncFileEncrypted::RandomCache::insert(uint32_t block, const Standalone<StringRef>& value) {
    auto [_, found] = hashMap.insert({ block, value });
    if (found) {
        return;

@@ -230,7 +241,7 @@ void AsyncFileEncrypted::RandomCache::insert(uint16_t block, const Standalone<St
    }
}

Optional<Standalone<StringRef>> AsyncFileEncrypted::RandomCache::get(uint16_t block) const {
Optional<Standalone<StringRef>> AsyncFileEncrypted::RandomCache::get(uint32_t block) const {
    auto it = hashMap.find(block);
    if (it == hashMap.end()) {
        return {};

@@ -255,6 +266,7 @@ TEST_CASE("fdbrpc/AsyncFileEncrypted") {
    state Reference<IAsyncFile> file =
        wait(IAsyncFileSystem::filesystem()->open(joinPath(params.getDataDir(), "test-encrypted-file"), flags, 0600));
    state int bytesWritten = 0;
    state int chunkSize;
    while (bytesWritten < bytes) {
        chunkSize = std::min(deterministicRandom()->randomInt(0, 100), bytes - bytesWritten);
        wait(file->write(&writeBuffer[bytesWritten], chunkSize, bytesWritten));

@@ -262,7 +274,6 @@ TEST_CASE("fdbrpc/AsyncFileEncrypted") {
    }
    wait(file->sync());
    state int bytesRead = 0;
    state int chunkSize;
    while (bytesRead < bytes) {
        chunkSize = std::min(deterministicRandom()->randomInt(0, 100), bytes - bytesRead);
        int bytesReadInChunk = wait(file->read(&readBuffer[bytesRead], chunkSize, bytesRead));

@@ -40,7 +40,7 @@ public:
private:
    Reference<IAsyncFile> file;
    StreamCipher::IV firstBlockIV;
    StreamCipher::IV getIV(uint16_t block) const;
    StreamCipher::IV getIV(uint32_t block) const;
    Mode mode;
    Future<Void> writeLastBlockToFile();
    friend class AsyncFileEncryptedImpl;

@@ -48,19 +48,19 @@ private:
    // Reading:
    class RandomCache {
        size_t maxSize;
        std::vector<uint16_t> vec;
        std::unordered_map<uint16_t, Standalone<StringRef>> hashMap;
        std::vector<uint32_t> vec;
        std::unordered_map<uint32_t, Standalone<StringRef>> hashMap;
        size_t evict();

    public:
        RandomCache(size_t maxSize);
        void insert(uint16_t block, const Standalone<StringRef>& value);
        Optional<Standalone<StringRef>> get(uint16_t block) const;
        void insert(uint32_t block, const Standalone<StringRef>& value);
        Optional<Standalone<StringRef>> get(uint32_t block) const;
    } readBuffers;

    // Writing (append only):
    std::unique_ptr<EncryptionStreamCipher> encryptor;
    uint16_t currentBlock{ 0 };
    uint32_t currentBlock{ 0 };
    int offsetInBlock{ 0 };
    std::vector<unsigned char> writeBuffer;
    Future<Void> initialize();
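Note: RandomCache (now keyed by uint32_t) is a bounded map with random eviction; its evict() body is not part of this diff, but a typical implementation keeps the key vector and map in sync as in this sketch (an assumption, not the actual FDB code):

    #include <cstdint>
    #include <random>
    #include <string>
    #include <unordered_map>
    #include <vector>

    // Bounded cache that evicts a uniformly random entry when full.
    class RandomCacheSketch {
        size_t maxSize;
        std::vector<uint32_t> keys;
        std::unordered_map<uint32_t, std::string> map;
        std::mt19937 rng{ std::random_device{}() };

    public:
        explicit RandomCacheSketch(size_t maxSize) : maxSize(maxSize) {}

        void insert(uint32_t block, const std::string& value) {
            if (!map.insert({ block, value }).second) return; // already cached
            keys.push_back(block);
            if (keys.size() > maxSize) {
                size_t victim = std::uniform_int_distribution<size_t>(0, keys.size() - 1)(rng);
                map.erase(keys[victim]);
                std::swap(keys[victim], keys.back()); // O(1) removal from the vector
                keys.pop_back();
            }
        }
    };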
@@ -568,8 +568,8 @@ private:

    uint32_t opsIssued;
    Context()
      : iocx(0), evfd(-1), outstanding(0), opsIssued(0), ioStallBegin(0), fallocateSupported(true),
        fallocateZeroSupported(true), submittedRequestList(nullptr) {
      : iocx(0), evfd(-1), outstanding(0), ioStallBegin(0), fallocateSupported(true), fallocateZeroSupported(true),
        submittedRequestList(nullptr), opsIssued(0) {
        setIOTimeout(0);
    }

@@ -619,7 +619,7 @@ private:
    static Context ctx;

    explicit AsyncFileKAIO(int fd, int flags, std::string const& filename)
      : fd(fd), flags(flags), filename(filename), failed(false) {
      : failed(false), fd(fd), flags(flags), filename(filename) {
        ASSERT(!FLOW_KNOBS->DISABLE_POSIX_KERNEL_AIO);
        if (!g_network->isSimulated()) {
            countFileLogicalWrites.init(LiteralStringRef("AsyncFile.CountFileLogicalWrites"), filename);

@@ -190,9 +190,8 @@ private:
               Reference<DiskParameters> diskParameters,
               NetworkAddress openedAddress,
               bool aio)
      : filename(filename), initialFilename(initialFilename), file(file), diskParameters(diskParameters),
        openedAddress(openedAddress), pendingModifications(uint64_t(-1)), approximateSize(0), reponses(false),
        aio(aio) {
      : filename(filename), initialFilename(initialFilename), approximateSize(0), openedAddress(openedAddress),
        aio(aio), file(file), pendingModifications(uint64_t(-1)), diskParameters(diskParameters), reponses(false) {

        // This is only designed to work in simulation
        ASSERT(g_network->isSimulated());

@@ -199,7 +199,7 @@ public:
                             int maxConcurrentReads,
                             int cacheSizeBlocks)
      : m_f(f), m_block_size(blockSize), m_read_ahead_blocks(readAheadBlocks),
        m_max_concurrent_reads(maxConcurrentReads), m_cache_block_limit(std::max<int>(1, cacheSizeBlocks)) {}
        m_cache_block_limit(std::max<int>(1, cacheSizeBlocks)), m_max_concurrent_reads(maxConcurrentReads) {}
};

#include "flow/unactorcompiler.h"

@@ -1388,7 +1388,7 @@ TEST_CASE("/flow/DeterministicRandom/SignedOverflow") {
struct Tracker {
    int copied;
    bool moved;
    Tracker(int copied = 0) : moved(false), copied(copied) {}
    Tracker(int copied = 0) : copied(copied), moved(false) {}
    Tracker(Tracker&& other) : Tracker(other.copied) {
        ASSERT(!other.moved);
        other.moved = true;

@@ -51,7 +51,7 @@ constexpr UID WLTOKEN_PING_PACKET(-1, 1);
constexpr int PACKET_LEN_WIDTH = sizeof(uint32_t);
const uint64_t TOKEN_STREAM_FLAG = 1;

static constexpr int WLTOKEN_COUNTS = 20; // number of wellKnownEndpoints
static constexpr int WLTOKEN_COUNTS = 21; // number of wellKnownEndpoints

class EndpointMap : NonCopyable {
public:
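Note: WLTOKEN_COUNTS goes from 20 to 21 because the getCommittedVersion endpoint added later in this commit is a well-known endpoint: both sides of a connection must agree on the fixed token table, so each addition bumps the count in lock step. A purely hypothetical illustration of the invariant with integer slots (the real tokens are UIDs):

    // Hypothetical: well-known endpoints occupy fixed, pre-agreed slots.
    enum WellKnownSlot : int { SlotPing = 1, /* ... */ SlotGetCommittedVersion = 20 };
    static constexpr int WLTOKEN_COUNTS = 21; // one past the highest reserved slot
    static_assert(SlotGetCommittedVersion < WLTOKEN_COUNTS, "slot must be reserved");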
@@ -340,9 +340,8 @@ ACTOR Future<Void> pingLatencyLogger(TransportData* self) {
}

TransportData::TransportData(uint64_t transportId)
  : endpoints(WLTOKEN_COUNTS), endpointNotFoundReceiver(endpoints), pingReceiver(endpoints),
    warnAlwaysForLargePacket(true), lastIncompatibleMessage(0), transportId(transportId),
    numIncompatibleConnections(0) {
  : warnAlwaysForLargePacket(true), endpoints(WLTOKEN_COUNTS), endpointNotFoundReceiver(endpoints),
    pingReceiver(endpoints), numIncompatibleConnections(0), lastIncompatibleMessage(0), transportId(transportId) {
    degraded = makeReference<AsyncVar<bool>>(false);
    pingLogger = pingLatencyLogger(this);
}

@@ -795,13 +794,14 @@ ACTOR Future<Void> connectionKeeper(Reference<Peer> self,
}

Peer::Peer(TransportData* transport, NetworkAddress const& destination)
  : transport(transport), destination(destination), outgoingConnectionIdle(true), lastConnectTime(0.0),
    reconnectionDelay(FLOW_KNOBS->INITIAL_RECONNECTION_TIME), compatible(true), outstandingReplies(0),
    incompatibleProtocolVersionNewer(false), peerReferences(-1), bytesReceived(0), lastDataPacketSentTime(now()),
    pingLatencies(destination.isPublic() ? FLOW_KNOBS->PING_SAMPLE_AMOUNT : 1), lastLoggedBytesReceived(0),
    bytesSent(0), lastLoggedBytesSent(0), timeoutCount(0), lastLoggedTime(0.0), connectOutgoingCount(0), connectIncomingCount(0),
    connectFailedCount(0), connectLatencies(destination.isPublic() ? FLOW_KNOBS->NETWORK_CONNECT_SAMPLE_AMOUNT : 1),
    protocolVersion(Reference<AsyncVar<Optional<ProtocolVersion>>>(new AsyncVar<Optional<ProtocolVersion>>())) {
  : transport(transport), destination(destination), compatible(true), outgoingConnectionIdle(true),
    lastConnectTime(0.0), reconnectionDelay(FLOW_KNOBS->INITIAL_RECONNECTION_TIME), peerReferences(-1),
    incompatibleProtocolVersionNewer(false), bytesReceived(0), bytesSent(0), lastDataPacketSentTime(now()),
    outstandingReplies(0), pingLatencies(destination.isPublic() ? FLOW_KNOBS->PING_SAMPLE_AMOUNT : 1),
    lastLoggedTime(0.0), lastLoggedBytesReceived(0), lastLoggedBytesSent(0), timeoutCount(0),
    protocolVersion(Reference<AsyncVar<Optional<ProtocolVersion>>>(new AsyncVar<Optional<ProtocolVersion>>())),
    connectOutgoingCount(0), connectIncomingCount(0), connectFailedCount(0),
    connectLatencies(destination.isPublic() ? FLOW_KNOBS->NETWORK_CONNECT_SAMPLE_AMOUNT : 1) {
    IFailureMonitor::failureMonitor().setStatus(destination, FailureStatus(false));
}

@@ -52,7 +52,7 @@ struct ModelHolder : NonCopyable, public ReferenceCounted<ModelHolder> {
    double delta;
    uint64_t token;

    ModelHolder(QueueModel* model, uint64_t token) : model(model), token(token), released(false), startTime(now()) {
    ModelHolder(QueueModel* model, uint64_t token) : model(model), released(false), startTime(now()), token(token) {
        if (model) {
            delta = model->addRequest(token);
        }

@@ -30,11 +30,11 @@ using std::vector;

struct PerfMetric {
    constexpr static FileIdentifier file_identifier = 5980618;
    PerfMetric() : m_name(""), m_value(0), m_averaged(false), m_format_code("%.3g") {}
    PerfMetric() : m_name(""), m_format_code("%.3g"), m_value(0), m_averaged(false) {}
    PerfMetric(std::string name, double value, bool averaged)
      : m_name(name), m_value(value), m_averaged(averaged), m_format_code("%.3g") {}
      : m_name(name), m_format_code("%.3g"), m_value(value), m_averaged(averaged) {}
    PerfMetric(std::string name, double value, bool averaged, std::string format_code)
      : m_name(name), m_value(value), m_averaged(averaged), m_format_code(format_code) {}
      : m_name(name), m_format_code(format_code), m_value(value), m_averaged(averaged) {}

    std::string name() const { return m_name; }
    double value() const { return m_value; }
@@ -75,7 +75,7 @@ struct QueueData {
    Optional<TSSEndpointData> tssData;

    QueueData()
      : latency(0.001), penalty(1.0), smoothOutstanding(FLOW_KNOBS->QUEUE_MODEL_SMOOTHING_AMOUNT), failedUntil(0),
      : smoothOutstanding(FLOW_KNOBS->QUEUE_MODEL_SMOOTHING_AMOUNT), latency(0.001), penalty(1.0), failedUntil(0),
        futureVersionBackoff(FLOW_KNOBS->FUTURE_VERSION_INITIAL_BACKOFF), increaseBackoffTime(0) {}
};

@@ -29,12 +29,11 @@
struct LocalitySet : public ReferenceCounted<LocalitySet> {
public:
    LocalitySet(LocalitySet const& source)
      : _entryArray(source._entryArray), _mutableEntryArray(source._mutableEntryArray),
      : _keymap(source._keymap), _entryArray(source._entryArray), _mutableEntryArray(source._mutableEntryArray),
        _keyValueArray(source._keyValueArray), _keyIndexArray(source._keyIndexArray), _cacheArray(source._cacheArray),
        _keymap(source._keymap), _localitygroup(source._localitygroup), _cachehits(source._cachehits),
        _cachemisses(source._cachemisses) {}
        _localitygroup(source._localitygroup), _cachehits(source._cachehits), _cachemisses(source._cachemisses) {}
    LocalitySet(LocalitySet& localityGroup)
      : _localitygroup(&localityGroup), _keymap(new StringToIntMap()), _cachehits(0), _cachemisses(0) {}
      : _keymap(new StringToIntMap()), _localitygroup(&localityGroup), _cachehits(0), _cachemisses(0) {}
    virtual ~LocalitySet() {}

    virtual void addref() { ReferenceCounted<LocalitySet>::addref(); }

@@ -22,8 +22,8 @@
#include "flow/actorcompiler.h" // has to be last include

Counter::Counter(std::string const& name, CounterCollection& collection)
  : name(name), interval_start(0), last_event(0), interval_sq_time(0), interval_start_value(0), interval_delta(0),
    roughness_interval_start(0) {
  : name(name), interval_start(0), last_event(0), interval_sq_time(0), roughness_interval_start(0), interval_delta(0),
    interval_start_value(0) {
    metric.init(collection.name + "." + (char)toupper(name.at(0)) + name.substr(1), collection.id);
    collection.counters.push_back(this);
}

@@ -227,7 +227,7 @@ private:
class LatencySample {
public:
    LatencySample(std::string name, UID id, double loggingInterval, int sampleSize)
      : name(name), id(id), sample(sampleSize), sampleStart(now()) {
      : name(name), id(id), sampleStart(now()), sample(sampleSize) {
        logger = recurring([this]() { logSample(); }, loggingInterval);
    }

@@ -179,7 +179,7 @@ SimClogging g_clogging;

struct Sim2Conn final : IConnection, ReferenceCounted<Sim2Conn> {
    Sim2Conn(ISimulator::ProcessInfo* process)
      : process(process), dbgid(deterministicRandom()->randomUniqueID()), opened(false), closedByCaller(false),
      : opened(false), closedByCaller(false), process(process), dbgid(deterministicRandom()->randomUniqueID()),
        stopReceive(Never()) {
        pipes = sender(this) && receiver(this);
    }

@@ -563,8 +563,8 @@ private:
               const std::string& filename,
               const std::string& actualFilename,
               int flags)
      : h(h), diskParameters(diskParameters), delayOnWrite(delayOnWrite), filename(filename),
        actualFilename(actualFilename), dbgId(deterministicRandom()->randomUniqueID()), flags(flags) {}
      : h(h), diskParameters(diskParameters), filename(filename), actualFilename(actualFilename), flags(flags),
        dbgId(deterministicRandom()->randomUniqueID()), delayOnWrite(delayOnWrite) {}

    static int flagConversion(int flags) {
        int outFlags = O_BINARY | O_CLOEXEC;

@@ -1993,7 +1993,7 @@ public:
    }

    Sim2()
      : time(0.0), timerTime(0.0), taskCount(0), yielded(false), yield_limit(0), currentTaskID(TaskPriority::Zero) {
      : time(0.0), timerTime(0.0), currentTaskID(TaskPriority::Zero), taskCount(0), yielded(false), yield_limit(0) {
        // Not letting currentProcess be nullptr eliminates some annoying special cases
        currentProcess =
            new ProcessInfo("NoMachine",

@@ -2017,13 +2017,13 @@ public:
        ProcessInfo* machine;
        Promise<Void> action;
        Task(double time, TaskPriority taskID, uint64_t stable, ProcessInfo* machine, Promise<Void>&& action)
          : time(time), taskID(taskID), stable(stable), machine(machine), action(std::move(action)) {}
          : taskID(taskID), time(time), stable(stable), machine(machine), action(std::move(action)) {}
        Task(double time, TaskPriority taskID, uint64_t stable, ProcessInfo* machine, Future<Void>& future)
          : time(time), taskID(taskID), stable(stable), machine(machine) {
          : taskID(taskID), time(time), stable(stable), machine(machine) {
            future = action.getFuture();
        }
        Task(Task&& rhs) noexcept
          : time(rhs.time), taskID(rhs.taskID), stable(rhs.stable), machine(rhs.machine),
          : taskID(rhs.taskID), time(rhs.time), stable(rhs.stable), machine(rhs.machine),
            action(std::move(rhs.action)) {}
        void operator=(Task const& rhs) {
            taskID = rhs.taskID;
@@ -39,9 +39,9 @@ class ISimulator : public INetwork {
public:
    ISimulator()
      : desiredCoordinators(1), physicalDatacenters(1), processesPerMachine(0), listenersPerProcess(1),
        isStopped(false), lastConnectionFailure(0), connectionFailuresDisableDuration(0), speedUpSimulation(false),
        allSwapsDisabled(false), backupAgents(BackupAgentType::WaitForType), drAgents(BackupAgentType::WaitForType),
        extraDB(nullptr), allowLogSetKills(true), usableRegions(1), tssMode(TSSMode::Disabled) {}
        extraDB(nullptr), usableRegions(1), allowLogSetKills(true), tssMode(TSSMode::Disabled), isStopped(false),
        lastConnectionFailure(0), connectionFailuresDisableDuration(0), speedUpSimulation(false),
        backupAgents(BackupAgentType::WaitForType), drAgents(BackupAgentType::WaitForType), allSwapsDisabled(false) {}

    // Order matters!
    enum KillType {

@@ -99,10 +99,10 @@ public:
                INetworkConnections* net,
                const char* dataFolder,
                const char* coordinationFolder)
      : name(name), locality(locality), startingClass(startingClass), addresses(addresses),
        address(addresses.address), dataFolder(dataFolder), network(net), coordinationFolder(coordinationFolder),
        failed(false), excluded(false), rebooting(false), fault_injection_p1(0), fault_injection_p2(0),
        fault_injection_r(0), machine(0), cleared(false), failedDisk(false) {
      : name(name), coordinationFolder(coordinationFolder), dataFolder(dataFolder), machine(nullptr),
        addresses(addresses), address(addresses.address), locality(locality), startingClass(startingClass),
        failed(false), excluded(false), cleared(false), rebooting(false), network(net), fault_injection_r(0),
        fault_injection_p1(0), fault_injection_p2(0), failedDisk(false) {
        uid = deterministicRandom()->randomUniqueID();
    }

@@ -241,8 +241,8 @@ struct BackupData {
      : myId(id), tag(req.routerTag), totalTags(req.totalTags), startVersion(req.startVersion),
        endVersion(req.endVersion), recruitedEpoch(req.recruitedEpoch), backupEpoch(req.backupEpoch),
        minKnownCommittedVersion(invalidVersion), savedVersion(req.startVersion - 1), popVersion(req.startVersion - 1),
        cc("BackupWorker", myId.toString()), pulledVersion(0), paused(false),
        lock(new FlowLock(SERVER_KNOBS->BACKUP_LOCK_BYTES)) {
        pulledVersion(0), paused(false), lock(new FlowLock(SERVER_KNOBS->BACKUP_LOCK_BYTES)),
        cc("BackupWorker", myId.toString()) {
        cx = openDBOnServer(db, TaskPriority::DefaultEndpoint, LockAware::True);

        specialCounter(cc, "SavedVersion", [this]() { return this->savedVersion; });

@@ -11,6 +11,8 @@ set(FDBSERVER_SRCS
  ConfigDatabaseUnitTests.actor.cpp
  ConfigFollowerInterface.cpp
  ConfigFollowerInterface.h
  ConfigNode.actor.cpp
  ConfigNode.h
  ConflictSet.h
  CoordinatedState.actor.cpp
  CoordinatedState.h

@@ -28,8 +30,6 @@ set(FDBSERVER_SRCS
  FDBExecHelper.actor.cpp
  FDBExecHelper.actor.h
  GrvProxyServer.actor.cpp
  IConfigDatabaseNode.cpp
  IConfigDatabaseNode.h
  IConfigConsumer.cpp
  IConfigConsumer.h
  IDiskQueue.h

@@ -72,8 +72,6 @@ set(FDBSERVER_SRCS
  OnDemandStore.h
  PaxosConfigConsumer.actor.cpp
  PaxosConfigConsumer.h
  PaxosConfigDatabaseNode.actor.cpp
  PaxosConfigDatabaseNode.h
  ProxyCommitData.actor.h
  pubsub.actor.cpp
  pubsub.h

@@ -105,7 +103,6 @@ set(FDBSERVER_SRCS
  ServerDBInfo.h
  SimpleConfigConsumer.actor.cpp
  SimpleConfigConsumer.h
  SimpleConfigDatabaseNode.actor.cpp
  SimulatedCluster.actor.cpp
  SimulatedCluster.h
  SkipList.cpp
@@ -128,15 +128,15 @@ public:
    std::map<NetworkAddress, std::pair<double, OpenDatabaseRequest>> clientStatus;

    DBInfo()
      : masterRegistrationCount(0), recoveryStalled(false), forceRecovery(false), unfinishedRecoveries(0),
        logGenerations(0), cachePopulated(false), clientInfo(new AsyncVar<ClientDBInfo>()), dbInfoCount(0),
        serverInfo(new AsyncVar<ServerDBInfo>()), db(DatabaseContext::create(clientInfo,
                                                                             Future<Void>(),
                                                                             LocalityData(),
                                                                             EnableLocalityLoadBalance::True,
                                                                             TaskPriority::DefaultEndpoint,
                                                                             LockAware::True)) // SOMEDAY: Locality!
    {}
      : clientInfo(new AsyncVar<ClientDBInfo>()), serverInfo(new AsyncVar<ServerDBInfo>()),
        masterRegistrationCount(0), dbInfoCount(0), recoveryStalled(false), forceRecovery(false),
        db(DatabaseContext::create(clientInfo,
                                   Future<Void>(),
                                   LocalityData(),
                                   EnableLocalityLoadBalance::True,
                                   TaskPriority::DefaultEndpoint,
                                   LockAware::True)), // SOMEDAY: Locality!
        unfinishedRecoveries(0), logGenerations(0), cachePopulated(false) {}

    void setDistributor(const DataDistributorInterface& interf) {
        auto newInfo = serverInfo->get();

@@ -1433,12 +1433,12 @@ public:
    bool degraded = false;

    RoleFitness(int bestFit, int worstFit, int count, ProcessClass::ClusterRole role)
      : bestFit((ProcessClass::Fitness)bestFit), worstFit((ProcessClass::Fitness)worstFit), count(count),
        role(role) {}
      : bestFit((ProcessClass::Fitness)bestFit), worstFit((ProcessClass::Fitness)worstFit), role(role),
        count(count) {}

    RoleFitness(int fitness, int count, ProcessClass::ClusterRole role)
      : bestFit((ProcessClass::Fitness)fitness), worstFit((ProcessClass::Fitness)fitness), count(count),
        role(role) {}
      : bestFit((ProcessClass::Fitness)fitness), worstFit((ProcessClass::Fitness)fitness), role(role),
        count(count) {}

    RoleFitness()
      : bestFit(ProcessClass::NeverAssign), worstFit(ProcessClass::NeverAssign), role(ProcessClass::NoRole),

@@ -3061,9 +3061,9 @@ public:
    ClusterControllerData(ClusterControllerFullInterface const& ccInterface,
                          LocalityData const& locality,
                          ServerCoordinators const& coordinators)
      : clusterControllerProcessId(locality.processId()), clusterControllerDcId(locality.dcId()), id(ccInterface.id()),
        ac(false), outstandingRequestChecker(Void()), outstandingRemoteRequestChecker(Void()), gotProcessClasses(false),
        gotFullyRecoveredConfig(false), startTime(now()), goodRecruitmentTime(Never()),
      : gotProcessClasses(false), gotFullyRecoveredConfig(false), clusterControllerProcessId(locality.processId()),
        clusterControllerDcId(locality.dcId()), id(ccInterface.id()), ac(false), outstandingRequestChecker(Void()),
        outstandingRemoteRequestChecker(Void()), startTime(now()), goodRecruitmentTime(Never()),
        goodRemoteRecruitmentTime(Never()), datacenterVersionDifference(0), versionDifferenceUpdated(false),
        recruitingDistributor(false), recruitRatekeeper(false),
        clusterControllerMetrics("ClusterController", id.toString()),

@@ -500,9 +500,7 @@ CommitBatchContext::CommitBatchContext(ProxyCommitData* const pProxyCommitData_,

    localBatchNumber(++pProxyCommitData->localCommitBatchesStarted), toCommit(pProxyCommitData->logSystem),

    committed(trs.size()),

    span("MP:commitBatch"_loc) {
    span("MP:commitBatch"_loc), committed(trs.size()) {

    evaluateBatchSize();
@@ -203,7 +203,7 @@ class ConfigBroadcasterImpl {
    }

    ConfigBroadcasterImpl()
      : id(deterministicRandom()->randomUniqueID()), lastCompactedVersion(0), mostRecentVersion(0),
      : lastCompactedVersion(0), mostRecentVersion(0), id(deterministicRandom()->randomUniqueID()),
        cc("ConfigBroadcaster"), compactRequest("CompactRequest", cc),
        successfulChangeRequest("SuccessfulChangeRequest", cc), failedChangeRequest("FailedChangeRequest", cc),
        snapshotRequest("SnapshotRequest", cc) {

@@ -22,7 +22,7 @@
#include "fdbclient/IConfigTransaction.h"
#include "fdbclient/TestKnobCollection.h"
#include "fdbserver/ConfigBroadcaster.h"
#include "fdbserver/IConfigDatabaseNode.h"
#include "fdbserver/ConfigNode.h"
#include "fdbserver/LocalConfiguration.h"
#include "fdbclient/Tuple.h"
#include "flow/UnitTest.h"

@@ -55,7 +55,7 @@ class WriteToTransactionEnvironment {
    std::string dataDir;
    ConfigTransactionInterface cti;
    ConfigFollowerInterface cfi;
    Reference<IConfigDatabaseNode> node;
    Reference<ConfigNode> node;
    Future<Void> ctiServer;
    Future<Void> cfiServer;
    Version lastWrittenVersion{ 0 };

@@ -65,11 +65,10 @@ class WriteToTransactionEnvironment {
        return StringRef(reinterpret_cast<uint8_t const*>(s.c_str()), s.size());
    }

    ACTOR template <class T>
    static Future<Void> set(WriteToTransactionEnvironment* self,
                            Optional<KeyRef> configClass,
                            T value,
                            KeyRef knobName) {
    ACTOR static Future<Void> set(WriteToTransactionEnvironment* self,
                                  Optional<KeyRef> configClass,
                                  int64_t value,
                                  KeyRef knobName) {
        state Reference<IConfigTransaction> tr = IConfigTransaction::createTestSimple(self->cti);
        auto configKey = encodeConfigKey(configClass, knobName);
        tr->set(configKey, longToValue(value));

@@ -94,13 +93,12 @@ class WriteToTransactionEnvironment {

public:
    WriteToTransactionEnvironment(std::string const& dataDir)
      : dataDir(dataDir), node(IConfigDatabaseNode::createSimple(dataDir)) {
      : dataDir(dataDir), node(makeReference<ConfigNode>(dataDir)) {
        platform::eraseDirectoryRecursive(dataDir);
        setup();
    }

    template <class T>
    Future<Void> set(Optional<KeyRef> configClass, T value, KeyRef knobName = "test_long"_sr) {
    Future<Void> set(Optional<KeyRef> configClass, int64_t value, KeyRef knobName = "test_long"_sr) {
        return set(this, configClass, value, knobName);
    }

@@ -111,7 +109,7 @@ public:
    void restartNode() {
        cfiServer.cancel();
        ctiServer.cancel();
        node = IConfigDatabaseNode::createSimple(dataDir);
        node = makeReference<ConfigNode>(dataDir);
        setup();
    }
@@ -241,8 +239,8 @@ class BroadcasterToLocalConfigEnvironment {

public:
    BroadcasterToLocalConfigEnvironment(std::string const& dataDir, std::string const& configPath)
      : broadcaster(ConfigFollowerInterface{}), cbfi(makeReference<AsyncVar<ConfigBroadcastFollowerInterface>>()),
        readFrom(dataDir, configPath, {}) {}
      : readFrom(dataDir, configPath, {}), cbfi(makeReference<AsyncVar<ConfigBroadcastFollowerInterface>>()),
        broadcaster(ConfigFollowerInterface{}) {}

    Future<Void> setup() { return setup(this); }

@@ -293,7 +291,7 @@ class TransactionEnvironment {
            IConfigTransaction::createTestSimple(self->writeTo.getTransactionInterface());
        state KeySelector begin = firstGreaterOrEqual(configClassKeys.begin);
        state KeySelector end = firstGreaterOrEqual(configClassKeys.end);
        Standalone<RangeResultRef> range = wait(tr->getRange(begin, end, 1000));
        RangeResult range = wait(tr->getRange(begin, end, 1000));
        Standalone<VectorRef<KeyRef>> result;
        for (const auto& kv : range) {
            result.push_back_deep(result.arena(), kv.key);

@@ -312,7 +310,7 @@ class TransactionEnvironment {
        }
        KeySelector begin = firstGreaterOrEqual(keys.begin);
        KeySelector end = firstGreaterOrEqual(keys.end);
        Standalone<RangeResultRef> range = wait(tr->getRange(begin, end, 1000));
        RangeResult range = wait(tr->getRange(begin, end, 1000));
        Standalone<VectorRef<KeyRef>> result;
        for (const auto& kv : range) {
            result.push_back_deep(result.arena(), kv.key);

@@ -371,8 +369,9 @@ class TransactionToLocalConfigEnvironment {

public:
    TransactionToLocalConfigEnvironment(std::string const& dataDir, std::string const& configPath)
      : writeTo(dataDir), readFrom(dataDir, configPath, {}), broadcaster(writeTo.getFollowerInterface()),
        cbfi(makeReference<AsyncVar<ConfigBroadcastFollowerInterface>>()) {}
      : writeTo(dataDir), readFrom(dataDir, configPath, {}),
        cbfi(makeReference<AsyncVar<ConfigBroadcastFollowerInterface>>()), broadcaster(writeTo.getFollowerInterface()) {
    }

    Future<Void> setup() { return setup(this); }
@@ -27,6 +27,7 @@ void ConfigFollowerInterface::setupWellKnownEndpoints() {
                                                 TaskPriority::Coordination);
    getChanges.makeWellKnownEndpoint(WLTOKEN_CONFIGFOLLOWER_GETCHANGES, TaskPriority::Coordination);
    compact.makeWellKnownEndpoint(WLTOKEN_CONFIGFOLLOWER_COMPACT, TaskPriority::Coordination);
    getCommittedVersion.makeWellKnownEndpoint(WLTOKEN_CONFIGFOLLOWER_GETCOMMITTEDVERSION, TaskPriority::Coordination);
}

ConfigFollowerInterface::ConfigFollowerInterface() : _id(deterministicRandom()->randomUniqueID()) {}

@@ -35,7 +36,8 @@ ConfigFollowerInterface::ConfigFollowerInterface(NetworkAddress const& remote)
  : _id(deterministicRandom()->randomUniqueID()),
    getSnapshotAndChanges(Endpoint({ remote }, WLTOKEN_CONFIGFOLLOWER_GETSNAPSHOTANDCHANGES)),
    getChanges(Endpoint({ remote }, WLTOKEN_CONFIGFOLLOWER_GETCHANGES)),
    compact(Endpoint({ remote }, WLTOKEN_CONFIGFOLLOWER_COMPACT)) {}
    compact(Endpoint({ remote }, WLTOKEN_CONFIGFOLLOWER_COMPACT)),
    getCommittedVersion(Endpoint({ remote }, WLTOKEN_CONFIGFOLLOWER_GETCOMMITTEDVERSION)) {}

bool ConfigFollowerInterface::operator==(ConfigFollowerInterface const& rhs) const {
    return _id == rhs._id;

@@ -66,7 +66,6 @@ using VersionedConfigCommitAnnotation = Standalone<VersionedConfigCommitAnnotati
struct ConfigFollowerGetSnapshotAndChangesReply {
    static constexpr FileIdentifier file_identifier = 1734095;
    Version snapshotVersion;
    Version changesVersion;
    std::map<ConfigKey, KnobValue> snapshot;
    // TODO: Share arena
    Standalone<VectorRef<VersionedConfigMutationRef>> changes;

@@ -76,61 +75,64 @@ struct ConfigFollowerGetSnapshotAndChangesReply {
    template <class Snapshot>
    explicit ConfigFollowerGetSnapshotAndChangesReply(
        Version snapshotVersion,
        Version changesVersion,
        Snapshot&& snapshot,
        Standalone<VectorRef<VersionedConfigMutationRef>> changes,
        Standalone<VectorRef<VersionedConfigCommitAnnotationRef>> annotations)
      : snapshotVersion(snapshotVersion), changesVersion(changesVersion), snapshot(std::forward<Snapshot>(snapshot)),
        changes(changes), annotations(annotations) {
        ASSERT_GE(changesVersion, snapshotVersion);
    }
      : snapshotVersion(snapshotVersion), snapshot(std::forward<Snapshot>(snapshot)), changes(changes),
        annotations(annotations) {}

    template <class Ar>
    void serialize(Ar& ar) {
        serializer(ar, snapshotVersion, changesVersion, snapshot, changes);
        serializer(ar, snapshotVersion, snapshot, changes);
    }
};

struct ConfigFollowerGetSnapshotAndChangesRequest {
    static constexpr FileIdentifier file_identifier = 294811;
    ReplyPromise<ConfigFollowerGetSnapshotAndChangesReply> reply;
    Version mostRecentVersion;

    ConfigFollowerGetSnapshotAndChangesRequest() = default;
    explicit ConfigFollowerGetSnapshotAndChangesRequest(Version mostRecentVersion)
      : mostRecentVersion(mostRecentVersion) {}

    template <class Ar>
    void serialize(Ar& ar) {
        serializer(ar, reply);
        serializer(ar, reply, mostRecentVersion);
    }
};
struct ConfigFollowerGetChangesReply {
    static constexpr FileIdentifier file_identifier = 234859;
    Version mostRecentVersion;
    // TODO: Share arena
    Standalone<VectorRef<VersionedConfigMutationRef>> changes;
    Standalone<VectorRef<VersionedConfigCommitAnnotationRef>> annotations;

    ConfigFollowerGetChangesReply() : mostRecentVersion(0) {}
    ConfigFollowerGetChangesReply() = default;
    explicit ConfigFollowerGetChangesReply(Version mostRecentVersion,
                                           Standalone<VectorRef<VersionedConfigMutationRef>> const& changes,
                                           Standalone<VectorRef<VersionedConfigCommitAnnotationRef>> const& annotations)
      : mostRecentVersion(mostRecentVersion), changes(changes), annotations(annotations) {}
      : changes(changes), annotations(annotations) {}

    template <class Ar>
    void serialize(Ar& ar) {
        serializer(ar, mostRecentVersion, changes, annotations);
        serializer(ar, changes, annotations);
    }
};

struct ConfigFollowerGetChangesRequest {
    static constexpr FileIdentifier file_identifier = 178935;
    Version lastSeenVersion{ 0 };
    Version mostRecentVersion{ 0 };
    ReplyPromise<ConfigFollowerGetChangesReply> reply;

    ConfigFollowerGetChangesRequest() = default;
    explicit ConfigFollowerGetChangesRequest(Version lastSeenVersion) : lastSeenVersion(lastSeenVersion) {}
    explicit ConfigFollowerGetChangesRequest(Version lastSeenVersion, Version mostRecentVersion)
      : lastSeenVersion(lastSeenVersion), mostRecentVersion(mostRecentVersion) {}

    template <class Ar>
    void serialize(Ar& ar) {
        serializer(ar, lastSeenVersion, reply);
        serializer(ar, lastSeenVersion, mostRecentVersion, reply);
    }
};

@@ -148,6 +150,29 @@ struct ConfigFollowerCompactRequest {
    }
};

struct ConfigFollowerGetCommittedVersionReply {
    static constexpr FileIdentifier file_identifier = 9214735;
    Version version;

    ConfigFollowerGetCommittedVersionReply() = default;
    explicit ConfigFollowerGetCommittedVersionReply(Version version) : version(version) {}

    template <class Ar>
    void serialize(Ar& ar) {
        serializer(ar, version);
    }
};

struct ConfigFollowerGetCommittedVersionRequest {
    static constexpr FileIdentifier file_identifier = 1093472;
    ReplyPromise<ConfigFollowerGetCommittedVersionReply> reply;

    template <class Ar>
    void serialize(Ar& ar) {
        serializer(ar, reply);
    }
};
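Note: a hedged sketch of how a consumer could query the new endpoint; the getReply pattern mirrors the other requests in this diff, and the function itself is hypothetical:

    // Sketch: ask a config node for its committed version over the new
    // well-known endpoint. cfi is a ConfigFollowerInterface.
    ACTOR Future<Version> fetchCommittedVersion(ConfigFollowerInterface cfi) {
        ConfigFollowerGetCommittedVersionReply reply =
            wait(cfi.getCommittedVersion.getReply(ConfigFollowerGetCommittedVersionRequest{}));
        return reply.version;
    }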
/*
 * Configuration database nodes serve a ConfigFollowerInterface which contains well known endpoints,
 * used by workers to receive configuration database updates

@@ -160,6 +185,7 @@ public:
    RequestStream<ConfigFollowerGetSnapshotAndChangesRequest> getSnapshotAndChanges;
    RequestStream<ConfigFollowerGetChangesRequest> getChanges;
    RequestStream<ConfigFollowerCompactRequest> compact;
    RequestStream<ConfigFollowerGetCommittedVersionRequest> getCommittedVersion;

    ConfigFollowerInterface();
    void setupWellKnownEndpoints();

@@ -170,6 +196,6 @@ public:
    template <class Ar>
    void serialize(Ar& ar) {
        serializer(ar, _id, getSnapshotAndChanges, getChanges, compact);
        serializer(ar, _id, getSnapshotAndChanges, getChanges, compact, getCommittedVersion);
    }
};

@@ -1,5 +1,5 @@
/*
 * SimpleConfigDatabaseNode.actor.cpp
 * ConfigNode.actor.cpp
 *
 * This source file is part of the FoundationDB open source project
 *

@@ -21,7 +21,7 @@
#include <map>

#include "fdbclient/SystemData.h"
#include "fdbserver/SimpleConfigDatabaseNode.h"
#include "fdbserver/ConfigNode.h"
#include "fdbserver/IKeyValueStore.h"
#include "fdbserver/OnDemandStore.h"
#include "flow/Arena.h"

@@ -33,8 +33,7 @@
namespace {

const KeyRef lastCompactedVersionKey = "lastCompactedVersion"_sr;
const KeyRef liveTransactionVersionKey = "liveTransactionVersion"_sr;
const KeyRef committedVersionKey = "committedVersion"_sr;
const KeyRef currentGenerationKey = "currentGeneration"_sr;
const KeyRangeRef kvKeys = KeyRangeRef("kv/"_sr, "kv0"_sr);
const KeyRangeRef mutationKeys = KeyRangeRef("mutation/"_sr, "mutation0"_sr);
const KeyRangeRef annotationKeys = KeyRangeRef("annotation/"_sr, "annotation0"_sr);
@@ -65,9 +64,9 @@ Version getVersionFromVersionedMutationKey(KeyRef versionedMutationKey) {
    return fromBigEndian64(bigEndianResult);
}

} //namespace
} // namespace

TEST_CASE("/fdbserver/ConfigDB/SimpleConfigDatabaseNode/Internal/versionedMutationKeys") {
TEST_CASE("/fdbserver/ConfigDB/ConfigNode/Internal/versionedMutationKeys") {
    std::vector<Key> keys;
    for (Version version = 0; version < 1000; ++version) {
        for (int index = 0; index < 5; ++index) {

@@ -80,7 +79,7 @@ TEST_CASE("/fdbserver/ConfigDB/SimpleConfigDatabaseNode/Internal/versionedMutati
    return Void();
}

TEST_CASE("/fdbserver/ConfigDB/SimpleConfigDatabaseNode/Internal/versionedMutationKeyOrdering") {
TEST_CASE("/fdbserver/ConfigDB/ConfigNode/Internal/versionedMutationKeyOrdering") {
    Standalone<VectorRef<KeyRef>> keys;
    for (Version version = 0; version < 1000; ++version) {
        for (auto index = 0; index < 5; ++index) {

@@ -94,7 +93,7 @@ TEST_CASE("/fdbserver/ConfigDB/SimpleConfigDatabaseNode/Internal/versionedMutati
    return Void();
}

class SimpleConfigDatabaseNodeImpl {
class ConfigNodeImpl {
    UID id;
    OnDemandStore kvStore;
    CounterCollection cc;

@@ -104,6 +103,7 @@ class SimpleConfigDatabaseNodeImpl {
    Counter successfulChangeRequests;
    Counter failedChangeRequests;
    Counter snapshotRequests;
    Counter getCommittedVersionRequests;

    // Transaction counters
    Counter successfulCommits;

@@ -114,31 +114,19 @@ class SimpleConfigDatabaseNodeImpl {
    Counter newVersionRequests;
    Future<Void> logger;

    ACTOR static Future<Version> getLiveTransactionVersion(SimpleConfigDatabaseNodeImpl *self) {
        Optional<Value> value = wait(self->kvStore->readValue(liveTransactionVersionKey));
        state Version liveTransactionVersion = 0;
    ACTOR static Future<ConfigGeneration> getGeneration(ConfigNodeImpl* self) {
        state ConfigGeneration generation;
        Optional<Value> value = wait(self->kvStore->readValue(currentGenerationKey));
        if (value.present()) {
            liveTransactionVersion = BinaryReader::fromStringRef<Version>(value.get(), IncludeVersion());
            generation = BinaryReader::fromStringRef<ConfigGeneration>(value.get(), IncludeVersion());
        } else {
            self->kvStore->set(KeyValueRef(liveTransactionVersionKey, BinaryWriter::toValue(liveTransactionVersion, IncludeVersion())));
            self->kvStore->set(KeyValueRef(currentGenerationKey, BinaryWriter::toValue(generation, IncludeVersion())));
            wait(self->kvStore->commit());
        }
        return liveTransactionVersion;
        return generation;
    }
ACTOR static Future<Version> getCommittedVersion(SimpleConfigDatabaseNodeImpl *self) {
|
||||
Optional<Value> value = wait(self->kvStore->readValue(committedVersionKey));
|
||||
state Version committedVersion = 0;
|
||||
if (value.present()) {
|
||||
committedVersion = BinaryReader::fromStringRef<Version>(value.get(), IncludeVersion());
|
||||
} else {
|
||||
self->kvStore->set(KeyValueRef(committedVersionKey, BinaryWriter::toValue(committedVersion, IncludeVersion())));
|
||||
wait(self->kvStore->commit());
|
||||
}
|
||||
return committedVersion;
|
||||
}
|
||||
|
||||
ACTOR static Future<Version> getLastCompactedVersion(SimpleConfigDatabaseNodeImpl* self) {
|
||||
ACTOR static Future<Version> getLastCompactedVersion(ConfigNodeImpl* self) {
|
||||
Optional<Value> value = wait(self->kvStore->readValue(lastCompactedVersionKey));
|
||||
state Version lastCompactedVersion = 0;
|
||||
if (value.present()) {
|
||||
|
@ -152,12 +140,13 @@ class SimpleConfigDatabaseNodeImpl {
|
|||
}
|
||||
|
||||
// Returns all commit annotations between for commits with version in [startVersion, endVersion]
|
||||
ACTOR static Future<Standalone<VectorRef<VersionedConfigCommitAnnotationRef>>>
|
||||
getAnnotations(SimpleConfigDatabaseNodeImpl* self, Version startVersion, Version endVersion) {
|
||||
ACTOR static Future<Standalone<VectorRef<VersionedConfigCommitAnnotationRef>>> getAnnotations(ConfigNodeImpl* self,
|
||||
Version startVersion,
|
||||
Version endVersion) {
|
||||
Key startKey = versionedAnnotationKey(startVersion);
|
||||
Key endKey = versionedAnnotationKey(endVersion + 1);
|
||||
state KeyRangeRef keys(startKey, endKey);
|
||||
Standalone<RangeResultRef> range = wait(self->kvStore->readRange(keys));
|
||||
RangeResult range = wait(self->kvStore->readRange(keys));
|
||||
Standalone<VectorRef<VersionedConfigCommitAnnotationRef>> result;
|
||||
for (const auto& kv : range) {
|
||||
auto version = getVersionFromVersionedAnnotationKey(kv.key);
|
||||
|
@ -169,14 +158,15 @@ class SimpleConfigDatabaseNodeImpl {
|
|||
}
|
||||
|
||||
// Returns all mutations with version in [startVersion, endVersion]
|
||||
ACTOR static Future<Standalone<VectorRef<VersionedConfigMutationRef>>>
|
||||
getMutations(SimpleConfigDatabaseNodeImpl* self, Version startVersion, Version endVersion) {
|
||||
ACTOR static Future<Standalone<VectorRef<VersionedConfigMutationRef>>> getMutations(ConfigNodeImpl* self,
|
||||
Version startVersion,
|
||||
Version endVersion) {
|
||||
Key startKey = versionedMutationKey(startVersion, 0);
|
||||
Key endKey = versionedMutationKey(endVersion + 1, 0);
|
||||
state KeyRangeRef keys(startKey, endKey);
|
||||
Standalone<RangeResultRef> range = wait(self->kvStore->readRange(keys));
|
||||
RangeResult range = wait(self->kvStore->readRange(keys));
|
||||
Standalone<VectorRef<VersionedConfigMutationRef>> result;
|
||||
for (const auto &kv : range) {
|
||||
for (const auto& kv : range) {
|
||||
auto version = getVersionFromVersionedMutationKey(kv.key);
|
||||
ASSERT_LE(version, endVersion);
|
||||
auto mutation = ObjectReader::fromStringRef<ConfigMutation>(kv.value, IncludeVersion());
|
||||
|
@ -185,19 +175,20 @@ class SimpleConfigDatabaseNodeImpl {
|
|||
return result;
|
||||
}
|
||||
|
||||
ACTOR static Future<Void> getChanges(SimpleConfigDatabaseNodeImpl *self, ConfigFollowerGetChangesRequest req) {
|
||||
ACTOR static Future<Void> getChanges(ConfigNodeImpl* self, ConfigFollowerGetChangesRequest req) {
|
||||
Version lastCompactedVersion = wait(getLastCompactedVersion(self));
|
||||
if (req.lastSeenVersion < lastCompactedVersion) {
|
||||
++self->failedChangeRequests;
|
||||
req.reply.sendError(version_already_compacted());
|
||||
return Void();
|
||||
}
|
||||
state Version committedVersion = wait(getCommittedVersion(self));
|
||||
state Version committedVersion =
|
||||
wait(map(getGeneration(self), [](auto const& gen) { return gen.committedVersion; }));
|
||||
state Standalone<VectorRef<VersionedConfigMutationRef>> versionedMutations =
|
||||
wait(getMutations(self, req.lastSeenVersion + 1, committedVersion));
|
||||
state Standalone<VectorRef<VersionedConfigCommitAnnotationRef>> versionedAnnotations =
|
||||
wait(getAnnotations(self, req.lastSeenVersion + 1, committedVersion));
|
||||
TraceEvent(SevDebug, "ConfigDatabaseNodeSendingChanges")
|
||||
TraceEvent(SevDebug, "ConfigDatabaseNodeSendingChanges", self->id)
|
||||
.detail("ReqLastSeenVersion", req.lastSeenVersion)
|
||||
.detail("CommittedVersion", committedVersion)
|
||||
.detail("NumMutations", versionedMutations.size())
|
||||
|
@ -209,17 +200,19 @@ class SimpleConfigDatabaseNodeImpl {
|
|||
|
||||
// New transactions increment the database's current live version. This effectively serves as a lock, providing
|
||||
// serializability
|
||||
ACTOR static Future<Void> getNewVersion(SimpleConfigDatabaseNodeImpl* self, ConfigTransactionGetVersionRequest req) {
|
||||
state Version currentVersion = wait(getLiveTransactionVersion(self));
|
||||
self->kvStore->set(KeyValueRef(liveTransactionVersionKey, BinaryWriter::toValue(++currentVersion, IncludeVersion())));
|
||||
ACTOR static Future<Void> getNewGeneration(ConfigNodeImpl* self, ConfigTransactionGetGenerationRequest req) {
|
||||
state ConfigGeneration generation = wait(getGeneration(self));
|
||||
++generation.liveVersion;
|
||||
self->kvStore->set(KeyValueRef(currentGenerationKey, BinaryWriter::toValue(generation, IncludeVersion())));
|
||||
wait(self->kvStore->commit());
|
||||
req.reply.send(ConfigTransactionGetVersionReply(currentVersion));
|
||||
req.reply.send(ConfigTransactionGetGenerationReply{ generation });
|
||||
return Void();
|
||||
}
|
||||
|
||||
ACTOR static Future<Void> get(SimpleConfigDatabaseNodeImpl* self, ConfigTransactionGetRequest req) {
|
||||
Version currentVersion = wait(getLiveTransactionVersion(self));
|
||||
if (req.version != currentVersion) {
|
||||
ACTOR static Future<Void> get(ConfigNodeImpl* self, ConfigTransactionGetRequest req) {
|
||||
ConfigGeneration currentGeneration = wait(getGeneration(self));
|
||||
if (req.generation != currentGeneration) {
|
||||
// TODO: Also send information about highest seen version
|
||||
req.reply.sendError(transaction_too_old());
|
||||
return Void();
|
||||
}
|
||||
|
@ -229,9 +222,10 @@ class SimpleConfigDatabaseNodeImpl {
|
|||
if (serializedValue.present()) {
|
||||
value = ObjectReader::fromStringRef<KnobValue>(serializedValue.get(), IncludeVersion());
|
||||
}
|
||||
Standalone<VectorRef<VersionedConfigMutationRef>> versionedMutations = wait(getMutations(self, 0, req.version));
|
||||
for (const auto &versionedMutation : versionedMutations) {
|
||||
const auto &mutation = versionedMutation.mutation;
|
||||
Standalone<VectorRef<VersionedConfigMutationRef>> versionedMutations =
|
||||
wait(getMutations(self, 0, req.generation.committedVersion));
|
||||
for (const auto& versionedMutation : versionedMutations) {
|
||||
const auto& mutation = versionedMutation.mutation;
|
||||
if (mutation.getKey() == req.key) {
|
||||
if (mutation.isSet()) {
|
||||
value = mutation.getValue();
|
||||
|
@ -247,14 +241,13 @@ class SimpleConfigDatabaseNodeImpl {
|
|||
// Retrieve all configuration classes that contain explicitly defined knobs
|
||||
// TODO: Currently it is possible that extra configuration classes may be returned, we
|
||||
// may want to fix this to clean up the contract
|
||||
ACTOR static Future<Void> getConfigClasses(SimpleConfigDatabaseNodeImpl* self,
|
||||
ConfigTransactionGetConfigClassesRequest req) {
|
||||
Version currentVersion = wait(getLiveTransactionVersion(self));
|
||||
if (req.version != currentVersion) {
|
||||
ACTOR static Future<Void> getConfigClasses(ConfigNodeImpl* self, ConfigTransactionGetConfigClassesRequest req) {
|
||||
ConfigGeneration currentGeneration = wait(getGeneration(self));
|
||||
if (req.generation != currentGeneration) {
|
||||
req.reply.sendError(transaction_too_old());
|
||||
return Void();
|
||||
}
|
||||
state Standalone<RangeResultRef> snapshot = wait(self->kvStore->readRange(kvKeys));
|
||||
state RangeResult snapshot = wait(self->kvStore->readRange(kvKeys));
|
||||
state std::set<Key> configClassesSet;
|
||||
for (const auto& kv : snapshot) {
|
||||
auto configKey =
|
||||
|
@ -265,7 +258,7 @@ class SimpleConfigDatabaseNodeImpl {
|
|||
}
|
||||
state Version lastCompactedVersion = wait(getLastCompactedVersion(self));
|
||||
state Standalone<VectorRef<VersionedConfigMutationRef>> mutations =
|
||||
wait(getMutations(self, lastCompactedVersion + 1, req.version));
|
||||
wait(getMutations(self, lastCompactedVersion + 1, req.generation.committedVersion));
|
||||
for (const auto& versionedMutation : mutations) {
|
||||
auto configClass = versionedMutation.mutation.getConfigClass();
|
||||
if (configClass.present()) {
|
||||
|
@ -281,14 +274,14 @@ class SimpleConfigDatabaseNodeImpl {
|
|||
}
|
||||
|
||||
// Retrieve all knobs explicitly defined for the specified configuration class
|
||||
ACTOR static Future<Void> getKnobs(SimpleConfigDatabaseNodeImpl* self, ConfigTransactionGetKnobsRequest req) {
|
||||
Version currentVersion = wait(getLiveTransactionVersion(self));
|
||||
if (req.version != currentVersion) {
|
||||
ACTOR static Future<Void> getKnobs(ConfigNodeImpl* self, ConfigTransactionGetKnobsRequest req) {
|
||||
ConfigGeneration currentGeneration = wait(getGeneration(self));
|
||||
if (req.generation != currentGeneration) {
|
||||
req.reply.sendError(transaction_too_old());
|
||||
return Void();
|
||||
}
|
||||
// FIXME: Filtering after reading from disk is very inefficient
|
||||
state Standalone<RangeResultRef> snapshot = wait(self->kvStore->readRange(kvKeys));
|
||||
state RangeResult snapshot = wait(self->kvStore->readRange(kvKeys));
|
||||
state std::set<Key> knobSet;
|
||||
for (const auto& kv : snapshot) {
|
||||
auto configKey =
|
||||
|
@ -299,7 +292,7 @@ class SimpleConfigDatabaseNodeImpl {
|
|||
}
|
||||
state Version lastCompactedVersion = wait(getLastCompactedVersion(self));
|
||||
state Standalone<VectorRef<VersionedConfigMutationRef>> mutations =
|
||||
wait(getMutations(self, lastCompactedVersion + 1, req.version));
|
||||
wait(getMutations(self, lastCompactedVersion + 1, req.generation.committedVersion));
|
||||
for (const auto& versionedMutation : mutations) {
|
||||
if (versionedMutation.mutation.getConfigClass().template castTo<Key>() == req.configClass) {
|
||||
if (versionedMutation.mutation.isSet()) {
|
||||
|
@ -317,44 +310,45 @@ class SimpleConfigDatabaseNodeImpl {
|
|||
return Void();
|
||||
}
|
||||
|
||||
ACTOR static Future<Void> commit(SimpleConfigDatabaseNodeImpl* self, ConfigTransactionCommitRequest req) {
|
||||
Version currentVersion = wait(getLiveTransactionVersion(self));
|
||||
if (req.version != currentVersion) {
|
||||
ACTOR static Future<Void> commit(ConfigNodeImpl* self, ConfigTransactionCommitRequest req) {
|
||||
ConfigGeneration currentGeneration = wait(getGeneration(self));
|
||||
if (req.generation != currentGeneration) {
|
||||
++self->failedCommits;
|
||||
req.reply.sendError(transaction_too_old());
|
||||
return Void();
|
||||
}
|
||||
int index = 0;
|
||||
for (const auto &mutation : req.mutations) {
|
||||
Key key = versionedMutationKey(req.version, index++);
|
||||
for (const auto& mutation : req.mutations) {
|
||||
Key key = versionedMutationKey(req.generation.liveVersion, index++);
|
||||
Value value = ObjectWriter::toValue(mutation, IncludeVersion());
|
||||
if (mutation.isSet()) {
|
||||
TraceEvent("SimpleConfigDatabaseNodeSetting")
|
||||
TraceEvent("ConfigNodeSetting")
|
||||
.detail("ConfigClass", mutation.getConfigClass())
|
||||
.detail("KnobName", mutation.getKnobName())
|
||||
.detail("Value", mutation.getValue().toString())
|
||||
.detail("Version", req.version);
|
||||
.detail("Version", req.generation.liveVersion);
|
||||
++self->setMutations;
|
||||
} else {
|
||||
++self->clearMutations;
|
||||
}
|
||||
self->kvStore->set(KeyValueRef(key, value));
|
||||
}
|
||||
self->kvStore->set(
|
||||
KeyValueRef(versionedAnnotationKey(req.version), BinaryWriter::toValue(req.annotation, IncludeVersion())));
|
||||
self->kvStore->set(KeyValueRef(committedVersionKey, BinaryWriter::toValue(req.version, IncludeVersion())));
|
||||
self->kvStore->set(KeyValueRef(versionedAnnotationKey(req.generation.liveVersion),
|
||||
BinaryWriter::toValue(req.annotation, IncludeVersion())));
|
||||
ConfigGeneration newGeneration = { req.generation.liveVersion, req.generation.liveVersion };
|
||||
self->kvStore->set(KeyValueRef(currentGenerationKey, BinaryWriter::toValue(newGeneration, IncludeVersion())));
|
||||
wait(self->kvStore->commit());
|
||||
++self->successfulCommits;
|
||||
req.reply.send(Void());
|
||||
return Void();
|
||||
}
|
||||
|
||||
ACTOR static Future<Void> serve(SimpleConfigDatabaseNodeImpl* self, ConfigTransactionInterface const* cti) {
|
||||
ACTOR static Future<Void> serve(ConfigNodeImpl* self, ConfigTransactionInterface const* cti) {
|
||||
loop {
|
||||
choose {
|
||||
when(ConfigTransactionGetVersionRequest req = waitNext(cti->getVersion.getFuture())) {
|
||||
when(ConfigTransactionGetGenerationRequest req = waitNext(cti->getGeneration.getFuture())) {
|
||||
++self->newVersionRequests;
|
||||
wait(getNewVersion(self, req));
|
||||
wait(getNewGeneration(self, req));
|
||||
}
|
||||
when(ConfigTransactionGetRequest req = waitNext(cti->get.getFuture())) {
|
||||
++self->getValueRequests;
|
||||
|
@ -374,22 +368,20 @@ class SimpleConfigDatabaseNodeImpl {
|
|||
}
|
||||
}
|
||||
|
||||
ACTOR static Future<Void> getSnapshotAndChanges(SimpleConfigDatabaseNodeImpl* self,
|
||||
ACTOR static Future<Void> getSnapshotAndChanges(ConfigNodeImpl* self,
|
||||
ConfigFollowerGetSnapshotAndChangesRequest req) {
|
||||
state ConfigFollowerGetSnapshotAndChangesReply reply;
|
||||
Standalone<RangeResultRef> data = wait(self->kvStore->readRange(kvKeys));
|
||||
RangeResult data = wait(self->kvStore->readRange(kvKeys));
|
||||
for (const auto& kv : data) {
|
||||
reply
|
||||
.snapshot[BinaryReader::fromStringRef<ConfigKey>(kv.key.removePrefix(kvKeys.begin), IncludeVersion())] =
|
||||
ObjectReader::fromStringRef<KnobValue>(kv.value, IncludeVersion());
|
||||
}
|
||||
wait(store(reply.snapshotVersion, getLastCompactedVersion(self)));
|
||||
wait(store(reply.changesVersion, getCommittedVersion(self)));
|
||||
wait(store(reply.changes, getMutations(self, reply.snapshotVersion + 1, reply.changesVersion)));
|
||||
wait(store(reply.annotations, getAnnotations(self, reply.snapshotVersion + 1, reply.changesVersion)));
|
||||
wait(store(reply.changes, getMutations(self, reply.snapshotVersion + 1, req.mostRecentVersion)));
|
||||
wait(store(reply.annotations, getAnnotations(self, reply.snapshotVersion + 1, req.mostRecentVersion)));
|
||||
TraceEvent(SevDebug, "ConfigDatabaseNodeGettingSnapshot", self->id)
|
||||
.detail("SnapshotVersion", reply.snapshotVersion)
|
||||
.detail("ChangesVersion", reply.changesVersion)
|
||||
.detail("SnapshotSize", reply.snapshot.size())
|
||||
.detail("ChangesSize", reply.changes.size())
|
||||
.detail("AnnotationsSize", reply.annotations.size());
|
||||
|
@ -400,7 +392,7 @@ class SimpleConfigDatabaseNodeImpl {
|
|||
// Apply mutations from the WAL in mutationKeys into the kvKeys key space.
|
||||
// Periodic compaction prevents the database from growing too large, and improve read performance.
|
||||
// However, commit annotations for compacted mutations are lost
|
||||
ACTOR static Future<Void> compact(SimpleConfigDatabaseNodeImpl* self, ConfigFollowerCompactRequest req) {
|
||||
ACTOR static Future<Void> compact(ConfigNodeImpl* self, ConfigFollowerCompactRequest req) {
|
||||
state Version lastCompactedVersion = wait(getLastCompactedVersion(self));
|
||||
TraceEvent(SevDebug, "ConfigDatabaseNodeCompacting", self->id)
|
||||
.detail("Version", req.version)
|
||||
|
@ -443,7 +435,13 @@ class SimpleConfigDatabaseNodeImpl {
|
|||
return Void();
|
||||
}
|
||||
|
||||
ACTOR static Future<Void> serve(SimpleConfigDatabaseNodeImpl* self, ConfigFollowerInterface const* cfi) {
|
||||
ACTOR static Future<Void> getCommittedVersion(ConfigNodeImpl* self, ConfigFollowerGetCommittedVersionRequest req) {
|
||||
ConfigGeneration generation = wait(getGeneration(self));
|
||||
req.reply.send(ConfigFollowerGetCommittedVersionReply{ generation.committedVersion });
|
||||
return Void();
|
||||
}
|
||||
|
||||
ACTOR static Future<Void> serve(ConfigNodeImpl* self, ConfigFollowerInterface const* cfi) {
|
||||
loop {
|
||||
choose {
|
||||
when(ConfigFollowerGetSnapshotAndChangesRequest req =
|
||||
|
@ -458,22 +456,26 @@ class SimpleConfigDatabaseNodeImpl {
|
|||
++self->compactRequests;
|
||||
wait(compact(self, req));
|
||||
}
|
||||
when(ConfigFollowerGetCommittedVersionRequest req = waitNext(cfi->getCommittedVersion.getFuture())) {
|
||||
++self->getCommittedVersionRequests;
|
||||
wait(getCommittedVersion(self, req));
|
||||
}
|
||||
when(wait(self->kvStore->getError())) { ASSERT(false); }
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public:
|
||||
SimpleConfigDatabaseNodeImpl(std::string const& folder)
|
||||
ConfigNodeImpl(std::string const& folder)
|
||||
: id(deterministicRandom()->randomUniqueID()), kvStore(folder, id, "globalconf-"), cc("ConfigDatabaseNode"),
|
||||
compactRequests("CompactRequests", cc), successfulChangeRequests("SuccessfulChangeRequests", cc),
|
||||
failedChangeRequests("FailedChangeRequests", cc), snapshotRequests("SnapshotRequests", cc),
|
||||
successfulCommits("SuccessfulCommits", cc), failedCommits("FailedCommits", cc),
|
||||
setMutations("SetMutations", cc), clearMutations("ClearMutations", cc),
|
||||
getCommittedVersionRequests("GetCommittedVersionRequests", cc), successfulCommits("SuccessfulCommits", cc),
|
||||
failedCommits("FailedCommits", cc), setMutations("SetMutations", cc), clearMutations("ClearMutations", cc),
|
||||
getValueRequests("GetValueRequests", cc), newVersionRequests("NewVersionRequests", cc) {
|
||||
logger = traceCounters(
|
||||
"ConfigDatabaseNodeMetrics", id, SERVER_KNOBS->WORKER_LOGGING_INTERVAL, &cc, "ConfigDatabaseNode");
|
||||
TraceEvent(SevDebug, "StartingSimpleConfigDatabaseNode", id).detail("KVStoreAlreadyExists", kvStore.exists());
|
||||
TraceEvent(SevDebug, "StartingConfigNode", id).detail("KVStoreAlreadyExists", kvStore.exists());
|
||||
}
|
||||
|
||||
Future<Void> serve(ConfigTransactionInterface const& cti) { return serve(this, &cti); }
|
||||
|
@ -481,15 +483,14 @@ public:
|
|||
Future<Void> serve(ConfigFollowerInterface const& cfi) { return serve(this, &cfi); }
|
||||
};
|
||||
|
||||
SimpleConfigDatabaseNode::SimpleConfigDatabaseNode(std::string const& folder)
|
||||
: _impl(std::make_unique<SimpleConfigDatabaseNodeImpl>(folder)) {}
|
||||
ConfigNode::ConfigNode(std::string const& folder) : _impl(std::make_unique<ConfigNodeImpl>(folder)) {}
|
||||
|
||||
SimpleConfigDatabaseNode::~SimpleConfigDatabaseNode() = default;
|
||||
ConfigNode::~ConfigNode() = default;
|
||||
|
||||
Future<Void> SimpleConfigDatabaseNode::serve(ConfigTransactionInterface const& cti) {
|
||||
Future<Void> ConfigNode::serve(ConfigTransactionInterface const& cti) {
|
||||
return impl().serve(cti);
|
||||
}
|
||||
|
||||
Future<Void> SimpleConfigDatabaseNode::serve(ConfigFollowerInterface const& cfi) {
|
||||
Future<Void> ConfigNode::serve(ConfigFollowerInterface const& cfi) {
|
||||
return impl().serve(cfi);
|
||||
}
|
|
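Throughout this file the separate liveTransactionVersion/committedVersion keys are replaced by a single ConfigGeneration value that is read, bumped, and rewritten atomically with each commit. A hedged sketch of the shape this code appears to assume (the real ConfigGeneration is defined elsewhere in fdbclient and may differ; Version as int64_t and the comparison operators here are assumptions):

#include <cstdint>

using Version = int64_t; // FDB versions are 64-bit integers

struct ConfigGeneration {
    Version liveVersion = 0;      // incremented by getNewGeneration(), acting as the transaction lock
    Version committedVersion = 0; // advanced by commit()

    bool operator==(ConfigGeneration const& rhs) const {
        return liveVersion == rhs.liveVersion && committedVersion == rhs.committedVersion;
    }
    bool operator!=(ConfigGeneration const& rhs) const { return !(*this == rhs); }
};

int main() {
    ConfigGeneration gen;
    ++gen.liveVersion;                                          // what getNewGeneration() does
    ConfigGeneration after{ gen.liveVersion, gen.liveVersion }; // what commit() writes back
    return after != gen ? 0 : 1; // a request carrying the stale generation would be rejected
}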
@@ -1,5 +1,5 @@
/*
- * PaxosConfigDatabaseNode.h
+ * ConfigNode.h
*
* This source file is part of the FoundationDB open source project
*

@@ -20,17 +20,19 @@

#pragma once

- #include "fdbserver/IConfigDatabaseNode.h"
#include <string>

- /*
- * Fault-tolerant configuration database node implementation
- */
- class PaxosConfigDatabaseNode : public IConfigDatabaseNode {
- std::unique_ptr<class PaxosConfigDatabaseNodeImpl> impl;
+ #include "fdbclient/ConfigTransactionInterface.h"
+ #include "fdbserver/ConfigFollowerInterface.h"
+
+ class ConfigNode : public ReferenceCounted<ConfigNode> {
+ std::unique_ptr<class ConfigNodeImpl> _impl;
+ ConfigNodeImpl const& impl() const { return *_impl; }
+ ConfigNodeImpl& impl() { return *_impl; }

public:
- PaxosConfigDatabaseNode(std::string const& folder);
- ~PaxosConfigDatabaseNode();
- Future<Void> serve(ConfigTransactionInterface const&) override;
- Future<Void> serve(ConfigFollowerInterface const&) override;
+ ConfigNode(std::string const& folder);
+ ~ConfigNode();
+ Future<Void> serve(ConfigTransactionInterface const&);
+ Future<Void> serve(ConfigFollowerInterface const&);
};
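ConfigNode keeps the pimpl layout of the headers it replaces: a forward-declared ConfigNodeImpl behind a std::unique_ptr, with the destructor defaulted in the .cpp above (`ConfigNode::~ConfigNode() = default;`). That out-of-line default is required, not stylistic, because deleting through a unique_ptr needs a complete type. A self-contained illustration with hypothetical names:

#include <memory>

class Widget {
    std::unique_ptr<class WidgetImpl> _impl; // WidgetImpl is only forward-declared here
public:
    Widget();
    ~Widget(); // declared here, defined below where WidgetImpl is complete
};

class WidgetImpl {
    int state = 0; // stands in for ConfigNodeImpl's kvStore, counters, etc.
};

// In a real project these two definitions live in the .cpp file:
Widget::Widget() : _impl(std::make_unique<WidgetImpl>()) {}
Widget::~Widget() = default; // unique_ptr's deleter is instantiated here, with WidgetImpl complete

int main() {
    Widget w; // destroys cleanly; with the destructor defaulted in the header, a .cpp/.h
}             // split would fail to compile wherever Widget is destroyed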
@@ -20,7 +20,7 @@

#include "fdbclient/ConfigTransactionInterface.h"
#include "fdbserver/CoordinationInterface.h"
- #include "fdbserver/IConfigDatabaseNode.h"
+ #include "fdbserver/ConfigNode.h"
#include "fdbserver/IKeyValueStore.h"
#include "fdbserver/Knobs.h"
#include "fdbserver/OnDemandStore.h"

@@ -727,7 +727,7 @@ ACTOR Future<Void> coordinationServer(std::string dataFolder,
state OnDemandStore store(dataFolder, myID, "coordination-");
state ConfigTransactionInterface configTransactionInterface;
state ConfigFollowerInterface configFollowerInterface;
- state Reference<IConfigDatabaseNode> configDatabaseNode;
+ state Reference<ConfigNode> configNode;
state Future<Void> configDatabaseServer = Never();
TraceEvent("CoordinationServer", myID)
.detail("MyInterfaceAddr", myInterface.read.getEndpoint().getPrimaryAddress())

@@ -736,13 +736,9 @@ ACTOR Future<Void> coordinationServer(std::string dataFolder,
if (useConfigDB != UseConfigDB::DISABLED) {
configTransactionInterface.setupWellKnownEndpoints();
configFollowerInterface.setupWellKnownEndpoints();
- if (useConfigDB == UseConfigDB::SIMPLE) {
- configDatabaseNode = IConfigDatabaseNode::createSimple(dataFolder);
- } else {
- configDatabaseNode = IConfigDatabaseNode::createPaxos(dataFolder);
- }
+ configNode = makeReference<ConfigNode>(dataFolder);
configDatabaseServer =
- configDatabaseNode->serve(configTransactionInterface) || configDatabaseNode->serve(configFollowerInterface);
+ configNode->serve(configTransactionInterface) || configNode->serve(configFollowerInterface);
}

try {
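coordinationServer now constructs the node directly instead of going through the deleted factory. The `serve(cti) || serve(cfi)` expression relies on flow's future combinator, where (as far as the surrounding usage suggests) `a || b` yields a future that becomes ready as soon as either input does, so configDatabaseServer completes if either serve loop exits. A rough standard-C++ analogue of that either-completes composition (flow itself is single-threaded and actor-based; std::async here is purely illustrative):

#include <chrono>
#include <future>
#include <iostream>
#include <thread>

int main() {
    using namespace std::chrono_literals;
    // Stand-ins for configNode->serve(configTransactionInterface) and
    // configNode->serve(configFollowerInterface).
    auto serveTxn = std::async(std::launch::async, [] { std::this_thread::sleep_for(50ms); });
    auto serveFollower = std::async(std::launch::async, [] { std::this_thread::sleep_for(10ms); });

    // Crude stand-in for flow's `a || b`: finish when either future is ready.
    while (serveTxn.wait_for(1ms) != std::future_status::ready &&
           serveFollower.wait_for(1ms) != std::future_status::ready) {
    }
    std::cout << "one serve loop finished; configDatabaseServer would be done\n";
}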
@@ -35,6 +35,7 @@ constexpr UID WLTOKEN_GENERATIONREG_WRITE(-1, 9);
constexpr UID WLTOKEN_CONFIGFOLLOWER_GETSNAPSHOTANDCHANGES(-1, 17);
constexpr UID WLTOKEN_CONFIGFOLLOWER_GETCHANGES(-1, 18);
constexpr UID WLTOKEN_CONFIGFOLLOWER_COMPACT(-1, 19);
+ constexpr UID WLTOKEN_CONFIGFOLLOWER_GETCOMMITTEDVERSION(-1, 20);

struct GenerationRegInterface {
constexpr static FileIdentifier file_identifier = 16726744;
@@ -97,7 +97,7 @@ struct OldTLogCoreData {
std::set<int8_t> pseudoLocalities;
LogEpoch epoch;

- OldTLogCoreData() : epochBegin(0), epochEnd(0), logRouterTags(0), txsTags(0), epoch(0) {}
+ OldTLogCoreData() : logRouterTags(0), txsTags(0), epochBegin(0), epochEnd(0), epoch(0) {}
explicit OldTLogCoreData(const OldLogData&);

bool operator==(const OldTLogCoreData& rhs) const {
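This reorder, and the many like it below (TCServerInfo, DDTeamCollection, DDQueueData, the disk-queue and key-value-store constructors), only rewrites member-initializer lists to match declaration order; behavior is unchanged. C++ always initializes members in declaration order regardless of how the list is written, and GCC/Clang's -Wreorder flags the mismatch. A minimal demonstration:

#include <iostream>

static int step = 0;
struct Probe {
    int initializedAt;
    Probe() : initializedAt(++step) {}
};

struct S {
    Probe a; // declared first, so constructed first
    Probe b; // constructed second
    S() : b(), a() {} // list order is ignored; -Wreorder warns about exactly this
};

int main() {
    S s;
    std::cout << "a at step " << s.a.initializedAt   // prints 1
              << ", b at step " << s.b.initializedAt // prints 2
              << '\n';
}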
@@ -87,10 +87,10 @@ struct TCServerInfo : public ReferenceCounted<TCServerInfo> {
bool inDesiredDC,
Reference<LocalitySet> storageServerSet,
Version addedVersion = 0)
- : id(ssi.id()), collection(collection), lastKnownInterface(ssi), lastKnownClass(processClass),
- dataInFlightToServer(0), onInterfaceChanged(interfaceChanged.getFuture()), onRemoved(removed.getFuture()),
- inDesiredDC(inDesiredDC), storeType(KeyValueStoreType::END), onTSSPairRemoved(Never()),
- addedVersion(addedVersion) {
+ : id(ssi.id()), addedVersion(addedVersion), collection(collection), lastKnownInterface(ssi),
+ lastKnownClass(processClass), dataInFlightToServer(0), onInterfaceChanged(interfaceChanged.getFuture()),
+ onRemoved(removed.getFuture()), onTSSPairRemoved(Never()), inDesiredDC(inDesiredDC),
+ storeType(KeyValueStoreType::END) {

if (!ssi.isTss()) {
localityEntry = ((LocalityMap<UID>*)storageServerSet.getPtr())->add(ssi.locality, &id);

@@ -187,7 +187,7 @@ public:
Future<Void> tracker;

explicit TCTeamInfo(vector<Reference<TCServerInfo>> const& servers)
- : servers(servers), healthy(true), priority(SERVER_KNOBS->PRIORITY_TEAM_HEALTHY), wrongConfiguration(false),
+ : servers(servers), healthy(true), wrongConfiguration(false), priority(SERVER_KNOBS->PRIORITY_TEAM_HEALTHY),
id(deterministicRandom()->randomUniqueID()) {
if (servers.empty()) {
TraceEvent(SevInfo, "ConstructTCTeamFromEmptyServers").log();

@@ -377,8 +377,8 @@ struct ServerStatus {
ServerStatus()
: isWiggling(false), isFailed(true), isUndesired(false), isWrongConfiguration(false), initialized(false) {}
ServerStatus(bool isFailed, bool isUndesired, bool isWiggling, LocalityData const& locality)
- : isFailed(isFailed), isUndesired(isUndesired), locality(locality), isWrongConfiguration(false),
- initialized(true), isWiggling(isWiggling) {}
+ : isWiggling(isWiggling), isFailed(isFailed), isUndesired(isUndesired), isWrongConfiguration(false),
+ initialized(true), locality(locality) {}
bool isUnhealthy() const { return isFailed || isUndesired; }
const char* toString() const {
return isFailed ? "Failed" : isUndesired ? "Undesired" : isWiggling ? "Wiggling" : "Healthy";

@@ -737,22 +737,22 @@ struct DDTeamCollection : ReferenceCounted<DDTeamCollection> {
PromiseStream<GetMetricsRequest> getShardMetrics,
Promise<UID> removeFailedServer,
PromiseStream<Promise<int>> getUnhealthyRelocationCount)
- : cx(cx), distributorId(distributorId), lock(lock), output(output),
- shardsAffectedByTeamFailure(shardsAffectedByTeamFailure), doBuildTeams(true), lastBuildTeamsFailed(false),
- teamBuilder(Void()), badTeamRemover(Void()), checkInvalidLocalities(Void()), wrongStoreTypeRemover(Void()),
- configuration(configuration), readyToStart(readyToStart), clearHealthyZoneFuture(true),
- checkTeamDelay(delay(SERVER_KNOBS->CHECK_TEAM_DELAY, TaskPriority::DataDistribution)),
+ : cx(cx), distributorId(distributorId), configuration(configuration), doBuildTeams(true),
+ lastBuildTeamsFailed(false), teamBuilder(Void()), lock(lock), output(output), unhealthyServers(0),
+ shardsAffectedByTeamFailure(shardsAffectedByTeamFailure),
initialFailureReactionDelay(
delayed(readyToStart, SERVER_KNOBS->INITIAL_FAILURE_REACTION_DELAY, TaskPriority::DataDistribution)),
- healthyTeamCount(0), storageServerSet(new LocalityMap<UID>()),
initializationDoneActor(logOnCompletion(readyToStart && initialFailureReactionDelay, this)),
- optimalTeamCount(0), recruitingStream(0), restartRecruiting(SERVER_KNOBS->DEBOUNCE_RECRUITING_DELAY),
- unhealthyServers(0), includedDCs(includedDCs), otherTrackedDCs(otherTrackedDCs),
- zeroHealthyTeams(zeroHealthyTeams), zeroOptimalTeams(true), primary(primary), isTssRecruiting(false),
- medianAvailableSpace(SERVER_KNOBS->MIN_AVAILABLE_SPACE_RATIO), lastMedianAvailableSpaceUpdate(0),
- processingUnhealthy(processingUnhealthy), lowestUtilizationTeam(0), highestUtilizationTeam(0),
- getShardMetrics(getShardMetrics), removeFailedServer(removeFailedServer),
- getUnhealthyRelocationCount(getUnhealthyRelocationCount) {
+ recruitingStream(0), restartRecruiting(SERVER_KNOBS->DEBOUNCE_RECRUITING_DELAY), healthyTeamCount(0),
+ zeroHealthyTeams(zeroHealthyTeams), optimalTeamCount(0), zeroOptimalTeams(true), isTssRecruiting(false),
+ includedDCs(includedDCs), otherTrackedDCs(otherTrackedDCs), primary(primary),
+ processingUnhealthy(processingUnhealthy), readyToStart(readyToStart),
+ checkTeamDelay(delay(SERVER_KNOBS->CHECK_TEAM_DELAY, TaskPriority::DataDistribution)), badTeamRemover(Void()),
+ checkInvalidLocalities(Void()), wrongStoreTypeRemover(Void()), storageServerSet(new LocalityMap<UID>()),
+ clearHealthyZoneFuture(true), medianAvailableSpace(SERVER_KNOBS->MIN_AVAILABLE_SPACE_RATIO),
+ lastMedianAvailableSpaceUpdate(0), lowestUtilizationTeam(0), highestUtilizationTeam(0),
+ getShardMetrics(getShardMetrics), getUnhealthyRelocationCount(getUnhealthyRelocationCount),
+ removeFailedServer(removeFailedServer) {
if (!primary || configuration.usableRegions == 1) {
TraceEvent("DDTrackerStarting", distributorId).detail("State", "Inactive").trackLatest("DDTrackerStarting");
}

@@ -3967,7 +3967,6 @@ ACTOR Future<Void> updateNextWigglingStoragePID(DDTeamCollection* teamCollection
ACTOR Future<Void> perpetualStorageWiggleIterator(AsyncVar<bool>* stopSignal,
FutureStream<Void> finishStorageWiggleSignal,
DDTeamCollection* teamCollection) {
- state int lastFinishTime = now();
loop {
choose {
when(wait(stopSignal->onChange())) {}

@@ -4047,8 +4046,7 @@ ACTOR Future<Void> clusterHealthCheckForPerpetualWiggle(DDTeamCollection* self,
// cluster is unhealthy and restarted once the cluster is healthy again.
ACTOR Future<Void> perpetualStorageWiggler(AsyncVar<bool>* stopSignal,
PromiseStream<Void> finishStorageWiggleSignal,
- DDTeamCollection* self,
- const DDEnabledState* ddEnabledState) {
+ DDTeamCollection* self) {
state Future<Void> watchFuture = Never();
state Future<Void> moveFinishFuture = Never();
state int extraTeamCount = 0;

@@ -4132,8 +4130,7 @@ ACTOR Future<Void> perpetualStorageWiggler(AsyncVar<bool>* stopSignal,
// This coroutine sets a watch to monitor the value change of `perpetualStorageWiggleKey` which is controlled by command
// `configure perpetual_storage_wiggle=$value` if the value is 1, this actor start 2 actors,
// `perpetualStorageWiggleIterator` and `perpetualStorageWiggler`. Otherwise, it sends stop signal to them.
- ACTOR Future<Void> monitorPerpetualStorageWiggle(DDTeamCollection* teamCollection,
- const DDEnabledState* ddEnabledState) {
+ ACTOR Future<Void> monitorPerpetualStorageWiggle(DDTeamCollection* teamCollection) {
state int speed = 0;
state AsyncVar<bool> stopWiggleSignal(true);
state PromiseStream<Void> finishStorageWiggleSignal;

@@ -4158,8 +4155,8 @@ ACTOR Future<Void> monitorPerpetualStorageWiggle(DDTeamCollectio
stopWiggleSignal.set(false);
collection.add(perpetualStorageWiggleIterator(
&stopWiggleSignal, finishStorageWiggleSignal.getFuture(), teamCollection));
- collection.add(perpetualStorageWiggler(
- &stopWiggleSignal, finishStorageWiggleSignal, teamCollection, ddEnabledState));
+ collection.add(
+ perpetualStorageWiggler(&stopWiggleSignal, finishStorageWiggleSignal, teamCollection));
TraceEvent("PerpetualStorageWiggleOpen", teamCollection->distributorId).log();
} else if (speed == 0) {
if (!stopWiggleSignal.get()) {

@@ -4987,7 +4984,7 @@ struct TSSPairState : ReferenceCounted<TSSPairState>, NonCopyable {
TSSPairState() : active(false) {}

TSSPairState(const LocalityData& locality)
- : active(true), dcId(locality.dcId()), dataHallId(locality.dataHallId()) {}
+ : dcId(locality.dcId()), dataHallId(locality.dataHallId()), active(true) {}

bool inDataZone(const LocalityData& locality) {
return locality.dcId() == dcId && locality.dataHallId() == dataHallId;

@@ -5564,7 +5561,7 @@ ACTOR Future<Void> dataDistributionTeamCollection(Reference<DDTeamCollection> te
self->addActor.send(waitHealthyZoneChange(self));

if (self->primary) { // the primary dc also handle the satellite dc's perpetual wiggling
- self->addActor.send(monitorPerpetualStorageWiggle(self, ddEnabledState));
+ self->addActor.send(monitorPerpetualStorageWiggle(self));
}
// SOMEDAY: Monitor FF/serverList for (new) servers that aren't in allServers and add or remove them
@@ -50,7 +50,7 @@ struct RelocateData {
TraceInterval interval;

RelocateData()
- : startTime(-1), priority(-1), boundaryPriority(-1), healthPriority(-1), workFactor(0), wantsNewServers(false),
+ : priority(-1), boundaryPriority(-1), healthPriority(-1), startTime(-1), workFactor(0), wantsNewServers(false),
interval("QueuedRelocation") {}
explicit RelocateData(RelocateShard const& rs)
: keys(rs.keys), priority(rs.priority), boundaryPriority(isBoundaryPriority(rs.priority) ? rs.priority : -1),

@@ -448,14 +448,14 @@ struct DDQueueData {
FutureStream<RelocateShard> input,
PromiseStream<GetMetricsRequest> getShardMetrics,
double* lastLimited)
- : activeRelocations(0), queuedRelocations(0), bytesWritten(0), teamCollections(teamCollections),
- shardsAffectedByTeamFailure(sABTF), getAverageShardBytes(getAverageShardBytes), distributorId(mid), lock(lock),
- cx(cx), teamSize(teamSize), singleRegionTeamSize(singleRegionTeamSize), output(output), input(input),
- getShardMetrics(getShardMetrics), startMoveKeysParallelismLock(SERVER_KNOBS->DD_MOVE_KEYS_PARALLELISM),
+ : distributorId(mid), lock(lock), cx(cx), teamCollections(teamCollections), shardsAffectedByTeamFailure(sABTF),
+ getAverageShardBytes(getAverageShardBytes),
+ startMoveKeysParallelismLock(SERVER_KNOBS->DD_MOVE_KEYS_PARALLELISM),
finishMoveKeysParallelismLock(SERVER_KNOBS->DD_MOVE_KEYS_PARALLELISM),
- fetchSourceLock(new FlowLock(SERVER_KNOBS->DD_FETCH_SOURCE_PARALLELISM)), lastLimited(lastLimited),
- suppressIntervals(0), lastInterval(0), unhealthyRelocations(0),
- rawProcessingUnhealthy(new AsyncVar<bool>(false)) {}
+ fetchSourceLock(new FlowLock(SERVER_KNOBS->DD_FETCH_SOURCE_PARALLELISM)), activeRelocations(0),
+ queuedRelocations(0), bytesWritten(0), teamSize(teamSize), singleRegionTeamSize(singleRegionTeamSize),
+ output(output), input(input), getShardMetrics(getShardMetrics), lastLimited(lastLimited), lastInterval(0),
+ suppressIntervals(0), rawProcessingUnhealthy(new AsyncVar<bool>(false)), unhealthyRelocations(0) {}

void validate() {
if (EXPENSIVE_VALIDATION) {
@@ -123,10 +123,10 @@ struct DataDistributionTracker {
Reference<AsyncVar<bool>> anyZeroHealthyTeams,
KeyRangeMap<ShardTrackedData>& shards,
bool& trackerCancelled)
- : cx(cx), distributorId(distributorId), dbSizeEstimate(new AsyncVar<int64_t>()), systemSizeEstimate(0),
- maxShardSize(new AsyncVar<Optional<int64_t>>()), sizeChanges(false), readyToStart(readyToStart), output(output),
- shardsAffectedByTeamFailure(shardsAffectedByTeamFailure), anyZeroHealthyTeams(anyZeroHealthyTeams),
- shards(shards), trackerCancelled(trackerCancelled) {}
+ : cx(cx), distributorId(distributorId), shards(shards), sizeChanges(false), systemSizeEstimate(0),
+ dbSizeEstimate(new AsyncVar<int64_t>()), maxShardSize(new AsyncVar<Optional<int64_t>>()), output(output),
+ shardsAffectedByTeamFailure(shardsAffectedByTeamFailure), readyToStart(readyToStart),
+ anyZeroHealthyTeams(anyZeroHealthyTeams), trackerCancelled(trackerCancelled) {}

~DataDistributionTracker() {
trackerCancelled = true;
@@ -239,19 +239,19 @@ public:

// construct root node
DecodedNode(Node* raw, const T* prev, const T* next, Arena& arena, bool large)
- : raw(raw), parent(nullptr), otherAncestor(nullptr), leftChild(nullptr), rightChild(nullptr), prev(prev),
- next(next), item(raw->delta(large).apply(raw->delta(large).getPrefixSource() ? *prev : *next, arena)),
- large(large) {
+ : large(large), raw(raw), parent(nullptr), otherAncestor(nullptr), leftChild(nullptr), rightChild(nullptr),
+ prev(prev), next(next),
+ item(raw->delta(large).apply(raw->delta(large).getPrefixSource() ? *prev : *next, arena)) {
// printf("DecodedNode1 raw=%p delta=%s\n", raw, raw->delta(large).toString().c_str());
}

// Construct non-root node
// wentLeft indicates that we've gone left to get to the raw node.
DecodedNode(Node* raw, DecodedNode* parent, bool wentLeft, Arena& arena)
- : parent(parent), large(parent->large),
- otherAncestor(wentLeft ? parent->getPrevAncestor() : parent->getNextAncestor()),
- prev(wentLeft ? parent->prev : &parent->item), next(wentLeft ? &parent->item : parent->next),
- leftChild(nullptr), rightChild(nullptr), raw(raw),
+ : large(parent->large), raw(raw), parent(parent),
+ otherAncestor(wentLeft ? parent->getPrevAncestor() : parent->getNextAncestor()), leftChild(nullptr),
+ rightChild(nullptr), prev(wentLeft ? parent->prev : &parent->item),
+ next(wentLeft ? &parent->item : parent->next),
item(raw->delta(large).apply(raw->delta(large).getPrefixSource() ? *prev : *next, arena)) {
// printf("DecodedNode2 raw=%p delta=%s\n", raw, raw->delta(large).toString().c_str());
}

@@ -1134,12 +1134,12 @@ public:
struct Cursor {
Cursor() : cache(nullptr), nodeIndex(-1) {}

- Cursor(DecodeCache* cache, DeltaTree2* tree) : cache(cache), tree(tree), nodeIndex(-1) {}
+ Cursor(DecodeCache* cache, DeltaTree2* tree) : tree(tree), cache(cache), nodeIndex(-1) {}

- Cursor(DecodeCache* cache, DeltaTree2* tree, int nodeIndex) : cache(cache), tree(tree), nodeIndex(nodeIndex) {}
+ Cursor(DecodeCache* cache, DeltaTree2* tree, int nodeIndex) : tree(tree), cache(cache), nodeIndex(nodeIndex) {}

// Copy constructor does not copy item because normally a copied cursor will be immediately moved.
- Cursor(const Cursor& c) : cache(c.cache), tree(c.tree), nodeIndex(c.nodeIndex) {}
+ Cursor(const Cursor& c) : tree(c.tree), cache(c.cache), nodeIndex(c.nodeIndex) {}

Cursor next() const {
Cursor c = *this;
@@ -168,11 +168,11 @@ private:
class RawDiskQueue_TwoFiles : public Tracked<RawDiskQueue_TwoFiles> {
public:
RawDiskQueue_TwoFiles(std::string basename, std::string fileExtension, UID dbgid, int64_t fileSizeWarningLimit)
- : basename(basename), fileExtension(fileExtension), onError(delayed(error.getFuture())),
- onStopped(stopped.getFuture()), readingFile(-1), readingPage(-1), writingPos(-1), dbgid(dbgid),
- dbg_file0BeginSeq(0), fileExtensionBytes(SERVER_KNOBS->DISK_QUEUE_FILE_EXTENSION_BYTES),
- fileShrinkBytes(SERVER_KNOBS->DISK_QUEUE_FILE_SHRINK_BYTES), readingBuffer(dbgid), readyToPush(Void()),
- fileSizeWarningLimit(fileSizeWarningLimit), lastCommit(Void()), isFirstCommit(true) {
+ : basename(basename), fileExtension(fileExtension), dbgid(dbgid), dbg_file0BeginSeq(0),
+ fileSizeWarningLimit(fileSizeWarningLimit), onError(delayed(error.getFuture())), onStopped(stopped.getFuture()),
+ readyToPush(Void()), lastCommit(Void()), isFirstCommit(true), readingBuffer(dbgid), readingFile(-1),
+ readingPage(-1), writingPos(-1), fileExtensionBytes(SERVER_KNOBS->DISK_QUEUE_FILE_EXTENSION_BYTES),
+ fileShrinkBytes(SERVER_KNOBS->DISK_QUEUE_FILE_SHRINK_BYTES) {
if (BUGGIFY)
fileExtensionBytes = _PAGE_SIZE * deterministicRandom()->randomSkewedUInt32(1, 10 << 10);
if (BUGGIFY)

@@ -878,9 +878,9 @@ public:
DiskQueueVersion diskQueueVersion,
int64_t fileSizeWarningLimit)
: rawQueue(new RawDiskQueue_TwoFiles(basename, fileExtension, dbgid, fileSizeWarningLimit)), dbgid(dbgid),
- diskQueueVersion(diskQueueVersion), anyPopped(false), nextPageSeq(0), poppedSeq(0), lastPoppedSeq(0),
- nextReadLocation(-1), readBufPage(nullptr), readBufPos(0), pushed_page_buffer(nullptr), recovered(false),
- initialized(false), lastCommittedSeq(-1), warnAlwaysForMemory(true) {}
+ diskQueueVersion(diskQueueVersion), anyPopped(false), warnAlwaysForMemory(true), nextPageSeq(0), poppedSeq(0),
+ lastPoppedSeq(0), lastCommittedSeq(-1), pushed_page_buffer(nullptr), recovered(false), initialized(false),
+ nextReadLocation(-1), readBufPage(nullptr), readBufPos(0) {}

location push(StringRef contents) override {
ASSERT(recovered);
@@ -81,8 +81,8 @@ struct GrvProxyStats {

// Current stats maintained for a given grv proxy server
explicit GrvProxyStats(UID id)
- : cc("GrvProxyStats", id.toString()), recentRequests(0), lastBucketBegin(now()),
- bucketInterval(FLOW_KNOBS->BASIC_LOAD_BALANCE_UPDATE_RATE / FLOW_KNOBS->BASIC_LOAD_BALANCE_BUCKETS),
+ : cc("GrvProxyStats", id.toString()),
txnRequestIn("TxnRequestIn", cc), txnRequestOut("TxnRequestOut", cc), txnRequestErrors("TxnRequestErrors", cc),
txnStartIn("TxnStartIn", cc), txnStartOut("TxnStartOut", cc), txnStartBatch("TxnStartBatch", cc),
txnSystemPriorityStartIn("TxnSystemPriorityStartIn", cc),

@@ -102,6 +102,7 @@ struct GrvProxyStats {
id,
SERVER_KNOBS->LATENCY_METRICS_LOGGING_INTERVAL,
SERVER_KNOBS->LATENCY_SAMPLE_SIZE),
+ grvLatencyBands("GRVLatencyBands", id, SERVER_KNOBS->STORAGE_LOGGING_DELAY),
grvLatencySample("GRVLatencyMetrics",
id,
SERVER_KNOBS->LATENCY_METRICS_LOGGING_INTERVAL,

@@ -110,7 +111,8 @@ struct GrvProxyStats {
id,
SERVER_KNOBS->LATENCY_METRICS_LOGGING_INTERVAL,
SERVER_KNOBS->LATENCY_SAMPLE_SIZE),
- grvLatencyBands("GRVLatencyBands", id, SERVER_KNOBS->STORAGE_LOGGING_DELAY) {
+ recentRequests(0), lastBucketBegin(now()),
+ bucketInterval(FLOW_KNOBS->BASIC_LOAD_BALANCE_UPDATE_RATE / FLOW_KNOBS->BASIC_LOAD_BALANCE_BUCKETS) {
// The rate at which the limit(budget) is allowed to grow.
specialCounter(cc, "SystemGRVQueueSize", [this]() { return this->systemGRVQueueSize; });
specialCounter(cc, "DefaultGRVQueueSize", [this]() { return this->defaultGRVQueueSize; });
@@ -1,31 +0,0 @@
- /*
- * IConfigDatabaseNode.actor.cpp
- *
- * This source file is part of the FoundationDB open source project
- *
- * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
- #include "fdbserver/IConfigDatabaseNode.h"
- #include "fdbserver/PaxosConfigDatabaseNode.h"
- #include "fdbserver/SimpleConfigDatabaseNode.h"
-
- Reference<IConfigDatabaseNode> IConfigDatabaseNode::createSimple(std::string const& folder) {
- return makeReference<SimpleConfigDatabaseNode>(folder);
- }
-
- Reference<IConfigDatabaseNode> IConfigDatabaseNode::createPaxos(std::string const& folder) {
- return makeReference<PaxosConfigDatabaseNode>(folder);
- }
@@ -1,41 +0,0 @@
- /*
- * IConfigDatabaseNode.h
- *
- * This source file is part of the FoundationDB open source project
- *
- * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
- #pragma once
-
- #include "fdbclient/ConfigTransactionInterface.h"
- #include "fdbserver/ConfigFollowerInterface.h"
- #include "flow/FastRef.h"
- #include "flow/flow.h"
-
- #include <memory>
-
- /*
- * Interface for a single node in the configuration database, run on coordinators
- */
- class IConfigDatabaseNode : public ReferenceCounted<IConfigDatabaseNode> {
- public:
- virtual ~IConfigDatabaseNode() = default;
- virtual Future<Void> serve(ConfigTransactionInterface const&) = 0;
- virtual Future<Void> serve(ConfigFollowerInterface const&) = 0;
-
- static Reference<IConfigDatabaseNode> createSimple(std::string const& folder);
- static Reference<IConfigDatabaseNode> createPaxos(std::string const& folder);
- };
@@ -861,10 +861,10 @@ KeyValueStoreMemory<Container>::KeyValueStoreMemory(IDiskQueue* log,
bool disableSnapshot,
bool replaceContent,
bool exactRecovery)
- : log(log), id(id), type(storeType), previousSnapshotEnd(-1), currentSnapshotEnd(-1), resetSnapshot(false),
- memoryLimit(memoryLimit), committedWriteBytes(0), overheadWriteBytes(0), committedDataSize(0), transactionSize(0),
- transactionIsLarge(false), disableSnapshot(disableSnapshot), replaceContent(replaceContent), snapshotCount(0),
- firstCommitWithSnapshot(true) {
+ : type(storeType), id(id), log(log), committedWriteBytes(0), overheadWriteBytes(0), currentSnapshotEnd(-1),
+ previousSnapshotEnd(-1), committedDataSize(0), transactionSize(0), transactionIsLarge(false), resetSnapshot(false),
+ disableSnapshot(disableSnapshot), replaceContent(replaceContent), firstCommitWithSnapshot(true), snapshotCount(0),
+ memoryLimit(memoryLimit) {
// create reserved buffer for radixtree store type
this->reserved_buffer =
(storeType == KeyValueStoreType::MEMORY) ? nullptr : new uint8_t[CLIENT_KNOBS->SYSTEM_KEY_SIZE_LIMIT];
@ -5,11 +5,17 @@
|
|||
#include <rocksdb/filter_policy.h>
|
||||
#include <rocksdb/options.h>
|
||||
#include <rocksdb/slice_transform.h>
|
||||
#include <rocksdb/statistics.h>
|
||||
#include <rocksdb/table.h>
|
||||
#include <rocksdb/utilities/table_properties_collectors.h>
|
||||
#include "fdbserver/CoroFlow.h"
|
||||
#include "flow/flow.h"
|
||||
#include "flow/IThreadPool.h"
|
||||
#include "flow/ThreadHelper.actor.h"
|
||||
|
||||
#include <memory>
|
||||
#include <tuple>
|
||||
#include <vector>
|
||||
|
||||
#endif // SSD_ROCKSDB_EXPERIMENTAL
|
||||
|
||||
|
@ -48,6 +54,9 @@ rocksdb::Options getOptions() {
|
|||
options.IncreaseParallelism(SERVER_KNOBS->ROCKSDB_BACKGROUND_PARALLELISM);
|
||||
}
|
||||
|
||||
options.statistics = rocksdb::CreateDBStatistics();
|
||||
options.statistics->set_stats_level(rocksdb::kExceptHistogramOrTimers);
|
||||
|
||||
rocksdb::BlockBasedTableOptions bbOpts;
|
||||
// TODO: Add a knob for the block cache size. (Default is 8 MB)
|
||||
if (SERVER_KNOBS->ROCKSDB_PREFIX_LEN > 0) {
|
||||
|
@ -89,6 +98,88 @@ rocksdb::ReadOptions getReadOptions() {
|
|||
return options;
|
||||
}
|
||||
|
||||
ACTOR Future<Void> rocksDBMetricLogger(std::shared_ptr<rocksdb::Statistics> statistics, rocksdb::DB* db) {
|
||||
state std::vector<std::tuple<const char*, uint32_t, uint64_t>> tickerStats = {
|
||||
{ "StallMicros", rocksdb::STALL_MICROS, 0 },
|
||||
{ "BytesRead", rocksdb::BYTES_READ, 0 },
|
||||
{ "IterBytesRead", rocksdb::ITER_BYTES_READ, 0 },
|
||||
{ "BytesWritten", rocksdb::BYTES_WRITTEN, 0 },
|
||||
{ "BlockCacheMisses", rocksdb::BLOCK_CACHE_MISS, 0 },
|
||||
{ "BlockCacheHits", rocksdb::BLOCK_CACHE_HIT, 0 },
|
||||
{ "BloomFilterUseful", rocksdb::BLOOM_FILTER_USEFUL, 0 },
|
||||
{ "BloomFilterFullPositive", rocksdb::BLOOM_FILTER_FULL_POSITIVE, 0 },
|
||||
{ "BloomFilterTruePositive", rocksdb::BLOOM_FILTER_FULL_TRUE_POSITIVE, 0 },
|
||||
{ "BloomFilterMicros", rocksdb::BLOOM_FILTER_MICROS, 0 },
|
||||
{ "MemtableHit", rocksdb::MEMTABLE_HIT, 0 },
|
||||
{ "MemtableMiss", rocksdb::MEMTABLE_MISS, 0 },
|
||||
{ "GetHitL0", rocksdb::GET_HIT_L0, 0 },
|
||||
{ "GetHitL1", rocksdb::GET_HIT_L1, 0 },
|
||||
{ "GetHitL2AndUp", rocksdb::GET_HIT_L2_AND_UP, 0 },
|
||||
{ "CountKeysWritten", rocksdb::NUMBER_KEYS_WRITTEN, 0 },
|
||||
{ "CountKeysRead", rocksdb::NUMBER_KEYS_READ, 0 },
|
||||
{ "CountDBSeek", rocksdb::NUMBER_DB_SEEK, 0 },
|
||||
{ "CountDBNext", rocksdb::NUMBER_DB_NEXT, 0 },
|
||||
{ "CountDBPrev", rocksdb::NUMBER_DB_PREV, 0 },
|
||||
{ "BloomFilterPrefixChecked", rocksdb::BLOOM_FILTER_PREFIX_CHECKED, 0 },
|
||||
{ "BloomFilterPrefixUseful", rocksdb::BLOOM_FILTER_PREFIX_USEFUL, 0 },
|
||||
{ "BlockCacheCompressedMiss", rocksdb::BLOCK_CACHE_COMPRESSED_MISS, 0 },
|
||||
{ "BlockCacheCompressedHit", rocksdb::BLOCK_CACHE_COMPRESSED_HIT, 0 },
|
||||
{ "CountWalFileSyncs", rocksdb::WAL_FILE_SYNCED, 0 },
|
||||
{ "CountWalFileBytes", rocksdb::WAL_FILE_BYTES, 0 },
|
||||
{ "CompactReadBytes", rocksdb::COMPACT_READ_BYTES, 0 },
|
||||
{ "CompactWriteBytes", rocksdb::COMPACT_WRITE_BYTES, 0 },
|
||||
{ "FlushWriteBytes", rocksdb::FLUSH_WRITE_BYTES, 0 },
|
||||
{ "CountBlocksCompressed", rocksdb::NUMBER_BLOCK_COMPRESSED, 0 },
|
||||
{ "CountBlocksDecompressed", rocksdb::NUMBER_BLOCK_DECOMPRESSED, 0 },
|
||||
{ "RowCacheHit", rocksdb::ROW_CACHE_HIT, 0 },
|
||||
{ "RowCacheMiss", rocksdb::ROW_CACHE_MISS, 0 },
|
||||
{ "CountIterSkippedKeys", rocksdb::NUMBER_ITER_SKIP, 0 },
|
||||
|
||||
};
|
||||
state std::vector<std::pair<const char*, std::string>> propertyStats = {
|
||||
{ "NumCompactionsRunning", rocksdb::DB::Properties::kNumRunningCompactions },
|
||||
{ "NumImmutableMemtables", rocksdb::DB::Properties::kNumImmutableMemTable },
|
||||
{ "NumImmutableMemtablesFlushed", rocksdb::DB::Properties::kNumImmutableMemTableFlushed },
|
||||
{ "IsMemtableFlushPending", rocksdb::DB::Properties::kMemTableFlushPending },
|
||||
{ "NumRunningFlushes", rocksdb::DB::Properties::kNumRunningFlushes },
|
||||
{ "IsCompactionPending", rocksdb::DB::Properties::kCompactionPending },
|
||||
{ "NumRunningCompactions", rocksdb::DB::Properties::kNumRunningCompactions },
|
||||
{ "CumulativeBackgroundErrors", rocksdb::DB::Properties::kBackgroundErrors },
|
||||
{ "CurrentSizeActiveMemtable", rocksdb::DB::Properties::kCurSizeActiveMemTable },
|
||||
{ "AllMemtablesBytes", rocksdb::DB::Properties::kCurSizeAllMemTables },
|
||||
{ "ActiveMemtableBytes", rocksdb::DB::Properties::kSizeAllMemTables },
|
||||
{ "CountEntriesActiveMemtable", rocksdb::DB::Properties::kNumEntriesActiveMemTable },
|
||||
{ "CountEntriesImmutMemtables", rocksdb::DB::Properties::kNumEntriesImmMemTables },
|
||||
{ "CountDeletesActiveMemtable", rocksdb::DB::Properties::kNumDeletesActiveMemTable },
|
||||
{ "CountDeletesImmutMemtables", rocksdb::DB::Properties::kNumDeletesImmMemTables },
|
||||
{ "EstimatedCountKeys", rocksdb::DB::Properties::kEstimateNumKeys },
|
||||
{ "EstimateSstReaderBytes", rocksdb::DB::Properties::kEstimateTableReadersMem },
|
||||
{ "CountActiveSnapshots", rocksdb::DB::Properties::kNumSnapshots },
|
||||
{ "OldestSnapshotTime", rocksdb::DB::Properties::kOldestSnapshotTime },
|
||||
{ "CountLiveVersions", rocksdb::DB::Properties::kNumLiveVersions },
|
||||
{ "EstimateLiveDataSize", rocksdb::DB::Properties::kEstimateLiveDataSize },
|
||||
{ "BaseLevel", rocksdb::DB::Properties::kBaseLevel },
|
||||
{ "EstPendCompactBytes", rocksdb::DB::Properties::kEstimatePendingCompactionBytes },
|
||||
};
|
||||
loop {
|
||||
wait(delay(SERVER_KNOBS->ROCKSDB_METRICS_DELAY));
|
||||
TraceEvent e("RocksDBMetrics");
|
||||
for (auto& t : tickerStats) {
|
||||
auto& [name, ticker, cum] = t;
|
||||
uint64_t val = statistics->getTickerCount(ticker);
|
||||
e.detail(name, val - cum);
|
||||
cum = val;
|
||||
}
|
||||
|
||||
for (auto& p : propertyStats) {
|
||||
auto& [name, property] = p;
|
||||
uint64_t stat = 0;
|
||||
ASSERT(db->GetIntProperty(property, &stat));
|
||||
e.detail(name, stat);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
struct RocksDBKeyValueStore : IKeyValueStore {
    using DB = rocksdb::DB*;
    using CF = rocksdb::ColumnFamilyHandle*;

@@ -118,29 +209,26 @@ struct RocksDBKeyValueStore : IKeyValueStore {
    struct OpenAction : TypedAction<Writer, OpenAction> {
        std::string path;
        ThreadReturnPromise<Void> done;
        Optional<Future<Void>>& metrics;
        OpenAction(std::string path, Optional<Future<Void>>& metrics) : path(std::move(path)), metrics(metrics) {}

        double getTimeEstimate() const override { return SERVER_KNOBS->COMMIT_TIME_ESTIMATE; }
    };
    void action(OpenAction& a) {
        // If the DB has already been initialized, this should be a no-op.
        if (db != nullptr) {
            TraceEvent(SevInfo, "RocksDB")
                .detail("Path", a.path)
                .detail("Method", "Open")
                .detail("Skipping", "Already Open");
            a.done.send(Void());
            return;
        }

        std::vector<rocksdb::ColumnFamilyDescriptor> defaultCF = { rocksdb::ColumnFamilyDescriptor{
            "default", getCFOptions() } };
        std::vector<rocksdb::ColumnFamilyHandle*> handle;
        auto status = rocksdb::DB::Open(getOptions(), a.path, defaultCF, &handle, &db);
        auto options = getOptions();
        auto status = rocksdb::DB::Open(options, a.path, defaultCF, &handle, &db);
        if (!status.ok()) {
            TraceEvent(SevError, "RocksDBError").detail("Error", status.ToString()).detail("Method", "Open");
            a.done.sendError(statusToError(status));
        } else {
            TraceEvent(SevInfo, "RocksDB").detail("Path", a.path).detail("Method", "Open");
            onMainThread([&] {
                a.metrics = rocksDBMetricLogger(options.statistics, db);
                return Future<bool>(true);
            }).blockUntilReady();
            a.done.send(Void());
        }
    }

@@ -367,7 +455,9 @@ struct RocksDBKeyValueStore : IKeyValueStore {
    Reference<IThreadPool> readThreads;
    Promise<Void> errorPromise;
    Promise<Void> closePromise;
    Future<Void> openFuture;
    std::unique_ptr<rocksdb::WriteBatch> writeBatch;
    Optional<Future<Void>> metrics;

    explicit RocksDBKeyValueStore(const std::string& path, UID id) : path(path), id(id) {
        // In simulation, run the reader/writer threads as Coro threads (i.e. in the network thread. The storage engine

@@ -396,6 +486,9 @@ struct RocksDBKeyValueStore : IKeyValueStore {
    Future<Void> getError() override { return errorPromise.getFuture(); }

    ACTOR static void doClose(RocksDBKeyValueStore* self, bool deleteOnClose) {
        // The metrics future retains a reference to the DB, so stop it before we delete it.
        self->metrics.reset();

        wait(self->readThreads->stop());
        auto a = new Writer::CloseAction(self->path, deleteOnClose);
        auto f = a->done.getFuture();

@@ -418,11 +511,13 @@ struct RocksDBKeyValueStore : IKeyValueStore {
    KeyValueStoreType getType() const override { return KeyValueStoreType(KeyValueStoreType::SSD_ROCKSDB_V1); }

    Future<Void> init() override {
        std::unique_ptr<Writer::OpenAction> a(new Writer::OpenAction());
        a->path = path;
        auto res = a->done.getFuture();
        if (openFuture.isValid()) {
            return openFuture;
        }
        auto a = std::make_unique<Writer::OpenAction>(path, metrics);
        openFuture = a->done.getFuture();
        writeThread->post(a.release());
        return res;
        return openFuture;
    }

    void set(KeyValueRef kv, const Arena*) override {
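Note: the reworked init() above caches openFuture so that every call after the first shares the same open operation instead of posting a second OpenAction. A minimal standalone sketch of that future-caching idiom in plain C++ (std::shared_future stands in for flow's Future; Store and openOnce are illustrative names, not FDB code):

    #include <future>
    #include <iostream>

    class Store {
        std::shared_future<void> openFuture; // invalid until the first init()

        static void openOnce() { std::cout << "opening db\n"; }

    public:
        std::shared_future<void> init() {
            if (openFuture.valid()) // already opening/opened: reuse the cached future
                return openFuture;
            openFuture = std::async(std::launch::async, openOnce).share();
            return openFuture;
        }
    };

    int main() {
        Store s;
        auto a = s.init();
        auto b = s.init(); // no second open; both share one state
        a.wait();
        b.wait();
    }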
@@ -681,7 +681,7 @@ struct SQLiteTransaction {
struct IntKeyCursor {
    SQLiteDB& db;
    BtCursor* cursor;
    IntKeyCursor(SQLiteDB& db, int table, bool write) : cursor(0), db(db) {
    IntKeyCursor(SQLiteDB& db, int table, bool write) : db(db), cursor(nullptr) {
        cursor = (BtCursor*)new char[sqlite3BtreeCursorSize()];
        sqlite3BtreeCursorZero(cursor);
        db.checkError("BtreeCursor", sqlite3BtreeCursor(db.btree, table, write, nullptr, cursor));

@@ -705,7 +705,7 @@ struct RawCursor {

    operator bool() const { return valid; }

    RawCursor(SQLiteDB& db, int table, bool write) : cursor(0), db(db), valid(false) {
    RawCursor(SQLiteDB& db, int table, bool write) : db(db), cursor(nullptr), valid(false) {
        keyInfo.db = db.db;
        keyInfo.enc = db.db->aDb[0].pSchema->enc;
        keyInfo.aColl[0] = db.db->pDfltColl;

@@ -1732,9 +1732,9 @@ private:
           volatile int64_t& freeListPages,
           UID dbgid,
           vector<Reference<ReadCursor>>* pReadThreads)
      : kvs(kvs), conn(kvs->filename, isBtreeV2, isBtreeV2), commits(), setsThisCommit(), freeTableEmpty(false),
        writesComplete(writesComplete), springCleaningStats(springCleaningStats), diskBytesUsed(diskBytesUsed),
        freeListPages(freeListPages), cursor(nullptr), dbgid(dbgid), readThreads(*pReadThreads),
      : kvs(kvs), conn(kvs->filename, isBtreeV2, isBtreeV2), cursor(nullptr), commits(), setsThisCommit(),
        freeTableEmpty(false), writesComplete(writesComplete), springCleaningStats(springCleaningStats),
        diskBytesUsed(diskBytesUsed), freeListPages(freeListPages), dbgid(dbgid), readThreads(*pReadThreads),
        checkAllChecksumsOnOpen(checkAllChecksumsOnOpen), checkIntegrityOnOpen(checkIntegrityOnOpen) {}
    ~Writer() override {
        TraceEvent("KVWriterDestroying", dbgid).log();

@@ -2110,7 +2110,7 @@ KeyValueStoreSQLite::KeyValueStoreSQLite(std::string const& filename,
                                         KeyValueStoreType storeType,
                                         bool checkChecksums,
                                         bool checkIntegrity)
  : type(storeType), filename(filename), logID(id), readThreads(CoroThreadPool::createThreadPool()),
  : type(storeType), logID(id), filename(filename), readThreads(CoroThreadPool::createThreadPool()),
    writeThread(CoroThreadPool::createThreadPool()), readsRequested(0), writesRequested(0), writesComplete(0),
    diskBytesUsed(0), freeListPages(0) {
    TraceEvent(SevDebug, "KeyValueStoreSQLiteCreate").detail("Filename", filename);
@@ -215,7 +215,7 @@ class LocalConfigurationImpl {
            self->updateInMemoryState(lastSeenVersion);
            return Void();
        }
        Standalone<RangeResultRef> range = wait(self->kvStore->readRange(knobOverrideKeys));
        RangeResult range = wait(self->kvStore->readRange(knobOverrideKeys));
        for (const auto& kv : range) {
            auto configKey =
                BinaryReader::fromStringRef<ConfigKey>(kv.key.removePrefix(knobOverrideKeys.begin), IncludeVersion());

@@ -326,10 +326,10 @@ public:
                           std::string const& configPath,
                           std::map<std::string, std::string> const& manualKnobOverrides,
                           IsTest isTest)
      : id(deterministicRandom()->randomUniqueID()), kvStore(dataFolder, id, "localconf-"), cc("LocalConfiguration"),
      : id(deterministicRandom()->randomUniqueID()), kvStore(dataFolder, id, "localconf-"),
        configKnobOverrides(configPath), manualKnobOverrides(manualKnobOverrides), cc("LocalConfiguration"),
        broadcasterChanges("BroadcasterChanges", cc), snapshots("Snapshots", cc),
        changeRequestsFetched("ChangeRequestsFetched", cc), mutations("Mutations", cc), configKnobOverrides(configPath),
        manualKnobOverrides(manualKnobOverrides) {
        changeRequestsFetched("ChangeRequestsFetched", cc), mutations("Mutations", cc) {
        if (isTest) {
            testKnobCollection =
                IKnobCollection::create(IKnobCollection::Type::TEST,
@@ -43,11 +43,11 @@ struct LogRouterData {
        Tag tag;

        TagData(Tag tag, Version popped, Version durableKnownCommittedVersion)
          : tag(tag), popped(popped), durableKnownCommittedVersion(durableKnownCommittedVersion) {}
          : popped(popped), durableKnownCommittedVersion(durableKnownCommittedVersion), tag(tag) {}

        TagData(TagData&& r) noexcept
          : version_messages(std::move(r.version_messages)), tag(r.tag), popped(r.popped),
            durableKnownCommittedVersion(r.durableKnownCommittedVersion) {}
          : version_messages(std::move(r.version_messages)), popped(r.popped),
            durableKnownCommittedVersion(r.durableKnownCommittedVersion), tag(r.tag) {}
        void operator=(TagData&& r) noexcept {
            version_messages = std::move(r.version_messages);
            tag = r.tag;

@@ -136,14 +136,14 @@ struct LogRouterData {
    }

    LogRouterData(UID dbgid, const InitializeLogRouterRequest& req)
      : dbgid(dbgid), routerTag(req.routerTag), logSystem(new AsyncVar<Reference<ILogSystem>>()),
        version(req.startVersion - 1), minPopped(0), generation(req.recoveryCount), startVersion(req.startVersion),
        allowPops(false), minKnownCommittedVersion(0), poppedVersion(0), foundEpochEnd(false),
        cc("LogRouter", dbgid.toString()), getMoreCount("GetMoreCount", cc),
        getMoreBlockedCount("GetMoreBlockedCount", cc),
      : dbgid(dbgid), logSystem(new AsyncVar<Reference<ILogSystem>>()), version(req.startVersion - 1), minPopped(0),
        startVersion(req.startVersion), minKnownCommittedVersion(0), poppedVersion(0), routerTag(req.routerTag),
        allowPops(false), foundEpochEnd(false), generation(req.recoveryCount),
        peekLatencyDist(Histogram::getHistogram(LiteralStringRef("LogRouter"),
                                                LiteralStringRef("PeekTLogLatency"),
                                                Histogram::Unit::microseconds)) {
                                                Histogram::Unit::microseconds)),
        cc("LogRouter", dbgid.toString()), getMoreCount("GetMoreCount", cc),
        getMoreBlockedCount("GetMoreBlockedCount", cc) {
        // setup just enough of a logSet to be able to call getPushLocations
        logSet.logServers.resize(req.tLogLocalities.size());
        logSet.tLogPolicy = req.tLogPolicy;
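Note: peekLatencyDist now uses the three-argument Histogram::getHistogram(group, op, unit) form seen above. As a rough illustration of the shape of such a factory, here is a toy sketch only, assuming a process-wide registry that hands out one shared histogram per (group, op) key; none of these names are the FDB implementation:

    #include <cstdint>
    #include <map>
    #include <memory>
    #include <string>
    #include <vector>

    enum class Unit { microseconds, bytes };

    struct Histogram {
        std::string group, op;
        Unit unit;
        std::vector<uint32_t> buckets = std::vector<uint32_t>(64, 0);

        // Power-of-two bucketing: sample v lands in bucket floor(log2(v)).
        void sample(uint64_t v) {
            int i = 0;
            while (v >>= 1)
                ++i;
            buckets[i]++;
        }
    };

    // One shared histogram per (group, op); repeated calls return the same instance,
    // so call sites like the LogRouterData initializer can grab it cheaply.
    std::shared_ptr<Histogram> getHistogram(const std::string& group, const std::string& op, Unit unit) {
        static std::map<std::string, std::shared_ptr<Histogram>> registry;
        auto& slot = registry[group + "." + op];
        if (!slot)
            slot = std::make_shared<Histogram>(Histogram{ group, op, unit });
        return slot;
    }

    int main() {
        auto h = getHistogram("LogRouter", "PeekTLogLatency", Unit::microseconds);
        h->sample(1200); // a ~1.2 ms latency sample, in microseconds
        return 0;
    }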
@@ -47,7 +47,7 @@ struct ConnectionResetInfo : public ReferenceCounted<ConnectionResetInfo> {
    int slowReplies;
    int fastReplies;

    ConnectionResetInfo() : lastReset(now()), slowReplies(0), fastReplies(0), resetCheck(Void()) {}
    ConnectionResetInfo() : lastReset(now()), resetCheck(Void()), slowReplies(0), fastReplies(0) {}
};

// The set of tLog servers, logRouters and backupWorkers for a log tag
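Most of the constructor changes in this diff, like ConnectionResetInfo above, only reorder the mem-initializer list to match the order the members are declared in. C++ always initializes members in declaration order regardless of how the list is written, so a list in declaration order is the one that reads truthfully, and compilers flag the mismatch with -Wreorder. A toy example (not FDB code) of the bug class this avoids:

    struct Bad {
        int a;
        int b;
        // Initializers run in declaration order (a, then b), not list order,
        // so a(b + 1) reads b before b(1) has run: -Wreorder warns here.
        Bad() : b(1), a(b + 1) {}
    };

    struct Good {
        int a;
        int b;
        // List order matches declaration order; a is valid when b(a - 1) runs.
        Good() : a(2), b(a - 1) {}
    };

    int main() {
        Good g;
        return g.a + g.b; // well-defined: returns 3
    }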
@@ -60,9 +60,9 @@ public:
                          Reference<AsyncVar<PeekTxsInfo>> peekLocality,
                          Version txsPoppedVersion,
                          bool recover)
  : logSystem(logSystem), peekLocality(peekLocality), enableRecovery(recover), recoveryLoc(txsPoppedVersion),
    recoveryQueueLoc(txsPoppedVersion), poppedUpTo(0), nextCommit(1), recoveryQueueDataSize(0), peekTypeSwitches(0),
    hasDiscardedData(false), totalRecoveredBytes(0) {
  : peekLocality(peekLocality), peekTypeSwitches(0), enableRecovery(recover), logSystem(logSystem),
    recoveryLoc(txsPoppedVersion), recoveryQueueLoc(txsPoppedVersion), recoveryQueueDataSize(0), poppedUpTo(0),
    nextCommit(1), hasDiscardedData(false), totalRecoveredBytes(0) {
    if (enableRecovery) {
        localityChanged = peekLocality ? peekLocality->onChange() : Never();
        cursor = logSystem->peekTxs(UID(),
@@ -31,11 +31,10 @@ ILogSystem::ServerPeekCursor::ServerPeekCursor(Reference<AsyncVar<OptionalInterf
                                               Version end,
                                               bool returnIfBlocked,
                                               bool parallelGetMore)
  : interf(interf), tag(tag), messageVersion(begin), end(end), hasMsg(false),
    rd(results.arena, results.messages, Unversioned()), randomID(deterministicRandom()->randomUniqueID()),
    poppedVersion(0), returnIfBlocked(returnIfBlocked), sequence(0), onlySpilled(false),
    parallelGetMore(parallelGetMore), lastReset(0), slowReplies(0), fastReplies(0), unknownReplies(0),
    resetCheck(Void()) {
  : interf(interf), tag(tag), rd(results.arena, results.messages, Unversioned()), messageVersion(begin), end(end),
    poppedVersion(0), hasMsg(false), randomID(deterministicRandom()->randomUniqueID()),
    returnIfBlocked(returnIfBlocked), onlySpilled(false), parallelGetMore(parallelGetMore), sequence(0), lastReset(0),
    resetCheck(Void()), slowReplies(0), fastReplies(0), unknownReplies(0) {
    this->results.maxKnownVersion = 0;
    this->results.minKnownCommittedVersion = 0;
    //TraceEvent("SPC_Starting", randomID).detail("Tag", tag.toString()).detail("Begin", begin).detail("End", end).backtrace();

@@ -48,10 +47,11 @@ ILogSystem::ServerPeekCursor::ServerPeekCursor(TLogPeekReply const& results,
                                               bool hasMsg,
                                               Version poppedVersion,
                                               Tag tag)
  : results(results), tag(tag), rd(results.arena, results.messages, Unversioned()), messageVersion(messageVersion),
    end(end), messageAndTags(message), hasMsg(hasMsg), randomID(deterministicRandom()->randomUniqueID()),
    poppedVersion(poppedVersion), returnIfBlocked(false), sequence(0), onlySpilled(false), parallelGetMore(false),
    lastReset(0), slowReplies(0), fastReplies(0), unknownReplies(0), resetCheck(Void()) {
  : tag(tag), results(results), rd(results.arena, results.messages, Unversioned()), messageVersion(messageVersion),
    end(end), poppedVersion(poppedVersion), messageAndTags(message), hasMsg(hasMsg),
    randomID(deterministicRandom()->randomUniqueID()), returnIfBlocked(false), onlySpilled(false),
    parallelGetMore(false), sequence(0), lastReset(0), resetCheck(Void()), slowReplies(0), fastReplies(0),
    unknownReplies(0) {
    //TraceEvent("SPC_Clone", randomID);
    this->results.maxKnownVersion = 0;
    this->results.minKnownCommittedVersion = 0;

@@ -409,8 +409,8 @@ Version ILogSystem::ServerPeekCursor::popped() const {

ILogSystem::MergedPeekCursor::MergedPeekCursor(vector<Reference<ILogSystem::IPeekCursor>> const& serverCursors,
                                               Version begin)
  : serverCursors(serverCursors), bestServer(-1), readQuorum(serverCursors.size()), tag(invalidTag), currentCursor(0),
    hasNextMessage(false), messageVersion(begin), randomID(deterministicRandom()->randomUniqueID()),
  : serverCursors(serverCursors), tag(invalidTag), bestServer(-1), currentCursor(0), readQuorum(serverCursors.size()),
    messageVersion(begin), hasNextMessage(false), randomID(deterministicRandom()->randomUniqueID()),
    tLogReplicationFactor(0) {
    sortedVersions.resize(serverCursors.size());
}

@@ -426,8 +426,8 @@ ILogSystem::MergedPeekCursor::MergedPeekCursor(
    std::vector<LocalityData> const& tLogLocalities,
    Reference<IReplicationPolicy> const tLogPolicy,
    int tLogReplicationFactor)
  : bestServer(bestServer), readQuorum(readQuorum), tag(tag), currentCursor(0), hasNextMessage(false),
    messageVersion(begin), randomID(deterministicRandom()->randomUniqueID()),
  : tag(tag), bestServer(bestServer), currentCursor(0), readQuorum(readQuorum), messageVersion(begin),
    hasNextMessage(false), randomID(deterministicRandom()->randomUniqueID()),
    tLogReplicationFactor(tLogReplicationFactor) {
    if (tLogPolicy) {
        logSet = makeReference<LogSet>();

@@ -453,8 +453,8 @@ ILogSystem::MergedPeekCursor::MergedPeekCursor(vector<Reference<ILogSystem::IPee
                                               Optional<LogMessageVersion> nextVersion,
                                               Reference<LogSet> logSet,
                                               int tLogReplicationFactor)
  : serverCursors(serverCursors), bestServer(bestServer), readQuorum(readQuorum), currentCursor(0),
    hasNextMessage(false), messageVersion(messageVersion), nextVersion(nextVersion), logSet(logSet),
  : logSet(logSet), serverCursors(serverCursors), bestServer(bestServer), currentCursor(0), readQuorum(readQuorum),
    nextVersion(nextVersion), messageVersion(messageVersion), hasNextMessage(false),
    randomID(deterministicRandom()->randomUniqueID()), tLogReplicationFactor(tLogReplicationFactor) {
    sortedVersions.resize(serverCursors.size());
    calcHasMessage();

@@ -698,8 +698,8 @@ ILogSystem::SetPeekCursor::SetPeekCursor(std::vector<Reference<LogSet>> const& l
                                         Version begin,
                                         Version end,
                                         bool parallelGetMore)
  : logSets(logSets), bestSet(bestSet), bestServer(bestServer), tag(tag), currentCursor(0), currentSet(bestSet),
    hasNextMessage(false), messageVersion(begin), useBestSet(true), randomID(deterministicRandom()->randomUniqueID()) {
  : logSets(logSets), tag(tag), bestSet(bestSet), bestServer(bestServer), currentSet(bestSet), currentCursor(0),
    messageVersion(begin), hasNextMessage(false), useBestSet(true), randomID(deterministicRandom()->randomUniqueID()) {
    serverCursors.resize(logSets.size());
    int maxServers = 0;
    for (int i = 0; i < logSets.size(); i++) {

@@ -720,8 +720,8 @@ ILogSystem::SetPeekCursor::SetPeekCursor(std::vector<Reference<LogSet>> const& l
                                         int bestServer,
                                         Optional<LogMessageVersion> nextVersion,
                                         bool useBestSet)
  : logSets(logSets), serverCursors(serverCursors), messageVersion(messageVersion), bestSet(bestSet),
    bestServer(bestServer), nextVersion(nextVersion), currentSet(bestSet), currentCursor(0), hasNextMessage(false),
  : logSets(logSets), serverCursors(serverCursors), bestSet(bestSet), bestServer(bestServer), currentSet(bestSet),
    currentCursor(0), nextVersion(nextVersion), messageVersion(messageVersion), hasNextMessage(false),
    useBestSet(useBestSet), randomID(deterministicRandom()->randomUniqueID()) {
    int maxServers = 0;
    for (int i = 0; i < logSets.size(); i++) {

@@ -1155,10 +1155,9 @@ ILogSystem::BufferedCursor::BufferedCursor(std::vector<Reference<IPeekCursor>> c
                                           bool withTags,
                                           bool collectTags,
                                           bool canDiscardPopped)
  : cursors(cursors), messageVersion(begin), end(end), withTags(withTags), collectTags(collectTags),
    hasNextMessage(false), messageIndex(0), poppedVersion(0), initialPoppedVersion(0),
    canDiscardPopped(canDiscardPopped), knownUnique(false), minKnownCommittedVersion(0),
    randomID(deterministicRandom()->randomUniqueID()) {
  : cursors(cursors), messageIndex(0), messageVersion(begin), end(end), hasNextMessage(false), withTags(withTags),
    knownUnique(false), minKnownCommittedVersion(0), poppedVersion(0), initialPoppedVersion(0),
    canDiscardPopped(canDiscardPopped), randomID(deterministicRandom()->randomUniqueID()), collectTags(collectTags) {
    targetQueueSize = SERVER_KNOBS->DESIRED_OUTSTANDING_MESSAGES / cursors.size();
    messages.reserve(SERVER_KNOBS->DESIRED_OUTSTANDING_MESSAGES);
    cursorMessages.resize(cursors.size());

@@ -1170,9 +1169,9 @@ ILogSystem::BufferedCursor::BufferedCursor(
    Version begin,
    Version end,
    bool parallelGetMore)
  : messageVersion(begin), end(end), withTags(true), collectTags(false), hasNextMessage(false), messageIndex(0),
    poppedVersion(0), initialPoppedVersion(0), canDiscardPopped(false), knownUnique(true), minKnownCommittedVersion(0),
    randomID(deterministicRandom()->randomUniqueID()) {
  : messageIndex(0), messageVersion(begin), end(end), hasNextMessage(false), withTags(true), knownUnique(true),
    minKnownCommittedVersion(0), poppedVersion(0), initialPoppedVersion(0), canDiscardPopped(false),
    randomID(deterministicRandom()->randomUniqueID()), collectTags(false) {
    targetQueueSize = SERVER_KNOBS->DESIRED_OUTSTANDING_MESSAGES / logServers.size();
    messages.reserve(SERVER_KNOBS->DESIRED_OUTSTANDING_MESSAGES);
    cursorMessages.resize(logServers.size());
@@ -154,7 +154,7 @@ struct GetCommitVersionReply {

    GetCommitVersionReply() : resolverChangesVersion(0), version(0), prevVersion(0), requestNum(0) {}
    explicit GetCommitVersionReply(Version version, Version prevVersion, uint64_t requestNum)
      : version(version), prevVersion(prevVersion), resolverChangesVersion(0), requestNum(requestNum) {}
      : resolverChangesVersion(0), version(version), prevVersion(prevVersion), requestNum(requestNum) {}

    template <class Ar>
    void serialize(Ar& ar) {
@@ -29,7 +29,7 @@

struct MetricsRule {
    MetricsRule(bool enabled = false, int minLevel = 0, StringRef const& name = StringRef())
      : enabled(enabled), minLevel(minLevel), namePattern(name) {}
      : namePattern(name), enabled(enabled), minLevel(minLevel) {}

    Standalone<StringRef> typePattern;
    Standalone<StringRef> namePattern;
@@ -90,7 +90,7 @@ struct TLogQueueEntryRef {

    TLogQueueEntryRef() : version(0), knownCommittedVersion(0) {}
    TLogQueueEntryRef(Arena& a, TLogQueueEntryRef const& from)
      : version(from.version), knownCommittedVersion(from.knownCommittedVersion), id(from.id),
      : id(from.id), version(from.version), knownCommittedVersion(from.knownCommittedVersion),
        messages(a, from.messages), tags(a, from.tags) {}

    template <class Ar>

@@ -322,10 +322,10 @@ struct TLogData : NonCopyable {
             IKeyValueStore* persistentData,
             IDiskQueue* persistentQueue,
             Reference<AsyncVar<ServerDBInfo> const> const& dbInfo)
      : dbgid(dbgid), workerID(workerID), instanceID(deterministicRandom()->randomUniqueID().first()),
        persistentData(persistentData), rawPersistentQueue(persistentQueue),
        persistentQueue(new TLogQueue(persistentQueue, dbgid)), dbInfo(dbInfo), queueCommitBegin(0), queueCommitEnd(0),
        prevVersion(0), diskQueueCommitBytes(0), largeDiskQueueCommitBytes(false), bytesInput(0), bytesDurable(0),
      : dbgid(dbgid), workerID(workerID), persistentData(persistentData), rawPersistentQueue(persistentQueue),
        persistentQueue(new TLogQueue(persistentQueue, dbgid)), diskQueueCommitBytes(0),
        largeDiskQueueCommitBytes(false), dbInfo(dbInfo), queueCommitEnd(0), queueCommitBegin(0),
        instanceID(deterministicRandom()->randomUniqueID().first()), bytesInput(0), bytesDurable(0), prevVersion(0),
        updatePersist(Void()), terminated(false) {}
};

@@ -339,7 +339,7 @@ struct LogData : NonCopyable, public ReferenceCounted<LogData> {
        bool update_version_sizes;

        TagData(Version popped, bool nothing_persistent, bool popped_recently, OldTag tag)
          : nothing_persistent(nothing_persistent), popped(popped), popped_recently(popped_recently),
          : nothing_persistent(nothing_persistent), popped_recently(popped_recently), popped(popped),
            update_version_sizes(tag != txsTagOld) {}

        TagData(TagData&& r) noexcept

@@ -440,11 +440,10 @@ struct LogData : NonCopyable, public ReferenceCounted<LogData> {
    Future<Void> recovery;

    explicit LogData(TLogData* tLogData, TLogInterface interf)
      : tLogData(tLogData), knownCommittedVersion(0), tli(interf), logId(interf.id()),
      : stopped(false), initialized(false), recoveryCount(), queueCommittingVersion(0), knownCommittedVersion(0),
        cc("TLog", interf.id().toString()), bytesInput("BytesInput", cc), bytesDurable("BytesDurable", cc),
        // These are initialized differently on init() or recovery
        recoveryCount(), stopped(false), initialized(false), queueCommittingVersion(0),
        newPersistentDataVersion(invalidVersion), recovery(Void()) {
        logId(interf.id()), newPersistentDataVersion(invalidVersion), tli(interf), tLogData(tLogData),
        recovery(Void()) {
        startRole(Role::TRANSACTION_LOG,
                  interf.id(),
                  tLogData->workerID,
@@ -57,7 +57,7 @@ struct TLogQueueEntryRef {

    TLogQueueEntryRef() : version(0), knownCommittedVersion(0) {}
    TLogQueueEntryRef(Arena& a, TLogQueueEntryRef const& from)
      : version(from.version), knownCommittedVersion(from.knownCommittedVersion), id(from.id),
      : id(from.id), version(from.version), knownCommittedVersion(from.knownCommittedVersion),
        messages(a, from.messages) {}

    template <class Ar>

@@ -304,13 +304,13 @@ struct TLogData : NonCopyable {
             Reference<AsyncVar<ServerDBInfo> const> dbInfo,
             Reference<AsyncVar<bool>> degraded,
             std::string folder)
      : dbgid(dbgid), workerID(workerID), instanceID(deterministicRandom()->randomUniqueID().first()),
        persistentData(persistentData), rawPersistentQueue(persistentQueue),
        persistentQueue(new TLogQueue(persistentQueue, dbgid)), dbInfo(dbInfo), degraded(degraded), queueCommitBegin(0),
        queueCommitEnd(0), diskQueueCommitBytes(0), largeDiskQueueCommitBytes(false), bytesInput(0), bytesDurable(0),
      : dbgid(dbgid), workerID(workerID), persistentData(persistentData), rawPersistentQueue(persistentQueue),
        persistentQueue(new TLogQueue(persistentQueue, dbgid)), diskQueueCommitBytes(0),
        largeDiskQueueCommitBytes(false), dbInfo(dbInfo), queueCommitEnd(0), queueCommitBegin(0),
        instanceID(deterministicRandom()->randomUniqueID().first()), bytesInput(0), bytesDurable(0),
        targetVolatileBytes(SERVER_KNOBS->TLOG_SPILL_THRESHOLD), overheadBytesInput(0), overheadBytesDurable(0),
        concurrentLogRouterReads(SERVER_KNOBS->CONCURRENT_LOG_ROUTER_READS), ignorePopRequest(false),
        ignorePopDeadline(), ignorePopUid(), dataFolder(folder), toBePopped() {
        dataFolder(folder), degraded(degraded) {
        cx = openDBOnServer(dbInfo, TaskPriority::DefaultEndpoint, LockAware::True);
    }
};

@@ -326,12 +326,12 @@ struct LogData : NonCopyable, public ReferenceCounted<LogData> {
        Tag tag;

        TagData(Tag tag, Version popped, bool nothingPersistent, bool poppedRecently, bool unpoppedRecovered)
          : tag(tag), nothingPersistent(nothingPersistent), popped(popped), poppedRecently(poppedRecently),
            unpoppedRecovered(unpoppedRecovered) {}
          : nothingPersistent(nothingPersistent), poppedRecently(poppedRecently), popped(popped),
            unpoppedRecovered(unpoppedRecovered), tag(tag) {}

        TagData(TagData&& r) noexcept
          : versionMessages(std::move(r.versionMessages)), nothingPersistent(r.nothingPersistent),
            poppedRecently(r.poppedRecently), popped(r.popped), tag(r.tag), unpoppedRecovered(r.unpoppedRecovered) {}
            poppedRecently(r.poppedRecently), popped(r.popped), unpoppedRecovered(r.unpoppedRecovered), tag(r.tag) {}
        void operator=(TagData&& r) noexcept {
            versionMessages = std::move(r.versionMessages);
            nothingPersistent = r.nothingPersistent;

@@ -524,15 +524,14 @@ struct LogData : NonCopyable, public ReferenceCounted<LogData> {
             UID recruitmentID,
             std::vector<Tag> tags,
             std::string context)
      : tLogData(tLogData), knownCommittedVersion(0), logId(interf.id()), cc("TLog", interf.id().toString()),
        bytesInput("BytesInput", cc), bytesDurable("BytesDurable", cc), remoteTag(remoteTag), isPrimary(isPrimary),
        logRouterTags(logRouterTags), txsTags(txsTags), recruitmentID(recruitmentID),
        logSystem(new AsyncVar<Reference<ILogSystem>>()), logRouterPoppedVersion(0), durableKnownCommittedVersion(0),
        minKnownCommittedVersion(0), allTags(tags.begin(), tags.end()), terminated(tLogData->terminated.getFuture()),
        // These are initialized differently on init() or recovery
        recoveryCount(), stopped(false), initialized(false), queueCommittingVersion(0),
        newPersistentDataVersion(invalidVersion), unrecoveredBefore(1), recoveredAt(1), unpoppedRecoveredTags(0),
        logRouterPopToVersion(0), locality(tagLocalityInvalid), execOpCommitInProgress(false) {
      : stopped(false), initialized(false), queueCommittingVersion(0), knownCommittedVersion(0),
        durableKnownCommittedVersion(0), minKnownCommittedVersion(0), unpoppedRecoveredTags(0),
        cc("TLog", interf.id().toString()), bytesInput("BytesInput", cc), bytesDurable("BytesDurable", cc),
        logId(interf.id()), newPersistentDataVersion(invalidVersion), tLogData(tLogData), unrecoveredBefore(1),
        recoveredAt(1), logSystem(new AsyncVar<Reference<ILogSystem>>()), remoteTag(remoteTag), isPrimary(isPrimary),
        logRouterTags(logRouterTags), logRouterPoppedVersion(0), logRouterPopToVersion(0), locality(tagLocalityInvalid),
        recruitmentID(recruitmentID), allTags(tags.begin(), tags.end()), terminated(tLogData->terminated.getFuture()),
        execOpCommitInProgress(false), txsTags(txsTags) {
        startRole(Role::TRANSACTION_LOG,
                  interf.id(),
                  tLogData->workerID,
@@ -58,7 +58,7 @@ struct TLogQueueEntryRef {

    TLogQueueEntryRef() : version(0), knownCommittedVersion(0) {}
    TLogQueueEntryRef(Arena& a, TLogQueueEntryRef const& from)
      : version(from.version), knownCommittedVersion(from.knownCommittedVersion), id(from.id),
      : id(from.id), version(from.version), knownCommittedVersion(from.knownCommittedVersion),
        messages(a, from.messages) {}

    template <class Ar>

@@ -367,14 +367,14 @@ struct TLogData : NonCopyable {
             Reference<AsyncVar<ServerDBInfo> const> dbInfo,
             Reference<AsyncVar<bool>> degraded,
             std::string folder)
      : dbgid(dbgid), workerID(workerID), instanceID(deterministicRandom()->randomUniqueID().first()),
        persistentData(persistentData), rawPersistentQueue(persistentQueue),
        persistentQueue(new TLogQueue(persistentQueue, dbgid)), dbInfo(dbInfo), degraded(degraded), queueCommitBegin(0),
        queueCommitEnd(0), diskQueueCommitBytes(0), largeDiskQueueCommitBytes(false), bytesInput(0), bytesDurable(0),
      : dbgid(dbgid), workerID(workerID), persistentData(persistentData), rawPersistentQueue(persistentQueue),
        persistentQueue(new TLogQueue(persistentQueue, dbgid)), diskQueueCommitBytes(0),
        largeDiskQueueCommitBytes(false), dbInfo(dbInfo), queueCommitEnd(0), queueCommitBegin(0),
        instanceID(deterministicRandom()->randomUniqueID().first()), bytesInput(0), bytesDurable(0),
        targetVolatileBytes(SERVER_KNOBS->TLOG_SPILL_THRESHOLD), overheadBytesInput(0), overheadBytesDurable(0),
        peekMemoryLimiter(SERVER_KNOBS->TLOG_SPILL_REFERENCE_MAX_PEEK_MEMORY_BYTES),
        concurrentLogRouterReads(SERVER_KNOBS->CONCURRENT_LOG_ROUTER_READS), ignorePopRequest(false),
        ignorePopDeadline(), ignorePopUid(), dataFolder(folder), toBePopped() {
        dataFolder(folder), degraded(degraded) {
        cx = openDBOnServer(dbInfo, TaskPriority::DefaultEndpoint, LockAware::True);
    }
};

@@ -398,15 +398,15 @@ struct LogData : NonCopyable, public ReferenceCounted<LogData> {
                bool nothingPersistent,
                bool poppedRecently,
                bool unpoppedRecovered)
          : tag(tag), nothingPersistent(nothingPersistent), poppedRecently(poppedRecently), popped(popped),
            persistentPopped(0), versionForPoppedLocation(0), poppedLocation(poppedLocation),
            unpoppedRecovered(unpoppedRecovered) {}
          : nothingPersistent(nothingPersistent), poppedRecently(poppedRecently), popped(popped), persistentPopped(0),
            versionForPoppedLocation(0), poppedLocation(poppedLocation), unpoppedRecovered(unpoppedRecovered),
            tag(tag) {}

        TagData(TagData&& r) noexcept
          : versionMessages(std::move(r.versionMessages)), nothingPersistent(r.nothingPersistent),
            poppedRecently(r.poppedRecently), popped(r.popped), persistentPopped(r.persistentPopped),
            versionForPoppedLocation(r.versionForPoppedLocation), poppedLocation(r.poppedLocation), tag(r.tag),
            unpoppedRecovered(r.unpoppedRecovered) {}
            versionForPoppedLocation(r.versionForPoppedLocation), poppedLocation(r.poppedLocation),
            unpoppedRecovered(r.unpoppedRecovered), tag(r.tag) {}
        void operator=(TagData&& r) noexcept {
            versionMessages = std::move(r.versionMessages);
            nothingPersistent = r.nothingPersistent;

@@ -607,16 +607,15 @@ struct LogData : NonCopyable, public ReferenceCounted<LogData> {
             ProtocolVersion protocolVersion,
             std::vector<Tag> tags,
             std::string context)
      : tLogData(tLogData), knownCommittedVersion(0), logId(interf.id()), cc("TLog", interf.id().toString()),
        bytesInput("BytesInput", cc), bytesDurable("BytesDurable", cc), remoteTag(remoteTag), isPrimary(isPrimary),
        logRouterTags(logRouterTags), txsTags(txsTags), recruitmentID(recruitmentID), protocolVersion(protocolVersion),
        logSystem(new AsyncVar<Reference<ILogSystem>>()), logRouterPoppedVersion(0), durableKnownCommittedVersion(0),
        minKnownCommittedVersion(0), queuePoppedVersion(0), allTags(tags.begin(), tags.end()),
        terminated(tLogData->terminated.getFuture()), minPoppedTagVersion(0), minPoppedTag(invalidTag),
        // These are initialized differently on init() or recovery
        recoveryCount(), stopped(false), initialized(false), queueCommittingVersion(0),
        newPersistentDataVersion(invalidVersion), unrecoveredBefore(1), recoveredAt(1), unpoppedRecoveredTags(0),
        logRouterPopToVersion(0), locality(tagLocalityInvalid), execOpCommitInProgress(false) {
      : stopped(false), initialized(false), recoveryCount(), queueCommittingVersion(0), knownCommittedVersion(0),
        durableKnownCommittedVersion(0), minKnownCommittedVersion(0), queuePoppedVersion(0), minPoppedTagVersion(0),
        minPoppedTag(invalidTag), unpoppedRecoveredTags(0), cc("TLog", interf.id().toString()),
        bytesInput("BytesInput", cc), bytesDurable("BytesDurable", cc), logId(interf.id()),
        protocolVersion(protocolVersion), newPersistentDataVersion(invalidVersion), tLogData(tLogData),
        unrecoveredBefore(1), recoveredAt(1), logSystem(new AsyncVar<Reference<ILogSystem>>()), remoteTag(remoteTag),
        isPrimary(isPrimary), logRouterTags(logRouterTags), logRouterPoppedVersion(0), logRouterPopToVersion(0),
        locality(tagLocalityInvalid), recruitmentID(recruitmentID), allTags(tags.begin(), tags.end()),
        terminated(tLogData->terminated.getFuture()), execOpCommitInProgress(false), txsTags(txsTags) {
        startRole(Role::TRANSACTION_LOG,
                  interf.id(),
                  tLogData->workerID,
@@ -34,7 +34,7 @@ void OnDemandStore::open() {
}

OnDemandStore::OnDemandStore(std::string const& folder, UID myID, std::string const& prefix)
  : folder(folder), prefix(prefix), store(nullptr), myID(myID) {}
  : folder(folder), myID(myID), store(nullptr), prefix(prefix) {}

OnDemandStore::~OnDemandStore() {
    if (store) {
@@ -20,25 +20,131 @@

#include "fdbserver/PaxosConfigConsumer.h"

class PaxosConfigConsumerImpl {};
class PaxosConfigConsumerImpl {
    std::vector<ConfigFollowerInterface> cfis;
    Version lastSeenVersion{ 0 };
    double pollingInterval;
    Optional<double> compactionInterval;
    UID id;

PaxosConfigConsumer::PaxosConfigConsumer(ServerCoordinators const& cfi,
                                         Optional<double> pollingInterval,
                                         Optional<double> compactionInterval) {
    // TODO: Implement
    ASSERT(false);
}
    ACTOR static Future<Version> getCommittedVersion(PaxosConfigConsumerImpl* self) {
        state std::vector<Future<ConfigFollowerGetCommittedVersionReply>> committedVersionFutures;
        committedVersionFutures.reserve(self->cfis.size());
        for (const auto& cfi : self->cfis) {
            committedVersionFutures.push_back(
                cfi.getCommittedVersion.getReply(ConfigFollowerGetCommittedVersionRequest{}));
        }
        // FIXME: Must tolerate failure and disagreement
        wait(waitForAll(committedVersionFutures));
        return committedVersionFutures[0].get().version;
    }

    ACTOR static Future<Void> compactor(PaxosConfigConsumerImpl* self, ConfigBroadcaster* broadcaster) {
        if (!self->compactionInterval.present()) {
            wait(Never());
            return Void();
        }
        loop {
            state Version compactionVersion = self->lastSeenVersion;
            wait(delayJittered(self->compactionInterval.get()));
            std::vector<Future<Void>> compactionRequests;
            compactionRequests.reserve(self->cfis.size());
            for (const auto& cfi : self->cfis) {
                compactionRequests.push_back(cfi.compact.getReply(ConfigFollowerCompactRequest{ compactionVersion }));
            }
            try {
                wait(timeoutError(waitForAll(compactionRequests), 1.0));
            } catch (Error& e) {
                TraceEvent(SevWarn, "ErrorSendingCompactionRequest").error(e);
            }
        }
    }

    ACTOR static Future<Void> getSnapshotAndChanges(PaxosConfigConsumerImpl* self, ConfigBroadcaster* broadcaster) {
        state Version committedVersion = wait(getCommittedVersion(self));
        // TODO: Load balance
        ConfigFollowerGetSnapshotAndChangesReply reply = wait(self->cfis[0].getSnapshotAndChanges.getReply(
            ConfigFollowerGetSnapshotAndChangesRequest{ committedVersion }));
        TraceEvent(SevDebug, "ConfigConsumerGotSnapshotAndChanges", self->id)
            .detail("SnapshotVersion", reply.snapshotVersion)
            .detail("SnapshotSize", reply.snapshot.size())
            .detail("ChangesVersion", committedVersion)
            .detail("ChangesSize", reply.changes.size())
            .detail("AnnotationsSize", reply.annotations.size());
        ASSERT_GE(committedVersion, self->lastSeenVersion);
        self->lastSeenVersion = committedVersion;
        broadcaster->applySnapshotAndChanges(
            std::move(reply.snapshot), reply.snapshotVersion, reply.changes, committedVersion, reply.annotations);
        return Void();
    }

    ACTOR static Future<Void> fetchChanges(PaxosConfigConsumerImpl* self, ConfigBroadcaster* broadcaster) {
        wait(getSnapshotAndChanges(self, broadcaster));
        loop {
            try {
                state Version committedVersion = wait(getCommittedVersion(self));
                ASSERT_GE(committedVersion, self->lastSeenVersion);
                if (committedVersion > self->lastSeenVersion) {
                    // TODO: Load balance
                    ConfigFollowerGetChangesReply reply = wait(self->cfis[0].getChanges.getReply(
                        ConfigFollowerGetChangesRequest{ self->lastSeenVersion, committedVersion }));
                    for (const auto& versionedMutation : reply.changes) {
                        TraceEvent te(SevDebug, "ConsumerFetchedMutation", self->id);
                        te.detail("Version", versionedMutation.version)
                            .detail("ConfigClass", versionedMutation.mutation.getConfigClass())
                            .detail("KnobName", versionedMutation.mutation.getKnobName());
                        if (versionedMutation.mutation.isSet()) {
                            te.detail("Op", "Set")
                                .detail("KnobValue", versionedMutation.mutation.getValue().toString());
                        } else {
                            te.detail("Op", "Clear");
                        }
                    }
                    self->lastSeenVersion = committedVersion;
                    broadcaster->applyChanges(reply.changes, committedVersion, reply.annotations);
                }
                wait(delayJittered(self->pollingInterval));
            } catch (Error& e) {
                if (e.code() == error_code_version_already_compacted) {
                    TEST(true); // PaxosConfigConsumer gets version_already_compacted error
                    wait(getSnapshotAndChanges(self, broadcaster));
                } else {
                    throw e;
                }
            }
        }
    }

public:
    Future<Void> consume(ConfigBroadcaster& broadcaster) {
        return fetchChanges(this, &broadcaster) || compactor(this, &broadcaster);
    }

    UID getID() const { return id; }

    PaxosConfigConsumerImpl(std::vector<ConfigFollowerInterface> const& cfis,
                            double pollingInterval,
                            Optional<double> compactionInterval)
      : cfis(cfis), pollingInterval(pollingInterval), compactionInterval(compactionInterval),
        id(deterministicRandom()->randomUniqueID()) {}
};

PaxosConfigConsumer::PaxosConfigConsumer(std::vector<ConfigFollowerInterface> const& cfis,
                                         double pollingInterval,
                                         Optional<double> compactionInterval)
  : _impl(std::make_unique<PaxosConfigConsumerImpl>(cfis, pollingInterval, compactionInterval)) {}

PaxosConfigConsumer::PaxosConfigConsumer(ServerCoordinators const& coordinators,
                                         double pollingInterval,
                                         Optional<double> compactionInterval)
  : _impl(std::make_unique<PaxosConfigConsumerImpl>(coordinators.configServers, pollingInterval, compactionInterval)) {}

PaxosConfigConsumer::~PaxosConfigConsumer() = default;

Future<Void> PaxosConfigConsumer::consume(ConfigBroadcaster& broadcaster) {
    // TODO: Implement
    ASSERT(false);
    return Void();
    return impl().consume(broadcaster);
}

UID PaxosConfigConsumer::getID() const {
    // TODO: Implement
    ASSERT(false);
    return {};
    return impl().getID();
}
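For orientation: consume() above composes two actors with flow's operator||, which finishes (or rethrows) when either side does. A plain-C++ sketch of the two cooperating loops, one polling for newer committed versions and one periodically compacting below the consumer's lastSeenVersion (threads and sleeps stand in for actors and delayJittered; all names illustrative, not FDB code):

    #include <atomic>
    #include <chrono>
    #include <iostream>
    #include <thread>

    int main() {
        std::atomic<long> lastSeenVersion{ 0 };
        std::atomic<bool> stop{ false };

        std::thread fetcher([&] {
            while (!stop) {
                long committed = lastSeenVersion + 1; // stand-in for getCommittedVersion()
                // ... fetch changes in (lastSeenVersion, committed] and broadcast them ...
                lastSeenVersion = committed;
                std::this_thread::sleep_for(std::chrono::milliseconds(10)); // pollingInterval
            }
        });
        std::thread compactor([&] {
            while (!stop) {
                long v = lastSeenVersion; // safe: compact only below what we've consumed
                std::cout << "compact up to " << v << "\n";
                std::this_thread::sleep_for(std::chrono::milliseconds(50)); // compactionInterval
            }
        });

        std::this_thread::sleep_for(std::chrono::milliseconds(200));
        stop = true;
        fetcher.join();
        compactor.join();
    }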
@@ -31,10 +31,15 @@ class PaxosConfigConsumer : public IConfigConsumer {
    PaxosConfigConsumerImpl& impl() { return *_impl; }

public:
    PaxosConfigConsumer(ServerCoordinators const& cfi,
                        Optional<double> pollingInterval,
    PaxosConfigConsumer(ServerCoordinators const& coordinators,
                        double pollingInterval,
                        Optional<double> compactionInterval);
    ~PaxosConfigConsumer();
    Future<Void> consume(ConfigBroadcaster& broadcaster) override;
    UID getID() const override;

public: // Testing
    PaxosConfigConsumer(std::vector<ConfigFollowerInterface> const& cfis,
                        double pollingInterval,
                        Optional<double> compactionInterval);
};
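The header above follows the pimpl idiom: the class holds a unique_ptr to a type that is only forward-declared, which is why ~PaxosConfigConsumer() is declared in the header but defined as "= default" in the .cpp, where PaxosConfigConsumerImpl is complete. A standalone sketch of the same pattern (Widget and WidgetImpl are illustrative names, not FDB code; the header/source split is shown with comments so this compiles as one file):

    // widget.h
    #include <memory>
    class WidgetImpl; // forward declaration only
    class Widget {
        std::unique_ptr<WidgetImpl> _impl;
        WidgetImpl& impl() { return *_impl; }

    public:
        Widget();
        ~Widget(); // declared here, defined where WidgetImpl is complete
        int value();
    };

    // widget.cpp
    class WidgetImpl {
    public:
        int value() { return 42; }
    };
    Widget::Widget() : _impl(std::make_unique<WidgetImpl>()) {}
    Widget::~Widget() = default; // unique_ptr's deleter needs the complete type here
    int Widget::value() { return impl().value(); }

    int main() {
        Widget w;
        return w.value();
    }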