master conflicts

This commit is contained in:
Richard Chen 2020-10-19 01:03:54 +00:00
commit 545ee4269d
204 changed files with 4492 additions and 1638 deletions

View File

@ -164,6 +164,7 @@ endif()
add_subdirectory(fdbbackup)
add_subdirectory(contrib)
add_subdirectory(tests)
add_subdirectory(flowbench EXCLUDE_FROM_ALL)
if(WITH_PYTHON)
add_subdirectory(bindings)
endif()
@ -177,11 +178,6 @@ else()
include(CPack)
endif()
set(BUILD_FLOWBENCH OFF CACHE BOOL "Build microbenchmark program (builds google microbenchmark dependency)")
if(BUILD_FLOWBENCH)
add_subdirectory(flowbench)
endif()
if(CMAKE_SYSTEM_NAME STREQUAL "FreeBSD")
add_link_options(-lexecinfo)
endif()

View File

@ -71,17 +71,27 @@ if(NOT WIN32)
test/mako/mako.h
test/mako/utils.c
test/mako/utils.h)
add_subdirectory(test/unit/third_party)
find_package(Threads REQUIRED)
set(UNIT_TEST_SRCS
test/unit/unit_tests.cpp
test/unit/fdb_api.cpp
test/unit/fdb_api.hpp)
if(OPEN_FOR_IDE)
add_library(fdb_c_performance_test OBJECT test/performance_test.c test/test.h)
add_library(fdb_c_ryw_benchmark OBJECT test/ryw_benchmark.c test/test.h)
add_library(fdb_c_txn_size_test OBJECT test/txn_size_test.c test/test.h)
add_library(mako OBJECT ${MAKO_SRCS})
add_library(fdb_c_setup_tests OBJECT test/unit/setup_tests.cpp)
add_library(fdb_c_unit_tests OBJECT ${UNIT_TEST_SRCS})
else()
add_executable(fdb_c_performance_test test/performance_test.c test/test.h)
add_executable(fdb_c_ryw_benchmark test/ryw_benchmark.c test/test.h)
add_executable(fdb_c_txn_size_test test/txn_size_test.c test/test.h)
add_executable(mako ${MAKO_SRCS})
add_executable(fdb_c_setup_tests test/unit/setup_tests.cpp)
add_executable(fdb_c_unit_tests ${UNIT_TEST_SRCS})
strip_debug_symbols(fdb_c_performance_test)
strip_debug_symbols(fdb_c_ryw_benchmark)
strip_debug_symbols(fdb_c_txn_size_test)
@ -89,9 +99,26 @@ if(NOT WIN32)
target_link_libraries(fdb_c_performance_test PRIVATE fdb_c)
target_link_libraries(fdb_c_ryw_benchmark PRIVATE fdb_c)
target_link_libraries(fdb_c_txn_size_test PRIVATE fdb_c)
add_dependencies(fdb_c_setup_tests doctest)
add_dependencies(fdb_c_unit_tests doctest)
target_include_directories(fdb_c_setup_tests PUBLIC ${DOCTEST_INCLUDE_DIR})
target_include_directories(fdb_c_unit_tests PUBLIC ${DOCTEST_INCLUDE_DIR})
target_link_libraries(fdb_c_setup_tests PRIVATE fdb_c Threads::Threads)
target_link_libraries(fdb_c_unit_tests PRIVATE fdb_c Threads::Threads)
# do not set RPATH for mako
set_property(TARGET mako PROPERTY SKIP_BUILD_RPATH TRUE)
target_link_libraries(mako PRIVATE fdb_c)
add_fdbclient_test(
NAME fdb_c_setup_tests
COMMAND $<TARGET_FILE:fdb_c_setup_tests>)
add_fdbclient_test(
NAME fdb_c_unit_tests
COMMAND $<TARGET_FILE:fdb_c_unit_tests>
@CLUSTER_FILE@
fdb)
endif()
set(c_workloads_srcs

View File

@ -171,12 +171,12 @@ public:
void* userdata)
: callbackf(callbackf), f(f), userdata(userdata) {}
virtual bool canFire(int notMadeActive) { return true; }
virtual void fire(const Void& unused, int& userParam) {
bool canFire(int notMadeActive) const override { return true; }
void fire(const Void& unused, int& userParam) override {
(*callbackf)(f, userdata);
delete this;
}
virtual void error(const Error&, int& userParam) {
void error(const Error&, int& userParam) override {
(*callbackf)(f, userdata);
delete this;
}
@ -646,9 +646,9 @@ FDBFuture* fdb_transaction_get_estimated_range_size_bytes( FDBTransaction* tr, u
extern "C" DLLEXPORT
FDBFuture* fdb_transaction_get_range_split_points( FDBTransaction* tr, uint8_t const* begin_key_name,
int begin_key_name_length, uint8_t const* end_key_name, int end_key_name_length, int64_t chunkSize) {
int begin_key_name_length, uint8_t const* end_key_name, int end_key_name_length, int64_t chunk_size) {
KeyRangeRef range(KeyRef(begin_key_name, begin_key_name_length), KeyRef(end_key_name, end_key_name_length));
return (FDBFuture*)(TXN(tr)->getRangeSplitPoints(range, chunkSize).extractPtr());
return (FDBFuture*)(TXN(tr)->getRangeSplitPoints(range, chunk_size).extractPtr());
}
#include "fdb_c_function_pointers.g.h"

View File

@ -269,7 +269,7 @@ extern "C" {
DLLEXPORT WARN_UNUSED_RESULT FDBFuture*
fdb_transaction_get_range_split_points( FDBTransaction* tr, uint8_t const* begin_key_name,
int begin_key_name_length, uint8_t const* end_key_name, int end_key_name_length, int64_t chunkSize);
int begin_key_name_length, uint8_t const* end_key_name, int end_key_name_length, int64_t chunk_size);
#define FDB_KEYSEL_LAST_LESS_THAN(k, l) k, l, 0, 0
#define FDB_KEYSEL_LAST_LESS_OR_EQUAL(k, l) k, l, 1, 0

View File

@ -0,0 +1,229 @@
/*
* fdb_api.cpp
*
* This source file is part of the FoundationDB open source project
*
* Copyright 2013-2020 Apple Inc. and the FoundationDB project authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "fdb_api.hpp"
#include <iostream>
namespace fdb {
// Future
// Destructor releases the wrapped FDBFuture handle (RAII cleanup; the class
// contract is documented in fdb_api.hpp).
Future::~Future() {
fdb_future_destroy(future_);
}
// Thin forwarder around fdb_future_is_ready.
bool Future::is_ready() {
return fdb_future_is_ready(future_);
}
// Thin forwarder around fdb_future_block_until_ready; returns its error code.
[[nodiscard]] fdb_error_t Future::block_until_ready() {
return fdb_future_block_until_ready(future_);
}
// Thin forwarder around fdb_future_set_callback; callback_parameter is passed
// through to the callback unchanged.
[[nodiscard]] fdb_error_t Future::set_callback(FDBCallback callback,
void* callback_parameter) {
return fdb_future_set_callback(future_, callback, callback_parameter);
}
// Thin forwarder around fdb_future_get_error.
[[nodiscard]] fdb_error_t Future::get_error() {
return fdb_future_get_error(future_);
}
// Thin forwarder around fdb_future_release_memory.
void Future::release_memory() {
fdb_future_release_memory(future_);
}
// Thin forwarder around fdb_future_cancel.
void Future::cancel() {
fdb_future_cancel(future_);
}
// Int64Future
// Typed extraction: forwards to fdb_future_get_int64 on the wrapped future.
[[nodiscard]] fdb_error_t Int64Future::get(int64_t* out) {
return fdb_future_get_int64(future_, out);
}
// ValueFuture
// Typed extraction: forwards to fdb_future_get_value. out_present indicates
// whether a value exists; out_value/out_value_length receive the bytes.
[[nodiscard]] fdb_error_t ValueFuture::get(fdb_bool_t* out_present,
const uint8_t** out_value,
int* out_value_length) {
return fdb_future_get_value(future_, out_present, out_value,
out_value_length);
}
// KeyFuture
// Typed extraction: forwards to fdb_future_get_key.
[[nodiscard]] fdb_error_t KeyFuture::get(const uint8_t** out_key,
int* out_key_length) {
return fdb_future_get_key(future_, out_key, out_key_length);
}
// StringArrayFuture
// Typed extraction: forwards to fdb_future_get_string_array.
[[nodiscard]] fdb_error_t StringArrayFuture::get(const char*** out_strings,
int* out_count) {
return fdb_future_get_string_array(future_, out_strings, out_count);
}
// KeyValueArrayFuture
// Typed extraction: forwards to fdb_future_get_keyvalue_array. out_more is set
// by the C API alongside the key-value array and its count.
[[nodiscard]] fdb_error_t KeyValueArrayFuture::get(const FDBKeyValue** out_kv,
int* out_count,
fdb_bool_t* out_more) {
return fdb_future_get_keyvalue_array(future_, out_kv, out_count, out_more);
}
// Transaction
// Creates a transaction on `db`. On failure the error text is printed and the
// process aborts, so a fully constructed Transaction always holds a valid
// FDBTransaction handle.
Transaction::Transaction(FDBDatabase* db) {
if (fdb_error_t err = fdb_database_create_transaction(db, &tr_)) {
std::cerr << fdb_get_error(err) << std::endl;
std::abort();
}
}
// Destructor releases the wrapped FDBTransaction (RAII cleanup).
Transaction::~Transaction() {
fdb_transaction_destroy(tr_);
}
// Thin forwarder around fdb_transaction_reset.
void Transaction::reset() {
fdb_transaction_reset(tr_);
}
// Thin forwarder around fdb_transaction_cancel.
void Transaction::cancel() {
fdb_transaction_cancel(tr_);
}
// Thin forwarder around fdb_transaction_set_option; returns its error code.
[[nodiscard]] fdb_error_t Transaction::set_option(FDBTransactionOption option,
const uint8_t* value,
int value_length) {
return fdb_transaction_set_option(tr_, option, value, value_length);
}
// Thin forwarder around fdb_transaction_set_read_version.
void Transaction::set_read_version(int64_t version) {
fdb_transaction_set_read_version(tr_, version);
}
// Wraps the raw FDBFuture from the C API in a typed Int64Future.
Int64Future Transaction::get_read_version() {
return Int64Future(fdb_transaction_get_read_version(tr_));
}
Int64Future Transaction::get_approximate_size() {
return Int64Future(fdb_transaction_get_approximate_size(tr_));
}
KeyFuture Transaction::get_versionstamp() {
return KeyFuture(fdb_transaction_get_versionstamp(tr_));
}
// string_view overload: passes the view's data/size straight through to the
// C API (the bytes are only borrowed for the duration of the call).
ValueFuture Transaction::get(std::string_view key, fdb_bool_t snapshot) {
return ValueFuture(fdb_transaction_get(tr_, (const uint8_t*)key.data(),
key.size(), snapshot));
}
// Key-selector resolution; parameters mirror fdb_transaction_get_key.
KeyFuture Transaction::get_key(const uint8_t* key_name, int key_name_length,
fdb_bool_t or_equal, int offset,
fdb_bool_t snapshot) {
return KeyFuture(fdb_transaction_get_key(tr_, key_name, key_name_length,
or_equal, offset, snapshot));
}
StringArrayFuture Transaction::get_addresses_for_key(std::string_view key) {
return StringArrayFuture(fdb_transaction_get_addresses_for_key(tr_,
(const uint8_t*)key.data(), key.size()));
}
// Range read: all parameters are forwarded positionally to
// fdb_transaction_get_range; begin/end are key selectors
// (key bytes + or_equal + offset).
KeyValueArrayFuture Transaction::get_range(const uint8_t* begin_key_name,
int begin_key_name_length,
fdb_bool_t begin_or_equal,
int begin_offset,
const uint8_t* end_key_name,
int end_key_name_length,
fdb_bool_t end_or_equal,
int end_offset, int limit,
int target_bytes,
FDBStreamingMode mode,
int iteration, fdb_bool_t snapshot,
fdb_bool_t reverse) {
return KeyValueArrayFuture(fdb_transaction_get_range(tr_, begin_key_name,
begin_key_name_length,
begin_or_equal,
begin_offset,
end_key_name,
end_key_name_length,
end_or_equal,
end_offset,
limit, target_bytes,
mode, iteration,
snapshot, reverse));
}
// Watch on `key`; the returned EmptyFuture carries no value, only completion
// or an error.
EmptyFuture Transaction::watch(std::string_view key) {
return EmptyFuture(fdb_transaction_watch(tr_, (const uint8_t*)key.data(), key.size()));
}
// Thin forwarder around fdb_transaction_commit.
EmptyFuture Transaction::commit() {
return EmptyFuture(fdb_transaction_commit(tr_));
}
// Thin forwarder around fdb_transaction_on_error.
EmptyFuture Transaction::on_error(fdb_error_t err) {
return EmptyFuture(fdb_transaction_on_error(tr_, err));
}
// Clears a single key (the `return` of a void expression is valid C++ and
// intentional here).
void Transaction::clear(std::string_view key) {
return fdb_transaction_clear(tr_, (const uint8_t*)key.data(), key.size());
}
// Clears every key in [begin_key, end_key); both views are only borrowed for
// the duration of the C API call.
void Transaction::clear_range(std::string_view begin_key,
std::string_view end_key) {
fdb_transaction_clear_range(tr_, (const uint8_t*)begin_key.data(),
begin_key.size(), (const uint8_t*)end_key.data(),
end_key.size());
}
// Sets `key` to `value`; thin forwarder around fdb_transaction_set.
void Transaction::set(std::string_view key, std::string_view value) {
fdb_transaction_set(tr_, (const uint8_t*)key.data(), key.size(),
(const uint8_t*)value.data(), value.size());
}
// Atomic mutation on `key` with the given operand bytes and mutation type
// (the `return` of a void expression is valid C++ and intentional).
void Transaction::atomic_op(std::string_view key, const uint8_t* param,
int param_length, FDBMutationType operationType) {
return fdb_transaction_atomic_op(tr_, (const uint8_t*)key.data(), key.size(),
param, param_length, operationType);
}
// Thin forwarder around fdb_transaction_get_committed_version.
[[nodiscard]] fdb_error_t Transaction::get_committed_version(int64_t* out_version) {
return fdb_transaction_get_committed_version(tr_, out_version);
}
// Manually adds a read or write conflict range (per `type`) over
// [begin_key, end_key).
fdb_error_t Transaction::add_conflict_range(std::string_view begin_key,
std::string_view end_key,
FDBConflictRangeType type) {
return fdb_transaction_add_conflict_range(tr_,
(const uint8_t*)begin_key.data(),
begin_key.size(),
(const uint8_t*)end_key.data(),
end_key.size(),
type);
}
} // namespace fdb

View File

@ -0,0 +1,243 @@
/*
* fdb_api.hpp
*
* This source file is part of the FoundationDB open source project
*
* Copyright 2013-2020 Apple Inc. and the FoundationDB project authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// A collection of C++ classes to wrap the C API to improve memory management
// and add types to futures. Using the old C API may look something like:
//
// FDBTransaction *tr;
// fdb_database_create_transaction(db, &tr);
// FDBFuture *f = fdb_transaction_get(tr, (const uint8_t*)"foo", 3, true);
// fdb_future_block_until_ready(f);
// fdb_future_get_value(f, ...);
// fdb_future_destroy(f);
// fdb_transaction_destroy(tr);
//
// Using the wrapper classes defined here, it will instead look like:
//
// fdb::Transaction tr(db);
// fdb::ValueFuture f = tr.get((const uint8_t*)"foo", 3, true);
// f.block_until_ready();
// f.get_value(f, ...);
//
#pragma once
#define FDB_API_VERSION 620
#include <foundationdb/fdb_c.h>
#include <string>
#include <string_view>
namespace fdb {
// Wrapper parent class to manage memory of an FDBFuture pointer. Cleans up
// FDBFuture when this instance goes out of scope.
// NOTE(review): the class is implicitly copyable, and the destructor calls
// fdb_future_destroy, so copying a Future would double-destroy the same
// handle. The factory-style returns in Transaction rely on C++17 guaranteed
// copy elision; confirm no caller copies a Future, or consider deleting the
// copy operations in a follow-up.
class Future {
public:
// Pure virtual destructor makes the class abstract; the out-of-line
// definition still destroys the wrapped FDBFuture.
virtual ~Future() = 0;
// Wrapper around fdb_future_is_ready.
bool is_ready();
// Wrapper around fdb_future_block_until_ready.
fdb_error_t block_until_ready();
// Wrapper around fdb_future_set_callback.
fdb_error_t set_callback(FDBCallback callback, void* callback_parameter);
// Wrapper around fdb_future_get_error.
fdb_error_t get_error();
// Wrapper around fdb_future_release_memory.
void release_memory();
// Wrapper around fdb_future_cancel.
void cancel();
// Conversion operator to allow Future instances to work interchangeably as
// an FDBFuture object.
// operator FDBFuture* () const {
//     return future_;
// }
protected:
// Takes ownership of `f`; subclasses construct through this.
Future(FDBFuture *f) : future_(f) {}
FDBFuture* future_;
};
// Typed future returned by get_read_version/get_approximate_size.
class Int64Future : public Future {
public:
// Call this function instead of fdb_future_get_int64 when using the
// Int64Future type. Its behavior is identical to fdb_future_get_int64.
fdb_error_t get(int64_t* out);
private:
friend class Transaction;
// Only Transaction may construct one (from a raw FDBFuture it owns).
Int64Future(FDBFuture* f) : Future(f) {}
};
// Typed future returned by get_key/get_versionstamp.
class KeyFuture : public Future {
public:
// Call this function instead of fdb_future_get_key when using the KeyFuture
// type. Its behavior is identical to fdb_future_get_key.
fdb_error_t get(const uint8_t** out_key, int* out_key_length);
private:
friend class Transaction;
KeyFuture(FDBFuture* f) : Future(f) {}
};
// Typed future returned by get().
class ValueFuture : public Future {
public:
// Call this function instead of fdb_future_get_value when using the
// ValueFuture type. Its behavior is identical to fdb_future_get_value.
fdb_error_t get(fdb_bool_t* out_present, const uint8_t** out_value,
int* out_value_length);
private:
friend class Transaction;
ValueFuture(FDBFuture* f) : Future(f) {}
};
// Typed future returned by get_addresses_for_key().
class StringArrayFuture : public Future {
public:
// Call this function instead of fdb_future_get_string_array when using the
// StringArrayFuture type. Its behavior is identical to
// fdb_future_get_string_array.
fdb_error_t get(const char*** out_strings, int* out_count);
private:
friend class Transaction;
StringArrayFuture(FDBFuture* f) : Future(f) {}
};
// Typed future returned by get_range().
class KeyValueArrayFuture : public Future {
public:
// Call this function instead of fdb_future_get_keyvalue_array when using
// the KeyValueArrayFuture type. Its behavior is identical to
// fdb_future_get_keyvalue_array.
fdb_error_t get(const FDBKeyValue** out_kv, int* out_count,
fdb_bool_t* out_more);
private:
friend class Transaction;
KeyValueArrayFuture(FDBFuture* f) : Future(f) {}
};
// Future carrying no value (watch/commit/on_error); only completion and the
// inherited get_error() are meaningful.
class EmptyFuture : public Future {
private:
friend class Transaction;
EmptyFuture(FDBFuture* f) : Future(f) {}
};
// Wrapper around FDBTransaction, providing the same set of calls as the C API.
// Handles cleanup of memory, removing the need to call
// fdb_transaction_destroy.
// NOTE(review): the class is implicitly copyable while the destructor calls
// fdb_transaction_destroy; copying would double-destroy the handle. Confirm
// callers never copy, or consider deleting the copy operations.
class Transaction final {
public:
// Given an FDBDatabase, initializes a new transaction.
// Aborts the process if transaction creation fails (see fdb_api.cpp).
Transaction(FDBDatabase* db);
~Transaction();
// Wrapper around fdb_transaction_reset.
void reset();
// Wrapper around fdb_transaction_cancel.
void cancel();
// Wrapper around fdb_transaction_set_option.
fdb_error_t set_option(FDBTransactionOption option, const uint8_t* value,
int value_length);
// Wrapper around fdb_transaction_set_read_version.
void set_read_version(int64_t version);
// Returns a future which will be set to the transaction read version.
Int64Future get_read_version();
// Returns a future which will be set to the approximate transaction size so far.
Int64Future get_approximate_size();
// Returns a future which will be set to the versionstamp which was used by
// any versionstamp operations in the transaction.
KeyFuture get_versionstamp();
// Returns a future which will be set to the value of `key` in the database.
// `key` is borrowed only for the duration of the call.
ValueFuture get(std::string_view key, fdb_bool_t snapshot);
// Returns a future which will be set to the key in the database matching the
// passed key selector.
KeyFuture get_key(const uint8_t* key_name, int key_name_length,
fdb_bool_t or_equal, int offset, fdb_bool_t snapshot);
// Returns a future which will be set to an array of strings.
StringArrayFuture get_addresses_for_key(std::string_view key);
// Returns a future which will be set to an FDBKeyValue array.
// Parameters mirror fdb_transaction_get_range; begin/end are key selectors.
KeyValueArrayFuture get_range(const uint8_t* begin_key_name,
int begin_key_name_length,
fdb_bool_t begin_or_equal, int begin_offset,
const uint8_t* end_key_name,
int end_key_name_length,
fdb_bool_t end_or_equal, int end_offset,
int limit, int target_bytes,
FDBStreamingMode mode, int iteration,
fdb_bool_t snapshot, fdb_bool_t reverse);
// Wrapper around fdb_transaction_watch. Returns a future representing an
// empty value.
EmptyFuture watch(std::string_view key);
// Wrapper around fdb_transaction_commit. Returns a future representing an
// empty value.
EmptyFuture commit();
// Wrapper around fdb_transaction_on_error. Returns a future representing an
// empty value.
EmptyFuture on_error(fdb_error_t err);
// Wrapper around fdb_transaction_clear.
void clear(std::string_view key);
// Wrapper around fdb_transaction_clear_range.
void clear_range(std::string_view begin_key, std::string_view end_key);
// Wrapper around fdb_transaction_set.
void set(std::string_view key, std::string_view value);
// Wrapper around fdb_transaction_atomic_op.
void atomic_op(std::string_view key, const uint8_t* param, int param_length,
FDBMutationType operationType);
// Wrapper around fdb_transaction_get_committed_version.
fdb_error_t get_committed_version(int64_t* out_version);
// Wrapper around fdb_transaction_add_conflict_range.
fdb_error_t add_conflict_range(std::string_view begin_key,
std::string_view end_key,
FDBConflictRangeType type);
private:
FDBTransaction* tr_;
};
} // namespace fdb

View File

@ -0,0 +1,75 @@
/*
* setup_tests.cpp
*
* This source file is part of the FoundationDB open source project
*
* Copyright 2013-2020 Apple Inc. and the FoundationDB project authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Unit tests for API setup, network initialization functions from the FDB C API.
#define FDB_API_VERSION 620
#include <foundationdb/fdb_c.h>
#include <iostream>
#include <thread>
#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
#include "doctest.h"
// Aborts the process when an FDB C API call reports an error, printing the
// human-readable description from fdb_get_error() to stderr first.
void fdb_check(fdb_error_t e) {
  if (!e) {
    return;
  }
  std::cerr << fdb_get_error(e) << std::endl;
  std::abort();
}
// Exercises API-version selection and network start/stop in the only order
// the calls are made here: select version -> setup network -> run -> stop.
TEST_CASE("setup") {
fdb_error_t err;
// 9000 exceeds FDB_API_VERSION (620), so selection must fail.
err = fdb_select_api_version(9000);
CHECK(err);
// Select current API version
fdb_check(fdb_select_api_version(620));
// Error to call again after a successful return
err = fdb_select_api_version(620);
CHECK(err);
CHECK(fdb_get_max_api_version() >= 620);
fdb_check(fdb_setup_network());
// Calling a second time should fail
err = fdb_setup_network();
CHECK(err);
// Completion hook: flips `called` when invoked by the network machinery.
struct Context {
bool called = false;
};
Context context;
fdb_check(fdb_add_network_thread_completion_hook(
[](void *param) {
auto *context = static_cast<Context *>(param);
context->called = true;
},
&context));
// fdb_run_network runs on its own thread and returns after fdb_stop_network
// (the join below relies on that).
std::thread network_thread{&fdb_run_network};
// Hook must not have fired before the network is stopped...
CHECK(!context.called);
fdb_check(fdb_stop_network());
network_thread.join();
// ...and must have fired by the time the network thread exits.
CHECK(context.called);
}

View File

@ -0,0 +1,18 @@
# Download doctest repo.
# Only the source checkout is needed: the configure/build/install steps are
# deliberately disabled below, and consumers just add the include directory.
include(ExternalProject)
find_package(Git REQUIRED)
ExternalProject_Add(
doctest
PREFIX ${CMAKE_BINARY_DIR}/doctest
GIT_REPOSITORY https://github.com/onqtam/doctest.git
# Pin to an exact commit so builds are reproducible.
GIT_TAG 1c8da00c978c19e00a434b2b1f854fcffc9fba35 # v2.4.0
TIMEOUT 10
CONFIGURE_COMMAND ""
BUILD_COMMAND ""
INSTALL_COMMAND ""
LOG_DOWNLOAD ON
)
ExternalProject_Get_Property(doctest source_dir)
# Expose the checkout's doctest/ folder for target_include_directories().
set(DOCTEST_INCLUDE_DIR ${source_dir}/doctest CACHE INTERNAL "Path to include folder for doctest")

File diff suppressed because it is too large Load Diff

View File

@ -87,7 +87,7 @@ func (s Snapshot) GetDatabase() Database {
return s.transaction.db
}
// GetEstimatedRangeSizeBytes will get an estimate for the number of bytes
// GetEstimatedRangeSizeBytes returns an estimate for the number of bytes
// stored in the given range.
func (s Snapshot) GetEstimatedRangeSizeBytes(r ExactRange) FutureInt64 {
beginKey, endKey := r.FDBRangeKeys()
@ -97,8 +97,9 @@ func (s Snapshot) GetEstimatedRangeSizeBytes(r ExactRange) FutureInt64 {
)
}
// GetRangeSplitPoints will return a list of keys that can divide the given range into
// chunks based on the chunk size provided.
// GetRangeSplitPoints returns a list of keys that can split the given range
// into (roughly) equally sized chunks based on chunkSize.
// Note: the returned split points contain the start key and end key of the given range.
func (s Snapshot) GetRangeSplitPoints(r ExactRange, chunkSize int64) FutureKeyArray {
beginKey, endKey := r.FDBRangeKeys()
return s.getRangeSplitPoints(

View File

@ -319,7 +319,7 @@ func (t *transaction) getEstimatedRangeSizeBytes(beginKey Key, endKey Key) Futur
}
}
// GetEstimatedRangeSizeBytes will get an estimate for the number of bytes
// GetEstimatedRangeSizeBytes returns an estimate for the number of bytes
// stored in the given range.
// Note: the estimated size is calculated based on the sampling done by FDB server. The sampling
// algorithm works roughly in this way: the larger the key-value pair is, the more likely it would
@ -348,8 +348,9 @@ func (t *transaction) getRangeSplitPoints(beginKey Key, endKey Key, chunkSize in
}
}
// GetRangeSplitPoints will return a list of keys that can divide the given range into
// chunks based on the chunk size provided.
// GetRangeSplitPoints returns a list of keys that can split the given range
// into (roughly) equally sized chunks based on chunkSize.
// Note: the returned split points contain the start key and end key of the given range.
func (t Transaction) GetRangeSplitPoints(r ExactRange, chunkSize int64) FutureKeyArray {
beginKey, endKey := r.FDBRangeKeys()
return t.getRangeSplitPoints(

View File

@ -456,7 +456,8 @@ public interface ReadTransaction extends ReadTransactionContext {
CompletableFuture<Long> getEstimatedRangeSizeBytes(Range range);
/**
* Gets a list of keys that can split the given range into similar sized chunks based on chunkSize
* Gets a list of keys that can split the given range into (roughly) equally sized chunks based on <code>chunkSize</code>.
* Note: the returned split points contain the start key and end key of the given range.
*
* @param begin the beginning of the range (inclusive)
* @param end the end of the range (exclusive)
@ -466,7 +467,8 @@ public interface ReadTransaction extends ReadTransactionContext {
CompletableFuture<KeyArrayResult> getRangeSplitPoints(byte[] begin, byte[] end, long chunkSize);
/**
* Gets a list of keys that can split the given range into similar sized chunks based on chunkSize
* Gets a list of keys that can split the given range into (roughly) equally sized chunks based on <code>chunkSize</code>.
* Note: the returned split points contain the start key and end key of the given range.
*
* @param range the range of the keys
*

View File

@ -41,6 +41,7 @@ endif()
add_compile_options(-DCMAKE_BUILD)
add_compile_definitions(BOOST_ERROR_CODE_HEADER_ONLY BOOST_SYSTEM_NO_DEPRECATED)
set(THREADS_PREFER_PTHREAD_FLAG ON)
find_package(Threads REQUIRED)
if(ALLOC_INSTRUMENTATION)
add_compile_options(-DALLOC_INSTRUMENTATION)

View File

@ -28,8 +28,6 @@ Starting and stopping
After installation, FoundationDB is set to start automatically. You can manually start and stop the database with the commands shown below.
These commands start and stop the master ``fdbmonitor`` process, which in turn starts ``fdbserver`` and ``backup-agent`` processes. See :ref:`administration_fdbmonitor` for details.
Linux
-----
@ -58,6 +56,15 @@ It can be stopped and prevented from starting at boot as follows::
host:~ user$ sudo launchctl unload -w /Library/LaunchDaemons/com.foundationdb.fdbmonitor.plist
Start, stop and restart behavior
=================================
These commands above start and stop the master ``fdbmonitor`` process, which in turn starts ``fdbserver`` and ``backup-agent`` processes. See :ref:`administration_fdbmonitor` for details.
After any child process has terminated by any reason, ``fdbmonitor`` tries to restart it. See :ref:`restarting parameters <configuration-restarting>`.
When ``fdbmonitor`` itself is killed unexpectedly (for example, by the ``out-of-memory killer``), all the child processes are also terminated. Then the operating system is responsible for restarting it. See :ref:`Configuring autorestart of fdbmonitor <configuration-restart-fdbmonitor>`.
.. _foundationdb-cluster-file:
Cluster files

View File

@ -301,6 +301,12 @@ See :ref:`developer-guide-programming-with-futures` for further (language-indepe
|future-get-return1| |future-get-return2|.
.. function:: fdb_error_t fdb_future_get_key_array( FDBFuture* f, FDBKey const** out_key_array, int* out_count)
Extracts an array of :type:`FDBKey` from an :type:`FDBFuture*` into a caller-provided variable of type ``FDBKey*``. The size of the array will also be extracted and passed back by a caller-provided variable of type ``int`` |future-warning|
|future-get-return1| |future-get-return2|.
.. function:: fdb_error_t fdb_future_get_key(FDBFuture* future, uint8_t const** out_key, int* out_key_length)
Extracts a key from an :type:`FDBFuture` into caller-provided variables of type ``uint8_t*`` (a pointer to the beginning of the key) and ``int`` (the length of the key). |future-warning|
@ -480,6 +486,12 @@ Applications must provide error handling and an appropriate retry loop around th
|future-return0| the estimated size of the key range given. |future-return1| call :func:`fdb_future_get_int64()` to extract the size, |future-return2|
.. function:: FDBFuture* fdb_transaction_get_range_split_points( FDBTransaction* tr, uint8_t const* begin_key_name, int begin_key_name_length, uint8_t const* end_key_name, int end_key_name_length, int64_t chunk_size)
Returns a list of keys that can split the given range into (roughly) equally sized chunks based on ``chunk_size``.
.. note:: The returned split points contain the start key and end key of the given range
|future-return0| the list of split points. |future-return1| call :func:`fdb_future_get_key_array()` to extract the array, |future-return2|
.. function:: FDBFuture* fdb_transaction_get_key(FDBTransaction* transaction, uint8_t const* key_name, int key_name_length, fdb_bool_t or_equal, int offset, fdb_bool_t snapshot)
Resolves a :ref:`key selector <key-selectors>` against the keys in the database snapshot represented by ``transaction``.

View File

@ -799,9 +799,15 @@ Transaction misc functions
.. method:: Transaction.get_estimated_range_size_bytes(begin_key, end_key)
Get the estimated byte size of the given key range. Returns a :class:`FutureInt64`.
Gets the estimated byte size of the given key range. Returns a :class:`FutureInt64`.
.. note:: The estimated size is calculated based on the sampling done by FDB server. The sampling algorithm works roughly in this way: the larger the key-value pair is, the more likely it would be sampled and the more accurate its sampled size would be. And due to that reason it is recommended to use this API to query against large ranges for accuracy considerations. For a rough reference, if the returned size is larger than 3MB, one can consider the size to be accurate.
.. method:: Transaction.get_range_split_points(self, begin_key, end_key, chunk_size)
Gets a list of keys that can split the given range into (roughly) equally sized chunks based on ``chunk_size``. Returns a :class:`FutureKeyArray`.
.. note:: The returned split points contain the start key and end key of the given range.
.. _api-python-transaction-options:
Transaction misc functions

View File

@ -741,11 +741,16 @@ Most applications should use the read version that FoundationDB determines autom
Transaction misc functions
--------------------------
.. method:: Transaction.get_estimated_range_size_bytes(begin_key, end_key)
.. method:: Transaction.get_estimated_range_size_bytes(begin_key, end_key) -> Int64Future
Get the estimated byte size of the given key range. Returns a :class:`Int64Future`.
Gets the estimated byte size of the given key range. Returns a :class:`Int64Future`.
.. note:: The estimated size is calculated based on the sampling done by FDB server. The sampling algorithm works roughly in this way: the larger the key-value pair is, the more likely it would be sampled and the more accurate its sampled size would be. And due to that reason it is recommended to use this API to query against large ranges for accuracy considerations. For a rough reference, if the returned size is larger than 3MB, one can consider the size to be accurate.
.. method:: Transaction.get_range_split_points(begin_key, end_key, chunk_size) -> FutureKeyArray
Gets a list of keys that can split the given range into (roughly) equally sized chunks based on ``chunk_size``. Returns a :class:`FutureKeyArray`.
.. note:: The returned split points contain the start key and end key of the given range.
.. method:: Transaction.get_approximate_size() -> Int64Future
|transaction-get-approximate-size-blurb|. Returns a :class:`Int64Future`.

View File

@ -9,6 +9,19 @@ This document provides an overview of changes that an application developer may
For more details about API versions, see :ref:`api-versions`.
.. _api-version-upgrade-guide-700:
API version 700
===============
General
-------
Python bindings
---------------
* The function ``get_estimated_range_size_bytes`` will now throw an error if the ``begin_key`` or ``end_key`` is ``None``.
.. _api-version-upgrade-guide-630:
API version 630

View File

@ -229,6 +229,8 @@ Contains settings applicable to all processes (e.g. fdbserver, backup_agent).
* ``kill_on_configuration_change``: If ``true``, affected processes will be restarted whenever the configuration file changes. Defaults to ``true``.
* ``disable_lifecycle_logging``: If ``true``, ``fdbmonitor`` will not write log events when processes start or terminate. Defaults to ``false``.
.. _configuration-restarting:
The ``[general]`` section also contains some parameters to control how processes are restarted when they die. ``fdbmonitor`` uses backoff logic to prevent a process that dies repeatedly from cycling too quickly, and it also introduces up to +/-10% random jitter into the delay to avoid multiple processes all restarting simultaneously. ``fdbmonitor`` tracks separate backoff state for each process, so the restarting of one process will have no effect on the backoff behavior of another.
* ``restart_delay``: The maximum number of seconds (subject to jitter) that fdbmonitor will delay before restarting a failed process.
@ -236,6 +238,8 @@ The ``[general]`` section also contains some parameters to control how processes
* ``restart_backoff``: Controls how quickly ``fdbmonitor`` backs off when a process dies repeatedly. The previous delay (or 1, if the previous delay is 0) is multiplied by ``restart_backoff`` to get the next delay, maxing out at the value of ``restart_delay``. Defaults to the value of ``restart_delay``, meaning that the second and subsequent failures will all delay ``restart_delay`` between restarts.
* ``restart_delay_reset_interval``: The number of seconds a process must be running before resetting the backoff back to the value of ``initial_restart_delay``. Defaults to the value of ``restart_delay``.
These ``restart_`` parameters are not applicable to the ``fdbmonitor`` process itself. See :ref:`Configuring autorestart of fdbmonitor <configuration-restart-fdbmonitor>` for details.
As an example, let's say the following parameters have been set:
.. code-block:: ini
@ -322,6 +326,24 @@ Backup agent sections
These sections run and configure the backup agent process used for :doc:`point-in-time backups <backups>` of FoundationDB. These don't usually need to be modified. The structure and functionality is similar to the ``[fdbserver]`` and ``[fdbserver.<ID>]`` sections.
.. _configuration-restart-fdbmonitor:
Configuring autorestart of fdbmonitor
=====================================
Configuring the restart parameters for ``fdbmonitor`` is operating system-specific.
Linux (RHEL/CentOS)
-------------------
``systemd`` controls the ``foundationdb`` service. If ``fdbmonitor`` is killed unexpectedly, systemd restarts it after 60 seconds by default. To adjust this value, create a file ``/etc/systemd/system/foundationdb.service.d/override.conf`` containing the overriding values. For example:
.. code-block:: ini
[Service]
RestartSec=20s
To disable auto-restart of ``fdbmonitor``, put ``Restart=no`` in the same section.
.. _configuration-choosing-redundancy-mode:

View File

@ -34,7 +34,7 @@ Status
Bindings
--------
* Python: The method ``get_estimated_range_size_bytes`` will now throw an error if the ``begin_key`` or ``end_key`` is ``None``. `(PR #3394) <https://github.com/apple/foundationdb/pull/3394>`_
* Python: The function ``get_estimated_range_size_bytes`` will now throw an error if the ``begin_key`` or ``end_key`` is ``None``. `(PR #3394) <https://github.com/apple/foundationdb/pull/3394>`_
Other Changes

View File

@ -36,18 +36,18 @@
#endif
#include "flow/actorcompiler.h" // This must be the last #include.
struct LineNoiseReader : IThreadPoolReceiver {
virtual void init() {}
struct LineNoiseReader final : IThreadPoolReceiver {
void init() override {}
struct Read : TypedAction<LineNoiseReader, Read> {
std::string prompt;
struct Read final : TypedAction<LineNoiseReader, Read> {
std::string prompt;
ThreadReturnPromise<Optional<std::string>> result;
virtual double getTimeEstimate() { return 0.0; }
explicit Read(std::string const& prompt) : prompt(prompt) {}
};
double getTimeEstimate() const override { return 0.0; }
explicit Read(std::string const& prompt) : prompt(prompt) {}
};
void action(Read& r) {
void action(Read& r) {
try {
r.result.send( read(r.prompt) );
} catch (Error& e) {

View File

@ -1837,7 +1837,7 @@ private:
std::string m_path;
};
class BackupContainerBlobStore : public BackupContainerFileSystem, ReferenceCounted<BackupContainerBlobStore> {
class BackupContainerBlobStore final : public BackupContainerFileSystem, ReferenceCounted<BackupContainerBlobStore> {
private:
// Backup files to under a single folder prefix with subfolders for each named backup
static const std::string DATAFOLDER;
@ -1877,15 +1877,13 @@ public:
}
}
void addref() final { return ReferenceCounted<BackupContainerBlobStore>::addref(); }
void delref() final { return ReferenceCounted<BackupContainerBlobStore>::delref(); }
void addref() override { return ReferenceCounted<BackupContainerBlobStore>::addref(); }
void delref() override { return ReferenceCounted<BackupContainerBlobStore>::delref(); }
static std::string getURLFormat() {
return BlobStoreEndpoint::getURLFormat(true) + " (Note: The 'bucket' parameter is required.)";
}
virtual ~BackupContainerBlobStore() {}
Future<Reference<IAsyncFile>> readFile(std::string path) final {
return Reference<IAsyncFile>(
new AsyncFileReadAheadCache(

View File

@ -166,7 +166,8 @@ struct CommitTransactionRequest : TimedRequest {
Optional<ClientTrCommitCostEstimation> commitCostEstimation;
Optional<TagSet> tagSet;
CommitTransactionRequest() : flags(0) {}
CommitTransactionRequest() : CommitTransactionRequest(SpanID()) {}
CommitTransactionRequest(SpanID const& context) : spanContext(context), flags(0) {}
template <class Ar>
void serialize(Ar& ar) {

View File

@ -49,6 +49,7 @@ static const char* typeString[] = { "SetValue",
"MinV2",
"AndV2",
"CompareAndClear",
"Reserved_For_SpanContextMessage",
"MAX_ATOMIC_OP" };
struct MutationRef {
@ -75,6 +76,7 @@ struct MutationRef {
MinV2,
AndV2,
CompareAndClear,
Reserved_For_SpanContextMessage /* See fdbserver/SpanContextMessage.h */,
MAX_ATOMIC_OP
};
// This is stored this way for serialization purposes.

View File

@ -129,9 +129,9 @@ namespace dbBackup {
struct BackupRangeTaskFunc : TaskFuncBase {
static StringRef name;
static const uint32_t version;
static constexpr uint32_t version = 1;
static struct {
static struct {
static TaskParam<int64_t> bytesWritten() { return LiteralStringRef(__FUNCTION__); }
} Params;
@ -421,16 +421,15 @@ namespace dbBackup {
};
StringRef BackupRangeTaskFunc::name = LiteralStringRef("dr_backup_range");
const uint32_t BackupRangeTaskFunc::version = 1;
const Key BackupRangeTaskFunc::keyAddBackupRangeTasks = LiteralStringRef("addBackupRangeTasks");
const Key BackupRangeTaskFunc::keyBackupRangeBeginKey = LiteralStringRef("backupRangeBeginKey");
REGISTER_TASKFUNC(BackupRangeTaskFunc);
struct FinishFullBackupTaskFunc : TaskFuncBase {
static StringRef name;
static const uint32_t version;
static constexpr uint32_t version = 1;
ACTOR static Future<Void> _finish(Reference<ReadYourWritesTransaction> tr, Reference<TaskBucket> taskBucket, Reference<FutureBucket> futureBucket, Reference<Task> task) {
ACTOR static Future<Void> _finish(Reference<ReadYourWritesTransaction> tr, Reference<TaskBucket> taskBucket, Reference<FutureBucket> futureBucket, Reference<Task> task) {
state Subspace states = Subspace(databaseBackupPrefixRange.begin).get(BackupAgentBase::keyStates).get(task->params[BackupAgentBase::keyConfigLogUid]);
wait(checkTaskVersion(tr, task, FinishFullBackupTaskFunc::name, FinishFullBackupTaskFunc::version));
@ -467,14 +466,13 @@ namespace dbBackup {
};
StringRef FinishFullBackupTaskFunc::name = LiteralStringRef("dr_finish_full_backup");
const uint32_t FinishFullBackupTaskFunc::version = 1;
REGISTER_TASKFUNC(FinishFullBackupTaskFunc);
struct EraseLogRangeTaskFunc : TaskFuncBase {
static StringRef name;
static const uint32_t version;
static constexpr uint32_t version = 1;
StringRef getName() const { return name; };
StringRef getName() const { return name; };
Future<Void> execute(Database cx, Reference<TaskBucket> tb, Reference<FutureBucket> fb, Reference<Task> task) { return _execute(cx, tb, fb, task); };
Future<Void> finish(Reference<ReadYourWritesTransaction> tr, Reference<TaskBucket> tb, Reference<FutureBucket> fb, Reference<Task> task) { return _finish(tr, tb, fb, task); };
@ -523,14 +521,13 @@ namespace dbBackup {
}
};
StringRef EraseLogRangeTaskFunc::name = LiteralStringRef("dr_erase_log_range");
const uint32_t EraseLogRangeTaskFunc::version = 1;
REGISTER_TASKFUNC(EraseLogRangeTaskFunc);
struct CopyLogRangeTaskFunc : TaskFuncBase {
static StringRef name;
static const uint32_t version;
static constexpr uint32_t version = 1;
static struct {
static struct {
static TaskParam<int64_t> bytesWritten() { return LiteralStringRef(__FUNCTION__); }
} Params;
@ -773,15 +770,14 @@ namespace dbBackup {
}
};
StringRef CopyLogRangeTaskFunc::name = LiteralStringRef("dr_copy_log_range");
const uint32_t CopyLogRangeTaskFunc::version = 1;
const Key CopyLogRangeTaskFunc::keyNextBeginVersion = LiteralStringRef("nextBeginVersion");
REGISTER_TASKFUNC(CopyLogRangeTaskFunc);
struct CopyLogsTaskFunc : TaskFuncBase {
static StringRef name;
static const uint32_t version;
static constexpr uint32_t version = 1;
ACTOR static Future<Void> _finish(Reference<ReadYourWritesTransaction> tr, Reference<TaskBucket> taskBucket, Reference<FutureBucket> futureBucket, Reference<Task> task) {
ACTOR static Future<Void> _finish(Reference<ReadYourWritesTransaction> tr, Reference<TaskBucket> taskBucket, Reference<FutureBucket> futureBucket, Reference<Task> task) {
state Subspace conf = Subspace(databaseBackupPrefixRange.begin).get(BackupAgentBase::keyConfig).get(task->params[BackupAgentBase::keyConfigLogUid]);
state Subspace states = Subspace(databaseBackupPrefixRange.begin).get(BackupAgentBase::keyStates).get(task->params[BackupAgentBase::keyConfigLogUid]);
wait(checkTaskVersion(tr, task, CopyLogsTaskFunc::name, CopyLogsTaskFunc::version));
@ -876,13 +872,12 @@ namespace dbBackup {
Future<Void> finish(Reference<ReadYourWritesTransaction> tr, Reference<TaskBucket> tb, Reference<FutureBucket> fb, Reference<Task> task) { return _finish(tr, tb, fb, task); };
};
StringRef CopyLogsTaskFunc::name = LiteralStringRef("dr_copy_logs");
const uint32_t CopyLogsTaskFunc::version = 1;
REGISTER_TASKFUNC(CopyLogsTaskFunc);
struct FinishedFullBackupTaskFunc : TaskFuncBase {
static StringRef name;
static const uint32_t version;
static const Key keyInsertTask;
static constexpr uint32_t version = 1;
static const Key keyInsertTask;
StringRef getName() const { return name; };
@ -976,15 +971,14 @@ namespace dbBackup {
Future<Void> finish(Reference<ReadYourWritesTransaction> tr, Reference<TaskBucket> tb, Reference<FutureBucket> fb, Reference<Task> task) { return _finish(tr, tb, fb, task); };
};
StringRef FinishedFullBackupTaskFunc::name = LiteralStringRef("dr_finished_full_backup");
const uint32_t FinishedFullBackupTaskFunc::version = 1;
const Key FinishedFullBackupTaskFunc::keyInsertTask = LiteralStringRef("insertTask");
REGISTER_TASKFUNC(FinishedFullBackupTaskFunc);
struct CopyDiffLogsTaskFunc : TaskFuncBase {
static StringRef name;
static const uint32_t version;
static constexpr uint32_t version = 1;
ACTOR static Future<Void> _finish(Reference<ReadYourWritesTransaction> tr, Reference<TaskBucket> taskBucket, Reference<FutureBucket> futureBucket, Reference<Task> task) {
ACTOR static Future<Void> _finish(Reference<ReadYourWritesTransaction> tr, Reference<TaskBucket> taskBucket, Reference<FutureBucket> futureBucket, Reference<Task> task) {
state Subspace conf = Subspace(databaseBackupPrefixRange.begin).get(BackupAgentBase::keyConfig).get(task->params[BackupAgentBase::keyConfigLogUid]);
state Subspace states = Subspace(databaseBackupPrefixRange.begin).get(BackupAgentBase::keyStates).get(task->params[BackupAgentBase::keyConfigLogUid]);
wait(checkTaskVersion(tr, task, CopyDiffLogsTaskFunc::name, CopyDiffLogsTaskFunc::version));
@ -1059,15 +1053,14 @@ namespace dbBackup {
Future<Void> finish(Reference<ReadYourWritesTransaction> tr, Reference<TaskBucket> tb, Reference<FutureBucket> fb, Reference<Task> task) { return _finish(tr, tb, fb, task); };
};
StringRef CopyDiffLogsTaskFunc::name = LiteralStringRef("dr_copy_diff_logs");
const uint32_t CopyDiffLogsTaskFunc::version = 1;
REGISTER_TASKFUNC(CopyDiffLogsTaskFunc);
// Skip unneeded EraseLogRangeTaskFunc in 5.1
struct SkipOldEraseLogRangeTaskFunc : TaskFuncBase {
static StringRef name;
static const uint32_t version;
static constexpr uint32_t version = 1;
ACTOR static Future<Void> _finish(Reference<ReadYourWritesTransaction> tr, Reference<TaskBucket> taskBucket, Reference<FutureBucket> futureBucket, Reference<Task> task) {
ACTOR static Future<Void> _finish(Reference<ReadYourWritesTransaction> tr, Reference<TaskBucket> taskBucket, Reference<FutureBucket> futureBucket, Reference<Task> task) {
state Reference<TaskFuture> taskFuture = futureBucket->unpack(task->params[Task::reservedTaskParamKeyDone]);
wait(taskFuture->set(tr, taskBucket) && taskBucket->finish(tr, task));
return Void();
@ -1079,16 +1072,15 @@ namespace dbBackup {
Future<Void> finish(Reference<ReadYourWritesTransaction> tr, Reference<TaskBucket> tb, Reference<FutureBucket> fb, Reference<Task> task) { return _finish(tr, tb, fb, task); };
};
StringRef SkipOldEraseLogRangeTaskFunc::name = LiteralStringRef("dr_skip_legacy_task");
const uint32_t SkipOldEraseLogRangeTaskFunc::version = 1;
REGISTER_TASKFUNC(SkipOldEraseLogRangeTaskFunc);
REGISTER_TASKFUNC_ALIAS(SkipOldEraseLogRangeTaskFunc, db_erase_log_range);
// This is almost the same as CopyLogRangeTaskFunc in 5.1. The only purpose is to support DR upgrade
struct OldCopyLogRangeTaskFunc : TaskFuncBase {
static StringRef name;
static const uint32_t version;
static constexpr uint32_t version = 1;
static struct {
static struct {
static TaskParam<int64_t> bytesWritten() { return LiteralStringRef(__FUNCTION__); }
} Params;
@ -1255,15 +1247,14 @@ namespace dbBackup {
}
};
StringRef OldCopyLogRangeTaskFunc::name = LiteralStringRef("db_copy_log_range");
const uint32_t OldCopyLogRangeTaskFunc::version = 1;
const Key OldCopyLogRangeTaskFunc::keyNextBeginVersion = LiteralStringRef("nextBeginVersion");
REGISTER_TASKFUNC(OldCopyLogRangeTaskFunc);
struct AbortOldBackupTaskFunc : TaskFuncBase {
static StringRef name;
static const uint32_t version;
static constexpr uint32_t version = 1;
ACTOR static Future<Void> _execute(Database cx, Reference<TaskBucket> taskBucket, Reference<FutureBucket> futureBucket, Reference<Task> task) {
ACTOR static Future<Void> _execute(Database cx, Reference<TaskBucket> taskBucket, Reference<FutureBucket> futureBucket, Reference<Task> task) {
state DatabaseBackupAgent srcDrAgent(taskBucket->src);
state Reference<ReadYourWritesTransaction> tr(new ReadYourWritesTransaction(cx));
state Key tagNameKey;
@ -1316,7 +1307,6 @@ namespace dbBackup {
Future<Void> finish(Reference<ReadYourWritesTransaction> tr, Reference<TaskBucket> tb, Reference<FutureBucket> fb, Reference<Task> task) { return _finish(tr, tb, fb, task); };
};
StringRef AbortOldBackupTaskFunc::name = LiteralStringRef("dr_abort_legacy_backup");
const uint32_t AbortOldBackupTaskFunc::version = 1;
REGISTER_TASKFUNC(AbortOldBackupTaskFunc);
REGISTER_TASKFUNC_ALIAS(AbortOldBackupTaskFunc, db_backup_range);
REGISTER_TASKFUNC_ALIAS(AbortOldBackupTaskFunc, db_finish_full_backup);
@ -1328,9 +1318,9 @@ namespace dbBackup {
//Upgrade DR from 5.1
struct CopyDiffLogsUpgradeTaskFunc : TaskFuncBase {
static StringRef name;
static const uint32_t version;
static constexpr uint32_t version = 1;
ACTOR static Future<Void> _execute(Database cx, Reference<TaskBucket> taskBucket, Reference<FutureBucket> futureBucket, Reference<Task> task) {
ACTOR static Future<Void> _execute(Database cx, Reference<TaskBucket> taskBucket, Reference<FutureBucket> futureBucket, Reference<Task> task) {
state Key logUidValue = task->params[DatabaseBackupAgent::keyConfigLogUid];
state Subspace sourceStates = Subspace(databaseBackupPrefixRange.begin).get(BackupAgentBase::keySourceStates).get(logUidValue);
state Subspace config = Subspace(databaseBackupPrefixRange.begin).get(BackupAgentBase::keyConfig).get(logUidValue);
@ -1435,14 +1425,13 @@ namespace dbBackup {
Future<Void> finish(Reference<ReadYourWritesTransaction> tr, Reference<TaskBucket> tb, Reference<FutureBucket> fb, Reference<Task> task) { return _finish(tr, tb, fb, task); };
};
StringRef CopyDiffLogsUpgradeTaskFunc::name = LiteralStringRef("db_copy_diff_logs");
const uint32_t CopyDiffLogsUpgradeTaskFunc::version = 1;
REGISTER_TASKFUNC(CopyDiffLogsUpgradeTaskFunc);
struct BackupRestorableTaskFunc : TaskFuncBase {
static StringRef name;
static const uint32_t version;
static constexpr uint32_t version = 1;
ACTOR static Future<Void> _execute(Database cx, Reference<TaskBucket> taskBucket, Reference<FutureBucket> futureBucket, Reference<Task> task) {
ACTOR static Future<Void> _execute(Database cx, Reference<TaskBucket> taskBucket, Reference<FutureBucket> futureBucket, Reference<Task> task) {
state Subspace sourceStates = Subspace(databaseBackupPrefixRange.begin).get(BackupAgentBase::keySourceStates).get(task->params[BackupAgentBase::keyConfigLogUid]);
wait(checkTaskVersion(cx, task, BackupRestorableTaskFunc::name, BackupRestorableTaskFunc::version));
state Transaction tr(taskBucket->src);
@ -1527,14 +1516,13 @@ namespace dbBackup {
Future<Void> finish(Reference<ReadYourWritesTransaction> tr, Reference<TaskBucket> tb, Reference<FutureBucket> fb, Reference<Task> task) { return _finish(tr, tb, fb, task); };
};
StringRef BackupRestorableTaskFunc::name = LiteralStringRef("dr_backup_restorable");
const uint32_t BackupRestorableTaskFunc::version = 1;
REGISTER_TASKFUNC(BackupRestorableTaskFunc);
struct StartFullBackupTaskFunc : TaskFuncBase {
static StringRef name;
static const uint32_t version;
static constexpr uint32_t version = 1;
ACTOR static Future<Void> _execute(Database cx, Reference<TaskBucket> taskBucket, Reference<FutureBucket> futureBucket, Reference<Task> task) {
ACTOR static Future<Void> _execute(Database cx, Reference<TaskBucket> taskBucket, Reference<FutureBucket> futureBucket, Reference<Task> task) {
state Key logUidValue = task->params[DatabaseBackupAgent::keyConfigLogUid];
state Subspace sourceStates = Subspace(databaseBackupPrefixRange.begin).get(BackupAgentBase::keySourceStates).get(logUidValue);
wait(checkTaskVersion(cx, task, StartFullBackupTaskFunc::name, StartFullBackupTaskFunc::version));
@ -1726,7 +1714,6 @@ namespace dbBackup {
Future<Void> finish(Reference<ReadYourWritesTransaction> tr, Reference<TaskBucket> tb, Reference<FutureBucket> fb, Reference<Task> task) { return _finish(tr, tb, fb, task); };
};
StringRef StartFullBackupTaskFunc::name = LiteralStringRef("dr_start_full_backup");
const uint32_t StartFullBackupTaskFunc::version = 1;
REGISTER_TASKFUNC(StartFullBackupTaskFunc);
}
@ -1822,7 +1809,7 @@ void checkAtomicSwitchOverConfig(StatusObjectReader srcStatus, StatusObjectReade
class DatabaseBackupAgentImpl {
public:
static const int MAX_RESTORABLE_FILE_METASECTION_BYTES = 1024 * 8;
static constexpr int MAX_RESTORABLE_FILE_METASECTION_BYTES = 1024 * 8;
ACTOR static Future<Void> waitUpgradeToLatestDrVersion(DatabaseBackupAgent* backupAgent, Database cx, Key tagName) {
state UID logUid = wait(backupAgent->getLogUid(cx, tagName));

View File

@ -754,14 +754,16 @@ struct TLogVersion {
// V3 was the introduction of spill by reference;
// V4 changed how data gets written to satellite TLogs so that we can peek from them;
// V5 merged reference and value spilling
// V6 added span context to list of serialized mutations sent from proxy to tlogs
// V1 = 1, // 4.6 is dispatched to via 6.0
V2 = 2, // 6.0
V3 = 3, // 6.1
V4 = 4, // 6.2
V5 = 5, // 6.3
V6 = 6, // 7.0
MIN_SUPPORTED = V2,
MAX_SUPPORTED = V5,
MIN_RECRUITABLE = V4,
MAX_SUPPORTED = V6,
MIN_RECRUITABLE = V5,
DEFAULT = V5,
} version;
@ -784,6 +786,7 @@ struct TLogVersion {
if (s == LiteralStringRef("3")) return V3;
if (s == LiteralStringRef("4")) return V4;
if (s == LiteralStringRef("5")) return V5;
if (s == LiteralStringRef("6")) return V6;
return default_error_or();
}
};

View File

@ -623,8 +623,6 @@ namespace fileBackup {
// Very simple format compared to KeyRange files.
// Header, [Key, Value]... Key len
struct LogFileWriter {
static const std::string &FFs;
LogFileWriter(Reference<IBackupFile> file = Reference<IBackupFile>(), int blockSize = 0)
: file(file), blockSize(blockSize), blockEnd(0) {}
@ -793,13 +791,13 @@ namespace fileBackup {
return Void();
}
virtual StringRef getName() const {
TraceEvent(SevError, "FileBackupError").detail("Cause", "AbortFiveZeroBackupTaskFunc::name() should never be called");
StringRef getName() const override {
TraceEvent(SevError, "FileBackupError").detail("Cause", "AbortFiveZeroBackupTaskFunc::name() should never be called");
ASSERT(false);
return StringRef();
}
}
Future<Void> execute(Database cx, Reference<TaskBucket> tb, Reference<FutureBucket> fb, Reference<Task> task) { return Future<Void>(Void()); };
Future<Void> execute(Database cx, Reference<TaskBucket> tb, Reference<FutureBucket> fb, Reference<Task> task) { return Future<Void>(Void()); };
Future<Void> finish(Reference<ReadYourWritesTransaction> tr, Reference<TaskBucket> tb, Reference<FutureBucket> fb, Reference<Task> task) { return _finish(tr, tb, fb, task); };
};
StringRef AbortFiveZeroBackupTask::name = LiteralStringRef("abort_legacy_backup");
@ -863,13 +861,13 @@ namespace fileBackup {
return Void();
}
virtual StringRef getName() const {
TraceEvent(SevError, "FileBackupError").detail("Cause", "AbortFiveOneBackupTaskFunc::name() should never be called");
StringRef getName() const override {
TraceEvent(SevError, "FileBackupError").detail("Cause", "AbortFiveOneBackupTaskFunc::name() should never be called");
ASSERT(false);
return StringRef();
}
}
Future<Void> execute(Database cx, Reference<TaskBucket> tb, Reference<FutureBucket> fb, Reference<Task> task) { return Future<Void>(Void()); };
Future<Void> execute(Database cx, Reference<TaskBucket> tb, Reference<FutureBucket> fb, Reference<Task> task) { return Future<Void>(Void()); };
Future<Void> finish(Reference<ReadYourWritesTransaction> tr, Reference<TaskBucket> tb, Reference<FutureBucket> fb, Reference<Task> task) { return _finish(tr, tb, fb, task); };
};
StringRef AbortFiveOneBackupTask::name = LiteralStringRef("abort_legacy_backup_5.2");
@ -940,24 +938,18 @@ namespace fileBackup {
// Backup and Restore taskFunc definitions will inherit from one of the following classes which
// servers to catch and log to the appropriate config any error that execute/finish didn't catch and log.
struct RestoreTaskFuncBase : TaskFuncBase {
virtual Future<Void> handleError(Database cx, Reference<Task> task, Error const &error) {
return RestoreConfig(task).logError(cx, error, format("'%s' on '%s'", error.what(), task->params[Task::reservedTaskParamKeyType].printable().c_str()));
}
virtual std::string toString(Reference<Task> task)
{
return "";
}
};
Future<Void> handleError(Database cx, Reference<Task> task, Error const& error) final {
return RestoreConfig(task).logError(cx, error, format("'%s' on '%s'", error.what(), task->params[Task::reservedTaskParamKeyType].printable().c_str()));
}
virtual std::string toString(Reference<Task> task) const { return ""; }
};
struct BackupTaskFuncBase : TaskFuncBase {
virtual Future<Void> handleError(Database cx, Reference<Task> task, Error const &error) {
return BackupConfig(task).logError(cx, error, format("'%s' on '%s'", error.what(), task->params[Task::reservedTaskParamKeyType].printable().c_str()));
}
virtual std::string toString(Reference<Task> task)
{
return "";
}
};
Future<Void> handleError(Database cx, Reference<Task> task, Error const& error) final {
return BackupConfig(task).logError(cx, error, format("'%s' on '%s'", error.what(), task->params[Task::reservedTaskParamKeyType].printable().c_str()));
}
virtual std::string toString(Reference<Task> task) const { return ""; }
};
ACTOR static Future<Standalone<VectorRef<KeyRef>>> getBlockOfShards(Reference<ReadYourWritesTransaction> tr, Key beginKey, Key endKey, int limit) {
@ -976,9 +968,9 @@ namespace fileBackup {
struct BackupRangeTaskFunc : BackupTaskFuncBase {
static StringRef name;
static const uint32_t version;
static constexpr uint32_t version = 1;
static struct {
static struct {
static TaskParam<Key> beginKey() {
return LiteralStringRef(__FUNCTION__);
}
@ -990,15 +982,15 @@ namespace fileBackup {
}
} Params;
std::string toString(Reference<Task> task) {
return format("beginKey '%s' endKey '%s' addTasks %d",
std::string toString(Reference<Task> task) const override {
return format("beginKey '%s' endKey '%s' addTasks %d",
Params.beginKey().get(task).printable().c_str(),
Params.endKey().get(task).printable().c_str(),
Params.addBackupRangeTasks().get(task)
);
}
}
StringRef getName() const { return name; };
StringRef getName() const { return name; };
Future<Void> execute(Database cx, Reference<TaskBucket> tb, Reference<FutureBucket> fb, Reference<Task> task) { return _execute(cx, tb, fb, task); };
Future<Void> finish(Reference<ReadYourWritesTransaction> tr, Reference<TaskBucket> tb, Reference<FutureBucket> fb, Reference<Task> task) { return _finish(tr, tb, fb, task); };
@ -1271,14 +1263,13 @@ namespace fileBackup {
};
StringRef BackupRangeTaskFunc::name = LiteralStringRef("file_backup_write_range_5.2");
const uint32_t BackupRangeTaskFunc::version = 1;
REGISTER_TASKFUNC(BackupRangeTaskFunc);
struct BackupSnapshotDispatchTask : BackupTaskFuncBase {
static StringRef name;
static const uint32_t version;
static constexpr uint32_t version = 1;
static struct {
static struct {
// Set by Execute, used by Finish
static TaskParam<int64_t> shardsBehind() {
return LiteralStringRef(__FUNCTION__);
@ -1792,14 +1783,13 @@ namespace fileBackup {
};
StringRef BackupSnapshotDispatchTask::name = LiteralStringRef("file_backup_dispatch_ranges_5.2");
const uint32_t BackupSnapshotDispatchTask::version = 1;
REGISTER_TASKFUNC(BackupSnapshotDispatchTask);
struct BackupLogRangeTaskFunc : BackupTaskFuncBase {
static StringRef name;
static const uint32_t version;
static constexpr uint32_t version = 1;
static struct {
static struct {
static TaskParam<bool> addBackupLogRangeTasks() {
return LiteralStringRef(__FUNCTION__);
}
@ -1994,14 +1984,13 @@ namespace fileBackup {
};
StringRef BackupLogRangeTaskFunc::name = LiteralStringRef("file_backup_write_logs_5.2");
const uint32_t BackupLogRangeTaskFunc::version = 1;
REGISTER_TASKFUNC(BackupLogRangeTaskFunc);
//This task stopped being used in 6.2, however the code remains here to handle upgrades.
struct EraseLogRangeTaskFunc : BackupTaskFuncBase {
static StringRef name;
static const uint32_t version;
StringRef getName() const { return name; };
static constexpr uint32_t version = 1;
StringRef getName() const { return name; };
static struct {
static TaskParam<Version> beginVersion() {
@ -2051,16 +2040,15 @@ namespace fileBackup {
Future<Void> finish(Reference<ReadYourWritesTransaction> tr, Reference<TaskBucket> tb, Reference<FutureBucket> fb, Reference<Task> task) { return _finish(tr, tb, fb, task); };
};
StringRef EraseLogRangeTaskFunc::name = LiteralStringRef("file_backup_erase_logs_5.2");
const uint32_t EraseLogRangeTaskFunc::version = 1;
REGISTER_TASKFUNC(EraseLogRangeTaskFunc);
struct BackupLogsDispatchTask : BackupTaskFuncBase {
static StringRef name;
static const uint32_t version;
static constexpr uint32_t version = 1;
static struct {
static struct {
static TaskParam<Version> prevBeginVersion() {
return LiteralStringRef(__FUNCTION__);
}
@ -2179,14 +2167,13 @@ namespace fileBackup {
Future<Void> finish(Reference<ReadYourWritesTransaction> tr, Reference<TaskBucket> tb, Reference<FutureBucket> fb, Reference<Task> task) { return _finish(tr, tb, fb, task); };
};
StringRef BackupLogsDispatchTask::name = LiteralStringRef("file_backup_dispatch_logs_5.2");
const uint32_t BackupLogsDispatchTask::version = 1;
REGISTER_TASKFUNC(BackupLogsDispatchTask);
struct FileBackupFinishedTask : BackupTaskFuncBase {
static StringRef name;
static const uint32_t version;
static constexpr uint32_t version = 1;
StringRef getName() const { return name; };
StringRef getName() const { return name; };
ACTOR static Future<Void> _finish(Reference<ReadYourWritesTransaction> tr, Reference<TaskBucket> taskBucket, Reference<FutureBucket> futureBucket, Reference<Task> task) {
wait(checkTaskVersion(tr->getDatabase(), task, FileBackupFinishedTask::name, FileBackupFinishedTask::version));
@ -2220,13 +2207,12 @@ namespace fileBackup {
Future<Void> finish(Reference<ReadYourWritesTransaction> tr, Reference<TaskBucket> tb, Reference<FutureBucket> fb, Reference<Task> task) { return _finish(tr, tb, fb, task); };
};
StringRef FileBackupFinishedTask::name = LiteralStringRef("file_backup_finished_5.2");
const uint32_t FileBackupFinishedTask::version = 1;
REGISTER_TASKFUNC(FileBackupFinishedTask);
struct BackupSnapshotManifest : BackupTaskFuncBase {
static StringRef name;
static const uint32_t version;
static struct {
static constexpr uint32_t version = 1;
static struct {
static TaskParam<Version> endVersion() { return LiteralStringRef(__FUNCTION__); }
} Params;
@ -2381,7 +2367,6 @@ namespace fileBackup {
Future<Void> finish(Reference<ReadYourWritesTransaction> tr, Reference<TaskBucket> tb, Reference<FutureBucket> fb, Reference<Task> task) { return _finish(tr, tb, fb, task); };
};
StringRef BackupSnapshotManifest::name = LiteralStringRef("file_backup_write_snapshot_manifest_5.2");
const uint32_t BackupSnapshotManifest::version = 1;
REGISTER_TASKFUNC(BackupSnapshotManifest);
Future<Key> BackupSnapshotDispatchTask::addSnapshotManifestTask(Reference<ReadYourWritesTransaction> tr, Reference<TaskBucket> taskBucket, Reference<Task> parentTask, TaskCompletionKey completionKey, Reference<TaskFuture> waitFor) {
@ -2390,9 +2375,9 @@ namespace fileBackup {
struct StartFullBackupTaskFunc : BackupTaskFuncBase {
static StringRef name;
static const uint32_t version;
static constexpr uint32_t version = 1;
static struct {
static struct {
static TaskParam<Version> beginVersion() { return LiteralStringRef(__FUNCTION__); }
} Params;
@ -2534,7 +2519,6 @@ namespace fileBackup {
Future<Void> finish(Reference<ReadYourWritesTransaction> tr, Reference<TaskBucket> tb, Reference<FutureBucket> fb, Reference<Task> task) { return _finish(tr, tb, fb, task); };
};
StringRef StartFullBackupTaskFunc::name = LiteralStringRef("file_backup_start_5.2");
const uint32_t StartFullBackupTaskFunc::version = 1;
REGISTER_TASKFUNC(StartFullBackupTaskFunc);
struct RestoreCompleteTaskFunc : RestoreTaskFuncBase {
@ -2577,15 +2561,14 @@ namespace fileBackup {
}
static StringRef name;
static const uint32_t version;
StringRef getName() const { return name; };
static constexpr uint32_t version = 1;
StringRef getName() const { return name; };
Future<Void> execute(Database cx, Reference<TaskBucket> tb, Reference<FutureBucket> fb, Reference<Task> task) { return Void(); };
Future<Void> finish(Reference<ReadYourWritesTransaction> tr, Reference<TaskBucket> tb, Reference<FutureBucket> fb, Reference<Task> task) { return _finish(tr, tb, fb, task); };
};
StringRef RestoreCompleteTaskFunc::name = LiteralStringRef("restore_complete");
const uint32_t RestoreCompleteTaskFunc::version = 1;
REGISTER_TASKFUNC(RestoreCompleteTaskFunc);
struct RestoreFileTaskFuncBase : RestoreTaskFuncBase {
@ -2595,13 +2578,13 @@ namespace fileBackup {
static TaskParam<int64_t> readLen() { return LiteralStringRef(__FUNCTION__); }
} Params;
std::string toString(Reference<Task> task) {
return format("fileName '%s' readLen %lld readOffset %lld",
std::string toString(Reference<Task> task) const override {
return format("fileName '%s' readLen %lld readOffset %lld",
Params.inputFile().get(task).fileName.c_str(),
Params.readLen().get(task),
Params.readOffset().get(task));
}
};
}
};
struct RestoreRangeTaskFunc : RestoreFileTaskFuncBase {
static struct : InputParams {
@ -2622,14 +2605,14 @@ namespace fileBackup {
}
} Params;
std::string toString(Reference<Task> task) {
std::string returnStr = RestoreFileTaskFuncBase::toString(task);
std::string toString(Reference<Task> task) const override {
std::string returnStr = RestoreFileTaskFuncBase::toString(task);
for(auto &range : Params.getOriginalFileRanges(task))
returnStr += format(" originalFileRange '%s'", printable(range).c_str());
return returnStr;
}
}
ACTOR static Future<Void> _execute(Database cx, Reference<TaskBucket> taskBucket, Reference<FutureBucket> futureBucket, Reference<Task> task) {
ACTOR static Future<Void> _execute(Database cx, Reference<TaskBucket> taskBucket, Reference<FutureBucket> futureBucket, Reference<Task> task) {
state RestoreConfig restore(task);
state RestoreFile rangeFile = Params.inputFile().get(task);
@ -2841,20 +2824,19 @@ namespace fileBackup {
}
static StringRef name;
static const uint32_t version;
StringRef getName() const { return name; };
static constexpr uint32_t version = 1;
StringRef getName() const { return name; };
Future<Void> execute(Database cx, Reference<TaskBucket> tb, Reference<FutureBucket> fb, Reference<Task> task) { return _execute(cx, tb, fb, task); };
Future<Void> finish(Reference<ReadYourWritesTransaction> tr, Reference<TaskBucket> tb, Reference<FutureBucket> fb, Reference<Task> task) { return _finish(tr, tb, fb, task); };
};
StringRef RestoreRangeTaskFunc::name = LiteralStringRef("restore_range_data");
const uint32_t RestoreRangeTaskFunc::version = 1;
REGISTER_TASKFUNC(RestoreRangeTaskFunc);
struct RestoreLogDataTaskFunc : RestoreFileTaskFuncBase {
static StringRef name;
static const uint32_t version;
StringRef getName() const { return name; };
static constexpr uint32_t version = 1;
StringRef getName() const { return name; };
static struct : InputParams {
} Params;
@ -2996,13 +2978,12 @@ namespace fileBackup {
Future<Void> finish(Reference<ReadYourWritesTransaction> tr, Reference<TaskBucket> tb, Reference<FutureBucket> fb, Reference<Task> task) { return _finish(tr, tb, fb, task); };
};
StringRef RestoreLogDataTaskFunc::name = LiteralStringRef("restore_log_data");
const uint32_t RestoreLogDataTaskFunc::version = 1;
REGISTER_TASKFUNC(RestoreLogDataTaskFunc);
struct RestoreDispatchTaskFunc : RestoreTaskFuncBase {
static StringRef name;
static const uint32_t version;
StringRef getName() const { return name; };
static constexpr uint32_t version = 1;
StringRef getName() const { return name; };
static struct {
static TaskParam<Version> beginVersion() { return LiteralStringRef(__FUNCTION__); }
@ -3309,7 +3290,6 @@ namespace fileBackup {
Future<Void> finish(Reference<ReadYourWritesTransaction> tr, Reference<TaskBucket> tb, Reference<FutureBucket> fb, Reference<Task> task) { return _finish(tr, tb, fb, task); };
};
StringRef RestoreDispatchTaskFunc::name = LiteralStringRef("restore_dispatch");
const uint32_t RestoreDispatchTaskFunc::version = 1;
REGISTER_TASKFUNC(RestoreDispatchTaskFunc);
ACTOR Future<std::string> restoreStatus(Reference<ReadYourWritesTransaction> tr, Key tagName) {
@ -3403,9 +3383,9 @@ namespace fileBackup {
struct StartFullRestoreTaskFunc : RestoreTaskFuncBase {
static StringRef name;
static const uint32_t version;
static constexpr uint32_t version = 1;
static struct {
static struct {
static TaskParam<Version> firstVersion() { return LiteralStringRef(__FUNCTION__); }
} Params;
@ -3598,7 +3578,6 @@ namespace fileBackup {
Future<Void> finish(Reference<ReadYourWritesTransaction> tr, Reference<TaskBucket> tb, Reference<FutureBucket> fb, Reference<Task> task) { return _finish(tr, tb, fb, task); };
};
StringRef StartFullRestoreTaskFunc::name = LiteralStringRef("restore_start");
const uint32_t StartFullRestoreTaskFunc::version = 1;
REGISTER_TASKFUNC(StartFullRestoreTaskFunc);
}
@ -3614,7 +3593,7 @@ struct LogInfo : public ReferenceCounted<LogInfo> {
class FileBackupAgentImpl {
public:
static const int MAX_RESTORABLE_FILE_METASECTION_BYTES = 1024 * 8;
static constexpr int MAX_RESTORABLE_FILE_METASECTION_BYTES = 1024 * 8;
// Parallel restore
ACTOR static Future<Void> parallelRestoreFinish(Database cx, UID randomUID, bool unlockDB = true) {
@ -5042,4 +5021,4 @@ void simulateBlobFailure() {
throw lookup_failed();
}
}
}
}

View File

@ -1096,54 +1096,48 @@ ACTOR Future<CoordinatorsResult> changeQuorum(Database cx, Reference<IQuorumChan
}
}
struct SpecifiedQuorumChange : IQuorumChange {
struct SpecifiedQuorumChange final : IQuorumChange {
vector<NetworkAddress> desired;
explicit SpecifiedQuorumChange( vector<NetworkAddress> const& desired ) : desired(desired) {}
virtual Future<vector<NetworkAddress>> getDesiredCoordinators(Transaction* tr,
vector<NetworkAddress> oldCoordinators,
Reference<ClusterConnectionFile>,
CoordinatorsResult&) {
Future<vector<NetworkAddress>> getDesiredCoordinators(Transaction* tr, vector<NetworkAddress> oldCoordinators,
Reference<ClusterConnectionFile>,
CoordinatorsResult&) override {
return desired;
}
};
Reference<IQuorumChange> specifiedQuorumChange(vector<NetworkAddress> const& addresses) { return Reference<IQuorumChange>(new SpecifiedQuorumChange(addresses)); }
struct NoQuorumChange : IQuorumChange {
virtual Future<vector<NetworkAddress>> getDesiredCoordinators(Transaction* tr,
vector<NetworkAddress> oldCoordinators,
Reference<ClusterConnectionFile>,
CoordinatorsResult&) {
struct NoQuorumChange final : IQuorumChange {
Future<vector<NetworkAddress>> getDesiredCoordinators(Transaction* tr, vector<NetworkAddress> oldCoordinators,
Reference<ClusterConnectionFile>,
CoordinatorsResult&) override {
return oldCoordinators;
}
};
Reference<IQuorumChange> noQuorumChange() { return Reference<IQuorumChange>(new NoQuorumChange); }
struct NameQuorumChange : IQuorumChange {
struct NameQuorumChange final : IQuorumChange {
std::string newName;
Reference<IQuorumChange> otherChange;
explicit NameQuorumChange( std::string const& newName, Reference<IQuorumChange> const& otherChange ) : newName(newName), otherChange(otherChange) {}
virtual Future<vector<NetworkAddress>> getDesiredCoordinators(Transaction* tr,
vector<NetworkAddress> oldCoordinators,
Reference<ClusterConnectionFile> cf,
CoordinatorsResult& t) {
Future<vector<NetworkAddress>> getDesiredCoordinators(Transaction* tr, vector<NetworkAddress> oldCoordinators,
Reference<ClusterConnectionFile> cf,
CoordinatorsResult& t) override {
return otherChange->getDesiredCoordinators(tr, oldCoordinators, cf, t);
}
virtual std::string getDesiredClusterKeyName() {
return newName;
}
std::string getDesiredClusterKeyName() const override { return newName; }
};
Reference<IQuorumChange> nameQuorumChange(std::string const& name, Reference<IQuorumChange> const& other) {
return Reference<IQuorumChange>(new NameQuorumChange( name, other ));
}
struct AutoQuorumChange : IQuorumChange {
struct AutoQuorumChange final : IQuorumChange {
int desired;
explicit AutoQuorumChange( int desired ) : desired(desired) {}
virtual Future<vector<NetworkAddress>> getDesiredCoordinators(Transaction* tr,
vector<NetworkAddress> oldCoordinators,
Reference<ClusterConnectionFile> ccf,
CoordinatorsResult& err) {
Future<vector<NetworkAddress>> getDesiredCoordinators(Transaction* tr, vector<NetworkAddress> oldCoordinators,
Reference<ClusterConnectionFile> ccf,
CoordinatorsResult& err) override {
return getDesired( this, tr, oldCoordinators, ccf, &err );
}

View File

@ -140,7 +140,7 @@ struct IQuorumChange : ReferenceCounted<IQuorumChange> {
vector<NetworkAddress> oldCoordinators,
Reference<ClusterConnectionFile>,
CoordinatorsResult&) = 0;
virtual std::string getDesiredClusterKeyName() { return std::string(); }
virtual std::string getDesiredClusterKeyName() const { return std::string(); }
};
// Change to use the given set of coordination servers

View File

@ -24,8 +24,8 @@
#include "flow/ThreadHelper.actor.h"
template<class T>
class AbortableSingleAssignmentVar : public ThreadSingleAssignmentVar<T>, public ThreadCallback {
template <class T>
class AbortableSingleAssignmentVar final : public ThreadSingleAssignmentVar<T>, public ThreadCallback {
public:
AbortableSingleAssignmentVar(ThreadFuture<T> future, ThreadFuture<Void> abortSignal) : future(future), abortSignal(abortSignal), hasBeenSet(false), callbacksCleared(false) {
int userParam;
@ -36,21 +36,21 @@ public:
// abortSignal comes first, because otherwise future could immediately call fire/error and attempt to remove this callback from abortSignal prematurely
abortSignal.callOrSetAsCallback(this, userParam, 0);
future.callOrSetAsCallback(this, userParam, 0);
}
}
virtual void cancel() {
void cancel() override {
cancelCallbacks();
ThreadSingleAssignmentVar<T>::cancel();
}
virtual void cleanupUnsafe() {
void cleanupUnsafe() override {
future.getPtr()->releaseMemory();
ThreadSingleAssignmentVar<T>::cleanupUnsafe();
}
bool canFire(int notMadeActive) { return true; }
bool canFire(int notMadeActive) const override { return true; }
void fire(const Void &unused, int& userParam) {
void fire(const Void& unused, int& userParam) override {
lock.enter();
if(!hasBeenSet) {
hasBeenSet = true;
@ -74,7 +74,7 @@ public:
ThreadSingleAssignmentVar<T>::delref();
}
void error(const Error& e, int& userParam) {
void error(const Error& e, int& userParam) override {
ASSERT(future.isError());
lock.enter();
if(!hasBeenSet) {
@ -124,8 +124,8 @@ ThreadFuture<T> abortableFuture(ThreadFuture<T> f, ThreadFuture<Void> abortSigna
return ThreadFuture<T>(new AbortableSingleAssignmentVar<T>(f, abortSignal));
}
template<class T>
class DLThreadSingleAssignmentVar : public ThreadSingleAssignmentVar<T> {
template <class T>
class DLThreadSingleAssignmentVar final : public ThreadSingleAssignmentVar<T> {
public:
DLThreadSingleAssignmentVar(Reference<FdbCApi> api, FdbCApi::FDBFuture *f, std::function<T(FdbCApi::FDBFuture*, FdbCApi*)> extractValue) : api(api), f(f), extractValue(extractValue), futureRefCount(1) {
ThreadSingleAssignmentVar<T>::addref();
@ -169,7 +169,7 @@ public:
return destroyNow;
}
virtual void cancel() {
void cancel() override {
if(addFutureRef()) {
api->futureCancel(f);
delFutureRef();
@ -178,7 +178,7 @@ public:
ThreadSingleAssignmentVar<T>::cancel();
}
virtual void cleanupUnsafe() {
void cleanupUnsafe() override {
delFutureRef();
ThreadSingleAssignmentVar<T>::cleanupUnsafe();
}
@ -223,8 +223,8 @@ ThreadFuture<T> toThreadFuture(Reference<FdbCApi> api, FdbCApi::FDBFuture *f, st
return ThreadFuture<T>(new DLThreadSingleAssignmentVar<T>(api, f, extractValue));
}
template<class S, class T>
class MapSingleAssignmentVar : public ThreadSingleAssignmentVar<T>, ThreadCallback {
template <class S, class T>
class MapSingleAssignmentVar final : public ThreadSingleAssignmentVar<T>, ThreadCallback {
public:
MapSingleAssignmentVar(ThreadFuture<S> source, std::function<ErrorOr<T>(ErrorOr<S>)> mapValue) : source(source), mapValue(mapValue) {
ThreadSingleAssignmentVar<T>::addref();
@ -233,25 +233,25 @@ public:
source.callOrSetAsCallback(this, userParam, 0);
}
virtual void cancel() {
void cancel() override {
source.getPtr()->addref(); // Cancel will delref our future, but we don't want to destroy it until this callback gets destroyed
source.getPtr()->cancel();
ThreadSingleAssignmentVar<T>::cancel();
}
virtual void cleanupUnsafe() {
void cleanupUnsafe() override {
source.getPtr()->releaseMemory();
ThreadSingleAssignmentVar<T>::cleanupUnsafe();
}
bool canFire(int notMadeActive) { return true; }
bool canFire(int notMadeActive) const override { return true; }
void fire(const Void &unused, int& userParam) {
void fire(const Void& unused, int& userParam) override {
sendResult(mapValue(source.get()));
ThreadSingleAssignmentVar<T>::delref();
}
void error(const Error& e, int& userParam) {
void error(const Error& e, int& userParam) override {
sendResult(mapValue(source.getError()));
ThreadSingleAssignmentVar<T>::delref();
}
@ -275,8 +275,8 @@ ThreadFuture<T> mapThreadFuture(ThreadFuture<S> source, std::function<ErrorOr<T>
return ThreadFuture<T>(new MapSingleAssignmentVar<S, T>(source, mapValue));
}
template<class S, class T>
class FlatMapSingleAssignmentVar : public ThreadSingleAssignmentVar<T>, ThreadCallback {
template <class S, class T>
class FlatMapSingleAssignmentVar final : public ThreadSingleAssignmentVar<T>, ThreadCallback {
public:
FlatMapSingleAssignmentVar(ThreadFuture<S> source, std::function<ErrorOr<ThreadFuture<T>>(ErrorOr<S>)> mapValue) : source(source), mapValue(mapValue), cancelled(false), released(false) {
ThreadSingleAssignmentVar<T>::addref();
@ -285,7 +285,7 @@ public:
source.callOrSetAsCallback(this, userParam, 0);
}
virtual void cancel() {
void cancel() override {
source.getPtr()->addref(); // Cancel will delref our future, but we don't want to destroy it until this callback gets destroyed
source.getPtr()->cancel();
@ -302,8 +302,8 @@ public:
ThreadSingleAssignmentVar<T>::cancel();
}
virtual void cleanupUnsafe() {
void cleanupUnsafe() override {
source.getPtr()->releaseMemory();
lock.enter();
@ -319,9 +319,9 @@ public:
ThreadSingleAssignmentVar<T>::cleanupUnsafe();
}
bool canFire(int notMadeActive) { return true; }
bool canFire(int notMadeActive) const override { return true; }
void fire(const Void &unused, int& userParam) {
void fire(const Void& unused, int& userParam) override {
if(mappedFuture.isValid()) {
sendResult(mappedFuture.get());
}
@ -332,7 +332,7 @@ public:
ThreadSingleAssignmentVar<T>::delref();
}
void error(const Error& e, int& userParam) {
void error(const Error& e, int& userParam) override {
if(mappedFuture.isValid()) {
sendResult(mappedFuture.getError());
}

View File

@ -1464,18 +1464,18 @@ TEST_CASE("/fdbclient/multiversionclient/EnvironmentVariableParsing" ) {
return Void();
}
class ValidateFuture : public ThreadCallback {
class ValidateFuture final : public ThreadCallback {
public:
ValidateFuture(ThreadFuture<int> f, ErrorOr<int> expectedValue, std::set<int> legalErrors) : f(f), expectedValue(expectedValue), legalErrors(legalErrors) { }
virtual bool canFire(int notMadeActive) { return true; }
bool canFire(int notMadeActive) const override { return true; }
virtual void fire(const Void &unused, int& userParam) {
void fire(const Void& unused, int& userParam) override {
ASSERT(!f.isError() && !expectedValue.isError() && f.get() == expectedValue.get());
delete this;
}
virtual void error(const Error& e, int& userParam) {
void error(const Error& e, int& userParam) override {
ASSERT(legalErrors.count(e.code()) > 0 || (f.isError() && expectedValue.isError() && f.getError().code() == expectedValue.getError().code()));
delete this;
}
@ -1721,17 +1721,17 @@ TEST_CASE("/fdbclient/multiversionclient/AbortableSingleAssignmentVar" ) {
return Void();
}
class CAPICallback : public ThreadCallback {
class CAPICallback final : public ThreadCallback {
public:
CAPICallback(void (*callbackf)(FdbCApi::FDBFuture*, void*), FdbCApi::FDBFuture* f, void* userdata)
: callbackf(callbackf), f(f), userdata(userdata) {}
virtual bool canFire(int notMadeActive) { return true; }
virtual void fire(const Void& unused, int& userParam) {
bool canFire(int notMadeActive) const override { return true; }
void fire(const Void& unused, int& userParam) override {
(*callbackf)(f, userdata);
delete this;
}
virtual void error(const Error& e, int& userParam) {
void error(const Error& e, int& userParam) override {
(*callbackf)(f, userdata);
delete this;
}

View File

@ -309,7 +309,7 @@ struct ClientInfo : ThreadSafeReferenceCounted<ClientInfo> {
class MultiVersionApi;
class MultiVersionDatabase : public IDatabase, ThreadSafeReferenceCounted<MultiVersionDatabase> {
class MultiVersionDatabase final : public IDatabase, ThreadSafeReferenceCounted<MultiVersionDatabase> {
public:
MultiVersionDatabase(MultiVersionApi *api, std::string clusterFilePath, Reference<IDatabase> db, bool openConnectors=true);
~MultiVersionDatabase();
@ -331,9 +331,9 @@ private:
void connect();
void cancel();
bool canFire(int notMadeActive) { return true; }
void fire(const Void &unused, int& userParam);
void error(const Error& e, int& userParam);
bool canFire(int notMadeActive) const override { return true; }
void fire(const Void& unused, int& userParam) override;
void error(const Error& e, int& userParam) override;
const Reference<ClientInfo> client;
const std::string clusterFilePath;

View File

@ -946,6 +946,10 @@ DatabaseContext::DatabaseContext(Reference<AsyncVar<Reference<ClusterConnectionF
std::make_unique<ProcessClassSourceRangeImpl>(
KeyRangeRef(LiteralStringRef("process/class_source/"), LiteralStringRef("process/class_source0"))
.withPrefix(SpecialKeySpace::getModuleRange(SpecialKeySpace::MODULE::CONFIGURATION).begin)));
registerSpecialKeySpaceModule(
SpecialKeySpace::MODULE::MANAGEMENT, SpecialKeySpace::IMPLTYPE::READWRITE,
std::make_unique<LockDatabaseImpl>(singleKeyRange(LiteralStringRef("dbLocked"))
.withPrefix(SpecialKeySpace::getModuleRange(SpecialKeySpace::MODULE::MANAGEMENT).begin)));
}
if (apiVersionAtLeast(630)) {
registerSpecialKeySpaceModule(SpecialKeySpace::MODULE::TRANSACTION, SpecialKeySpace::IMPLTYPE::READONLY,
@ -2695,7 +2699,7 @@ void debugAddTags(Transaction *tr) {
Transaction::Transaction(Database const& cx)
: cx(cx), info(cx->taskID, deterministicRandom()->randomUniqueID()), backoff(CLIENT_KNOBS->DEFAULT_BACKOFF),
committedVersion(invalidVersion), versionstampPromise(Promise<Standalone<StringRef>>()), options(cx), numErrors(0),
trLogInfo(createTrLogInfoProbabilistically(cx)), span(info.spanID, "Transaction"_loc) {
trLogInfo(createTrLogInfoProbabilistically(cx)), tr(info.spanID), span(info.spanID, "Transaction"_loc) {
if (DatabaseContext::debugUseTags) {
debugAddTags(this);
}

View File

@ -45,7 +45,8 @@ std::unordered_map<std::string, KeyRange> SpecialKeySpace::managementApiCommandT
{ "exclude", KeyRangeRef(LiteralStringRef("excluded/"), LiteralStringRef("excluded0"))
.withPrefix(moduleToBoundary[MODULE::MANAGEMENT].begin) },
{ "failed", KeyRangeRef(LiteralStringRef("failed/"), LiteralStringRef("failed0"))
.withPrefix(moduleToBoundary[MODULE::MANAGEMENT].begin) }
.withPrefix(moduleToBoundary[MODULE::MANAGEMENT].begin) },
{ "lock", singleKeyRange(LiteralStringRef("dbLocked")).withPrefix(moduleToBoundary[MODULE::MANAGEMENT].begin) }
};
std::set<std::string> SpecialKeySpace::options = { "excluded/force", "failed/force" };
@ -1078,19 +1079,19 @@ Future<Optional<std::string>> ProcessClassRangeImpl::commit(ReadYourWritesTransa
return processClassCommitActor(ryw, getKeyRange());
}
void throwNotAllowedError(ReadYourWritesTransaction* ryw) {
auto msg = ManagementAPIError::toJsonString(false, "setclass",
"Clear operation is meaningless thus forbidden for setclass");
void throwSpecialKeyApiFailure(ReadYourWritesTransaction* ryw, std::string command, std::string message) {
auto msg = ManagementAPIError::toJsonString(false, command, message);
ryw->setSpecialKeySpaceErrorMsg(msg);
throw special_keys_api_failure();
}
void ProcessClassRangeImpl::clear(ReadYourWritesTransaction* ryw, const KeyRangeRef& range) {
return throwNotAllowedError(ryw);
return throwSpecialKeyApiFailure(ryw, "setclass", "Clear operation is meaningless thus forbidden for setclass");
}
void ProcessClassRangeImpl::clear(ReadYourWritesTransaction* ryw, const KeyRef& key) {
return throwNotAllowedError(ryw);
return throwSpecialKeyApiFailure(ryw, "setclass",
"Clear range operation is meaningless thus forbidden for setclass");
}
ACTOR Future<Standalone<RangeResultRef>> getProcessClassSourceActor(ReadYourWritesTransaction* ryw, KeyRef prefix,
@ -1120,4 +1121,74 @@ ProcessClassSourceRangeImpl::ProcessClassSourceRangeImpl(KeyRangeRef kr) : Speci
Future<Standalone<RangeResultRef>> ProcessClassSourceRangeImpl::getRange(ReadYourWritesTransaction* ryw,
KeyRangeRef kr) const {
return getProcessClassSourceActor(ryw, getKeyRange().begin, kr);
}
}
// Read the real lock key (databaseLockedKey) from the database on behalf of the
// management module's "dbLocked" special key. Returns a single key-value pair at
// kr.begin carrying the stored lock value when the database is locked, or an
// empty result when it is not.
// LOCK_AWARE is set so this read succeeds even while the database is locked.
ACTOR Future<Standalone<RangeResultRef>> getLockedKeyActor(ReadYourWritesTransaction* ryw,
KeyRangeRef kr) {
ryw->getTransaction().setOption(FDBTransactionOptions::LOCK_AWARE);
Optional<Value> val = wait(ryw->getTransaction().get(databaseLockedKey));
Standalone<RangeResultRef> result;
if (val.present()) {
// Surface the raw stored lock value under the special-key name (kr.begin).
result.push_back_deep(result.arena(), KeyValueRef(kr.begin, val.get()));
}
return result;
}
// kr: the single-key "lock" range inside the management module (registered in
// SpecialKeySpace::managementApiCommandToRange as singleKeyRange("dbLocked")).
LockDatabaseImpl::LockDatabaseImpl(KeyRangeRef kr) : SpecialKeyRangeRWImpl(kr) {}
// Read handler for the "dbLocked" special key.
// If read-your-writes is enabled and the caller has already written to this
// special key in the current transaction, reflect that uncommitted write;
// otherwise fall through to reading the real databaseLockedKey.
Future<Standalone<RangeResultRef>> LockDatabaseImpl::getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr) const {
// single key range, the queried range should always be the same as the underlying range
ASSERT(kr == getKeyRange());
auto lockEntry = ryw->getSpecialKeySpaceWriteMap()[SpecialKeySpace::getManagementApiCommandPrefix("lock")];
if (!ryw->readYourWritesDisabled() && lockEntry.first) {
// ryw enabled and we have written to the special key
Standalone<RangeResultRef> result;
if (lockEntry.second.present()) {
// A pending set: report the value the caller wrote. A pending clear
// (no value) falls through and yields an empty result, i.e. "unlocked".
result.push_back_deep(result.arena(), KeyValueRef(kr.begin, lockEntry.second.get()));
}
return result;
} else {
return getLockedKeyActor(ryw, kr);
}
}
// Commit-time action for a set of the "lock" special key: lock the database.
// Returns a JSON error message if the database is already locked, or an empty
// Optional on success (including when it was already locked by this uid).
ACTOR Future<Optional<std::string>> lockDatabaseCommitActor(ReadYourWritesTransaction* ryw) {
state Optional<std::string> msg;
ryw->getTransaction().setOption(FDBTransactionOptions::LOCK_AWARE);
Optional<Value> val = wait(ryw->getTransaction().get(databaseLockedKey));
UID uid = deterministicRandom()->randomUniqueID();
if (val.present() && BinaryReader::fromStringRef<UID>(val.get().substr(10), Unversioned()) != uid) {
// check database not locked
// if locked already, throw error
// NOTE(review): uid was generated just above, so the "!= uid" comparison is
// always true whenever val is present — presumably kept to mirror the shape
// of lockDatabase(); confirm the intent.
msg = ManagementAPIError::toJsonString(false, "lock", "Database has already been locked");
} else if (!val.present()) {
// lock database
// Value layout: a 10-byte placeholder prefix ("0123456789"), the lock UID,
// then 4 zero bytes. Under SetVersionstampedValue semantics the trailing 4
// bytes are the offset (0) at which the commit versionstamp is written,
// replacing the placeholder prefix — see FDB atomic-op docs; the substr(10)
// read above skips that versionstamp to recover the UID.
ryw->getTransaction().atomicOp(databaseLockedKey,
BinaryWriter::toValue(uid, Unversioned())
.withPrefix(LiteralStringRef("0123456789"))
.withSuffix(LiteralStringRef("\x00\x00\x00\x00")),
MutationRef::SetVersionstampedValue);
// Conflict with all of normalKeys so taking the lock invalidates concurrent writers.
ryw->getTransaction().addWriteConflictRange(normalKeys);
}
return msg;
}
// Commit-time action for a clear of the "lock" special key: unlock the database
// by clearing databaseLockedKey. Unlocking an already-unlocked database is a
// no-op; this never produces an error message.
ACTOR Future<Optional<std::string>> unlockDatabaseCommitActor(ReadYourWritesTransaction* ryw) {
ryw->getTransaction().setOption(FDBTransactionOptions::LOCK_AWARE);
Optional<Value> val = wait(ryw->getTransaction().get(databaseLockedKey));
if (val.present()) {
ryw->getTransaction().clear(singleKeyRange(databaseLockedKey));
}
return Optional<std::string>();
}
// Dispatch the caller's buffered write to the "lock" special key at commit
// time: a set (value present) requests a database lock, a clear requests an
// unlock.
Future<Optional<std::string>> LockDatabaseImpl::commit(ReadYourWritesTransaction* ryw) {
	const auto entry = ryw->getSpecialKeySpaceWriteMap()[SpecialKeySpace::getManagementApiCommandPrefix("lock")];
	return entry.second.present() ? lockDatabaseCommitActor(ryw) : unlockDatabaseCommitActor(ryw);
}

View File

@ -305,5 +305,12 @@ public:
Future<Standalone<RangeResultRef>> getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr) const override;
};
// Special-key-space handler for the management module's single-key "lock"
// range ("dbLocked"). Reading reports whether the database is locked; writing
// any value requests a lock and clearing requests an unlock, both applied at
// commit().
class LockDatabaseImpl : public SpecialKeyRangeRWImpl {
public:
explicit LockDatabaseImpl(KeyRangeRef kr);
Future<Standalone<RangeResultRef>> getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr) const override;
Future<Optional<std::string>> commit(ReadYourWritesTransaction* ryw) override;
};
#include "flow/unactorcompiler.h"
#endif

View File

@ -69,7 +69,7 @@ REGISTER_TASKFUNC(AddTaskFunc);
struct IdleTaskFunc : TaskFuncBase {
static StringRef name;
static const uint32_t version = 1;
static constexpr uint32_t version = 1;
StringRef getName() const { return name; };
Future<Void> execute(Database cx, Reference<TaskBucket> tb, Reference<FutureBucket> fb, Reference<Task> task) { return Void(); };

View File

@ -256,7 +256,7 @@ public:
return pauseKey;
}
Subspace getAvailableSpace(int priority = 0) {
Subspace getAvailableSpace(int priority = 0) const {
if(priority == 0)
return available;
return available_prioritized.get(priority);

View File

@ -112,8 +112,8 @@ public:
return statdata.st_mtime;
}
virtual void addref() { ReferenceCounted<AsyncFileEIO>::addref(); }
virtual void delref() { ReferenceCounted<AsyncFileEIO>::delref(); }
void addref() override { ReferenceCounted<AsyncFileEIO>::addref(); }
void delref() override { ReferenceCounted<AsyncFileEIO>::delref(); }
int64_t debugFD() const override { return fd; }

View File

@ -80,12 +80,8 @@ public:
}
}
virtual void addref() {
ReferenceCounted<AsyncFileDetachable>::addref();
}
virtual void delref() {
ReferenceCounted<AsyncFileDetachable>::delref();
}
void addref() override { ReferenceCounted<AsyncFileDetachable>::addref(); }
void delref() override { ReferenceCounted<AsyncFileDetachable>::delref(); }
Future<int> read(void* data, int length, int64_t offset) override {
if( !file.getPtr() || g_simulator.getCurrentProcess()->shutdownSignal.getFuture().isReady() )
@ -249,10 +245,8 @@ public:
//TraceEvent("AsyncFileNonDurable_Destroy", id).detail("Filename", filename);
}
virtual void addref() {
ReferenceCounted<AsyncFileNonDurable>::addref();
}
virtual void delref() {
void addref() override { ReferenceCounted<AsyncFileNonDurable>::addref(); }
void delref() override {
if(delref_no_destroy()) {
ASSERT(filesBeingDeleted.count(filename) == 0);
//TraceEvent("AsyncFileNonDurable_StartDelete", id).detail("Filename", filename);

View File

@ -32,10 +32,10 @@
#include "flow/actorcompiler.h" // This must be the last #include.
// Read-only file type that wraps another file instance, reads in large blocks, and reads ahead of the actual range requested
class AsyncFileReadAheadCache : public IAsyncFile, public ReferenceCounted<AsyncFileReadAheadCache> {
class AsyncFileReadAheadCache final : public IAsyncFile, public ReferenceCounted<AsyncFileReadAheadCache> {
public:
virtual void addref() { ReferenceCounted<AsyncFileReadAheadCache>::addref(); }
virtual void delref() { ReferenceCounted<AsyncFileReadAheadCache>::delref(); }
void addref() override { ReferenceCounted<AsyncFileReadAheadCache>::addref(); }
void delref() override { ReferenceCounted<AsyncFileReadAheadCache>::delref(); }
struct CacheBlock : ReferenceCounted<CacheBlock> {
CacheBlock(int size = 0) : data(new uint8_t[size]), len(size) {}
@ -177,7 +177,7 @@ public:
std::string getFilename() const override { return m_f->getFilename(); }
virtual ~AsyncFileReadAheadCache() {
~AsyncFileReadAheadCache() {
for(auto &it : m_blocks) {
it.second.cancel();
}
@ -196,7 +196,6 @@ public:
: m_f(f), m_block_size(blockSize), m_read_ahead_blocks(readAheadBlocks), m_max_concurrent_reads(maxConcurrentReads),
m_cache_block_limit(std::max<int>(1, cacheSizeBlocks)) {
}
};
#include "flow/unactorcompiler.h"

View File

@ -76,13 +76,25 @@ TEST_CASE("/flow/buggifiedDelay") {
}
template <class T, class Func, class ErrFunc, class CallbackType>
class LambdaCallback : public CallbackType, public FastAllocated<LambdaCallback<T,Func,ErrFunc,CallbackType>> {
class LambdaCallback final : public CallbackType, public FastAllocated<LambdaCallback<T, Func, ErrFunc, CallbackType>> {
Func func;
ErrFunc errFunc;
virtual void fire(T const& t) { CallbackType::remove(); func(t); delete this; }
virtual void fire(T && t) { CallbackType::remove(); func(std::move(t)); delete this; }
virtual void error(Error e) { CallbackType::remove(); errFunc(e); delete this; }
void fire(T const& t) override {
CallbackType::remove();
func(t);
delete this;
}
void fire(T&& t) {
CallbackType::remove();
func(std::move(t));
delete this;
}
void error(Error e) override {
CallbackType::remove();
errFunc(e);
delete this;
}
public:
LambdaCallback(Func&& f, ErrFunc&& e) : func(std::move(f)), errFunc(std::move(e)) {}
@ -193,14 +205,14 @@ ACTOR static Future<Void> testHygeine() {
//bool expectActorCount(int x) { return actorCount == x; }
bool expectActorCount(int) { return true; }
struct YieldMockNetwork : INetwork, ReferenceCounted<YieldMockNetwork> {
struct YieldMockNetwork final : INetwork, ReferenceCounted<YieldMockNetwork> {
int ticks;
Promise<Void> nextTick;
int nextYield;
INetwork* baseNetwork;
virtual flowGlobalType global(int id) const override { return baseNetwork->global(id); }
virtual void setGlobal(size_t id, flowGlobalType v) override {
flowGlobalType global(int id) const override { return baseNetwork->global(id); }
void setGlobal(size_t id, flowGlobalType v) override {
baseNetwork->setGlobal(id, v);
return;
}
@ -220,35 +232,35 @@ struct YieldMockNetwork : INetwork, ReferenceCounted<YieldMockNetwork> {
t.send(Void());
}
virtual Future<class Void> delay(double seconds, TaskPriority taskID) override { return nextTick.getFuture(); }
Future<class Void> delay(double seconds, TaskPriority taskID) override { return nextTick.getFuture(); }
virtual Future<class Void> yield(TaskPriority taskID) override {
Future<class Void> yield(TaskPriority taskID) override {
if (check_yield(taskID))
return delay(0,taskID);
return Void();
}
virtual bool check_yield(TaskPriority taskID) override {
bool check_yield(TaskPriority taskID) override {
if (nextYield > 0) --nextYield;
return nextYield == 0;
}
// Delegate everything else. TODO: Make a base class NetworkWrapper for delegating everything in INetwork
virtual TaskPriority getCurrentTask() const override { return baseNetwork->getCurrentTask(); }
virtual void setCurrentTask(TaskPriority taskID) override { baseNetwork->setCurrentTask(taskID); }
virtual double now() const override { return baseNetwork->now(); }
virtual double timer() override { return baseNetwork->timer(); }
virtual void stop() override { return baseNetwork->stop(); }
virtual void addStopCallback(std::function<void()> fn) override {
TaskPriority getCurrentTask() const override { return baseNetwork->getCurrentTask(); }
void setCurrentTask(TaskPriority taskID) override { baseNetwork->setCurrentTask(taskID); }
double now() const override { return baseNetwork->now(); }
double timer() override { return baseNetwork->timer(); }
void stop() override { return baseNetwork->stop(); }
void addStopCallback(std::function<void()> fn) override {
ASSERT(false);
return;
}
virtual bool isSimulated() const override { return baseNetwork->isSimulated(); }
virtual void onMainThread(Promise<Void>&& signal, TaskPriority taskID) override {
bool isSimulated() const override { return baseNetwork->isSimulated(); }
void onMainThread(Promise<Void>&& signal, TaskPriority taskID) override {
return baseNetwork->onMainThread(std::move(signal), taskID);
}
bool isOnMainThread() const override { return baseNetwork->isOnMainThread(); }
virtual THREAD_HANDLE startThread(THREAD_FUNC_RETURN (*func)(void*), void* arg) override {
THREAD_HANDLE startThread(THREAD_FUNC_RETURN (*func)(void*), void* arg) override {
return baseNetwork->startThread(func, arg);
}
Future<Reference<class IAsyncFile>> open(std::string filename, int64_t flags, int64_t mode) {
@ -257,15 +269,15 @@ struct YieldMockNetwork : INetwork, ReferenceCounted<YieldMockNetwork> {
Future<Void> deleteFile(std::string filename, bool mustBeDurable) {
return IAsyncFileSystem::filesystem()->deleteFile(filename, mustBeDurable);
}
virtual void run() override { return baseNetwork->run(); }
virtual bool checkRunnable() override { return baseNetwork->checkRunnable(); }
virtual void getDiskBytes(std::string const& directory, int64_t& free, int64_t& total) override {
void run() override { return baseNetwork->run(); }
bool checkRunnable() override { return baseNetwork->checkRunnable(); }
void getDiskBytes(std::string const& directory, int64_t& free, int64_t& total) override {
return baseNetwork->getDiskBytes(directory, free, total);
}
virtual bool isAddressOnThisHost(NetworkAddress const& addr) const override {
bool isAddressOnThisHost(NetworkAddress const& addr) const override {
return baseNetwork->isAddressOnThisHost(addr);
}
virtual const TLSConfig& getTLSConfig() const override {
const TLSConfig& getTLSConfig() const override {
static TLSConfig emptyConfig;
return emptyConfig;
}

View File

@ -171,23 +171,22 @@ void EndpointMap::remove(Endpoint::Token const& token, NetworkMessageReceiver* r
}
}
struct EndpointNotFoundReceiver : NetworkMessageReceiver {
struct EndpointNotFoundReceiver final : NetworkMessageReceiver {
EndpointNotFoundReceiver(EndpointMap& endpoints) {
endpoints.insertWellKnown(this, WLTOKEN_ENDPOINT_NOT_FOUND, TaskPriority::DefaultEndpoint);
}
void receive(ArenaObjectReader& reader) override {
// Remote machine tells us it doesn't have endpoint e
Endpoint e;
reader.deserialize(e);
IFailureMonitor::failureMonitor().endpointNotFound(e);
}
};
struct PingReceiver : NetworkMessageReceiver {
struct PingReceiver final : NetworkMessageReceiver {
PingReceiver(EndpointMap& endpoints) {
endpoints.insertWellKnown(this, WLTOKEN_PING_PACKET, TaskPriority::ReadSocket);
}
void receive(ArenaObjectReader& reader) override {
ReplyPromise<Void> reply;
reader.deserialize(reply);

View File

@ -35,18 +35,18 @@ public:
};
// An IRateControl implementation that hands out at most windowLimit units of 'credit' in windowSeconds seconds
class SpeedLimit : public IRateControl, ReferenceCounted<SpeedLimit> {
class SpeedLimit final : public IRateControl, ReferenceCounted<SpeedLimit> {
public:
SpeedLimit(int windowLimit, int windowSeconds) : m_limit(windowLimit), m_seconds(windowSeconds), m_last_update(0), m_budget(0) {
m_budget_max = m_limit * m_seconds;
m_last_update = timer();
}
virtual ~SpeedLimit() {}
~SpeedLimit() = default;
virtual void addref() { ReferenceCounted<SpeedLimit>::addref(); }
virtual void delref() { ReferenceCounted<SpeedLimit>::delref(); }
void addref() override { ReferenceCounted<SpeedLimit>::addref(); }
void delref() override { ReferenceCounted<SpeedLimit>::delref(); }
virtual Future<Void> getAllowance(unsigned int n) {
Future<Void> getAllowance(unsigned int n) override {
// Replenish budget based on time since last update
double ts = timer();
// returnUnused happens to do exactly what we want here
@ -60,7 +60,7 @@ public:
return delay(m_seconds * -m_budget / m_limit);
}
virtual void returnUnused(int n) {
void returnUnused(int n) override {
if(n < 0)
return;
m_budget = std::min<int64_t>(m_budget + n, m_budget_max);
@ -75,13 +75,13 @@ private:
};
// An IRateControl implementation that enforces no limit
class Unlimited : public IRateControl, ReferenceCounted<Unlimited> {
class Unlimited final : public IRateControl, ReferenceCounted<Unlimited> {
public:
Unlimited() {}
virtual ~Unlimited() {}
virtual void addref() { ReferenceCounted<Unlimited>::addref(); }
virtual void delref() { ReferenceCounted<Unlimited>::delref(); }
~Unlimited() = default;
void addref() override { ReferenceCounted<Unlimited>::addref(); }
void delref() override { ReferenceCounted<Unlimited>::delref(); }
virtual Future<Void> getAllowance(unsigned int n) { return Void(); }
virtual void returnUnused(int n) {}
Future<Void> getAllowance(unsigned int n) override { return Void(); }
void returnUnused(int n) override {}
};

View File

@ -58,8 +58,8 @@ struct PerfIntCounter {
PerfIntCounter(std::string name, vector<PerfIntCounter*>& v) : name(name), value(0) { v.push_back(this); }
void operator += (int64_t delta) { value += delta; }
void operator ++ () { value += 1; }
PerfMetric getMetric() { return PerfMetric( name, (double)value, false, "%.0lf" ); }
int64_t getValue() { return value; }
PerfMetric getMetric() const { return PerfMetric(name, static_cast<double>(value), false, "%.0lf"); }
int64_t getValue() const { return value; }
void clear() { value = 0; }
private:
@ -72,8 +72,8 @@ struct PerfDoubleCounter {
PerfDoubleCounter(std::string name, vector<PerfDoubleCounter*>& v) : name(name), value(0) { v.push_back(this); }
void operator += (double delta) { value += delta; }
void operator ++ () { value += 1.0; }
PerfMetric getMetric() { return PerfMetric( name, value, false ); }
double getValue() { return value; }
PerfMetric getMetric() const { return PerfMetric(name, value, false); }
double getValue() const { return value; }
void clear() { value = 0.0; }
private:

View File

@ -118,10 +118,7 @@ PolicyAcross::PolicyAcross(int count, std::string const& attribKey, Reference<IR
PolicyAcross::PolicyAcross() : _policy(new PolicyOne()) {}
PolicyAcross::~PolicyAcross()
{
return;
}
PolicyAcross::~PolicyAcross() {}
// Debug purpose only
// Trace all record entries to help debug

View File

@ -92,41 +92,41 @@ inline void save(Archive& ar, const Reference<IReplicationPolicy>& value) {
}
}
struct PolicyOne : IReplicationPolicy, public ReferenceCounted<PolicyOne> {
struct PolicyOne final : IReplicationPolicy, public ReferenceCounted<PolicyOne> {
PolicyOne(){};
explicit PolicyOne(const PolicyOne& o) {}
virtual ~PolicyOne(){};
virtual std::string name() const { return "One"; }
virtual std::string info() const { return "1"; }
virtual int maxResults() const { return 1; }
virtual int depth() const { return 1; }
virtual bool validate(std::vector<LocalityEntry> const& solutionSet,
Reference<LocalitySet> const& fromServers) const;
virtual bool selectReplicas(Reference<LocalitySet>& fromServers, std::vector<LocalityEntry> const& alsoServers,
std::vector<LocalityEntry>& results);
std::string name() const override { return "One"; }
std::string info() const override { return "1"; }
int maxResults() const override { return 1; }
int depth() const override { return 1; }
bool validate(std::vector<LocalityEntry> const& solutionSet,
Reference<LocalitySet> const& fromServers) const override;
bool selectReplicas(Reference<LocalitySet>& fromServers, std::vector<LocalityEntry> const& alsoServers,
std::vector<LocalityEntry>& results) override;
template <class Ar>
void serialize(Ar& ar) {
static_assert(!is_fb_function<Ar>);
}
virtual void deserializationDone() {}
virtual void attributeKeys(std::set<std::string>* set) const override { return; }
void deserializationDone() override {}
void attributeKeys(std::set<std::string>* set) const override { return; }
};
struct PolicyAcross : IReplicationPolicy, public ReferenceCounted<PolicyAcross> {
struct PolicyAcross final : IReplicationPolicy, public ReferenceCounted<PolicyAcross> {
friend struct serializable_traits<PolicyAcross*>;
PolicyAcross(int count, std::string const& attribKey, Reference<IReplicationPolicy> const policy);
explicit PolicyAcross();
explicit PolicyAcross(const PolicyAcross& other) : PolicyAcross(other._count, other._attribKey, other._policy) {}
virtual ~PolicyAcross();
virtual std::string name() const { return "Across"; }
~PolicyAcross();
std::string name() const override { return "Across"; }
std::string embeddedPolicyName() const { return _policy->name(); }
int getCount() const { return _count; }
virtual std::string info() const { return format("%s^%d x ", _attribKey.c_str(), _count) + _policy->info(); }
virtual int maxResults() const { return _count * _policy->maxResults(); }
virtual int depth() const { return 1 + _policy->depth(); }
virtual bool validate(std::vector<LocalityEntry> const& solutionSet, Reference<LocalitySet> const& fromServers) const;
virtual bool selectReplicas(Reference<LocalitySet>& fromServers, std::vector<LocalityEntry> const& alsoServers,
std::vector<LocalityEntry>& results);
std::string info() const override { return format("%s^%d x ", _attribKey.c_str(), _count) + _policy->info(); }
int maxResults() const override { return _count * _policy->maxResults(); }
int depth() const override { return 1 + _policy->depth(); }
bool validate(std::vector<LocalityEntry> const& solutionSet,
Reference<LocalitySet> const& fromServers) const override;
bool selectReplicas(Reference<LocalitySet>& fromServers, std::vector<LocalityEntry> const& alsoServers,
std::vector<LocalityEntry>& results) override;
template <class Ar>
void serialize(Ar& ar) {
@ -135,13 +135,13 @@ struct PolicyAcross : IReplicationPolicy, public ReferenceCounted<PolicyAcross>
serializeReplicationPolicy(ar, _policy);
}
virtual void deserializationDone() {}
void deserializationDone() override {}
static bool compareAddedResults(const std::pair<int, int>& rhs, const std::pair<int, int>& lhs) {
return (rhs.first < lhs.first) || (!(lhs.first < rhs.first) && (rhs.second < lhs.second));
}
virtual void attributeKeys(std::set<std::string>* set) const override {
void attributeKeys(std::set<std::string>* set) const override {
set->insert(_attribKey);
_policy->attributeKeys(set);
}
@ -159,7 +159,7 @@ protected:
Arena _arena;
};
struct PolicyAnd : IReplicationPolicy, public ReferenceCounted<PolicyAnd> {
struct PolicyAnd final : IReplicationPolicy, public ReferenceCounted<PolicyAnd> {
friend struct serializable_traits<PolicyAnd*>;
PolicyAnd(std::vector<Reference<IReplicationPolicy>> policies) : _policies(policies), _sortedPolicies(policies) {
// Sort the policy array
@ -167,9 +167,8 @@ struct PolicyAnd : IReplicationPolicy, public ReferenceCounted<PolicyAnd> {
}
explicit PolicyAnd(const PolicyAnd& other) : _policies(other._policies), _sortedPolicies(other._sortedPolicies) {}
explicit PolicyAnd() {}
virtual ~PolicyAnd() {}
virtual std::string name() const { return "And"; }
virtual std::string info() const {
std::string name() const override { return "And"; }
std::string info() const override {
std::string infoText;
for (auto& policy : _policies) {
infoText += ((infoText.length()) ? " & (" : "(") + policy->info() + ")";
@ -177,14 +176,14 @@ struct PolicyAnd : IReplicationPolicy, public ReferenceCounted<PolicyAnd> {
if (_policies.size()) infoText = "(" + infoText + ")";
return infoText;
}
virtual int maxResults() const {
int maxResults() const override {
int resultsMax = 0;
for (auto& policy : _policies) {
resultsMax += policy->maxResults();
}
return resultsMax;
}
virtual int depth() const {
int depth() const override {
int policyDepth, depthMax = 0;
for (auto& policy : _policies) {
policyDepth = policy->depth();
@ -194,11 +193,11 @@ struct PolicyAnd : IReplicationPolicy, public ReferenceCounted<PolicyAnd> {
}
return depthMax;
}
virtual bool validate(std::vector<LocalityEntry> const& solutionSet,
Reference<LocalitySet> const& fromServers) const;
bool validate(std::vector<LocalityEntry> const& solutionSet,
Reference<LocalitySet> const& fromServers) const override;
virtual bool selectReplicas(Reference<LocalitySet>& fromServers, std::vector<LocalityEntry> const& alsoServers,
std::vector<LocalityEntry>& results);
bool selectReplicas(Reference<LocalitySet>& fromServers, std::vector<LocalityEntry> const& alsoServers,
std::vector<LocalityEntry>& results) override;
static bool comparePolicy(const Reference<IReplicationPolicy>& rhs, const Reference<IReplicationPolicy>& lhs) {
return (lhs->maxResults() < rhs->maxResults()) ||
@ -220,12 +219,12 @@ struct PolicyAnd : IReplicationPolicy, public ReferenceCounted<PolicyAnd> {
}
}
virtual void deserializationDone() {
void deserializationDone() override {
_sortedPolicies = _policies;
std::sort(_sortedPolicies.begin(), _sortedPolicies.end(), PolicyAnd::comparePolicy);
}
virtual void attributeKeys(std::set<std::string>* set) const override {
void attributeKeys(std::set<std::string>* set) const override {
for (const Reference<IReplicationPolicy>& r : _policies) {
r->attributeKeys(set);
}

View File

@ -69,7 +69,7 @@ typedef std::pair<AttribKey, AttribValue> AttribRecord;
// This structure represents the LocalityData class as an integer map
struct KeyValueMap : public ReferenceCounted<KeyValueMap> {
struct KeyValueMap final : public ReferenceCounted<KeyValueMap> {
std::vector<AttribRecord> _keyvaluearray;
KeyValueMap() {}
@ -102,9 +102,6 @@ struct KeyValueMap : public ReferenceCounted<KeyValueMap> {
return ((lower != _keyvaluearray.end()) && (lower->first == indexKey) && (lower->second == indexValue));
}
virtual void addref() { ReferenceCounted<KeyValueMap>::addref(); }
virtual void delref() { ReferenceCounted<KeyValueMap>::delref(); }
static bool compareKeyValue(const AttribRecord& lhs, const AttribRecord& rhs)
{ return (lhs.first < rhs.first) || (!(rhs.first < lhs.first) && (lhs.second < rhs.second)); }
@ -112,23 +109,18 @@ struct KeyValueMap : public ReferenceCounted<KeyValueMap> {
{ return (lhs.first < rhs.first); }
};
// This class stores the information for each entry within the locality map
struct LocalityRecord : public ReferenceCounted<LocalityRecord> {
struct LocalityRecord final : public ReferenceCounted<LocalityRecord> {
Reference<KeyValueMap> _dataMap;
LocalityEntry _entryIndex;
LocalityRecord(Reference<KeyValueMap> const& dataMap, int arrayIndex): _dataMap(dataMap), _entryIndex(arrayIndex) {}
LocalityRecord(LocalityRecord const& entry) : _dataMap(entry._dataMap), _entryIndex(entry._entryIndex) {}
virtual ~LocalityRecord(){}
LocalityRecord& operator=(LocalityRecord const& source) {
_dataMap = source._dataMap;
_entryIndex = source._entryIndex;
return *this;
}
virtual void addref() { ReferenceCounted<LocalityRecord>::addref(); }
virtual void delref() { ReferenceCounted<LocalityRecord>::delref(); }
Optional<AttribValue> getValue(AttribKey indexKey) const {
return _dataMap->getValue(indexKey);
}
@ -155,12 +147,11 @@ struct LocalityRecord : public ReferenceCounted<LocalityRecord> {
};
// This class stores the information for string to integer map for keys and values
struct StringToIntMap : public ReferenceCounted<StringToIntMap> {
struct StringToIntMap final : public ReferenceCounted<StringToIntMap> {
std::map<std::string, int> _hashmap;
std::vector<std::string> _lookuparray;
StringToIntMap() {}
StringToIntMap(StringToIntMap const& source):_hashmap(source._hashmap), _lookuparray(source._lookuparray){}
virtual ~StringToIntMap(){}
StringToIntMap& operator=(StringToIntMap const& source) {
_hashmap = source._hashmap;
_lookuparray = source._lookuparray;
@ -206,8 +197,6 @@ struct StringToIntMap : public ReferenceCounted<StringToIntMap> {
}
return memSize;
}
virtual void addref() { ReferenceCounted<StringToIntMap>::addref(); }
virtual void delref() { ReferenceCounted<StringToIntMap>::delref(); }
};
extern const std::vector<LocalityEntry> emptyEntryArray;

View File

@ -126,19 +126,19 @@ struct Traceable<Counter> : std::true_type {
};
template <class F>
struct SpecialCounter : ICounter, FastAllocated<SpecialCounter<F>>, NonCopyable {
struct SpecialCounter final : ICounter, FastAllocated<SpecialCounter<F>>, NonCopyable {
SpecialCounter(CounterCollection& collection, std::string const& name, F && f) : name(name), f(f) { collection.counters.push_back(this); collection.counters_to_remove.push_back(this); }
virtual void remove() { delete this; }
void remove() override { delete this; }
virtual std::string const& getName() const { return name; }
virtual int64_t getValue() const { return f(); }
std::string const& getName() const override { return name; }
int64_t getValue() const override { return f(); }
virtual void resetInterval() {}
void resetInterval() override {}
virtual bool hasRate() const { return false; }
virtual double getRate() const { throw internal_error(); }
virtual bool hasRoughness() const { return false; }
virtual double getRoughness() const { throw internal_error(); }
bool hasRate() const override { return false; }
double getRate() const override { throw internal_error(); }
bool hasRoughness() const override { return false; }
double getRoughness() const override { throw internal_error(); }
std::string name;
F f;

View File

@ -87,7 +87,7 @@ private:
};
template <class T>
struct NetSAV : SAV<T>, FlowReceiver, FastAllocated<NetSAV<T>> {
struct NetSAV final : SAV<T>, FlowReceiver, FastAllocated<NetSAV<T>> {
using FastAllocated<NetSAV<T>>::operator new;
using FastAllocated<NetSAV<T>>::operator delete;
@ -96,8 +96,8 @@ struct NetSAV : SAV<T>, FlowReceiver, FastAllocated<NetSAV<T>> {
: SAV<T>(futures, promises), FlowReceiver(remoteEndpoint, false) {
}
virtual void destroy() { delete this; }
virtual void receive(ArenaObjectReader& reader) {
void destroy() override { delete this; }
void receive(ArenaObjectReader& reader) override {
if (!SAV<T>::canBeSet()) return;
this->addPromiseRef();
ErrorOr<EnsureTable<T>> message;
@ -222,12 +222,8 @@ void setReplyPriority(ReplyPromise<Reply> & p, TaskPriority taskID) { p.getEndpo
template <class Reply>
void setReplyPriority(const ReplyPromise<Reply> & p, TaskPriority taskID) { p.getEndpoint(taskID); }
template <class T>
struct NetNotifiedQueue : NotifiedQueue<T>, FlowReceiver, FastAllocated<NetNotifiedQueue<T>> {
struct NetNotifiedQueue final : NotifiedQueue<T>, FlowReceiver, FastAllocated<NetNotifiedQueue<T>> {
using FastAllocated<NetNotifiedQueue<T>>::operator new;
using FastAllocated<NetNotifiedQueue<T>>::operator delete;
@ -235,18 +231,17 @@ struct NetNotifiedQueue : NotifiedQueue<T>, FlowReceiver, FastAllocated<NetNotif
NetNotifiedQueue(int futures, int promises, const Endpoint& remoteEndpoint)
: NotifiedQueue<T>(futures, promises), FlowReceiver(remoteEndpoint, true) {}
virtual void destroy() { delete this; }
virtual void receive(ArenaObjectReader& reader) {
void destroy() override { delete this; }
void receive(ArenaObjectReader& reader) override {
this->addPromiseRef();
T message;
reader.deserialize(message);
this->send(std::move(message));
this->delPromiseRef();
}
virtual bool isStream() const { return true; }
bool isStream() const override { return true; }
};
template <class T>
class RequestStream {
public:

View File

@ -157,7 +157,7 @@ private:
SimClogging g_clogging;
struct Sim2Conn : IConnection, ReferenceCounted<Sim2Conn> {
struct Sim2Conn final : IConnection, ReferenceCounted<Sim2Conn> {
Sim2Conn( ISimulator::ProcessInfo* process )
: process(process), dbgid( deterministicRandom()->randomUniqueID() ), opened(false), closedByCaller(false), stopReceive(Never())
{
@ -181,20 +181,21 @@ struct Sim2Conn : IConnection, ReferenceCounted<Sim2Conn> {
ASSERT_ABORT( !opened || closedByCaller );
}
virtual void addref() { ReferenceCounted<Sim2Conn>::addref(); }
virtual void delref() { ReferenceCounted<Sim2Conn>::delref(); }
virtual void close() { closedByCaller = true; closeInternal(); }
virtual Future<Void> acceptHandshake() { return delay(0.01*deterministicRandom()->random01()); }
virtual Future<Void> connectHandshake() { return delay(0.01*deterministicRandom()->random01()); }
virtual Future<Void> onWritable() { return whenWritable(this); }
virtual Future<Void> onReadable() { return whenReadable(this); }
bool isPeerGone() {
return !peer || peerProcess->failed;
void addref() override { ReferenceCounted<Sim2Conn>::addref(); }
void delref() override { ReferenceCounted<Sim2Conn>::delref(); }
void close() override {
closedByCaller = true;
closeInternal();
}
Future<Void> acceptHandshake() override { return delay(0.01 * deterministicRandom()->random01()); }
Future<Void> connectHandshake() override { return delay(0.01 * deterministicRandom()->random01()); }
Future<Void> onWritable() override { return whenWritable(this); }
Future<Void> onReadable() override { return whenReadable(this); }
bool isPeerGone() const { return !peer || peerProcess->failed; }
void peerClosed() {
leakedConnectionTracker = trackLeakedConnection(this);
stopReceive = delay(1.0);
@ -202,7 +203,7 @@ struct Sim2Conn : IConnection, ReferenceCounted<Sim2Conn> {
// Reads as many bytes as possible from the read buffer into [begin,end) and returns the number of bytes read (might be 0)
// (or may throw an error if the connection dies)
virtual int read( uint8_t* begin, uint8_t* end ) {
int read(uint8_t* begin, uint8_t* end) override {
rollRandomClose();
int64_t avail = receivedBytes.get() - readBytes.get(); // SOMEDAY: random?
@ -217,7 +218,7 @@ struct Sim2Conn : IConnection, ReferenceCounted<Sim2Conn> {
// Writes as many bytes as possible from the given SendBuffer chain into the write buffer and returns the number of bytes written (might be 0)
// (or may throw an error if the connection dies)
virtual int write( SendBuffer const* buffer, int limit) {
int write(SendBuffer const* buffer, int limit) override {
rollRandomClose();
ASSERT(limit > 0);
@ -254,8 +255,8 @@ struct Sim2Conn : IConnection, ReferenceCounted<Sim2Conn> {
// Returns the network address and port of the other end of the connection. In the case of an incoming connection, this may not
// be an address we can connect to!
virtual NetworkAddress getPeerAddress() const override { return peerEndpoint; }
virtual UID getDebugID() const override { return dbgid; }
NetworkAddress getPeerAddress() const override { return peerEndpoint; }
UID getDebugID() const override { return dbgid; }
bool opened, closedByCaller;
@ -474,8 +475,8 @@ public:
}
}
virtual void addref() { ReferenceCounted<SimpleFile>::addref(); }
virtual void delref() { ReferenceCounted<SimpleFile>::delref(); }
void addref() override { ReferenceCounted<SimpleFile>::addref(); }
void delref() override { ReferenceCounted<SimpleFile>::delref(); }
int64_t debugFD() const override { return (int64_t)h; }
@ -685,7 +686,7 @@ struct SimDiskSpace {
void doReboot( ISimulator::ProcessInfo* const& p, ISimulator::KillType const& kt );
struct Sim2Listener : IListener, ReferenceCounted<Sim2Listener> {
struct Sim2Listener final : IListener, ReferenceCounted<Sim2Listener> {
explicit Sim2Listener( ISimulator::ProcessInfo* process, const NetworkAddress& listenAddr )
: process(process),
address(listenAddr) {}
@ -694,14 +695,12 @@ struct Sim2Listener : IListener, ReferenceCounted<Sim2Listener> {
incoming( Reference<Sim2Listener>::addRef( this ), seconds, conn );
}
virtual void addref() { ReferenceCounted<Sim2Listener>::addref(); }
virtual void delref() { ReferenceCounted<Sim2Listener>::delref(); }
void addref() override { ReferenceCounted<Sim2Listener>::addref(); }
void delref() override { ReferenceCounted<Sim2Listener>::delref(); }
virtual Future<Reference<IConnection>> accept() {
return popOne( nextConnection.getFuture() );
}
Future<Reference<IConnection>> accept() override { return popOne(nextConnection.getFuture()); }
virtual NetworkAddress getListenAddress() const override { return address; }
NetworkAddress getListenAddress() const override { return address; }
private:
ISimulator::ProcessInfo* process;
@ -728,19 +727,19 @@ private:
#define g_sim2 ((Sim2&)g_simulator)
class Sim2 : public ISimulator, public INetworkConnections {
class Sim2 final : public ISimulator, public INetworkConnections {
public:
// Implement INetwork interface
// Everything actually network related is delegated to the Sim2Net class; Sim2 is only concerned with simulating machines and time
virtual double now() const override { return time; }
double now() const override { return time; }
// timer() can be up to 0.1 seconds ahead of now()
virtual double timer() {
double timer() override {
timerTime += deterministicRandom()->random01()*(time+0.1-timerTime)/2.0;
return timerTime;
return timerTime;
}
virtual Future<class Void> delay( double seconds, TaskPriority taskID ) {
Future<class Void> delay(double seconds, TaskPriority taskID) override {
ASSERT(taskID >= TaskPriority::Min && taskID <= TaskPriority::Max);
return delay( seconds, taskID, currentProcess );
}
@ -766,7 +765,7 @@ public:
self->setCurrentTask(taskID);
return Void();
}
virtual Future<class Void> yield( TaskPriority taskID ) {
Future<class Void> yield(TaskPriority taskID) override {
if (taskID == TaskPriority::DefaultYield) taskID = currentTaskID;
if (check_yield(taskID)) {
// We want to check that yielders can handle actual time elapsing (it sometimes will outside simulation), but
@ -776,7 +775,7 @@ public:
setCurrentTask(taskID);
return Void();
}
virtual bool check_yield( TaskPriority taskID ) {
bool check_yield(TaskPriority taskID) override {
if (yielded) return true;
if (--yield_limit <= 0) {
yield_limit = deterministicRandom()->randomInt(1, 150); // If yield returns false *too* many times in a row, there could be a stack overflow, since we can't deterministically check stack size as the real network does
@ -784,12 +783,10 @@ public:
}
return yielded = BUGGIFY_WITH_PROB(0.01);
}
virtual TaskPriority getCurrentTask() const override { return currentTaskID; }
virtual void setCurrentTask(TaskPriority taskID ) {
currentTaskID = taskID;
}
TaskPriority getCurrentTask() const override { return currentTaskID; }
void setCurrentTask(TaskPriority taskID) override { currentTaskID = taskID; }
// Sets the taskID/priority of the current task, without yielding
virtual Future<Reference<IConnection>> connect( NetworkAddress toAddr, std::string host ) {
Future<Reference<IConnection>> connect(NetworkAddress toAddr, std::string host) override {
ASSERT( host.empty());
if (!addressMap.count( toAddr )) {
return waitForProcessAndConnect( toAddr, this );
@ -813,7 +810,7 @@ public:
((Sim2Listener*)peerp->getListener(toAddr).getPtr())->incomingConnection( 0.5*deterministicRandom()->random01(), Reference<IConnection>(peerc) );
return onConnect( ::delay(0.5*deterministicRandom()->random01()), myc );
}
virtual Future<std::vector<NetworkAddress>> resolveTCPEndpoint( std::string host, std::string service) {
Future<std::vector<NetworkAddress>> resolveTCPEndpoint(std::string host, std::string service) override {
throw lookup_failed();
}
ACTOR static Future<Reference<IConnection>> onConnect( Future<Void> ready, Reference<Sim2Conn> conn ) {
@ -828,7 +825,7 @@ public:
conn->opened = true;
return conn;
}
virtual Reference<IListener> listen( NetworkAddress localAddr ) {
Reference<IListener> listen(NetworkAddress localAddr) override {
Reference<IListener> listener( getCurrentProcess()->getListener(localAddr) );
ASSERT(listener);
return listener;
@ -844,22 +841,16 @@ public:
}
}
}
virtual const TLSConfig& getTLSConfig() const override {
const TLSConfig& getTLSConfig() const override {
static TLSConfig emptyConfig;
return emptyConfig;
}
virtual bool checkRunnable() {
return net2->checkRunnable();
}
bool checkRunnable() override { return net2->checkRunnable(); }
virtual void stop() {
isStopped = true;
}
virtual void addStopCallback( std::function<void()> fn ) {
stopCallbacks.emplace_back(std::move(fn));
}
virtual bool isSimulated() const { return true; }
void stop() override { isStopped = true; }
void addStopCallback(std::function<void()> fn) override { stopCallbacks.emplace_back(std::move(fn)); }
bool isSimulated() const override { return true; }
struct SimThreadArgs {
THREAD_FUNC_RETURN (*func) (void*);
@ -883,12 +874,12 @@ public:
THREAD_RETURN;
}
virtual THREAD_HANDLE startThread( THREAD_FUNC_RETURN (*func) (void*), void *arg ) {
THREAD_HANDLE startThread(THREAD_FUNC_RETURN (*func)(void*), void* arg) override {
SimThreadArgs *simArgs = new SimThreadArgs(func, arg);
return ::startThread(simStartThread, simArgs);
}
virtual void getDiskBytes( std::string const& directory, int64_t& free, int64_t& total) {
void getDiskBytes(std::string const& directory, int64_t& free, int64_t& total) override {
ProcessInfo *proc = getCurrentProcess();
SimDiskSpace &diskSpace = diskSpaceMap[proc->address.ip];
@ -923,7 +914,7 @@ public:
if(free == 0)
TraceEvent(SevWarnAlways, "Sim2NoFreeSpace").detail("TotalSpace", diskSpace.totalSpace).detail("BaseFreeSpace", diskSpace.baseFreeSpace).detail("TotalFileSize", totalFileSize).detail("NumFiles", numFiles);
}
virtual bool isAddressOnThisHost(NetworkAddress const& addr) const override {
bool isAddressOnThisHost(NetworkAddress const& addr) const override {
return addr.ip == getCurrentProcess()->address.ip;
}
@ -989,13 +980,13 @@ public:
}
// Implement ISimulator interface
virtual void run() {
void run() override {
Future<Void> loopFuture = runLoop(this);
net2->run();
}
virtual ProcessInfo* newProcess(const char* name, IPAddress ip, uint16_t port, bool sslEnabled, uint16_t listenPerProcess,
LocalityData locality, ProcessClass startingClass, const char* dataFolder,
const char* coordinationFolder, ProtocolVersion protocol) {
ProcessInfo* newProcess(const char* name, IPAddress ip, uint16_t port, bool sslEnabled, uint16_t listenPerProcess,
LocalityData locality, ProcessClass startingClass, const char* dataFolder,
const char* coordinationFolder, ProtocolVersion protocol) override {
ASSERT( locality.machineId().present() );
MachineInfo& machine = machines[ locality.machineId().get() ];
if (!machine.machineId.present())
@ -1050,8 +1041,7 @@ public:
return m;
}
virtual bool isAvailable() const
{
bool isAvailable() const override {
std::vector<ProcessInfo*> processesLeft, processesDead;
for (auto processInfo : getAllProcesses()) {
if (processInfo->isAvailableClass()) {
@ -1065,8 +1055,7 @@ public:
return canKillProcesses(processesLeft, processesDead, KillInstantly, nullptr);
}
virtual bool datacenterDead(Optional<Standalone<StringRef>> dcId) const
{
bool datacenterDead(Optional<Standalone<StringRef>> dcId) const override {
if(!dcId.present()) {
return false;
}
@ -1096,8 +1085,9 @@ public:
}
// The following function will determine if the specified configuration of available and dead processes can allow the cluster to survive
virtual bool canKillProcesses(std::vector<ProcessInfo*> const& availableProcesses, std::vector<ProcessInfo*> const& deadProcesses, KillType kt, KillType* newKillType) const
{
bool canKillProcesses(std::vector<ProcessInfo*> const& availableProcesses,
std::vector<ProcessInfo*> const& deadProcesses, KillType kt,
KillType* newKillType) const override {
bool canSurvive = true;
int nQuorum = ((desiredCoordinators+1)/2)*2-1;
@ -1247,7 +1237,7 @@ public:
return canSurvive;
}
virtual void destroyProcess( ISimulator::ProcessInfo *p ) {
void destroyProcess(ISimulator::ProcessInfo* p) override {
TraceEvent("ProcessDestroyed").detail("Name", p->name).detail("Address", p->address).detail("MachineId", p->locality.machineId());
currentlyRebootingProcesses.insert(std::pair<NetworkAddress, ProcessInfo*>(p->address, p));
std::vector<ProcessInfo*>& processes = machines[ p->locality.machineId().get() ].processes;
@ -1293,14 +1283,14 @@ public:
}
ASSERT(!protectedAddresses.count(machine->address) || machine->rebooting);
}
virtual void rebootProcess( ProcessInfo* process, KillType kt ) {
void rebootProcess(ProcessInfo* process, KillType kt) override {
if( kt == RebootProcessAndDelete && protectedAddresses.count(process->address) ) {
TraceEvent("RebootChanged").detail("ZoneId", process->locality.describeZone()).detail("KillType", RebootProcess).detail("OrigKillType", kt).detail("Reason", "Protected process");
kt = RebootProcess;
}
doReboot( process, kt );
}
virtual void rebootProcess(Optional<Standalone<StringRef>> zoneId, bool allProcesses ) {
void rebootProcess(Optional<Standalone<StringRef>> zoneId, bool allProcesses) override {
if( allProcesses ) {
auto processes = getAllProcesses();
for( int i = 0; i < processes.size(); i++ )
@ -1317,20 +1307,20 @@ public:
doReboot( deterministicRandom()->randomChoice( processes ), RebootProcess );
}
}
virtual void killProcess( ProcessInfo* machine, KillType kt ) {
void killProcess(ProcessInfo* machine, KillType kt) override {
TraceEvent("AttemptingKillProcess").detail("ProcessInfo", machine->toString());
if (kt < RebootAndDelete ) {
killProcess_internal( machine, kt );
}
}
virtual void killInterface( NetworkAddress address, KillType kt ) {
void killInterface(NetworkAddress address, KillType kt) override {
if (kt < RebootAndDelete ) {
std::vector<ProcessInfo*>& processes = machines[ addressMap[address]->locality.machineId() ].processes;
for( int i = 0; i < processes.size(); i++ )
killProcess_internal( processes[i], kt );
}
}
virtual bool killZone(Optional<Standalone<StringRef>> zoneId, KillType kt, bool forceKill, KillType* ktFinal) {
bool killZone(Optional<Standalone<StringRef>> zoneId, KillType kt, bool forceKill, KillType* ktFinal) override {
auto processes = getAllProcesses();
std::set<Optional<Standalone<StringRef>>> zoneMachines;
for (auto& process : processes) {
@ -1346,7 +1336,8 @@ public:
}
return result;
}
virtual bool killMachine(Optional<Standalone<StringRef>> machineId, KillType kt, bool forceKill, KillType* ktFinal) {
bool killMachine(Optional<Standalone<StringRef>> machineId, KillType kt, bool forceKill,
KillType* ktFinal) override {
auto ktOrig = kt;
TEST(true); // Trying to killing a machine
@ -1476,7 +1467,7 @@ public:
return true;
}
virtual bool killDataCenter(Optional<Standalone<StringRef>> dcId, KillType kt, bool forceKill, KillType* ktFinal) {
bool killDataCenter(Optional<Standalone<StringRef>> dcId, KillType kt, bool forceKill, KillType* ktFinal) override {
auto ktOrig = kt;
auto processes = getAllProcesses();
std::map<Optional<Standalone<StringRef>>, int> datacenterMachines;
@ -1565,7 +1556,7 @@ public:
return (kt == ktMin);
}
virtual void clogInterface(const IPAddress& ip, double seconds, ClogMode mode = ClogDefault) {
void clogInterface(const IPAddress& ip, double seconds, ClogMode mode = ClogDefault) override {
if (mode == ClogDefault) {
double a = deterministicRandom()->random01();
if ( a < 0.3 ) mode = ClogSend;
@ -1582,10 +1573,10 @@ public:
if (mode == ClogReceive || mode==ClogAll)
g_clogging.clogRecvFor( ip, seconds );
}
virtual void clogPair(const IPAddress& from, const IPAddress& to, double seconds) {
void clogPair(const IPAddress& from, const IPAddress& to, double seconds) override {
g_clogging.clogPairFor( from, to, seconds );
}
virtual std::vector<ProcessInfo*> getAllProcesses() const {
std::vector<ProcessInfo*> getAllProcesses() const override {
std::vector<ProcessInfo*> processes;
for( auto& c : machines ) {
processes.insert( processes.end(), c.second.processes.begin(), c.second.processes.end() );
@ -1595,22 +1586,22 @@ public:
}
return processes;
}
virtual ProcessInfo* getProcessByAddress( NetworkAddress const& address ) {
ProcessInfo* getProcessByAddress(NetworkAddress const& address) override {
NetworkAddress normalizedAddress(address.ip, address.port, true, address.isTLS());
ASSERT( addressMap.count( normalizedAddress ) );
// NOTE: addressMap[normalizedAddress]->address may not equal to normalizedAddress
return addressMap[normalizedAddress];
}
virtual MachineInfo* getMachineByNetworkAddress(NetworkAddress const& address) {
MachineInfo* getMachineByNetworkAddress(NetworkAddress const& address) override {
return &machines[addressMap[address]->locality.machineId()];
}
virtual MachineInfo* getMachineById(Optional<Standalone<StringRef>> const& machineId) {
MachineInfo* getMachineById(Optional<Standalone<StringRef>> const& machineId) override {
return &machines[machineId];
}
virtual void destroyMachine(Optional<Standalone<StringRef>> const& machineId ) {
void destroyMachine(Optional<Standalone<StringRef>> const& machineId) override {
auto& machine = machines[machineId];
for( auto process : machine.processes ) {
ASSERT( process->failed );
@ -1683,7 +1674,7 @@ public:
}
}
virtual void onMainThread( Promise<Void>&& signal, TaskPriority taskID ) {
void onMainThread(Promise<Void>&& signal, TaskPriority taskID) override {
// This is presumably coming from either a "fake" thread pool thread, i.e. it is actually on this thread
// or a thread created with g_network->startThread
ASSERT(getCurrentProcess());
@ -1696,10 +1687,10 @@ public:
bool isOnMainThread() const override {
return net2->isOnMainThread();
}
virtual Future<Void> onProcess( ISimulator::ProcessInfo *process, TaskPriority taskID ) {
Future<Void> onProcess(ISimulator::ProcessInfo* process, TaskPriority taskID) override {
return delay( 0, taskID, process );
}
virtual Future<Void> onMachine( ISimulator::ProcessInfo *process, TaskPriority taskID ) {
Future<Void> onMachine(ISimulator::ProcessInfo* process, TaskPriority taskID) override {
if( process->machine == 0 )
return Void();
return delay( 0, taskID, process->machine->machineProcess );

View File

@ -45,16 +45,20 @@ Reference<StorageInfo> getStorageInfo(UID id, std::map<UID, Reference<StorageInf
// It is incredibly important that any modifications to txnStateStore are done in such a way that
// the same operations will be done on all commit proxies at the same time. Otherwise, the data
// stored in txnStateStore will become corrupted.
void applyMetadataMutations(UID const& dbgid, Arena& arena, VectorRef<MutationRef> const& mutations,
IKeyValueStore* txnStateStore, LogPushData* toCommit, bool& confChange,
Reference<ILogSystem> logSystem, Version popVersion,
KeyRangeMap<std::set<Key>>* vecBackupKeys, KeyRangeMap<ServerCacheInfo>* keyInfo,
KeyRangeMap<bool>* cacheInfo, std::map<Key, ApplyMutationsData>* uid_applyMutationsData,
RequestStream<CommitTransactionRequest> commit, Database cx, NotifiedVersion* commitVersion,
std::map<UID, Reference<StorageInfo>>* storageCache, std::map<Tag, Version>* tag_popped,
bool initialCommit) {
void applyMetadataMutations(SpanID const& spanContext, UID const& dbgid, Arena& arena,
VectorRef<MutationRef> const& mutations, IKeyValueStore* txnStateStore,
LogPushData* toCommit, bool& confChange, Reference<ILogSystem> logSystem,
Version popVersion, KeyRangeMap<std::set<Key>>* vecBackupKeys,
KeyRangeMap<ServerCacheInfo>* keyInfo, KeyRangeMap<bool>* cacheInfo, std::map<Key,
ApplyMutationsData>* uid_applyMutationsData, RequestStream<CommitTransactionRequest> commit,
Database cx, NotifiedVersion* commitVersion, std::map<UID, Reference<StorageInfo>>* storageCache,
std::map<Tag, Version>* tag_popped, bool initialCommit) {
//std::map<keyRef, vector<uint16_t>> cacheRangeInfo;
std::map<KeyRef, MutationRef> cachedRangeInfo;
if (toCommit) {
toCommit->addTransactionInfo(spanContext);
}
for (auto const& m : mutations) {
//TraceEvent("MetadataMutation", dbgid).detail("M", m.toString());
@ -102,7 +106,7 @@ void applyMetadataMutations(UID const& dbgid, Arena& arena, VectorRef<MutationRe
.detail("TagKey", serverTagKeyFor( serverKeysDecodeServer(m.param1) )).detail("Tag", decodeServerTagValue( txnStateStore->readValue( serverTagKeyFor( serverKeysDecodeServer(m.param1) ) ).get().get() ).toString());
toCommit->addTag( decodeServerTagValue( txnStateStore->readValue( serverTagKeyFor( serverKeysDecodeServer(m.param1) ) ).get().get() ) );
toCommit->addTypedMessage(privatized);
toCommit->writeTypedMessage(privatized);
}
} else if (m.param1.startsWith(serverTagPrefix)) {
UID id = decodeServerTagKey(m.param1);
@ -114,9 +118,9 @@ void applyMetadataMutations(UID const& dbgid, Arena& arena, VectorRef<MutationRe
TraceEvent("ServerTag", dbgid).detail("Server", id).detail("Tag", tag.toString());
toCommit->addTag(tag);
toCommit->addTypedMessage(LogProtocolMessage());
toCommit->writeTypedMessage(LogProtocolMessage(), true);
toCommit->addTag(tag);
toCommit->addTypedMessage(privatized);
toCommit->writeTypedMessage(privatized);
}
if(!initialCommit) {
txnStateStore->set(KeyValueRef(m.param1, m.param2));
@ -168,7 +172,7 @@ void applyMetadataMutations(UID const& dbgid, Arena& arena, VectorRef<MutationRe
privatized.param1 = m.param1.withPrefix(systemKeys.begin, arena);
//TraceEvent(SevDebug, "SendingPrivateMutation", dbgid).detail("Original", m.toString()).detail("Privatized", privatized.toString());
toCommit->addTag( cacheTag );
toCommit->addTypedMessage(privatized);
toCommit->writeTypedMessage(privatized);
}
}
else if (m.param1.startsWith(configKeysPrefix) || m.param1 == coordinatorsKey) {
@ -285,13 +289,13 @@ void applyMetadataMutations(UID const& dbgid, Arena& arena, VectorRef<MutationRe
if (m.param1 == lastEpochEndKey) {
toCommit->addTags(allTags);
toCommit->addTypedMessage(LogProtocolMessage());
toCommit->writeTypedMessage(LogProtocolMessage(), true);
}
MutationRef privatized = m;
privatized.param1 = m.param1.withPrefix(systemKeys.begin, arena);
toCommit->addTags(allTags);
toCommit->addTypedMessage(privatized);
toCommit->writeTypedMessage(privatized);
}
}
else if (m.param1 == minRequiredCommitVersionKey) {
@ -305,8 +309,7 @@ void applyMetadataMutations(UID const& dbgid, Arena& arena, VectorRef<MutationRe
if (!initialCommit) txnStateStore->set(KeyValueRef(m.param1, m.param2));
TEST(true); // Snapshot created, setting writeRecoveryKey in txnStateStore
}
}
else if (m.param2.size() && m.param2[0] == systemKeys.begin[0] && m.type == MutationRef::ClearRange) {
} else if (m.param2.size() > 1 && m.param2[0] == systemKeys.begin[0] && m.type == MutationRef::ClearRange) {
KeyRangeRef range(m.param1, m.param2);
if (keyServersKeys.intersects(range)) {
@ -351,7 +354,7 @@ void applyMetadataMutations(UID const& dbgid, Arena& arena, VectorRef<MutationRe
privatized.param2 = keyAfter(kv.key, arena).withPrefix(systemKeys.begin, arena);
toCommit->addTag(decodeServerTagValue(kv.value));
toCommit->addTypedMessage(privatized);
toCommit->writeTypedMessage(privatized);
}
}
}
@ -545,37 +548,37 @@ void applyMetadataMutations(UID const& dbgid, Arena& arena, VectorRef<MutationRe
// Add the tags to both begin and end mutations
toCommit->addTags(allTags);
toCommit->addTypedMessage(mutationBegin);
toCommit->writeTypedMessage(mutationBegin);
toCommit->addTags(allTags);
toCommit->addTypedMessage(mutationEnd);
toCommit->writeTypedMessage(mutationEnd);
}
}
}
void applyMetadataMutations(ProxyCommitData& proxyCommitData, Arena& arena, Reference<ILogSystem> logSystem,
const VectorRef<MutationRef>& mutations, LogPushData* toCommit, bool& confChange,
Version popVersion, bool initialCommit) {
void applyMetadataMutations(SpanID const& spanContext, ProxyCommitData& proxyCommitData, Arena& arena,
Reference<ILogSystem> logSystem, const VectorRef<MutationRef>& mutations,
LogPushData* toCommit, bool& confChange, Version popVersion, bool initialCommit) {
std::map<Key, ApplyMutationsData>* uid_applyMutationsData = nullptr;
if (proxyCommitData.firstProxy) {
uid_applyMutationsData = &proxyCommitData.uid_applyMutationsData;
}
applyMetadataMutations(proxyCommitData.dbgid, arena, mutations, proxyCommitData.txnStateStore, toCommit, confChange,
logSystem, popVersion, &proxyCommitData.vecBackupKeys, &proxyCommitData.keyInfo,
applyMetadataMutations(spanContext, proxyCommitData.dbgid, arena, mutations, proxyCommitData.txnStateStore, toCommit,
confChange, logSystem, popVersion, &proxyCommitData.vecBackupKeys, &proxyCommitData.keyInfo,
&proxyCommitData.cacheInfo, uid_applyMutationsData, proxyCommitData.commit,
proxyCommitData.cx, &proxyCommitData.committedVersion, &proxyCommitData.storageCache,
&proxyCommitData.tag_popped, initialCommit);
}
void applyMetadataMutations(const UID& dbgid, Arena& arena, const VectorRef<MutationRef>& mutations,
IKeyValueStore* txnStateStore) {
void applyMetadataMutations(SpanID const& spanContext, const UID& dbgid, Arena& arena,
const VectorRef<MutationRef>& mutations, IKeyValueStore* txnStateStore) {
bool confChange; // Dummy variable, not used.
applyMetadataMutations(dbgid, arena, mutations, txnStateStore, /* toCommit= */ nullptr, confChange,
applyMetadataMutations(spanContext, dbgid, arena, mutations, txnStateStore, /* toCommit= */ nullptr, confChange,
Reference<ILogSystem>(), /* popVersion= */ 0, /* vecBackupKeys= */ nullptr,
/* keyInfo= */ nullptr, /* cacheInfo= */ nullptr, /* uid_applyMutationsData= */ nullptr,
RequestStream<CommitTransactionRequest>(), Database(), /* commitVersion= */ nullptr,
/* storageCache= */ nullptr, /* tag_popped= */ nullptr, /* initialCommit= */ false);
}
}

View File

@ -33,16 +33,18 @@
inline bool isMetadataMutation(MutationRef const& m) {
// FIXME: This is conservative - not everything in system keyspace is necessarily processed by applyMetadataMutations
return (m.type == MutationRef::SetValue && m.param1.size() && m.param1[0] == systemKeys.begin[0] && !m.param1.startsWith(nonMetadataSystemKeys.begin)) ||
(m.type == MutationRef::ClearRange && m.param2.size() && m.param2[0] == systemKeys.begin[0] && !nonMetadataSystemKeys.contains(KeyRangeRef(m.param1, m.param2)) );
return (m.type == MutationRef::SetValue && m.param1.size() && m.param1[0] == systemKeys.begin[0] &&
!m.param1.startsWith(nonMetadataSystemKeys.begin)) ||
(m.type == MutationRef::ClearRange && m.param2.size() > 1 && m.param2[0] == systemKeys.begin[0] &&
!nonMetadataSystemKeys.contains(KeyRangeRef(m.param1, m.param2)));
}
Reference<StorageInfo> getStorageInfo(UID id, std::map<UID, Reference<StorageInfo>>* storageCache, IKeyValueStore* txnStateStore);
void applyMetadataMutations(ProxyCommitData& proxyCommitData, Arena& arena, Reference<ILogSystem> logSystem,
const VectorRef<MutationRef>& mutations, LogPushData* pToCommit, bool& confChange,
Version popVersion, bool initialCommit);
void applyMetadataMutations(const UID& dbgid, Arena& arena, const VectorRef<MutationRef>& mutations,
IKeyValueStore* txnStateStore);
void applyMetadataMutations(SpanID const& spanContext, ProxyCommitData& proxyCommitData, Arena& arena,
Reference<ILogSystem> logSystem, const VectorRef<MutationRef>& mutations,
LogPushData* pToCommit, bool& confChange, Version popVersion, bool initialCommit);
void applyMetadataMutations(SpanID const& spanContext, const UID& dbgid, Arena& arena,
const VectorRef<MutationRef>& mutations, IKeyValueStore* txnStateStore);
#endif

View File

@ -61,8 +61,9 @@ struct VersionedMessage {
ArenaReader reader(arena, message, AssumeVersion(g_network->protocolVersion()));
// Return false for LogProtocolMessage.
// Return false for LogProtocolMessage and SpanContextMessage metadata messages.
if (LogProtocolMessage::isNextIn(reader)) return false;
if (reader.protocolVersion().hasSpanContext() && SpanContextMessage::isNextIn(reader)) return false;
reader >> *m;
return normalKeys.contains(m->param1) || m->param1 == metadataVersionKey;

View File

@ -87,6 +87,7 @@ set(FDBSERVER_SRCS
SimulatedCluster.actor.cpp
SimulatedCluster.h
SkipList.cpp
SpanContextMessage.h
Status.actor.cpp
Status.h
StorageCache.actor.cpp
@ -182,6 +183,7 @@ set(FDBSERVER_SRCS
workloads/ReadWrite.actor.cpp
workloads/RemoveServersSafely.actor.cpp
workloads/ReportConflictingKeys.actor.cpp
workloads/RestoreBackup.actor.cpp
workloads/Rollback.actor.cpp
workloads/RyowCorrectness.actor.cpp
workloads/RYWDisable.actor.cpp
@ -197,6 +199,7 @@ set(FDBSERVER_SRCS
workloads/StatusWorkload.actor.cpp
workloads/Storefront.actor.cpp
workloads/StreamingRead.actor.cpp
workloads/SubmitBackup.actor.cpp
workloads/TagThrottleApi.actor.cpp
workloads/TargetedKill.actor.cpp
workloads/TaskBucketCorrectness.actor.cpp

View File

@ -295,6 +295,8 @@ ACTOR Future<Void> addBackupMutations(ProxyCommitData* self, std::map<Key, Mutat
state int yieldBytes = 0;
state BinaryWriter valueWriter(Unversioned());
toCommit->addTransactionInfo(SpanID());
// Serialize the log range mutations within the map
for (; logRangeMutation != logRangeMutations->end(); ++logRangeMutation)
{
@ -356,7 +358,7 @@ ACTOR Future<Void> addBackupMutations(ProxyCommitData* self, std::map<Key, Mutat
auto& tags = self->tagsForKey(backupMutation.param1);
toCommit->addTags(tags);
toCommit->addTypedMessage(backupMutation);
toCommit->writeTypedMessage(backupMutation);
// if (DEBUG_MUTATION("BackupProxyCommit", commitVersion, backupMutation)) {
// TraceEvent("BackupProxyCommitTo", self->dbgid).detail("To", describe(tags)).detail("BackupMutation", backupMutation.toString())
@ -395,7 +397,7 @@ struct CommitBatchContext {
int batchOperations = 0;
Span span = Span("MP:commitBatch"_loc);
Span span;
int64_t batchBytes = 0;
@ -475,7 +477,9 @@ CommitBatchContext::CommitBatchContext(ProxyCommitData* const pProxyCommitData_,
localBatchNumber(++pProxyCommitData->localCommitBatchesStarted), toCommit(pProxyCommitData->logSystem),
committed(trs.size()) {
committed(trs.size()),
span("MP:commitBatch"_loc) {
evaluateBatchSize();
@ -530,6 +534,7 @@ ACTOR Future<Void> preresolutionProcessing(CommitBatchContext* self) {
state const int64_t localBatchNumber = self->localBatchNumber;
state const int latencyBucket = self->latencyBucket;
state const Optional<UID>& debugID = self->debugID;
state Span span("MP:preresolutionProcessing"_loc, self->span.context);
// Pre-resolution the commits
TEST(pProxyCommitData->latestLocalCommitBatchResolving.get() < localBatchNumber - 1);
@ -545,7 +550,7 @@ ACTOR Future<Void> preresolutionProcessing(CommitBatchContext* self) {
"CommitProxyServer.commitBatch.GettingCommitVersion");
}
GetCommitVersionRequest req(self->span.context, pProxyCommitData->commitVersionRequestNumber++,
GetCommitVersionRequest req(span.context, pProxyCommitData->commitVersionRequestNumber++,
pProxyCommitData->mostRecentProcessedRequestNumber, pProxyCommitData->dbgid);
GetCommitVersionReply versionReply = wait(brokenPromiseToNever(
pProxyCommitData->master.getCommitVersion.getReply(
@ -581,13 +586,14 @@ ACTOR Future<Void> getResolution(CommitBatchContext* self) {
// resolution processing but is still using CPU
ProxyCommitData* pProxyCommitData = self->pProxyCommitData;
std::vector<CommitTransactionRequest>& trs = self->trs;
state Span span("MP:getResolution"_loc, self->span.context);
ResolutionRequestBuilder requests(
pProxyCommitData,
self->commitVersion,
self->prevVersion,
pProxyCommitData->version,
self->span
span
);
int conflictRangeCount = 0;
self->maxTransactionBytes = 0;
@ -659,7 +665,7 @@ void applyMetadataEffect(CommitBatchContext* self) {
for (int resolver = 0; resolver < self->resolution.size(); resolver++)
committed = committed && self->resolution[resolver].stateMutations[versionIndex][transactionIndex].committed;
if (committed) {
applyMetadataMutations(*self->pProxyCommitData, self->arena, self->pProxyCommitData->logSystem,
applyMetadataMutations(SpanID(), *self->pProxyCommitData, self->arena, self->pProxyCommitData->logSystem,
self->resolution[0].stateMutations[versionIndex][transactionIndex].mutations,
/* pToCommit= */ nullptr, self->forceRecovery,
/* popVersion= */ 0, /* initialCommit */ false);
@ -742,7 +748,7 @@ ACTOR Future<Void> applyMetadataToCommittedTransactions(CommitBatchContext* self
for (t = 0; t < trs.size() && !self->forceRecovery; t++) {
if (self->committed[t] == ConflictBatch::TransactionCommitted && (!self->locked || trs[t].isLockAware())) {
self->commitCount++;
applyMetadataMutations(*pProxyCommitData, self->arena, pProxyCommitData->logSystem,
applyMetadataMutations(trs[t].spanContext, *pProxyCommitData, self->arena, pProxyCommitData->logSystem,
trs[t].transaction.mutations, &self->toCommit, self->forceRecovery,
self->commitVersion + 1, /* initialCommit= */ false);
}
@ -791,6 +797,9 @@ ACTOR Future<Void> assignMutationsToStorageServers(CommitBatchContext* self) {
state Optional<ClientTrCommitCostEstimation>* trCost = &trs[self->transactionNum].commitCostEstimation;
state int mutationNum = 0;
state VectorRef<MutationRef>* pMutations = &trs[self->transactionNum].transaction.mutations;
self->toCommit.addTransactionInfo(trs[self->transactionNum].spanContext);
for (; mutationNum < pMutations->size(); mutationNum++) {
if(self->yieldBytes > SERVER_KNOBS->DESIRED_TOTAL_BYTES) {
self->yieldBytes = 0;
@ -845,7 +854,7 @@ ACTOR Future<Void> assignMutationsToStorageServers(CommitBatchContext* self) {
if(pProxyCommitData->cacheInfo[m.param1]) {
self->toCommit.addTag(cacheTag);
}
self->toCommit.addTypedMessage(m);
self->toCommit.writeTypedMessage(m);
}
else if (m.type == MutationRef::ClearRange) {
KeyRangeRef clearRange(KeyRangeRef(m.param1, m.param2));
@ -896,7 +905,7 @@ ACTOR Future<Void> assignMutationsToStorageServers(CommitBatchContext* self) {
if(pProxyCommitData->needsCacheTag(clearRange)) {
self->toCommit.addTag(cacheTag);
}
self->toCommit.addTypedMessage(m);
self->toCommit.writeTypedMessage(m);
} else {
UNREACHABLE();
}
@ -950,6 +959,7 @@ ACTOR Future<Void> postResolution(CommitBatchContext* self) {
state std::vector<CommitTransactionRequest>& trs = self->trs;
state const int64_t localBatchNumber = self->localBatchNumber;
state const Optional<UID>& debugID = self->debugID;
state Span span("MP:postResolution"_loc, self->span.context);
TEST(pProxyCommitData->latestLocalCommitBatchLogging.get() < localBatchNumber - 1); // Queuing post-resolution commit processing
wait(pProxyCommitData->latestLocalCommitBatchLogging.whenAtLeast(localBatchNumber - 1));
@ -1000,7 +1010,7 @@ ACTOR Future<Void> postResolution(CommitBatchContext* self) {
// This should be *extremely* rare in the real world, but knob buggification should make it happen in simulation
TEST(true); // Semi-committed pipeline limited by MVCC window
//TraceEvent("ProxyWaitingForCommitted", pProxyCommitData->dbgid).detail("CommittedVersion", pProxyCommitData->committedVersion.get()).detail("NeedToCommit", commitVersion);
waitVersionSpan = Span(deterministicRandom()->randomUniqueID(), "MP:overMaxReadTransactionLifeVersions"_loc, {self->span.context});
waitVersionSpan = Span(deterministicRandom()->randomUniqueID(), "MP:overMaxReadTransactionLifeVersions"_loc, {span.context});
choose{
when(wait(pProxyCommitData->committedVersion.whenAtLeast(self->commitVersion - SERVER_KNOBS->MAX_READ_TRANSACTION_LIFE_VERSIONS))) {
wait(yield());
@ -1036,7 +1046,7 @@ ACTOR Future<Void> postResolution(CommitBatchContext* self) {
if(firstMessage) {
self->toCommit.addTxsTag();
}
self->toCommit.addMessage(StringRef(m.begin(), m.size()), !firstMessage);
self->toCommit.writeMessage(StringRef(m.begin(), m.size()), !firstMessage);
firstMessage = false;
}
@ -1051,7 +1061,7 @@ ACTOR Future<Void> postResolution(CommitBatchContext* self) {
self->commitStartTime = now();
pProxyCommitData->lastStartCommit = self->commitStartTime;
self->loggingComplete = pProxyCommitData->logSystem->push( self->prevVersion, self->commitVersion, pProxyCommitData->committedVersion.get(), pProxyCommitData->minKnownCommittedVersion, self->toCommit, self->debugID );
self->loggingComplete = pProxyCommitData->logSystem->push( self->prevVersion, self->commitVersion, pProxyCommitData->committedVersion.get(), pProxyCommitData->minKnownCommittedVersion, self->toCommit, span.context, self->debugID );
if (!self->forceRecovery) {
ASSERT(pProxyCommitData->latestLocalCommitBatchLogging.get() == self->localBatchNumber-1);
@ -1073,6 +1083,7 @@ ACTOR Future<Void> postResolution(CommitBatchContext* self) {
ACTOR Future<Void> transactionLogging(CommitBatchContext* self) {
state ProxyCommitData* const pProxyCommitData = self->pProxyCommitData;
state Span span("MP:transactionLogging"_loc, self->span.context);
try {
choose {
@ -1108,6 +1119,7 @@ ACTOR Future<Void> transactionLogging(CommitBatchContext* self) {
ACTOR Future<Void> reply(CommitBatchContext* self) {
state ProxyCommitData* const pProxyCommitData = self->pProxyCommitData;
state Span span("MP:reply"_loc, self->span.context);
const Optional<UID>& debugID = self->debugID;
@ -1788,7 +1800,7 @@ ACTOR Future<Void> commitProxyServerCore(CommitProxyInterface proxy, MasterInter
Arena arena;
bool confChanges;
applyMetadataMutations(commitData, arena, Reference<ILogSystem>(), mutations,
applyMetadataMutations(SpanID(), commitData, arena, Reference<ILogSystem>(), mutations,
/* pToCommit= */ nullptr, confChanges,
/* popVersion= */ 0, /* initialCommit= */ true);
}

View File

@ -56,6 +56,7 @@ private:
std::vector<std::pair<StringRef, StringRef>> combinedWriteConflictRanges;
std::vector<struct ReadConflictRange> combinedReadConflictRanges;
bool* transactionConflictStatus;
// Stores the map: a transaction -> conflicted transactions' indices
std::map<int, VectorRef<int>>* conflictingKeyRangeMap;
Arena* resolveBatchReplyArena;

View File

@ -94,7 +94,7 @@ private:
};
template <class Threadlike, class Mutex, bool IS_CORO>
class WorkPool : public IThreadPool, public ReferenceCounted<WorkPool<Threadlike,Mutex,IS_CORO>> {
class WorkPool final : public IThreadPool, public ReferenceCounted<WorkPool<Threadlike, Mutex, IS_CORO>> {
struct Worker;
// Pool can survive the destruction of WorkPool while it waits for workers to terminate
@ -132,7 +132,7 @@ class WorkPool : public IThreadPool, public ReferenceCounted<WorkPool<Threadlike
Worker( Pool* pool, IThreadPoolReceiver* userData ) : pool(pool), userData(userData), stop(false) {
}
virtual void run() {
void run() override {
try {
if(!stop)
userData->init();
@ -199,8 +199,8 @@ public:
m_stopOnError = stopOnError( this );
}
virtual Future<Void> getError() { return pool->anyError.getResult(); }
virtual void addThread( IThreadPoolReceiver* userData ) {
Future<Void> getError() const override { return pool->anyError.getResult(); }
void addThread(IThreadPoolReceiver* userData) override {
checkError();
auto w = new Worker(pool.getPtr(), userData);
@ -217,7 +217,7 @@ public:
wait( delay(0, g_network->getCurrentTask() ));
w->start();
}
virtual void post( PThreadAction action ) {
void post(PThreadAction action) override {
checkError();
pool->queueLock.enter();
@ -230,7 +230,7 @@ public:
} else
pool->queueLock.leave();
}
virtual Future<Void> stop(Error const& e) {
Future<Void> stop(Error const& e) override {
if (error.code() == invalid_error_code) {
error = e;
}
@ -256,9 +256,9 @@ public:
return pool->allStopped.getResult();
}
virtual bool isCoro() const { return IS_CORO; }
virtual void addref() { ReferenceCounted<WorkPool>::addref(); }
virtual void delref() { ReferenceCounted<WorkPool>::delref(); }
bool isCoro() const override { return IS_CORO; }
void addref() override { ReferenceCounted<WorkPool>::addref(); }
void delref() override { ReferenceCounted<WorkPool>::delref(); }
};
typedef WorkPool<Coroutine, ThreadUnsafeSpinLock, true> CoroPool;

View File

@ -158,7 +158,7 @@ public:
};
// TeamCollection's server team info.
class TCTeamInfo : public ReferenceCounted<TCTeamInfo>, public IDataDistributionTeam {
class TCTeamInfo final : public ReferenceCounted<TCTeamInfo>, public IDataDistributionTeam {
vector< Reference<TCServerInfo> > servers;
vector<UID> serverIDs;
bool healthy;
@ -298,10 +298,10 @@ public:
void setHealthy(bool h) override { healthy = h; }
int getPriority() const override { return priority; }
void setPriority(int p) override { priority = p; }
virtual void addref() { ReferenceCounted<TCTeamInfo>::addref(); }
virtual void delref() { ReferenceCounted<TCTeamInfo>::delref(); }
void addref() override { ReferenceCounted<TCTeamInfo>::addref(); }
void delref() override { ReferenceCounted<TCTeamInfo>::delref(); }
virtual void addServers(const vector<UID> & servers) {
void addServers(const vector<UID>& servers) override {
serverIDs.reserve(servers.size());
for (int i = 0; i < servers.size(); i++) {
serverIDs.push_back(servers[i]);

View File

@ -82,7 +82,7 @@ struct RelocateData {
bool operator!=(const RelocateData& rhs) const { return !(*this == rhs); }
};
class ParallelTCInfo : public ReferenceCounted<ParallelTCInfo>, public IDataDistributionTeam {
class ParallelTCInfo final : public ReferenceCounted<ParallelTCInfo>, public IDataDistributionTeam {
vector<Reference<IDataDistributionTeam>> teams;
int64_t sum(std::function<int64_t(IDataDistributionTeam const&)> func) const {
@ -183,7 +183,7 @@ public:
return all([minRatio](IDataDistributionTeam const& team) { return team.hasHealthyAvailableSpace(minRatio); });
}
virtual Future<Void> updateStorageMetrics() {
Future<Void> updateStorageMetrics() override {
vector<Future<Void>> futures;
for (auto& team : teams) {
@ -228,8 +228,8 @@ public:
(*it)->setPriority(p);
}
}
virtual void addref() { ReferenceCounted<ParallelTCInfo>::addref(); }
virtual void delref() { ReferenceCounted<ParallelTCInfo>::delref(); }
void addref() override { ReferenceCounted<ParallelTCInfo>::addref(); }
void delref() override { ReferenceCounted<ParallelTCInfo>::delref(); }
void addServers(const std::vector<UID>& servers) override {
ASSERT(!teams.empty());

View File

@ -30,48 +30,48 @@
// This store is used in testing to let us simulate having much bigger disks than we actually
// have, in order to test really big databases.
struct KeyValueStoreCompressTestData : IKeyValueStore {
struct KeyValueStoreCompressTestData final : IKeyValueStore {
IKeyValueStore* store;
KeyValueStoreCompressTestData(IKeyValueStore* store) : store(store) {}
virtual bool canPipelineCommits() const override {return false;}
virtual Future<Void> getError() override { return store->getError(); }
virtual Future<Void> onClosed() override { return store->onClosed(); }
virtual void dispose() override {
bool canPipelineCommits() const override {return false;}
Future<Void> getError() override { return store->getError(); }
Future<Void> onClosed() override { return store->onClosed(); }
void dispose() override {
store->dispose();
delete this;
}
virtual void close() override {
void close() override {
store->close();
delete this;
}
virtual KeyValueStoreType getType() const override { return store->getType(); }
virtual StorageBytes getStorageBytes() const override { return store->getStorageBytes(); }
KeyValueStoreType getType() const override { return store->getType(); }
StorageBytes getStorageBytes() const override { return store->getStorageBytes(); }
virtual void set(KeyValueRef keyValue, const Arena* arena = nullptr) override {
void set(KeyValueRef keyValue, const Arena* arena = nullptr) override {
store->set( KeyValueRef( keyValue.key, pack(keyValue.value) ), arena );
}
virtual void clear(KeyRangeRef range, const Arena* arena = nullptr) override { store->clear(range, arena); }
virtual Future<Void> commit(bool sequential = false) { return store->commit(sequential); }
void clear(KeyRangeRef range, const Arena* arena = nullptr) override { store->clear(range, arena); }
Future<Void> commit(bool sequential = false) { return store->commit(sequential); }
virtual Future<Optional<Value>> readValue(KeyRef key, Optional<UID> debugID = Optional<UID>()) override {
Future<Optional<Value>> readValue(KeyRef key, Optional<UID> debugID = Optional<UID>()) override {
return doReadValue(store, key, debugID);
}
// Note that readValuePrefix doesn't do anything in this implementation of IKeyValueStore, so the "atomic bomb" problem is still
// present if you are using this storage interface, but this storage interface is not used by customers ever. However, if you want
// to try to test malicious atomic op workloads with compressed values for some reason, you will need to fix this.
virtual Future<Optional<Value>> readValuePrefix(KeyRef key, int maxLength,
Optional<UID> debugID = Optional<UID>()) override {
Future<Optional<Value>> readValuePrefix(KeyRef key, int maxLength,
Optional<UID> debugID = Optional<UID>()) override {
return doReadValuePrefix( store, key, maxLength, debugID );
}
// If rowLimit>=0, reads first rows sorted ascending, otherwise reads last rows sorted descending
// The total size of the returned value (less the last entry) will be less than byteLimit
virtual Future<Standalone<RangeResultRef>> readRange(KeyRangeRef keys, int rowLimit = 1 << 30,
int byteLimit = 1 << 30) override {
Future<Standalone<RangeResultRef>> readRange(KeyRangeRef keys, int rowLimit = 1 << 30,
int byteLimit = 1 << 30) override {
return doReadRange(store, keys, rowLimit, byteLimit);
}
@ -135,7 +135,6 @@ private:
memset(p, c, n);
return val;
}
};
IKeyValueStore* keyValueStoreCompressTestData(IKeyValueStore* store) {

View File

@ -33,15 +33,15 @@
extern bool noUnseed;
template <typename Container>
class KeyValueStoreMemory : public IKeyValueStore, NonCopyable {
class KeyValueStoreMemory final : public IKeyValueStore, NonCopyable {
public:
KeyValueStoreMemory(IDiskQueue* log, UID id, int64_t memoryLimit, KeyValueStoreType storeType, bool disableSnapshot,
bool replaceContent, bool exactRecovery);
// IClosable
virtual Future<Void> getError() override { return log->getError(); }
virtual Future<Void> onClosed() override { return log->onClosed(); }
virtual void dispose() override {
Future<Void> getError() override { return log->getError(); }
Future<Void> onClosed() override { return log->onClosed(); }
void dispose() override {
recovering.cancel();
log->dispose();
if (reserved_buffer != nullptr) {
@ -50,7 +50,7 @@ public:
}
delete this;
}
virtual void close() override {
void close() override {
recovering.cancel();
log->close();
if (reserved_buffer != nullptr) {
@ -61,11 +61,11 @@ public:
}
// IKeyValueStore
virtual KeyValueStoreType getType() const override { return type; }
KeyValueStoreType getType() const override { return type; }
virtual bool canPipelineCommits() const override { return false; }
virtual std::tuple<size_t, size_t, size_t> getSize() const override { return data.size(); }
std::tuple<size_t, size_t, size_t> getSize() const override { return data.size(); }
int64_t getAvailableSize() const {
int64_t residentSize = data.sumTo(data.end()) + queue.totalSize() + // doesn't account for overhead in queue
@ -74,7 +74,7 @@ public:
return memoryLimit - residentSize;
}
virtual StorageBytes getStorageBytes() const override {
StorageBytes getStorageBytes() const override {
StorageBytes diskQueueBytes = log->getStorageBytes();
// Try to bound how many in-memory bytes we might need to write to disk if we commit() now
@ -105,7 +105,7 @@ public:
committedWriteBytes += bytesWritten;
}
virtual void set(KeyValueRef keyValue, const Arena* arena) override {
void set(KeyValueRef keyValue, const Arena* arena) override {
// A commit that occurs with no available space returns Never, so we can throw out all modifications
if (getAvailableSize() <= 0) return;
@ -119,7 +119,7 @@ public:
}
}
virtual void clear(KeyRangeRef range, const Arena* arena) override {
void clear(KeyRangeRef range, const Arena* arena) override {
// A commit that occurs with no available space returns Never, so we can throw out all modifications
if (getAvailableSize() <= 0) return;
@ -133,7 +133,7 @@ public:
}
}
virtual Future<Void> commit(bool sequential) override {
Future<Void> commit(bool sequential) override {
if(getAvailableSize() <= 0) {
TraceEvent(SevError, "KeyValueStoreMemory_OutOfSpace", id);
return Never();
@ -186,7 +186,7 @@ public:
return c;
}
virtual Future<Optional<Value>> readValue(KeyRef key, Optional<UID> debugID = Optional<UID>()) override {
Future<Optional<Value>> readValue(KeyRef key, Optional<UID> debugID = Optional<UID>()) override {
if (recovering.isError()) throw recovering.getError();
if (!recovering.isReady()) return waitAndReadValue(this, key);
@ -195,8 +195,8 @@ public:
return Optional<Value>(it.getValue());
}
virtual Future<Optional<Value>> readValuePrefix(KeyRef key, int maxLength,
Optional<UID> debugID = Optional<UID>()) override {
Future<Optional<Value>> readValuePrefix(KeyRef key, int maxLength,
Optional<UID> debugID = Optional<UID>()) override {
if (recovering.isError()) throw recovering.getError();
if (!recovering.isReady()) return waitAndReadValuePrefix(this, key, maxLength);
@ -212,8 +212,8 @@ public:
// If rowLimit>=0, reads first rows sorted ascending, otherwise reads last rows sorted descending
// The total size of the returned value (less the last entry) will be less than byteLimit
virtual Future<Standalone<RangeResultRef>> readRange(KeyRangeRef keys, int rowLimit = 1 << 30,
int byteLimit = 1 << 30) override {
Future<Standalone<RangeResultRef>> readRange(KeyRangeRef keys, int rowLimit = 1 << 30,
int byteLimit = 1 << 30) override {
if(recovering.isError()) throw recovering.getError();
if (!recovering.isReady()) return waitAndReadRange(this, keys, rowLimit, byteLimit);
@ -255,13 +255,13 @@ public:
return result;
}
virtual void resyncLog() override {
void resyncLog() override {
ASSERT(recovering.isReady());
resetSnapshot = true;
log_op(OpSnapshotAbort, StringRef(), StringRef());
}
virtual void enableSnapshot() override { disableSnapshot = false; }
void enableSnapshot() override { disableSnapshot = false; }
private:
enum OpType {

View File

@ -72,9 +72,7 @@ struct RocksDBKeyValueStore : IKeyValueStore {
std::string path;
ThreadReturnPromise<Void> done;
double getTimeEstimate() {
return SERVER_KNOBS->COMMIT_TIME_ESTIMATE;
}
double getTimeEstimate() const override { return SERVER_KNOBS->COMMIT_TIME_ESTIMATE; }
};
void action(OpenAction& a) {
std::vector<rocksdb::ColumnFamilyDescriptor> defaultCF = { rocksdb::ColumnFamilyDescriptor{
@ -92,7 +90,7 @@ struct RocksDBKeyValueStore : IKeyValueStore {
struct CommitAction : TypedAction<Writer, CommitAction> {
std::unique_ptr<rocksdb::WriteBatch> batchToCommit;
ThreadReturnPromise<Void> done;
double getTimeEstimate() override { return SERVER_KNOBS->COMMIT_TIME_ESTIMATE; }
double getTimeEstimate() const override { return SERVER_KNOBS->COMMIT_TIME_ESTIMATE; }
};
void action(CommitAction& a) {
rocksdb::WriteOptions options;
@ -111,7 +109,7 @@ struct RocksDBKeyValueStore : IKeyValueStore {
std::string path;
bool deleteOnClose;
CloseAction(std::string path, bool deleteOnClose) : path(path), deleteOnClose(deleteOnClose) {}
double getTimeEstimate() override { return SERVER_KNOBS->COMMIT_TIME_ESTIMATE; }
double getTimeEstimate() const override { return SERVER_KNOBS->COMMIT_TIME_ESTIMATE; }
};
void action(CloseAction& a) {
auto s = db->Close();
@ -141,7 +139,7 @@ struct RocksDBKeyValueStore : IKeyValueStore {
ReadValueAction(KeyRef key, Optional<UID> debugID)
: key(key), debugID(debugID)
{}
double getTimeEstimate() override { return SERVER_KNOBS->READ_VALUE_TIME_ESTIMATE; }
double getTimeEstimate() const override { return SERVER_KNOBS->READ_VALUE_TIME_ESTIMATE; }
};
void action(ReadValueAction& a) {
Optional<TraceBatch> traceBatch;
@ -171,7 +169,7 @@ struct RocksDBKeyValueStore : IKeyValueStore {
Optional<UID> debugID;
ThreadReturnPromise<Optional<Value>> result;
ReadValuePrefixAction(Key key, int maxLength, Optional<UID> debugID) : key(key), maxLength(maxLength), debugID(debugID) {};
virtual double getTimeEstimate() { return SERVER_KNOBS->READ_VALUE_TIME_ESTIMATE; }
double getTimeEstimate() const override { return SERVER_KNOBS->READ_VALUE_TIME_ESTIMATE; }
};
void action(ReadValuePrefixAction& a) {
rocksdb::PinnableSlice value;
@ -201,7 +199,7 @@ struct RocksDBKeyValueStore : IKeyValueStore {
int rowLimit, byteLimit;
ThreadReturnPromise<Standalone<RangeResultRef>> result;
ReadRangeAction(KeyRange keys, int rowLimit, int byteLimit) : keys(keys), rowLimit(rowLimit), byteLimit(byteLimit) {}
virtual double getTimeEstimate() { return SERVER_KNOBS->READ_RANGE_TIME_ESTIMATE; }
double getTimeEstimate() const override { return SERVER_KNOBS->READ_RANGE_TIME_ESTIMATE; }
};
void action(ReadRangeAction& a) {
Standalone<RangeResultRef> result;

View File

@ -1443,26 +1443,26 @@ struct ThreadSafeCounter {
operator int64_t() const { return counter; }
};
class KeyValueStoreSQLite : public IKeyValueStore {
class KeyValueStoreSQLite final : public IKeyValueStore {
public:
virtual void dispose() override { doClose(this, true); }
virtual void close() override { doClose(this, false); }
void dispose() override { doClose(this, true); }
void close() override { doClose(this, false); }
virtual Future<Void> getError() override { return delayed(readThreads->getError() || writeThread->getError()); }
virtual Future<Void> onClosed() override { return stopped.getFuture(); }
Future<Void> getError() override { return delayed(readThreads->getError() || writeThread->getError()); }
Future<Void> onClosed() override { return stopped.getFuture(); }
virtual KeyValueStoreType getType() const override { return type; }
virtual StorageBytes getStorageBytes() const override;
virtual bool canPipelineCommits() const override { return false; }
KeyValueStoreType getType() const override { return type; }
StorageBytes getStorageBytes() const override;
bool canPipelineCommits() const override { return false; }
virtual void set(KeyValueRef keyValue, const Arena* arena = nullptr) override;
virtual void clear(KeyRangeRef range, const Arena* arena = nullptr) override;
virtual Future<Void> commit(bool sequential = false) override;
void set(KeyValueRef keyValue, const Arena* arena = nullptr) override;
void clear(KeyRangeRef range, const Arena* arena = nullptr) override;
Future<Void> commit(bool sequential = false) override;
virtual Future<Optional<Value>> readValue(KeyRef key, Optional<UID> debugID) override;
virtual Future<Optional<Value>> readValuePrefix(KeyRef key, int maxLength, Optional<UID> debugID) override;
virtual Future<Standalone<RangeResultRef>> readRange(KeyRangeRef keys, int rowLimit = 1 << 30,
int byteLimit = 1 << 30) override;
Future<Optional<Value>> readValue(KeyRef key, Optional<UID> debugID) override;
Future<Optional<Value>> readValuePrefix(KeyRef key, int maxLength, Optional<UID> debugID) override;
Future<Standalone<RangeResultRef>> readRange(KeyRangeRef keys, int rowLimit = 1 << 30,
int byteLimit = 1 << 30) override;
KeyValueStoreSQLite(std::string const& filename, UID logID, KeyValueStoreType type, bool checkChecksums, bool checkIntegrity);
~KeyValueStoreSQLite();
@ -1506,9 +1506,7 @@ private:
ppReadCursor->clear();
}
virtual void init() {
conn.open(false);
}
void init() override { conn.open(false); }
Reference<ReadCursor> getCursor() {
Reference<ReadCursor> cursor = *ppReadCursor;
@ -1524,7 +1522,7 @@ private:
Optional<UID> debugID;
ThreadReturnPromise<Optional<Value>> result;
ReadValueAction(Key key, Optional<UID> debugID) : key(key), debugID(debugID) {};
virtual double getTimeEstimate() { return SERVER_KNOBS->READ_VALUE_TIME_ESTIMATE; }
double getTimeEstimate() const override { return SERVER_KNOBS->READ_VALUE_TIME_ESTIMATE; }
};
void action( ReadValueAction& rv ) {
//double t = timer();
@ -1538,13 +1536,14 @@ private:
//if (t >= 1.0) TraceEvent("ReadValueActionSlow",dbgid).detail("Elapsed", t);
}
struct ReadValuePrefixAction : TypedAction<Reader, ReadValuePrefixAction>, FastAllocated<ReadValuePrefixAction> {
struct ReadValuePrefixAction final : TypedAction<Reader, ReadValuePrefixAction>,
FastAllocated<ReadValuePrefixAction> {
Key key;
int maxLength;
Optional<UID> debugID;
ThreadReturnPromise<Optional<Value>> result;
ReadValuePrefixAction(Key key, int maxLength, Optional<UID> debugID) : key(key), maxLength(maxLength), debugID(debugID) {};
virtual double getTimeEstimate() { return SERVER_KNOBS->READ_VALUE_TIME_ESTIMATE; }
double getTimeEstimate() const override { return SERVER_KNOBS->READ_VALUE_TIME_ESTIMATE; }
};
void action( ReadValuePrefixAction& rv ) {
//double t = timer();
@ -1563,7 +1562,7 @@ private:
int rowLimit, byteLimit;
ThreadReturnPromise<Standalone<RangeResultRef>> result;
ReadRangeAction(KeyRange keys, int rowLimit, int byteLimit) : keys(keys), rowLimit(rowLimit), byteLimit(byteLimit) {}
virtual double getTimeEstimate() { return SERVER_KNOBS->READ_RANGE_TIME_ESTIMATE; }
double getTimeEstimate() const override { return SERVER_KNOBS->READ_RANGE_TIME_ESTIMATE; }
};
void action( ReadRangeAction& rr ) {
rr.result.send( getCursor()->get().getRange(rr.keys, rr.rowLimit, rr.byteLimit) );
@ -1606,7 +1605,7 @@ private:
delete cursor;
TraceEvent("KVWriterDestroyed", dbgid);
}
virtual void init() {
void init() override {
if(checkAllChecksumsOnOpen) {
if(conn.checkAllPageChecksums() != 0) {
// It's not strictly necessary to discard the file immediately if a page checksum error is found
@ -1639,7 +1638,7 @@ private:
struct InitAction : TypedAction<Writer, InitAction>, FastAllocated<InitAction> {
ThreadReturnPromise<Void> result;
virtual double getTimeEstimate() { return 0; }
double getTimeEstimate() const override { return 0; }
};
void action(InitAction& a) {
// init() has already been called
@ -1649,7 +1648,7 @@ private:
struct SetAction : TypedAction<Writer, SetAction>, FastAllocated<SetAction> {
KeyValue kv;
SetAction( KeyValue kv ) : kv(kv) {}
virtual double getTimeEstimate() { return SERVER_KNOBS->SET_TIME_ESTIMATE; }
double getTimeEstimate() const override { return SERVER_KNOBS->SET_TIME_ESTIMATE; }
};
void action(SetAction& a) {
double s = now();
@ -1664,7 +1663,7 @@ private:
struct ClearAction : TypedAction<Writer, ClearAction>, FastAllocated<ClearAction> {
KeyRange range;
ClearAction( KeyRange range ) : range(range) {}
virtual double getTimeEstimate() { return SERVER_KNOBS->CLEAR_TIME_ESTIMATE; }
double getTimeEstimate() const override { return SERVER_KNOBS->CLEAR_TIME_ESTIMATE; }
};
void action(ClearAction& a) {
double s = now();
@ -1679,7 +1678,7 @@ private:
double issuedTime;
ThreadReturnPromise<Void> result;
CommitAction() : issuedTime(now()) {}
virtual double getTimeEstimate() { return SERVER_KNOBS->COMMIT_TIME_ESTIMATE; }
double getTimeEstimate() const override { return SERVER_KNOBS->COMMIT_TIME_ESTIMATE; }
};
void action(CommitAction& a) {
double t1 = now();
@ -1746,7 +1745,7 @@ private:
struct SpringCleaningAction : TypedAction<Writer, SpringCleaningAction>, FastAllocated<SpringCleaningAction> {
ThreadReturnPromise<SpringCleaningWorkPerformed> result;
virtual double getTimeEstimate() {
double getTimeEstimate() const override {
return std::max(SERVER_KNOBS->SPRING_CLEANING_LAZY_DELETE_TIME_ESTIMATE, SERVER_KNOBS->SPRING_CLEANING_VACUUM_TIME_ESTIMATE);
}
};

View File

@ -49,7 +49,7 @@ struct LatencyBandConfig {
Optional<int> maxReadBytes;
Optional<int> maxKeySelectorOffset;
virtual void fromJson(JSONDoc json);
void fromJson(JSONDoc json) override;
template <class Ar>
void serialize(Ar& ar) {
@ -57,13 +57,13 @@ struct LatencyBandConfig {
}
protected:
virtual bool isEqual(RequestConfig const& r) const;
bool isEqual(RequestConfig const& r) const override;
};
struct CommitConfig : RequestConfig {
Optional<int> maxCommitBytes;
virtual void fromJson(JSONDoc json);
void fromJson(JSONDoc json) override;
template <class Ar>
void serialize(Ar& ar) {
@ -71,7 +71,7 @@ struct LatencyBandConfig {
}
protected:
virtual bool isEqual(RequestConfig const& r) const;
bool isEqual(RequestConfig const& r) const override;
};
GrvConfig grvConfig;

View File

@ -24,11 +24,13 @@
#include <set>
#include <vector>
#include "fdbserver/SpanContextMessage.h"
#include "fdbserver/TLogInterface.h"
#include "fdbserver/WorkerInterface.actor.h"
#include "fdbclient/DatabaseConfiguration.h"
#include "fdbserver/MutationTracking.h"
#include "flow/IndexedSet.h"
#include "flow/Knobs.h"
#include "fdbrpc/ReplicationPolicy.h"
#include "fdbrpc/Locality.h"
#include "fdbrpc/Replication.h"
@ -678,7 +680,7 @@ struct ILogSystem {
// Never returns normally, but throws an error if the subsystem stops working
//Future<Void> push( UID bundle, int64_t seq, VectorRef<TaggedMessageRef> messages );
virtual Future<Version> push( Version prevVersion, Version version, Version knownCommittedVersion, Version minKnownCommittedVersion, struct LogPushData& data, Optional<UID> debugID = Optional<UID>() ) = 0;
virtual Future<Version> push( Version prevVersion, Version version, Version knownCommittedVersion, Version minKnownCommittedVersion, struct LogPushData& data, SpanID const& spanContext, Optional<UID> debugID = Optional<UID>() ) = 0;
// Waits for the version number of the bundle (in this epoch) to be prevVersion (i.e. for all pushes ordered earlier)
// Puts the given messages into the bundle, each with the given tags, and with message versions (version, 0) - (version, N)
// Changes the version number of the bundle to be version (unblocking the next push)
@ -828,6 +830,18 @@ struct CompareFirst {
}
};
// Structure to store serialized mutations sent from the proxy to the
// transaction logs. The serialization repeats with the following format:
//
// +----------------------+ +----------------------+ +----------+ +----------------+ +----------------------+
// | Message size | | Subsequence | | # of tags| | Tag | . . . . | Mutation |
// +----------------------+ +----------------------+ +----------+ +----------------+ +----------------------+
// <------- 32 bits ------> <------- 32 bits ------> <- 16 bits-> <---- 24 bits ---> <---- variable bits --->
//
// `Mutation` can be a serialized MutationRef or a special metadata message
// such as LogProtocolMessage or SpanContextMessage. The type of `Mutation` is
// uniquely identified by its first byte -- a value from MutationRef::Type.
//
struct LogPushData : NonCopyable {
// Log subsequences have to start at 1 (the MergedPeekCursor relies on this to make sure we never have !hasMessage() in the middle of data for a version
@ -859,7 +873,15 @@ struct LogPushData : NonCopyable {
next_message_tags.insert(next_message_tags.end(), tags.begin(), tags.end());
}
void addMessage( StringRef rawMessageWithoutLength, bool usePreviousLocations ) {
// Add transaction info to be written before the first mutation in the transaction.
void addTransactionInfo(SpanID const& context) {
TEST(!spanContext.isValid()); // addTransactionInfo with invalid SpanID
spanContext = context;
transactionSubseq = 0;
writtenLocations.clear();
}
void writeMessage( StringRef rawMessageWithoutLength, bool usePreviousLocations ) {
if( !usePreviousLocations ) {
prev_tags.clear();
if(logSystem->hasRemoteLogs()) {
@ -875,15 +897,16 @@ struct LogPushData : NonCopyable {
uint32_t subseq = this->subsequence++;
uint32_t msgsize = rawMessageWithoutLength.size() + sizeof(subseq) + sizeof(uint16_t) + sizeof(Tag)*prev_tags.size();
for(int loc : msg_locations) {
messagesWriter[loc] << msgsize << subseq << uint16_t(prev_tags.size());
BinaryWriter& wr = messagesWriter[loc];
wr << msgsize << subseq << uint16_t(prev_tags.size());
for(auto& tag : prev_tags)
messagesWriter[loc] << tag;
messagesWriter[loc].serializeBytes(rawMessageWithoutLength);
wr << tag;
wr.serializeBytes(rawMessageWithoutLength);
}
}
template <class T>
void addTypedMessage(T const& item, bool allLocations = false) {
void writeTypedMessage(T const& item, bool metadataMessage = false, bool allLocations = false) {
prev_tags.clear();
if(logSystem->hasRemoteLogs()) {
prev_tags.push_back( logSystem->getRandomRouterTag() );
@ -895,12 +918,31 @@ struct LogPushData : NonCopyable {
logSystem->getPushLocations(prev_tags, msg_locations, allLocations);
BinaryWriter bw(AssumeVersion(g_network->protocolVersion()));
// Metadata messages should be written before span information. If this
// isn't a metadata message, make sure all locations have had
// transaction info written to them. Mutations may have different sets
// of tags, so it is necessary to check all tag locations each time a
// mutation is written.
if (!metadataMessage) {
// If span information hasn't been written for this transaction yet,
// generate a subsequence value for the message.
if (!transactionSubseq) {
transactionSubseq = this->subsequence++;
}
for (int loc : msg_locations) {
writeTransactionInfo(loc);
}
}
uint32_t subseq = this->subsequence++;
bool first = true;
int firstOffset=-1, firstLength=-1;
for(int loc : msg_locations) {
BinaryWriter& wr = messagesWriter[loc];
if (first) {
BinaryWriter& wr = messagesWriter[loc];
firstOffset = wr.getLength();
wr << uint32_t(0) << subseq << uint16_t(prev_tags.size());
for(auto& tag : prev_tags)
@ -911,7 +953,6 @@ struct LogPushData : NonCopyable {
DEBUG_TAGS_AND_MESSAGE("ProxyPushLocations", invalidVersion, StringRef(((uint8_t*)wr.getData() + firstOffset), firstLength)).detail("PushLocations", msg_locations);
first = false;
} else {
BinaryWriter& wr = messagesWriter[loc];
BinaryWriter& from = messagesWriter[msg_locations[0]];
wr.serializeBytes( (uint8_t*)from.getData() + firstOffset, firstLength );
}
@ -929,7 +970,39 @@ private:
std::vector<Tag> prev_tags;
std::vector<BinaryWriter> messagesWriter;
std::vector<int> msg_locations;
// Stores message locations that have had span information written to them
// for the current transaction. Adding transaction info will reset this
// field.
std::unordered_set<int> writtenLocations;
uint32_t subsequence;
// Store transaction subsequence separately, as multiple mutations may need
// to write transaction info. This can happen if later mutations in a
// transaction need to write to a different location than earlier
// mutations.
uint32_t transactionSubseq;
SpanID spanContext;
// Writes transaction info to the message stream for the given location if
// it has not already been written (for the current transaction).
void writeTransactionInfo(int location) {
if (!FLOW_KNOBS->WRITE_TRACING_ENABLED) {
return;
}
if (writtenLocations.count(location) == 0) {
writtenLocations.insert(location);
BinaryWriter& wr = messagesWriter[location];
SpanContextMessage contextMessage(spanContext);
int offset = wr.getLength();
wr << uint32_t(0) << transactionSubseq << uint16_t(prev_tags.size());
for(auto& tag : prev_tags)
wr << tag;
wr << contextMessage;
int length = wr.getLength() - offset;
*(uint32_t*)((uint8_t*)wr.getData() + offset) = length - sizeof(uint32_t);
}
}
};
#endif

View File

@ -21,6 +21,7 @@
#include <vector>
#include "fdbserver/MutationTracking.h"
#include "fdbserver/LogProtocolMessage.h"
#include "fdbserver/SpanContextMessage.h"
#if defined(FDB_CLEAN_BUILD) && MUTATION_TRACKING_ENABLED
#error "You cannot use mutation tracking in a clean/release build."
@ -71,6 +72,10 @@ TraceEvent debugTagsAndMessageEnabled( const char* context, Version version, Str
LogProtocolMessage lpm;
br >> lpm;
rdr.setProtocolVersion(br.protocolVersion());
} else if (SpanContextMessage::startsSpanContextMessage(mutationType)) {
BinaryReader br(mutationData, AssumeVersion(rdr.protocolVersion()));
SpanContextMessage scm;
br >> scm;
} else {
MutationRef m;
BinaryReader br(mutationData, AssumeVersion(rdr.protocolVersion()));

View File

@ -20,6 +20,7 @@
#include "flow/ActorCollection.h"
#include "fdbclient/NativeAPI.actor.h"
#include "fdbserver/ConflictSet.h"
#include "fdbserver/ResolverInterface.h"
#include "fdbserver/MasterInterface.h"
#include "fdbserver/WorkerInterface.actor.h"
@ -27,9 +28,9 @@
#include "fdbserver/Knobs.h"
#include "fdbserver/ServerDBInfo.h"
#include "fdbserver/Orderer.actor.h"
#include "fdbserver/ConflictSet.h"
#include "fdbserver/StorageMetrics.h"
#include "fdbclient/SystemData.h"
#include "flow/actorcompiler.h" // This must be the last #include.
namespace {
@ -104,6 +105,7 @@ ACTOR Future<Void> resolveBatch(
ResolveTransactionBatchRequest req)
{
state Optional<UID> debugID;
state Span span("R:resolveBatch"_loc, req.spanContext);
// The first request (prevVersion < 0) comes from the master
state NetworkAddress proxyAddress = req.prevVersion >= 0 ? req.reply.getEndpoint().getPrimaryAddress() : NetworkAddress();

View File

@ -32,7 +32,7 @@
#include "fdbclient/FDBTypes.h"
#include "fdbclient/KeyRangeMap.h"
#include "fdbclient/SystemData.h"
#include "fdbserver/Knobs.h"
#include "fdbserver/ConflictSet.h"
using std::max;
using std::min;
@ -46,16 +46,9 @@ static inline int skfastrand() {
return g_seed;
}
void setAffinity(int proc);
PerfDoubleCounter g_buildTest("Build", skc), g_add("Add", skc), g_add_sort("A.Sort", skc),
g_detectConflicts("Detect", skc), g_sort("D.Sort", skc), g_combine("D.Combine", skc),
g_checkRead("D.CheckRead", skc), g_checkBatch("D.CheckIntraBatch", skc), g_merge("D.MergeWrite", skc),
g_merge_launch("D.Merge.Launch", skc), g_merge_fork("D.Merge.Fork", skc),
g_merge_start_var("D.Merge.StartVariance", skc), g_merge_end_var("D.Merge.EndVariance", skc),
g_merge_run_var("D.Merge.RunVariance", skc), g_merge_run_shortest("D.Merge.ShortestRun", skc),
g_merge_run_longest("D.Merge.LongestRun", skc), g_merge_run_total("D.Merge.TotalRun", skc),
g_merge_join("D.Merge.Join", skc), g_removeBefore("D.RemoveBefore", skc);
PerfDoubleCounter g_buildTest("Build", skc), g_add("Add", skc), g_detectConflicts("Detect", skc), g_sort("D.Sort", skc),
g_combine("D.Combine", skc), g_checkRead("D.CheckRead", skc), g_checkBatch("D.CheckIntraBatch", skc),
g_merge("D.MergeWrite", skc), g_removeBefore("D.RemoveBefore", skc);
static force_inline int compare(const StringRef& a, const StringRef& b) {
int c = memcmp(a.begin(), b.begin(), min(a.size(), b.size()));
@ -88,7 +81,7 @@ struct KeyInfo {
bool write;
int transaction;
KeyInfo(){};
KeyInfo() = default;
KeyInfo(StringRef key, bool begin, bool write, int transaction, int* pIndex)
: key(key), begin(begin), write(write), transaction(transaction), pIndex(pIndex) {}
};
@ -229,17 +222,25 @@ private:
return level;
}
// Represent a node in the SkipList. The node has multiple (i.e., level) pointers to
// other nodes, and keeps a record of the max versions for each level.
struct Node {
int level() { return nPointers - 1; }
uint8_t* value() { return end() + nPointers * (sizeof(Node*) + sizeof(Version)); }
int length() { return valueLength; }
Node* getNext(int i) { return *((Node**)end() + i); }
void setNext(int i, Node* n) { *((Node**)end() + i) = n; }
// Returns the next node pointer at the given level.
Node* getNext(int level) { return *((Node**)end() + level); }
// Sets the next node pointer at the given level.
void setNext(int level, Node* n) { *((Node**)end() + level) = n; }
// Returns the max version at the given level.
Version getMaxVersion(int i) { return ((Version*)(end() + nPointers * sizeof(Node*)))[i]; }
// Sets the max version at the given level.
void setMaxVersion(int i, Version v) { ((Version*)(end() + nPointers * sizeof(Node*)))[i] = v; }
// Return a node with initialized value but uninitialized pointers
// Memory layout: *this, (level+1) Node*, (level+1) Version, value
static Node* create(const StringRef& value, int level) {
int nodeSize = sizeof(Node) + value.size() + (level + 1) * (sizeof(Node*) + sizeof(Version));
@ -289,6 +290,7 @@ private:
private:
int getNodeSize() { return sizeof(Node) + valueLength + nPointers * (sizeof(Node*) + sizeof(Version)); }
// Returns the first Node* pointer
uint8_t* end() { return (uint8_t*)(this + 1); }
int nPointers, valueLength;
};
@ -311,16 +313,19 @@ private:
}
public:
// Points the location (i.e., Node*) that value would appear in the SkipList.
// If the "value" is in the list, then finger[0] points to that exact node;
// otherwise, the finger points to Nodes that the value should be inserted before.
// Note the SkipList organizes all nodes at level 0, higher levels contain jump pointers.
struct Finger {
Node* finger[MaxLevels]; // valid for levels >= level
int level;
Node* x;
Node* alreadyChecked;
int level = MaxLevels;
Node* x = nullptr;
Node* alreadyChecked = nullptr;
StringRef value;
Finger() : level(MaxLevels), x(nullptr), alreadyChecked(nullptr) {}
Finger(Node* header, const StringRef& ptr) : value(ptr), level(MaxLevels), alreadyChecked(nullptr), x(header) {}
Finger() = default;
Finger(Node* header, const StringRef& ptr) : value(ptr), x(header) {}
void init(const StringRef& value, Node* header) {
this->value = value;
@ -337,6 +342,8 @@ public:
}
// pre: !finished()
// Advances the pointer at the current level to a Node that's >= finger's value
// if possible; or move to the next level (i.e., level--).
// Returns true if we have advanced to the next level
force_inline bool advance() {
Node* next = x->getNext(level - 1);
@ -360,6 +367,7 @@ public:
force_inline bool finished() { return level == 0; }
// Returns if the finger value is found in the SkipList.
force_inline Node* found() const {
// valid after finished returns true
Node* n = finger[0]->getNext(0); // or alreadyChecked, but that is more easily invalidated
@ -375,7 +383,8 @@ public:
}
};
int count() {
// Returns the total number of nodes in the list.
int count() const {
int count = 0;
Node* x = header->getNext(0);
while (x) {
@ -453,6 +462,7 @@ public:
// partitions. In between, operations on each partition must not touch any keys outside
// the partition. Specifically, the partition to the left of 'key' must not have a range
// [...,key) inserted, since that would insert an entry at 'key'.
// Note this function is not used.
void partition(StringRef* begin, int splitCount, SkipList* output) {
for (int i = splitCount - 1; i >= 0; i--) {
Finger f(header, begin[i]);
@ -462,6 +472,8 @@ public:
swap(output[0]);
}
// Concatenates multiple SkipList objects into one and stores in input[0].
// Note this function is not used.
void concatenate(SkipList* input, int count) {
std::vector<Finger> ends(count - 1);
for (int i = 0; i < ends.size(); i++) input[i].getEnd(ends[i]);
@ -692,6 +704,7 @@ private:
}
};
// Splits the SkipLists so that those after finger is moved to "right".
void split(const Finger& f, SkipList& right) {
ASSERT(!right.header->getNext(0)); // right must be empty
right.header->setMaxVersion(0, f.finger[0]->getMaxVersion(0));
@ -701,6 +714,7 @@ private:
}
}
// Sets end's finger to the last nodes at all levels.
void getEnd(Finger& end) {
Node* node = header;
for (int l = MaxLevels - 1; l >= 0; l--) {
@ -712,21 +726,6 @@ private:
}
};
StringRef setK(Arena& arena, int i) {
char t[sizeof(i)];
*(int*)t = i;
const int keySize = 16;
char* ss = new (arena) char[keySize];
for (int c = 0; c < keySize - sizeof(i); c++) ss[c] = '.';
for (int c = 0; c < sizeof(i); c++) ss[c + keySize - sizeof(i)] = t[sizeof(i) - 1 - c];
return StringRef((const uint8_t*)ss, keySize);
}
#include "fdbserver/ConflictSet.h"
struct ConflictSet {
ConflictSet() : oldestVersion(0), removalKey(makeString(0)) {}
~ConflictSet() {}
@ -761,7 +760,7 @@ struct TransactionInfo {
};
void ConflictBatch::addTransaction(const CommitTransactionRef& tr) {
int t = transactionCount++;
const int t = transactionCount++;
Arena& arena = transactionInfo.arena();
TransactionInfo* info = new (arena) TransactionInfo;
@ -774,7 +773,6 @@ void ConflictBatch::addTransaction(const CommitTransactionRef& tr) {
info->readRanges.resize(arena, tr.read_conflict_ranges.size());
info->writeRanges.resize(arena, tr.write_conflict_ranges.size());
std::vector<KeyInfo>& points = this->points;
for (int r = 0; r < tr.read_conflict_ranges.size(); r++) {
const KeyRangeRef& range = tr.read_conflict_ranges[r];
points.emplace_back(range.begin, true, false, t, &info->readRanges[r].first);
@ -791,7 +789,7 @@ void ConflictBatch::addTransaction(const CommitTransactionRef& tr) {
}
}
this->transactionInfo.push_back(arena, info);
transactionInfo.push_back(arena, info);
}
// SOMEDAY: This should probably be replaced with a roaring bitmap.
@ -891,7 +889,7 @@ void ConflictBatch::detectConflicts(Version now, Version newOldestVersion, std::
}
void ConflictBatch::checkReadConflictRanges() {
if (!combinedReadConflictRanges.size()) return;
if (combinedReadConflictRanges.empty()) return;
cs->versionHistory.detectConflicts(&combinedReadConflictRanges[0], combinedReadConflictRanges.size(),
transactionConflictStatus);
@ -899,35 +897,34 @@ void ConflictBatch::checkReadConflictRanges() {
void ConflictBatch::addConflictRanges(Version now, std::vector<std::pair<StringRef, StringRef>>::iterator begin,
std::vector<std::pair<StringRef, StringRef>>::iterator end, SkipList* part) {
int count = end - begin;
static_assert(sizeof(begin[0]) == sizeof(StringRef) * 2,
const int count = end - begin;
static_assert(sizeof(*begin) == sizeof(StringRef) * 2,
"Write Conflict Range type not convertible to two StringPtrs");
const StringRef* strings = reinterpret_cast<const StringRef*>(&*begin);
int stringCount = count * 2;
const int stringCount = count * 2;
static const int stripeSize = 16;
const int stripeSize = 16;
SkipList::Finger fingers[stripeSize];
int temp[stripeSize];
int stripes = (stringCount + stripeSize - 1) / stripeSize;
int ss = stringCount - (stripes - 1) * stripeSize;
for (int s = stripes - 1; s >= 0; s--) {
part->find(&strings[s * stripeSize], &fingers[0], temp, ss);
part->addConflictRanges(&fingers[0], ss / 2, now);
part->find(&strings[s * stripeSize], fingers, temp, ss);
part->addConflictRanges(fingers, ss / 2, now);
ss = stripeSize;
}
}
void ConflictBatch::mergeWriteConflictRanges(Version now) {
if (!combinedWriteConflictRanges.size()) return;
if (combinedWriteConflictRanges.empty()) return;
addConflictRanges(now, combinedWriteConflictRanges.begin(), combinedWriteConflictRanges.end(), &cs->versionHistory);
}
void ConflictBatch::combineWriteConflictRanges() {
int activeWriteCount = 0;
for (int i = 0; i < points.size(); i++) {
KeyInfo& point = points[i];
for (const KeyInfo& point : points) {
if (point.write && !transactionConflictStatus[point.transaction]) {
if (point.begin) {
activeWriteCount++;
@ -940,6 +937,20 @@ void ConflictBatch::combineWriteConflictRanges() {
}
}
namespace {
StringRef setK(Arena& arena, int i) {
char t[sizeof(i)];
*(int*)t = i;
const int keySize = 16;
char* ss = new (arena) char[keySize];
for (int c = 0; c < keySize - sizeof(i); c++) ss[c] = '.';
for (int c = 0; c < sizeof(i); c++) ss[c + keySize - sizeof(i)] = t[sizeof(i) - 1 - c];
return StringRef((const uint8_t*)ss, keySize);
}
void miniConflictSetTest() {
for (int i = 0; i < 2000000; i++) {
int size = 64 * 5; // Also run 64*64*5 to test multiple words of andValues and orValues
@ -991,6 +1002,7 @@ void operatorLessThanTest() {
ASSERT(!(a == b));
}
}
} // namespace
void skipListTest() {
printf("Skip list test\n");
@ -1007,21 +1019,22 @@ void skipListTest() {
Arena testDataArena;
VectorRef<VectorRef<KeyRangeRef>> testData;
testData.resize(testDataArena, 500);
std::vector<std::vector<uint8_t>> success(testData.size());
std::vector<std::vector<uint8_t>> success2(testData.size());
for (int i = 0; i < testData.size(); i++) {
testData[i].resize(testDataArena, 5000);
success[i].assign(testData[i].size(), false);
success2[i].assign(testData[i].size(), false);
for (int j = 0; j < testData[i].size(); j++) {
const int batches = 500; // deterministicRandom()->randomInt(500, 5000);
const int data_per_batch = 5000;
testData.resize(testDataArena, batches);
std::vector<std::vector<uint8_t>> success(batches);
std::vector<std::vector<uint8_t>> success2(batches);
for (int i = 0; i < batches; i++) {
testData[i].resize(testDataArena, data_per_batch);
success[i].assign(data_per_batch, false);
success2[i].assign(data_per_batch, false);
for (int j = 0; j < data_per_batch; j++) {
int key = deterministicRandom()->randomInt(0, 20000000);
int key2 = key + 1 + deterministicRandom()->randomInt(0, 10);
testData[i][j] = KeyRangeRef(setK(testDataArena, key), setK(testDataArena, key2));
}
}
printf("Test data generated (%d)\n", deterministicRandom()->randomInt(0, 100000));
printf(" %d batches, %d/batch\n", testData.size(), testData[0].size());
printf("Test data generated: %d batches, %d/batch\n", batches, data_per_batch);
printf("Running\n");
@ -1029,23 +1042,24 @@ void skipListTest() {
int cranges = 0, tcount = 0;
start = timer();
std::vector<std::vector<int>> nonConflict(testData.size());
for (int i = 0; i < testData.size(); i++) {
std::vector<std::vector<int>> nonConflict(batches);
Version version = 0;
for (const auto& data : testData) {
Arena buf;
std::vector<CommitTransactionRef> trs;
double t = timer();
for (int j = 0; j + readCount + writeCount <= testData[i].size(); j += readCount + writeCount) {
for (int j = 0; j + readCount + writeCount <= data.size(); j += readCount + writeCount) {
CommitTransactionRef tr;
for (int k = 0; k < readCount; k++) {
KeyRangeRef r(buf, testData[i][j + k]);
KeyRangeRef r(buf, data[j + k]);
tr.read_conflict_ranges.push_back(buf, r);
}
for (int k = 0; k < writeCount; k++) {
KeyRangeRef r(buf, testData[i][j + readCount + k]);
KeyRangeRef r(buf, data[j + readCount + k]);
tr.write_conflict_ranges.push_back(buf, r);
}
cranges += tr.read_conflict_ranges.size() + tr.write_conflict_ranges.size();
tr.read_snapshot = i;
tr.read_snapshot = version;
trs.push_back(tr);
}
tcount += trs.size();
@ -1053,12 +1067,16 @@ void skipListTest() {
t = timer();
ConflictBatch batch(cs);
for (int j = 0; j < trs.size(); j++) batch.addTransaction(trs[j]);
for (const auto& tr : trs) {
batch.addTransaction(tr);
}
g_add += timer() - t;
t = timer();
batch.detectConflicts(i + 50, i, nonConflict[i]);
batch.detectConflicts(version + 50, version, nonConflict[version]);
g_detectConflicts += timer() - t;
version++;
}
double elapsed = timer() - start;
printf("New conflict set: %0.3f sec\n", elapsed);
@ -1076,8 +1094,8 @@ void skipListTest() {
printf(" %0.3f Mkeys/sec\n", cranges * 2 / elapsed / 1e6);
printf("Performance counters:\n");
for (int c = 0; c < skc.size(); c++) {
printf("%20s: %s\n", skc[c]->getMetric().name().c_str(), skc[c]->getMetric().formatted().c_str());
for (const auto& counter : skc) {
printf("%20s: %s\n", counter->getMetric().name().c_str(), counter->getMetric().formatted().c_str());
}
printf("%d entries in version history\n", cs->versionHistory.count());

View File

@ -0,0 +1,60 @@
/*
* SpanContextMessage.h
*
* This source file is part of the FoundationDB open source project
*
* Copyright 2013-2020 Apple Inc. and the FoundationDB project authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef FDBSERVER_SPANCONTEXTMESSAGE_H
#define FDBSERVER_SPANCONTEXTMESSAGE_H
#pragma once
#include "fdbclient/FDBTypes.h"
#include "fdbclient/CommitTransaction.h"
// Carries the identifier of the distributed-tracing span that produced the
// mutations which follow it in the transaction log stream.
struct SpanContextMessage {
	// This message is pushed into the transaction logs' memory to inform
	// it what transaction subsequent mutations were a part of. This allows
	// transaction logs and storage servers to associate mutations with a
	// transaction identifier, called a span context.
	//
	// This message is similar to LogProtocolMessage. Storage servers read the
	// first byte of this message to uniquely identify it, meaning it will
	// never be mistaken for another message. See LogProtocolMessage.h for more
	// information.
	SpanID spanContext;
	SpanContextMessage() {}
	SpanContextMessage(SpanID const& spanContext) : spanContext(spanContext) {}
	// Human-readable rendering: the reserved message code followed by the span id.
	std::string toString() const {
		return format("code: %d, span context: %s", MutationRef::Reserved_For_SpanContextMessage, spanContext.toString().c_str());
	}
	template <class Ar>
	void serialize(Ar& ar) {
		// The reserved code byte is serialized first so that readers can
		// distinguish this message from a MutationRef by peeking one byte
		// (see startsSpanContextMessage / isNextIn below). Do not reorder.
		uint8_t poly = MutationRef::Reserved_For_SpanContextMessage;
		serializer(ar, poly, spanContext);
	}
	// True iff 'byte' is the reserved code that introduces a SpanContextMessage.
	static bool startsSpanContextMessage(uint8_t byte) {
		return byte == MutationRef::Reserved_For_SpanContextMessage;
	}
	// Peeks (without consuming) the next byte of 'ar' to test whether a
	// SpanContextMessage is next in the stream.
	template <class Ar> static bool isNextIn(Ar& ar) { return startsSpanContextMessage(*(const uint8_t*)ar.peekBytes(1)); }
};
#endif

View File

@ -1763,6 +1763,10 @@ ACTOR Future<Void> pullAsyncData( StorageCacheData *data ) {
dbgLastMessageWasProtocol = true;
cloneCursor1->setProtocolVersion(cloneReader.protocolVersion());
}
else if (cloneReader.protocolVersion().hasSpanContext() && SpanContextMessage::isNextIn(cloneReader)) {
SpanContextMessage scm;
cloneReader >> scm;
}
else {
MutationRef msg;
cloneReader >> msg;
@ -1835,6 +1839,10 @@ ACTOR Future<Void> pullAsyncData( StorageCacheData *data ) {
data->logProtocol = reader.protocolVersion();
cloneCursor2->setProtocolVersion(data->logProtocol);
}
else if (reader.protocolVersion().hasSpanContext() && SpanContextMessage::isNextIn(reader)) {
SpanContextMessage scm;
reader >> scm;
}
else {
MutationRef msg;
reader >> msg;

View File

@ -240,6 +240,7 @@ struct TLogCommitReply {
struct TLogCommitRequest {
constexpr static FileIdentifier file_identifier = 4022206;
SpanID spanContext;
Arena arena;
Version prevVersion, version, knownCommittedVersion, minKnownCommittedVersion;
@ -249,11 +250,11 @@ struct TLogCommitRequest {
Optional<UID> debugID;
TLogCommitRequest() {}
TLogCommitRequest( const Arena& a, Version prevVersion, Version version, Version knownCommittedVersion, Version minKnownCommittedVersion, StringRef messages, Optional<UID> debugID )
: arena(a), prevVersion(prevVersion), version(version), knownCommittedVersion(knownCommittedVersion), minKnownCommittedVersion(minKnownCommittedVersion), messages(messages), debugID(debugID) {}
TLogCommitRequest( const SpanID& context, const Arena& a, Version prevVersion, Version version, Version knownCommittedVersion, Version minKnownCommittedVersion, StringRef messages, Optional<UID> debugID )
: spanContext(context), arena(a), prevVersion(prevVersion), version(version), knownCommittedVersion(knownCommittedVersion), minKnownCommittedVersion(minKnownCommittedVersion), messages(messages), debugID(debugID) {}
template <class Ar>
void serialize( Ar& ar ) {
serializer(ar, prevVersion, version, knownCommittedVersion, minKnownCommittedVersion, messages, reply, arena, debugID);
serializer(ar, prevVersion, version, knownCommittedVersion, minKnownCommittedVersion, messages, reply, arena, debugID, spanContext);
}
};

View File

@ -28,6 +28,7 @@
#include "fdbclient/FDBTypes.h"
#include "fdbserver/WorkerInterface.actor.h"
#include "fdbserver/LogProtocolMessage.h"
#include "fdbserver/SpanContextMessage.h"
#include "fdbserver/TLogInterface.h"
#include "fdbserver/Knobs.h"
#include "fdbserver/IKeyValueStore.h"
@ -103,7 +104,7 @@ typedef Standalone<TLogQueueEntryRef> TLogQueueEntry;
struct LogData;
struct TLogData;
struct TLogQueue : public IClosable {
struct TLogQueue final : public IClosable {
public:
TLogQueue( IDiskQueue* queue, UID dbgid ) : queue(queue), dbgid(dbgid) {}
@ -135,10 +136,16 @@ public:
Future<Void> commit() { return queue->commit(); }
// Implements IClosable
virtual Future<Void> getError() { return queue->getError(); }
virtual Future<Void> onClosed() { return queue->onClosed(); }
virtual void dispose() { queue->dispose(); delete this; }
virtual void close() { queue->close(); delete this; }
Future<Void> getError() override { return queue->getError(); }
Future<Void> onClosed() override { return queue->onClosed(); }
void dispose() override {
queue->dispose();
delete this;
}
void close() override {
queue->close();
delete this;
}
private:
IDiskQueue* queue;
@ -1849,6 +1856,7 @@ ACTOR Future<Void> tLogCommit(
TLogCommitRequest req,
Reference<LogData> logData,
PromiseStream<Void> warningCollectorInput ) {
state Span span("TLog:tLogCommit"_loc, req.spanContext);
state Optional<UID> tlogDebugID;
if(req.debugID.present())
{

View File

@ -527,11 +527,13 @@ struct TagPartitionedLogSystem : ILogSystem, ReferenceCounted<TagPartitionedLogS
}
Future<Version> push(Version prevVersion, Version version, Version knownCommittedVersion,
Version minKnownCommittedVersion, LogPushData& data, Optional<UID> debugID) final {
Version minKnownCommittedVersion, LogPushData& data,
SpanID const& spanContext, Optional<UID> debugID) final {
// FIXME: Randomize request order as in LegacyLogSystem?
vector<Future<Void>> quorumResults;
vector<Future<TLogCommitReply>> allReplies;
int location = 0;
Span span("TPLS:push"_loc, spanContext);
for(auto& it : tLogs) {
if(it->isLocal && it->logServers.size()) {
if(it->connectionResetTrackers.size() == 0) {
@ -542,7 +544,7 @@ struct TagPartitionedLogSystem : ILogSystem, ReferenceCounted<TagPartitionedLogS
vector<Future<Void>> tLogCommitResults;
for(int loc=0; loc< it->logServers.size(); loc++) {
Standalone<StringRef> msg = data.getMessages(location);
allReplies.push_back( recordPushMetrics( it->connectionResetTrackers[loc], it->logServers[loc]->get().interf().address(), it->logServers[loc]->get().interf().commit.getReply( TLogCommitRequest( msg.arena(), prevVersion, version, knownCommittedVersion, minKnownCommittedVersion, msg, debugID ), TaskPriority::ProxyTLogCommitReply ) ) );
allReplies.push_back( recordPushMetrics( it->connectionResetTrackers[loc], it->logServers[loc]->get().interf().address(), it->logServers[loc]->get().interf().commit.getReply( TLogCommitRequest( spanContext, msg.arena(), prevVersion, version, knownCommittedVersion, minKnownCommittedVersion, msg, debugID ), TaskPriority::ProxyTLogCommitReply ) ) );
Future<Void> commitSuccess = success(allReplies.back());
addActor.get().send(commitSuccess);
tLogCommitResults.push_back(commitSuccess);

View File

@ -2974,7 +2974,7 @@ struct InPlaceArray {
};
#pragma pack(pop)
class VersionedBTree : public IVersionedStore {
class VersionedBTree final : public IVersionedStore {
public:
// The first possible internal record possible in the tree
static RedwoodRecordRef dbBegin;
@ -3863,19 +3863,19 @@ private:
virtual ~SuperPage() { delete[] m_data; }
virtual Reference<IPage> clone() const {
Reference<IPage> clone() const override {
return Reference<IPage>(new SuperPage({ Reference<const IPage>::addRef(this) }));
}
void addref() const { ReferenceCounted<SuperPage>::addref(); }
void addref() const override { ReferenceCounted<SuperPage>::addref(); }
void delref() const { ReferenceCounted<SuperPage>::delref(); }
void delref() const override { ReferenceCounted<SuperPage>::delref(); }
int size() const { return m_size; }
int size() const override { return m_size; }
uint8_t const* begin() const { return m_data; }
uint8_t const* begin() const override { return m_data; }
uint8_t* mutate() { return m_data; }
uint8_t* mutate() override { return m_data; }
private:
uint8_t* m_data;

View File

@ -1627,7 +1627,7 @@ ACTOR Future<Void> masterCore( Reference<MasterData> self ) {
}
}
applyMetadataMutations(self->dbgid, recoveryCommitRequest.arena, tr.mutations.slice(mmApplied, tr.mutations.size()),
applyMetadataMutations(SpanID(), self->dbgid, recoveryCommitRequest.arena, tr.mutations.slice(mmApplied, tr.mutations.size()),
self->txnStateStore);
mmApplied = tr.mutations.size();

View File

@ -43,6 +43,7 @@
#include "fdbserver/Knobs.h"
#include "fdbserver/LatencyBandConfig.h"
#include "fdbserver/LogProtocolMessage.h"
#include "fdbserver/SpanContextMessage.h"
#include "fdbserver/LogSystem.h"
#include "fdbserver/MoveKeys.actor.h"
#include "fdbserver/MutationTracking.h"
@ -2785,6 +2786,7 @@ private:
ACTOR Future<Void> update( StorageServer* data, bool* pReceivedUpdate )
{
state double start;
state Span span("SS:update"_loc);
try {
// If we are disk bound and durableVersion is very old, we need to block updates or we could run out of memory
// This is often referred to as the storage server e-brake (emergency brake)
@ -2856,6 +2858,10 @@ ACTOR Future<Void> update( StorageServer* data, bool* pReceivedUpdate )
dbgLastMessageWasProtocol = true;
cloneCursor1->setProtocolVersion(cloneReader.protocolVersion());
}
else if (cloneReader.protocolVersion().hasSpanContext() && SpanContextMessage::isNextIn(cloneReader)) {
SpanContextMessage scm;
cloneReader >> scm;
}
else {
MutationRef msg;
cloneReader >> msg;
@ -2949,6 +2955,11 @@ ACTOR Future<Void> update( StorageServer* data, bool* pReceivedUpdate )
data->storage.changeLogProtocol(ver, data->logProtocol);
cloneCursor2->setProtocolVersion(rd.protocolVersion());
}
else if (rd.protocolVersion().hasSpanContext() && SpanContextMessage::isNextIn(rd)) {
SpanContextMessage scm;
rd >> scm;
span.addParent(scm.spanContext);
}
else {
MutationRef msg;
rd >> msg;
@ -3273,6 +3284,9 @@ ACTOR Future<bool> asyncPrepareVersionsForCommit_impl(StorageServerDisk* self, S
}
if (stopEarly.isReady()) {
// Previous commit is done.
if (stopEarly.isError()) {
throw stopEarly.getError();
}
break;
}
} else {
@ -3297,7 +3311,8 @@ ACTOR Future<bool> asyncPrepareVersionsForCommit_impl(StorageServerDisk* self, S
// Set the new durable version as part of the outstanding change set, before commit
data->storage.makeVersionDurable( newOldestVersion );
}
debug_advanceMaxCommittedVersion( data->thisServerID, newOldestVersion );
debug_advanceMaxCommittedVersion(data->thisServerID, newOldestVersion);
wait(forgetter.signal());
return finalCommit;
}

View File

@ -78,11 +78,11 @@ Key doubleToTestKey(double p, const KeyRef& prefix) {
return doubleToTestKey(p).withPrefix(prefix);
}
Key KVWorkload::getRandomKey() {
Key KVWorkload::getRandomKey() const {
return getRandomKey(absentFrac);
}
Key KVWorkload::getRandomKey(double absentFrac) {
Key KVWorkload::getRandomKey(double absentFrac) const {
if ( absentFrac > 0.0000001 ) {
return getRandomKey(deterministicRandom()->random01() < absentFrac);
} else {
@ -90,11 +90,11 @@ Key KVWorkload::getRandomKey(double absentFrac) {
}
}
Key KVWorkload::getRandomKey(bool absent) {
Key KVWorkload::getRandomKey(bool absent) const {
return keyForIndex(deterministicRandom()->randomInt( 0, nodeCount ), absent);
}
Key KVWorkload::keyForIndex( uint64_t index ) {
Key KVWorkload::keyForIndex(uint64_t index) const {
if ( absentFrac > 0.0000001 ) {
return keyForIndex(index, deterministicRandom()->random01() < absentFrac);
} else {
@ -102,7 +102,7 @@ Key KVWorkload::keyForIndex( uint64_t index ) {
}
}
Key KVWorkload::keyForIndex( uint64_t index, bool absent ) {
Key KVWorkload::keyForIndex(uint64_t index, bool absent) const {
int adjustedKeyBytes = (absent) ? (keyBytes + 1) : keyBytes;
Key result = makeString( adjustedKeyBytes );
uint8_t* data = mutateString( result );
@ -254,32 +254,34 @@ struct CompoundWorkload : TestWorkload {
CompoundWorkload( WorkloadContext& wcx ) : TestWorkload( wcx ) {}
CompoundWorkload* add( TestWorkload* w ) { workloads.push_back(w); return this; }
virtual ~CompoundWorkload() { for(int w=0; w<workloads.size(); w++) delete workloads[w]; }
virtual std::string description() {
~CompoundWorkload() {
for (int w = 0; w < workloads.size(); w++) delete workloads[w];
}
std::string description() const override {
std::string d;
for(int w=0; w<workloads.size(); w++)
d += workloads[w]->description() + (w==workloads.size()-1?"":";");
return d;
}
virtual Future<Void> setup( Database const& cx ) {
Future<Void> setup(Database const& cx) override {
vector<Future<Void>> all;
for(int w=0; w<workloads.size(); w++)
all.push_back( workloads[w]->setup(cx) );
return waitForAll(all);
}
virtual Future<Void> start( Database const& cx ) {
Future<Void> start(Database const& cx) override {
vector<Future<Void>> all;
for(int w=0; w<workloads.size(); w++)
all.push_back( workloads[w]->start(cx) );
return waitForAll(all);
}
virtual Future<bool> check( Database const& cx ) {
Future<bool> check(Database const& cx) override {
vector<Future<bool>> all;
for(int w=0; w<workloads.size(); w++)
all.push_back( workloads[w]->check(cx) );
return allTrue(all);
}
virtual void getMetrics( vector<PerfMetric>& m ) {
void getMetrics(vector<PerfMetric>& m) override {
for(int w=0; w<workloads.size(); w++) {
vector<PerfMetric> p;
workloads[w]->getMetrics(p);
@ -287,7 +289,7 @@ struct CompoundWorkload : TestWorkload {
m.push_back( p[i].withPrefix( workloads[w]->description()+"." ) );
}
}
virtual double getCheckTimeout() {
double getCheckTimeout() const override {
double m = 0;
for(int w=0; w<workloads.size(); w++)
m = std::max( workloads[w]->getCheckTimeout(), m );

View File

@ -355,6 +355,7 @@ struct TLogOptions {
"_LS_" + boost::lexical_cast<std::string>(spillType);
break;
case TLogVersion::V5:
case TLogVersion::V6:
toReturn = "V_" + boost::lexical_cast<std::string>(version);
break;
}
@ -376,6 +377,7 @@ TLogFn tLogFnForOptions( TLogOptions options ) {
else
return oldTLog_6_2::tLog;
case TLogVersion::V5:
case TLogVersion::V6:
return tLog;
default:
ASSERT(false);

View File

@ -122,11 +122,9 @@ public:
virtual ~ApiCorrectnessWorkload(){ }
std::string description() {
return "ApiCorrectness";
}
std::string description() const override { return "ApiCorrectness"; }
void getMetrics(vector<PerfMetric>& m) {
void getMetrics(vector<PerfMetric>& m) override {
m.push_back(PerfMetric("Number of Random Operations Performed", numRandomOperations.getValue(), false));
}

View File

@ -285,9 +285,9 @@ struct ApiWorkload : TestWorkload {
}
}
Future<Void> setup(Database const& cx);
Future<Void> start(Database const& cx);
Future<bool> check(Database const& cx);
Future<Void> setup(Database const& cx) override;
Future<Void> start(Database const& cx) override;
Future<bool> check(Database const& cx) override;
//Compares the contents of this client's key-space in the database with the in-memory key-value store
Future<bool> compareDatabaseToMemory();

View File

@ -97,13 +97,9 @@ struct AsyncFileCorrectnessWorkload : public AsyncFileWorkload
virtual ~AsyncFileCorrectnessWorkload(){ }
virtual std::string description()
{
return "AsyncFileCorrectness";
}
std::string description() const override { return "AsyncFileCorrectness"; }
Future<Void> setup(Database const& cx)
{
Future<Void> setup(Database const& cx) override {
if(enabled)
return _setup(this);
@ -147,8 +143,7 @@ struct AsyncFileCorrectnessWorkload : public AsyncFileWorkload
fileSize = newFileSize;
}
Future<Void> start(Database const& cx)
{
Future<Void> start(Database const& cx) override {
if(enabled)
return _start(this);
@ -371,8 +366,7 @@ struct AsyncFileCorrectnessWorkload : public AsyncFileWorkload
//Checks if a file is already locked for a given set of bytes. The file is locked if it is being written (fileLock[i] = 0xFFFFFFFF)
//or if we are trying to perform a write and the read count is nonzero (fileLock[i] != 0)
bool checkFileLocked(int operation, int offset, int length)
{
bool checkFileLocked(int operation, int offset, int length) const {
for(int i = offset; i < offset + length && i < fileLock.size(); i++)
if(fileLock[i] == 0xFFFFFFFF || (fileLock[i] != 0 && operation == WRITE))
return true;
@ -381,8 +375,7 @@ struct AsyncFileCorrectnessWorkload : public AsyncFileWorkload
}
//Populates a buffer with a random sequence of bytes
void generateRandomData(unsigned char *buffer, int length)
{
void generateRandomData(unsigned char* buffer, int length) const {
for(int i = 0; i < length; i+= sizeof(uint32_t))
{
uint32_t val = deterministicRandom()->randomUInt32();
@ -491,13 +484,9 @@ struct AsyncFileCorrectnessWorkload : public AsyncFileWorkload
return info;
}
virtual Future<bool> check(Database const& cx)
{
return success;
}
Future<bool> check(Database const& cx) override { return success; }
virtual void getMetrics(vector<PerfMetric>& m)
{
void getMetrics(vector<PerfMetric>& m) override {
if(enabled)
{
m.push_back(PerfMetric("Number of Operations Performed", numOperations.getValue(), false));

View File

@ -182,13 +182,9 @@ struct AsyncFileReadWorkload : public AsyncFileWorkload
virtual ~AsyncFileReadWorkload(){ }
virtual std::string description()
{
return "AsyncFileRead";
}
std::string description() const override { return "AsyncFileRead"; }
virtual Future<Void> setup(Database const& cx)
{
Future<Void> setup(Database const& cx) override {
if(enabled)
return _setup(this);
@ -213,8 +209,7 @@ struct AsyncFileReadWorkload : public AsyncFileWorkload
return Void();
}
virtual Future<Void> start(Database const& cx)
{
Future<Void> start(Database const& cx) override {
if(enabled)
return _start(this);
@ -335,7 +330,7 @@ struct AsyncFileReadWorkload : public AsyncFileWorkload
}
}
virtual void getMetrics(std::vector<PerfMetric>& m) {
void getMetrics(std::vector<PerfMetric>& m) override {
if (enabled) {
m.emplace_back("Bytes read/sec", bytesRead.getValue() / testDuration, false);
m.emplace_back("Average CPU Utilization (Percentage)", averageCpuUtilization * 100, false);

View File

@ -54,15 +54,9 @@ struct AsyncFileWriteWorkload : public AsyncFileWorkload
sequential = getOption(options, LiteralStringRef("sequential"), true);
}
virtual ~AsyncFileWriteWorkload(){ }
std::string description() const override { return "AsyncFileWrite"; }
virtual std::string description()
{
return "AsyncFileWrite";
}
virtual Future<Void> setup(Database const& cx)
{
Future<Void> setup(Database const& cx) override {
if(enabled)
return _setup(this);
@ -91,8 +85,7 @@ struct AsyncFileWriteWorkload : public AsyncFileWorkload
return Void();
}
virtual Future<Void> start(Database const& cx)
{
Future<Void> start(Database const& cx) override {
if(enabled)
return _start(this);
@ -162,8 +155,7 @@ struct AsyncFileWriteWorkload : public AsyncFileWorkload
}
}
virtual void getMetrics(vector<PerfMetric>& m)
{
void getMetrics(vector<PerfMetric>& m) override {
if(enabled)
{
m.push_back(PerfMetric("Bytes written/sec", bytesWritten.getValue() / testDuration, false));

View File

@ -108,9 +108,9 @@ struct AtomicOpsWorkload : TestWorkload {
TraceEvent("AtomicWorkload").detail("OpType", opType);
}
virtual std::string description() { return "AtomicOps"; }
std::string description() const override { return "AtomicOps"; }
virtual Future<Void> setup( Database const& cx ) {
Future<Void> setup(Database const& cx) override {
if (apiVersion500)
cx->apiVersion = 500;
@ -119,7 +119,7 @@ struct AtomicOpsWorkload : TestWorkload {
return _setup( cx, this );
}
virtual Future<Void> start( Database const& cx ) {
Future<Void> start(Database const& cx) override {
for (int c = 0; c < actorCount; c++) {
clients.push_back(
timeout(atomicOpWorker(cx->clone(), this, actorCount / transactionsPerSecond), testDuration, Void()));
@ -128,14 +128,13 @@ struct AtomicOpsWorkload : TestWorkload {
return delay(testDuration);
}
virtual Future<bool> check( Database const& cx ) {
Future<bool> check(Database const& cx) override {
if(clientId != 0)
return true;
return _check( cx, this );
}
virtual void getMetrics( vector<PerfMetric>& m ) {
}
void getMetrics(vector<PerfMetric>& m) override {}
std::pair<Key, Key> logDebugKey(int group) {
Key logKey(format("log%08x%08x%08x", group, clientId, opNum));

View File

@ -49,13 +49,11 @@ public:
opType = getOption(options, LiteralStringRef("opType"), -1);
}
virtual std::string description() { return "AtomicOpsApiCorrectness"; }
std::string description() const override { return "AtomicOpsApiCorrectness"; }
virtual Future<Void> setup(Database const& cx) {
return Void();
}
Future<Void> setup(Database const& cx) override { return Void(); }
virtual Future<Void> start(Database const& cx) {
Future<Void> start(Database const& cx) override {
if (opType == -1)
opType = sharedRandomNumber % 9;
@ -94,12 +92,9 @@ public:
return Void();
}
virtual Future<bool> check(Database const& cx) {
return !testFailed;
}
Future<bool> check(Database const& cx) override { return !testFailed; }
virtual void getMetrics(vector<PerfMetric>& m) {
}
virtual void getMetrics(vector<PerfMetric>& m) override {}
// Test Atomic ops on non existing keys that results in a set
ACTOR Future<Void> testAtomicOpSetOnNonExistingKey(Database cx, AtomicOpsApiCorrectnessWorkload* self, uint32_t opType, Key key) {

View File

@ -68,28 +68,21 @@ struct AtomicRestoreWorkload : TestWorkload {
ASSERT(removePrefix.size() == 0);
}
virtual std::string description() {
return "AtomicRestore";
}
std::string description() const override { return "AtomicRestore"; }
virtual Future<Void> setup(Database const& cx) {
return Void();
}
Future<Void> setup(Database const& cx) override { return Void(); }
virtual Future<Void> start(Database const& cx) {
Future<Void> start(Database const& cx) override {
if (clientId != 0)
return Void();
return _start(cx, this);
}
virtual Future<bool> check(Database const& cx) {
return true;
}
Future<bool> check(Database const& cx) override { return true; }
virtual void getMetrics(vector<PerfMetric>& m) {
}
void getMetrics(vector<PerfMetric>& m) override {}
bool hasPrefix() { return addPrefix != LiteralStringRef("") || removePrefix != LiteralStringRef(""); }
bool hasPrefix() const { return addPrefix != LiteralStringRef("") || removePrefix != LiteralStringRef(""); }
ACTOR static Future<Void> _start(Database cx, AtomicRestoreWorkload* self) {
state FileBackupAgent backupAgent;

View File

@ -43,11 +43,9 @@ struct AtomicSwitchoverWorkload : TestWorkload {
extraDB = Database::createDatabase(extraFile, -1);
}
virtual std::string description() {
return "AtomicSwitchover";
}
std::string description() const override { return "AtomicSwitchover"; }
virtual Future<Void> setup(Database const& cx) {
Future<Void> setup(Database const& cx) override {
if (clientId != 0)
return Void();
return _setup(cx, this);
@ -66,18 +64,15 @@ struct AtomicSwitchoverWorkload : TestWorkload {
return Void();
}
virtual Future<Void> start(Database const& cx) {
Future<Void> start(Database const& cx) override {
if (clientId != 0)
return Void();
return _start(cx, this);
}
virtual Future<bool> check(Database const& cx) {
return true;
}
Future<bool> check(Database const& cx) override { return true; }
virtual void getMetrics(vector<PerfMetric>& m) {
}
void getMetrics(vector<PerfMetric>& m) override {}
ACTOR static Future<Void> diffRanges(Standalone<VectorRef<KeyRangeRef>> ranges, StringRef backupPrefix, Database src, Database dest) {
state int rangeIndex;

View File

@ -49,15 +49,11 @@ struct BackgroundSelectorWorkload : TestWorkload {
resultLimit = 10*maxDiff;
}
virtual std::string description() { return "BackgroundSelector"; }
std::string description() const override { return "BackgroundSelector"; }
virtual Future<Void> setup( Database const& cx ) {
return Void();
}
Future<Void> setup(Database const& cx) override { return Void(); }
virtual Future<Void> start( Database const& cx ) {
return _start( cx, this );
}
Future<Void> start(Database const& cx) override { return _start(cx, this); }
ACTOR Future<Void> _start( Database cx, BackgroundSelectorWorkload *self ) {
for(int c=0; c<self->actorsPerClient; c++)
@ -68,7 +64,7 @@ struct BackgroundSelectorWorkload : TestWorkload {
return Void();
}
virtual Future<bool> check( Database const& cx ) {
Future<bool> check(Database const& cx) override {
bool ok = true;
for( int i = 0; i < clients.size(); i++ )
if( clients[i].isError() )
@ -77,7 +73,7 @@ struct BackgroundSelectorWorkload : TestWorkload {
return ok;
}
virtual void getMetrics( vector<PerfMetric>& m ) {
void getMetrics(vector<PerfMetric>& m) override {
double duration = testDuration;
m.push_back( PerfMetric( "Operations/sec", operations.getValue() / duration, false ) );
m.push_back( operations.getMetric() );

View File

@ -136,11 +136,11 @@ struct BackupAndParallelRestoreCorrectnessWorkload : TestWorkload {
}
}
virtual std::string description() { return "BackupAndParallelRestoreCorrectness"; }
std::string description() const override { return "BackupAndParallelRestoreCorrectness"; }
virtual Future<Void> setup(Database const& cx) { return Void(); }
Future<Void> setup(Database const& cx) override { return Void(); }
virtual Future<Void> start(Database const& cx) {
Future<Void> start(Database const& cx) override {
if (clientId != 0) return Void();
TraceEvent(SevInfo, "BARW_Param").detail("Locked", locked);
@ -158,11 +158,11 @@ struct BackupAndParallelRestoreCorrectnessWorkload : TestWorkload {
return _start(cx, this);
}
bool hasPrefix() { return addPrefix != LiteralStringRef("") || removePrefix != LiteralStringRef(""); }
bool hasPrefix() const { return addPrefix != LiteralStringRef("") || removePrefix != LiteralStringRef(""); }
virtual Future<bool> check(Database const& cx) { return true; }
Future<bool> check(Database const& cx) override { return true; }
virtual void getMetrics(vector<PerfMetric>& m) {}
void getMetrics(vector<PerfMetric>& m) override {}
ACTOR static Future<Void> changePaused(Database cx, FileBackupAgent* backupAgent) {
loop {

View File

@ -118,15 +118,11 @@ struct BackupAndRestoreCorrectnessWorkload : TestWorkload {
}
}
virtual std::string description() {
return "BackupAndRestoreCorrectness";
}
std::string description() const override { return "BackupAndRestoreCorrectness"; }
virtual Future<Void> setup(Database const& cx) {
return Void();
}
Future<Void> setup(Database const& cx) override { return Void(); }
virtual Future<Void> start(Database const& cx) {
Future<Void> start(Database const& cx) override {
if (clientId != 0)
return Void();
@ -145,7 +141,7 @@ struct BackupAndRestoreCorrectnessWorkload : TestWorkload {
return _start(cx, this);
}
virtual Future<bool> check(Database const& cx) {
Future<bool> check(Database const& cx) override {
if (clientId != 0)
return true;
else
@ -175,8 +171,7 @@ struct BackupAndRestoreCorrectnessWorkload : TestWorkload {
return true;
}
virtual void getMetrics(vector<PerfMetric>& m) {
}
void getMetrics(vector<PerfMetric>& m) override {}
ACTOR static Future<Void> changePaused(Database cx, FileBackupAgent* backupAgent) {
loop {

View File

@ -42,11 +42,9 @@ struct BackupToDBAbort : TestWorkload {
lockid = UID(0xbeeffeed, 0xdecaf00d);
}
virtual std::string description() override {
return "BackupToDBAbort";
}
std::string description() const override { return "BackupToDBAbort"; }
virtual Future<Void> setup(const Database& cx) override {
Future<Void> setup(const Database& cx) override {
if (clientId != 0) return Void();
return _setup(this, cx);
}
@ -64,7 +62,7 @@ struct BackupToDBAbort : TestWorkload {
return Void();
}
virtual Future<Void> start(Database const& cx) override {
Future<Void> start(Database const& cx) override {
if (clientId != 0) return Void();
return _start(this, cx);
}
@ -100,11 +98,9 @@ struct BackupToDBAbort : TestWorkload {
return true;
}
virtual Future<bool> check(const Database& cx) override {
return _check(this, cx);
}
Future<bool> check(const Database& cx) override { return _check(this, cx); }
virtual void getMetrics(vector<PerfMetric>& m) {}
void getMetrics(vector<PerfMetric>& m) override {}
};
REGISTER_WORKLOAD(BackupToDBAbort);

View File

@ -109,26 +109,19 @@ struct BackupToDBCorrectnessWorkload : TestWorkload {
TraceEvent("BARW_Start").detail("Locked", locked);
}
virtual std::string description() {
return "BackupToDBCorrectness";
}
std::string description() const override { return "BackupToDBCorrectness"; }
virtual Future<Void> setup(Database const& cx) {
return Void();
}
Future<Void> setup(Database const& cx) override { return Void(); }
virtual Future<Void> start(Database const& cx) {
Future<Void> start(Database const& cx) override {
if (clientId != 0)
return Void();
return _start(cx, this);
}
virtual Future<bool> check(Database const& cx) {
return true;
}
Future<bool> check(Database const& cx) override { return true; }
virtual void getMetrics(vector<PerfMetric>& m) {
}
void getMetrics(vector<PerfMetric>& m) override {}
ACTOR static Future<Void> diffRanges(Standalone<VectorRef<KeyRangeRef>> ranges, StringRef backupPrefix, Database src, Database dest) {
state int rangeIndex;

View File

@ -74,28 +74,23 @@ struct BackupToDBUpgradeWorkload : TestWorkload {
TraceEvent("DRU_Start");
}
virtual std::string description() {
return "BackupToDBUpgrade";
}
virtual std::string description() const override { return "BackupToDBUpgrade"; }
virtual Future<Void> setup(Database const& cx) {
Future<Void> setup(Database const& cx) override {
if (clientId != 0)
return Void();
return _setup(cx, this);
}
virtual Future<Void> start(Database const& cx) {
Future<Void> start(Database const& cx) override {
if (clientId != 0)
return Void();
return _start(cx, this);
}
virtual Future<bool> check(Database const& cx) {
return true;
}
Future<bool> check(Database const& cx) override { return true; }
virtual void getMetrics(vector<PerfMetric>& m) {
}
void getMetrics(vector<PerfMetric>& m) override {}
ACTOR static Future<Void> doBackup(BackupToDBUpgradeWorkload* self, DatabaseBackupAgent* backupAgent, Database cx, Key tag, Standalone<VectorRef<KeyRangeRef>> backupRanges) {
try {

View File

@ -49,20 +49,20 @@ struct BulkLoadWorkload : TestWorkload {
keyPrefix = unprintable( keyPrefix.toString() );
}
virtual std::string description() { return "BulkLoad"; }
std::string description() const override { return "BulkLoad"; }
virtual Future<Void> start( Database const& cx ) {
Future<Void> start(Database const& cx) override {
for(int c = 0; c < actorCount; c++)
clients.push_back( timeout( bulkLoadClient( cx, this, clientId, c ), testDuration, Void() ) );
return waitForAll( clients );
}
virtual Future<bool> check( Database const& cx ) {
Future<bool> check(Database const& cx) override {
clients.clear();
return true;
}
virtual void getMetrics( vector<PerfMetric>& m ) {
void getMetrics(vector<PerfMetric>& m) override {
m.push_back( transactions.getMetric() );
m.push_back( retries.getMetric() );
m.push_back( PerfMetric( "Rows written", transactions.getValue() * writesPerTransaction, false ) );

View File

@ -12,22 +12,17 @@ struct CacheWorkload : TestWorkload {
keyPrefix = unprintable( getOption(options, LiteralStringRef("keyPrefix"), LiteralStringRef("")).toString() );
}
virtual std::string description() { return "CacheWorkload"; }
virtual Future<Void> setup( Database const& cx ) {
if (clientId == 0) {
//Call management API to cache keys under the given prefix
return addCachedRange(cx, prefixRange(keyPrefix));
}
return Void();
}
virtual Future<Void> start( Database const& cx ) {
return Void();
}
virtual Future<bool> check( Database const& cx ) {
return true;
}
virtual void getMetrics( vector<PerfMetric>& m ) {
std::string description() const override { return "CacheWorkload"; }
Future<Void> setup(Database const& cx) override {
if (clientId == 0) {
// Call management API to cache keys under the given prefix
return addCachedRange(cx, prefixRange(keyPrefix));
}
return Void();
}
Future<Void> start(Database const& cx) override { return Void(); }
Future<bool> check(Database const& cx) override { return true; }
void getMetrics(vector<PerfMetric>& m) override {}
};
WorkloadFactory<CacheWorkload> CacheWorkloadFactory("Cache");

View File

@ -41,18 +41,16 @@ struct ChangeConfigWorkload : TestWorkload {
networkAddresses = getOption( options, LiteralStringRef("coordinators"), StringRef() ).toString();
}
virtual std::string description() { return "ChangeConfig"; }
std::string description() const override { return "ChangeConfig"; }
virtual Future<Void> start( Database const& cx ) {
Future<Void> start(Database const& cx) override {
if( this->clientId != 0 ) return Void();
return ChangeConfigClient( cx->clone(), this );
}
virtual Future<bool> check( Database const& cx ) {
return true;
}
Future<bool> check(Database const& cx) override { return true; }
virtual void getMetrics( vector<PerfMetric>& m ) {}
void getMetrics(vector<PerfMetric>& m) override {}
ACTOR Future<Void> extraDatabaseConfigure(ChangeConfigWorkload *self) {
if (g_network->isSimulated() && g_simulator.extraDB) {

View File

@ -184,9 +184,9 @@ struct ClientTransactionProfileCorrectnessWorkload : TestWorkload {
TraceEvent(SevInfo, "ClientTransactionProfilingSetup").detail("ClientId", clientId).detail("SamplingProbability", samplingProbability).detail("TrInfoSizeLimit", trInfoSizeLimit);
}
virtual std::string description() { return "ClientTransactionProfileCorrectness"; }
std::string description() const override { return "ClientTransactionProfileCorrectness"; }
virtual Future<Void> setup(Database const& cx) {
Future<Void> setup(Database const& cx) override {
if (clientId == 0) {
const_cast<ClientKnobs *>(CLIENT_KNOBS)->CSI_STATUS_DELAY = 2.0; // 2 seconds
return changeProfilingParameters(cx, trInfoSizeLimit, samplingProbability);
@ -194,21 +194,17 @@ struct ClientTransactionProfileCorrectnessWorkload : TestWorkload {
return Void();
}
virtual Future<Void> start(Database const& cx) {
return Void();
}
Future<Void> start(Database const& cx) override { return Void(); }
int getNumChunks(KeyRef key) {
int getNumChunks(KeyRef key) const {
return bigEndian32(BinaryReader::fromStringRef<int>(key.substr(numChunksStartIndex, chunkFormatSize), Unversioned()));
}
int getChunkNum(KeyRef key) {
int getChunkNum(KeyRef key) const {
return bigEndian32(BinaryReader::fromStringRef<int>(key.substr(chunkNumStartIndex, chunkFormatSize), Unversioned()));
}
std::string getTrId(KeyRef key) {
return key.substr(trIdStartIndex, trIdFormatSize).toString();
}
std::string getTrId(KeyRef key) const { return key.substr(trIdStartIndex, trIdFormatSize).toString(); }
bool checkTxInfoEntriesFormat(const Standalone<RangeResultRef> &txInfoEntries) {
std::string val;
@ -337,15 +333,13 @@ struct ClientTransactionProfileCorrectnessWorkload : TestWorkload {
return self->checkTxInfoEntriesFormat(txInfoEntries);
}
virtual Future<bool> check(Database const& cx) {
Future<bool> check(Database const& cx) override {
if (clientId != 0)
return true;
return _check(cx, this);
}
virtual void getMetrics(vector<PerfMetric>& m) {
}
void getMetrics(vector<PerfMetric>& m) override {}
};
WorkloadFactory<ClientTransactionProfileCorrectnessWorkload> ClientTransactionProfileCorrectnessWorkloadFactory("ClientTransactionProfileCorrectness");

View File

@ -33,20 +33,11 @@ struct CommitBugWorkload : TestWorkload
success = true;
}
virtual std::string description()
{
return "CommitBugWorkload";
}
std::string description() const override { return "CommitBugWorkload"; }
virtual Future<Void> setup(Database const& cx)
{
return Void();
}
Future<Void> setup(Database const& cx) override { return Void(); }
virtual Future<Void> start(Database const& cx)
{
return timeout(bug1(cx, this) && bug2(cx, this), 60, Void());
}
Future<Void> start(Database const& cx) override { return timeout(bug1(cx, this) && bug2(cx, this), 60, Void()); }
ACTOR Future<Void> bug1(Database cx, CommitBugWorkload *self)
{
@ -170,15 +161,9 @@ struct CommitBugWorkload : TestWorkload
return Void();
}
virtual Future<bool> check(Database const& cx)
{
return success;
}
Future<bool> check(Database const& cx) override { return success; }
virtual void getMetrics( vector<PerfMetric>& m )
{
}
void getMetrics(vector<PerfMetric>& m) override {}
};
WorkloadFactory<CommitBugWorkload> CommitBugWorkloadFactory("CommitBug");

View File

@ -31,7 +31,7 @@ static const char* storeTypes[] = { "ssd", "ssd-1", "ssd-2", "memory", "memory-1
static const char* logTypes[] = {
"log_engine:=1", "log_engine:=2",
"log_spill:=1", "log_spill:=2",
"log_version:=2", "log_version:=3", "log_version:=4"
"log_version:=2", "log_version:=3", "log_version:=4", "log_version:=5", "log_version:=6"
};
static const char* redundancies[] = { "single", "double", "triple" };
static const char* backupTypes[] = { "backup_worker_enabled:=0", "backup_worker_enabled:=1" };
@ -216,22 +216,14 @@ struct ConfigureDatabaseWorkload : TestWorkload {
g_simulator.usableRegions = 1;
}
virtual std::string description() { return "DestroyDatabaseWorkload"; }
std::string description() const override { return "DestroyDatabaseWorkload"; }
virtual Future<Void> setup( Database const& cx ) {
return _setup( cx, this );
}
Future<Void> setup(Database const& cx) override { return _setup(cx, this); }
virtual Future<Void> start( Database const& cx ) {
return _start( this, cx );
}
virtual Future<bool> check( Database const& cx ) {
return true;
}
Future<Void> start(Database const& cx) override { return _start(this, cx); }
Future<bool> check(Database const& cx) override { return true; }
virtual void getMetrics( vector<PerfMetric>& m ) {
m.push_back( retries.getMetric() );
}
void getMetrics(vector<PerfMetric>& m) override { m.push_back(retries.getMetric()); }
static inline uint64_t valueToUInt64( const StringRef& v ) {
long long unsigned int x = 0;

View File

@ -46,22 +46,18 @@ struct ConflictRangeWorkload : TestWorkload {
testReadYourWrites = getOption( options, LiteralStringRef("testReadYourWrites"), false );
}
virtual std::string description() { return "ConflictRange"; }
std::string description() const override { return "ConflictRange"; }
virtual Future<Void> setup( Database const& cx ) {
return Void();
}
Future<Void> setup(Database const& cx) override { return Void(); }
virtual Future<Void> start( Database const& cx ) {
return _start( cx, this );
}
Future<Void> start(Database const& cx) override { return _start(cx, this); }
virtual Future<bool> check( Database const& cx ) {
Future<bool> check(Database const& cx) override {
clients.clear();
return true;
}
virtual void getMetrics( vector<PerfMetric>& m ) {
void getMetrics(vector<PerfMetric>& m) override {
m.push_back( withConflicts.getMetric() );
m.push_back( withoutConflicts.getMetric() );
m.push_back( retries.getMetric() );

Some files were not shown because too many files have changed in this diff Show More